vserver 1.9.5.x5
index bc1c46c..d04d60a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/mm.h>
 #include <linux/shm.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
 #include <linux/hugetlb.h>
 #include <linux/profile.h>
 #include <linux/module.h>
+#include <linux/acct.h>
 #include <linux/mount.h>
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
 
 #include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
 
@@ -55,15 +56,103 @@ pgprot_t protection_map[16] = {
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 };
 
-int sysctl_overcommit_memory = 0;      /* default is heuristic overcommit */
+int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;      /* default is 50% */
 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
 atomic_t vm_committed_space = ATOMIC_INIT(0);
 
+/*
+ * Check that a process has enough memory to allocate a new virtual
+ * mapping. 0 means there is enough memory for the allocation to
+ * succeed and -ENOMEM implies there is not.
+ *
+ * We currently support three overcommit policies, which are set via the
+ * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
+ *
+ * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
+ * Additional code 2002 Jul 20 by Robert Love.
+ *
+ * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
+ *
+ * Note this is a helper function intended to be used by LSMs which
+ * wish to use this logic.
+ */
+int __vm_enough_memory(long pages, int cap_sys_admin)
+{
+       unsigned long free, allowed;
+
+       vm_acct_memory(pages);
+
+       /*
+        * Sometimes we want to use more memory than we have
+        */
+       if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
+               return 0;
+
+       if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
+               unsigned long n;
+
+               free = get_page_cache_size();
+               free += nr_swap_pages;
+
+               /*
+                * Any slabs which are created with the
+                * SLAB_RECLAIM_ACCOUNT flag claim to have contents
+                * which are reclaimable, under pressure.  The dentry
+                * cache and most inode caches should fall into this category.
+                */
+               free += atomic_read(&slab_reclaim_pages);
+
+               /*
+                * Leave the last 3% for root
+                */
+               if (!cap_sys_admin)
+                       free -= free / 32;
+
+               if (free > pages)
+                       return 0;
+
+               /*
+                * nr_free_pages() is very expensive on large systems,
+                * only call if we're about to fail.
+                */
+               n = nr_free_pages();
+               if (!cap_sys_admin)
+                       n -= n / 32;
+               free += n;
+
+               if (free > pages)
+                       return 0;
+               vm_unacct_memory(pages);
+               return -ENOMEM;
+       }
+
+       allowed = (totalram_pages - hugetlb_total_pages())
+               * sysctl_overcommit_ratio / 100;
+       /*
+        * Leave the last 3% for root
+        */
+       if (!cap_sys_admin)
+               allowed -= allowed / 32;
+       allowed += total_swap_pages;
+
+       /* Don't let a single process grow too big:
+          leave 3% of the size of this process for other processes */
+       allowed -= current->mm->total_vm / 32;
+
+       if (atomic_read(&vm_committed_space) < allowed)
+               return 0;
+
+       vm_unacct_memory(pages);
+
+       return -ENOMEM;
+}
+
 EXPORT_SYMBOL(sysctl_overcommit_memory);
 EXPORT_SYMBOL(sysctl_overcommit_ratio);
 EXPORT_SYMBOL(sysctl_max_map_count);
 EXPORT_SYMBOL(vm_committed_space);
+EXPORT_SYMBOL(__vm_enough_memory);
 
 /*
  * Requires inode->i_mapping->i_mmap_lock
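
For illustration only (not part of the patch): in OVERCOMMIT_NEVER mode the helper above boils down to a fixed commit limit. A minimal userspace C sketch of that arithmetic, with hypothetical inputs and the per-process "leave 3% of this process for other processes" term omitted:

/* Sketch of the OVERCOMMIT_NEVER commit-limit arithmetic above.
 * Inputs are hypothetical; the kernel works in units of pages. */
#include <stdio.h>

static unsigned long commit_limit(unsigned long ram_pages,
                                  unsigned long hugetlb_pages,
                                  unsigned long swap_pages,
                                  unsigned long overcommit_ratio,
                                  int cap_sys_admin)
{
        unsigned long allowed;

        allowed = (ram_pages - hugetlb_pages) * overcommit_ratio / 100;
        if (!cap_sys_admin)
                allowed -= allowed / 32;        /* leave ~3% for root */
        allowed += swap_pages;
        return allowed;
}

int main(void)
{
        /* e.g. 1 GiB RAM, no hugepages, 512 MiB swap, 4 KiB pages, ratio 50 */
        printf("commit limit: %lu pages\n",
               commit_limit(262144, 0, 131072, 50, 0));
        return 0;
}
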
@@ -91,6 +180,7 @@ static void remove_vm_struct(struct vm_area_struct *vma)
 {
        struct file *file = vma->vm_file;
 
+       might_sleep();
        if (file) {
                struct address_space *mapping = file->f_mapping;
                spin_lock(&mapping->i_mmap_lock);
@@ -136,7 +226,7 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
        }
 
        /* Check against rlimit.. */
-       rlim = current->rlim[RLIMIT_DATA].rlim_cur;
+       rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
        if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
                goto out;
 
@@ -280,8 +370,7 @@ static inline void __vma_link_file(struct vm_area_struct *vma)
 
                flush_dcache_mmap_lock(mapping);
                if (unlikely(vma->vm_flags & VM_NONLINEAR))
-                       list_add_tail(&vma->shared.vm_set.list,
-                                       &mapping->i_mmap_nonlinear);
+                       vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
                else
                        vma_prio_tree_insert(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
@@ -307,8 +396,10 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vma->vm_file)
                mapping = vma->vm_file->f_mapping;
 
-       if (mapping)
+       if (mapping) {
                spin_lock(&mapping->i_mmap_lock);
+               vma->vm_truncate_count = mapping->truncate_count;
+       }
        anon_vma_lock(vma);
 
        __vma_link(mm, vma, prev, rb_link, rb_parent);
@@ -318,7 +409,6 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
 
-       mark_mm_hugetlb(mm, vma);
        mm->map_count++;
        validate_mm(mm);
 }
@@ -363,6 +453,7 @@ void vma_adjust(struct vm_area_struct *vma, unsigned long start,
 {
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *next = vma->vm_next;
+       struct vm_area_struct *importer = NULL;
        struct address_space *mapping = NULL;
        struct prio_tree_root *root = NULL;
        struct file *file = vma->vm_file;
@@ -379,6 +470,7 @@ void vma_adjust(struct vm_area_struct *vma, unsigned long start,
 again:                 remove_next = 1 + (end > next->vm_end);
                        end = next->vm_end;
                        anon_vma = next->anon_vma;
+                       importer = vma;
                } else if (end > next->vm_start) {
                        /*
                         * vma expands, overlapping part of the next:
@@ -386,6 +478,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
                         */
                        adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
                        anon_vma = next->anon_vma;
+                       importer = vma;
                } else if (end < vma->vm_end) {
                        /*
                         * vma shrinks, and !insert tells it's not
@@ -394,6 +487,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
                         */
                        adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
                        anon_vma = next->anon_vma;
+                       importer = next;
                }
        }
 
@@ -402,7 +496,16 @@ again:                     remove_next = 1 + (end > next->vm_end);
                if (!(vma->vm_flags & VM_NONLINEAR))
                        root = &mapping->i_mmap;
                spin_lock(&mapping->i_mmap_lock);
+               if (importer &&
+                   vma->vm_truncate_count != next->vm_truncate_count) {
+                       /*
+                        * unmap_mapping_range might be in progress:
+                        * ensure that the expanding vma is rescanned.
+                        */
+                       importer->vm_truncate_count = 0;
+               }
                if (insert) {
+                       insert->vm_truncate_count = vma->vm_truncate_count;
                        /*
                         * Put into prio_tree now, so instantiated pages
                         * are visible to arm/parisc __flush_dcache_page
@@ -419,8 +522,18 @@ again:                     remove_next = 1 + (end > next->vm_end);
         */
        if (vma->anon_vma)
                anon_vma = vma->anon_vma;
-       if (anon_vma)
+       if (anon_vma) {
                spin_lock(&anon_vma->lock);
+               /*
+                * Easily overlooked: when mprotect shifts the boundary,
+                * make sure the expanding vma has anon_vma set if the
+                * shrinking vma had, to cover any anon pages imported.
+                */
+               if (importer && !importer->anon_vma) {
+                       importer->anon_vma = anon_vma;
+                       __anon_vma_link(importer);
+               }
+       }
 
        if (root) {
                flush_dcache_mmap_lock(mapping);
@@ -438,11 +551,8 @@ again:                     remove_next = 1 + (end > next->vm_end);
        }
 
        if (root) {
-               if (adjust_next) {
-                       vma_prio_tree_init(next);
+               if (adjust_next)
                        vma_prio_tree_insert(next, root);
-               }
-               vma_prio_tree_init(vma);
                vma_prio_tree_insert(vma, root);
                flush_dcache_mmap_unlock(mapping);
        }
@@ -721,6 +831,32 @@ none:
        return NULL;
 }
 
+#ifdef CONFIG_PROC_FS
+void __vm_stat_account(struct mm_struct *mm, unsigned long flags,
+                                               struct file *file, long pages)
+{
+       const unsigned long stack_flags
+               = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
+
+#ifdef CONFIG_HUGETLB_PAGE
+       if (flags & VM_HUGETLB) {
+               if (!(flags & VM_DONTCOPY))
+                       mm->shared_vm += pages;
+               return;
+       }
+#endif /* CONFIG_HUGETLB_PAGE */
+
+       if (file) {
+               mm->shared_vm += pages;
+               if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
+                       mm->exec_vm += pages;
+       } else if (flags & stack_flags)
+               mm->stack_vm += pages;
+       if (flags & (VM_RESERVED|VM_IO))
+               mm->reserved_vm += pages;
+}
+#endif /* CONFIG_PROC_FS */
+
 /*
  * The caller must hold down_write(current->mm->mmap_sem).
  */
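
For illustration only (not part of the patch): the shared_vm/exec_vm/stack_vm/reserved_vm counters maintained above feed the per-process memory statistics exported through procfs, which is why the helper is compiled only under CONFIG_PROC_FS. A small userspace sketch that prints the "Vm*" lines of /proc/self/status, where these figures surface (exact field names depend on the kernel version):

/* Print the Vm* lines of /proc/self/status. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/self/status", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                if (strncmp(line, "Vm", 2) == 0)
                        fputs(line, stdout);
        fclose(f);
        return 0;
}
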
@@ -750,6 +886,15 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                    (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
                        return -EPERM;
        }
+       /*
+        * Does the application expect PROT_READ to imply PROT_EXEC?
+        *
+        * (the exception is when the underlying filesystem is noexec
+        *  mounted, in which case we don't add PROT_EXEC.)
+        */
+       if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+               if (!(file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)))
+                       prot |= PROT_EXEC;
 
        if (!len)
                return addr;
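
For illustration only (not part of the patch): the personality flag tested above is set from userspace with personality(2). A minimal sketch (error handling mostly omitted) that requests READ_IMPLIES_EXEC and then creates a PROT_READ-only mapping; with this change applied, the mapping should come out executable as well, except on noexec mounts:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/personality.h>

int main(void)
{
        void *p;

        /* keep the current personality, add READ_IMPLIES_EXEC */
        personality(personality(0xffffffff) | READ_IMPLIES_EXEC);

        p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        printf("mapped %p; check /proc/self/maps for r-xp\n", p);
        return 0;
}
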
@@ -782,15 +927,17 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                        mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
        if (flags & MAP_LOCKED) {
-               if (!capable(CAP_IPC_LOCK))
+               if (!can_do_mlock())
                        return -EPERM;
                vm_flags |= VM_LOCKED;
        }
        /* mlock MCL_FUTURE? */
        if (vm_flags & VM_LOCKED) {
-               unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+               unsigned long locked, lock_limit;
+               locked = mm->locked_vm << PAGE_SHIFT;
+               lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
                locked += len;
-               if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+               if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
        }
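
For illustration only (not part of the patch): with this change, MAP_LOCKED is policed by RLIMIT_MEMLOCK (with CAP_IPC_LOCK overriding the limit) instead of requiring the capability outright. A userspace sketch showing the limit an unprivileged caller is now checked against:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;
        void *p;

        getrlimit(RLIMIT_MEMLOCK, &rl);
        printf("RLIMIT_MEMLOCK cur=%lu max=%lu bytes\n",
               (unsigned long)rl.rlim_cur, (unsigned long)rl.rlim_max);

        /* Within the limit this succeeds for ordinary users; beyond it
         * mmap() fails with EAGAIN unless CAP_IPC_LOCK is held. */
        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
        if (p == MAP_FAILED)
                perror("mmap(MAP_LOCKED)");
        else
                munmap(p, 4096);
        return 0;
}
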
 
@@ -860,7 +1007,7 @@ munmap_back:
 
        /* Check against address space limit. */
        if ((mm->total_vm << PAGE_SHIFT) + len
-           > current->rlim[RLIMIT_AS].rlim_cur)
+           > current->signal->rlim[RLIMIT_AS].rlim_cur)
                return -ENOMEM;
 
        /* check context space, maybe only Private writable mapping? */
@@ -868,7 +1015,7 @@ munmap_back:
                return -ENOMEM;
 
        if (accountable && (!(flags & MAP_NORESERVE) ||
-                       sysctl_overcommit_memory > 1)) {
+                           sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
                if (vm_flags & VM_SHARED) {
                        /* Check memory availability in shmem_file_setup? */
                        vm_flags |= VM_ACCOUNT;
@@ -947,9 +1094,12 @@ munmap_back:
         *         f_op->mmap method. -DaveM
         */
        addr = vma->vm_start;
+       pgoff = vma->vm_pgoff;
+       vm_flags = vma->vm_flags;
 
        if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
                        vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
+               file = vma->vm_file;
                vma_link(mm, vma, prev, rb_link, rb_parent);
                if (correct_wcount)
                        atomic_inc(&inode->i_writecount);
@@ -965,6 +1115,7 @@ munmap_back:
 out:   
        // mm->total_vm += len >> PAGE_SHIFT;
        vx_vmpages_add(mm, len >> PAGE_SHIFT);
+       __vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
                // mm->locked_vm += len >> PAGE_SHIFT;
                vx_vmlocked_add(mm, len >> PAGE_SHIFT);
@@ -976,6 +1127,8 @@ out:
                                        pgoff, flags & MAP_NONBLOCK);
                down_write(&mm->mmap_sem);
        }
+       acct_update_integrals();
+       update_mem_hiwater();
        return addr;
 
 unmap_and_free_vma:
@@ -1008,7 +1161,7 @@ EXPORT_SYMBOL(do_mmap_pgoff);
  * This function "knows" that -ENOMEM has the bits set.
  */
 #ifndef HAVE_ARCH_UNMAPPED_AREA
-static inline unsigned long
+unsigned long
 arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
 {
@@ -1052,12 +1205,118 @@ full_search:
                addr = vma->vm_end;
        }
 }
-#else
-extern unsigned long
-arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
-                       unsigned long, unsigned long);
 #endif 
 
+void arch_unmap_area(struct vm_area_struct *area)
+{
+       /*
+        * Is this a new hole at the lowest possible address?
+        */
+       if (area->vm_start >= TASK_UNMAPPED_BASE &&
+                       area->vm_start < area->vm_mm->free_area_cache)
+               area->vm_mm->free_area_cache = area->vm_start;
+}
+
+/*
+ * This mmap-allocator allocates new areas top-down from below the
+ * stack's low limit (the base):
+ */
+#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+                         const unsigned long len, const unsigned long pgoff,
+                         const unsigned long flags)
+{
+       struct vm_area_struct *vma, *prev_vma;
+       struct mm_struct *mm = current->mm;
+       unsigned long base = mm->mmap_base, addr = addr0;
+       int first_time = 1;
+
+       /* requested length too big for entire address space */
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+
+       /* don't allow allocations above current base */
+       if (mm->free_area_cache > base)
+               mm->free_area_cache = base;
+
+       /* requesting a specific address */
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+               if (TASK_SIZE - len >= addr &&
+                               (!vma || addr + len <= vma->vm_start))
+                       return addr;
+       }
+
+try_again:
+       /* make sure it can fit in the remaining address space */
+       if (mm->free_area_cache < len)
+               goto fail;
+
+       /* either no address requested or can't fit in requested address hole */
+       addr = (mm->free_area_cache - len) & PAGE_MASK;
+       do {
+               /*
+                * Lookup failure means no vma is above this address,
+                * i.e. return with success:
+                */
+               if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+                       return addr;
+
+               /*
+                * new region fits between prev_vma->vm_end and
+                * vma->vm_start, use it:
+                */
+               if (addr+len <= vma->vm_start &&
+                               (!prev_vma || (addr >= prev_vma->vm_end)))
+                       /* remember the address as a hint for next time */
+                       return (mm->free_area_cache = addr);
+               else
+                       /* pull free_area_cache down to the first hole */
+                       if (mm->free_area_cache == vma->vm_end)
+                               mm->free_area_cache = vma->vm_start;
+
+               /* try just below the current vma->vm_start */
+               addr = vma->vm_start-len;
+       } while (len <= vma->vm_start);
+
+fail:
+       /*
+        * if hint left us with no space for the requested
+        * mapping then try again:
+        */
+       if (first_time) {
+               mm->free_area_cache = base;
+               first_time = 0;
+               goto try_again;
+       }
+       /*
+        * A failed mmap() very likely causes application failure,
+        * so fall back to the bottom-up function here. This scenario
+        * can happen with large stack limits and large mmap()
+        * allocations.
+        */
+       mm->free_area_cache = TASK_UNMAPPED_BASE;
+       addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+       /*
+        * Restore the topdown base:
+        */
+       mm->free_area_cache = base;
+
+       return addr;
+}
+#endif
+
+void arch_unmap_area_topdown(struct vm_area_struct *area)
+{
+       /*
+        * Is this a new hole at the highest possible address?
+        */
+       if (area->vm_end > area->vm_mm->free_area_cache)
+               area->vm_mm->free_area_cache = area->vm_end;
+}
+
 unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
@@ -1092,7 +1351,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                return file->f_op->get_unmapped_area(file, addr, len,
                                                pgoff, flags);
 
-       return arch_get_unmapped_area(file, addr, len, pgoff, flags);
+       return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
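
For illustration only (not part of the patch): stripped of the vma bookkeeping, arch_get_unmapped_area_topdown() is essentially "find the highest hole of at least len below base". A self-contained toy version of that search (names and data layout are invented for illustration; it assumes base lies above every used range and that the ranges are sorted and non-overlapping):

#include <stdio.h>

struct range { unsigned long start, end; };     /* [start, end), sorted */

static unsigned long find_gap_topdown(const struct range *used, int n,
                                      unsigned long len, unsigned long base)
{
        unsigned long addr;
        int i;

        if (len > base)
                return 0;                       /* 0 == failure in this toy */
        addr = base - len;
        for (i = n - 1; i >= 0; i--) {          /* walk highest to lowest */
                if (addr >= used[i].end)
                        return addr;            /* hole above used[i] fits */
                if (used[i].start < len)
                        return 0;               /* no room further down */
                addr = used[i].start - len;     /* try just below this range */
        }
        return addr;                            /* hole below the lowest range */
}

int main(void)
{
        struct range used[] = { { 0x1000, 0x3000 }, { 0x8000, 0x9000 } };

        printf("top-down hole at 0x%lx\n",
               find_gap_topdown(used, 2, 0x2000, 0x10000));
        return 0;
}
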
@@ -1170,13 +1429,63 @@ out:
        return prev ? prev->vm_next : vma;
 }
 
+/*
+ * Verify that the stack growth is acceptable and
+ * update accounting. This is shared with both the
+ * grow-up and grow-down cases.
+ */
+static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, unsigned long grow)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       struct rlimit *rlim = current->signal->rlim;
+
+       /* address space limit tests */
+       if (mm->total_vm + grow > rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT)
+               return -ENOMEM;
+
+       /* Stack limit test */
+       if (size > rlim[RLIMIT_STACK].rlim_cur)
+               return -ENOMEM;
+
+       /* mlock limit tests */
+       if (vma->vm_flags & VM_LOCKED) {
+               unsigned long locked;
+               unsigned long limit;
+               locked = mm->locked_vm + grow;
+               limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+               if (locked > limit && !capable(CAP_IPC_LOCK))
+                       return -ENOMEM;
+       }
+
+       if (!vx_vmpages_avail(vma->vm_mm, grow))
+               return -ENOMEM;
+
+       /*
+        * Overcommit..  This must be the final test, as it will
+        * update security statistics.
+        */
+       if (security_vm_enough_memory(grow))
+               return -ENOMEM;
+
+       /* Ok, everything looks good - let it rip */
+       // mm->total_vm += grow;
+       vx_vmpages_add(mm, grow);
+       if (vma->vm_flags & VM_LOCKED)
+               // mm->locked_vm += grow;
+               vx_vmlocked_add(mm, grow);
+       __vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
+       acct_update_integrals();
+       update_mem_hiwater();
+       return 0;
+}
+
 #ifdef CONFIG_STACK_GROWSUP
 /*
  * vma is the first one with address > vma->vm_end.  Have to extend vma.
  */
 int expand_stack(struct vm_area_struct * vma, unsigned long address)
 {
-       unsigned long grow;
+       int error;
 
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
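
For illustration only (not part of the patch): acct_stack_growth() above concentrates the rlimit checks that both expand_stack() variants used to open-code. The same checks restated as a pure function over plain numbers (stack_growth_ok and its parameters are invented names; no locking, security or vserver hooks; a 4 KiB page size is assumed):

#include <errno.h>
#include <stdio.h>

static int stack_growth_ok(unsigned long total_vm_pages,
                           unsigned long locked_vm_pages,
                           unsigned long stack_size_bytes,
                           unsigned long grow_pages,
                           unsigned long rlim_as_bytes,
                           unsigned long rlim_stack_bytes,
                           unsigned long rlim_memlock_bytes,
                           int vm_locked, int cap_ipc_lock)
{
        if (total_vm_pages + grow_pages > (rlim_as_bytes >> 12))
                return -ENOMEM;                 /* address space limit */
        if (stack_size_bytes > rlim_stack_bytes)
                return -ENOMEM;                 /* stack limit */
        if (vm_locked && !cap_ipc_lock &&
            locked_vm_pages + grow_pages > (rlim_memlock_bytes >> 12))
                return -ENOMEM;                 /* mlock limit */
        return 0;
}

int main(void)
{
        /* grow a 1 MiB stack by 16 pages under an 8 MiB stack limit */
        int ret = stack_growth_ok(10000, 0, 1UL << 20, 16,
                                  ~0UL, 8UL << 20, ~0UL, 0, 0);

        printf("stack growth %s\n", ret ? "rejected" : "allowed");
        return 0;
}
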
@@ -1196,30 +1505,21 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
         */
        address += 4 + PAGE_SIZE - 1;
        address &= PAGE_MASK;
-       grow = (address - vma->vm_end) >> PAGE_SHIFT;
+       error = 0;
 
-       /* Overcommit.. vx check first to avoid vm_unacct_memory() */
-       if (!vx_vmpages_avail(vma->vm_mm, grow) ||
-               security_vm_enough_memory(grow)) {
-               anon_vma_unlock(vma);
-               return -ENOMEM;
-       }
-       
-       if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
-                       ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
-                       current->rlim[RLIMIT_AS].rlim_cur) {
-               anon_vma_unlock(vma);
-               vm_unacct_memory(grow);
-               return -ENOMEM;
+       /* Somebody else might have raced and expanded it already */
+       if (address > vma->vm_end) {
+               unsigned long size, grow;
+
+               size = address - vma->vm_start;
+               grow = (address - vma->vm_end) >> PAGE_SHIFT;
+
+               error = acct_stack_growth(vma, size, grow);
+               if (!error)
+                       vma->vm_end = address;
        }
-       vma->vm_end = address;
-       // vma->vm_mm->total_vm += grow;
-       vx_vmpages_add(vma->vm_mm, grow);
-       if (vma->vm_flags & VM_LOCKED)
-               // vma->vm_mm->locked_vm += grow;
-               vx_vmlocked_add(vma->vm_mm, grow);
        anon_vma_unlock(vma);
-       return 0;
+       return error;
 }
 
 struct vm_area_struct *
@@ -1244,7 +1544,7 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
  */
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-       unsigned long grow;
+       int error;
 
        /*
         * We must make sure the anon_vma is allocated
@@ -1260,31 +1560,23 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
         * anon_vma lock to serialize against concurrent expand_stacks.
         */
        address &= PAGE_MASK;
-       grow = (vma->vm_start - address) >> PAGE_SHIFT;
+       error = 0;
 
-       /* Overcommit.. vx check first to avoid vm_unacct_memory() */
-       if (!vx_vmpages_avail(vma->vm_mm, grow) ||
-               security_vm_enough_memory(grow)) {
-               anon_vma_unlock(vma);
-               return -ENOMEM;
-       }
-       
-       if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
-                       ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
-                       current->rlim[RLIMIT_AS].rlim_cur) {
-               anon_vma_unlock(vma);
-               vm_unacct_memory(grow);
-               return -ENOMEM;
+       /* Somebody else might have raced and expanded it already */
+       if (address < vma->vm_start) {
+               unsigned long size, grow;
+
+               size = vma->vm_end - address;
+               grow = (vma->vm_start - address) >> PAGE_SHIFT;
+
+               error = acct_stack_growth(vma, size, grow);
+               if (!error) {
+                       vma->vm_start = address;
+                       vma->vm_pgoff -= grow;
+               }
        }
-       vma->vm_start = address;
-       vma->vm_pgoff -= grow;
-       // vma->vm_mm->total_vm += grow;
-       vx_vmpages_add(vma->vm_mm, grow);
-       if (vma->vm_flags & VM_LOCKED)
-               // vma->vm_mm->locked_vm += grow;
-               vx_vmlocked_add(vma->vm_mm, grow);
        anon_vma_unlock(vma);
-       return 0;
+       return error;
 }
 
 struct vm_area_struct *
@@ -1329,9 +1621,11 @@ static void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
 {
        unsigned long first = start & PGDIR_MASK;
        unsigned long last = end + PGDIR_SIZE - 1;
-       unsigned long start_index, end_index;
        struct mm_struct *mm = tlb->mm;
 
+       if (last > MM_VM_SIZE(mm) || last < end)
+               last = MM_VM_SIZE(mm);
+
        if (!prev) {
                prev = mm->mmap;
                if (!prev)
@@ -1354,23 +1648,18 @@ static void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
                                last = next->vm_start;
                }
                if (prev->vm_end > first)
-                       first = prev->vm_end + PGDIR_SIZE - 1;
+                       first = prev->vm_end;
                break;
        }
 no_mmaps:
        if (last < first)       /* for arches with discontiguous pgd indices */
                return;
-       /*
-        * If the PGD bits are not consecutive in the virtual address, the
-        * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
-        */
-       start_index = pgd_index(first);
-       if (start_index < FIRST_USER_PGD_NR)
-               start_index = FIRST_USER_PGD_NR;
-       end_index = pgd_index(last);
-       if (end_index > start_index) {
-               clear_page_tables(tlb, start_index, end_index - start_index);
-               flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
+       if (first < FIRST_USER_PGD_NR * PGDIR_SIZE)
+               first = FIRST_USER_PGD_NR * PGDIR_SIZE;
+       /* No point trying to free anything if we're in the same pte page */
+       if ((first & PMD_MASK) < (last & PMD_MASK)) {
+               clear_page_range(tlb, first, last);
+               flush_tlb_pgtables(mm, first, last);
        }
 }
 
@@ -1387,17 +1676,12 @@ static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
 
        // area->vm_mm->total_vm -= len >> PAGE_SHIFT;
        vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
-       
+
        if (area->vm_flags & VM_LOCKED)
                // area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
                vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
-       /*
-        * Is this a new hole at the lowest possible address?
-        */
-       if (area->vm_start >= TASK_UNMAPPED_BASE &&
-                               area->vm_start < area->vm_mm->free_area_cache)
-             area->vm_mm->free_area_cache = area->vm_start;
-
+       vm_stat_unaccount(area);
+       area->vm_mm->unmap_area(area);
        remove_vm_struct(area);
 }
 
@@ -1477,6 +1761,9 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
        struct mempolicy *pol;
        struct vm_area_struct *new;
 
+       if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK))
+               return -EINVAL;
+
        if (mm->map_count >= sysctl_max_map_count)
                return -ENOMEM;
 
@@ -1486,7 +1773,6 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 
        /* most fields are the same, copy all, and then fixup */
        *new = *vma;
-       vma_prio_tree_init(new);
 
        if (new_below)
                new->vm_end = addr;
@@ -1539,22 +1825,11 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
                return 0;
        /* we have  start < mpnt->vm_end  */
 
-       if (is_vm_hugetlb_page(mpnt)) {
-               int ret = is_aligned_hugepage_range(start, len);
-
-               if (ret)
-                       return ret;
-       }
-
        /* if it doesn't overlap, we have nothing.. */
        end = start + len;
        if (mpnt->vm_start >= end)
                return 0;
 
-       /* Something will probably happen, so notify. */
-       if (mpnt->vm_file && (mpnt->vm_flags & VM_EXEC))
-               profile_exec_unmap(mm);
        /*
         * If we need to split any vma, do it now to save pain later.
         *
@@ -1563,16 +1838,18 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
         * places tmp vma above, and higher split_vma places tmp vma below.
         */
        if (start > mpnt->vm_start) {
-               if (split_vma(mm, mpnt, start, 0))
-                       return -ENOMEM;
+               int error = split_vma(mm, mpnt, start, 0);
+               if (error)
+                       return error;
                prev = mpnt;
        }
 
        /* Does it split the last one? */
        last = find_vma(mm, end);
        if (last && end > last->vm_start) {
-               if (split_vma(mm, last, end, 1))
-                       return -ENOMEM;
+               int error = split_vma(mm, last, end, 1);
+               if (error)
+                       return error;
        }
        mpnt = prev? prev->vm_next: mm->mmap;
 
@@ -1597,12 +1874,24 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len)
        int ret;
        struct mm_struct *mm = current->mm;
 
+       profile_munmap(addr);
+
        down_write(&mm->mmap_sem);
        ret = do_munmap(mm, addr, len);
        up_write(&mm->mmap_sem);
        return ret;
 }
 
+static inline void verify_mm_writelocked(struct mm_struct *mm)
+{
+#ifdef CONFIG_DEBUG_KERNEL
+       if (unlikely(down_read_trylock(&mm->mmap_sem))) {
+               WARN_ON(1);
+               up_read(&mm->mmap_sem);
+       }
+#endif
+}
+
 /*
  *  this is really a simplified "do_mmap".  it only handles
  *  anonymous maps.  eventually we may be able to do some
@@ -1627,14 +1916,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
         * mlock MCL_FUTURE?
         */
        if (mm->def_flags & VM_LOCKED) {
-               unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+               unsigned long locked, lock_limit;
+               locked = mm->locked_vm << PAGE_SHIFT;
+               lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
                locked += len;
-               if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+               if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
                if (!vx_vmlocked_avail(mm, len >> PAGE_SHIFT))
                        return -ENOMEM;
        }
 
+       /*
+        * mm->mmap_sem is required to protect against another thread
+        * changing the mappings in case we sleep.
+        */
+       verify_mm_writelocked(mm);
+
        /*
         * Clear old maps.  this also does some error checking for us
         */
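
For illustration only (not part of the patch): the mm->def_flags & VM_LOCKED case above is what an earlier mlockall(MCL_FUTURE) sets up, so heap growth through do_brk() is locked and charged against RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK. A small userspace sketch:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
        /* may fail for unprivileged users with a small RLIMIT_MEMLOCK */
        if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
                perror("mlockall");

        /* malloc may extend the heap via brk(); those pages are now locked */
        char *p = malloc(1 << 20);
        if (!p)
                return 1;
        p[0] = 1;
        printf("allocated and touched 1 MiB with MCL_FUTURE in effect\n");
        free(p);
        return 0;
}
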
@@ -1648,7 +1945,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 
        /* Check against address space limits *after* clearing old maps... */
        if ((mm->total_vm << PAGE_SHIFT) + len
-           > current->rlim[RLIMIT_AS].rlim_cur)
+           > current->signal->rlim[RLIMIT_AS].rlim_cur)
                return -ENOMEM;
 
        if (mm->map_count > sysctl_max_map_count)
@@ -1690,6 +1987,8 @@ out:
                vx_vmlocked_add(mm, len >> PAGE_SHIFT);
                make_pages_present(addr, addr + len);
        }
+       acct_update_integrals();
+       update_mem_hiwater();
        return addr;
 }
 
@@ -1702,8 +2001,6 @@ void exit_mmap(struct mm_struct *mm)
        struct vm_area_struct *vma;
        unsigned long nr_accounted = 0;
 
-       profile_exit_mmap(mm);
        lru_add_drain();
 
        spin_lock(&mm->page_table_lock);
@@ -1715,7 +2012,8 @@ void exit_mmap(struct mm_struct *mm)
                                        ~0UL, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
        BUG_ON(mm->map_count);  /* This is just debugging */
-       clear_page_tables(tlb, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
+       clear_page_range(tlb, FIRST_USER_PGD_NR * PGDIR_SIZE, MM_VM_SIZE(mm));
+
        tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm));
 
        vma = mm->mmap;
@@ -1745,7 +2043,7 @@ void exit_mmap(struct mm_struct *mm)
  * and into the inode's i_mmap tree.  If vm_file is non-NULL
  * then i_mmap_lock is taken here.
  */
-void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 {
        struct vm_area_struct * __vma, * prev;
        struct rb_node ** rb_link, * rb_parent;
@@ -1768,8 +2066,9 @@ void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
        }
        __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
        if (__vma && __vma->vm_start < vma->vm_end)
-               BUG();
+               return -ENOMEM;
        vma_link(mm, vma, prev, rb_link, rb_parent);
+       return 0;
 }
 
 /*
@@ -1807,7 +2106,6 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
                if (new_vma) {
                        *new_vma = *vma;
-                       vma_prio_tree_init(new_vma);
                        pol = mpol_copy(vma_policy(vma));
                        if (IS_ERR(pol)) {
                                kmem_cache_free(vm_area_cachep, new_vma);