Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
index f98c388..c34750c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -13,6 +13,7 @@
 #include <linux/pagemap.h>
 #include <linux/swap.h>
 #include <linux/syscalls.h>
+#include <linux/capability.h>
 #include <linux/init.h>
 #include <linux/file.h>
 #include <linux/fs.h>
@@ -24,6 +25,7 @@
 #include <linux/mount.h>
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -61,7 +63,7 @@ pgprot_t protection_map[16] = {
 
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;      /* default is 50% */
-int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 atomic_t vm_committed_space = ATOMIC_INIT(0);
 
 /*
@@ -120,14 +122,26 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
                 * only call if we're about to fail.
                 */
                n = nr_free_pages();
+
+               /*
+                * Leave the reserved pages; they are not for anonymous mappings.
+                */
+               if (n <= totalreserve_pages)
+                       goto error;
+               else
+                       n -= totalreserve_pages;
+
+               /*
+                * Leave the last 3% for root
+                */
                if (!cap_sys_admin)
                        n -= n / 32;
                free += n;
 
                if (free > pages)
                        return 0;
-               vm_unacct_memory(pages);
-               return -ENOMEM;
+
+               goto error;
        }
 
        allowed = (totalram_pages - hugetlb_total_pages())
@@ -143,18 +157,18 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
           leave 3% of the size of this process for other processes */
        allowed -= current->mm->total_vm / 32;
 
-       if (atomic_read(&vm_committed_space) < allowed)
+       /*
+        * cast `allowed' as a signed long because vm_committed_space
+        * sometimes has a negative value
+        */
+       if (atomic_read(&vm_committed_space) < (long)allowed)
                return 0;
-
+error:
        vm_unacct_memory(pages);
 
        return -ENOMEM;
 }
 
-EXPORT_SYMBOL(sysctl_overcommit_memory);
-EXPORT_SYMBOL(sysctl_overcommit_ratio);
-EXPORT_SYMBOL(sysctl_max_map_count);
-EXPORT_SYMBOL(vm_committed_space);
 EXPORT_SYMBOL(__vm_enough_memory);
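The cast added above is load-bearing: vm_committed_space can go negative after
vm_unacct_memory(), and C's usual arithmetic conversions would otherwise promote
it to unsigned long for the comparison, turning a small negative commit count
into a huge value. A minimal userspace sketch of the failure mode (values
invented):

#include <stdio.h>

int main(void)
{
        long committed = -5;          /* vm_committed_space can dip below zero */
        unsigned long allowed = 1000; /* pages we are willing to commit */

        /* Wrong: 'committed' is promoted to unsigned long, -5 becomes huge. */
        if (committed < allowed)
                printf("unsigned compare: commit allowed\n");
        else
                printf("unsigned compare: spurious -ENOMEM\n");

        /* Right: force a signed comparison, as the patch does. */
        if (committed < (long)allowed)
                printf("signed compare: commit allowed\n");
        return 0;
}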
 
 /*
@@ -177,35 +191,38 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
 }
 
 /*
- * Remove one vm structure and free it.
+ * Unlink a file-based vm structure from its prio_tree, to hide
+ * vma from rmap and vmtruncate before freeing its page tables.
  */
-static void remove_vm_struct(struct vm_area_struct *vma)
+void unlink_file_vma(struct vm_area_struct *vma)
 {
        struct file *file = vma->vm_file;
 
-       might_sleep();
        if (file) {
                struct address_space *mapping = file->f_mapping;
                spin_lock(&mapping->i_mmap_lock);
                __remove_shared_vm_struct(vma, file, mapping);
                spin_unlock(&mapping->i_mmap_lock);
        }
+}
+
+/*
+ * Close a vm structure and free it, returning the next.
+ */
+static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+{
+       struct vm_area_struct *next = vma->vm_next;
+
+       might_sleep();
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);
-       if (file)
-               fput(file);
-       anon_vma_unlink(vma);
+       if (vma->vm_file)
+               fput(vma->vm_file);
        mpol_free(vma_policy(vma));
        kmem_cache_free(vm_area_cachep, vma);
+       return next;
 }
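Having remove_vma() return the successor is what lets the teardown loops in
remove_vma_list() and exit_mmap() below collapse to a single statement. The
idiom in isolation, as a userspace sketch:

#include <stdlib.h>

struct node { struct node *next; };

/* Free one node and hand back its successor. */
static struct node *destroy(struct node *n)
{
        struct node *next = n->next;
        free(n);
        return next;
}

static void destroy_list(struct node *head)
{
        while (head)
                head = destroy(head);   /* no separate 'next' temporary */
}

int main(void)
{
        struct node *a = malloc(sizeof(*a));
        struct node *b = malloc(sizeof(*b));

        a->next = b;
        b->next = NULL;
        destroy_list(a);
        return 0;
}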
 
-/*
- *  sys_brk() for the most part doesn't need the global kernel
- *  lock, except when an application is doing something nasty
- *  like trying to un-brk an area that has already been mapped
- *  to a regular file.  in this case, the unmapping will need
- *  to invoke file system routines that need the global lock.
- */
 asmlinkage unsigned long sys_brk(unsigned long brk)
 {
        unsigned long rlim, retval;
@@ -216,6 +233,17 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 
        if (brk < mm->end_code)
                goto out;
+
+       /*
+        * Check against rlimit here. If this check is done later after the test
+        * of oldbrk with newbrk then it can escape the test and let the data
+        * segment grow beyond its set limit in the case where the limit is
+        * not page aligned. -Ram Gupta
+        */
+       rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+       if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+               goto out;
+
        newbrk = PAGE_ALIGN(brk);
        oldbrk = PAGE_ALIGN(mm->brk);
        if (oldbrk == newbrk)
@@ -228,11 +256,6 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
                goto out;
        }
 
-       /* Check against rlimit.. */
-       rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-       if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
-               goto out;
-
        /* Check against existing mmap mappings. */
        if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
                goto out;
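Why the check had to move: oldbrk and newbrk are page-aligned before being
compared, so with a limit that is not a multiple of the page size a request can
land in the same page as the current brk and take the oldbrk == newbrk early
exit without ever reaching the old, later rlimit test. A userspace sketch with
invented numbers (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long start_data = 0x1000;
        unsigned long rlim = 0x2800;                 /* 10 KiB, not page aligned */
        unsigned long cur_brk = start_data + 0x2400; /* 9 KiB in use: legal */
        unsigned long new_brk = start_data + 0x2c00; /* 11 KiB: over the limit */

        /* Old order: both round up to the same page, limit never checked. */
        if (PAGE_ALIGN(new_brk) == PAGE_ALIGN(cur_brk))
                printf("same page: old code returned success here\n");

        /* New order: the limit test runs first and catches it. */
        if (new_brk - start_data > rlim)
                printf("rejected: %lu > %lu\n", new_brk - start_data, rlim);
        return 0;
}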
@@ -290,8 +313,7 @@ void validate_mm(struct mm_struct *mm)
        i = browse_rb(&mm->mm_rb);
        if (i != mm->map_count)
                printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
-       if (bug)
-               BUG();
+       BUG_ON(bug);
 }
 #else
 #define validate_mm(mm) do { } while (0)
@@ -338,6 +360,8 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+       if (vma->vm_flags & VM_EXEC)
+               arch_add_exec_range(mm, vma->vm_end);
        if (prev) {
                vma->vm_next = prev->vm_next;
                prev->vm_next = vma;
@@ -428,8 +452,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
        struct rb_node ** rb_link, * rb_parent;
 
        __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
-       if (__vma && __vma->vm_start < vma->vm_end)
-               BUG();
+       BUG_ON(__vma && __vma->vm_start < vma->vm_end);
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        mm->map_count++;
 }
@@ -442,6 +465,8 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
        rb_erase(&vma->vm_rb, &mm->mm_rb);
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
+       if (vma->vm_flags & VM_EXEC)
+               arch_remove_exec_range(mm, vma->vm_end);
 }
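The arch_add_exec_range()/arch_remove_exec_range() calls threaded through these
link/unlink paths are Exec Shield hooks: the arch keeps a per-mm watermark of
the highest executable address. A rough model of the bookkeeping (not the i386
implementation, which also reloads the user code-segment limit):

/* Per-mm watermark of the highest end address of any VM_EXEC mapping. */
struct mm_model {
        unsigned long exec_limit;
};

static void add_exec_range(struct mm_model *mm, unsigned long end)
{
        if (end > mm->exec_limit)
                mm->exec_limit = end;          /* raise the watermark */
}

static void remove_exec_range(struct mm_model *mm, unsigned long end,
                              const unsigned long *exec_ends, int n)
{
        int i;

        if (end != mm->exec_limit)
                return;                        /* watermark unaffected */

        /* The removed vma defined the limit: rescan for the new maximum. */
        mm->exec_limit = 0;
        for (i = 0; i < n; i++)
                if (exec_ends[i] > mm->exec_limit)
                        mm->exec_limit = exec_ends[i];
}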
 
 /*
@@ -608,7 +633,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
                        struct file *file, unsigned long vm_flags)
@@ -747,6 +772,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                } else                                  /* cases 2, 5, 7 */
                        vma_adjust(prev, prev->vm_start,
                                end, prev->vm_pgoff, NULL);
+               if (prev->vm_flags & VM_EXEC)
+                       arch_add_exec_range(mm, prev->vm_end);
                return prev;
        }
 
@@ -809,8 +836,7 @@ try_prev:
         * (e.g. stash info in next's anon_vma_node when assigning
         * an anon_vma, or when trying vma_merge).  Another time.
         */
-       if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma)
-               BUG();
+       BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
        if (!near)
                goto none;
 
@@ -835,20 +861,12 @@ none:
 }
 
 #ifdef CONFIG_PROC_FS
-void __vm_stat_account(struct mm_struct *mm, unsigned long flags,
+void vm_stat_account(struct mm_struct *mm, unsigned long flags,
                                                struct file *file, long pages)
 {
        const unsigned long stack_flags
                = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
 
-#ifdef CONFIG_HUGETLB
-       if (flags & VM_HUGETLB) {
-               if (!(flags & VM_DONTCOPY))
-                       mm->shared_vm += pages;
-               return;
-       }
-#endif /* CONFIG_HUGETLB */
-
        if (file) {
                mm->shared_vm += pages;
                if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
@@ -918,7 +936,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        /* Obtain the address to map to. we verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
-       addr = get_unmapped_area(file, addr, len, pgoff, flags);
+       addr = get_unmapped_area_prot(file, addr, len, pgoff, flags, prot & PROT_EXEC);
        if (addr & ~PAGE_MASK)
                return addr;
 
@@ -1044,12 +1062,11 @@ munmap_back:
         * specific mapper. the address has already been validated, but
         * not unmapped, but the maps are removed from the list.
         */
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
                error = -ENOMEM;
                goto unacct_error;
        }
-       memset(vma, 0, sizeof(*vma));
 
        vma->vm_mm = mm;
        vma->vm_start = addr;
@@ -1113,7 +1130,7 @@ munmap_back:
        }
 out:   
        vx_vmpages_add(mm, len >> PAGE_SHIFT);
-       __vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
+       vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
                vx_vmlocked_add(mm, len >> PAGE_SHIFT);
                make_pages_present(addr, addr + len);
@@ -1175,7 +1192,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
-       start_addr = addr = mm->free_area_cache;
+       if (len > mm->cached_hole_size) {
+               start_addr = addr = mm->free_area_cache;
+       } else {
+               start_addr = addr = TASK_UNMAPPED_BASE;
+               mm->cached_hole_size = 0;
+       }
 
 full_search:
        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
@@ -1186,7 +1208,9 @@ full_search:
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
-                               start_addr = addr = TASK_UNMAPPED_BASE;
+                               addr = TASK_UNMAPPED_BASE;
+                               start_addr = addr;
+                               mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
@@ -1198,19 +1222,22 @@ full_search:
                        mm->free_area_cache = addr + len;
                        return addr;
                }
+               if (addr + mm->cached_hole_size < vma->vm_start)
+                       mm->cached_hole_size = vma->vm_start - addr;
                addr = vma->vm_end;
        }
 }
 #endif 
 
-void arch_unmap_area(struct vm_area_struct *area)
+void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
 {
        /*
         * Is this a new hole at the lowest possible address?
         */
-       if (area->vm_start >= TASK_UNMAPPED_BASE &&
-                       area->vm_start < area->vm_mm->free_area_cache)
-               area->vm_mm->free_area_cache = area->vm_start;
+       if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
+               mm->free_area_cache = addr;
+               mm->cached_hole_size = ~0UL;
+       }
 }
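The cached_hole_size logic above turns repeated O(n) scans into near-O(1) for
workloads that mmap many similarly sized regions: the cache records the largest
hole skipped below free_area_cache, so a search only restarts from the base when
a request is small enough that one of those skipped holes might fit. A userspace
simulation of the policy (layout invented):

#include <stdio.h>

struct vma { unsigned long start, end; };

static struct vma vmas[] = {                /* sorted, non-overlapping */
        { 0x10000, 0x14000 },
        { 0x15000, 0x20000 },
        { 0x30000, 0x31000 },
};
#define NVMA (sizeof(vmas) / sizeof(vmas[0]))

static const unsigned long base = 0x10000;  /* TASK_UNMAPPED_BASE stand-in */
static unsigned long free_area_cache = 0x10000;
static unsigned long cached_hole_size;      /* largest hole skipped so far */

static unsigned long get_area(unsigned long len)
{
        unsigned long addr;
        unsigned int i;

        if (len <= cached_hole_size) {      /* a skipped hole might fit */
                addr = base;
                cached_hole_size = 0;
        } else
                addr = free_area_cache;     /* nothing below can fit: resume */

        for (i = 0; i < NVMA; i++) {
                if (vmas[i].end <= addr)
                        continue;
                if (addr + len <= vmas[i].start)
                        break;              /* the hole before vmas[i] fits */
                if (addr + cached_hole_size < vmas[i].start)
                        cached_hole_size = vmas[i].start - addr;
                addr = vmas[i].end;
        }
        free_area_cache = addr + len;
        return addr;
}

int main(void)
{
        printf("0x%lx\n", get_area(0x1000)); /* 0x14000: hole before 2nd vma */
        printf("0x%lx\n", get_area(0x8000)); /* 0x20000: hole before 3rd vma */
        return 0;
}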
 
 /*
@@ -1240,6 +1267,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        return addr;
        }
 
+       /* check if free_area_cache is useful for us */
+       if (len <= mm->cached_hole_size) {
+               mm->cached_hole_size = 0;
+               mm->free_area_cache = mm->mmap_base;
+       }
+
        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;
 
@@ -1251,6 +1284,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        return (mm->free_area_cache = addr-len);
        }
 
+       if (mm->mmap_base < len)
+               goto bottomup;
+
        addr = mm->mmap_base-len;
 
        do {
@@ -1264,50 +1300,62 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
 
+               /* remember the largest hole we saw so far */
+               if (addr + mm->cached_hole_size < vma->vm_start)
+                       mm->cached_hole_size = vma->vm_start - addr;
+
                /* try just below the current vma->vm_start */
                addr = vma->vm_start-len;
        } while (len < vma->vm_start);
 
+bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
-       mm->free_area_cache = TASK_UNMAPPED_BASE;
+       mm->cached_hole_size = ~0UL;
+       mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
+       mm->cached_hole_size = ~0UL;
 
        return addr;
 }
 #endif
 
-void arch_unmap_area_topdown(struct vm_area_struct *area)
+void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
 {
        /*
         * Is this a new hole at the highest possible address?
         */
-       if (area->vm_end > area->vm_mm->free_area_cache)
-               area->vm_mm->free_area_cache = area->vm_end;
+       if (addr > mm->free_area_cache)
+               mm->free_area_cache = addr;
 
        /* dont allow allocations above current base */
-       if (area->vm_mm->free_area_cache > area->vm_mm->mmap_base)
-               area->vm_mm->free_area_cache = area->vm_mm->mmap_base;
+       if (mm->free_area_cache > mm->mmap_base)
+               mm->free_area_cache = mm->mmap_base;
 }
 
+
 unsigned long
-get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
-               unsigned long pgoff, unsigned long flags)
+get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
+               unsigned long pgoff, unsigned long flags, int exec)
 {
        unsigned long ret;
 
        if (!(flags & MAP_FIXED)) {
                unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-               get_area = current->mm->get_unmapped_area;
+               if (exec && current->mm->get_unmapped_exec_area)
+                       get_area = current->mm->get_unmapped_exec_area;
+               else
+                       get_area = current->mm->get_unmapped_area;
+
                if (file && file->f_op && file->f_op->get_unmapped_area)
                        get_area = file->f_op->get_unmapped_area;
                addr = get_area(file, addr, len, pgoff, flags);
@@ -1338,7 +1386,71 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        return addr;
 }
 
-EXPORT_SYMBOL(get_unmapped_area);
+EXPORT_SYMBOL(get_unmapped_area_prot);
+
+#define SHLIB_BASE             0x00111000
+
+unsigned long arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
+               unsigned long len0, unsigned long pgoff, unsigned long flags)
+{
+       unsigned long addr = addr0, len = len0;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long tmp;
+
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+
+       if (!addr && !(flags & MAP_FIXED))
+               addr = randomize_range(SHLIB_BASE, 0x01000000, len);
+
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+               if (TASK_SIZE - len >= addr &&
+                   (!vma || addr + len <= vma->vm_start)) {
+                       return addr;
+               }
+       }
+
+       addr = SHLIB_BASE;
+       for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+               /* At this point:  (!vma || addr < vma->vm_end). */
+               if (TASK_SIZE - len < addr)
+                       return -ENOMEM;
+
+               if (!vma || addr + len <= vma->vm_start) {
+                       /*
+                        * Must not let a PROT_EXEC mapping get into the
+                        * brk area:
+                        */
+                       if (addr + len > mm->brk)
+                               goto failed;
+
+                       /*
+                        * Up until the brk area we randomize addresses
+                        * as much as possible:
+                        */
+                       if (addr >= 0x01000000) {
+                               tmp = randomize_range(0x01000000, PAGE_ALIGN(max(mm->start_brk, (unsigned long)0x08000000)), len);
+                               vma = find_vma(mm, tmp);
+                               if (TASK_SIZE - len >= tmp &&
+                                   (!vma || tmp + len <= vma->vm_start))
+                                       return tmp;
+                       }
+                       /*
+                        * Ok, randomization didn't work out - return
+                        * the result of the linear search:
+                        */
+                       return addr;
+               }
+               addr = vma->vm_end;
+       }
+
+failed:
+       return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
+}
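arch_get_unmapped_exec_area() is the Exec Shield allocator: PROT_EXEC mappings
are steered into a low window starting at SHLIB_BASE, and their placement is
randomized both below and above the 16 MB mark. A loose userspace model of the
candidate selection; randomize_range() here only stands in for the kernel helper
of the same name, and the window constants mirror the ones in the patch:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_MASK   (~0xfffUL)
#define SHLIB_BASE  0x00111000UL
#define LOW_WINDOW  0x01000000UL    /* 16 MB, as in the patch */

/* Random page-aligned address in [start, end - len); models the kernel's
 * randomize_range(), which returns 0 when the window is too small. */
static unsigned long randomize_range(unsigned long start, unsigned long end,
                                     unsigned long len)
{
        if (end <= start + len)
                return 0;
        return (start + (unsigned long)random() % (end - start - len))
                & PAGE_MASK;
}

int main(void)
{
        unsigned long len = 0x4000;

        srandom((unsigned int)time(NULL));
        /* First choice: a random slot in the low window, as the patch does
         * when no address was requested. The real code then probes the vma
         * tree and falls back to a linear scan from SHLIB_BASE. */
        printf("candidate: 0x%08lx\n",
               randomize_range(SHLIB_BASE, LOW_WINDOW, len));
        return 0;
}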
+
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
@@ -1413,6 +1525,14 @@ out:
        return prev ? prev->vm_next : vma;
 }
 
+static int over_stack_limit(unsigned long sz)
+{
+       if (sz < EXEC_STACK_BIAS)
+               return 0;
+       return (sz - EXEC_STACK_BIAS) >
+                       current->signal->rlim[RLIMIT_STACK].rlim_cur;
+}
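over_stack_limit() compares the stack size against the rlimit with
EXEC_STACK_BIAS of slack, and the early return guards the unsigned subtraction
against wrapping. The shape of it, with an invented bias value:

#include <stdio.h>

#define EXEC_STACK_BIAS 0x20000UL   /* illustrative value only */

static int over_limit(unsigned long sz, unsigned long rlim_cur)
{
        if (sz < EXEC_STACK_BIAS)
                return 0;   /* without this, sz - BIAS would wrap to huge */
        return (sz - EXEC_STACK_BIAS) > rlim_cur;
}

int main(void)
{
        printf("%d\n", over_limit(0x1000,   0x800000)); /* 0: tiny stack */
        printf("%d\n", over_limit(0x810000, 0x800000)); /* 0: inside the bias */
        printf("%d\n", over_limit(0xa30000, 0x800000)); /* 1: over the limit */
        return 0;
}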
+
 /*
  * Verify that the stack growth is acceptable and
  * update accounting. This is shared with both the
@@ -1428,7 +1548,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
                return -ENOMEM;
 
        /* Stack limit test */
-       if (size > rlim[RLIMIT_STACK].rlim_cur)
+       if (over_stack_limit(size))
                return -ENOMEM;
 
        /* mlock limit tests */
@@ -1452,15 +1572,19 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
        vx_vmpages_add(mm, grow);
        if (vma->vm_flags & VM_LOCKED)
                vx_vmlocked_add(mm, grow);
-       __vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
+       vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
        return 0;
 }
 
-#ifdef CONFIG_STACK_GROWSUP
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
 /*
- * vma is the first one with address > vma->vm_end.  Have to extend vma.
+ * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
+ * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
-int expand_stack(struct vm_area_struct * vma, unsigned long address)
+#ifndef CONFIG_IA64
+static inline
+#endif
+int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
        int error;
 
@@ -1498,6 +1622,13 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
        anon_vma_unlock(vma);
        return error;
 }
+#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
+
+#ifdef CONFIG_STACK_GROWSUP
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+       return expand_upwards(vma, address);
+}
 
 struct vm_area_struct *
 find_extend_vma(struct mm_struct *mm, unsigned long addr)
@@ -1580,38 +1711,24 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
 }
 #endif
 
-/* Normal function to fix up a mapping
- * This function is the default for when an area has no specific
- * function.  This may be used as part of a more specific routine.
- *
- * By the time this function is called, the area struct has been
- * removed from the process mapping list.
- */
-static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
-{
-       size_t len = area->vm_end - area->vm_start;
-
-       vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
-
-       if (area->vm_flags & VM_LOCKED)
-               vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
-       vm_stat_unaccount(area);
-       area->vm_mm->unmap_area(area);
-       remove_vm_struct(area);
-}
-
 /*
- * Update the VMA and inode share lists.
- *
- * Ok - we have the memory areas we should free on the 'free' list,
+ * Ok - we have the memory areas we should free on the vma list,
  * so release them, and do the vma updates.
+ *
+ * Called with the mm semaphore held.
  */
-static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+       /* Update high watermark before we lower total_vm */
+       update_hiwater_vm(mm);
        do {
-               struct vm_area_struct *next = vma->vm_next;
-               unmap_vma(mm, vma);
-               vma = next;
+               long nrpages = vma_pages(vma);
+
+               vx_vmpages_sub(mm, nrpages);
+               if (vma->vm_flags & VM_LOCKED)
+                       vx_vmlocked_sub(mm, nrpages);
+               vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
+               vma = remove_vma(vma);
        } while (vma);
        validate_mm(mm);
 }
@@ -1619,7 +1736,7 @@ static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 /*
  * Get rid of page table information in the indicated region.
  *
- * Called with the page table lock held.
+ * Called with the mm semaphore held.
  */
 static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
@@ -1630,14 +1747,13 @@ static void unmap_region(struct mm_struct *mm,
        unsigned long nr_accounted = 0;
 
        lru_add_drain();
-       spin_lock(&mm->page_table_lock);
        tlb = tlb_gather_mmu(mm, 0);
-       unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
+       update_hiwater_rss(mm);
+       unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
        free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
                                 next? next->vm_start: 0);
        tlb_finish_mmu(tlb, start, end);
-       spin_unlock(&mm->page_table_lock);
 }
 
 /*
@@ -1650,6 +1766,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        struct vm_area_struct **insertion_point;
        struct vm_area_struct *tail_vma = NULL;
+       unsigned long addr;
 
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
        do {
@@ -1660,6 +1777,11 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
        } while (vma && vma->vm_start < end);
        *insertion_point = vma;
        tail_vma->vm_next = NULL;
+       if (mm->unmap_area == arch_unmap_area)
+               addr = prev ? prev->vm_end : mm->mmap_base;
+       else
+               addr = vma ?  vma->vm_start : mm->mmap_base;
+       mm->unmap_area(mm, addr);
        mm->mmap_cache = NULL;          /* Kill the cache. */
 }
 
@@ -1706,10 +1828,14 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
        if (new->vm_ops && new->vm_ops->open)
                new->vm_ops->open(new);
 
-       if (new_below)
+       if (new_below) {
+               unsigned long old_end = vma->vm_end;
+
                vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
                        ((addr - new->vm_start) >> PAGE_SHIFT), new);
-       else
+               if (vma->vm_flags & VM_EXEC)
+                       arch_remove_exec_range(mm, old_end);
+       } else
                vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
 
        return 0;
@@ -1772,7 +1898,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
        unmap_region(mm, vma, prev, start, end);
 
        /* Fix up all other VM information */
-       unmap_vma_list(mm, vma);
+       remove_vma_list(mm, vma);
 
        return 0;
 }
@@ -1794,7 +1920,7 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len)
 
 static inline void verify_mm_writelocked(struct mm_struct *mm)
 {
-#ifdef CONFIG_DEBUG_KERNEL
+#ifdef CONFIG_DEBUG_VM
        if (unlikely(down_read_trylock(&mm->mmap_sem))) {
                WARN_ON(1);
                up_read(&mm->mmap_sem);
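The trick in verify_mm_writelocked() ports to userspace directly: if a
read-trylock on the lock succeeds, nobody (including the caller) holds it for
writing, so the caller has broken the locking contract. A pthread sketch of the
same debug-only check:

#include <pthread.h>
#include <stdio.h>

/* Warn if the caller does not hold 'lock' for writing: a successful
 * read-trylock proves no writer holds the lock. */
static void verify_writelocked(pthread_rwlock_t *lock)
{
        if (pthread_rwlock_tryrdlock(lock) == 0) {
                pthread_rwlock_unlock(lock);
                fprintf(stderr, "caller is missing the write lock!\n");
        }
}

int main(void)
{
        pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

        verify_writelocked(&lock);          /* complains */
        pthread_rwlock_wrlock(&lock);
        verify_writelocked(&lock);          /* silent */
        pthread_rwlock_unlock(&lock);
        return 0;
}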
@@ -1861,7 +1987,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if (mm->map_count > sysctl_max_map_count)
                return -ENOMEM;
 
-       if (security_vm_enough_memory(len >> PAGE_SHIFT))
+       if (security_vm_enough_memory(len >> PAGE_SHIFT) ||
+               !vx_vmpages_avail(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -1874,12 +2001,11 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        /*
         * create a vma struct for an anonymous mapping
         */
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
                vm_unacct_memory(len >> PAGE_SHIFT);
                return -ENOMEM;
        }
-       memset(vma, 0, sizeof(*vma));
 
        vma->vm_mm = mm;
        vma->vm_start = addr;
@@ -1908,34 +2034,27 @@ void exit_mmap(struct mm_struct *mm)
        unsigned long end;
 
        lru_add_drain();
-
-       spin_lock(&mm->page_table_lock);
-
        flush_cache_mm(mm);
        tlb = tlb_gather_mmu(mm, 1);
+       /* Don't update_hiwater_rss(mm) here, do_exit already did */
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
-       end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
+       end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
        tlb_finish_mmu(tlb, 0, end);
+       arch_flush_exec_range(mm);
 
-       mm->mmap = mm->mmap_cache = NULL;
-       mm->mm_rb = RB_ROOT;
-       set_mm_counter(mm, rss, 0);
+       set_mm_counter(mm, file_rss, 0);
+       set_mm_counter(mm, anon_rss, 0);
        vx_vmpages_sub(mm, mm->total_vm);
        vx_vmlocked_sub(mm, mm->locked_vm);
 
-       spin_unlock(&mm->page_table_lock);
-
        /*
-        * Walk the list again, actually closing and freeing it
-        * without holding any MM locks.
+        * Walk the list again, actually closing and freeing it,
+        * with preemption enabled, without holding any MM locks.
         */
-       while (vma) {
-               struct vm_area_struct *next = vma->vm_next;
-               remove_vm_struct(vma);
-               vma = next;
-       }
+       while (vma)
+               vma = remove_vma(vma);
 
        BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
@@ -1968,6 +2087,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
        __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
        if (__vma && __vma->vm_start < vma->vm_end)
                return -ENOMEM;
+       if ((vma->vm_flags & VM_ACCOUNT) &&
+               (security_vm_enough_memory(vma_pages(vma)) ||
+               !vx_vmpages_avail(mm, vma_pages(vma))))
+               return -ENOMEM;
        vma_link(mm, vma, prev, rb_link, rb_parent);
        return 0;
 }
@@ -2043,3 +2166,81 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
                return 0;
        return 1;
 }
+
+
+static struct page *
+special_mapping_nopage(struct vm_area_struct *vma,
+                      unsigned long address, int *type)
+{
+       struct page **pages;
+
+       BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+
+       address -= vma->vm_start;
+       for (pages = vma->vm_private_data; address > 0 && *pages; ++pages)
+               address -= PAGE_SIZE;
+
+       if (*pages) {
+               get_page(*pages);
+               return *pages;
+       }
+
+       return NOPAGE_SIGBUS;
+}
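The loop in special_mapping_nopage() is a bounds-checked index into the
NULL-terminated page array: each PAGE_SIZE of fault offset advances one slot,
and running off the end yields SIGBUS. The same walk in userspace (contents
invented):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static const char *pages[] = { "page0", "page1", NULL };

static const char *lookup(unsigned long offset)   /* offset from vm_start */
{
        const char **p = pages;

        while (offset > 0 && *p) {
                offset -= PAGE_SIZE;
                p++;
        }
        return *p;  /* NULL past the array -> NOPAGE_SIGBUS in the kernel */
}

int main(void)
{
        printf("%s\n", lookup(0));                     /* page0 */
        printf("%s\n", lookup(PAGE_SIZE));             /* page1 */
        printf("%p\n", (void *)lookup(2 * PAGE_SIZE)); /* (nil) */
        return 0;
}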
+
+static struct vm_operations_struct special_mapping_vmops = {
+       .nopage = special_mapping_nopage,
+};
+
+unsigned int vdso_populate = 1;
+
+/*
+ * Insert a new vma covering the given region, with the given flags and
+ * protections.  Its pages are supplied by the given null-terminated array.
+ * The region past the last page supplied will always produce SIGBUS.
+ * The array pointer and the pages it points to are assumed to stay alive
+ * for as long as this mapping might exist.
+ */
+int install_special_mapping(struct mm_struct *mm,
+                           unsigned long addr, unsigned long len,
+                           unsigned long vm_flags, pgprot_t pgprot,
+                           struct page **pages)
+{
+       struct vm_area_struct *vma;
+       int err;
+
+       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       if (unlikely(vma == NULL))
+               return -ENOMEM;
+       memset(vma, 0, sizeof(*vma));
+
+       vma->vm_mm = mm;
+       vma->vm_start = addr;
+       vma->vm_end = addr + len;
+
+       vma->vm_flags = vm_flags;
+       vma->vm_page_prot = pgprot;
+
+       vma->vm_ops = &special_mapping_vmops;
+       vma->vm_private_data = pages;
+
+       insert_vm_struct(mm, vma);
+       mm->total_vm += len >> PAGE_SHIFT;
+
+       if (!vdso_populate)
+               return 0;
+
+       err = 0;
+       while (*pages) {
+               struct page *page = *pages++;
+               get_page(page);
+               err = install_page(mm, vma, addr, page, vma->vm_page_prot);
+               if (err) {
+                       put_page(page);
+                       break;
+               }
+               addr += PAGE_SIZE;
+       }
+
+       return err;
+}
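A hypothetical caller, to show the intended use: an arch's binfmt setup mapping
a single vDSO page. The function name, address, protection, and page source are
invented for illustration; the real callers live in the per-arch code.

/* Hypothetical example only: map one vDSO-like page into 'mm'. */
static struct page *vdso_pages[2];   /* one page + NULL terminator */

int map_vdso_page_sketch(struct mm_struct *mm, struct page *page)
{
        unsigned long addr = 0xffffe000UL;   /* assumed fixed slot */

        vdso_pages[0] = page;
        vdso_pages[1] = NULL;                /* array must be NULL-terminated */

        /* Readable and executable, never writable from userspace. */
        return install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_EXEC | VM_MAYREAD,
                                       PAGE_READONLY_EXEC, vdso_pages);
}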