This commit was manufactured by cvs2svn to create tag
diff --git a/mm/mmap.c b/mm/mmap.c
index 3e11800..3bcd0ae 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -23,7 +23,6 @@
 #include <linux/mount.h>
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
-#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -91,7 +90,6 @@ static void remove_vm_struct(struct vm_area_struct *vma)
 {
        struct file *file = vma->vm_file;
 
-       might_sleep();
        if (file) {
                struct address_space *mapping = file->f_mapping;
                spin_lock(&mapping->i_mmap_lock);
@@ -321,6 +319,7 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
 
+       mark_mm_hugetlb(mm, vma);
        mm->map_count++;
        validate_mm(mm);
 }
@@ -367,7 +366,6 @@ void vma_adjust(struct vm_area_struct *vma, unsigned long start,
 {
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *next = vma->vm_next;
-       struct vm_area_struct *importer = NULL;
        struct address_space *mapping = NULL;
        struct prio_tree_root *root = NULL;
        struct file *file = vma->vm_file;
@@ -391,7 +389,6 @@ again:                      remove_next = 1 + (end > next->vm_end);
                         */
                        adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
                        anon_vma = next->anon_vma;
-                       importer = vma;
                } else if (end < vma->vm_end) {
                        /*
                         * vma shrinks, and !insert tells it's not
@@ -400,7 +397,6 @@ again:                      remove_next = 1 + (end > next->vm_end);
                         */
                        adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
                        anon_vma = next->anon_vma;
-                       importer = next;
                }
        }
 
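For reference, adjust_next above is a signed page count: positive when the new end moves into next, negative when vma shrinks back from it. A standalone sketch of the arithmetic (assuming 4 KiB pages; the addresses are made up):

/* Not kernel code: models how vma_adjust() encodes the boundary
 * shift as a signed page count in adjust_next. */
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed: 4 KiB pages */

int main(void)
{
        unsigned long next_start = 0x40000000UL;  /* next->vm_start */
        unsigned long vma_end = 0x40000000UL;     /* vma->vm_end, abutting next */
        unsigned long end;
        long adjust_next;

        /* end > next->vm_start: vma expands into next. */
        end = next_start + 0x3000;
        adjust_next = (long)((end - next_start) >> PAGE_SHIFT);
        printf("expand: adjust_next = %+ld pages\n", adjust_next);   /* +3 */

        /* end < vma->vm_end: vma shrinks away from next. */
        end = vma_end - 0x2000;
        adjust_next = -(long)((vma_end - end) >> PAGE_SHIFT);
        printf("shrink: adjust_next = %+ld pages\n", adjust_next);   /* -2 */
        return 0;
}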
@@ -426,18 +422,8 @@ again:                     remove_next = 1 + (end > next->vm_end);
         */
        if (vma->anon_vma)
                anon_vma = vma->anon_vma;
-       if (anon_vma) {
+       if (anon_vma)
                spin_lock(&anon_vma->lock);
-               /*
-                * Easily overlooked: when mprotect shifts the boundary,
-                * make sure the expanding vma has anon_vma set if the
-                * shrinking vma had, to cover any anon pages imported.
-                */
-               if (importer && !importer->anon_vma) {
-                       importer->anon_vma = anon_vma;
-                       __anon_vma_link(importer);
-               }
-       }
 
        if (root) {
                flush_dcache_mmap_lock(mapping);
@@ -758,13 +744,6 @@ unsigned long do_mmap_pgoff(struct mm_struct *mm, struct file * file,
        int accountable = 1;
        unsigned long charged = 0;
 
-       /*
-        * Does the application expect PROT_READ to imply PROT_EXEC:
-        */
-       if (unlikely((prot & PROT_READ) &&
-                       (current->personality & READ_IMPLIES_EXEC)))
-               prot |= PROT_EXEC;
-
        if (file) {
                if (is_file_hugepages(file))
                        accountable = 0;
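The hunk above removes the READ_IMPLIES_EXEC handling from do_mmap_pgoff(). For context, a userspace sketch of what that personality bit does on a kernel that still honors it (standard <sys/personality.h> interface; the pause-and-inspect flow is only illustrative):

/* Sets READ_IMPLIES_EXEC, then maps a PROT_READ-only page; with the
 * removed check in place the kernel silently grants PROT_EXEC too. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/personality.h>

int main(void)
{
        /* Query the current personality, then OR in the bit. */
        personality(personality(0xffffffff) | READ_IMPLIES_EXEC);

        void *p = mmap(NULL, 4096, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        /* On an honoring kernel the region shows up as r-x, not r--. */
        printf("mapped %p; check /proc/self/maps, then press Enter\n", p);
        getchar();
        return 0;
}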
@@ -796,7 +775,7 @@ unsigned long do_mmap_pgoff(struct mm_struct *mm, struct file * file,
        /* Obtain the address to map to. we verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
-       addr = get_unmapped_area_prot(file, addr, len, pgoff, flags, prot & PROT_EXEC);
+       addr = get_unmapped_area(file, addr, len, pgoff, flags, prot & PROT_EXEC);
        if (addr & ~PAGE_MASK)
                return addr;
 
@@ -808,17 +787,15 @@ unsigned long do_mmap_pgoff(struct mm_struct *mm, struct file * file,
                        mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
        if (flags & MAP_LOCKED) {
-               if (!can_do_mlock())
+               if (!capable(CAP_IPC_LOCK))
                        return -EPERM;
                vm_flags |= VM_LOCKED;
        }
        /* mlock MCL_FUTURE? */
        if (vm_flags & VM_LOCKED) {
-               unsigned long locked, lock_limit;
-               locked = mm->locked_vm << PAGE_SHIFT;
-               lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
+               unsigned long locked = mm->locked_vm << PAGE_SHIFT;
                locked += len;
-               if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+               if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        return -EAGAIN;
        }
 
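After this change MAP_LOCKED demands CAP_IPC_LOCK outright, and the MCL_FUTURE path enforces RLIMIT_MEMLOCK with no capability override. A userspace sketch of the same byte accounting (the locked page count and page size here are assumed values):

/* Models the check: bytes already locked plus the new mapping's
 * length must stay within RLIMIT_MEMLOCK or mmap() fails -EAGAIN. */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;
        unsigned long locked_vm = 16;    /* pages already locked (made up) */
        unsigned long page_shift = 12;   /* assumed: 4 KiB pages */
        unsigned long len = 1UL << 20;   /* proposed MAP_LOCKED length */

        if (getrlimit(RLIMIT_MEMLOCK, &rl))
                return 1;

        unsigned long locked = (locked_vm << page_shift) + len;
        printf("would %s: %lu bytes vs limit %lu\n",
               locked > rl.rlim_cur ? "fail with EAGAIN" : "succeed",
               locked, (unsigned long)rl.rlim_cur);
        return 0;
}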
@@ -1036,9 +1013,9 @@ EXPORT_SYMBOL(do_mmap_pgoff);
  * This function "knows" that -ENOMEM has the bits set.
  */
 #ifndef HAVE_ARCH_UNMAPPED_AREA
-unsigned long
+static inline unsigned long
 arch_get_unmapped_area(struct file *filp, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
+               unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
@@ -1080,120 +1057,15 @@ full_search:
                addr = vma->vm_end;
        }
 }
+#else
+extern unsigned long
+arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+                       unsigned long, unsigned long, unsigned long);
 #endif 
 
-void arch_unmap_area(struct vm_area_struct *area)
-{
-       /*
-        * Is this a new hole at the lowest possible address?
-        */
-       if (area->vm_start >= TASK_UNMAPPED_BASE &&
-                       area->vm_start < area->vm_mm->free_area_cache)
-               area->vm_mm->free_area_cache = area->vm_start;
-}
-
-/*
- * This mmap-allocator allocates new areas top-down from below the
- * stack's low limit (the base):
- */
 unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-                         const unsigned long len, const unsigned long pgoff,
-                         const unsigned long flags)
-{
-       struct vm_area_struct *vma, *prev_vma;
-       struct mm_struct *mm = current->mm;
-       unsigned long base = mm->mmap_base, addr = addr0;
-       int first_time = 1;
-
-       /* requested length too big for entire address space */
-       if (len > TASK_SIZE)
-               return -ENOMEM;
-
-       /* dont allow allocations above current base */
-       if (mm->free_area_cache > base)
-               mm->free_area_cache = base;
-
-       /* requesting a specific address */
-       if (addr) {
-               addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vma->vm_start))
-                       return addr;
-       }
-
-try_again:
-       /* make sure it can fit in the remaining address space */
-       if (mm->free_area_cache < len)
-               goto fail;
-
-       /* either no address requested or cant fit in requested address hole */
-       addr = (mm->free_area_cache - len) & PAGE_MASK;
-       do {
-               /*
-                * Lookup failure means no vma is above this address,
-                * i.e. return with success:
-                */
-               if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
-                       return addr;
-
-               /*
-                * new region fits between prev_vma->vm_end and
-                * vma->vm_start, use it:
-                */
-               if (addr+len <= vma->vm_start &&
-                               (!prev_vma || (addr >= prev_vma->vm_end)))
-                       /* remember the address as a hint for next time */
-                       return (mm->free_area_cache = addr);
-               else
-                       /* pull free_area_cache down to the first hole */
-                       if (mm->free_area_cache == vma->vm_end)
-                               mm->free_area_cache = vma->vm_start;
-
-               /* try just below the current vma->vm_start */
-               addr = vma->vm_start-len;
-       } while (len <= vma->vm_start);
-
-fail:
-       /*
-        * if hint left us with no space for the requested
-        * mapping then try again:
-        */
-       if (first_time) {
-               mm->free_area_cache = base;
-               first_time = 0;
-               goto try_again;
-       }
-       /*
-        * A failed mmap() very likely causes application failure,
-        * so fall back to the bottom-up function here. This scenario
-        * can happen with large stack limits and large mmap()
-        * allocations.
-        */
-       mm->free_area_cache = TASK_UNMAPPED_BASE;
-       addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-       /*
-        * Restore the topdown base:
-        */
-       mm->free_area_cache = base;
-
-       return addr;
-}
-
-void arch_unmap_area_topdown(struct vm_area_struct *area)
-{
-       /*
-        * Is this a new hole at the highest possible address?
-        */
-       if (area->vm_end > area->vm_mm->free_area_cache)
-               area->vm_mm->free_area_cache = area->vm_end;
-}
-
-
-unsigned long
-get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
-               unsigned long pgoff, unsigned long flags, int exec)
+get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+               unsigned long pgoff, unsigned long flags, unsigned long exec)
 {
        if (flags & MAP_FIXED) {
                unsigned long ret;
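The hunk above drops the top-down allocator and the get_unmapped_area_prot() split, leaving only the bottom-up walk. A standalone model of that first-fit search (find_hole is a made-up helper, and the vma list is a toy stand-in for the mm's structures):

/* Not kernel code: walk mappings sorted by address and return the
 * first gap of at least len bytes at or above base. */
#include <stdio.h>

struct vma { unsigned long start, end; struct vma *next; };

static unsigned long find_hole(struct vma *v, unsigned long base,
                               unsigned long len, unsigned long task_size)
{
        unsigned long addr = base;

        for (; ; v = v ? v->next : NULL) {
                if (addr + len > task_size)
                        return -1UL;            /* -ENOMEM in the kernel */
                if (!v || addr + len <= v->start)
                        return addr;            /* hole found */
                if (addr < v->end)
                        addr = v->end;          /* skip past this mapping */
        }
}

int main(void)
{
        struct vma b = { 0x60000000UL, 0x60010000UL, NULL };
        struct vma a = { 0x40000000UL, 0x40004000UL, &b };

        /* First 32 KiB hole above 0x40000000 is right after vma a. */
        printf("hole at %#lx\n",
               find_hole(&a, 0x40000000UL, 0x8000UL, 0xC0000000UL));
        return 0;
}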
@@ -1225,80 +1097,10 @@ get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
                return file->f_op->get_unmapped_area(file, addr, len,
                                                pgoff, flags);
 
-       if (exec && current->mm->get_unmapped_exec_area)
-               return current->mm->get_unmapped_exec_area(file, addr, len, pgoff, flags);
-       else
-               return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+       return arch_get_unmapped_area(file, addr, len, pgoff, flags, exec);
 }
 
-EXPORT_SYMBOL(get_unmapped_area_prot);
-
-
-#define SHLIB_BASE             0x00111000
-
-unsigned long arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
-               unsigned long len0, unsigned long pgoff, unsigned long flags)
-{
-       unsigned long addr = addr0, len = len0;
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       unsigned long tmp;
-
-       if (len > TASK_SIZE)
-               return -ENOMEM;
-               
-       if (!addr && !(flags & MAP_FIXED))
-               addr = randomize_range(SHLIB_BASE, 0x01000000, len);
-
-       if (addr) {
-               addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start)) {
-                       return addr;
-               }
-       }
-
-       addr = SHLIB_BASE;
-
-       for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-               /* At this point:  (!vma || addr < vma->vm_end). */
-               if (TASK_SIZE - len < addr) {
-                       return -ENOMEM;
-               }
-               if (!vma || addr + len <= vma->vm_start) {
-                       /*
-                        * Must not let a PROT_EXEC mapping get into the
-                        * brk area:
-                        */
-                       if (addr + len > mm->brk)
-                               goto failed;
-                       
-                       /*
-                        * Up until the brk area we randomize addresses
-                        * as much as possible:
-                        */
-                       if (addr >= 0x01000000) {
-                               tmp = randomize_range(0x01000000, mm->brk, len);
-                               vma = find_vma(mm, tmp);
-                               if (TASK_SIZE - len >= tmp &&
-                                   (!vma || tmp + len <= vma->vm_start))
-                                       return tmp;
-                       }
-                       /*
-                        * Ok, randomization didnt work out - return
-                        * the result of the linear search:
-                        */
-                       return addr;
-               }
-               addr = vma->vm_end;
-       }
-       
-failed:
-       return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
-}
-
-
+EXPORT_SYMBOL(get_unmapped_area);
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
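The removed arch_get_unmapped_exec_area() leans on randomize_range() to scatter PROT_EXEC mappings between SHLIB_BASE and the brk area. A userspace model of that kind of page-aligned pick (my_randomize_range is a made-up name; random() stands in for the kernel's entropy source):

/* Not the kernel's implementation: draw a page-aligned address
 * uniformly from [start, end - len]. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT 12   /* assumed: 4 KiB pages */

static unsigned long my_randomize_range(unsigned long start,
                                        unsigned long end, unsigned long len)
{
        unsigned long pages;

        if (end <= start + len)
                return 0;               /* no room; caller falls back */
        pages = (end - start - len) >> PAGE_SHIFT;
        return start + (((unsigned long)random() % (pages + 1)) << PAGE_SHIFT);
}

int main(void)
{
        srandom((unsigned)time(NULL));
        /* SHLIB_BASE..0x01000000 window from the removed code, 16 KiB map. */
        printf("candidate: %#lx\n",
               my_randomize_range(0x00111000UL, 0x01000000UL, 0x4000UL));
        return 0;
}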
@@ -1409,9 +1211,9 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
        address &= PAGE_MASK;
        grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
-       /* Overcommit.. vx check first to avoid vm_unacct_memory() */
-       if (!vx_vmpages_avail(vma->vm_mm, grow) ||
-               security_vm_enough_memory(grow)) {
+       /* Overcommit.. */
+       if (security_vm_enough_memory(grow) ||
+               !vx_vmpages_avail(vma->vm_mm, grow)) {
                anon_vma_unlock(vma);
                return -ENOMEM;
        }
@@ -1423,6 +1225,7 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
                vm_unacct_memory(grow);
                return -ENOMEM;
        }
+
        vma->vm_end = address;
        // vma->vm_mm->total_vm += grow;
        vx_vmpages_add(vma->vm_mm, grow);
@@ -1473,9 +1276,9 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
        address &= PAGE_MASK;
        grow = (vma->vm_start - address) >> PAGE_SHIFT;
 
-        /* Overcommit.. vx check first to avoid vm_unacct_memory() */
-       if (!vx_vmpages_avail(vma->vm_mm, grow) ||
-               security_vm_enough_memory(grow)) {
+       /* Overcommit.. */
+       if (security_vm_enough_memory(grow) ||
+               !vx_vmpages_avail(vma->vm_mm, grow)) {
                anon_vma_unlock(vma);
                return -ENOMEM;
        }
@@ -1487,6 +1290,7 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
                vm_unacct_memory(grow);
                return -ENOMEM;
        }
+
        vma->vm_start = address;
        vma->vm_pgoff -= grow;
        // vma->vm_mm->total_vm += grow;
@@ -1595,6 +1399,7 @@ no_mmaps:
 static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
 {
        size_t len = area->vm_end - area->vm_start;
+       unsigned long old_end = area->vm_end;
 
        // area->vm_mm->total_vm -= len >> PAGE_SHIFT;
        vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
@@ -1602,8 +1407,20 @@ static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
        if (area->vm_flags & VM_LOCKED)
                // area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
                vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
-       area->vm_mm->unmap_area(area);
+       /*
+        * Is this a new hole at the lowest possible address?
+        */
+       if (area->vm_start >= TASK_UNMAPPED_BASE &&
+                       area->vm_start < area->vm_mm->free_area_cache)
+               area->vm_mm->free_area_cache = area->vm_start;
+       /*
+        * Is this a new hole at the highest possible address?
+        */
+       if (area->vm_start > area->vm_mm->non_executable_cache)
+               area->vm_mm->non_executable_cache = area->vm_start;
        remove_vm_struct(area);
+       if (unlikely(area->vm_flags & VM_EXEC))
+               arch_remove_exec_range(mm, old_end);
 }
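unmap_vma() now open-codes the hole caches rather than calling through mm->unmap_area(). A toy model of the lowest-hole invariant kept in free_area_cache (the mm state is faked and TASK_UNMAPPED_BASE is an assumed constant):

/* Not kernel code: the cache tracks the lowest address at which a
 * hole may exist, so unmapping below it must pull it down. */
#include <stdio.h>

#define TASK_UNMAPPED_BASE 0x40000000UL

struct mm { unsigned long free_area_cache; };

static void note_unmap(struct mm *mm, unsigned long start)
{
        if (start >= TASK_UNMAPPED_BASE && start < mm->free_area_cache)
                mm->free_area_cache = start;    /* new lowest known hole */
}

int main(void)
{
        struct mm mm = { .free_area_cache = 0x50000000UL };

        note_unmap(&mm, 0x48000000UL);
        printf("free_area_cache = %#lx\n", mm.free_area_cache); /* 0x48000000 */
        return 0;
}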
 
 /*
@@ -1836,14 +1653,11 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
         * mlock MCL_FUTURE?
         */
        if (mm->def_flags & VM_LOCKED) {
-               unsigned long locked, lock_limit;
-               locked = mm->locked_vm << PAGE_SHIFT;
-               lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
+               unsigned long locked = mm->locked_vm << PAGE_SHIFT;
                locked += len;
-               if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+               if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        return -EAGAIN;
-               if (!vx_vmlocked_avail(mm, len >> PAGE_SHIFT))
-                       return -ENOMEM;
+               /* TODO: restore a vserver vx_vmlocked_avail() check here? */
        }
 
        /*