Merge to Fedora kernel-2.6.7-1.492
diff --git a/mm/mmap.c b/mm/mmap.c
index d600588..fa6b34f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -25,7 +25,6 @@
 #include <linux/rmap.h>
 
 #include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
 
@@ -91,6 +90,7 @@ static void remove_vm_struct(struct vm_area_struct *vma)
 {
        struct file *file = vma->vm_file;
 
+       might_sleep();
        if (file) {
                struct address_space *mapping = file->f_mapping;
                spin_lock(&mapping->i_mmap_lock);
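
The new might_sleep() call documents that remove_vm_struct() may block: fput() further down can sleep when it drops the last reference to the file. With kernel debugging enabled it also warns if the function is ever entered from atomic context. A minimal sketch of the same annotation pattern (illustrative only, not kernel code):

    static void put_backing_file(struct file *file)
    {
            might_sleep();          /* warn if called from atomic context */
            if (file)
                    fput(file);     /* final fput() may block */
    }
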
@@ -295,10 +295,8 @@ __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
        struct vm_area_struct *prev, struct rb_node **rb_link,
        struct rb_node *rb_parent)
 {
-       vma_prio_tree_init(vma);
        __vma_link_list(mm, vma, prev, rb_parent);
        __vma_link_rb(mm, vma, rb_link, rb_parent);
-       __vma_link_file(vma);
        __anon_vma_link(vma);
 }
 
@@ -314,20 +312,22 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
        if (mapping)
                spin_lock(&mapping->i_mmap_lock);
        anon_vma_lock(vma);
+
        __vma_link(mm, vma, prev, rb_link, rb_parent);
+       __vma_link_file(vma);
+
        anon_vma_unlock(vma);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
 
-       mark_mm_hugetlb(mm, vma);
        mm->map_count++;
        validate_mm(mm);
 }
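
__vma_link_file() has moved out of __vma_link() and into vma_link() itself, so the vma enters the file's i_mmap prio_tree only while i_mmap_lock is held; __insert_vm_struct(), used on the split_vma path, now skips the file link entirely because vma_adjust() performs it earlier (see the vma_adjust hunks below). A sketch of the resulting lock nesting for a file-backed vma, using the same primitives as this file:

    spin_lock(&mapping->i_mmap_lock);       /* file side: i_mmap prio_tree */
    anon_vma_lock(vma);                     /* anon side: anon_vma list    */
    /* link into mm list, rbtree, file prio_tree and anon_vma list */
    anon_vma_unlock(vma);
    spin_unlock(&mapping->i_mmap_lock);
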
 
 /*
- * Insert vm structure into process list sorted by address and into the
- * inode's i_mmap tree. The caller should hold mm->mmap_sem and
- * ->f_mappping->i_mmap_lock if vm_file is non-NULL.
+ * Helper for vma_adjust in the split_vma insert case:
+ * insert vm structure into list and rbtree and anon_vma,
+ * but it has already been inserted into prio_tree earlier.
  */
 static void
 __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
@@ -339,13 +339,12 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
        if (__vma && __vma->vm_start < vma->vm_end)
                BUG();
        __vma_link(mm, vma, prev, rb_link, rb_parent);
-       mark_mm_hugetlb(mm, vma);
        mm->map_count++;
-       validate_mm(mm);
 }
 
-static inline void __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
-                 struct vm_area_struct *prev)
+static inline void
+__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
+               struct vm_area_struct *prev)
 {
        prev->vm_next = vma->vm_next;
        rb_erase(&vma->vm_rb, &mm->mm_rb);
@@ -367,6 +366,7 @@ void vma_adjust(struct vm_area_struct *vma, unsigned long start,
 {
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *next = vma->vm_next;
+       struct vm_area_struct *importer = NULL;
        struct address_space *mapping = NULL;
        struct prio_tree_root *root = NULL;
        struct file *file = vma->vm_file;
@@ -376,21 +376,30 @@ void vma_adjust(struct vm_area_struct *vma, unsigned long start,
 
        if (next && !insert) {
                if (end >= next->vm_end) {
+                       /*
+                        * vma expands, overlapping all the next, and
+                        * perhaps the one after too (mprotect case 6).
+                        */
 again:                 remove_next = 1 + (end > next->vm_end);
                        end = next->vm_end;
                        anon_vma = next->anon_vma;
-               } else if (end < vma->vm_end || end > next->vm_start) {
+               } else if (end > next->vm_start) {
                        /*
-                        * vma shrinks, and !insert tells it's not
-                        * split_vma inserting another: so it must
-                        * be mprotect shifting the boundary down.
-                        *   Or:
                         * vma expands, overlapping part of the next:
-                        * must be mprotect shifting the boundary up.
+                        * mprotect case 5 shifting the boundary up.
                         */
-                       BUG_ON(vma->vm_end != next->vm_start);
-                       adjust_next = end - next->vm_start;
+                       adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
                        anon_vma = next->anon_vma;
+                       importer = vma;
+               } else if (end < vma->vm_end) {
+                       /*
+                        * vma shrinks, and !insert tells it's not
+                        * split_vma inserting another: so it must be
+                        * mprotect case 4 shifting the boundary down.
+                        */
+                       adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
+                       anon_vma = next->anon_vma;
+                       importer = next;
                }
        }
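
The three branches map onto the mprotect merge cases named in the comments (cases 4, 5 and 6). A worked example, assuming PAGE_SHIFT == 12 (4K pages), vma covering [0x1000, 0x4000) and next covering [0x4000, 0x8000):

    /* case 6: end = 0x8000 -> end >= next->vm_end, so remove_next = 1
     *         and next is merged away entirely;
     * case 5: end = 0x6000 -> vma grows into next,
     *         adjust_next = (0x6000 - 0x4000) >> 12 = 2, importer = vma;
     * case 4: end = 0x3000 -> vma shrinks and next expands downwards,
     *         adjust_next = -((0x4000 - 0x3000) >> 12) = -1, importer = next.
     */
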
 
@@ -399,6 +408,15 @@ again:                     remove_next = 1 + (end > next->vm_end);
                if (!(vma->vm_flags & VM_NONLINEAR))
                        root = &mapping->i_mmap;
                spin_lock(&mapping->i_mmap_lock);
+               if (insert) {
+                       /*
+                        * Put into prio_tree now, so instantiated pages
+                        * are visible to arm/parisc __flush_dcache_page
+                        * throughout; but we cannot insert into address
+                        * space until vma start or end is updated.
+                        */
+                       __vma_link_file(insert);
+               }
        }
 
        /*
@@ -407,8 +425,18 @@ again:                     remove_next = 1 + (end > next->vm_end);
         */
        if (vma->anon_vma)
                anon_vma = vma->anon_vma;
-       if (anon_vma)
+       if (anon_vma) {
                spin_lock(&anon_vma->lock);
+               /*
+                * Easily overlooked: when mprotect shifts the boundary,
+                * make sure the expanding vma has anon_vma set if the
+                * shrinking vma had, to cover any anon pages imported.
+                */
+               if (importer && !importer->anon_vma) {
+                       importer->anon_vma = anon_vma;
+                       __anon_vma_link(importer);
+               }
+       }
 
        if (root) {
                flush_dcache_mmap_lock(mapping);
@@ -421,8 +449,8 @@ again:                      remove_next = 1 + (end > next->vm_end);
        vma->vm_end = end;
        vma->vm_pgoff = pgoff;
        if (adjust_next) {
-               next->vm_start += adjust_next;
-               next->vm_pgoff += adjust_next >> PAGE_SHIFT;
+               next->vm_start += adjust_next << PAGE_SHIFT;
+               next->vm_pgoff += adjust_next;
        }
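
adjust_next is now a signed page count rather than a byte delta, so a single quantity updates both the byte address and the page offset without mixing units (the old code shifted the byte delta down for vm_pgoff instead). Continuing the case-5 example above, with adjust_next = 2:

    /* next->vm_start += 2 << PAGE_SHIFT;  byte address advances by 0x2000
     * next->vm_pgoff += 2;                file offset advances by 2 pages
     * Both move by the same distance, so next keeps mapping the same
     * file contents at its new start address.
     */
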
 
        if (root) {
@@ -715,11 +743,11 @@ none:
  * The caller must hold down_write(current->mm->mmap_sem).
  */
 
-unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
-                       unsigned long len, unsigned long prot,
-                       unsigned long flags, unsigned long pgoff)
+unsigned long do_mmap_pgoff(struct mm_struct *mm, struct file * file,
+                           unsigned long addr, unsigned long len,
+                           unsigned long prot, unsigned long flags,
+                           unsigned long pgoff)
 {
-       struct mm_struct * mm = current->mm;
        struct vm_area_struct * vma, * prev;
        struct inode *inode;
        unsigned int vm_flags;
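
do_mmap_pgoff() now takes the mm_struct explicitly instead of reading current->mm internally, presumably so callers can act on an mm other than the current task's. A hedged sketch of the common-case call, assuming the caller already holds the semaphore as the comment above requires:

    down_write(&current->mm->mmap_sem);
    addr = do_mmap_pgoff(current->mm, file, addr, len, prot, flags, pgoff);
    up_write(&current->mm->mmap_sem);
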
@@ -772,15 +800,17 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                        mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
        if (flags & MAP_LOCKED) {
-               if (!capable(CAP_IPC_LOCK))
+               if (!can_do_mlock())
                        return -EPERM;
                vm_flags |= VM_LOCKED;
        }
        /* mlock MCL_FUTURE? */
        if (vm_flags & VM_LOCKED) {
-               unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+               unsigned long locked, lock_limit;
+               locked = mm->locked_vm << PAGE_SHIFT;
+               lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
                locked += len;
-               if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+               if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
        }
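
MAP_LOCKED no longer demands CAP_IPC_LOCK outright: can_do_mlock() admits any task that could mlock at all, and the capability is consulted only to exceed RLIMIT_MEMLOCK. A worked instance of the new check, assuming 4K pages:

    /* mm->locked_vm = 16 pages       -> locked       = 0x10000 bytes
     * len           = 0x20000 bytes  -> locked + len = 0x30000 bytes
     * RLIMIT_MEMLOCK rlim_cur        =                 0x20000 bytes
     * 0x30000 > 0x20000, so the mmap fails with -EAGAIN unless the
     * task has CAP_IPC_LOCK.
     */
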
 
@@ -853,6 +883,10 @@ munmap_back:
            > current->rlim[RLIMIT_AS].rlim_cur)
                return -ENOMEM;
 
+       /* check context space; maybe only for private writable mappings? */
+       if (!vx_vmpages_avail(mm, len >> PAGE_SHIFT))
+               return -ENOMEM;
+
        if (accountable && (!(flags & MAP_NORESERVE) ||
                        sysctl_overcommit_memory > 1)) {
                if (vm_flags & VM_SHARED) {
@@ -949,9 +983,11 @@ munmap_back:
                kmem_cache_free(vm_area_cachep, vma);
        }
 out:   
-       mm->total_vm += len >> PAGE_SHIFT;
+       // mm->total_vm += len >> PAGE_SHIFT;
+       vx_vmpages_add(mm, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
-               mm->locked_vm += len >> PAGE_SHIFT;
+               // mm->locked_vm += len >> PAGE_SHIFT;
+               vx_vmlocked_add(mm, len >> PAGE_SHIFT);
                make_pages_present(addr, addr + len);
        }
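
The vx_* calls come from the Linux-VServer patch; the commented-out lines show exactly which open-coded update each call replaces. A hedged sketch of their likely shape (the real definitions live in the vserver headers and may differ):

    static inline void vx_vmpages_add_sketch(struct mm_struct *mm,
                                             unsigned long pages)
    {
            mm->total_vm += pages;  /* the update it replaces, plus a */
                                    /* charge of 'pages' against the  */
                                    /* owning context's memory limit  */
    }

vx_vmpages_avail() earlier in this function would then be the matching pre-check: whether the context still has room for that charge before the mapping is created.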
        if (flags & MAP_POPULATE) {
@@ -1191,7 +1227,8 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
        grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
        /* Overcommit.. */
-       if (security_vm_enough_memory(grow)) {
+       if (security_vm_enough_memory(grow) ||
+               !vx_vmpages_avail(vma->vm_mm, grow)) {
                anon_vma_unlock(vma);
                return -ENOMEM;
        }
@@ -1203,10 +1240,13 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
                vm_unacct_memory(grow);
                return -ENOMEM;
        }
+
        vma->vm_end = address;
-       vma->vm_mm->total_vm += grow;
+       // vma->vm_mm->total_vm += grow;
+       vx_vmpages_add(vma->vm_mm, grow);
        if (vma->vm_flags & VM_LOCKED)
-               vma->vm_mm->locked_vm += grow;
+               // vma->vm_mm->locked_vm += grow;
+               vx_vmlocked_add(vma->vm_mm, grow);
        anon_vma_unlock(vma);
        return 0;
 }
@@ -1252,7 +1292,8 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
        grow = (vma->vm_start - address) >> PAGE_SHIFT;
 
        /* Overcommit.. */
-       if (security_vm_enough_memory(grow)) {
+       if (security_vm_enough_memory(grow) ||
+               !vx_vmpages_avail(vma->vm_mm, grow)) {
                anon_vma_unlock(vma);
                return -ENOMEM;
        }
@@ -1264,11 +1305,14 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
                vm_unacct_memory(grow);
                return -ENOMEM;
        }
+
        vma->vm_start = address;
        vma->vm_pgoff -= grow;
-       vma->vm_mm->total_vm += grow;
+       // vma->vm_mm->total_vm += grow;
+       vx_vmpages_add(vma->vm_mm, grow);
        if (vma->vm_flags & VM_LOCKED)
-               vma->vm_mm->locked_vm += grow;
+               // vma->vm_mm->locked_vm += grow;
+               vx_vmlocked_add(vma->vm_mm, grow);
        anon_vma_unlock(vma);
        return 0;
 }
@@ -1370,10 +1414,14 @@ no_mmaps:
 static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
 {
        size_t len = area->vm_end - area->vm_start;
+       unsigned long old_end = area->vm_end;
 
-       area->vm_mm->total_vm -= len >> PAGE_SHIFT;
+       // area->vm_mm->total_vm -= len >> PAGE_SHIFT;
+       vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
+
        if (area->vm_flags & VM_LOCKED)
-               area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
+               // area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
+               vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
        /*
         * Is this a new hole at the lowest possible address?
         */
@@ -1385,10 +1433,9 @@ static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
         */
        if (area->vm_start > area->vm_mm->non_executable_cache)
                area->vm_mm->non_executable_cache = area->vm_start;
-                                                                                                                                                                                                    
        remove_vm_struct(area);
        if (unlikely(area->vm_flags & VM_EXEC))
-               arch_remove_exec_range(mm, area->vm_end);
+               arch_remove_exec_range(mm, old_end);
 }
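
This is the classic save-before-free pattern: remove_vm_struct() ends by freeing the vma with kmem_cache_free(), so area->vm_end must not be read afterwards. The value is captured while the structure is still valid:

    unsigned long old_end = area->vm_end;   /* read while 'area' is valid */
    remove_vm_struct(area);                 /* frees 'area'               */
    arch_remove_exec_range(mm, old_end);    /* safe: uses the saved copy  */
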
 
 /*
@@ -1476,12 +1523,10 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 
        /* most fields are the same, copy all, and then fixup */
        *new = *vma;
+       vma_prio_tree_init(new);
 
-       if (new_below) {
-               if (vma->vm_flags & VM_EXEC)
-                       arch_remove_exec_range(mm, new->vm_end);
+       if (new_below)
                new->vm_end = addr;
-       }
        else {
                new->vm_start = addr;
                new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
@@ -1500,10 +1545,14 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
        if (new->vm_ops && new->vm_ops->open)
                new->vm_ops->open(new);
 
-       if (new_below)
+       if (new_below) {
+               unsigned long old_end = vma->vm_end;
+
                vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
                        ((addr - new->vm_start) >> PAGE_SHIFT), new);
-       else
+               if (vma->vm_flags & VM_EXEC)
+                       arch_remove_exec_range(mm, old_end);
+       } else
                vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
 
        return 0;
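
In the new_below case the exec-range trim now happens only after vma_adjust() has committed the split, and with the end address the vma had beforehand; the removed lines trimmed against new->vm_end before the split took effect. The ordering, in brief:

    /* 1. old_end = vma->vm_end;              capture the pre-split end
     * 2. vma_adjust(vma, addr, ...);         vma now ends at addr
     * 3. arch_remove_exec_range(mm, old_end);
     * Step 3 uses old_end because after step 2 vma->vm_end == addr.
     */
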
@@ -1619,10 +1668,13 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
         * mlock MCL_FUTURE?
         */
        if (mm->def_flags & VM_LOCKED) {
-               unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+               unsigned long locked, lock_limit;
+               locked = mm->locked_vm << PAGE_SHIFT;
+               lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
                locked += len;
-               if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+               if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
+               /* vserver checks? */
        }
 
        /*
@@ -1644,7 +1696,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if (mm->map_count > sysctl_max_map_count)
                return -ENOMEM;
 
-       if (security_vm_enough_memory(len >> PAGE_SHIFT))
+       if (security_vm_enough_memory(len >> PAGE_SHIFT) ||
+               !vx_vmpages_avail(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -1672,9 +1725,11 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        vma->vm_page_prot = protection_map[flags & 0x0f];
        vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
-       mm->total_vm += len >> PAGE_SHIFT;
+       // mm->total_vm += len >> PAGE_SHIFT;
+       vx_vmpages_add(mm, len >> PAGE_SHIFT);
        if (flags & VM_LOCKED) {
-               mm->locked_vm += len >> PAGE_SHIFT;
+               // mm->locked_vm += len >> PAGE_SHIFT;
+               vx_vmlocked_add(mm, len >> PAGE_SHIFT);
                make_pages_present(addr, addr + len);
        }
        return addr;
@@ -1708,9 +1763,12 @@ void exit_mmap(struct mm_struct *mm)
        vma = mm->mmap;
        mm->mmap = mm->mmap_cache = NULL;
        mm->mm_rb = RB_ROOT;
-       mm->rss = 0;
-       mm->total_vm = 0;
-       mm->locked_vm = 0;
+       // mm->rss = 0;
+       vx_rsspages_sub(mm, mm->rss);
+       // mm->total_vm = 0;
+       vx_vmpages_sub(mm, mm->total_vm);
+       // mm->locked_vm = 0;
+       vx_vmlocked_sub(mm, mm->locked_vm);
        arch_flush_exec_range(mm);
 
        spin_unlock(&mm->page_table_lock);
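
Instead of zeroing rss, total_vm and locked_vm directly, this variant subtracts their current values: the mm's counters still end up at zero, but the owning vserver context's aggregate counters are debited by the same amounts. A hedged restatement of the effect (the field name below is illustrative; exact vx_* semantics belong to the vserver patch):

    /* vx_vmpages_sub(mm, mm->total_vm):
     *     mm->total_vm      -= mm->total_vm;  -> 0, as before
     *     context->total_vm -= old value;     -> per-context totals stay
     *                                            consistent
     */
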
@@ -1792,6 +1850,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
                if (new_vma) {
                        *new_vma = *vma;
+                       vma_prio_tree_init(new_vma);
                        pol = mpol_copy(vma_policy(vma));
                        if (IS_ERR(pol)) {
                                kmem_cache_free(vm_area_cachep, new_vma);
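
Same pattern as in split_vma() above: the bitwise struct copy duplicates the source vma's prio_tree node links, so the copy must have them reinitialized before it is linked anywhere. In miniature:

    *new_vma = *vma;                /* copies prio_tree linkage too      */
    vma_prio_tree_init(new_vma);    /* reset the links before first use  */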