vserver 2.0-rc4
diff --git a/mm/mmap.c b/mm/mmap.c
index d04d60a..800b745 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1113,11 +1113,9 @@ munmap_back:
                kmem_cache_free(vm_area_cachep, vma);
        }
 out:   
-       // mm->total_vm += len >> PAGE_SHIFT;
        vx_vmpages_add(mm, len >> PAGE_SHIFT);
        __vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
-               // mm->locked_vm += len >> PAGE_SHIFT;
                vx_vmlocked_add(mm, len >> PAGE_SHIFT);
                make_pages_present(addr, addr + len);
        }
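
A note on the pattern this patch applies throughout: every direct update of an mm_struct counter (total_vm, locked_vm, rss) becomes a vx_* helper call, so the same page count is also charged to the task's Linux-VServer context, and the commented-out originals are dropped. The helpers are defined elsewhere in the vserver patch and are not part of this diff; the following is a sketch of their presumed shape only, in which mm_vx_info, vx_acc_cres() and the RLIMIT_AS tag are illustrative assumptions:

	/*
	 * Sketch, not the patch's actual definitions: keep the plain
	 * counter update, then charge the attached context as well.
	 * The amount is evaluated once, since callers such as
	 * exit_mmap() pass the counter itself as the argument.
	 */
	#define vx_vmpages_add(mm, pages) do {				\
		long __p = (pages);					\
		(mm)->total_vm += __p;					\
		if ((mm)->mm_vx_info)	/* assumed context pointer */	\
			vx_acc_cres((mm)->mm_vx_info, __p, RLIMIT_AS);	\
	} while (0)

	#define vx_vmpages_sub(mm, pages)	vx_vmpages_add(mm, -(pages))

	/* vx_vmlocked_*() and vx_rsspages_*() would follow the same
	 * shape for locked_vm (RLIMIT_MEMLOCK) and rss. */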
@@ -1321,37 +1319,40 @@ unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
 {
-       if (flags & MAP_FIXED) {
-               unsigned long ret;
+       unsigned long ret;
 
-               if (addr > TASK_SIZE - len)
-                       return -ENOMEM;
-               if (addr & ~PAGE_MASK)
-                       return -EINVAL;
-               if (file && is_file_hugepages(file))  {
-                       /*
-                        * Check if the given range is hugepage aligned, and
-                        * can be made suitable for hugepages.
-                        */
-                       ret = prepare_hugepage_range(addr, len);
-               } else {
-                       /*
-                        * Ensure that a normal request is not falling in a
-                        * reserved hugepage range.  For some archs like IA-64,
-                        * there is a separate region for hugepages.
-                        */
-                       ret = is_hugepage_only_range(addr, len);
-               }
-               if (ret)
-                       return -EINVAL;
-               return addr;
-       }
+       if (!(flags & MAP_FIXED)) {
+               unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-       if (file && file->f_op && file->f_op->get_unmapped_area)
-               return file->f_op->get_unmapped_area(file, addr, len,
-                                               pgoff, flags);
+               get_area = current->mm->get_unmapped_area;
+               if (file && file->f_op && file->f_op->get_unmapped_area)
+                       get_area = file->f_op->get_unmapped_area;
+               addr = get_area(file, addr, len, pgoff, flags);
+               if (IS_ERR_VALUE(addr))
+                       return addr;
+       }
 
-       return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+       if (addr > TASK_SIZE - len)
+               return -ENOMEM;
+       if (addr & ~PAGE_MASK)
+               return -EINVAL;
+       if (file && is_file_hugepages(file))  {
+               /*
+                * Check if the given range is hugepage aligned, and
+                * can be made suitable for hugepages.
+                */
+               ret = prepare_hugepage_range(addr, len);
+       } else {
+               /*
+                * Ensure that a normal request is not falling in a
+                * reserved hugepage range.  For some archs like IA-64,
+                * there is a separate region for hugepages.
+                */
+               ret = is_hugepage_only_range(addr, len);
+       }
+       if (ret)
+               return -EINVAL;
+       return addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
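
The hunk above is more than the accounting substitution: it restructures get_unmapped_area() so that the TASK_SIZE, page-alignment and hugepage sanity tests, previously applied only to MAP_FIXED requests, now also cover addresses returned by the file's or the mm's get_unmapped_area hook. A successful return is therefore always page aligned and an error is always a negative code, which is roughly how do_mmap_pgoff() in this file consumes the result:

	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (addr & ~PAGE_MASK)		/* negative errno is never aligned */
		return addr;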
@@ -1468,10 +1469,8 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
                return -ENOMEM;
 
        /* Ok, everything looks good - let it rip */
-       // mm->total_vm += grow;
        vx_vmpages_add(mm, grow);
        if (vma->vm_flags & VM_LOCKED)
-               // mm->locked_vm += grow;
                vx_vmlocked_add(mm, grow);
        __vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
        acct_update_integrals();
@@ -1674,11 +1673,9 @@ static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
 {
        size_t len = area->vm_end - area->vm_start;
 
-       // area->vm_mm->total_vm -= len >> PAGE_SHIFT;
        vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
 
        if (area->vm_flags & VM_LOCKED)
-               // area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
                vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
        vm_stat_unaccount(area);
        area->vm_mm->unmap_area(area);
@@ -1980,10 +1977,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        vma->vm_page_prot = protection_map[flags & 0x0f];
        vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
-       // mm->total_vm += len >> PAGE_SHIFT;
        vx_vmpages_add(mm, len >> PAGE_SHIFT);
        if (flags & VM_LOCKED) {
-               // mm->locked_vm += len >> PAGE_SHIFT;
                vx_vmlocked_add(mm, len >> PAGE_SHIFT);
                make_pages_present(addr, addr + len);
        }
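
As in the mmap() path of the first hunk, do_brk() pre-faults VM_LOCKED ranges with make_pages_present() right after charging them, so the mlock guarantee holds from the moment the mapping exists. A simplified sketch of that helper, after mm/memory.c of this kernel generation, with error handling trimmed:

	/* Fault in every page of [addr, end) via get_user_pages() so a
	 * locked mapping never takes a later fault.  Sketch only. */
	int make_pages_present(unsigned long addr, unsigned long end)
	{
		struct vm_area_struct *vma = find_vma(current->mm, addr);
		int write = (vma->vm_flags & VM_WRITE) != 0;
		int len = (end + PAGE_SIZE - 1) / PAGE_SIZE - addr / PAGE_SIZE;
		int ret;

		ret = get_user_pages(current, current->mm, addr, len,
				     write, 0, NULL, NULL);
		return (ret == len) ? 0 : -1;
	}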
@@ -2019,11 +2014,8 @@ void exit_mmap(struct mm_struct *mm)
        vma = mm->mmap;
        mm->mmap = mm->mmap_cache = NULL;
        mm->mm_rb = RB_ROOT;
-       // mm->rss = 0;
        vx_rsspages_sub(mm, mm->rss);
-       // mm->total_vm = 0;
        vx_vmpages_sub(mm, mm->total_vm);
-       // mm->locked_vm = 0;
        vx_vmlocked_sub(mm, mm->locked_vm);
 
        spin_unlock(&mm->page_table_lock);
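
One idiom in this last hunk is worth spelling out: exit_mmap() hands each counter's current value to the matching _sub helper, which, under the sketch after the first hunk, both zeroes the counter and uncharges the full amount from the attached context:

	/* e.g. vx_vmpages_sub(mm, mm->total_vm) behaves like
	 *	mm->total_vm = 0;
	 * while also releasing that many pages from the context's
	 * RLIMIT_AS accounting (an assumption of the sketch above). */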