patch-2.6.6-vs1.9.0
index bbdcb91..f7990f9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -614,6 +614,10 @@ munmap_back:
            > current->rlim[RLIMIT_AS].rlim_cur)
                return -ENOMEM;
 
+       /* check context space; maybe charge only private, writable mappings? */
+       if (!vx_vmpages_avail(mm, len >> PAGE_SHIFT))
+               return -ENOMEM;
+
        if (accountable && (!(flags & MAP_NORESERVE) ||
                        sysctl_overcommit_memory > 1)) {
                if (vm_flags & VM_SHARED) {
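
The vx_vmpages_avail() helper is supplied by the vserver patch itself, not
by mm/mmap.c. A minimal sketch of its likely shape, modeled on the vs1.9
limit code; the struct and field names (mm_vx_info, limit.rcur, limit.rlim)
are assumptions, not verified against the full patch:

    /* sketch only: does the context have room for 'pages' more pages? */
    static inline int vx_pages_avail(struct vx_info *vxi,
    	unsigned long pages, int res)
    {
    	if (!vxi)				/* task not in a vserver context */
    		return 1;
    	if (vxi->limit.rlim[res] == RLIM_INFINITY)
    		return 1;
    	return atomic_read(&vxi->limit.rcur[res]) + pages
    		< vxi->limit.rlim[res];
    }

    #define vx_vmpages_avail(mm, p) \
    	vx_pages_avail((mm)->mm_vx_info, (p), RLIMIT_AS)
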
@@ -708,9 +712,11 @@ munmap_back:
                kmem_cache_free(vm_area_cachep, vma);
        }
 out:   
-       mm->total_vm += len >> PAGE_SHIFT;
+       // mm->total_vm += len >> PAGE_SHIFT;
+       vx_vmpages_add(mm, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
-               mm->locked_vm += len >> PAGE_SHIFT;
+               // mm->locked_vm += len >> PAGE_SHIFT;
+               vx_vmlocked_add(mm, len >> PAGE_SHIFT);
                make_pages_present(addr, addr + len);
        }
        if (flags & MAP_POPULATE) {
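
Note why the replaced lines are kept as comments: vx_vmpages_add() is not a
second counter bolted on beside mm->total_vm, it is expected to update
mm->total_vm itself and then charge the owning context, so tasks outside any
context see unchanged behaviour. A sketch under that assumption (using
RLIMIT_AS and RLIMIT_MEMLOCK as the per-context slots is itself an
assumption):

    /* sketch only: update the mm counter, then charge the context */
    #define vx_acc_pages(mm, pages, res) do {			\
    	if ((mm)->mm_vx_info)					\
    		atomic_add((pages),				\
    			&(mm)->mm_vx_info->limit.rcur[res]);	\
    } while (0)

    #define vx_vmpages_add(mm, p) do {				\
    	(mm)->total_vm += (p);					\
    	vx_acc_pages((mm), (p), RLIMIT_AS);			\
    } while (0)

    #define vx_vmlocked_add(mm, p) do {				\
    	(mm)->locked_vm += (p);					\
    	vx_acc_pages((mm), (p), RLIMIT_MEMLOCK);		\
    } while (0)
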
@@ -935,7 +941,8 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
        grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
        /* Overcommit.. */
-       if (security_vm_enough_memory(grow)) {
+       if (security_vm_enough_memory(grow) ||
+               !vx_vmpages_avail(vma->vm_mm, grow)) {
                spin_unlock(&vma->vm_mm->page_table_lock);
                return -ENOMEM;
        }
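
One subtlety: in 2.6.6, security_vm_enough_memory() charges
vm_committed_space on success, and callers undo that charge with
vm_unacct_memory() on any later failure (visible in the next hunk). With
the || ordering above, a failing vx_vmpages_avail() after a successful
security check returns -ENOMEM without unaccounting, leaking the charge.
Checking the context first would avoid this; the same ordering question
applies to the second expand_stack() variant and to do_brk() below:

    	/* possible reordering: fail cheaply before committing memory */
    	if (!vx_vmpages_avail(vma->vm_mm, grow) ||
    			security_vm_enough_memory(grow)) {
    		spin_unlock(&vma->vm_mm->page_table_lock);
    		return -ENOMEM;
    	}
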
@@ -947,10 +954,13 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
                vm_unacct_memory(grow);
                return -ENOMEM;
        }
+
        vma->vm_end = address;
-       vma->vm_mm->total_vm += grow;
+       // vma->vm_mm->total_vm += grow;
+       vx_vmpages_add(vma->vm_mm, grow);
        if (vma->vm_flags & VM_LOCKED)
-               vma->vm_mm->locked_vm += grow;
+               // vma->vm_mm->locked_vm += grow;
+               vx_vmlocked_add(vma->vm_mm, grow);
        spin_unlock(&vma->vm_mm->page_table_lock);
        return 0;
 }
@@ -989,7 +999,8 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
        grow = (vma->vm_start - address) >> PAGE_SHIFT;
 
        /* Overcommit.. */
-       if (security_vm_enough_memory(grow)) {
+       if (security_vm_enough_memory(grow) ||
+               !vx_vmpages_avail(vma->vm_mm, grow)) {
                spin_unlock(&vma->vm_mm->page_table_lock);
                return -ENOMEM;
        }
@@ -1001,11 +1012,14 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
                vm_unacct_memory(grow);
                return -ENOMEM;
        }
+
        vma->vm_start = address;
        vma->vm_pgoff -= grow;
-       vma->vm_mm->total_vm += grow;
+       // vma->vm_mm->total_vm += grow;
+       vx_vmpages_add(vma->vm_mm, grow);
        if (vma->vm_flags & VM_LOCKED)
-               vma->vm_mm->locked_vm += grow;
+               // vma->vm_mm->locked_vm += grow;
+               vx_vmlocked_add(vma->vm_mm, grow);
        spin_unlock(&vma->vm_mm->page_table_lock);
        return 0;
 }
@@ -1108,9 +1122,12 @@ static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
 {
        size_t len = area->vm_end - area->vm_start;
 
-       area->vm_mm->total_vm -= len >> PAGE_SHIFT;
+       // area->vm_mm->total_vm -= len >> PAGE_SHIFT;
+       vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
+
        if (area->vm_flags & VM_LOCKED)
-               area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
+               // area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
+               vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
        /*
         * Is this a new hole at the lowest possible address?
         */
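
unmap_vma() uses the subtraction twins of the helpers above; under the same
assumptions they simply mirror the add side:

    /* sketch only: vx_vmlocked_sub() is analogous with RLIMIT_MEMLOCK */
    #define vx_vmpages_sub(mm, p) do {				\
    	(mm)->total_vm -= (p);					\
    	if ((mm)->mm_vx_info)					\
    		atomic_sub((p),					\
    			&(mm)->mm_vx_info->limit.rcur[RLIMIT_AS]);\
    } while (0)
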
@@ -1365,6 +1382,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
                locked += len;
                if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        return -EAGAIN;
+               /* TODO: per-context locked-page check here? */
        }
 
        /*
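
The TODO in this hunk asks whether mlock'ed pages should also be limited per
context; no such check is added here. If one were, it might look like the
following, where vx_vmlocked_avail() is an invented name for illustration
only:

    		/* hypothetical per-context locked-page limit */
    		if (!vx_vmlocked_avail(mm, len >> PAGE_SHIFT))
    			return -EAGAIN;
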
@@ -1386,7 +1404,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if (mm->map_count > sysctl_max_map_count)
                return -ENOMEM;
 
-       if (security_vm_enough_memory(len >> PAGE_SHIFT))
+       if (security_vm_enough_memory(len >> PAGE_SHIFT) ||
+               !vx_vmpages_avail(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -1419,9 +1438,11 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        vma_link(mm, vma, prev, rb_link, rb_parent);
 
 out:
-       mm->total_vm += len >> PAGE_SHIFT;
+       // mm->total_vm += len >> PAGE_SHIFT;
+       vx_vmpages_add(mm, len >> PAGE_SHIFT);
        if (flags & VM_LOCKED) {
-               mm->locked_vm += len >> PAGE_SHIFT;
+               // mm->locked_vm += len >> PAGE_SHIFT;
+               vx_vmlocked_add(mm, len >> PAGE_SHIFT);
                make_pages_present(addr, addr + len);
        }
        return addr;
@@ -1455,9 +1476,12 @@ void exit_mmap(struct mm_struct *mm)
        vma = mm->mmap;
        mm->mmap = mm->mmap_cache = NULL;
        mm->mm_rb = RB_ROOT;
-       mm->rss = 0;
-       mm->total_vm = 0;
-       mm->locked_vm = 0;
+       // mm->rss = 0;
+       vx_rsspages_sub(mm, mm->rss);
+       // mm->total_vm = 0;
+       vx_vmpages_sub(mm, mm->total_vm);
+       // mm->locked_vm = 0;
+       vx_vmlocked_sub(mm, mm->locked_vm);
 
        spin_unlock(&mm->page_table_lock);
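
In exit_mmap() the patch subtracts the current values instead of zeroing the
fields: writing 0 directly would clear the mm counters but leave the
per-context counters permanently inflated, whereas subtracting mm->rss,
mm->total_vm and mm->locked_vm zeroes the mm fields and uncharges the
context by exactly what was previously charged. A sketch of the rss variant,
assuming an RLIMIT_RSS slot in the same per-context table:

    /* sketch only: uncharge rss pages from the context, if any */
    #define vx_rsspages_sub(mm, p) do {				\
    	(mm)->rss -= (p);					\
    	if ((mm)->mm_vx_info)					\
    		atomic_sub((p),					\
    			&(mm)->mm_vx_info->limit.rcur[RLIMIT_RSS]);\
    } while (0)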