linux 2.6.16.38 w/ vs2.0.3-rc1
diff --git a/mm/mmap.c b/mm/mmap.c
index 5d92d30..3156c79 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -25,7 +25,6 @@
 #include <linux/mount.h>
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
-#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -65,13 +64,6 @@ pgprot_t protection_map[16] = {
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 };
 
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
-{
-       return protection_map[vm_flags &
-                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
-}
-EXPORT_SYMBOL(vm_get_page_prot);
-
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;      /* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
@@ -108,7 +100,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                unsigned long n;
 
-               free = global_page_state(NR_FILE_PAGES);
+               free = get_page_cache_size();
                free += nr_swap_pages;
 
                /*
@@ -133,26 +125,14 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
                 * only call if we're about to fail.
                 */
                n = nr_free_pages();
-
-               /*
-                * Leave reserved pages. The pages are not for anonymous pages.
-                */
-               if (n <= totalreserve_pages)
-                       goto error;
-               else
-                       n -= totalreserve_pages;
-
-               /*
-                * Leave the last 3% for root
-                */
                if (!cap_sys_admin)
                        n -= n / 32;
                free += n;
 
                if (free > pages)
                        return 0;
-
-               goto error;
+               vm_unacct_memory(pages);
+               return -ENOMEM;
        }
 
        allowed = (totalram_pages - hugetlb_total_pages())
@@ -174,7 +154,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
         */
        if (atomic_read(&vm_committed_space) < (long)allowed)
                return 0;
-error:
+
        vm_unacct_memory(pages);
 
        return -ENOMEM;
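/*
 * Context for the __vm_enough_memory() hunks above -- a minimal userspace
 * sketch, not part of the patch: sysctl_overcommit_memory and
 * sysctl_overcommit_ratio are exposed as /proc/sys/vm/overcommit_memory
 * (0 = OVERCOMMIT_GUESS, the heuristic path changed above, 1 = always
 * succeed, 2 = strict accounting) and /proc/sys/vm/overcommit_ratio.
 * Note that the "!cap_sys_admin" branch keeps n/32, i.e. roughly the last
 * 3% of free pages, in reserve for root.
 */
#include <stdio.h>

int main(void)
{
	int mode = 0, ratio = 0;
	FILE *f;

	if ((f = fopen("/proc/sys/vm/overcommit_memory", "r"))) {
		fscanf(f, "%d", &mode);
		fclose(f);
	}
	if ((f = fopen("/proc/sys/vm/overcommit_ratio", "r"))) {
		fscanf(f, "%d", &ratio);
		fclose(f);
	}
	printf("overcommit_memory=%d overcommit_ratio=%d%%\n", mode, ratio);
	return 0;
}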
@@ -244,17 +224,6 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 
        if (brk < mm->end_code)
                goto out;
-
-       /*
-        * Check against rlimit here. If this check is done later after the test
-        * of oldbrk with newbrk then it can escape the test and let the data
-        * segment grow beyond its set limit in the case where the limit is
-        * not page aligned -Ram Gupta
-        */
-       rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-       if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
-               goto out;
-
        newbrk = PAGE_ALIGN(brk);
        oldbrk = PAGE_ALIGN(mm->brk);
        if (oldbrk == newbrk)
@@ -267,6 +236,11 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
                goto out;
        }
 
+       /* Check against rlimit.. */
+       rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+       if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+               goto out;
+
        /* Check against existing mmap mappings. */
        if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
                goto out;
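/*
 * The hunks above move sys_brk()'s RLIMIT_DATA check; as a minimal
 * userspace sketch (not from this patch), this is the behaviour that check
 * implements: once brk would put the data segment past the soft limit,
 * further growth fails with ENOMEM.
 */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 1 << 20, .rlim_max = 1 << 20 };

	if (setrlimit(RLIMIT_DATA, &rl))	/* cap the data segment at 1 MiB */
		perror("setrlimit");

	if (sbrk(4 << 20) == (void *)-1)	/* growing it by 4 MiB must fail */
		printf("sbrk: %s (as expected)\n", strerror(errno));
	return 0;
}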
@@ -324,7 +298,8 @@ void validate_mm(struct mm_struct *mm)
        i = browse_rb(&mm->mm_rb);
        if (i != mm->map_count)
                printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
-       BUG_ON(bug);
+       if (bug)
+               BUG();
 }
 #else
 #define validate_mm(mm) do { } while (0)
@@ -371,8 +346,6 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
-       if (vma->vm_flags & VM_EXEC)
-               arch_add_exec_range(mm, vma->vm_end);
        if (prev) {
                vma->vm_next = prev->vm_next;
                prev->vm_next = vma;
@@ -463,7 +436,8 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
        struct rb_node ** rb_link, * rb_parent;
 
        __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
-       BUG_ON(__vma && __vma->vm_start < vma->vm_end);
+       if (__vma && __vma->vm_start < vma->vm_end)
+               BUG();
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        mm->map_count++;
 }
@@ -476,8 +450,6 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
        rb_erase(&vma->vm_rb, &mm->mm_rb);
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
-       if (vma->vm_flags & VM_EXEC)
-               arch_remove_exec_range(mm, vma->vm_end);
 }
 
 /*
@@ -644,7 +616,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
+#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
                        struct file *file, unsigned long vm_flags)
@@ -783,8 +755,6 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                } else                                  /* cases 2, 5, 7 */
                        vma_adjust(prev, prev->vm_start,
                                end, prev->vm_pgoff, NULL);
-               if (prev->vm_flags & VM_EXEC)
-                       arch_add_exec_range(mm, prev->vm_end);
                return prev;
        }
 
@@ -847,7 +817,8 @@ try_prev:
         * (e.g. stash info in next's anon_vma_node when assigning
         * an anon_vma, or when trying vma_merge).  Another time.
         */
-       BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
+       if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma)
+               BUG();
        if (!near)
                goto none;
 
@@ -878,6 +849,14 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
        const unsigned long stack_flags
                = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
 
+#ifdef CONFIG_HUGETLB
+       if (flags & VM_HUGETLB) {
+               if (!(flags & VM_DONTCOPY))
+                       mm->shared_vm += pages;
+               return;
+       }
+#endif /* CONFIG_HUGETLB */
+
        if (file) {
                mm->shared_vm += pages;
                if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
@@ -951,7 +930,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        /* Obtain the address to map to. we verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
-       addr = get_unmapped_area_prot(file, addr, len, pgoff, flags, prot & PROT_EXEC);
+       addr = get_unmapped_area(file, addr, len, pgoff, flags);
        if (addr & ~PAGE_MASK)
                return addr;
 
@@ -1077,18 +1056,18 @@ munmap_back:
         * specific mapper. the address has already been validated, but
         * not unmapped, but the maps are removed from the list.
         */
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!vma) {
                error = -ENOMEM;
                goto unacct_error;
        }
+       memset(vma, 0, sizeof(*vma));
 
        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags;
-       vma->vm_page_prot = protection_map[vm_flags &
-                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+       vma->vm_page_prot = protection_map[vm_flags & 0x0f];
        vma->vm_pgoff = pgoff;
 
        if (file) {
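/*
 * Note on the protection_map change above (repeated in do_brk() further
 * down): the magic mask 0x0f selects the same 16-entry table slot as the
 * named form, because the low four vm_flags bits are VM_READ (0x1),
 * VM_WRITE (0x2), VM_EXEC (0x4) and VM_SHARED (0x8).  A compile-time
 * sketch of that equivalence (the include is redundant inside mmap.c):
 */
#include <linux/mm.h>

static char __protection_map_mask_check
	[(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED) == 0x0f ? 1 : -1]
	__attribute__((unused));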
@@ -1129,10 +1108,6 @@ munmap_back:
        pgoff = vma->vm_pgoff;
        vm_flags = vma->vm_flags;
 
-       if (vma_wants_writenotify(vma))
-               vma->vm_page_prot =
-                       protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
-
        if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
                        vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
                file = vma->vm_file;
@@ -1361,21 +1336,16 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
                mm->free_area_cache = mm->mmap_base;
 }
 
-
 unsigned long
-get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
-               unsigned long pgoff, unsigned long flags, int exec)
+get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+               unsigned long pgoff, unsigned long flags)
 {
        unsigned long ret;
 
        if (!(flags & MAP_FIXED)) {
                unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-               if (exec && current->mm->get_unmapped_exec_area)
-                       get_area = current->mm->get_unmapped_exec_area;
-               else
-                       get_area = current->mm->get_unmapped_area;
-
+               get_area = current->mm->get_unmapped_area;
                if (file && file->f_op && file->f_op->get_unmapped_area)
                        get_area = file->f_op->get_unmapped_area;
                addr = get_area(file, addr, len, pgoff, flags);
@@ -1406,71 +1376,7 @@ get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
        return addr;
 }
 
-EXPORT_SYMBOL(get_unmapped_area_prot);
-
-#define SHLIB_BASE             0x00110000
-
-unsigned long arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
-               unsigned long len0, unsigned long pgoff, unsigned long flags)
-{
-       unsigned long addr = addr0, len = len0;
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       unsigned long tmp;
-
-       if (len > TASK_SIZE)
-               return -ENOMEM;
-
-       if (!addr && !(flags & MAP_FIXED))
-               addr = randomize_range(SHLIB_BASE, 0x01000000, len);
-
-       if (addr) {
-               addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start)) {
-                       return addr;
-               }
-       }
-
-       addr = SHLIB_BASE;
-       for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-               /* At this point:  (!vma || addr < vma->vm_end). */
-               if (TASK_SIZE - len < addr)
-                       return -ENOMEM;
-
-               if (!vma || addr + len <= vma->vm_start) {
-                       /*
-                        * Must not let a PROT_EXEC mapping get into the
-                        * brk area:
-                        */
-                       if (addr + len > mm->brk)
-                               goto failed;
-
-                       /*
-                        * Up until the brk area we randomize addresses
-                        * as much as possible:
-                        */
-                       if (addr >= 0x01000000) {
-                               tmp = randomize_range(0x01000000, PAGE_ALIGN(max(mm->start_brk, (unsigned long)0x08000000)), len);
-                               vma = find_vma(mm, tmp);
-                               if (TASK_SIZE - len >= tmp &&
-                                   (!vma || tmp + len <= vma->vm_start))
-                                       return tmp;
-                       }
-                       /*
-                        * Ok, randomization didn't work out - return
-                        * the result of the linear search:
-                        */
-                       return addr;
-               }
-               addr = vma->vm_end;
-       }
-
-failed:
-       return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
-}
-
+EXPORT_SYMBOL(get_unmapped_area);
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
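/*
 * Userspace view of the restored get_unmapped_area() path above, as a
 * sketch (not from this patch): without MAP_FIXED the addr argument is
 * only a hint and the kernel's get_unmapped_area() picks the final range;
 * with MAP_FIXED the hint is used verbatim.
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* hint only: the kernel is free to place the mapping elsewhere */
	void *p = mmap((void *)0x10000000, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p != MAP_FAILED)
		printf("mapped at %p\n", p);
	return 0;
}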
@@ -1545,14 +1451,6 @@ out:
        return prev ? prev->vm_next : vma;
 }
 
-static int over_stack_limit(unsigned long sz)
-{
-       if (sz < EXEC_STACK_BIAS)
-               return 0;
-       return (sz - EXEC_STACK_BIAS) >
-                       current->signal->rlim[RLIMIT_STACK].rlim_cur;
-}
-
 /*
  * Verify that the stack growth is acceptable and
  * update accounting. This is shared with both the
@@ -1568,7 +1466,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
                return -ENOMEM;
 
        /* Stack limit test */
-       if (over_stack_limit(size))
+       if (size > rlim[RLIMIT_STACK].rlim_cur)
                return -ENOMEM;
 
        /* mlock limit tests */
@@ -1848,14 +1746,10 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
        if (new->vm_ops && new->vm_ops->open)
                new->vm_ops->open(new);
 
-       if (new_below) {
-               unsigned long old_end = vma->vm_end;
-
+       if (new_below)
                vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
                        ((addr - new->vm_start) >> PAGE_SHIFT), new);
-               if (vma->vm_flags & VM_EXEC)
-                       arch_remove_exec_range(mm, old_end);
-       } else
+       else
                vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
 
        return 0;
@@ -2026,19 +1920,19 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        /*
         * create a vma struct for an anonymous mapping
         */
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!vma) {
                vm_unacct_memory(len >> PAGE_SHIFT);
                return -ENOMEM;
        }
+       memset(vma, 0, sizeof(*vma));
 
        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_pgoff = pgoff;
        vma->vm_flags = flags;
-       vma->vm_page_prot = protection_map[flags &
-                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+       vma->vm_page_prot = protection_map[flags & 0x0f];
        vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
        vx_vmpages_add(mm, len >> PAGE_SHIFT);
@@ -2059,10 +1953,6 @@ void exit_mmap(struct mm_struct *mm)
        unsigned long nr_accounted = 0;
        unsigned long end;
 
-#ifdef arch_exit_mmap
-       arch_exit_mmap(mm);
-#endif
-
        lru_add_drain();
        flush_cache_mm(mm);
        tlb = tlb_gather_mmu(mm, 1);
@@ -2072,7 +1962,6 @@ void exit_mmap(struct mm_struct *mm)
        vm_unacct_memory(nr_accounted);
        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
        tlb_finish_mmu(tlb, 0, end);
-       arch_flush_exec_range(mm);
 
        set_mm_counter(mm, file_rss, 0);
        set_mm_counter(mm, anon_rss, 0);
@@ -2196,81 +2085,3 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
                return 0;
        return 1;
 }
-
-
-static struct page *
-special_mapping_nopage(struct vm_area_struct *vma,
-                      unsigned long address, int *type)
-{
-       struct page **pages;
-
-       BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-
-       address -= vma->vm_start;
-       for (pages = vma->vm_private_data; address > 0 && *pages; ++pages)
-               address -= PAGE_SIZE;
-
-       if (*pages) {
-               get_page(*pages);
-               return *pages;
-       }
-
-       return NOPAGE_SIGBUS;
-}
-
-static struct vm_operations_struct special_mapping_vmops = {
-       .nopage = special_mapping_nopage,
-};
-
-unsigned int vdso_populate = 1;
-
-/*
- * Insert a new vma covering the given region, with the given flags and
- * protections.  Its pages are supplied by the given null-terminated array.
- * The region past the last page supplied will always produce SIGBUS.
- * The array pointer and the pages it points to are assumed to stay alive
- * for as long as this mapping might exist.
- */
-int install_special_mapping(struct mm_struct *mm,
-                           unsigned long addr, unsigned long len,
-                           unsigned long vm_flags, pgprot_t pgprot,
-                           struct page **pages)
-{
-       struct vm_area_struct *vma;
-       int err;
-
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-       if (unlikely(vma == NULL))
-               return -ENOMEM;
-       memset(vma, 0, sizeof(*vma));
-
-       vma->vm_mm = mm;
-       vma->vm_start = addr;
-       vma->vm_end = addr + len;
-
-       vma->vm_flags = vm_flags;
-       vma->vm_page_prot = pgprot;
-
-       vma->vm_ops = &special_mapping_vmops;
-       vma->vm_private_data = pages;
-
-       insert_vm_struct(mm, vma);
-       vx_vmpages_add(mm, len >> PAGE_SHIFT);
-
-       if (!vdso_populate)
-               return 0;
-
-       err = 0;
-       while (*pages) {
-               struct page *page = *pages++;
-               get_page(page);
-               err = install_page(mm, vma, addr, page, vma->vm_page_prot);
-               if (err) {
-                       put_page(page);
-                       break;
-               }
-               addr += PAGE_SIZE;
-       }
-
-       return err;
-}
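/*
 * For reference on the install_special_mapping() removed above: a minimal
 * sketch of a caller (hypothetical names, not from either tree) mapping one
 * vDSO-style page.  The page array must be NULL-terminated and must outlive
 * the mapping, as the comment block above requires; example_vdso_pages[0]
 * would be filled in with an allocated page by the caller.
 */
static struct page *example_vdso_pages[2];	/* [1] stays NULL as terminator */

static int example_map_vdso(struct mm_struct *mm, unsigned long addr)
{
	return install_special_mapping(mm, addr, PAGE_SIZE,
			VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
			protection_map[VM_READ | VM_EXEC],
			example_vdso_pages);
}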