fedora core 6 1.2949 + vserver 2.2.0
index 3156c79..ebbd5b6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -25,6 +25,7 @@
 #include <linux/mount.h>
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -64,6 +65,13 @@ pgprot_t protection_map[16] = {
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 };
 
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+       return protection_map[vm_flags &
+                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+}
+EXPORT_SYMBOL(vm_get_page_prot);
+
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;      /* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
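The hunk above exports a small helper so callers no longer need to index protection_map directly. As a hedged illustration (the function name and its use of remap_pfn_range are hypothetical, not part of this patch), a driver's mmap handler could now derive the protection like this:

    /* Hypothetical caller sketch: derive vm_page_prot from the vma flags via
     * the newly exported helper instead of indexing protection_map by hand. */
    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
            vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
            /* map the physical range the caller asked for (vm_pgoff as pfn) */
            return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot);
    }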
@@ -100,7 +108,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                unsigned long n;
 
-               free = get_page_cache_size();
+               free = global_page_state(NR_FILE_PAGES);
                free += nr_swap_pages;
 
                /*
@@ -109,7 +117,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
                 * which are reclaimable, under pressure.  The dentry
                 * cache and most inode caches should fall into this
                 */
-               free += atomic_read(&slab_reclaim_pages);
+               free += global_page_state(NR_SLAB_RECLAIMABLE);
 
                /*
                 * Leave the last 3% for root
@@ -125,14 +133,26 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
                 * only call if we're about to fail.
                 */
                n = nr_free_pages();
+
+               /*
+        * Leave reserved pages; they are not available to anonymous pages.
+                */
+               if (n <= totalreserve_pages)
+                       goto error;
+               else
+                       n -= totalreserve_pages;
+
+               /*
+                * Leave the last 3% for root
+                */
                if (!cap_sys_admin)
                        n -= n / 32;
                free += n;
 
                if (free > pages)
                        return 0;
-               vm_unacct_memory(pages);
-               return -ENOMEM;
+
+               goto error;
        }
 
        allowed = (totalram_pages - hugetlb_total_pages())
@@ -154,7 +174,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
         */
        if (atomic_read(&vm_committed_space) < (long)allowed)
                return 0;
-
+error:
        vm_unacct_memory(pages);
 
        return -ENOMEM;
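With OVERCOMMIT_GUESS, the hunks above now subtract totalreserve_pages before granting the last-chance free-page credit. A rough worked example with made-up numbers: if nr_free_pages() returns 1000 and totalreserve_pages is 100, an unprivileged caller gets n = 900 - 900/32 = 872 pages added to the file-page, reclaimable-slab and swap estimate before the comparison against the request; if free pages drop to 100 or below, the request now jumps straight to the shared error: label, which performs the vm_unacct_memory(pages) rollback.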
@@ -169,7 +189,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
 {
        if (vma->vm_flags & VM_DENYWRITE)
-               atomic_inc(&file->f_dentry->d_inode->i_writecount);
+               atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
        if (vma->vm_flags & VM_SHARED)
                mapping->i_mmap_writable--;
 
@@ -224,6 +244,17 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 
        if (brk < mm->end_code)
                goto out;
+
+       /*
+        * Check against rlimit here. If this check is done later, after the
+        * test of oldbrk against newbrk, then it can escape that test and let
+        * the data segment grow beyond its set limit in the case where the
+        * limit is not page aligned  -Ram Gupta
+        */
+       rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+       if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+               goto out;
+
        newbrk = PAGE_ALIGN(brk);
        oldbrk = PAGE_ALIGN(mm->brk);
        if (oldbrk == newbrk)
@@ -236,11 +267,6 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
                goto out;
        }
 
-       /* Check against rlimit.. */
-       rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-       if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
-               goto out;
-
        /* Check against existing mmap mappings. */
        if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
                goto out;
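To see why the RLIMIT_DATA check had to move ahead of the page-aligned comparison, consider hypothetical numbers with 4 KB pages: with a data limit of 0x1800 bytes above mm->start_data and mm->brk already at that limit, a request for start_data + 0x1C00 gives PAGE_ALIGN(oldbrk) == PAGE_ALIGN(newbrk), so the old code took the oldbrk == newbrk shortcut and recorded the new brk without ever reaching the rlimit test. Repeated sub-page increments could therefore creep past a non-page-aligned limit; checking the limit first closes that gap.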
@@ -298,8 +324,7 @@ void validate_mm(struct mm_struct *mm)
        i = browse_rb(&mm->mm_rb);
        if (i != mm->map_count)
                printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
-       if (bug)
-               BUG();
+       BUG_ON(bug);
 }
 #else
 #define validate_mm(mm) do { } while (0)
@@ -346,6 +371,8 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+       if (vma->vm_flags & VM_EXEC)
+               arch_add_exec_range(mm, vma->vm_end);
        if (prev) {
                vma->vm_next = prev->vm_next;
                prev->vm_next = vma;
@@ -375,7 +402,7 @@ static inline void __vma_link_file(struct vm_area_struct *vma)
                struct address_space *mapping = file->f_mapping;
 
                if (vma->vm_flags & VM_DENYWRITE)
-                       atomic_dec(&file->f_dentry->d_inode->i_writecount);
+                       atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
                if (vma->vm_flags & VM_SHARED)
                        mapping->i_mmap_writable++;
 
@@ -436,8 +463,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
        struct rb_node ** rb_link, * rb_parent;
 
        __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
-       if (__vma && __vma->vm_start < vma->vm_end)
-               BUG();
+       BUG_ON(__vma && __vma->vm_start < vma->vm_end);
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        mm->map_count++;
 }
@@ -450,6 +476,8 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
        rb_erase(&vma->vm_rb, &mm->mm_rb);
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
+       if (vma->vm_flags & VM_EXEC)
+               arch_remove_exec_range(mm, vma->vm_end);
 }
 
 /*
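The arch_add_exec_range()/arch_remove_exec_range() hooks added in the two hunks above (and arch_flush_exec_range() used later in exit_mmap) come from the exec-shield patch set and are defined outside this file. On architectures that do not track an executable-segment limit they are presumably no-op macros, roughly along these lines (an assumption, not taken from this diff):

    #define arch_add_exec_range(mm, limit)    do { (void)(mm); (void)(limit); } while (0)
    #define arch_remove_exec_range(mm, limit) do { (void)(mm); (void)(limit); } while (0)
    #define arch_flush_exec_range(mm)         do { (void)(mm); } while (0)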
@@ -616,7 +644,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
                        struct file *file, unsigned long vm_flags)
@@ -755,6 +783,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                } else                                  /* cases 2, 5, 7 */
                        vma_adjust(prev, prev->vm_start,
                                end, prev->vm_pgoff, NULL);
+               if (prev->vm_flags & VM_EXEC)
+                       arch_add_exec_range(mm, prev->vm_end);
                return prev;
        }
 
@@ -817,8 +847,7 @@ try_prev:
         * (e.g. stash info in next's anon_vma_node when assigning
         * an anon_vma, or when trying vma_merge).  Another time.
         */
-       if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma)
-               BUG();
+       BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
        if (!near)
                goto none;
 
@@ -849,14 +878,6 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
        const unsigned long stack_flags
                = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
 
-#ifdef CONFIG_HUGETLB
-       if (flags & VM_HUGETLB) {
-               if (!(flags & VM_DONTCOPY))
-                       mm->shared_vm += pages;
-               return;
-       }
-#endif /* CONFIG_HUGETLB */
-
        if (file) {
                mm->shared_vm += pages;
                if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
@@ -886,17 +907,6 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        int accountable = 1;
        unsigned long charged = 0, reqprot = prot;
 
-       if (file) {
-               if (is_file_hugepages(file))
-                       accountable = 0;
-
-               if (!file->f_op || !file->f_op->mmap)
-                       return -ENODEV;
-
-               if ((prot & PROT_EXEC) &&
-                   (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
-                       return -EPERM;
-       }
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
         *
@@ -904,7 +914,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
         *  mounted, in which case we dont add PROT_EXEC.)
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
-               if (!(file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)))
+               if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
                        prot |= PROT_EXEC;
 
        if (!len)
@@ -930,7 +940,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        /* Obtain the address to map to. we verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
-       addr = get_unmapped_area(file, addr, len, pgoff, flags);
+       addr = get_unmapped_area_prot(file, addr, len, pgoff, flags, prot & PROT_EXEC);
        if (addr & ~PAGE_MASK)
                return addr;
 
@@ -957,7 +967,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                        return -EAGAIN;
        }
 
-       inode = file ? file->f_dentry->d_inode : NULL;
+       inode = file ? file->f_path.dentry->d_inode : NULL;
 
        if (file) {
                switch (flags & MAP_TYPE) {
@@ -986,6 +996,16 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                case MAP_PRIVATE:
                        if (!(file->f_mode & FMODE_READ))
                                return -EACCES;
+                       if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
+                               if (vm_flags & VM_EXEC)
+                                       return -EPERM;
+                               vm_flags &= ~VM_MAYEXEC;
+                       }
+                       if (is_file_hugepages(file))
+                               accountable = 0;
+
+                       if (!file->f_op || !file->f_op->mmap)
+                               return -ENODEV;
                        break;
 
                default:
@@ -1056,18 +1076,18 @@ munmap_back:
         * specific mapper. the address has already been validated, but
         * not unmapped, but the maps are removed from the list.
         */
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
                error = -ENOMEM;
                goto unacct_error;
        }
-       memset(vma, 0, sizeof(*vma));
 
        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags;
-       vma->vm_page_prot = protection_map[vm_flags & 0x0f];
+       vma->vm_page_prot = protection_map[vm_flags &
+                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
        vma->vm_pgoff = pgoff;
 
        if (file) {
@@ -1108,6 +1128,10 @@ munmap_back:
        pgoff = vma->vm_pgoff;
        vm_flags = vma->vm_flags;
 
+       if (vma_wants_writenotify(vma))
+               vma->vm_page_prot =
+                       protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+
        if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
                        vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
                file = vma->vm_file;
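The vma_wants_writenotify() branch above re-derives vm_page_prot with VM_SHARED dropped from the index. As an illustration with hypothetical flags: a MAP_SHARED, PROT_READ|PROT_WRITE file mapping normally selects __S011, but once write notifications are wanted the reduced mask selects __P011, i.e. the write-protected private protection, so the first store into the mapping faults and the kernel can note the dirtying before write access is restored.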
@@ -1336,16 +1360,21 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
                mm->free_area_cache = mm->mmap_base;
 }
 
+
 unsigned long
-get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
-               unsigned long pgoff, unsigned long flags)
+get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
+               unsigned long pgoff, unsigned long flags, int exec)
 {
        unsigned long ret;
 
        if (!(flags & MAP_FIXED)) {
                unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-               get_area = current->mm->get_unmapped_area;
+               if (exec && current->mm->get_unmapped_exec_area)
+                       get_area = current->mm->get_unmapped_exec_area;
+               else
+                       get_area = current->mm->get_unmapped_area;
+
                if (file && file->f_op && file->f_op->get_unmapped_area)
                        get_area = file->f_op->get_unmapped_area;
                addr = get_area(file, addr, len, pgoff, flags);
@@ -1362,7 +1391,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                 * Check if the given range is hugepage aligned, and
                 * can be made suitable for hugepages.
                 */
-               ret = prepare_hugepage_range(addr, len);
+               ret = prepare_hugepage_range(addr, len, pgoff);
        } else {
                /*
                 * Ensure that a normal request is not falling in a
@@ -1376,7 +1405,71 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        return addr;
 }
 
-EXPORT_SYMBOL(get_unmapped_area);
+EXPORT_SYMBOL(get_unmapped_area_prot);
+
+#define SHLIB_BASE             0x00110000
+
+unsigned long arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
+               unsigned long len0, unsigned long pgoff, unsigned long flags)
+{
+       unsigned long addr = addr0, len = len0;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long tmp;
+
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+
+       if (!addr && !(flags & MAP_FIXED))
+               addr = randomize_range(SHLIB_BASE, 0x01000000, len);
+
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+               if (TASK_SIZE - len >= addr &&
+                   (!vma || addr + len <= vma->vm_start)) {
+                       return addr;
+               }
+       }
+
+       addr = SHLIB_BASE;
+       for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+               /* At this point:  (!vma || addr < vma->vm_end). */
+               if (TASK_SIZE - len < addr)
+                       return -ENOMEM;
+
+               if (!vma || addr + len <= vma->vm_start) {
+                       /*
+                        * Must not let a PROT_EXEC mapping get into the
+                        * brk area:
+                        */
+                       if (addr + len > mm->brk)
+                               goto failed;
+
+                       /*
+                        * Up until the brk area we randomize addresses
+                        * as much as possible:
+                        */
+                       if (addr >= 0x01000000) {
+                               tmp = randomize_range(0x01000000, PAGE_ALIGN(max(mm->start_brk, (unsigned long)0x08000000)), len);
+                               vma = find_vma(mm, tmp);
+                               if (TASK_SIZE - len >= tmp &&
+                                   (!vma || tmp + len <= vma->vm_start))
+                                       return tmp;
+                       }
+                       /*
+                        * OK, randomization didn't work out - return
+                        * the result of the linear search:
+                        */
+                       return addr;
+               }
+               addr = vma->vm_end;
+       }
+
+failed:
+       return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
+}
+
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
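Since the export switches from get_unmapped_area to get_unmapped_area_prot, existing callers presumably keep working through a thin compatibility wrapper defined elsewhere in the patch set (for example in include/linux/mm.h). A minimal sketch of what such a wrapper would look like, assuming non-executable requests pass exec = 0:

    static inline unsigned long
    get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                      unsigned long pgoff, unsigned long flags)
    {
            /* non-executable request: use the regular allocator path */
            return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
    }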
@@ -1451,6 +1544,14 @@ out:
        return prev ? prev->vm_next : vma;
 }
 
+static int over_stack_limit(unsigned long sz)
+{
+       if (sz < EXEC_STACK_BIAS)
+               return 0;
+       return (sz - EXEC_STACK_BIAS) >
+                       current->signal->rlim[RLIMIT_STACK].rlim_cur;
+}
+
 /*
  * Verify that the stack growth is acceptable and
  * update accounting. This is shared with both the
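over_stack_limit() above depends on EXEC_STACK_BIAS, an arch-provided slack constant that is not defined in this file. Presumably architectures that do not relocate the stack for exec-shield define it as 0, which collapses the helper back to the old "size > rlim_cur" comparison; a hedged sketch of that assumed fallback:

    /* Assumed fallback (not shown in this diff): with no bias the check is
     * identical to the previous direct rlimit comparison. */
    #ifndef EXEC_STACK_BIAS
    #define EXEC_STACK_BIAS 0UL
    #endif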
@@ -1460,13 +1561,14 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
 {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
+       unsigned long new_start;
 
        /* address space limit tests */
        if (!may_expand_vm(mm, grow))
                return -ENOMEM;
 
        /* Stack limit test */
-       if (size > rlim[RLIMIT_STACK].rlim_cur)
+       if (over_stack_limit(size))
                return -ENOMEM;
 
        /* mlock limit tests */
@@ -1479,6 +1581,12 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
                        return -ENOMEM;
        }
 
+       /* Check to ensure the stack will not grow into a hugetlb-only region */
+       new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
+                       vma->vm_end - size;
+       if (is_hugepage_only_range(vma->vm_mm, new_start, size))
+               return -EFAULT;
+
        /*
         * Overcommit..  This must be the final test, as it will
         * update security statistics.
@@ -1719,7 +1827,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
        if (mm->map_count >= sysctl_max_map_count)
                return -ENOMEM;
 
-       new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
@@ -1746,10 +1854,14 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
        if (new->vm_ops && new->vm_ops->open)
                new->vm_ops->open(new);
 
-       if (new_below)
+       if (new_below) {
+               unsigned long old_end = vma->vm_end;
+
                vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
                        ((addr - new->vm_start) >> PAGE_SHIFT), new);
-       else
+               if (vma->vm_flags & VM_EXEC)
+                       arch_remove_exec_range(mm, old_end);
+       } else
                vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
 
        return 0;
@@ -1863,6 +1975,9 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if ((addr + len) > TASK_SIZE || (addr + len) < addr)
                return -EINVAL;
 
+       if (is_hugepage_only_range(mm, addr, len))
+               return -EINVAL;
+
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
        error = arch_mmap_check(addr, len, flags);
@@ -1920,19 +2035,19 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        /*
         * create a vma struct for an anonymous mapping
         */
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
                vm_unacct_memory(len >> PAGE_SHIFT);
                return -ENOMEM;
        }
-       memset(vma, 0, sizeof(*vma));
 
        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_pgoff = pgoff;
        vma->vm_flags = flags;
-       vma->vm_page_prot = protection_map[flags & 0x0f];
+       vma->vm_page_prot = protection_map[flags &
+                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
        vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
        vx_vmpages_add(mm, len >> PAGE_SHIFT);
@@ -1953,6 +2068,10 @@ void exit_mmap(struct mm_struct *mm)
        unsigned long nr_accounted = 0;
        unsigned long end;
 
+#ifdef arch_exit_mmap
+       arch_exit_mmap(mm);
+#endif
+
        lru_add_drain();
        flush_cache_mm(mm);
        tlb = tlb_gather_mmu(mm, 1);
@@ -1962,6 +2081,7 @@ void exit_mmap(struct mm_struct *mm)
        vm_unacct_memory(nr_accounted);
        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
        tlb_finish_mmu(tlb, 0, end);
+       arch_flush_exec_range(mm);
 
        set_mm_counter(mm, file_rss, 0);
        set_mm_counter(mm, anon_rss, 0);
@@ -2046,7 +2166,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                    vma_start < new_vma->vm_end)
                        *vmap = new_vma;
        } else {
-               new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+               new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (new_vma) {
                        *new_vma = *vma;
                        pol = mpol_copy(vma_policy(vma));