fedora core 6 1.2949 + vserver 2.2.0
index 5d92d30..ebbd5b6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -117,7 +117,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
                 * which are reclaimable, under pressure.  The dentry
                 * cache and most inode caches should fall into this
                 */
-               free += atomic_read(&slab_reclaim_pages);
+               free += global_page_state(NR_SLAB_RECLAIMABLE);
 
                /*
                 * Leave the last 3% for root
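
The line above changed because 2.6.19 dropped the global atomic
slab_reclaim_pages counter in favour of the per-zone vmstat counters.
For context, the accessor the new code calls looked roughly like this in
that era's include/linux/vmstat.h (a sketch, not part of this patch):

    static inline unsigned long global_page_state(enum zone_stat_item item)
    {
            long x = atomic_long_read(&vm_stat[item]);
    #ifdef CONFIG_SMP
            if (x < 0)      /* per-cpu folding can leave a transient negative */
                    x = 0;
    #endif
            return x;
    }
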
@@ -189,7 +189,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
 {
        if (vma->vm_flags & VM_DENYWRITE)
-               atomic_inc(&file->f_dentry->d_inode->i_writecount);
+               atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
        if (vma->vm_flags & VM_SHARED)
                mapping->i_mmap_writable--;
 
@@ -402,7 +402,7 @@ static inline void __vma_link_file(struct vm_area_struct *vma)
                struct address_space *mapping = file->f_mapping;
 
                if (vma->vm_flags & VM_DENYWRITE)
-                       atomic_dec(&file->f_dentry->d_inode->i_writecount);
+                       atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
                if (vma->vm_flags & VM_SHARED)
                        mapping->i_mmap_writable++;
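
This hunk and the previous one track the 2.6.20 struct file layout
change: the separate f_dentry and f_vfsmnt pointers were folded into an
embedded struct path f_path, with the old names kept for a while as
compatibility macros. Roughly, from that era's headers:

    struct path {
            struct vfsmount *mnt;
            struct dentry *dentry;
    };

    #define f_dentry        f_path.dentry
    #define f_vfsmnt        f_path.mnt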
 
@@ -907,17 +907,6 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        int accountable = 1;
        unsigned long charged = 0, reqprot = prot;
 
-       if (file) {
-               if (is_file_hugepages(file))
-                       accountable = 0;
-
-               if (!file->f_op || !file->f_op->mmap)
-                       return -ENODEV;
-
-               if ((prot & PROT_EXEC) &&
-                   (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
-                       return -EPERM;
-       }
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
         *
@@ -925,7 +914,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
         *  mounted, in which case we dont add PROT_EXEC.)
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
-               if (!(file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)))
+               if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
                        prot |= PROT_EXEC;
 
        if (!len)
@@ -978,7 +967,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                        return -EAGAIN;
        }
 
-       inode = file ? file->f_dentry->d_inode : NULL;
+       inode = file ? file->f_path.dentry->d_inode : NULL;
 
        if (file) {
                switch (flags & MAP_TYPE) {
@@ -1007,6 +996,16 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                case MAP_PRIVATE:
                        if (!(file->f_mode & FMODE_READ))
                                return -EACCES;
+                       if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
+                               if (vm_flags & VM_EXEC)
+                                       return -EPERM;
+                               vm_flags &= ~VM_MAYEXEC;
+                       }
+                       if (is_file_hugepages(file))
+                               accountable = 0;
+
+                       if (!file->f_op || !file->f_op->mmap)
+                               return -ENODEV;
                        break;
 
                default:
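
The hugepage, f_op->mmap and MNT_NOEXEC checks deleted from the top of
do_mmap_pgoff() in the earlier hunk reappear here inside the MAP_TYPE
switch (MAP_SHARED falls through to MAP_PRIVATE, so both cases are
covered). The noexec test now keys off the computed vm_flags: an
explicit PROT_EXEC on a noexec mount still gets EPERM, while other
mappings merely lose VM_MAYEXEC, so a later exec upgrade is refused.
Seen from userspace, roughly (fd, len illustrative):

    /* fd comes from a filesystem mounted noexec */
    void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
    /* succeeds, but with VM_MAYEXEC cleared, so: */
    mprotect(p, len, PROT_READ | PROT_EXEC);    /* fails, EACCES */
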
@@ -1392,7 +1391,7 @@ get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
                 * Check if the given range is hugepage aligned, and
                 * can be made suitable for hugepages.
                 */
-               ret = prepare_hugepage_range(addr, len);
+               ret = prepare_hugepage_range(addr, len, pgoff);
        } else {
                /*
                 * Ensure that a normal request is not falling in a
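
prepare_hugepage_range() gained a pgoff argument in 2.6.20 so the file
offset can be checked for hugepage alignment along with addr and len.
The generic fallback, for architectures that do not override it, looked
roughly like:

    static inline int prepare_hugepage_range(unsigned long addr,
                                             unsigned long len, pgoff_t pgoff)
    {
            if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
                    return -EINVAL;
            if (len & ~HPAGE_MASK)
                    return -EINVAL;
            if (addr & ~HPAGE_MASK)
                    return -EINVAL;
            return 0;
    }
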
@@ -1562,6 +1561,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
 {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
+       unsigned long new_start;
 
        /* address space limit tests */
        if (!may_expand_vm(mm, grow))
@@ -1581,6 +1581,12 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
                        return -ENOMEM;
        }
 
+       /* Check to ensure the stack will not grow into a hugetlb-only region */
+       new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
+                       vma->vm_end - size;
+       if (is_hugepage_only_range(vma->vm_mm, new_start, size))
+               return -EFAULT;
+
        /*
         * Overcommit..  This must be the final test, as it will
         * update security statistics.
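
new_start picks whichever edge of the stack vma is about to move: a
VM_GROWSUP stack keeps vm_start and extends vm_end, while a grow-down
stack's prospective start is vm_end - size. The range is then tested
against regions an architecture reserves exclusively for hugepages;
most architectures stub the test out, roughly:

    /* generic fallback unless the arch provides its own */
    #ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
    #define is_hugepage_only_range(mm, addr, len)   0
    #endif
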
@@ -1821,7 +1827,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
        if (mm->map_count >= sysctl_max_map_count)
                return -ENOMEM;
 
-       new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!new)
                return -ENOMEM;
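
The SLAB_KERNEL rename here (and in copy_vma() below) is mechanical:
the SLAB_* allocation aliases were removed in 2.6.20, having been plain
synonyms for the gfp flags, roughly:

    /* pre-2.6.20 include/linux/slab.h */
    #define SLAB_KERNEL     GFP_KERNEL
    #define SLAB_ATOMIC     GFP_ATOMIC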
 
@@ -1969,6 +1975,9 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if ((addr + len) > TASK_SIZE || (addr + len) < addr)
                return -EINVAL;
 
+       if (is_hugepage_only_range(mm, addr, len))
+               return -EINVAL;
+
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
        error = arch_mmap_check(addr, len, flags);
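
do_brk() gets the same hugepage-only-region guard as the stack-growth
path above, failing early with EINVAL instead of letting the heap
extend into such a region. On architectures where the check is live it
tests the actual address layout; e.g. ia64's version went approximately
(from memory, not part of this patch):

    /* asm-ia64: heap/stack must stay out of the hugepage region */
    #define is_hugepage_only_range(mm, addr, len)           \
            (REGION_NUMBER(addr) == RGN_HPAGE ||            \
             REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE)
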
@@ -2157,7 +2166,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                    vma_start < new_vma->vm_end)
                        *vmap = new_vma;
        } else {
-               new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+               new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (new_vma) {
                        *new_vma = *vma;
                        pol = mpol_copy(vma_policy(vma));
@@ -2196,81 +2205,3 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
                return 0;
        return 1;
 }
-
-
-static struct page *
-special_mapping_nopage(struct vm_area_struct *vma,
-                      unsigned long address, int *type)
-{
-       struct page **pages;
-
-       BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-
-       address -= vma->vm_start;
-       for (pages = vma->vm_private_data; address > 0 && *pages; ++pages)
-               address -= PAGE_SIZE;
-
-       if (*pages) {
-               get_page(*pages);
-               return *pages;
-       }
-
-       return NOPAGE_SIGBUS;
-}
-
-static struct vm_operations_struct special_mapping_vmops = {
-       .nopage = special_mapping_nopage,
-};
-
-unsigned int vdso_populate = 1;
-
-/*
- * Insert a new vma covering the given region, with the given flags and
- * protections.  Its pages are supplied by the given null-terminated array.
- * The region past the last page supplied will always produce SIGBUS.
- * The array pointer and the pages it points to are assumed to stay alive
- * for as long as this mapping might exist.
- */
-int install_special_mapping(struct mm_struct *mm,
-                           unsigned long addr, unsigned long len,
-                           unsigned long vm_flags, pgprot_t pgprot,
-                           struct page **pages)
-{
-       struct vm_area_struct *vma;
-       int err;
-
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-       if (unlikely(vma == NULL))
-               return -ENOMEM;
-       memset(vma, 0, sizeof(*vma));
-
-       vma->vm_mm = mm;
-       vma->vm_start = addr;
-       vma->vm_end = addr + len;
-
-       vma->vm_flags = vm_flags;
-       vma->vm_page_prot = pgprot;
-
-       vma->vm_ops = &special_mapping_vmops;
-       vma->vm_private_data = pages;
-
-       insert_vm_struct(mm, vma);
-       vx_vmpages_add(mm, len >> PAGE_SHIFT);
-
-       if (!vdso_populate)
-               return 0;
-
-       err = 0;
-       while (*pages) {
-               struct page *page = *pages++;
-               get_page(page);
-               err = install_page(mm, vma, addr, page, vma->vm_page_prot);
-               if (err) {
-                       put_page(page);
-                       break;
-               }
-               addr += PAGE_SIZE;
-       }
-
-       return err;
-}
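
The block deleted above was the Fedora-carried vDSO helper:
special_mapping_nopage() served pages from a NULL-terminated array,
install_special_mapping() built the vma (with a vserver
vx_vmpages_add() accounting hook), and the vdso_populate knob
optionally pre-installed every page. Mainline merged an equivalent
helper in 2.6.21 under the same name but without the explicit pgprot
parameter, approximately:

    int install_special_mapping(struct mm_struct *mm,
                                unsigned long addr, unsigned long len,
                                unsigned long vm_flags,
                                struct page **pages);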