linux 2.6.16.38 w/ vs2.0.3-rc1
diff --git a/mm/mprotect.c b/mm/mprotect.c
index f23231f..653b857 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
 #include <linux/mempolicy.h>
 #include <linux/personality.h>
 #include <linux/syscalls.h>
-#include <linux/swap.h>
-#include <linux/swapops.h>
+
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
-#include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
 static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
-               unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable)
+               unsigned long addr, unsigned long end, pgprot_t newprot)
 {
-       pte_t *pte, oldpte;
+       pte_t *pte;
        spinlock_t *ptl;
 
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        do {
-               oldpte = *pte;
-               if (pte_present(oldpte)) {
+               if (pte_present(*pte)) {
                        pte_t ptent;
 
                        /* Avoid an SMP race with hardware-updated dirty/clean
                         * bits by wiping the pte and then setting the new pte
                         * into place.
                         */
-                       ptent = ptep_get_and_clear(mm, addr, pte);
-                       ptent = pte_modify(ptent, newprot);
-                       /*
-                        * Avoid taking write faults for pages we know to be
-                        * dirty.
-                        */
-                       if (dirty_accountable && pte_dirty(ptent))
-                               ptent = pte_mkwrite(ptent);
+                       ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
                        set_pte_at(mm, addr, pte, ptent);
                        lazy_mmu_prot_update(ptent);
-#ifdef CONFIG_MIGRATION
-               } else if (!pte_file(oldpte)) {
-                       swp_entry_t entry = pte_to_swp_entry(oldpte);
-
-                       if (is_write_migration_entry(entry)) {
-                               /*
-                                * A protection check is difficult so
-                                * just be safe and disable write
-                                */
-                               make_migration_entry_read(&entry);
-                               set_pte_at(mm, addr, pte,
-                                       swp_entry_to_pte(entry));
-                       }
-#endif
                }
-
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(pte - 1, ptl);
 }
 
 static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
-               unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable)
+               unsigned long addr, unsigned long end, pgprot_t newprot)
 {
        pmd_t *pmd;
        unsigned long next;
@@ -86,13 +59,12 @@ static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
-               change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
+               change_pte_range(mm, pmd, addr, next, newprot);
        } while (pmd++, addr = next, addr != end);
 }
 
 static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
-               unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable)
+               unsigned long addr, unsigned long end, pgprot_t newprot)
 {
        pud_t *pud;
        unsigned long next;
@@ -102,13 +74,12 @@ static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
-               change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
+               change_pmd_range(mm, pud, addr, next, newprot);
        } while (pud++, addr = next, addr != end);
 }
 
 static void change_protection(struct vm_area_struct *vma,
-               unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable)
+               unsigned long addr, unsigned long end, pgprot_t newprot)
 {
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
@@ -122,7 +93,7 @@ static void change_protection(struct vm_area_struct *vma,
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
+               change_pud_range(mm, pgd, addr, next, newprot);
        } while (pgd++, addr = next, addr != end);
        flush_tlb_range(vma, start, end);
 }
@@ -134,10 +105,10 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        struct mm_struct *mm = vma->vm_mm;
        unsigned long oldflags = vma->vm_flags;
        long nrpages = (end - start) >> PAGE_SHIFT;
-       unsigned long charged = 0, old_end = vma->vm_end;
+       unsigned long charged = 0;
+       pgprot_t newprot;
        pgoff_t pgoff;
        int error;
-       int dirty_accountable = 0;
 
        if (newflags == oldflags) {
                *pprev = vma;
@@ -153,7 +124,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
         * a MAP_NORESERVE private mapping to writable will now reserve.
         */
        if (newflags & VM_WRITE) {
-               if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
+               if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED|VM_HUGETLB))) {
                        charged = nrpages;
                        if (security_vm_enough_memory(charged))
                                return -ENOMEM;
@@ -161,6 +132,8 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
                }
        }
 
+       newprot = protection_map[newflags & 0xf];
+
        /*
         * First try to merge with previous and/or next vma.
         */
@@ -192,20 +165,8 @@ success:
         * held in write mode.
         */
        vma->vm_flags = newflags;
-       if (oldflags & VM_EXEC)
-               arch_remove_exec_range(current->mm, old_end);
-       vma->vm_page_prot = protection_map[newflags &
-               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
-       if (vma_wants_writenotify(vma)) {
-               vma->vm_page_prot = protection_map[newflags &
-                       (VM_READ|VM_WRITE|VM_EXEC)];
-               dirty_accountable = 1;
-       }
-
-       if (is_vm_hugetlb_page(vma))
-               hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
-       else
-               change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
+       vma->vm_page_prot = newprot;
+       change_protection(vma, start, end, newprot);
        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
        return 0;
@@ -241,7 +202,8 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC:
         */
-       if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+       if (unlikely((prot & PROT_READ) &&
+                       (current->personality & READ_IMPLIES_EXEC)))
                prot |= PROT_EXEC;
 
        vm_flags = calc_vm_prot_bits(prot);
@@ -278,6 +240,11 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 
                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
 
+               if (is_vm_hugetlb_page(vma)) {
+                       error = -EACCES;
+                       goto out;
+               }
+
                newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
 
                /* newflags >> 4 shifts VM_MAY% in place of VM_% */
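
For context (not part of the patch): a minimal userspace sketch of the mprotect(2) semantics that sys_mprotect above implements. The start address must be page-aligned (sys_mprotect returns -EINVAL otherwise), and dropping PROT_WRITE is what drives the change_protection() page-table walk shown in this diff. The variable names (p, page) are illustrative only.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* One anonymous, private, read-write page; mmap returns a
	 * page-aligned address, so it is a valid mprotect start. */
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(p, "hello");

	/* Drop write permission on the page. */
	if (mprotect(p, page, PROT_READ) != 0) {
		perror("mprotect");
		return 1;
	}

	printf("%s\n", p);	/* reads still succeed */
	/* p[0] = 'H';		   a write here would now fault (SIGSEGV) */

	munmap(p, page);
	return 0;
}

Note that on the tree shown here, hugetlb mappings are rejected outright with -EACCES (the hunk just above), so this sketch applies only to normal pages.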