X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Fmprotect.c;h=05dc34246b20e29e46545af8e744c013d6f5ecfc;hb=9464c7cf61b9433057924c36e6e02f303a00e768;hp=f23231f7e794b4eb39cf47f05599caed44513c88;hpb=41689045f6a3cbe0550e1d34e9cc20d2e8c432ba;p=linux-2.6.git

diff --git a/mm/mprotect.c b/mm/mprotect.c
index f23231f7e..05dc34246 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -19,8 +19,7 @@
 #include <linux/mempolicy.h>
 #include <linux/personality.h>
 #include <linux/syscalls.h>
-#include <linux/swap.h>
-#include <linux/swapops.h>
+
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
@@ -28,55 +27,30 @@
 #include <asm/tlbflush.h>
 
 static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
-		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable)
+		unsigned long addr, unsigned long end, pgprot_t newprot)
 {
-	pte_t *pte, oldpte;
+	pte_t *pte;
 	spinlock_t *ptl;
 
 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	do {
-		oldpte = *pte;
-		if (pte_present(oldpte)) {
+		if (pte_present(*pte)) {
 			pte_t ptent;
 
 			/* Avoid an SMP race with hardware updated dirty/clean
 			 * bits by wiping the pte and then setting the new pte
 			 * into place. */
-			ptent = ptep_get_and_clear(mm, addr, pte);
-			ptent = pte_modify(ptent, newprot);
-			/*
-			 * Avoid taking write faults for pages we know to be
-			 * dirty.
-			 */
-			if (dirty_accountable && pte_dirty(ptent))
-				ptent = pte_mkwrite(ptent);
+			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
 			set_pte_at(mm, addr, pte, ptent);
 			lazy_mmu_prot_update(ptent);
-#ifdef CONFIG_MIGRATION
-		} else if (!pte_file(oldpte)) {
-			swp_entry_t entry = pte_to_swp_entry(oldpte);
-
-			if (is_write_migration_entry(entry)) {
-				/*
-				 * A protection check is difficult so
-				 * just be safe and disable write
-				 */
-				make_migration_entry_read(&entry);
-				set_pte_at(mm, addr, pte,
-					swp_entry_to_pte(entry));
-			}
-#endif
 		}
-
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	pte_unmap_unlock(pte - 1, ptl);
 }
 
 static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
-		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable)
+		unsigned long addr, unsigned long end, pgprot_t newprot)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -86,13 +60,12 @@ static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
+		change_pte_range(mm, pmd, addr, next, newprot);
 	} while (pmd++, addr = next, addr != end);
 }
 
 static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
-		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable)
+		unsigned long addr, unsigned long end, pgprot_t newprot)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -102,13 +75,12 @@ static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
+		change_pmd_range(mm, pud, addr, next, newprot);
 	} while (pud++, addr = next, addr != end);
 }
 
 static void change_protection(struct vm_area_struct *vma,
-		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable)
+		unsigned long addr, unsigned long end, pgprot_t newprot)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
@@ -122,7 +94,7 @@ static void change_protection(struct vm_area_struct *vma,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
+		change_pud_range(mm, pgd, addr, next, newprot);
 	} while (pgd++, addr = next, addr != end);
 	flush_tlb_range(vma, start, end);
 }
@@ -135,9 +107,9 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	unsigned long oldflags = vma->vm_flags;
 	long nrpages = (end - start) >> PAGE_SHIFT;
 	unsigned long charged = 0, old_end = vma->vm_end;
+	pgprot_t newprot;
 	pgoff_t pgoff;
 	int error;
-	int dirty_accountable = 0;
 
 	if (newflags == oldflags) {
 		*pprev = vma;
@@ -161,6 +133,8 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 		}
 	}
 
+	newprot = protection_map[newflags & 0xf];
+
 	/*
 	 * First try to merge with previous and/or next vma.
 	 */
@@ -192,20 +166,13 @@ success:
 	 * held in write mode.
 	 */
 	vma->vm_flags = newflags;
+	vma->vm_page_prot = newprot;
 	if (oldflags & VM_EXEC)
 		arch_remove_exec_range(current->mm, old_end);
-	vma->vm_page_prot = protection_map[newflags &
-		(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
-	if (vma_wants_writenotify(vma)) {
-		vma->vm_page_prot = protection_map[newflags &
-			(VM_READ|VM_WRITE|VM_EXEC)];
-		dirty_accountable = 1;
-	}
-
 	if (is_vm_hugetlb_page(vma))
-		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
+		hugetlb_change_protection(vma, start, end, newprot);
 	else
-		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
+		change_protection(vma, start, end, newprot);
 	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
 	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
 	return 0;
@@ -241,7 +208,8 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 	/*
 	 * Does the application expect PROT_READ to imply PROT_EXEC:
 	 */
-	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+	if (unlikely((prot & PROT_READ) &&
+			(current->personality & READ_IMPLIES_EXEC)))
		prot |= PROT_EXEC;
 
 	vm_flags = calc_vm_prot_bits(prot);
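
A note on the new "newprot = protection_map[newflags & 0xf];" line: protection_map[] is the 16-entry table in mm/mmap.c that translates the four low vm_flags bits into an arch-specific pgprot_t, so the bare 0xf is exactly the (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED) mask used on the lines this hunk removes. A minimal user-space sketch of the index arithmetic, with the flag values copied from include/linux/mm.h (illustration only, not part of the patch):

#include <stdio.h>

/* Low vm_flags bits, values copied from include/linux/mm.h (2.6-era).
 * User-space illustration only. */
#define VM_READ		0x00000001UL
#define VM_WRITE	0x00000002UL
#define VM_EXEC		0x00000004UL
#define VM_SHARED	0x00000008UL

int main(void)
{
	unsigned long newflags = VM_READ | VM_WRITE | VM_SHARED;

	/* Both masks select the same four bits, so both yield the same
	 * protection_map[] index (11 for this combination). */
	printf("0xf mask:      %lu\n", newflags & 0xfUL);
	printf("explicit mask: %lu\n",
	       newflags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED));
	return 0;
}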
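
For readers tracing the sys_mprotect() entry point, here is a hedged user-space sketch of the behaviour the pgd/pud/pmd/pte walk above implements; everything below is plain POSIX, nothing is taken from the kernel tree. A page is mapped read-write, written, then downgraded to read-only, after which reads still succeed and a write would fault with SIGSEGV:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "hello");	/* page is writable, store succeeds */

	if (mprotect(p, page, PROT_READ))
		return 1;	/* kernel runs mprotect_fixup() here */

	printf("%s\n", p);	/* reads are still allowed */
	/* *p = 'x'; */		/* would now deliver SIGSEGV */

	munmap(p, page);
	return 0;
}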
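
The last hunk only wraps the existing READ_IMPLIES_EXEC test in unlikely(); the semantics are untouched: when that personality bit is set, a PROT_READ request is silently widened to include PROT_EXEC. A user-space sketch of the effect, assuming a Linux system with /proc mounted and an architecture whose kernel honours READ_IMPLIES_EXEC (the mapping should then appear as "r-xp" rather than "r--p"):

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/personality.h>

int main(void)
{
	char cmd[64];
	void *p;

	/* Query the current personality (0xffffffff) and OR in the flag. */
	if (personality(personality(0xffffffffUL) | READ_IMPLIES_EXEC) == -1)
		return 1;

	p = mmap(NULL, 4096, PROT_READ,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Show the permission bits the kernel actually installed. */
	snprintf(cmd, sizeof(cmd), "grep %lx /proc/self/maps",
		 (unsigned long)p);
	return system(cmd);
}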