X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Fmprotect.c;h=653b8571c1ed10e86c94ae727147b77ce0e09788;hb=987b0145d94eecf292d8b301228356f44611ab7c;hp=b9d4f20b081fab23ab591b2095bd034259b66eb4;hpb=bc77d24c47b89f1e0efed0b8e4be5f8aad102883;p=linux-2.6.git

diff --git a/mm/mprotect.c b/mm/mprotect.c
index b9d4f20b0..653b8571c 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -17,110 +17,100 @@
 #include <linux/highmem.h>
 #include <linux/security.h>
 #include <linux/mempolicy.h>
+#include <linux/personality.h>
+#include <linux/syscalls.h>
 
 #include <asm/uaccess.h>
-#include
 #include <asm/pgtable.h>
-#include
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-static inline void
-change_pte_range(pmd_t *pmd, unsigned long address,
-		unsigned long size, pgprot_t newprot)
+static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
+		unsigned long addr, unsigned long end, pgprot_t newprot)
 {
-	pte_t * pte;
-	unsigned long end;
-
-	if (pmd_none(*pmd))
-		return;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
-		return;
-	}
-	pte = pte_offset_map(pmd, address);
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
+	pte_t *pte;
+	spinlock_t *ptl;
+
+	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	do {
 		if (pte_present(*pte)) {
-			pte_t entry;
+			pte_t ptent;
 
 			/* Avoid an SMP race with hardware updated dirty/clean
 			 * bits by wiping the pte and then setting the new pte
 			 * into place. */
-			entry = ptep_get_and_clear(pte);
-			set_pte(pte, pte_modify(entry, newprot));
+			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
+			set_pte_at(mm, addr, pte, ptent);
+			lazy_mmu_prot_update(ptent);
 		}
-		address += PAGE_SIZE;
-		pte++;
-	} while (address && (address < end));
-	pte_unmap(pte - 1);
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pte_unmap_unlock(pte - 1, ptl);
 }
 
-static inline void
-change_pmd_range(pgd_t *pgd, unsigned long address,
-		unsigned long size, pgprot_t newprot)
+static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
+		unsigned long addr, unsigned long end, pgprot_t newprot)
 {
-	pmd_t * pmd;
-	unsigned long end;
-
-	if (pgd_none(*pgd))
-		return;
-	if (pgd_bad(*pgd)) {
-		pgd_ERROR(*pgd);
-		pgd_clear(pgd);
-		return;
-	}
-	pmd = pmd_offset(pgd, address);
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
+	pmd_t *pmd;
+	unsigned long next;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none_or_clear_bad(pmd))
+			continue;
+		change_pte_range(mm, pmd, addr, next, newprot);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
+		unsigned long addr, unsigned long end, pgprot_t newprot)
+{
+	pud_t *pud;
+	unsigned long next;
+
+	pud = pud_offset(pgd, addr);
 	do {
-		change_pte_range(pmd, address, end - address, newprot);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address && (address < end));
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		change_pmd_range(mm, pud, addr, next, newprot);
+	} while (pud++, addr = next, addr != end);
 }
 
-static void
-change_protection(struct vm_area_struct *vma, unsigned long start,
-		unsigned long end, pgprot_t newprot)
+static void change_protection(struct vm_area_struct *vma,
+		unsigned long addr, unsigned long end, pgprot_t newprot)
 {
-	pgd_t *dir;
-	unsigned long beg = start;
-
-	dir = pgd_offset(current->mm, start);
-	flush_cache_range(vma, beg, end);
-	if (start >= end)
-		BUG();
-	spin_lock(&current->mm->page_table_lock);
+	struct mm_struct *mm = vma->vm_mm;
+	pgd_t *pgd;
+	unsigned long next;
+	unsigned long start = addr;
+
+	BUG_ON(addr >= end);
+	pgd = pgd_offset(mm, addr);
+	flush_cache_range(vma, addr, end);
 	do {
-		change_pmd_range(dir, start, end - start, newprot);
-		start = (start + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (start && (start < end));
-	flush_tlb_range(vma, beg, end);
-	spin_unlock(&current->mm->page_table_lock);
-	return;
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		change_pud_range(mm, pgd, addr, next, newprot);
+	} while (pgd++, addr = next, addr != end);
+	flush_tlb_range(vma, start, end);
 }
 
 static int
 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
-	unsigned long start, unsigned long end, unsigned int newflags)
+	unsigned long start, unsigned long end, unsigned long newflags)
 {
-	struct mm_struct * mm = vma->vm_mm;
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long oldflags = vma->vm_flags;
+	long nrpages = (end - start) >> PAGE_SHIFT;
 	unsigned long charged = 0;
 	pgprot_t newprot;
-	unsigned int oldflags;
 	pgoff_t pgoff;
 	int error;
 
-	if (newflags == vma->vm_flags) {
+	if (newflags == oldflags) {
 		*pprev = vma;
 		return 0;
 	}
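Note on the hunk above: the rewritten walk is the generic four-level pattern (pgd, then pud, pmd, pte) that replaced the old hand-masked two-level loops. Each level clamps its range with a p?d_addr_end() helper and exits on addr != end, and the pte level now takes the page-table spinlock through pte_offset_map_lock() instead of relying on a caller-held mm->page_table_lock. A minimal sketch of the addr_end idiom, modelled on the pgd_addr_end() macro of this kernel generation (TABLE_SIZE and table_addr_end are illustrative names, not part of the patch):

	/* Return whichever comes first: the next table boundary above
	 * "addr", or "end".  The "- 1" comparison keeps a boundary that
	 * wraps to 0 at the top of the address space ordered after end. */
	#define TABLE_SIZE	(1UL << 30)		/* stand-in for PGDIR_SIZE */
	#define TABLE_MASK	(~(TABLE_SIZE - 1))

	static unsigned long table_addr_end(unsigned long addr, unsigned long end)
	{
		unsigned long boundary = (addr + TABLE_SIZE) & TABLE_MASK;

		return (boundary - 1 < end - 1) ? boundary : end;
	}

Because every value returned is greater than addr and at most end, the do/while loops advance monotonically and the addr != end exit test is exact: no level ever walks past the range handed down by its caller.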
@@ -134,8 +124,8 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	 * a MAP_NORESERVE private mapping to writable will now reserve.
 	 */
 	if (newflags & VM_WRITE) {
-		if (!(vma->vm_flags & (VM_ACCOUNT|VM_WRITE|VM_SHARED|VM_HUGETLB))) {
-			charged = (end - start) >> PAGE_SHIFT;
+		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED|VM_HUGETLB))) {
+			charged = nrpages;
 			if (security_vm_enough_memory(charged))
 				return -ENOMEM;
 			newflags |= VM_ACCOUNT;
@@ -155,16 +145,13 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 		goto success;
 	}
 
+	*pprev = vma;
+
 	if (start != vma->vm_start) {
 		error = split_vma(mm, vma, start, 1);
 		if (error)
 			goto fail;
 	}
-	/*
-	 * Unless it returns an error, this function always sets *pprev to
-	 * the first vma for which vma->vm_end >= end.
-	 */
-	*pprev = vma;
 
 	if (end != vma->vm_end) {
 		error = split_vma(mm, vma, end, 0);
@@ -177,12 +164,11 @@ success:
 	 * vm_flags and vm_page_prot are protected by the mmap_sem
 	 * held in write mode.
 	 */
-	oldflags = vma->vm_flags;
 	vma->vm_flags = newflags;
 	vma->vm_page_prot = newprot;
-	if (oldflags & VM_EXEC)
-		arch_remove_exec_range(current->mm, vma->vm_end);
 	change_protection(vma, start, end, newprot);
+	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
+	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
 	return 0;
 
 fail:
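Note on mprotect_fixup(): *pprev is now set once, before the split_vma() calls, rather than between them, and the vm_stat_account() pair keeps the per-mm VM statistics consistent across the flag change by subtracting the pages under the old flags and re-adding them under the new ones. The effect of the splits themselves is easy to see from userspace: changing protection on an interior sub-range forces one VMA to become three, since every VMA must keep uniform vm_flags. A tiny demonstration program (an illustration for this note, not part of the patch):

	#define _GNU_SOURCE
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long psz = sysconf(_SC_PAGESIZE);
		char *p = mmap(NULL, 3 * psz, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		/* Protect only the middle page: mprotect_fixup() must call
		 * split_vma() at p + psz and again at p + 2 * psz. */
		if (mprotect(p + psz, psz, PROT_READ) != 0)
			return 1;
		system("cat /proc/self/maps");	/* three adjacent entries for p */
		return 0;
	}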
@@ -193,7 +179,7 @@ fail:
 asmlinkage long
 sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 {
-	unsigned long vm_flags, nstart, end, tmp;
+	unsigned long vm_flags, nstart, end, tmp, reqprot;
 	struct vm_area_struct *vma, *prev;
 	int error = -EINVAL;
 	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
@@ -203,14 +189,22 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 	if (start & ~PAGE_MASK)
 		return -EINVAL;
+	if (!len)
+		return 0;
 	len = PAGE_ALIGN(len);
 	end = start + len;
-	if (end < start)
+	if (end <= start)
 		return -ENOMEM;
 	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
 		return -EINVAL;
-	if (end == start)
-		return 0;
+
+	reqprot = prot;
+	/*
+	 * Does the application expect PROT_READ to imply PROT_EXEC:
+	 */
+	if (unlikely((prot & PROT_READ) &&
+			(current->personality & READ_IMPLIES_EXEC)))
+		prot |= PROT_EXEC;
 
 	vm_flags = calc_vm_prot_bits(prot);
 
@@ -242,7 +236,7 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 	prev = vma;
 
 	for (nstart = start ; ; ) {
-		unsigned int newflags;
+		unsigned long newflags;
 
 		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
 
@@ -253,12 +247,13 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
 
-		if ((newflags & ~(newflags >> 4)) & 0xf) {
+		/* newflags >> 4 shift VM_MAY% in place of VM_% */
+		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
 			error = -EACCES;
 			goto out;
 		}
 
-		error = security_file_mprotect(vma, prot);
+		error = security_file_mprotect(vma, reqprot, prot);
 		if (error)
 			goto out;
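Note on the final hunk: the permission test works because the VM_MAY* bits sit exactly four bits above their VM_* counterparts in vm_flags, so newflags >> 4 drops each VM_MAY* bit onto the VM_* position it gates; any requested permission whose VM_MAY* bit is clear survives the & ~ and triggers -EACCES. A standalone illustration, using the flag values from include/linux/mm.h of this kernel generation:

	#include <stdio.h>

	#define VM_READ		0x00000001UL
	#define VM_WRITE	0x00000002UL
	#define VM_EXEC		0x00000004UL
	#define VM_MAYREAD	0x00000010UL	/* VM_READ  << 4 */
	#define VM_MAYWRITE	0x00000020UL	/* VM_WRITE << 4 */
	#define VM_MAYEXEC	0x00000040UL	/* VM_EXEC  << 4 */

	int main(void)
	{
		/* Readable/writable mapping that was never allowed to exec: */
		unsigned long newflags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;

		newflags |= VM_EXEC;	/* an mprotect(..., PROT_EXEC) request */

		/* Same test as sys_mprotect() above: */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC))
			printf("-EACCES: VM_EXEC requested, VM_MAYEXEC missing\n");
		return 0;
	}

The reqprot/prot split serves the security hook: security_file_mprotect() now sees both the protection the application literally asked for (reqprot) and the one the kernel will actually apply after the READ_IMPLIES_EXEC personality adjustment (prot).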