diff --git a/mm/mprotect.c b/mm/mprotect.c
index 2f8813676..653b8571c 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -22,102 +22,90 @@
 #include 
 #include 
-#include 
 #include 
 #include 
 
-static inline void
-change_pte_range(pmd_t *pmd, unsigned long address,
-		unsigned long size, pgprot_t newprot)
+static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
+		unsigned long addr, unsigned long end, pgprot_t newprot)
 {
-	pte_t * pte;
-	unsigned long end;
-
-	if (pmd_none(*pmd))
-		return;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
-		return;
-	}
-	pte = pte_offset_map(pmd, address);
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
+	pte_t *pte;
+	spinlock_t *ptl;
+
+	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	do {
 		if (pte_present(*pte)) {
-			pte_t entry;
+			pte_t ptent;
 
 			/* Avoid an SMP race with hardware updated dirty/clean
 			 * bits by wiping the pte and then setting the new pte
 			 * into place. */
-			entry = ptep_get_and_clear(pte);
-			set_pte(pte, pte_modify(entry, newprot));
+			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
+			set_pte_at(mm, addr, pte, ptent);
+			lazy_mmu_prot_update(ptent);
 		}
-		address += PAGE_SIZE;
-		pte++;
-	} while (address && (address < end));
-	pte_unmap(pte - 1);
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pte_unmap_unlock(pte - 1, ptl);
 }
 
-static inline void
-change_pmd_range(pgd_t *pgd, unsigned long address,
-		unsigned long size, pgprot_t newprot)
+static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
+		unsigned long addr, unsigned long end, pgprot_t newprot)
 {
-	pmd_t * pmd;
-	unsigned long end;
-
-	if (pgd_none(*pgd))
-		return;
-	if (pgd_bad(*pgd)) {
-		pgd_ERROR(*pgd);
-		pgd_clear(pgd);
-		return;
-	}
-	pmd = pmd_offset(pgd, address);
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
+	pmd_t *pmd;
+	unsigned long next;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none_or_clear_bad(pmd))
+			continue;
+		change_pte_range(mm, pmd, addr, next, newprot);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
+		unsigned long addr, unsigned long end, pgprot_t newprot)
+{
+	pud_t *pud;
+	unsigned long next;
+
+	pud = pud_offset(pgd, addr);
 	do {
-		change_pte_range(pmd, address, end - address, newprot);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address && (address < end));
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		change_pmd_range(mm, pud, addr, next, newprot);
+	} while (pud++, addr = next, addr != end);
 }
 
-static void
-change_protection(struct vm_area_struct *vma, unsigned long start,
-		unsigned long end, pgprot_t newprot)
+static void change_protection(struct vm_area_struct *vma,
+		unsigned long addr, unsigned long end, pgprot_t newprot)
 {
-	pgd_t *dir;
-	unsigned long beg = start;
-
-	dir = pgd_offset(current->mm, start);
-	flush_cache_range(vma, beg, end);
-	if (start >= end)
-		BUG();
-	spin_lock(&current->mm->page_table_lock);
+	struct mm_struct *mm = vma->vm_mm;
+	pgd_t *pgd;
+	unsigned long next;
+	unsigned long start = addr;
+
+	BUG_ON(addr >= end);
+	pgd = pgd_offset(mm, addr);
+	flush_cache_range(vma, addr, end);
 	do {
-		change_pmd_range(dir, start, end - start, newprot);
-		start = (start + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (start && (start < end));
-	flush_tlb_range(vma, beg, end);
-	spin_unlock(&current->mm->page_table_lock);
-	return;
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		change_pud_range(mm, pgd, addr, next, newprot);
+	} while (pgd++, addr = next, addr != end);
+	flush_tlb_range(vma, start, end);
 }
 
 static int
 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	unsigned long start, unsigned long end, unsigned long newflags)
 {
-	struct mm_struct * mm = vma->vm_mm;
+	struct mm_struct *mm = vma->vm_mm;
 	unsigned long oldflags = vma->vm_flags;
 	long nrpages = (end - start) >> PAGE_SHIFT;
-	unsigned long charged = 0, old_end = vma->vm_end;
+	unsigned long charged = 0;
 	pgprot_t newprot;
 	pgoff_t pgoff;
 	int error;
@@ -157,16 +145,13 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 		goto success;
 	}
 
+	*pprev = vma;
+
 	if (start != vma->vm_start) {
 		error = split_vma(mm, vma, start, 1);
 		if (error)
 			goto fail;
 	}
-	/*
-	 * Unless it returns an error, this function always sets *pprev to
-	 * the first vma for which vma->vm_end >= end.
-	 */
-	*pprev = vma;
 
 	if (end != vma->vm_end) {
 		error = split_vma(mm, vma, end, 0);
@@ -179,14 +164,11 @@ success:
 	 * vm_flags and vm_page_prot are protected by the mmap_sem
 	 * held in write mode.
 	 */
-	oldflags = vma->vm_flags;
 	vma->vm_flags = newflags;
 	vma->vm_page_prot = newprot;
-	if (oldflags & VM_EXEC)
-		arch_remove_exec_range(current->mm, old_end);
 	change_protection(vma, start, end, newprot);
-	__vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
-	__vm_stat_account(mm, newflags, vma->vm_file, nrpages);
+	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
+	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
 	return 0;
 
 fail:
@@ -195,10 +177,9 @@
 }
 
 asmlinkage long
-do_mprotect(struct mm_struct *mm, unsigned long start, size_t len,
-	unsigned long prot)
+sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 {
-	unsigned long vm_flags, nstart, end, tmp;
+	unsigned long vm_flags, nstart, end, tmp, reqprot;
 	struct vm_area_struct *vma, *prev;
 	int error = -EINVAL;
 	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
@@ -208,14 +189,16 @@ do_mprotect(struct mm_struct *mm, unsigned long start, size_t len,
 
 	if (start & ~PAGE_MASK)
 		return -EINVAL;
+	if (!len)
+		return 0;
 	len = PAGE_ALIGN(len);
 	end = start + len;
-	if (end < start)
+	if (end <= start)
 		return -ENOMEM;
 	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
 		return -EINVAL;
-	if (end == start)
-		return 0;
+
+	reqprot = prot;
 	/*
 	 * Does the application expect PROT_READ to imply PROT_EXEC:
 	 */
@@ -225,9 +208,9 @@ do_mprotect(struct mm_struct *mm, unsigned long start, size_t len,
 
 	vm_flags = calc_vm_prot_bits(prot);
 
-	down_write(&mm->mmap_sem);
+	down_write(&current->mm->mmap_sem);
 
-	vma = find_vma_prev(mm, start, &prev);
+	vma = find_vma_prev(current->mm, start, &prev);
 	error = -ENOMEM;
 	if (!vma)
 		goto out;
@@ -264,12 +247,13 @@ do_mprotect(struct mm_struct *mm, unsigned long start, size_t len,
 
 		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
 
-		if ((newflags & ~(newflags >> 4)) & 0xf) {
+		/* newflags >> 4 shift VM_MAY% in place of VM_% */
+		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
 			error = -EACCES;
 			goto out;
 		}
 
-		error = security_file_mprotect(vma, prot);
+		error = security_file_mprotect(vma, reqprot, prot);
 		if (error)
 			goto out;
 
@@ -293,11 +277,6 @@ do_mprotect(struct mm_struct *mm, unsigned long start, size_t len,
 		}
 	}
 out:
-	up_write(&mm->mmap_sem);
+	up_write(&current->mm->mmap_sem);
 	return error;
 }
-
-asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
-{
-	return(do_mprotect(current->mm, start, len, prot));
-}
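
The argument checks at the top of sys_mprotect() above are directly observable from userspace: an unaligned start fails with EINVAL, a zero len succeeds before any VMA is looked up, and len is rounded up with PAGE_ALIGN() so a one-byte request covers the whole page. A minimal userspace sketch of those rules (illustration only, not part of this patch; assumes a glibc userland with MAP_ANONYMOUS):

/* Illustrative sketch (not from this patch): exercise the early
 * argument checks in sys_mprotect() from userspace. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	/* One anonymous read-write page. */
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* start & ~PAGE_MASK is nonzero: rejected with EINVAL. */
	if (mprotect(p + 1, page - 1, PROT_READ))
		printf("unaligned start: %s\n", strerror(errno));

	/* !len: returns 0 before any VMA is touched. */
	printf("zero length: %d\n", mprotect(p, 0, PROT_READ));

	/* len is PAGE_ALIGN()ed up, so one byte covers the whole page. */
	if (mprotect(p, 1, PROT_READ) == 0)
		printf("page now read-only\n");

	munmap(p, page);
	return 0;
}

Each call corresponds to one of the early returns in sys_mprotect() taken before down_write(&current->mm->mmap_sem).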