#include <asm/uaccess.h>
#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
unsigned long addr, unsigned long end, pgprot_t newprot)
{
pte_t *pte;
+ spinlock_t *ptl;
- pte = pte_offset_map(pmd, addr);
+ pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
do {
if (pte_present(*pte)) {
pte_t ptent;
lazy_mmu_prot_update(ptent);
}
} while (pte++, addr += PAGE_SIZE, addr != end);
- pte_unmap(pte - 1);
+ pte_unmap_unlock(pte - 1, ptl);
}
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(addr >= end);
pgd = pgd_offset(mm, addr);
flush_cache_range(vma, addr, end);
- spin_lock(&mm->page_table_lock);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
change_pud_range(mm, pgd, addr, next, newprot);
} while (pgd++, addr = next, addr != end);
flush_tlb_range(vma, start, end);
- spin_unlock(&mm->page_table_lock);
}
static int
struct mm_struct *mm = vma->vm_mm;
unsigned long oldflags = vma->vm_flags;
long nrpages = (end - start) >> PAGE_SHIFT;
- unsigned long charged = 0, old_end = vma->vm_end;
+ unsigned long charged = 0;
pgprot_t newprot;
pgoff_t pgoff;
int error;
*/
vma->vm_flags = newflags;
vma->vm_page_prot = newprot;
- if (oldflags & VM_EXEC)
- arch_remove_exec_range(current->mm, old_end);
change_protection(vma, start, end, newprot);
- __vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
- __vm_stat_account(mm, newflags, vma->vm_file, nrpages);
+ vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
+ vm_stat_account(mm, newflags, vma->vm_file, nrpages);
return 0;
fail:
}
asmlinkage long
-do_mprotect(struct mm_struct *mm, unsigned long start, size_t len,
- unsigned long prot)
+sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
unsigned long vm_flags, nstart, end, tmp, reqprot;
struct vm_area_struct *vma, *prev;
vm_flags = calc_vm_prot_bits(prot);
- down_write(&mm->mmap_sem);
+ down_write(&current->mm->mmap_sem);
- vma = find_vma_prev(mm, start, &prev);
+ vma = find_vma_prev(current->mm, start, &prev);
error = -ENOMEM;
if (!vma)
goto out;
newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
- if ((newflags & ~(newflags >> 4)) & 0xf) {
+ /* newflags >> 4 shift VM_MAY% in place of VM_% */
+ if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
error = -EACCES;
goto out;
}
}
}
out:
- up_write(&mm->mmap_sem);
+ up_write(&current->mm->mmap_sem);
return error;
}
-
-asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
-{
- return(do_mprotect(current->mm, start, len, prot));
-}