X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fia64%2Fmm%2Ffault.c;h=08de59834857aa966eca824348bc270852980247;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=d823ff897cb88a0b551998a3d850ad2e3e85ff6f;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index d823ff897..08de59834 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -9,40 +9,17 @@
 #include <linux/mm.h>
 #include <linux/smp_lock.h>
 #include <linux/interrupt.h>
+#include <linux/kprobes.h>
+#include <linux/vs_memory.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
-#include <asm/hardirq.h>
+#include <asm/kdebug.h>
 
 extern void die (char *, struct pt_regs *, long);
 
-/*
- * This routine is analogous to expand_stack() but instead grows the
- * register backing store (which grows towards higher addresses).
- * Since the register backing store is access sequentially, we
- * disallow growing the RBS by more than a page at a time.  Note that
- * the VM_GROWSUP flag can be set on any VM area but that's fine
- * because the total process size is still limited by RLIMIT_STACK and
- * RLIMIT_AS.
- */
-static inline long
-expand_backing_store (struct vm_area_struct *vma, unsigned long address)
-{
-	unsigned long grow;
-
-	grow = PAGE_SIZE >> PAGE_SHIFT;
-	if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur
-	    || (((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur))
-		return -ENOMEM;
-	vma->vm_end += PAGE_SIZE;
-	vma->vm_mm->total_vm += grow;
-	if (vma->vm_flags & VM_LOCKED)
-		vma->vm_mm->locked_vm += grow;
-	return 0;
-}
-
 /*
  * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
  * (inside region 5, on ia64) and that page is present.
@@ -51,6 +28,7 @@ static int
 mapped_kernel_page_is_present (unsigned long address)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *ptep, pte;
 
@@ -58,7 +36,11 @@ mapped_kernel_page_is_present (unsigned long address)
 	if (pgd_none(*pgd) || pgd_bad(*pgd))
 		return 0;
 
-	pmd = pmd_offset(pgd, address);
+	pud = pud_offset(pgd, address);
+	if (pud_none(*pud) || pud_bad(*pud))
+		return 0;
+
+	pmd = pmd_offset(pud, address);
 	if (pmd_none(*pmd) || pmd_bad(*pmd))
 		return 0;
 
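Note: the two hunks above convert mapped_kernel_page_is_present() from the old three-level pgd/pmd/pte walk to the four-level walk used by 2.6.11 and later kernels, inserting a pud step between pgd and pmd. For reference, a sketch of how the whole function should read once the hunks apply; the pgd_offset_k() and pte_offset_kernel() lines are unchanged context reconstructed from the surrounding file, not part of this diff:

	static int
	mapped_kernel_page_is_present (unsigned long address)
	{
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *ptep, pte;

		pgd = pgd_offset_k(address);		/* level 1: kernel page directory */
		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return 0;

		pud = pud_offset(pgd, address);		/* level 2: the step this diff adds */
		if (pud_none(*pud) || pud_bad(*pud))
			return 0;

		pmd = pmd_offset(pud, address);		/* level 3: now indexed off the pud */
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return 0;

		ptep = pte_offset_kernel(pmd, address);	/* level 4: the pte itself */
		if (!ptep)
			return 0;

		pte = *ptep;
		return pte_present(pte);
	}

On this kernel's ia64 configuration only three levels are actually populated, so pud_offset() folds away at compile time; the extra check exists so the same walk builds on every architecture.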
@@ -70,7 +52,7 @@
 	return pte_present(pte);
 }
 
-void
+void __kprobes
 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
 {
 	int signal = SIGSEGV, code = SEGV_MAPERR;
@@ -79,6 +61,9 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	struct siginfo si;
 	unsigned long mask;
 
+	/* mmap_sem is performance critical.... */
+	prefetchw(&mm->mmap_sem);
+
 	/*
 	 * If we're in an interrupt or have no user context, we must not take the fault..
 	 */
@@ -97,6 +82,13 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		goto bad_area_no_up;
 #endif
 
+	/*
+	 * This is to handle the kprobes on user space access instructions
+	 */
+	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
+					SIGSEGV) == NOTIFY_STOP)
+		return;
+
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma_prev(mm, address, &prev_vma);
@@ -171,7 +163,13 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
 		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
 			goto bad_area;
-		if (expand_backing_store(vma, address))
+		/*
+		 * Since the register backing store is accessed sequentially,
+		 * we disallow growing it by more than a page at a time.
+		 */
+		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
+			goto bad_area;
+		if (expand_upwards(vma, address))
 			goto bad_area;
 	}
 	goto good_area;
@@ -196,7 +194,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		si.si_signo = signal;
 		si.si_errno = 0;
 		si.si_code = code;
-		si.si_addr = (void *) address;
+		si.si_addr = (void __user *) address;
 		si.si_isr = isr;
 		si.si_flags = __ISR_VALID;
 		force_sig_info(signal, &si, current);
@@ -204,18 +202,18 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	}
 
   no_context:
-	if (isr & IA64_ISR_SP) {
+	if ((isr & IA64_ISR_SP)
+	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
+	{
 		/*
-		 * This fault was due to a speculative load set the "ed" bit in the psr to
-		 * ensure forward progress (target register will get a NaT).
+		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
+		 * bit in the psr to ensure forward progress. (Target register will get a
+		 * NaT for ld.s, lfetch will be canceled.)
 		 */
 		ia64_psr(regs)->ed = 1;
 		return;
 	}
 
-	if (ia64_done_with_exception(regs))
-		return;
-
 	/*
 	 * Since we have no vma's for region 5, we might get here even if the address is
 	 * valid, due to the VHPT walker inserting a non present translation that becomes
@@ -226,6 +224,9 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
 		return;
 
+	if (ia64_done_with_exception(regs))
+		return;
+
 	/*
 	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
 	 * with extreme prejudice.
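Note: the VM_GROWSUP hunk above drops the old rlimit-checking expand_backing_store() in favour of the generic expand_upwards() plus a one-page cap. Because the ia64 register backing store is written sequentially upward, a legitimate RBS fault can sit at most one page beyond vm_end, and the sizeof(long) slack keeps the final 8-byte register slot inside that new page. A minimal user-space sketch of just that bound, where rbs_grow_ok(), vm_end and the page-size value are made-up stand-ins for the kernel's vma fields, not kernel API:

	#include <stdio.h>

	#define PAGE_SIZE 16384UL	/* 16K, a common ia64 page size (configurable) */

	static unsigned long vm_end = 0x6000000000004000UL;	/* hypothetical end of the RBS vma */

	/* Mirrors the new check:
	 * if (address > vma->vm_end + PAGE_SIZE - sizeof(long)) goto bad_area; */
	static int rbs_grow_ok(unsigned long address)
	{
		return address <= vm_end + PAGE_SIZE - sizeof(long);
	}

	int main(void)
	{
		printf("%d\n", rbs_grow_ok(vm_end));			/* 1: first slot of the new page */
		printf("%d\n", rbs_grow_ok(vm_end + PAGE_SIZE - 8));	/* 1: last slot that still fits */
		printf("%d\n", rbs_grow_ok(vm_end + PAGE_SIZE));	/* 0: would grow by more than a page */
		return 0;
	}

Addresses that pass this check are then handed to expand_upwards(), which performs the rlimit and vm accounting work that the deleted expand_backing_store() used to do by hand.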