return prefetch;
}
-static inline int is_prefetch(struct pt_regs *regs, unsigned long addr)
+static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
+ unsigned long error_code)
{
if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
- boot_cpu_data.x86 >= 6))
+ boot_cpu_data.x86 >= 6)) {
+ /* Catch an obscure case of prefetch inside an NX page. */
+ if (nx_enabled && (error_code & 16))
+ return 0;
return __is_prefetch(regs, addr);
+ }
return 0;
}
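
For reference, the numeric masks tested above come from the hardware-defined x86 page fault error code. The symbolic names below are only illustrative (the patch tests the raw values directly), but the bit meanings are architectural:

/* x86 page-fault error code bits (names are illustrative, not from the patch): */
#define PF_PROT		(1 << 0)	/* 0: page not present   1: protection violation */
#define PF_WRITE	(1 << 1)	/* 0: read access        1: write access         */
#define PF_USER		(1 << 2)	/* 0: fault in kernel    1: fault in user mode   */
#define PF_RSVD		(1 << 3)	/* reserved bit set in a paging entry            */
#define PF_INSTR	(1 << 4)	/* instruction fetch refused (only with NX)      */

/*
 * So "error_code & 16" above asks whether the fault was an instruction
 * fetch rejected by the NX bit; such a fault can never be a spurious AMD
 * prefetch fault, and __is_prefetch() must not be allowed to swallow it.
 */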
if (in_atomic() || !mm)
goto bad_area_nosemaphore;
- down_read(&mm->mmap_sem);
+ /* When running in the kernel we expect faults to occur only to
+ * addresses in user space. All other faults represent errors in the
+ * kernel and should generate an OOPS. Unfortunately, in the case of an
+ * erroneous fault occurring in a code path which already holds mmap_sem
+ * we will deadlock attempting to validate the fault against the
+ * address space. Luckily the kernel only validly references user
+ * space from well defined areas of code, which are listed in the
+ * exceptions table.
+ *
+ * As the vast majority of faults will be valid we will only perform
+ * the source reference check when there is a possibility of a deadlock.
+ * Attempt to lock the address space, if we cannot we then validate the
+ * source. If this is invalid we can skip the address space check,
+ * thus avoiding the deadlock.
+ */
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if ((error_code & 4) == 0 &&
+ !search_exception_tables(regs->eip))
+ goto bad_area_nosemaphore;
+ down_read(&mm->mmap_sem);
+ }
vma = find_vma(mm, address);
if (!vma)
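
As a concrete, purely hypothetical illustration of the deadlock the comment above guards against, consider a buggy kernel path that dereferences a user pointer directly while already holding mmap_sem. The raw dereference has no exception table entry, so with the trylock logic the fault handler detects the situation and oopses through bad_area_nosemaphore instead of sleeping forever on the semaphore its own caller holds:

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Hypothetical example, not part of the patch: the dereference below may
 * fault while mmap_sem is held for writing.  down_read_trylock() in the
 * fault handler then fails, the fault is in kernel mode (error_code & 4
 * is 0), and regs->eip is not in the exception table, so the handler
 * takes the bad_area_nosemaphore path and produces an oops.
 */
static int buggy_read_word(struct mm_struct *mm, int __user *uptr)
{
	int val;

	down_write(&mm->mmap_sem);
	val = *(int *)uptr;	/* BUG: raw user dereference under mmap_sem */
	up_write(&mm->mmap_sem);
	return val;
}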
* Valid to do another page fault here because this one came
* from user space.
*/
- if (is_prefetch(regs, address))
+ if (is_prefetch(regs, address, error_code))
return;
tsk->thread.cr2 = address;
info.si_signo = SIGSEGV;
info.si_errno = 0;
/* info.si_code has been set above */
- info.si_addr = (void *)address;
+ info.si_addr = (void __user *)address;
force_sig_info(SIGSEGV, &info, tsk);
return;
}
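
The (void __user *) cast does not change the generated code; it only marks the pointer as belonging to the user address space for sparse checking. Roughly, and shown here only as a sketch rather than a copy from the tree, the annotation in include/linux/compiler.h looks like this:

#ifdef __CHECKER__
# define __user	__attribute__((noderef, address_space(1)))
#else
# define __user			/* compiles away in a normal build */
#endif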
* had been triggered by is_prefetch fixup_exception would have
* handled it.
*/
- if (is_prefetch(regs, address))
+ if (is_prefetch(regs, address, error_code))
return;
/*
bust_spinlocks(1);
#ifdef CONFIG_X86_PAE
- {
- pgd_t *pgd;
- pmd_t *pmd;
-
+ if (error_code & 16) {
+ pte_t *pte = lookup_address(address);
+
- pgd = init_mm.pgd + pgd_index(address);
- if (pgd_present(*pgd)) {
- pmd = pmd_offset(pgd, address);
- if (pmd_val(*pmd) & _PAGE_NX)
- printk(KERN_CRIT "kernel tried to access NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
- }
+ if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
+ printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
}
#endif
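
lookup_address() (a pte_t *lookup_address(unsigned long) helper in the i386 mm code) walks the kernel page tables down to the pte mapping the faulting address, and pte_exec_kernel() reports whether that mapping is executable. Under PAE, the only configuration in which this block is compiled, the test essentially reduces to checking the NX bit; a minimal sketch of what it amounts to, assuming the three-level pte layout:

/*
 * Sketch only: with PAE the top bit of the 64-bit pte is the NX bit,
 * so "executable by the kernel" is simply "NX not set".
 */
static inline int pte_exec_kernel(pte_t pte)
{
	return !(pte_val(pte) & _PAGE_NX);
}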
if (address < PAGE_SIZE)
goto no_context;
/* User space => ok to do another page fault */
- if (is_prefetch(regs, address))
+ if (is_prefetch(regs, address, error_code))
return;
tsk->thread.cr2 = address;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
- info.si_addr = (void *)address;
+ info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, tsk);
return;