* Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
*/
-#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <linux/compiler.h>
#include <linux/module.h>
+#include <linux/kprobes.h>
+#include <linux/uaccess.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
#include <asm/pgalloc.h>
-#include <asm/hardirq.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm-generic/sections.h>
+/* Page fault error code bits */
+#define PF_PROT (1<<0) /* 0: no page found, 1: protection fault */
+#define PF_WRITE (1<<1) /* 0: read access, 1: write access */
+#define PF_USER (1<<2) /* 0: kernel-mode, 1: user-mode access */
+#define PF_RSVD (1<<3) /* 1: reserved bit set in page table */
+#define PF_INSTR (1<<4) /* 1: fault was an instruction fetch */
+
+static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+/* Hook to register for page fault notifications */
+int register_page_fault_notifier(struct notifier_block *nb)
+{
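+ /* Sync the kernel page tables first: the notifier callback may itself
+ live in module/vmalloc space, and must not recursively take a vmalloc
+ fault while it is handling one. */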
+ vmalloc_sync_all();
+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_page_fault_notifier);
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig
+ };
+ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
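+
+/*
+ * Illustrative sketch only (editor's addition, not part of this patch):
+ * a minimal client of the notifier API above. The handler name and its
+ * filtering policy are assumptions, and inspect() is a placeholder for
+ * whatever the client does; returning NOTIFY_DONE lets the fault be
+ * handled normally.
+ *
+ * static int sample_pf_handler(struct notifier_block *self,
+ * unsigned long val, void *data)
+ * {
+ * struct die_args *args = data;
+ * if (val == DIE_PAGE_FAULT)
+ * inspect(args->regs, args->err);
+ * return NOTIFY_DONE;
+ * }
+ *
+ * static struct notifier_block sample_pf_nb = {
+ * .notifier_call = sample_pf_handler,
+ * };
+ *
+ * register_page_fault_notifier(&sample_pf_nb);
+ * ...
+ * unregister_page_fault_notifier(&sample_pf_nb);
+ */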
+
void bust_spinlocks(int yes)
{
int loglevel_save = console_loglevel;
/* Sometimes the CPU reports invalid exceptions on prefetch.
Check that here and ignore them.
Opcode checker based on code by Richard Brunner. */
-static int is_prefetch(struct pt_regs *regs, unsigned long addr)
+static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
+ unsigned long error_code)
{
- unsigned char *instr = (unsigned char *)(regs->rip);
+ unsigned char *instr;
int scan_more = 1;
int prefetch = 0;
- unsigned char *max_instr = instr + 15;
+ unsigned char *max_instr;
- /* Avoid recursive faults for this common case */
- if (regs->rip == addr)
- return 0;
-
- /* Code segments in LDT could have a non zero base. Don't check
- when that's possible */
- if (regs->cs & (1<<2))
+ /* If it was an instruction fetch (exec) fault, ignore it */
+ if (error_code & PF_INSTR)
return 0;
+
+ instr = (unsigned char *)convert_rip_to_linear(current, regs);
+ max_instr = instr + 15;
- if ((regs->cs & 3) != 0 && regs->rip >= TASK_SIZE)
+ if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE64)
return 0;
while (scan_more && instr < max_instr) {
unsigned char instr_hi;
unsigned char instr_lo;
- if (__get_user(opcode, instr))
+ if (probe_kernel_address(instr, opcode))
break;
instr_hi = opcode & 0xf0;
/* Could check the LDT for lm, but for now it's good
enough to assume that long mode only uses well known
segments or kernel. */
- scan_more = ((regs->cs & 3) == 0) || (regs->cs == __USER_CS);
+ scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
break;
case 0x60:
case 0x00:
/* Prefetch instruction is 0x0F0D or 0x0F18 */
scan_more = 0;
- if (__get_user(opcode, instr))
+ if (probe_kernel_address(instr, opcode))
break;
prefetch = (instr_lo == 0xF) &&
(opcode == 0x0D || opcode == 0x18);
static int bad_address(void *p)
{
unsigned long dummy;
- return __get_user(dummy, (unsigned long *)p);
+ return probe_kernel_address((unsigned long *)p, dummy);
}
void dump_pagetable(unsigned long address)
{
- pml4_t *pml4;
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- asm("movq %%cr3,%0" : "=r" (pml4));
-
- pml4 = __va((unsigned long)pml4 & PHYSICAL_PAGE_MASK);
- pml4 += pml4_index(address);
- printk("PML4 %lx ", pml4_val(*pml4));
- if (bad_address(pml4)) goto bad;
- if (!pml4_present(*pml4)) goto ret;
+ asm("movq %%cr3,%0" : "=r" (pgd));
- pgd = __pgd_offset_k((pgd_t *)pml4_page(*pml4), address);
+ pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
+ pgd += pgd_index(address);
if (bad_address(pgd)) goto bad;
- printk("PGD %lx ", pgd_val(*pgd));
- if (!pgd_present(*pgd)) goto ret;
+ printk("PGD %lx ", pgd_val(*pgd));
+ if (!pgd_present(*pgd)) goto ret;
- pmd = pmd_offset(pgd, address);
+ pud = pud_offset(pgd, address);
+ if (bad_address(pud)) goto bad;
+ printk("PUD %lx ", pud_val(*pud));
+ if (!pud_present(*pud)) goto ret;
+
+ pmd = pmd_offset(pud, address);
if (bad_address(pmd)) goto bad;
printk("PMD %lx ", pmd_val(*pmd));
if (!pmd_present(*pmd)) goto ret;
int unhandled_signal(struct task_struct *tsk, int sig)
{
- /* Warn for strace, but not for gdb */
- if (!test_ti_thread_flag(tsk->thread_info, TIF_SYSCALL_TRACE) &&
- (tsk->ptrace & PT_PTRACED))
+ if (is_init(tsk))
+ return 1;
+ if (tracehook_consider_fatal_signal(tsk, sig))
return 0;
return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
}
-int page_fault_trace;
+static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
+ unsigned long error_code)
+{
+ unsigned long flags = oops_begin();
+ struct task_struct *tsk;
+
+ printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
+ current->comm, address);
+ dump_pagetable(address);
+ tsk = current;
+ tsk->thread.cr2 = address;
+ tsk->thread.trap_no = 14;
+ tsk->thread.error_code = error_code;
+ __die("Bad pagetable", regs, error_code);
+ oops_end(flags);
+ do_exit(SIGKILL);
+}
+
+/*
+ * Handle a fault on the vmalloc area.
+ *
+ * Called from do_page_fault() below for kernel addresses in the
+ * vmalloc range. This assumes no large pages in there.
+ */
+static int vmalloc_fault(unsigned long address)
+{
+ pgd_t *pgd, *pgd_ref;
+ pud_t *pud, *pud_ref;
+ pmd_t *pmd, *pmd_ref;
+ pte_t *pte, *pte_ref;
+
+ /* Copy kernel mappings over when needed. This can also
+ happen due to a race in a page table update. In the latter
+ case just flush. */
+
+ pgd = pgd_offset(current->mm ?: &init_mm, address);
+ pgd_ref = pgd_offset_k(address);
+ if (pgd_none(*pgd_ref))
+ return -1;
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+ else
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+
+ /* Below here mismatches are bugs because these lower tables
+ are shared */
+
+ pud = pud_offset(pgd, address);
+ pud_ref = pud_offset(pgd_ref, address);
+ if (pud_none(*pud_ref))
+ return -1;
+ if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
+ BUG();
+ pmd = pmd_offset(pud, address);
+ pmd_ref = pmd_offset(pud_ref, address);
+ if (pmd_none(*pmd_ref))
+ return -1;
+ if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
+ BUG();
+ pte_ref = pte_offset_kernel(pmd_ref, address);
+ if (!pte_present(*pte_ref))
+ return -1;
+ pte = pte_offset_kernel(pmd, address);
+ /* Don't use pte_page here, because the mappings can point
+ outside mem_map, and the NUMA hash lookup cannot handle
+ that. */
+ if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
+ BUG();
+ return 0;
+}
+
+int page_fault_trace;
int exception_trace = 1;
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
- *
- * error_code:
- * bit 0 == 0 means no page found, 1 means protection fault
- * bit 1 == 0 means read, 1 means write
- * bit 2 == 0 means kernel, 1 means user-mode
- * bit 3 == 1 means fault was an instruction fetch
*/
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+ unsigned long error_code)
{
struct task_struct *tsk;
struct mm_struct *mm;
unsigned long address;
const struct exception_table_entry *fixup;
int write;
+ unsigned long flags;
siginfo_t info;
-#ifdef CONFIG_CHECKING
- {
- unsigned long gs;
- struct x8664_pda *pda = cpu_pda + stack_smp_processor_id();
- rdmsrl(MSR_GS_BASE, gs);
- if (gs != (unsigned long)pda) {
- wrmsrl(MSR_GS_BASE, pda);
- printk("page_fault: wrong gs %lx expected %p\n", gs, pda);
- }
- }
-#endif
+ tsk = current;
+ mm = tsk->mm;
+ prefetchw(&mm->mmap_sem);
/* get the address */
__asm__("movq %%cr2,%0":"=r" (address));
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * This verifies that the fault happens in kernel space
+ * ((error_code & PF_USER) == 0) and that the fault was not a
+ * protection or reserved-bit error ((error_code & (PF_PROT|PF_RSVD)) == 0).
+ */
+ if (unlikely(address >= TASK_SIZE64)) {
+ /*
+ * Don't check for the module range here: its PML4
+ * is always initialized because it's shared with the main
+ * kernel text. Only vmalloc may need PML4 syncups.
+ */
+ if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
+ ((address >= VMALLOC_START && address < VMALLOC_END))) {
+ if (vmalloc_fault(address) >= 0)
+ return;
+ }
+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ SIGSEGV) == NOTIFY_STOP)
+ return;
+ /*
+ * Don't take the mm semaphore here. If we fixup a prefetch
+ * fault we could otherwise deadlock.
+ */
+ goto bad_area_nosemaphore;
+ }
+
+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ SIGSEGV) == NOTIFY_STOP)
+ return;
+
if (likely(regs->eflags & X86_EFLAGS_IF))
local_irq_enable();
printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
- tsk = current;
- mm = tsk->mm;
- info.si_code = SEGV_MAPERR;
-
- /* 5 => page not present and from supervisor mode */
- if (unlikely(!(error_code & 5) &&
- ((address >= VMALLOC_START && address <= VMALLOC_END) ||
- (address >= MODULES_VADDR && address <= MODULES_END))))
- goto vmalloc_fault;
+ if (unlikely(error_code & PF_RSVD))
+ pgtable_bad(address, regs, error_code);
/*
* If we're in an interrupt or have no user
goto bad_area_nosemaphore;
again:
- down_read(&mm->mmap_sem);
+ /* When running in the kernel we expect faults to occur only to
+ * addresses in user space. All other faults represent errors in the
+ * kernel and should generate an OOPS. Unfortunately, in the case of an
+ * erroneous fault occurring in a code path which already holds mmap_sem
+ * we will deadlock attempting to validate the fault against the
+ * address space. Luckily the kernel only validly references user
+ * space from well-defined areas of code, which are listed in the
+ * exception table.
+ *
+ * As the vast majority of faults will be valid we will only perform
+ * the source reference check when there is a possibility of a deadlock.
+ * Attempt to lock the address space; if we cannot, then validate the
+ * source. If the source is invalid we can skip the address space check,
+ * thus avoiding the deadlock.
+ */
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if ((error_code & PF_USER) == 0 &&
+ !search_exception_tables(regs->rip))
+ goto bad_area_nosemaphore;
+ down_read(&mm->mmap_sem);
+ }
vma = find_vma(mm, address);
if (!vma)
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (error_code & 4) {
- // XXX: align red zone size with ABI
- if (address + 128 < regs->rsp)
+ /* Allow userspace just enough access below the stack pointer
+ * to let the 'enter' instruction work.
+ */
+ if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
goto bad_area;
}
if (expand_stack(vma, address))
good_area:
info.si_code = SEGV_ACCERR;
write = 0;
- switch (error_code & 3) {
+ switch (error_code & (PF_PROT|PF_WRITE)) {
default: /* 3: write, present */
/* fall through */
- case 2: /* write, not present */
+ case PF_WRITE: /* write, not present */
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
write++;
break;
- case 1: /* read, present */
+ case PF_PROT: /* read, present */
goto bad_area;
- case 0: /* read, not present */
- if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ case 0: /* read, not present */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto bad_area;
}
* the fault.
*/
switch (handle_mm_fault(mm, vma, address, write)) {
- case 1:
+ case VM_FAULT_MINOR:
tsk->min_flt++;
break;
- case 2:
+ case VM_FAULT_MAJOR:
tsk->maj_flt++;
break;
- case 0:
+ case VM_FAULT_SIGBUS:
goto do_sigbus;
default:
goto out_of_memory;
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
-
-#ifdef CONFIG_IA32_EMULATION
- /* 32bit vsyscall. map on demand. */
- if (test_thread_flag(TIF_IA32) &&
- address >= 0xffffe000 && address < 0xffffe000 + PAGE_SIZE) {
- if (map_syscall32(mm, address) < 0)
- goto out_of_memory2;
- return;
- }
-#endif
-
/* User mode accesses just cause a SIGSEGV */
- if (error_code & 4) {
- if (is_prefetch(regs, address))
+ if (error_code & PF_USER) {
+ if (is_prefetch(regs, address, error_code))
return;
/* Work around K8 erratum #100: K8 in compat mode
return;
if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
- printk(KERN_INFO
- "%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
- tsk->comm, tsk->pid, address, regs->rip,
- regs->rsp, error_code);
+ printk(
+ "%s%s[%d:#%u]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
+ tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
+ tsk->comm, tsk->pid, tsk->xid, address,
+ regs->rip, regs->rsp, error_code);
}
tsk->thread.cr2 = address;
info.si_signo = SIGSEGV;
info.si_errno = 0;
/* info.si_code has been set above */
- info.si_addr = (void *)address;
+ info.si_addr = (void __user *)address;
force_sig_info(SIGSEGV, &info, tsk);
return;
}
* Hall of shame of CPU/BIOS bugs.
*/
- if (is_prefetch(regs, address))
+ if (is_prefetch(regs, address, error_code))
return;
if (is_errata93(regs, address))
* terminate things with extreme prejudice.
*/
- oops_begin();
+ flags = oops_begin();
if (address < PAGE_SIZE)
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
printk(KERN_ALERT "Unable to handle kernel paging request");
printk(" at %016lx RIP: \n" KERN_ALERT,address);
printk_address(regs->rip);
- printk("\n");
dump_pagetable(address);
+ tsk->thread.cr2 = address;
+ tsk->thread.trap_no = 14;
+ tsk->thread.error_code = error_code;
__die("Oops", regs, error_code);
/* Executive summary in case the body of the oops scrolled away */
printk(KERN_EMERG "CR2: %016lx\n", address);
- oops_end();
+ oops_end(flags);
do_exit(SIGKILL);
/*
*/
out_of_memory:
up_read(&mm->mmap_sem);
-out_of_memory2:
- if (current->pid == 1) {
+ if (is_init(current)) {
yield();
goto again;
}
- printk("VM: killing process %s\n", tsk->comm);
+ printk("VM: killing process %s(%d:#%u)\n",
+ tsk->comm, tsk->pid, tsk->xid);
if (error_code & 4)
do_exit(SIGKILL);
goto no_context;
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
- if (!(error_code & 4))
+ if (!(error_code & PF_USER))
goto no_context;
tsk->thread.cr2 = address;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
- info.si_addr = (void *)address;
+ info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, tsk);
return;
+}
-vmalloc_fault:
- {
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
+DEFINE_SPINLOCK(pgd_lock);
+struct page *pgd_list;
- /*
- * x86-64 has the same kernel 3rd level pages for all CPUs.
- * But for vmalloc/modules the TLB synchronization works lazily,
- * so it can happen that we get a page fault for something
- * that is really already in the page table. Just check if it
- * is really there and when yes flush the local TLB.
- */
- pgd = pgd_offset_k(address);
- if (pgd != current_pgd_offset_k(address))
- BUG();
- if (!pgd_present(*pgd))
- goto bad_area_nosemaphore;
- pmd = pmd_offset(pgd, address);
- if (!pmd_present(*pmd))
- goto bad_area_nosemaphore;
- pte = pte_offset_kernel(pmd, address);
- if (!pte_present(*pte))
- goto bad_area_nosemaphore;
+void vmalloc_sync_all(void)
+{
+ /* Note that races in the updates of insync and start aren't
+ problematic:
+ insync can only get set bits added, and updates to start are only
+ improving performance (without affecting correctness if undone). */
+ static DECLARE_BITMAP(insync, PTRS_PER_PGD);
+ static unsigned long start = VMALLOC_START & PGDIR_MASK;
+ unsigned long address;
- __flush_tlb_all();
- return;
+ for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
+ if (!test_bit(pgd_index(address), insync)) {
+ const pgd_t *pgd_ref = pgd_offset_k(address);
+ struct page *page;
+
+ if (pgd_none(*pgd_ref))
+ continue;
+ spin_lock(&pgd_lock);
+ for (page = pgd_list; page;
+ page = (struct page *)page->index) {
+ pgd_t *pgd;
+ pgd = (pgd_t *)page_address(page) + pgd_index(address);
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+ else
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+ }
+ spin_unlock(&pgd_lock);
+ set_bit(pgd_index(address), insync);
+ }
+ if (address == start)
+ start = address + PGDIR_SIZE;
}
+ /* Check that there is no need to do the same for the modules area. */
+ BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
+ BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
+ (__START_KERNEL & PGDIR_MASK)));
+}
+
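+/* Boot with "pagefaulttrace" on the kernel command line to set
+ page_fault_trace, which enables the "pagefault rip:..." trace
+ printk in do_page_fault(). */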
+static int __init enable_pagefaulttrace(char *str)
+{
+ page_fault_trace = 1;
+ return 1;
}
+__setup("pagefaulttrace", enable_pagefaulttrace);