diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 5d6b2114c..80bb11c7a 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -33,7 +33,13 @@
 #include 
 #include 
 #include 
-#include 
+
+/* Page fault error code bits */
+#define PF_PROT (1<<0) /* or no page found */
+#define PF_WRITE (1<<1)
+#define PF_USER (1<<2)
+#define PF_RSVD (1<<3)
+#define PF_INSTR (1<<4)
 
 void bust_spinlocks(int yes)
 {
@@ -68,13 +74,13 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 unsigned char *max_instr;
 
 /* If it was a exec fault ignore */
- if (error_code & (1<<4))
+ if (error_code & PF_INSTR)
 return 0;
 
 instr = (unsigned char *)convert_rip_to_linear(current, regs);
 max_instr = instr + 15;
 
- if ((regs->cs & 3) != 0 && instr >= (unsigned char *)TASK_SIZE)
+ if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE64)
 return 0;
 
 while (scan_more && instr < max_instr) {
@@ -106,7 +112,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 /* Could check the LDT for lm, but for now it's good enough to
 assume that long mode only uses well known segments or kernel. */
- scan_more = ((regs->cs & 3) == 0) || (regs->cs == __USER_CS);
+ scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
 break;
 
 case 0x60:
@@ -150,8 +156,8 @@ void dump_pagetable(unsigned long address)
 pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
 pgd += pgd_index(address);
- printk("PGD %lx ", pgd_val(*pgd));
 if (bad_address(pgd)) goto bad;
+ printk("PGD %lx ", pgd_val(*pgd));
 if (!pgd_present(*pgd)) goto ret;
 
 pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
@@ -212,9 +218,7 @@ int unhandled_signal(struct task_struct *tsk, int sig)
 {
 if (tsk->pid == 1)
 return 1;
- /* Warn for strace, but not for gdb */
- if (!test_ti_thread_flag(tsk->thread_info, TIF_SYSCALL_TRACE) &&
- (tsk->ptrace & PT_PTRACED))
+ if (tsk->ptrace & PT_PTRACED)
 return 0;
 return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
 (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
@@ -223,17 +227,23 @@ int unhandled_signal(struct task_struct *tsk, int sig)
 static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
 unsigned long error_code)
 {
- oops_begin();
+ unsigned long flags = oops_begin();
+ struct task_struct *tsk;
+
 printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
 current->comm, address);
 dump_pagetable(address);
+ tsk = current;
+ tsk->thread.cr2 = address;
+ tsk->thread.trap_no = 14;
+ tsk->thread.error_code = error_code;
 __die("Bad pagetable", regs, error_code);
- oops_end();
+ oops_end(flags);
 do_exit(SIGKILL);
 }
 
 /*
- * Handle a fault on the vmalloc or module mapping area
+ * Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
@@ -254,6 +264,8 @@ static int vmalloc_fault(unsigned long address)
 return -1;
 if (pgd_none(*pgd))
 set_pgd(pgd, *pgd_ref);
+ else
+ BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
 
 /* Below here mismatches are bugs because these lower tables
 are shared */
@@ -279,7 +291,6 @@ static int vmalloc_fault(unsigned long address)
 that. */
 if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
 BUG();
- __flush_tlb_all();
 return 0;
 }
 
@@ -290,14 +301,9 @@ int exception_trace = 1;
 
 /*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
- *
- * error_code:
- * bit 0 == 0 means no page found, 1 means protection fault
- * bit 1 == 0 means read, 1 means write
- * bit 2 == 0 means kernel, 1 means user-mode
- * bit 3 == 1 means fault was an instruction fetch
 */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+ unsigned long error_code)
 {
 struct task_struct *tsk;
 struct mm_struct *mm;
@@ -305,35 +311,16 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 unsigned long address;
 const struct exception_table_entry *fixup;
 int write;
+ unsigned long flags;
 siginfo_t info;
 
-#ifdef CONFIG_CHECKING
- {
- unsigned long gs;
- struct x8664_pda *pda = cpu_pda + stack_smp_processor_id();
- rdmsrl(MSR_GS_BASE, gs);
- if (gs != (unsigned long)pda) {
- wrmsrl(MSR_GS_BASE, pda);
- printk("page_fault: wrong gs %lx expected %p\n", gs, pda);
- }
- }
-#endif
+ tsk = current;
+ mm = tsk->mm;
+ prefetchw(&mm->mmap_sem);
 
 /* get the address */
 __asm__("movq %%cr2,%0":"=r" (address));
- if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
- SIGSEGV) == NOTIFY_STOP)
- return;
-
- if (likely(regs->eflags & X86_EFLAGS_IF))
- local_irq_enable();
-
- if (unlikely(page_fault_trace))
- printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
- regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
-
- tsk = current;
- mm = tsk->mm;
 
 info.si_code = SEGV_MAPERR;
 
@@ -348,16 +335,22 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 *
 * This verifies that the fault happens in kernel space
 * (error_code & 4) == 0, and that the fault was not a
- * protection error (error_code & 1) == 0.
+ * protection error (error_code & 9) == 0.
 */
- if (unlikely(address >= TASK_SIZE)) {
- if (!(error_code & 5) &&
- ((address >= VMALLOC_START && address < VMALLOC_END) ||
- (address >= MODULES_VADDR && address < MODULES_END))) {
- if (vmalloc_fault(address) < 0)
- goto bad_area_nosemaphore;
- return;
+ if (unlikely(address >= TASK_SIZE64)) {
+ /*
+ * Don't check for the module range here: its PML4
+ * is always initialized because it's shared with the main
+ * kernel text. Only vmalloc may need PML4 syncups.
+ */
+ if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
+ ((address >= VMALLOC_START && address < VMALLOC_END))) {
+ if (vmalloc_fault(address) >= 0)
+ return;
 }
+ if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ SIGSEGV) == NOTIFY_STOP)
+ return;
 /*
 * Don't take the mm semaphore here. If we fixup a prefetch
 * fault we could otherwise deadlock.
@@ -365,7 +358,18 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 goto bad_area_nosemaphore;
 }
 
- if (unlikely(error_code & (1 << 3)))
+ if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ SIGSEGV) == NOTIFY_STOP)
+ return;
+
+ if (likely(regs->eflags & X86_EFLAGS_IF))
+ local_irq_enable();
+
+ if (unlikely(page_fault_trace))
+ printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
+ regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
+
+ if (unlikely(error_code & PF_RSVD))
 pgtable_bad(address, regs, error_code);
 
 /*
@@ -392,7 +396,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 * thus avoiding the deadlock.
 */
 if (!down_read_trylock(&mm->mmap_sem)) {
- if ((error_code & 4) == 0 &&
+ if ((error_code & PF_USER) == 0 &&
 !search_exception_tables(regs->rip))
 goto bad_area_nosemaphore;
 down_read(&mm->mmap_sem);
@@ -419,17 +423,17 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 good_area:
 info.si_code = SEGV_ACCERR;
 write = 0;
- switch (error_code & 3) {
+ switch (error_code & (PF_PROT|PF_WRITE)) {
 default: /* 3: write, present */
 /* fall through */
- case 2: /* write, not present */
+ case PF_WRITE: /* write, not present */
 if (!(vma->vm_flags & VM_WRITE))
 goto bad_area;
 write++;
 break;
- case 1: /* read, present */
+ case PF_PROT: /* read, present */
 goto bad_area;
- case 0: /* read, not present */
+ case 0: /* read, not present */
 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 goto bad_area;
 }
@@ -440,13 +444,13 @@ good_area:
 * the fault.
 */
 switch (handle_mm_fault(mm, vma, address, write)) {
- case 1:
+ case VM_FAULT_MINOR:
 tsk->min_flt++;
 break;
- case 2:
+ case VM_FAULT_MAJOR:
 tsk->maj_flt++;
 break;
- case 0:
+ case VM_FAULT_SIGBUS:
 goto do_sigbus;
 default:
 goto out_of_memory;
@@ -464,7 +468,7 @@ bad_area:
 bad_area_nosemaphore:
 
 /* User mode accesses just cause a SIGSEGV */
- if (error_code & 4) {
+ if (error_code & PF_USER) {
 if (is_prefetch(regs, address, error_code))
 return;
 
@@ -522,7 +526,7 @@ no_context:
 * terminate things with extreme prejudice.
 */
 
- oops_begin();
+ flags = oops_begin();
 
 if (address < PAGE_SIZE)
 printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
@@ -532,10 +536,13 @@ no_context:
 printk_address(regs->rip);
 printk("\n");
 dump_pagetable(address);
+ tsk->thread.cr2 = address;
+ tsk->thread.trap_no = 14;
+ tsk->thread.error_code = error_code;
 __die("Oops", regs, error_code);
 /* Executive summary in case the body of the oops scrolled away */
 printk(KERN_EMERG "CR2: %016lx\n", address);
- oops_end();
+ oops_end(flags);
 do_exit(SIGKILL);
 
 /*
@@ -557,7 +564,7 @@ do_sigbus:
 up_read(&mm->mmap_sem);
 
 /* Kernel mode? Handle exceptions or die */
- if (!(error_code & 4))
+ if (!(error_code & PF_USER))
 goto no_context;
 
 tsk->thread.cr2 = address;
@@ -570,3 +577,52 @@ do_sigbus:
 force_sig_info(SIGBUS, &info, tsk);
 return;
 }
+
+DEFINE_SPINLOCK(pgd_lock);
+struct page *pgd_list;
+
+void vmalloc_sync_all(void)
+{
+ /* Note that races in the updates of insync and start aren't
+ problematic:
+ insync can only get set bits added, and updates to start are only
+ improving performance (without affecting correctness if undone). */
+ static DECLARE_BITMAP(insync, PTRS_PER_PGD);
+ static unsigned long start = VMALLOC_START & PGDIR_MASK;
+ unsigned long address;
+
+ for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
+ if (!test_bit(pgd_index(address), insync)) {
+ const pgd_t *pgd_ref = pgd_offset_k(address);
+ struct page *page;
+
+ if (pgd_none(*pgd_ref))
+ continue;
+ spin_lock(&pgd_lock);
+ for (page = pgd_list; page;
+ page = (struct page *)page->index) {
+ pgd_t *pgd;
+ pgd = (pgd_t *)page_address(page) + pgd_index(address);
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+ else
+ BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
+ }
+ spin_unlock(&pgd_lock);
+ set_bit(pgd_index(address), insync);
+ }
+ if (address == start)
+ start = address + PGDIR_SIZE;
+ }
+ /* Check that there is no need to do the same for the modules area. */
+ BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
+ BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
+ (__START_KERNEL & PGDIR_MASK)));
+}
+
+static int __init enable_pagefaulttrace(char *str)
+{
+ page_fault_trace = 1;
+ return 1;
+}
+__setup("pagefaulttrace", enable_pagefaulttrace);
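
For reference, the PF_* constants added at the top of this patch replace the magic numbers that the old code tested directly, such as (error_code & 4) and (error_code & (1 << 3)). The sketch below is a rough illustration of what those bits encode; it is ordinary stand-alone user-space C, not kernel code, and decode_fault() is a hypothetical helper that only mirrors the checks do_page_fault() applies to error_code:

#include <stdio.h>

/* Same bit assignments as the patch defines in fault.c */
#define PF_PROT  (1 << 0) /* 0: no page found, 1: protection fault */
#define PF_WRITE (1 << 1) /* 0: read access,   1: write access */
#define PF_USER  (1 << 2) /* 0: kernel mode,   1: user mode */
#define PF_RSVD  (1 << 3) /* reserved bit was set in a page-table entry */
#define PF_INSTR (1 << 4) /* fault was an instruction fetch */

/* Hypothetical helper: print the same case split that
 * do_page_fault() performs on error_code. */
static void decode_fault(unsigned long error_code)
{
	printf("0x%02lx: %s %s, %s%s%s\n", error_code,
	       (error_code & PF_USER)  ? "user"  : "kernel",
	       (error_code & PF_WRITE) ? "write" : "read",
	       (error_code & PF_PROT)  ? "protection fault"
	                               : "page not present",
	       (error_code & PF_RSVD)  ? ", reserved bit set" : "",
	       (error_code & PF_INSTR) ? ", instruction fetch" : "");
}

int main(void)
{
	decode_fault(0);                            /* kernel read, missing page */
	decode_fault(PF_WRITE);                     /* kernel write, missing page */
	decode_fault(PF_USER | PF_PROT);            /* user read of a present page */
	decode_fault(PF_USER | PF_PROT | PF_WRITE); /* user write to a present page */
	return 0;
}

In the patched do_page_fault() itself, PF_RSVD is treated as page-table corruption and branches to pgtable_bad(), while PF_INSTR makes is_prefetch() return immediately, since an instruction fetch cannot be the spurious prefetch fault that function exists to detect.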