X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fi386%2Fmm%2Ffault.c;h=7f0fcf219a26e4cec52d2e5bbd729601c356d261;hb=9464c7cf61b9433057924c36e6e02f303a00e768;hp=f7279468323a62cef22984f3e81707eaa6a93b4b;hpb=41689045f6a3cbe0550e1d34e9cc20d2e8c432ba;p=linux-2.6.git

diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index f72794683..7f0fcf219 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -30,40 +30,6 @@
 
 extern void die(const char *,struct pt_regs *,long);
 
-#ifdef CONFIG_KPROBES
-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-int register_page_fault_notifier(struct notifier_block *nb)
-{
-	vmalloc_sync_all();
-	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
-
-int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
-}
-
-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
-{
-	struct die_args args = {
-		.regs = regs,
-		.str = str,
-		.err = err,
-		.trapnr = trap,
-		.signr = sig
-	};
-	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
-}
-#else
-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
-{
-	return NOTIFY_DONE;
-}
-#endif
-
-
 /*
  * Unlock any spinlocks which will prevent us from getting the
  * message out
@@ -111,15 +77,12 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
 	unsigned seg = regs->xcs & 0xffff;
 	u32 seg_ar, seg_limit, base, *desc;
 
-	/* Unlikely, but must come before segment checks. */
-	if (unlikely(regs->eflags & VM_MASK)) {
-		base = seg << 4;
-		*eip_limit = base + 0xffff;
-		return base + (eip & 0xffff);
-	}
-
 	/* The standard kernel/user address space limit. */
 	*eip_limit = (seg & 3) ? USER_DS.seg : KERNEL_DS.seg;
+
+	/* Unlikely, but must come before segment checks. */
+	if (unlikely((regs->eflags & VM_MASK) != 0))
+		return eip + (seg << 4);
 
 	/* By far the most common cases. */
 	if (likely(seg == __USER_CS || seg == __KERNEL_CS))
@@ -358,7 +321,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
 	if (unlikely(address >= TASK_SIZE)) {
 		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
 			return;
-		if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+		if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
 						SIGSEGV) == NOTIFY_STOP)
 			return;
 		/*
@@ -368,7 +331,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
 		goto bad_area_nosemaphore;
 	}
 
-	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
 					SIGSEGV) == NOTIFY_STOP)
 		return;
 
@@ -389,7 +352,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in the
 	 * kernel and should generate an OOPS.  Unfortunatly, in the case of an
-	 * erroneous fault occurring in a code path which already holds mmap_sem
+	 * erroneous fault occuring in a code path which already holds mmap_sem
 	 * we will deadlock attempting to validate the fault against the
 	 * address space.  Luckily the kernel only validly references user
 	 * space from well defined areas of code, which are listed in the
@@ -417,12 +380,12 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
 		goto bad_area;
 	if (error_code & 4) {
 		/*
-		 * Accessing the stack below %esp is always a bug.
-		 * The large cushion allows instructions like enter
-		 * and pusha to work. ("enter $65535,$31" pushes
-		 * 32 pointers and then decrements %esp by 65535.)
+		 * accessing the stack below %esp is always a bug.
+		 * The "+ 32" is there due to some instructions (like
+		 * pusha) doing post-decrement on the stack and that
+		 * doesn't show up until later..
 		 */
-		if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
+		if (address + 32 < regs->esp)
 			goto bad_area;
 	}
 	if (expand_stack(vma, address))
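For context on the first and third hunks: on the "hp" side of this diff a kprobes-style client hooks page faults through the dedicated notify_page_fault_chain via register_page_fault_notifier(), while on the "h" side the same events reach clients through the generic die notifier chain fed by the notify_die() calls in do_page_fault(). The following is only a minimal sketch of such a client, assuming the 2.6.18/19-era API visible in the removed hunk; the pf_log_* names and the module boilerplate are illustrative, not part of this diff.

/* Sketch: log page faults via the notifier interface shown above. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/ptrace.h>
#include <asm/kdebug.h>

static int pf_log_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;	/* filled in by notify_page_fault()/notify_die() */

	if (val == DIE_PAGE_FAULT)
		printk(KERN_DEBUG "page fault: eip=%08lx err=%ld\n",
		       (unsigned long)args->regs->eip, args->err);
	return NOTIFY_DONE;		/* let normal fault handling continue */
}

static struct notifier_block pf_log_nb = {
	.notifier_call = pf_log_notify,
};

static int __init pf_log_init(void)
{
	/* On the "hp" tree: dedicated chain (registration also calls vmalloc_sync_all()).
	 * On the "h" tree the equivalent would be register_die_notifier(&pf_log_nb). */
	return register_page_fault_notifier(&pf_log_nb);
}

static void __exit pf_log_exit(void)
{
	unregister_page_fault_notifier(&pf_log_nb);
}

module_init(pf_log_init);
module_exit(pf_log_exit);
MODULE_LICENSE("GPL");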