/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
#include <linux/nmi.h>
#include <linux/bug.h>
#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>
#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/arch_hooks.h>
#include <asm/kdebug.h>
#include <asm/stacktrace.h>
#include <linux/module.h>
#include <linux/vs_context.h>
#include <linux/vserver/history.h>

#include "mach_traps.h"
int panic_on_unrecovered_nmi;

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;
/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
int kstack_depth_to_print = 24;
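/* Tunable at boot time via the "kstack=" parameter, parsed by kstack_setup() below. */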
ATOMIC_NOTIFIER_HEAD(i386die_chain);

extern char last_sysfs_file[];
int register_die_notifier(struct notifier_block *nb)
{
        vmalloc_sync_all();
        return atomic_notifier_chain_register(&i386die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
int unregister_die_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&i386die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
        return p > (void *)tinfo &&
               p < (void *)tinfo + THREAD_SIZE - 3;
}
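/*
 * With CONFIG_FRAME_POINTER we can walk the %ebp chain one frame at a
 * time; without it, print_context_stack() below falls back to scanning
 * every word on the stack and reporting anything that looks like a
 * kernel text address, which can include stale leftovers.
 */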
static inline unsigned long print_context_stack(struct thread_info *tinfo,
                                unsigned long *stack, unsigned long ebp,
                                struct stacktrace_ops *ops, void *data)
{
        unsigned long addr;

#ifdef CONFIG_FRAME_POINTER
        while (valid_stack_ptr(tinfo, (void *)ebp)) {
                unsigned long new_ebp;
                addr = *(unsigned long *)(ebp + 4);
                ops->address(data, addr);
                /*
                 * break out of recursive entries (such as
                 * end_of_stack_stop_unwind_function). Also,
                 * we can never allow a frame pointer to
                 * move downwards!
                 */
                new_ebp = *(unsigned long *)ebp;
                if (new_ebp <= ebp)
                        break;
                ebp = new_ebp;
        }
#else
        while (valid_stack_ptr(tinfo, stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr))
                        ops->address(data, addr);
        }
#endif
        return ebp;
}
#define MSG(msg) ops->warning(data, msg)
void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack,
                struct stacktrace_ops *ops, void *data)
{
        unsigned long ebp = 0;

        if (!task)
                task = current;

        if (!stack) {
                unsigned long dummy;
                stack = &dummy;
                if (task && task != current)
                        stack = (unsigned long *)task->thread.esp;
        }

#ifdef CONFIG_FRAME_POINTER
        if (!ebp) {
                if (task == current) {
                        /* Grab ebp right from our regs */
                        asm ("movl %%ebp, %0" : "=r" (ebp) : );
                } else {
                        /* ebp is the last reg pushed by switch_to */
                        ebp = *(unsigned long *) task->thread.esp;
                }
        }
#endif

        while (1) {
                struct thread_info *context;
                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                ebp = print_context_stack(context, stack, ebp, ops, data);
                /* Should be after the line below, but somewhere
                   in early boot context comes out corrupted and we
                   can't reference it -AK */
                if (ops->stack(data, "IRQ") < 0)
                        break;
                stack = (unsigned long*)context->previous_esp;
                if (!stack)
                        break;
                touch_nmi_watchdog();
        }
}
EXPORT_SYMBOL(dump_trace);
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
        printk("%s", (char *)data);
        print_symbol(msg, symbol);
        printk("\n");
}
static void print_trace_warning(void *data, char *msg)
{
        printk("%s%s\n", (char *)data, msg);
}
static int print_trace_stack(void *data, char *name)
{
        return 0;
}
/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr)
{
        printk("%s [<%08lx>] ", (char *)data, addr);
        print_symbol("%s\n", addr);
}
static struct stacktrace_ops print_trace_ops = {
        .warning = print_trace_warning,
        .warning_symbol = print_trace_warning_symbol,
        .stack = print_trace_stack,
        .address = print_trace_address,
};
static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                   unsigned long * stack, char *log_lvl)
{
        dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
        printk("%s =======================\n", log_lvl);
}
void show_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long * stack)
{
        show_trace_log_lvl(task, regs, stack, "");
}
static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                               unsigned long *esp, char *log_lvl)
{
        unsigned long *stack;
        int i;

        if (esp == NULL) {
                if (task)
                        esp = (unsigned long*)task->thread.esp;
                else
                        esp = (unsigned long *)&esp;
        }

        stack = esp;
        for(i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n%s       ", log_lvl);
                printk("%08lx ", *stack++);
        }
        printk("\n%sCall Trace:\n", log_lvl);
        show_trace_log_lvl(task, regs, esp, log_lvl);
}
void show_stack(struct task_struct *task, unsigned long *esp)
{
        printk("       ");
        show_stack_log_lvl(task, NULL, esp, "");
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        /* the address of this local marks the current stack pointer */
        show_trace(current, NULL, &stack);
}

EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = 1;
        unsigned long esp;
        unsigned short ss;

        esp = (unsigned long) (&regs->esp);
        savesegment(ss, ss);
        if (user_mode_vm(regs)) {
                in_kernel = 0;
                esp = regs->esp;
                ss = regs->xss & 0xffff;
        }
        print_modules();
        printk(KERN_EMERG "CPU:    %d\n"
                KERN_EMERG "EIP:    %04x:[<%08lx>]    %s VLI\n"
                KERN_EMERG "EFLAGS: %08lx   (%s %.*s)\n",
                smp_processor_id(), 0xffff & regs->xcs, regs->eip,
                print_tainted(), regs->eflags, init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
        print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
        printk(KERN_EMERG "eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk(KERN_EMERG "esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk(KERN_EMERG "ds: %04x   es: %04x   ss: %04x\n",
                regs->xds & 0xffff, regs->xes & 0xffff, ss);
        printk(KERN_EMERG "Process %.*s (pid: %d[#%u], ti=%p task=%p task.ti=%p)",
                TASK_COMM_LEN, current->comm, current->pid, current->xid,
                current_thread_info(), current, current->thread_info);
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                u8 *eip;
                int code_bytes = 64;
                unsigned char c;

                printk("\n" KERN_EMERG "Stack: ");
                show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);

                printk(KERN_EMERG "Code: ");

                eip = (u8 *)regs->eip - 43;
                if (eip < (u8 *)PAGE_OFFSET ||
                        probe_kernel_address(eip, c)) {
                        /* try starting at EIP */
                        eip = (u8 *)regs->eip;
                        code_bytes = 32;
                }
                for (i = 0; i < code_bytes; i++, eip++) {
                        if (eip < (u8 *)PAGE_OFFSET ||
                                probe_kernel_address(eip, c)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (eip == (u8 *)regs->eip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}
int is_valid_bugaddr(unsigned long eip)
{
        unsigned short ud2;

        if (eip < PAGE_OFFSET)
                return 0;
        if (probe_kernel_address((unsigned short *)eip, ud2))
                return 0;

        return ud2 == 0x0b0f;
}
/*
 * This is gone through when something in the kernel has done something bad and
 * is about to be terminated.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
        static struct {
                spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock = __SPIN_LOCK_UNLOCKED(die.lock),
                .lock_owner = -1,
                .lock_owner_depth = 0
        };
        static int die_counter;
        unsigned long flags;

        oops_enter();

        if (die.lock_owner != raw_smp_processor_id()) {
                console_verbose();
                spin_lock_irqsave(&die.lock, flags);
                die.lock_owner = smp_processor_id();
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
        }
        else
                local_save_flags(flags);

        if (++die.lock_owner_depth < 3) {
                int nl = 0;
                unsigned long esp;
                unsigned short ss;

                report_bug(regs->eip);

                printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
                printk(KERN_EMERG "PREEMPT ");
                nl = 1;
#endif
#ifdef CONFIG_SMP
                if (!nl)
                        printk(KERN_EMERG);
                printk("SMP ");
                nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
                if (!nl)
                        printk(KERN_EMERG);
                printk("DEBUG_PAGEALLOC");
                nl = 1;
#endif
                if (nl)
                        printk("\n");
                printk(KERN_ALERT "last sysfs file: %s\n", last_sysfs_file);
                if (notify_die(DIE_OOPS, str, regs, err,
                               current->thread.trap_no, SIGSEGV) != NOTIFY_STOP) {
                        show_registers(regs);
                        /* Executive summary in case the oops scrolled away */
                        esp = (unsigned long) (&regs->esp);
                        savesegment(ss, ss);
                        if (user_mode(regs)) {
                                esp = regs->esp;
                                ss = regs->xss & 0xffff;
                        }
                        printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
                        print_symbol("%s", regs->eip);
                        printk(" SS:ESP %04x:%08lx\n", ss, esp);
                } else
                        regs = NULL;
        } else
                printk(KERN_EMERG "Recursive die() failure, output suppressed\n");

        bust_spinlocks(0);
        die.lock_owner = -1;
        spin_unlock_irqrestore(&die.lock, flags);

        if (!regs)
                return;

        if (kexec_should_crash(current))
                crash_kexec(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        oops_exit();
        do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!user_mode_vm(regs))
                die(str, regs, err);
}
static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
                              struct pt_regs * regs, long error_code,
                              siginfo_t *info)
{
        struct task_struct *tsk = current;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

        if (regs->eflags & VM_MASK) {
                if (vm86)
                        goto vm86_trap;
                goto trap_signal;
        }

        if (!user_mode(regs))
                goto kernel_trap;

        trap_signal: {
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        kernel_trap: {
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }

        vm86_trap: {
                int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
                if (ret) goto trap_signal;
                return;
        }
}
#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
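/*
 * For reference, DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) above
 * expands to a fastcall do_invalid_TSS() handler that first runs the
 * DIE_TRAP notifier chain and then calls do_trap() with vm86=0 and a
 * NULL siginfo.
 */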
/*
 * lazy-check for CS validity on exec-shield binaries:
 *
 * the original non-exec stack patch was written by
 * Solar Designer <solar at openwall.com>. Thanks!
 */
static int
check_lazy_exec_limit(int cpu, struct pt_regs *regs, long error_code)
{
        struct desc_struct *desc1, *desc2;
        struct vm_area_struct *vma;
        unsigned long limit;

        if (current->mm == NULL)
                return 0;

        limit = -1UL;
        if (current->mm->context.exec_limit != -1UL) {
                limit = PAGE_SIZE;
                spin_lock(&current->mm->page_table_lock);
                for (vma = current->mm->mmap; vma; vma = vma->vm_next)
                        if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
                                limit = vma->vm_end;
                spin_unlock(&current->mm->page_table_lock);
                if (limit >= TASK_SIZE)
                        limit = -1UL;
                current->mm->context.exec_limit = limit;
        }
        set_user_cs(&current->mm->context.user_cs, limit);

        desc1 = &current->mm->context.user_cs;
        desc2 = get_cpu_gdt_table(cpu) + GDT_ENTRY_DEFAULT_USER_CS;

        if (desc1->a != desc2->a || desc1->b != desc2->b) {
                /*
                 * The CS was not in sync - reload it and retry the
                 * instruction. If the instruction still faults then
                 * we won't hit this branch next time around.
                 */
                if (print_fatal_signals >= 2) {
                        printk("#GPF fixup (%ld[seg:%lx]) at %08lx, CPU#%d.\n", error_code, error_code/8, regs->eip, smp_processor_id());
                        printk(" exec_limit: %08lx, user_cs: %08lx/%08lx, CPU_cs: %08lx/%08lx.\n", current->mm->context.exec_limit, desc1->a, desc1->b, desc2->a, desc2->b);
                }
                load_user_cs_desc(cpu, current->mm);
                return 1;
        }

        return 0;
}
/*
 * The fixup code for errors in iret jumps to here (iret_exc). It loses
 * the original trap number and error code. The bogus trap 32 and error
 * code 0 are what the vanilla kernel delivers via:
 * DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
 *
 * In case of a general protection fault in the iret instruction, we
 * need to check for a lazy CS update for exec-shield.
 */
fastcall void do_iret_error(struct pt_regs *regs, long error_code)
{
        int ok = check_lazy_exec_limit(get_cpu(), regs, error_code);
        put_cpu();
        if (!ok && notify_die(DIE_TRAP, "iret exception", regs,
                              error_code, 32, SIGSEGV) != NOTIFY_STOP) {
                siginfo_t info;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = ILL_BADSTK;
                info.si_addr = 0;
                do_trap(32, SIGSEGV, "iret exception", 0, regs, error_code,
                        &info);
        }
}
fastcall void __kprobes do_general_protection(struct pt_regs * regs,
                                              long error_code)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &current->thread;
        int ok;

        /*
         * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
         * invalid offset set (the LAZY one) and the faulting thread has
         * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
         * and we set the offset field correctly. Then we let the CPU
         * restart the faulting instruction.
         */
        if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
            thread->io_bitmap_ptr) {
                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
                       thread->io_bitmap_max);
                /*
                 * If the previously set map was extending to higher ports
                 * than the current one, pad extra space with 0xff (no access).
                 */
                if (thread->io_bitmap_max < tss->io_bitmap_max)
                        memset((char *) tss->io_bitmap +
                                thread->io_bitmap_max, 0xff,
                                tss->io_bitmap_max - thread->io_bitmap_max);
                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->io_bitmap_base = IO_BITMAP_OFFSET;
                tss->io_bitmap_owner = thread;
                put_cpu();
                return;
        }

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;

        if (regs->eflags & VM_MASK)
                goto gp_in_vm86;

        if (!user_mode(regs))
                goto gp_in_kernel;

        ok = check_lazy_exec_limit(cpu, regs, error_code);

        put_cpu();

        if (ok)
                return;

        if (print_fatal_signals) {
                printk("#GPF(%ld[seg:%lx]) at %08lx, CPU#%d.\n", error_code, error_code/8, regs->eip, smp_processor_id());
                printk(" exec_limit: %08lx, user_cs: %08lx/%08lx.\n", current->mm->context.exec_limit, current->mm->context.user_cs.a, current->mm->context.user_cs.b);
        }

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;
        force_sig(SIGSEGV, current);
        return;

gp_in_vm86:
        put_cpu();
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;

gp_in_kernel:
        put_cpu();
        if (!fixup_exception(regs)) {
                if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}
static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
                "CPU %d.\n", reason, smp_processor_id());
        printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

        /* Clear and disable the memory parity error line. */
        clear_mem_error(reason);
}
static __kprobes void
io_check_error(unsigned char reason, struct pt_regs * regs)
{
        printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        clear_io_check_error(reason);
}
static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
        /* Might actually be able to figure out what the guilty party
         * is. */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
                "CPU %d.\n", reason, smp_processor_id());
        printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}
static DEFINE_SPINLOCK(nmi_print_lock);

void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
{
        if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
            NOTIFY_STOP)
                return;

        spin_lock(&nmi_print_lock);
        /*
         * We are in trouble anyway, let's at least try
         * to get a message out.
         */
        bust_spinlocks(1);
        printk(KERN_EMERG "%s", msg);
        printk(" on CPU%d, eip %08lx, registers:\n",
                smp_processor_id(), regs->eip);
        show_registers(regs);
        console_silent();
        spin_unlock(&nmi_print_lock);
        bust_spinlocks(0);

        /* If we are in kernel we are probably nested up pretty bad
         * and might as well get out now while we still can.
         */
        if (!user_mode_vm(regs)) {
                current->thread.trap_no = 2;
                crash_kexec(regs);
        }

        do_exit(SIGSEGV);
}
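/*
 * On standard PC hardware the NMI "reason" below is read from system
 * control port 0x61: bit 7 reports a memory parity error, bit 6 an I/O
 * channel check (hence the 0xc0 mask); NMIs with neither bit set are
 * either IPIs or unknown.
 */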
static __kprobes void default_do_nmi(struct pt_regs * regs)
{
        unsigned char reason = 0;

        /* Only the BSP gets external NMIs from the system. */
        if (!smp_processor_id())
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                                                        == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog_tick(regs, reason))
                        return;
                if (!do_nmi_callback(regs, smp_processor_id()))
#endif
                        unknown_nmi_error(reason, regs);

                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered.
         */
        reassert_nmi();
}
fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
        int cpu;

        nmi_enter();

        cpu = smp_processor_id();

        ++nmi_count(cpu);

        default_do_nmi(regs);

        nmi_exit();
}
#ifdef CONFIG_KPROBES
fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
        /* This is an interrupt gate, because kprobes wants interrupts
           disabled. Normal trap handlers don't. */
        restore_interrupts(regs);
        do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}
#endif
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
{
        unsigned int condition;
        struct task_struct *tsk = current;

        get_debugreg(condition, 6);

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                        SIGTRAP) == NOTIFY_STOP)
                return;
        /* It's safe to allow irq's after DR6 has been saved */
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg[7])
                        goto clear_dr7;
        }

        if (regs->eflags & VM_MASK)
                goto debug_vm86;

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg[6] = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                /*
                 * We already checked v86 mode above, so we can
                 * check for kernel mode by just checking the CPL
                 * of CS.
                 */
                if (!user_mode(regs))
                        goto clear_TF_reenable;
        }

        /* Ok, finally something we can handle */
        send_sigtrap(tsk, regs, error_code);

        /* Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        set_debugreg(0, 7);
        return;

debug_vm86:
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->eflags &= ~TF_MASK;
        return;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short cwd, swd;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status. 0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit. We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (swd & ~cwd & 0x3f) {
                case 0x000: /* No unmasked exception */
                        return;
                default:    /* Multiple exceptions */
                        break;
                case 0x001: /* Invalid Op */
                        /*
                         * swd & 0x240 == 0x040: Stack Underflow
                         * swd & 0x240 == 0x240: Stack Overflow
                         * User must clear the SF bit (0x40) if set
                         */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}
fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
        ignore_fpu_irq = 1;
        math_error((void __user *)regs->eip);
}
static void simd_math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register. Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}
fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
                                        long error_code)
{
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->eip);
        } else {
                /*
                 * Handle strange cache flush from user space exception
                 * in all other cases. This is undocumented behaviour.
                 */
                if (regs->eflags & VM_MASK) {
                        handle_vm86_fault((struct kernel_vm86_regs *)regs,
                                          error_code);
                        return;
                }
                current->thread.trap_no = 19;
                current->thread.error_code = error_code;
                die_if_kernel("cache flush denied", regs, error_code);
                force_sig(SIGSEGV, current);
        }
}
fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
                                        long error_code)
{
#if 0
        /* No need to warn about this any longer. */
        printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
fastcall unsigned long patch_espfix_desc(unsigned long uesp,
                                         unsigned long kesp)
{
        int cpu = smp_processor_id();
        struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
        struct desc_struct *gdt = (struct desc_struct *)cpu_gdt_descr->address;
        unsigned long base = (kesp - uesp) & -THREAD_SIZE;
        unsigned long new_kesp = kesp - base;
        unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
        __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
        /* Set up base for espfix segment */
        desc &= 0x00f0ff0000000000ULL;
        desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
                ((((__u64)base) << 32) & 0xff00000000000000ULL) |
                ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
                (lim_pages & 0xffff);
        *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
        return new_kesp;
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        clts();         /* Allow maths ops (or we recurse) */
        if (!tsk_used_math(tsk))
                init_fpu(tsk);
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
}
#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
        printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
        printk(KERN_EMERG "killing %s.\n", current->comm);
        force_sig(SIGFPE, current);
        schedule();
}

#endif /* CONFIG_MATH_EMULATION */
#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
        __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

        /*
         * Update the IDT descriptor and reload the IDT so that
         * it uses the read-only mapped virtual address.
         */
        idt_descr.address = fix_to_virt(FIX_F00F_IDT);
        load_idt(&idt_descr);
}
#endif
/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
        _set_gate(n, DESCTYPE_INT, addr, __KERNEL_CS);
}

/*
 * This routine sets up an interrupt gate at descriptor privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
        _set_gate(n, DESCTYPE_INT | DESCTYPE_DPL3, addr, __KERNEL_CS);
}

static void __init set_trap_gate(unsigned int n, void *addr)
{
        _set_gate(n, DESCTYPE_TRAP, addr, __KERNEL_CS);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
        _set_gate(n, DESCTYPE_TRAP | DESCTYPE_DPL3, addr, __KERNEL_CS);
}

static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
{
        _set_gate(n, DESCTYPE_TASK, (void *)0, (gdt_entry<<3));
}
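/*
 * Gate choice matters in trap_init() below: interrupt gates clear
 * EFLAGS.IF on entry (debug, NMI and page fault must not be interrupted
 * before saving state), trap gates leave IF untouched, and the "system"
 * variants merely allow DPL-3 (userspace) callers, e.g. int3.
 */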
void __init trap_init(void)
{
#ifdef CONFIG_EISA
        void __iomem *p = ioremap(0x0FFFD9, 4);
        if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
                EISA_bus = 1;
        }
        iounmap(p);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        init_apic_mappings();
#endif

        set_trap_gate(0,&divide_error);
        set_intr_gate(1,&debug);
        set_intr_gate(2,&nmi);
        set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
        set_system_gate(4,&overflow);
        set_trap_gate(5,&bounds);
        set_trap_gate(6,&invalid_op);
        set_trap_gate(7,&device_not_available);
        set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
        set_trap_gate(9,&coprocessor_segment_overrun);
        set_trap_gate(10,&invalid_TSS);
        set_trap_gate(11,&segment_not_present);
        set_trap_gate(12,&stack_segment);
        set_trap_gate(13,&general_protection);
        set_intr_gate(14,&page_fault);
        set_trap_gate(15,&spurious_interrupt_bug);
        set_trap_gate(16,&coprocessor_error);
        set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
        set_trap_gate(18,&machine_check);
#endif
        set_trap_gate(19,&simd_coprocessor_error);

        if (cpu_has_fxsr) {
                /*
                 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
                 * Generates a compile-time "error: zero width for bit-field" if
                 * the alignment is wrong.
                 */
                struct fxsrAlignAssert {
                        int _:!(offsetof(struct task_struct,
                                        thread.i387.fxsave) & 15);
                };

                printk(KERN_INFO "Enabling fast FPU save and restore... ");
                set_in_cr4(X86_CR4_OSFXSR);
                printk("done.\n");
        }
        if (cpu_has_xmm) {
                printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
                                "support... ");
                set_in_cr4(X86_CR4_OSXMMEXCPT);
                printk("done.\n");
        }

        set_system_gate(SYSCALL_VECTOR,&system_call);

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();

        trap_init_hook();
}
static int __init kstack_setup(char *s)
{
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 1;
}
__setup("kstack=", kstack_setup);