/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>

#include <linux/ioport.h>
#include <linux/eisa.h>

#include <linux/mca.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/unwind.h>

#include <asm/arch_hooks.h>
#include <asm/kdebug.h>
#include <asm/stacktrace.h>

#include <linux/module.h>
#include <linux/vserver/debug.h>

#include "mach_traps.h"
asmlinkage int system_call(void);

struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
		{ 0, 0 }, { 0, 0 } };

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;
#ifndef CONFIG_X86_NO_IDT
/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
#endif
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void fixup_4gb_segment(void);
asmlinkage void machine_check(void);
static int kstack_depth_to_print = 24;
#ifdef CONFIG_STACK_UNWIND
static int call_trace = 1;
#else
#define call_trace (-1)
#endif
ATOMIC_NOTIFIER_HEAD(i386die_chain);

extern char last_sysfs_file[];

int register_die_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&i386die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);	/* used modular by kdb */

int unregister_die_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&i386die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier);	/* used modular by kdb */
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
        /* the "- 3" keeps a full 4-byte read at p inside the stack */
        return  p > (void *)tinfo &&
                p < (void *)tinfo + THREAD_SIZE - 3;
}

static inline unsigned long print_context_stack(struct thread_info *tinfo,
                                unsigned long *stack, unsigned long ebp,
                                struct stacktrace_ops *ops, void *data)
{
        unsigned long addr;

#ifdef CONFIG_FRAME_POINTER
        while (valid_stack_ptr(tinfo, (void *)ebp)) {
                addr = *(unsigned long *)(ebp + 4);
                ops->address(data, addr);
                /*
                 * break out of recursive entries (such as
                 * end_of_stack_stop_unwind_function):
                 */
                if (ebp == *(unsigned long *)ebp)
                        break;
                ebp = *(unsigned long *)ebp;
        }
#else
        while (valid_stack_ptr(tinfo, stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr))
                        ops->address(data, addr);
        }
#endif
        return ebp;
}
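/*
 * Editorial sketch (not part of the original file): the same ebp-chain
 * walk as the CONFIG_FRAME_POINTER loop above, done from userspace with
 * GCC builtins.  It assumes the i386 frame layout the loop relies on:
 * [ebp] holds the saved caller ebp, [ebp+4] the return address.  Build
 * with -fno-omit-frame-pointer; all names below are hypothetical.
 */
#if 0
#include <stdio.h>

static void walk_frames(void)
{
        unsigned long *ebp = __builtin_frame_address(0);

        /* stacks grow down, so a saved ebp must point above the current one */
        while (ebp && (unsigned long *)*ebp > ebp) {
                unsigned long ret = *(ebp + 1);	/* saved return address */

                printf("frame %p, return address %#lx\n", (void *)ebp, ret);
                ebp = (unsigned long *)*ebp;	/* follow the saved ebp */
        }
}
#endif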
struct ops_and_data {
        struct stacktrace_ops *ops;
        void *data;
};

static asmlinkage int
dump_trace_unwind(struct unwind_frame_info *info, void *data)
{
        struct ops_and_data *oad = (struct ops_and_data *)data;
        int n = 0;

        while (unwind(info) == 0 && UNW_PC(info)) {
                n++;
                oad->ops->address(oad->data, UNW_PC(info));
                if (arch_unw_user_mode(info))
                        break;
        }
        return n;
}

void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack,
                struct stacktrace_ops *ops, void *data)
{
        unsigned long ebp = 0;

        if (!task)
                task = current;

        if (call_trace >= 0) {
                int unw_ret = 0;
                struct unwind_frame_info info;
                struct ops_and_data oad = { .ops = ops, .data = data };

                if (regs) {
                        if (unwind_init_frame_info(&info, task, regs) == 0)
                                unw_ret = dump_trace_unwind(&info, &oad);
                } else if (task == current)
                        unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
                else {
                        if (unwind_init_blocked(&info, task) == 0)
                                unw_ret = dump_trace_unwind(&info, &oad);
                }
                if (unw_ret > 0) {
                        if (call_trace == 1 && !arch_unw_user_mode(&info)) {
                                ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
                                                    UNW_PC(&info));
                                if (UNW_SP(&info) >= PAGE_OFFSET) {
                                        ops->warning(data, "Leftover inexact backtrace:\n");
                                        stack = (void *)UNW_SP(&info);
                                } else
                                        ops->warning(data, "Full inexact backtrace again:\n");
                        } else if (call_trace >= 1)
                                return;
                        else
                                ops->warning(data, "Full inexact backtrace again:\n");
                } else
                        ops->warning(data, "Inexact backtrace:\n");
        }

        if (!stack) {
                unsigned long dummy;
                stack = &dummy;
                if (task && task != current)
                        stack = (unsigned long *)task->thread.esp;
        }

        if (task == current) {
                /* Grab ebp right from our regs */
                asm ("movl %%ebp, %0" : "=r" (ebp) : );
        } else {
                /* ebp is the last reg pushed by switch_to */
                ebp = *(unsigned long *) task->thread.esp;
        }

        while (1) {
                struct thread_info *context;
                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                ebp = print_context_stack(context, stack, ebp, ops, data);
                /* Should be after the line below, but somewhere
                   in early boot context comes out corrupted and we
                   can't reference it -AK */
                if (ops->stack(data, "IRQ") < 0)
                        break;
                stack = (unsigned long *)context->previous_esp;
                if (!stack)
                        break;
        }
}
EXPORT_SYMBOL(dump_trace);
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
        printk("%s", (char *)data);
        print_symbol(msg, symbol);
        printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
        printk("%s%s\n", (char *)data, msg);
}

static int print_trace_stack(void *data, char *name)
{
        return 0;
}

/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr)
{
        printk("%s [<%08lx>] ", (char *)data, addr);
        print_symbol("%s\n", addr);
}

static struct stacktrace_ops print_trace_ops = {
        .warning = print_trace_warning,
        .warning_symbol = print_trace_warning_symbol,
        .stack = print_trace_stack,
        .address = print_trace_address,
};
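/*
 * Editorial sketch (not part of the original file): dump_trace() takes
 * any struct stacktrace_ops, so a consumer can collect addresses instead
 * of printing them.  Roughly how a frame counter might look; the
 * nop_*() and count_address() names are hypothetical.
 */
#if 0
static void nop_warning(void *data, char *msg) { }
static void nop_warning_symbol(void *data, char *msg, unsigned long symbol) { }
static int nop_stack(void *data, char *name) { return 0; }

static void count_address(void *data, unsigned long addr)
{
        ++*(unsigned int *)data;	/* one callback per text address */
}

static struct stacktrace_ops count_ops = {
        .warning = nop_warning,
        .warning_symbol = nop_warning_symbol,
        .stack = nop_stack,
        .address = count_address,
};

/* usage: unsigned int n = 0; dump_trace(current, NULL, NULL, &count_ops, &n); */
#endif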
static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                   unsigned long * stack, char *log_lvl)
{
        dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
        printk("%s =======================\n", log_lvl);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long * stack)
{
        show_trace_log_lvl(task, regs, stack, "");
}
static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                               unsigned long *esp, char *log_lvl)
{
        unsigned long *stack;
        int i;

        if (esp == NULL) {
                if (task)
                        esp = (unsigned long*)task->thread.esp;
                else
                        esp = (unsigned long *)&esp;
        }

        stack = esp;
        for(i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n%s ", log_lvl);
                printk("%08lx ", *stack++);
        }
        printk("\n%sCall Trace:\n", log_lvl);
        show_trace_log_lvl(task, regs, esp, log_lvl);
}

void show_stack(struct task_struct *task, unsigned long *esp)
{
        show_stack_log_lvl(task, NULL, esp, "");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, NULL, &stack);
}

EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = 1;
        unsigned long esp;
        unsigned short ss;

        esp = (unsigned long) (&regs->esp);
        savesegment(ss, ss);
        if (user_mode_vm(regs)) {
                in_kernel = 0;
                esp = regs->esp;
                ss = regs->xss & 0xffff;
        }
        print_modules();
        printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n"
                        "EFLAGS: %08lx (%s %.*s) \n",
                smp_processor_id(), 0xffff & regs->xcs, regs->eip,
                print_tainted(), regs->eflags, system_utsname.release,
                (int)strcspn(system_utsname.version, " "),
                system_utsname.version);
        print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
        printk(KERN_EMERG "eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk(KERN_EMERG "esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk(KERN_EMERG "ds: %04x   es: %04x   ss: %04x\n",
                regs->xds & 0xffff, regs->xes & 0xffff, ss);
        printk(KERN_EMERG "Process %.*s (pid: %d[#%u], ti=%p task=%p task.ti=%p)",
                TASK_COMM_LEN, current->comm, current->pid, current->xid,
                current_thread_info(), current, current->thread_info);
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                u8 __user *eip;

                printk("\n" KERN_EMERG "Stack: ");
                show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);

                printk(KERN_EMERG "Code: ");

                eip = (u8 __user *)regs->eip - 43;
                for (i = 0; i < 64; i++, eip++) {
                        unsigned char c;

                        if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (eip == (u8 __user *)regs->eip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}
static void handle_BUG(struct pt_regs *regs)
{
        unsigned long eip = regs->eip;
        unsigned short ud2;

        if (eip < PAGE_OFFSET)
                return;
        if (__get_user(ud2, (unsigned short __user *)eip))
                return;
        if (ud2 != 0x0b0f)	/* the ud2 opcode, 0f 0b, read little-endian */
                return;

        printk(KERN_EMERG "------------[ cut here ]------------\n");

#ifdef CONFIG_DEBUG_BUGVERBOSE
        do {
                unsigned short line;
                char *file;
                char c;

                if (__get_user(line, (unsigned short __user *)(eip + 2)))
                        break;
                if (__get_user(file, (char * __user *)(eip + 4)) ||
                    (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
                        file = "<bad filename>";

                printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
                return;
        } while (0);
#endif
        printk(KERN_EMERG "Kernel BUG at [verbose debug info unavailable]\n");
}
/*
 * This is gone through when something in the kernel
 * has done something bad and is about to be terminated.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
        static struct {
                spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock = SPIN_LOCK_UNLOCKED,
                .lock_owner = -1,
                .lock_owner_depth = 0
        };
        static int die_counter;
        unsigned long flags;

        if (die.lock_owner != raw_smp_processor_id()) {
                console_verbose();
                spin_lock_irqsave(&die.lock, flags);
                die.lock_owner = smp_processor_id();
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
        } else
                local_save_flags(flags);

        if (++die.lock_owner_depth < 3) {
                unsigned long esp;
                unsigned short ss;

                handle_BUG(regs);
                printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
                printk(KERN_EMERG "PREEMPT ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
                printk("DEBUG_PAGEALLOC");
#endif
                printk(KERN_ALERT "last sysfs file: %s\n", last_sysfs_file);

                if (notify_die(DIE_OOPS, str, regs, err,
                                current->thread.trap_no, SIGSEGV) != NOTIFY_STOP) {
                        show_registers(regs);
                        /* Executive summary in case the oops scrolled away */
                        esp = (unsigned long) (&regs->esp);
                        savesegment(ss, ss);
                        if (user_mode(regs)) {
                                esp = regs->esp;
                                ss = regs->xss & 0xffff;
                        }
                        printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
                        print_symbol("%s", regs->eip);
                        printk(" SS:ESP %04x:%08lx\n", ss, esp);
                } else
                        regs = NULL;
        } else
                printk(KERN_EMERG "Recursive die() failure, output suppressed\n");

        bust_spinlocks(0);
        die.lock_owner = -1;
        spin_unlock_irqrestore(&die.lock, flags);

        if (kexec_should_crash(current))
                crash_kexec(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!user_mode_vm(regs))
                die(str, regs, err);
}

static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
                              struct pt_regs * regs, long error_code,
                              siginfo_t *info)
{
        struct task_struct *tsk = current;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

        if (regs->eflags & VM_MASK) {
                if (vm86)
                        goto vm86_trap;
                goto trap_signal;
        }

        if (!user_mode(regs))
                goto kernel_trap;

        trap_signal: {
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        kernel_trap: {
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }

        vm86_trap: {
                int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
                if (ret) goto trap_signal;
                return;
        }
}
#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
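/*
 * For reference (editorial note, not in the original source): the
 * expansion is purely mechanical, e.g. DO_ERROR(10, SIGSEGV, "invalid
 * TSS", invalid_TSS) below becomes roughly:
 *
 *	fastcall void do_invalid_TSS(struct pt_regs * regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code,
 *						10, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
 *	}
 */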
DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
/*
 * lazy-check for CS validity on exec-shield binaries:
 *
 * the original non-exec stack patch was written by
 * Solar Designer <solar at openwall.com>. Thanks!
 */
static int
check_lazy_exec_limit(int cpu, struct pt_regs *regs, long error_code)
{
        struct desc_struct *desc1, *desc2;
        struct vm_area_struct *vma;
        unsigned long limit;

        if (current->mm == NULL)
                return 0;

        limit = -1UL;
        if (current->mm->context.exec_limit != -1UL) {
                limit = PAGE_SIZE;
                spin_lock(&current->mm->page_table_lock);
                for (vma = current->mm->mmap; vma; vma = vma->vm_next)
                        if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
                                limit = vma->vm_end;
                spin_unlock(&current->mm->page_table_lock);
                if (limit >= TASK_SIZE)
                        limit = -1UL;
                current->mm->context.exec_limit = limit;
        }
        set_user_cs(&current->mm->context.user_cs, limit);

        desc1 = &current->mm->context.user_cs;
        desc2 = get_cpu_gdt_table(cpu) + GDT_ENTRY_DEFAULT_USER_CS;

        if (desc1->a != desc2->a || desc1->b != desc2->b) {
                /*
                 * The CS was not in sync - reload it and retry the
                 * instruction. If the instruction still faults then
                 * we won't hit this branch next time around.
                 */
                if (print_fatal_signals >= 2) {
                        printk("#GPF fixup (%ld[seg:%lx]) at %08lx, CPU#%d.\n",
                                error_code, error_code/8, regs->eip,
                                smp_processor_id());
                        printk(" exec_limit: %08lx, user_cs: %08lx/%08lx, CPU_cs: %08lx/%08lx.\n",
                                current->mm->context.exec_limit,
                                desc1->a, desc1->b, desc2->a, desc2->b);
                }
                load_user_cs_desc(cpu, current->mm);
                return 1;
        }

        return 0;
}
/*
 * The fixup code for errors in iret jumps to here (iret_exc). It loses
 * the original trap number and error code. The bogus trap 32 and error
 * code 0 are what the vanilla kernel delivers via:
 * DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
 *
 * In case of a general protection fault in the iret instruction, we
 * need to check for a lazy CS update for exec-shield.
 */
fastcall void do_iret_error(struct pt_regs *regs, long error_code)
{
        int ok = check_lazy_exec_limit(get_cpu(), regs, error_code);
        put_cpu();
        if (!ok && notify_die(DIE_TRAP, "iret exception", regs,
                              error_code, 32, SIGSEGV) != NOTIFY_STOP) {
                siginfo_t info;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = ILL_BADSTK;
                info.si_addr = 0;
                do_trap(32, SIGSEGV, "iret exception", 0, regs, error_code,
                        &info);
        }
}
fastcall void __kprobes do_general_protection(struct pt_regs * regs,
                                              long error_code)
{
        int cpu = get_cpu();
        int ok;

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;

        if (regs->eflags & VM_MASK)
                goto gp_in_vm86;

        if (!user_mode(regs))
                goto gp_in_kernel;

        ok = check_lazy_exec_limit(cpu, regs, error_code);
        put_cpu();

        if (ok)
                return;

        if (print_fatal_signals) {
                printk("#GPF(%ld[seg:%lx]) at %08lx, CPU#%d.\n", error_code,
                        error_code/8, regs->eip, smp_processor_id());
                printk(" exec_limit: %08lx, user_cs: %08lx/%08lx.\n",
                        current->mm->context.exec_limit,
                        current->mm->context.user_cs.a,
                        current->mm->context.user_cs.b);
        }

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;
        force_sig(SIGSEGV, current);
        return;

gp_in_vm86:
        put_cpu();
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;

gp_in_kernel:
        put_cpu();
        if (!fixup_exception(regs)) {
                if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
                "to continue\n");
        printk(KERN_EMERG "You probably have a hardware problem with your RAM "
                "chips\n");

        /* Clear and disable the memory parity error line. */
        clear_mem_error(reason);
}

static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
        printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        clear_io_check_error(reason);
}

static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
        /* Might actually be able to figure out what the guilty party
         * is. */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                reason, smp_processor_id());
        printk("Dazed and confused, but trying to continue\n");
        printk("Do you have a strange power saving mode enabled?\n");
}
static DEFINE_SPINLOCK(nmi_print_lock);

void die_nmi (struct pt_regs *regs, const char *msg)
{
        if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
            NOTIFY_STOP)
                return;

        spin_lock(&nmi_print_lock);
        /*
         * We are in trouble anyway, lets at least try
         * to get a message out.
         */
        bust_spinlocks(1);
        printk(KERN_EMERG "%s", msg);
        printk(" on CPU%d, eip %08lx, registers:\n",
                smp_processor_id(), regs->eip);
        show_registers(regs);
        printk(KERN_EMERG "console shuts up ...\n");
        console_silent();
        spin_unlock(&nmi_print_lock);
        bust_spinlocks(0);

        /* If we are in kernel we are probably nested up pretty bad
         * and might as well get out now while we still can.
         */
        if (!user_mode_vm(regs)) {
                current->thread.trap_no = 2;
                crash_kexec(regs);
        }

        do_exit(SIGSEGV);
}
static void default_do_nmi(struct pt_regs * regs)
{
        unsigned char reason = 0;

        /* Only the BSP gets external NMIs from the system. */
        if (!smp_processor_id())
                reason = get_nmi_reason();

        /* reason bits 7/6 come from the NMI status port:
         * 0x80 = memory parity/SERR, 0x40 = I/O channel check. */
        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                                                        == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog) {
                        nmi_watchdog_tick(regs);
                        return;
                }
#endif
                unknown_nmi_error(reason, regs);
                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered.
         */
        reassert_nmi();
}

static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
        return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

fastcall void do_nmi(struct pt_regs * regs, long error_code)
{
        int cpu;

        nmi_enter();

        cpu = smp_processor_id();

        ++nmi_count(cpu);

        if (!rcu_dereference(nmi_callback)(regs, cpu))
                default_do_nmi(regs);

        nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
        rcu_assign_pointer(nmi_callback, callback);
}
EXPORT_SYMBOL_GPL(set_nmi_callback);

void unset_nmi_callback(void)
{
        nmi_callback = dummy_nmi_callback;
}
EXPORT_SYMBOL_GPL(unset_nmi_callback);
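/*
 * Editorial sketch (not part of the original file): how a module might
 * hook the NMI path above.  set_nmi_callback() publishes the pointer
 * with rcu_assign_pointer() and do_nmi() reads it with rcu_dereference(),
 * so the swap needs no locking.  Returning nonzero from the callback
 * claims the NMI and skips default_do_nmi().  my_nmi/my_init/my_exit
 * are hypothetical names.
 */
#if 0
static int my_nmi(struct pt_regs *regs, int cpu)
{
        /* inspect regs here; return 0 to fall through to default_do_nmi() */
        return 0;
}

static int __init my_init(void)
{
        set_nmi_callback(my_nmi);
        return 0;
}

static void __exit my_exit(void)
{
        unset_nmi_callback();
}

module_init(my_init);
module_exit(my_exit);
#endif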
#ifdef CONFIG_KPROBES
fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
        /* This is an interrupt gate, because kprobes wants interrupts
           disabled. Normal trap handlers don't. */
        restore_interrupts(regs);
        do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}
#endif
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
{
        unsigned int condition;
        struct task_struct *tsk = current;

        get_debugreg(condition, 6);

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                        SIGTRAP) == NOTIFY_STOP)
                return;
        /* It's safe to allow irq's after DR6 has been saved */
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg[7])
                        goto clear_dr7;
        }

        if (regs->eflags & VM_MASK)
                goto debug_vm86;

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg[6] = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                /*
                 * We already checked v86 mode above, so we can
                 * check for kernel mode by just checking the CPL
                 * of CS.
                 */
                if (!user_mode(regs))
                        goto clear_TF_reenable;
        }

        /* Ok, finally something we can handle */
        send_sigtrap(tsk, regs, error_code);

        /* Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        set_debugreg(0, 7);
        return;

debug_vm86:
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->eflags &= ~TF_MASK;
        return;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short cwd, swd;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status. 0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit. We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (swd & ~cwd & 0x3f) {
        case 0x000: /* No unmasked exception */
                return;
        default:    /* Multiple exceptions */
                break;
        case 0x001: /* Invalid Op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}
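/*
 * Worked example (editorial, hypothetical values): with the zero-divide
 * exception unmasked in the control word, cwd = 0x037b, a divide by zero
 * sets ZE in the status word, swd = 0x0004.  Then
 * swd & ~cwd & 0x3f = 0x0004, so the switch above selects FPE_FLTDIV.
 */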
fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
        ignore_fpu_irq = 1;
        math_error((void __user *)regs->eip);
}
static void simd_math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register. Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}
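/*
 * Worked example (editorial, hypothetical value): mxcsr = 0x1d84, i.e.
 * all exceptions masked except divide-by-zero (mask bit 9 clear) and
 * the ZE flag (bit 2) set.  Then (mxcsr & 0x1f80) >> 7 = 0x3b, and
 * ~0x3b & (mxcsr & 0x3f) = ~0x3b & 0x04 = 0x004, so the switch above
 * selects FPE_FLTDIV.
 */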
fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
                                        long error_code)
{
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->eip);
        } else {
                /*
                 * Handle strange cache flush from user space exception
                 * in all other cases. This is undocumented behaviour.
                 */
                if (regs->eflags & VM_MASK) {
                        handle_vm86_fault((struct kernel_vm86_regs *)regs,
                                          error_code);
                        return;
                }
                current->thread.trap_no = 19;
                current->thread.error_code = error_code;
                die_if_kernel("cache flush denied", regs, error_code);
                force_sig(SIGSEGV, current);
        }
}

fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
                                        long error_code)
{
#if 0
        /* No need to warn about this any longer. */
        printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
fastcall void setup_x86_bogus_stack(unsigned char * stk)
{
        unsigned long *switch16_ptr, *switch32_ptr;
        struct pt_regs *regs;
        unsigned long stack_top, stack_bot;
        unsigned short iret_frame16_off;
        int cpu = smp_processor_id();
        /* reserve the space on 32bit stack for the magic switch16 pointer */
        memmove(stk, stk + 8, sizeof(struct pt_regs));
        switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
        regs = (struct pt_regs *)stk;
        /* now the switch32 on 16bit stack */
        stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
        stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
        switch32_ptr = (unsigned long *)(stack_top - 8);
        iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
        /* copy iret frame on 16bit stack */
        memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
        /* fill in the switch pointers */
        switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
        switch16_ptr[1] = __ESPFIX_SS;
        switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
                8 - CPU_16BIT_STACK_SIZE;
        switch32_ptr[1] = __KERNEL_DS;
}
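/*
 * Editorial sketch of the layout built above: the pt_regs frame is moved
 * 8 bytes down the 32-bit stack to make room for a two-word "switch16"
 * pointer (offset-into-16-bit-stack, __ESPFIX_SS) right after it, while
 * the top 8 bytes of the per-CPU 16-bit stack hold the mirror "switch32"
 * pointer (esp32, __KERNEL_DS) and the 20-byte iret frame (eip, cs,
 * eflags, esp, ss) is copied just below that.
 */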
fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
{
        unsigned long *switch32_ptr;
        unsigned char *stack16, *stack32;
        unsigned long stack_top, stack_bot;
        int len;
        int cpu = smp_processor_id();
        stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
        stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
        switch32_ptr = (unsigned long *)(stack_top - 8);
        /* copy the data from 16bit stack to 32bit stack */
        len = CPU_16BIT_STACK_SIZE - 8 - sp;
        stack16 = (unsigned char *)(stack_bot + sp);
        stack32 = (unsigned char *)
                (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
        memcpy(stack32, stack16, len);
        return stack32;
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        /* NB. 'clts' is done for us by Xen during virtual trap. */
        if (!tsk_used_math(tsk))
                init_fpu(tsk);
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
}

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
        printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
        printk(KERN_EMERG "killing %s.\n",current->comm);
        force_sig(SIGFPE,current);
        schedule();
}

#endif /* CONFIG_MATH_EMULATION */
#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
        __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

        /*
         * Update the IDT descriptor and reload the IDT so that
         * it uses the read-only mapped virtual address.
         */
        idt_descr.address = fix_to_virt(FIX_F00F_IDT);
        load_idt(&idt_descr);
}
#endif
/*
 * NB. All these are "trap gates" (i.e. events_mask isn't set) except
 * for those that specify <dpl>|4 in the second field.
 */
static trap_info_t trap_table[] = {
        {  0, 0, __KERNEL_CS, (unsigned long)divide_error		},
        {  1, 0|4, __KERNEL_CS, (unsigned long)debug			},
        {  3, 3|4, __KERNEL_CS, (unsigned long)int3			},
        {  4, 3, __KERNEL_CS, (unsigned long)overflow			},
        {  5, 0, __KERNEL_CS, (unsigned long)bounds			},
        {  6, 0, __KERNEL_CS, (unsigned long)invalid_op			},
        {  7, 0|4, __KERNEL_CS, (unsigned long)device_not_available	},
        {  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
        { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS		},
        { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present	},
        { 12, 0, __KERNEL_CS, (unsigned long)stack_segment		},
        { 13, 0, __KERNEL_CS, (unsigned long)general_protection	},
        { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault		},
        { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment		},
        { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error		},
        { 17, 0, __KERNEL_CS, (unsigned long)alignment_check		},
#ifdef CONFIG_X86_MCE
        { 18, 0, __KERNEL_CS, (unsigned long)machine_check		},
#endif
        { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error	},
        { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call	},
        {  0, 0, 0, 0							}
};
void __init trap_init(void)
{
        HYPERVISOR_set_trap_table(trap_table);

        if (cpu_has_fxsr) {
                /*
                 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
                 * Generates a compile-time "error: zero width for bit-field" if
                 * the alignment is wrong.
                 */
                struct fxsrAlignAssert {
                        int _:!(offsetof(struct task_struct,
                                        thread.i387.fxsave) & 15);
                };

                printk(KERN_INFO "Enabling fast FPU save and restore... ");
                set_in_cr4(X86_CR4_OSFXSR);
                printk("done.\n");
        }
        if (cpu_has_xmm) {
                printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
                                "support... ");
                set_in_cr4(X86_CR4_OSXMMEXCPT);
                printk("done.\n");
        }

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}
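/*
 * Editorial sketch (not part of the original file): the fxsrAlignAssert
 * trick above in isolation.  A named bit-field may not have zero width,
 * so "int _:!(expr)" compiles only while expr == 0 -- a C89-compatible
 * static assertion.  BUILD_BUG_IF is a hypothetical name.
 */
#if 0
#define BUILD_BUG_IF(cond) \
        struct { int _:!(cond); }

/* fails to compile ("zero width for bit-field") as soon as
 * sizeof(long) stops being 4: */
typedef BUILD_BUG_IF(sizeof(long) != 4) __long_is_32bit_check;
#endif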
void smp_trap_init(trap_info_t *trap_ctxt)
{
        trap_info_t *t = trap_table;

        for (t = trap_table; t->address; t++) {
                trap_ctxt[t->vector].flags = t->flags;
                trap_ctxt[t->vector].cs = t->cs;
                trap_ctxt[t->vector].address = t->address;
        }
}
static int __init kstack_setup(char *s)
{
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 1;
}
__setup("kstack=", kstack_setup);
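/*
 * Editorial note: booting with e.g. "kstack=64" makes the stack dumps
 * above print up to 64 words instead of the default 24.
 */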
#ifdef CONFIG_STACK_UNWIND
static int __init call_trace_setup(char *s)
{
        if (strcmp(s, "old") == 0)
                call_trace = -1;
        else if (strcmp(s, "both") == 0)
                call_trace = 0;
        else if (strcmp(s, "newfallback") == 0)
                call_trace = 1;
        else if (strcmp(s, "new") == 0)
                call_trace = 2;
        return 1;
}
__setup("call_trace=", call_trace_setup);
#endif