/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/arch_hooks.h>
#include <asm/kdebug.h>

#include <linux/module.h>

#include "mach_traps.h"
asmlinkage int system_call(void);

struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
		{ 0, 0 }, { 0, 0 } };

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;

#ifndef CONFIG_X86_NO_IDT
/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
#endif
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void fixup_4gb_segment(void);
asmlinkage void machine_check(void);

static int kstack_depth_to_print = 24;
ATOMIC_NOTIFIER_HEAD(i386die_chain);
int register_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&i386die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);

int unregister_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&i386die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier);
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
	return	p > (void *)tinfo &&
		p < (void *)tinfo + THREAD_SIZE - 3;
}
/*
 * Print CONFIG_STACK_BACKTRACE_COLS address/symbol entries per line.
 */
static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl,
					int printed)
{
#if CONFIG_STACK_BACKTRACE_COLS == 1
	printk(" [<%08lx>] ", addr);
#else
	printk(" <%08lx> ", addr);
#endif
	print_symbol("%s", addr);

	printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS;
	if (!printed)
		printk("\n");

	return printed;
}
static inline unsigned long print_context_stack(struct thread_info *tinfo,
				unsigned long *stack, unsigned long ebp,
				char *log_lvl)
{
	unsigned long addr;
	int printed = 0; /* nr of entries already printed on current line */

#ifdef CONFIG_FRAME_POINTER
	while (valid_stack_ptr(tinfo, (void *)ebp)) {
		addr = *(unsigned long *)(ebp + 4);
		printed = print_addr_and_symbol(addr, log_lvl, printed);
		ebp = *(unsigned long *)ebp;
	}
#else
	while (valid_stack_ptr(tinfo, stack)) {
		addr = *stack++;
		if (__kernel_text_address(addr))
			printed = print_addr_and_symbol(addr, log_lvl, printed);
	}
#endif
	return ebp;
}
static void show_trace_log_lvl(struct task_struct *task,
			       unsigned long *stack, char *log_lvl)
{
	unsigned long ebp;

	if (!task)
		task = current;

	if (task == current) {
		/* Grab ebp right from our regs */
		asm ("movl %%ebp, %0" : "=r" (ebp) : );
	} else {
		/* ebp is the last reg pushed by switch_to */
		ebp = *(unsigned long *) task->thread.esp;
	}

	while (1) {
		struct thread_info *context;
		context = (struct thread_info *)
			((unsigned long)stack & (~(THREAD_SIZE - 1)));
		ebp = print_context_stack(context, stack, ebp, log_lvl);
		stack = (unsigned long *)context->previous_esp;
		if (!stack)
			break;
		printk("%s =======================\n", log_lvl);
	}
}
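
/*
 * The masking above relies on kernel stacks being THREAD_SIZE bytes and
 * THREAD_SIZE aligned, with the thread_info at the base: with
 * THREAD_SIZE = 8192, for example, a stack address of 0xc12345f0 masks
 * down to the thread_info at 0xc1234000. previous_esp then lets the
 * walker hop from one stack (e.g. an interrupt stack) back to the one
 * it interrupted, printing a separator between the two.
 */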
void show_trace(struct task_struct *task, unsigned long * stack)
{
	show_trace_log_lvl(task, stack, "");
}
static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
			       char *log_lvl)
{
	unsigned long *stack;
	int i;

	if (esp == NULL) {
		if (task)
			esp = (unsigned long *)task->thread.esp;
		else
			esp = (unsigned long *)&esp;
	}

	stack = esp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if (i && ((i % 8) == 0))
			printk("\n%s       ", log_lvl);
		printk("%08lx ", *stack++);
	}
	printk("\n%sCall Trace:\n", log_lvl);
	show_trace_log_lvl(task, esp, log_lvl);
}
void show_stack(struct task_struct *task, unsigned long *esp)
{
	show_stack_log_lvl(task, esp, "");
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long stack;

	show_trace(current, &stack);
}

EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = 1;
	unsigned long esp;
	unsigned short ss;

	esp = (unsigned long) (&regs->esp);
	savesegment(ss, ss);
	if (user_mode_vm(regs)) {
		in_kernel = 0;
		esp = regs->esp;
		ss = regs->xss & 0xffff;
	}
	print_modules();
	printk(KERN_EMERG "CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\n"
			"EFLAGS: %08lx   (%s %.*s) \n",
		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
		print_tainted(), regs->eflags, system_utsname.release,
		(int)strcspn(system_utsname.version, " "),
		system_utsname.version);
	print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
	printk(KERN_EMERG "eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk(KERN_EMERG "esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
		regs->esi, regs->edi, regs->ebp, esp);
	printk(KERN_EMERG "ds: %04x   es: %04x   ss: %04x\n",
		regs->xds & 0xffff, regs->xes & 0xffff, ss);
	printk(KERN_EMERG "Process %s (pid: %d, threadinfo=%p task=%p)",
		current->comm, current->pid, current_thread_info(), current);
	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		u8 __user *eip;

		printk("\n" KERN_EMERG "Stack: ");
		show_stack_log_lvl(NULL, (unsigned long *)esp, KERN_EMERG);

		printk(KERN_EMERG "Code: ");

		eip = (u8 __user *)regs->eip - 43;
		for (i = 0; i < 64; i++, eip++) {
			unsigned char c;

			if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
				printk(" Bad EIP value.");
				break;
			}
			if (eip == (u8 __user *)regs->eip)
				printk("<%02x> ", c);
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}
static void handle_BUG(struct pt_regs *regs)
{
	unsigned short ud2, line;
	char *file, c;
	unsigned long eip = regs->eip;

	if (eip < PAGE_OFFSET)
		goto no_bug;
	if (__get_user(ud2, (unsigned short __user *)eip))
		goto no_bug;
	if (ud2 != 0x0b0f)
		goto no_bug;
	if (__get_user(line, (unsigned short __user *)(eip + 2)))
		goto bug;
	if (__get_user(file, (char * __user *)(eip + 4)) ||
	    (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
		file = "<bad filename>";

	printk(KERN_EMERG "------------[ cut here ]------------\n");
	printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);

no_bug:
	return;

bug:
	/* Here we know it was a BUG but file-n-line is unavailable */
	printk(KERN_EMERG "Kernel BUG\n");
}
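
/*
 * For reference, the layout decoded by handle_BUG() is emitted by the
 * i386 BUG() macro when CONFIG_DEBUG_BUGVERBOSE is set: the two-byte
 * ud2 opcode, then the line number (the word at eip + 2) and a pointer
 * to the file name (the long at eip + 4). A sketch of that encoding,
 * kept out of the build:
 */
#if 0
#define BUG()							\
	__asm__ __volatile__("ud2\n"				\
			     "\t.word %c0\n"			\
			     "\t.long %c1\n"			\
			     : : "i" (__LINE__), "i" (__FILE__))
#endif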
/* This is gone through when something in the kernel
 * has done something bad and is about to be terminated.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	static struct {
		spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock =			SPIN_LOCK_UNLOCKED,
		.lock_owner =		-1,
		.lock_owner_depth =	0
	};
	static int die_counter;
	unsigned long flags;

	if (die.lock_owner != raw_smp_processor_id()) {
		console_verbose();
		spin_lock_irqsave(&die.lock, flags);
		die.lock_owner = smp_processor_id();
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
	} else
		local_save_flags(flags);

	if (++die.lock_owner_depth < 3) {
		unsigned long esp;
		unsigned short ss;

		handle_BUG(regs);
		printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
		printk(KERN_EMERG "PREEMPT ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
		printk("DEBUG_PAGEALLOC");
#endif
		printk("\n");
		if (notify_die(DIE_OOPS, str, regs, err,
				current->thread.trap_no, SIGSEGV) !=
				NOTIFY_STOP) {
			show_registers(regs);
			/* Executive summary in case the oops scrolled away */
			esp = (unsigned long) (&regs->esp);
			savesegment(ss, ss);
			if (user_mode(regs)) {
				esp = regs->esp;
				ss = regs->xss & 0xffff;
			}
			printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
			print_symbol("%s", regs->eip);
			printk(" SS:ESP %04x:%08lx\n", ss, esp);
		} else
			regs = NULL;
	} else
		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");

	bust_spinlocks(0);
	die.lock_owner = -1;
	spin_unlock_irqrestore(&die.lock, flags);

	if (!regs)
		return;

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}
	do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode_vm(regs))
		die(str, regs, err);
}
static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
			      struct pt_regs * regs, long error_code,
			      siginfo_t *info)
{
	struct task_struct *tsk = current;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (regs->eflags & VM_MASK) {
		/* Traps from vm86 mode go to the vm86 helper first. */
		if (vm86 && !handle_vm86_trap((struct kernel_vm86_regs *) regs,
					      error_code, trapnr))
			return;
	} else if (!user_mode(regs)) {
		/* Kernel-mode traps: try the exception fixup table. */
		if (!fixup_exception(regs))
			die(str, regs, error_code);
		return;
	}

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
}
#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
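
/*
 * For illustration, DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
 * above expands to roughly the following handler (sketch only, kept
 * out of the build since the macro already generates the real one):
 */
#if 0
fastcall void do_invalid_TSS(struct pt_regs * regs, long error_code)
{
	if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code, 10, SIGSEGV)
						== NOTIFY_STOP)
		return;
	do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
}
#endif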
/*
 * lazy-check for CS validity on exec-shield binaries:
 *
 * the original non-exec stack patch was written by
 * Solar Designer <solar at openwall.com>. Thanks!
 */
static int
check_lazy_exec_limit(int cpu, struct pt_regs *regs, long error_code)
{
	struct desc_struct *desc1, *desc2;
	struct vm_area_struct *vma;
	unsigned long limit;

	if (current->mm == NULL)
		return 0;

	limit = -1UL;
	if (current->mm->context.exec_limit != -1UL) {
		limit = PAGE_SIZE;
		spin_lock(&current->mm->page_table_lock);
		for (vma = current->mm->mmap; vma; vma = vma->vm_next)
			if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
				limit = vma->vm_end;
		spin_unlock(&current->mm->page_table_lock);
		if (limit >= TASK_SIZE)
			limit = -1UL;
		current->mm->context.exec_limit = limit;
	}

	set_user_cs(&current->mm->context.user_cs, limit);

	desc1 = &current->mm->context.user_cs;
	desc2 = &get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS];

	if (desc1->a != desc2->a || desc1->b != desc2->b) {
		/*
		 * The CS was not in sync - reload it and retry the
		 * instruction. If the instruction still faults then
		 * we won't hit this branch next time around.
		 */
		if (print_fatal_signals >= 2) {
			printk("#GPF fixup (%ld[seg:%lx]) at %08lx, CPU#%d.\n",
				error_code, error_code/8, regs->eip,
				smp_processor_id());
			printk(" exec_limit: %08lx, user_cs: %08lx/%08lx, CPU_cs: %08lx/%08lx.\n",
				current->mm->context.exec_limit,
				desc1->a, desc1->b, desc2->a, desc2->b);
		}
		load_user_cs_desc(cpu, current->mm);
		return 1;
	}

	return 0;
}
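
/*
 * set_user_cs() above repacks the user code segment descriptor so its
 * limit tracks the highest executable mapping. A minimal sketch of the
 * packing it has to do, assuming the standard IA-32 descriptor layout
 * (limit[15:0] in the low word of the first dword, limit[19:16] in
 * bits 16-19 of the second, 4K granularity); the real helper lives in
 * the exec-shield headers:
 */
#if 0
static void pack_cs_limit(struct desc_struct *desc, unsigned long limit)
{
	unsigned long pages = (limit - 1) / PAGE_SIZE;	/* 4K-granular */

	desc->a = (desc->a & 0xffff0000) | (pages & 0xffff);
	desc->b = (desc->b & 0xfff0ffff) | (pages & 0xf0000);
}
#endif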
/*
 * The fixup code for errors in iret jumps to here (iret_exc). It loses
 * the original trap number and error code. The bogus trap 32 and error
 * code 0 are what the vanilla kernel delivers via:
 * DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
 *
 * In case of a general protection fault in the iret instruction, we
 * need to check for a lazy CS update for exec-shield.
 */
fastcall void do_iret_error(struct pt_regs *regs, long error_code)
{
	int ok = check_lazy_exec_limit(get_cpu(), regs, error_code);

	put_cpu();
	if (!ok && notify_die(DIE_TRAP, "iret exception", regs,
			      error_code, 32, SIGSEGV) != NOTIFY_STOP) {
		siginfo_t info;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = ILL_BADSTK;
		info.si_addr = 0;
		do_trap(32, SIGSEGV, "iret exception", 0, regs, error_code,
			&info);
	}
}
fastcall void __kprobes do_general_protection(struct pt_regs * regs,
					      long error_code)
{
	int cpu = get_cpu();
	int ok;

	ok = check_lazy_exec_limit(cpu, regs, error_code);
	put_cpu();
	if (ok)
		return;

	if (print_fatal_signals) {
		printk("#GPF(%ld[seg:%lx]) at %08lx, CPU#%d.\n",
			error_code, error_code/8, regs->eip,
			smp_processor_id());
		printk(" exec_limit: %08lx, user_cs: %08lx/%08lx.\n",
			current->mm->context.exec_limit,
			current->mm->context.user_cs.a,
			current->mm->context.user_cs.b);
	}

	if (regs->eflags & VM_MASK)
		goto gp_in_vm86;

	if (!user_mode(regs))
		goto gp_in_kernel;

	current->thread.error_code = error_code;
	current->thread.trap_no = 13;
	force_sig(SIGSEGV, current);
	return;

gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;

gp_in_kernel:
	if (!fixup_exception(regs)) {
		if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
			return;
		die("general protection fault", regs, error_code);
	}
}
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
		"to continue\n");
	printk(KERN_EMERG "You probably have a hardware problem with your RAM "
		"chips\n");

	/* Clear and disable the memory parity error line. */
	clear_mem_error(reason);
}

static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	clear_io_check_error(reason);
}

static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
	/* Might actually be able to figure out what the guilty party
	 * is. */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		reason, smp_processor_id());
	printk("Dazed and confused, but trying to continue\n");
	printk("Do you have a strange power saving mode enabled?\n");
}
static DEFINE_SPINLOCK(nmi_print_lock);

void die_nmi (struct pt_regs *regs, const char *msg)
{
	if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
	    NOTIFY_STOP)
		return;

	spin_lock(&nmi_print_lock);
	/*
	 * We are in trouble anyway, lets at least try
	 * to get a message out.
	 */
	bust_spinlocks(1);
	printk(KERN_EMERG "%s", msg);
	printk(" on CPU%d, eip %08lx, registers:\n",
		smp_processor_id(), regs->eip);
	show_registers(regs);
	printk(KERN_EMERG "console shuts up ...\n");
	console_silent();
	spin_unlock(&nmi_print_lock);
	bust_spinlocks(0);

	/* If we are in kernel we are probably nested up pretty bad
	 * and might as well get out now while we still can.
	 */
	if (!user_mode_vm(regs)) {
		current->thread.trap_no = 2;
		crash_kexec(regs);
	}

	do_exit(SIGSEGV);
}
static void default_do_nmi(struct pt_regs * regs)
{
	unsigned char reason = 0;

	/* Only the BSP gets external NMIs from the system. */
	if (!smp_processor_id())
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog) {
			nmi_watchdog_tick(regs);
			return;
		}
#endif
		unknown_nmi_error(reason, regs);
		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered.
	 */
	reassert_nmi();
}
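
/*
 * The "reason" byte above comes from system control port B (I/O port
 * 0x61) on the default machine subarchitecture: bit 7 reports a memory
 * parity/SERR error and bit 6 an I/O channel check, which is why the
 * code tests reason & 0x80 and reason & 0x40. A sketch of the helper,
 * following mach_traps.h:
 */
#if 0
static inline unsigned char get_nmi_reason(void)
{
	return inb(0x61);	/* bit 7: parity/SERR, bit 6: IOCHK */
}
#endif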
static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
	return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

fastcall void do_nmi(struct pt_regs * regs, long error_code)
{
	int cpu;

	nmi_enter();
	cpu = smp_processor_id();
	++nmi_count(cpu);
	if (!rcu_dereference(nmi_callback)(regs, cpu))
		default_do_nmi(regs);
	nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
	rcu_assign_pointer(nmi_callback, callback);
}
EXPORT_SYMBOL_GPL(set_nmi_callback);

void unset_nmi_callback(void)
{
	nmi_callback = dummy_nmi_callback;
}
EXPORT_SYMBOL_GPL(unset_nmi_callback);
#ifdef CONFIG_KPROBES
fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
	/* This is an interrupt gate, because kprobes wants interrupts
	   disabled. Normal trap handlers don't. */
	restore_interrupts(regs);
	do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}
#endif
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
{
	unsigned int condition;
	struct task_struct *tsk = current;

	get_debugreg(condition, 6);

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
					SIGTRAP) == NOTIFY_STOP)
		return;
	/* It's safe to allow irq's after DR6 has been saved */
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg[7])
			goto clear_dr7;
	}

	if (regs->eflags & VM_MASK)
		goto debug_vm86;

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg[6] = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		/*
		 * We already checked v86 mode above, so we can
		 * check for kernel mode by just checking the CPL
		 * of CS.
		 */
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	/* Ok, finally something we can handle */
	send_sigtrap(tsk, regs, error_code);

	/* Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	set_debugreg(0, 7);
	return;

debug_vm86:
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->eflags &= ~TF_MASK;
	return;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
		case 0x000: /* No unmasked exception */
			return;
		default:    /* Multiple exceptions */
			break;
		case 0x001: /* Invalid Op */
			/*
			 * swd & 0x240 == 0x040: Stack Underflow
			 * swd & 0x240 == 0x240: Stack Overflow
			 * User must clear the SF bit (0x40) if set
			 */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}
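
/*
 * Worked example of the masking above: with the default control word
 * (cwd = 0x037f) every exception is masked and no trap arrives here at
 * all. If a program unmasks zero-divide (clearing ZM, bit 2 of cwd)
 * and then divides by zero, ZE (bit 2) is set in swd, so
 * swd & ~cwd & 0x3f == 0x004 and the switch picks FPE_FLTDIV. The same
 * decode as a stand-alone helper (hypothetical, kept out of the build):
 */
#if 0
static int fpu_si_code(unsigned short cwd, unsigned short swd)
{
	switch (swd & ~cwd & 0x3f) {
	case 0x001: return FPE_FLTINV;	/* invalid operation */
	case 0x002:			/* denormal operand */
	case 0x010: return FPE_FLTUND;	/* underflow */
	case 0x004: return FPE_FLTDIV;	/* zero divide */
	case 0x008: return FPE_FLTOVF;	/* overflow */
	case 0x020: return FPE_FLTRES;	/* precision */
	default:    return __SI_FAULT;	/* none, or multiple at once */
	}
}
#endif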
fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
	ignore_fpu_irq = 1;
	math_error((void __user *)regs->eip);
}
static void simd_math_error(void __user *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
		case 0x000: /* No unmasked exception */
		default:    /* Multiple exceptions */
			break;
		case 0x001: /* Invalid Op */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}
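
/*
 * Worked example for the expression above: MXCSR keeps the exception
 * flags in bits 0-5 and the corresponding mask bits in bits 7-12, so
 * (mxcsr & 0x1f80) >> 7 lines the masks up with the flags. At the
 * reset value 0x1f80 everything is masked and the switch sees 0; if a
 * program clears ZM (bit 9) and divides by zero, ZE (bit 2) is set,
 * the expression yields 0x004 and si_code becomes FPE_FLTDIV.
 */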
fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
					  long error_code)
{
	if (cpu_has_xmm) {
		/* Handle SIMD FPU exceptions on PIII+ processors. */
		ignore_fpu_irq = 1;
		simd_math_error((void __user *)regs->eip);
	} else {
		/*
		 * Handle strange cache flush from user space exception
		 * in all other cases.  This is undocumented behaviour.
		 */
		if (regs->eflags & VM_MASK) {
			handle_vm86_fault((struct kernel_vm86_regs *)regs,
					  error_code);
			return;
		}
		current->thread.trap_no = 19;
		current->thread.error_code = error_code;
		die_if_kernel("cache flush denied", regs, error_code);
		force_sig(SIGSEGV, current);
	}
}
fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
					  long error_code)
{
#if 0
	/* No need to warn about this any longer. */
	printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
fastcall void setup_x86_bogus_stack(unsigned char * stk)
{
	unsigned long *switch16_ptr, *switch32_ptr;
	struct pt_regs *regs;
	unsigned long stack_top, stack_bot;
	unsigned short iret_frame16_off;
	int cpu = smp_processor_id();
	/* reserve the space on 32bit stack for the magic switch16 pointer */
	memmove(stk, stk + 8, sizeof(struct pt_regs));
	switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
	regs = (struct pt_regs *)stk;
	/* now the switch32 on 16bit stack */
	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
	stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
	switch32_ptr = (unsigned long *)(stack_top - 8);
	iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
	/* copy iret frame on 16bit stack */
	memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
	/* fill in the switch pointers */
	switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
	switch16_ptr[1] = __ESPFIX_SS;
	switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
		8 - CPU_16BIT_STACK_SIZE;
	switch32_ptr[1] = __KERNEL_DS;
}
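
/*
 * The 20 bytes copied above are the five 32-bit words of the
 * inter-privilege iret frame, in the order the CPU expects to pop them:
 *
 *	eip, cs, eflags, esp, ss
 *
 * Replaying this frame from the per-cpu 16-bit stack lets iret return
 * through a 16-bit stack segment without leaking the high bits of the
 * kernel esp.
 */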
fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
{
	unsigned long *switch32_ptr;
	unsigned char *stack16, *stack32;
	unsigned long stack_top, stack_bot;
	int len;
	int cpu = smp_processor_id();
	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
	stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
	switch32_ptr = (unsigned long *)(stack_top - 8);
	/* copy the data from 16bit stack to 32bit stack */
	len = CPU_16BIT_STACK_SIZE - 8 - sp;
	stack16 = (unsigned char *)(stack_bot + sp);
	stack32 = (unsigned char *)
		(switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
	memcpy(stack32, stack16, len);
	return stack32;
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	/* NB. 'clts' is done for us by Xen during virtual trap. */
	if (!tsk_used_math(tsk))
		init_fpu(tsk);
	restore_fpu(tsk);
	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
}
#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
	printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
	printk(KERN_EMERG "killing %s.\n", current->comm);
	force_sig(SIGFPE, current);
	schedule();
}

#endif /* CONFIG_MATH_EMULATION */
#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif
/*
 * NB. All these are "trap gates" (i.e. events_mask isn't set) except
 * for those that specify <dpl>|4 in the second field.
 */
static trap_info_t trap_table[] = {
	{  0, 0,   __KERNEL_CS, (unsigned long)divide_error },
	{  1, 0|4, __KERNEL_CS, (unsigned long)debug },
	{  3, 3|4, __KERNEL_CS, (unsigned long)int3 },
	{  4, 3,   __KERNEL_CS, (unsigned long)overflow },
	{  5, 0,   __KERNEL_CS, (unsigned long)bounds },
	{  6, 0,   __KERNEL_CS, (unsigned long)invalid_op },
	{  7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
	{  9, 0,   __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
	{ 10, 0,   __KERNEL_CS, (unsigned long)invalid_TSS },
	{ 11, 0,   __KERNEL_CS, (unsigned long)segment_not_present },
	{ 12, 0,   __KERNEL_CS, (unsigned long)stack_segment },
	{ 13, 0,   __KERNEL_CS, (unsigned long)general_protection },
	{ 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
	{ 15, 0,   __KERNEL_CS, (unsigned long)fixup_4gb_segment },
	{ 16, 0,   __KERNEL_CS, (unsigned long)coprocessor_error },
	{ 17, 0,   __KERNEL_CS, (unsigned long)alignment_check },
#ifdef CONFIG_X86_MCE
	{ 18, 0,   __KERNEL_CS, (unsigned long)machine_check },
#endif
	{ 19, 0,   __KERNEL_CS, (unsigned long)simd_coprocessor_error },
	{ SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call },
	{  0, 0, 0, 0 }
};
void __init trap_init(void)
{
	HYPERVISOR_set_trap_table(trap_table);

	if (cpu_has_fxsr) {
		/*
		 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
		 * Generates a compile-time "error: zero width for bit-field" if
		 * the alignment is wrong.
		 */
		struct fxsrAlignAssert {
			int _:!(offsetof(struct task_struct,
					thread.i387.fxsave) & 15);
		};

		printk(KERN_INFO "Enabling fast FPU save and restore... ");
		set_in_cr4(X86_CR4_OSFXSR);
		printk("done.\n");
	}
	if (cpu_has_xmm) {
		printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
				"support... ");
		set_in_cr4(X86_CR4_OSXMMEXCPT);
		printk("done.\n");
	}

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}
void smp_trap_init(trap_info_t *trap_ctxt)
{
	trap_info_t *t;

	for (t = trap_table; t->address; t++) {
		trap_ctxt[t->vector].flags = t->flags;
		trap_ctxt[t->vector].cs = t->cs;
		trap_ctxt[t->vector].address = t->address;
	}
}
static int __init kstack_setup(char *s)
{
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("kstack=", kstack_setup);
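
/* e.g. booting with "kstack=48" makes an oops dump 48 words of stack. */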