/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/version.h>
#include <linux/kprobes.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>

#include <asm/arch_hooks.h>
#include <asm/kdebug.h>

#include <linux/irq.h>
#include <linux/module.h>

#include "mach_traps.h"
asmlinkage int system_call(void);
asmlinkage void lcall7(void);
asmlinkage void lcall27(void);
struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
		{ 0, 0 }, { 0, 0 } };
/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;
/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
static int kstack_depth_to_print = 24;
struct notifier_block *i386die_chain;
static spinlock_t die_notifier_lock = SPIN_LOCK_UNLOCKED;
int register_die_notifier(struct notifier_block *nb)
{
        int err = 0;
        unsigned long flags;

        spin_lock_irqsave(&die_notifier_lock, flags);
        err = notifier_chain_register(&i386die_chain, nb);
        spin_unlock_irqrestore(&die_notifier_lock, flags);
        return err;
}
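/*
 * Usage sketch (illustrative, not part of this file): a debugger or
 * fault-monitoring module hooks the die chain like this.  The handler
 * name and body below are hypothetical; 'val' is one of the DIE_*
 * events and 'data' points at the struct die_args built by notify_die()
 * (see <asm/kdebug.h>).
 *
 *	static int my_die_handler(struct notifier_block *self,
 *				  unsigned long val, void *data)
 *	{
 *		struct die_args *args = (struct die_args *)data;
 *
 *		if (val == DIE_OOPS)
 *			printk("oops: %s at eip %08lx\n",
 *			       args->str, args->regs->eip);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_die_handler,
 *	};
 *	...
 *	register_die_notifier(&my_nb);
 */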
static int valid_stack_ptr(struct task_struct *task, void *p)
{
        if (p <= (void *)task->thread_info)
                return 0;
        if (kstack_end(p))
                return 0;
        return 1;
}
#ifdef CONFIG_FRAME_POINTER
static void print_context_stack(struct task_struct *task, unsigned long *stack,
                         unsigned long ebp)
{
        unsigned long addr;

        while (valid_stack_ptr(task, (void *)ebp)) {
                addr = *(unsigned long *)(ebp + 4);
                printk(" [<%08lx>] ", addr);
                print_symbol("%s", addr);
                printk("\n");
                ebp = *(unsigned long *)ebp;
        }
}
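/*
 * Frame layout assumed by the walk above (standard gcc i386 frames,
 * hence the CONFIG_FRAME_POINTER dependency):
 *
 *	[ebp + 4]  return address into the caller
 *	[ebp + 0]  caller's saved ebp, i.e. the next frame in the chain
 *
 * so *(ebp + 4) is the text address that gets printed and *ebp moves
 * one frame up; valid_stack_ptr() terminates the walk once the chain
 * leaves the task's kernel stack.
 */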
#else
static void print_context_stack(struct task_struct *task, unsigned long *stack,
                         unsigned long ebp)
{
        unsigned long addr;

        while (!kstack_end(stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr)) {
                        printk(" [<%08lx>]", addr);
                        print_symbol(" %s", addr);
                        printk("\n");
                }
        }
}
#endif
void show_trace(struct task_struct *task, unsigned long * stack)
{
        unsigned long ebp;

        if (!task)
                task = current;

        if (!valid_stack_ptr(task, stack)) {
                printk("Stack pointer is garbage, not printing trace\n");
                return;
        }

        if (task == current) {
                /* Grab ebp right from our regs */
                asm ("movl %%ebp, %0" : "=r" (ebp) : );
        } else {
                /* ebp is the last reg pushed by switch_to */
                ebp = *(unsigned long *) task->thread.esp;
        }

        while (1) {
                struct thread_info *context;
                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                print_context_stack(task, stack, ebp);
                stack = (unsigned long*)context->previous_esp;
                if (!stack)
                        break;
                printk(" =======================\n");
        }
}
void show_stack(struct task_struct *task, unsigned long *esp)
{
        unsigned long *stack;
        int i;

        if (esp == NULL) {
                if (task)
                        esp = (unsigned long*)task->thread.esp;
                else
                        esp = (unsigned long *)&esp;
        }

        stack = esp;
        for(i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n       ");
                printk("%08lx ", *stack++);
        }
        printk("\nCall Trace:\n");
        show_trace(task, esp);
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, &stack);
}

EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = 1;
        unsigned long esp;
        unsigned short ss;

        esp = (unsigned long) (&regs->esp);
        ss = __KERNEL_DS;
        if (regs->xcs & 3) {
                in_kernel = 0;
                esp = regs->esp;
                ss = regs->xss & 0xffff;
        }

        printk("CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\nEFLAGS: %08lx"
                        "   (%s) \n",
                smp_processor_id(), 0xffff & regs->xcs, regs->eip,
                print_tainted(), regs->eflags, UTS_RELEASE);
        print_symbol("EIP is at %s\n", regs->eip);
        printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk("ds: %04x   es: %04x   ss: %04x\n",
                regs->xds & 0xffff, regs->xes & 0xffff, ss);
        printk("Process %s (pid: %d, threadinfo=%p task=%p)",
                current->comm, current->pid, current_thread_info(), current);
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                u8 *eip;

                printk("\nStack: ");
                show_stack(NULL, (unsigned long*)esp);

                printk("Code: ");

                eip = (u8 *)regs->eip - 43;
                for (i = 0; i < 64; i++, eip++) {
                        unsigned char c;

                        if (eip < (u8 *)PAGE_OFFSET || __get_user(c, eip)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (eip == (u8 *)regs->eip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}
static void handle_BUG(struct pt_regs *regs)
{
        unsigned short ud2;
        unsigned short line;
        char *file;
        char c;
        unsigned long eip;

        if (regs->xcs & 3)
                goto no_bug;            /* Not in kernel */

        eip = regs->eip;

        if (eip < PAGE_OFFSET)
                goto no_bug;
        if (__get_user(ud2, (unsigned short *)eip))
                goto no_bug;
        if (ud2 != 0x0b0f)
                goto no_bug;
        if (__get_user(line, (unsigned short *)(eip + 2)))
                goto bug;
        if (__get_user(file, (char **)(eip + 4)) ||
                (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
                file = "<bad filename>";

        printk("------------[ cut here ]------------\n");
        printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);

no_bug:
        return;

        /* Here we know it was a BUG but file-n-line is unavailable */
bug:
        printk("Kernel BUG\n");
}
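/*
 * For reference, the probes above match how the i386 BUG() macro emits
 * its trap (see <asm/bug.h>, with CONFIG_DEBUG_BUGVERBOSE): the
 * instruction stream at the faulting eip looks like
 *
 *	0f 0b			ud2 - invalid opcode, always faults
 *	.word __LINE__		16-bit line number at eip + 2
 *	.long __FILE__		filename pointer at eip + 4
 *
 * which is why a ud2 opcode (0x0b0f read little-endian) is required and
 * the line and file are fetched at those fixed offsets.
 */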
void die(const char * str, struct pt_regs * regs, long err)
{
        static struct {
                spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock =                 SPIN_LOCK_UNLOCKED,
                .lock_owner =           -1,
                .lock_owner_depth =     0
        };
        static int die_counter;

        if (die.lock_owner != smp_processor_id()) {
                console_verbose();
                spin_lock_irq(&die.lock);
                die.lock_owner = smp_processor_id();
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
        }

        if (++die.lock_owner_depth < 3) {
                int nl = 0;
                handle_BUG(regs);
                printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
                printk("PREEMPT ");
                nl = 1;
#endif
#ifdef CONFIG_SMP
                printk("SMP ");
                nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
                printk("DEBUG_PAGEALLOC");
                nl = 1;
#endif
                if (nl)
                        printk("\n");
                notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
                show_registers(regs);
        } else
                printk(KERN_ERR "Recursive die() failure, output suppressed\n");

        bust_spinlocks(0);
        die.lock_owner = -1;
        spin_unlock_irq(&die.lock);
        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(5 * HZ);
                panic("Fatal exception");
        }
        do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs))
                die(str, regs, err);
}
static inline unsigned long get_cr2(void)
{
        unsigned long address;

        /* get the address */
        __asm__("movl %%cr2,%0":"=r" (address));
        return address;
}
static inline void do_trap(int trapnr, int signr, char *str, int vm86,
                           struct pt_regs * regs, long error_code, siginfo_t *info)
{
        if (regs->eflags & VM_MASK) {
                if (vm86)
                        goto vm86_trap;
                goto trap_signal;
        }

        if (!(regs->xcs & 3))
                goto kernel_trap;

        trap_signal: {
                struct task_struct *tsk = current;
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        kernel_trap: {
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }

        vm86_trap: {
                int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
                if (ret) goto trap_signal;
                return;
        }
}
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
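/*
 * For reference, a sketch of what one instantiation below expands to:
 * DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) becomes roughly
 *
 *	asmlinkage void do_invalid_TSS(struct pt_regs * regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code,
 *						10, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
 *	}
 *
 * The _INFO variants additionally fill in a siginfo_t, and the VM86
 * variants pass vm86 = 1 so do_trap() routes vm86-mode faults through
 * handle_vm86_trap().
 */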
DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &current->thread;

        /*
         * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
         * invalid offset set (the LAZY one) and the faulting thread has
         * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
         * and we set the offset field correctly. Then we let the CPU to
         * restart the faulting instruction.
         */
        if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
            thread->io_bitmap_ptr) {
                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
                       thread->io_bitmap_max);
                /*
                 * If the previously set map was extending to higher ports
                 * than the current one, pad extra space with 0xff (no access).
                 */
                if (thread->io_bitmap_max < tss->io_bitmap_max)
                        memset((char *) tss->io_bitmap +
                                thread->io_bitmap_max, 0xff,
                                tss->io_bitmap_max - thread->io_bitmap_max);
                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->io_bitmap_base = IO_BITMAP_OFFSET;
                put_cpu();
                return;
        }
        put_cpu();

        if (regs->eflags & VM_MASK)
                goto gp_in_vm86;

        if (!(regs->xcs & 3))
                goto gp_in_kernel;

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;
        force_sig(SIGSEGV, current);
        return;

gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;

gp_in_kernel:
        if (!fixup_exception(regs)) {
                if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
        printk("You probably have a hardware problem with your RAM chips\n");

        /* Clear and disable the memory parity error line. */
        clear_mem_error(reason);
}
static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
        unsigned long i;

        printk("NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        i = 2000;
        while (--i) udelay(1000);
        reason &= ~8;
        outb(reason, 0x61);
}
static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
        /* Might actually be able to figure out what the guilty party
         * is. */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                reason, smp_processor_id());
        printk("Dazed and confused, but trying to continue\n");
        printk("Do you have a strange power saving mode enabled?\n");
}
static spinlock_t nmi_print_lock = SPIN_LOCK_UNLOCKED;

void die_nmi (struct pt_regs *regs, const char *msg)
{
        spin_lock(&nmi_print_lock);
        /*
         * We are in trouble anyway, lets at least try
         * to get a message out.
         */
        bust_spinlocks(1);
        printk(msg);
        printk(" on CPU%d, eip %08lx, registers:\n",
                smp_processor_id(), regs->eip);
        show_registers(regs);
        printk("console shuts up ...\n");
        console_silent();
        spin_unlock(&nmi_print_lock);
        bust_spinlocks(0);
        do_exit(SIGSEGV);
}
static void default_do_nmi(struct pt_regs * regs)
{
        unsigned char reason = get_nmi_reason();
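        /*
         * 'reason' is the ISA NMI status/control port (0x61): bit 7
         * reports a memory parity/SERR NMI, bit 6 an I/O channel check,
         * so (reason & 0xc0) == 0 below means no documented source
         * reported this NMI.
         */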
        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
                                                        == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog) {
                        nmi_watchdog_tick(regs);
                        return;
                }
#endif
                unknown_nmi_error(reason, regs);
                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
                return;
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered.
         */
        reassert_nmi();
}
static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
        return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
        int cpu;

        nmi_enter();

        cpu = smp_processor_id();
        ++nmi_count(cpu);

        if (!nmi_callback(regs, cpu))
                default_do_nmi(regs);

        nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
        nmi_callback = callback;
}

void unset_nmi_callback(void)
{
        nmi_callback = dummy_nmi_callback;
}
#ifdef CONFIG_KPROBES
asmlinkage int do_int3(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return 1;
        /* This is an interrupt gate, because kprobes wants interrupts
           disabled. Normal trap handlers don't. */
        restore_interrupts(regs);
        do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
        return 0;
}
#endif
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
asmlinkage void do_debug(struct pt_regs * regs, long error_code)
{
        unsigned int condition;
        struct task_struct *tsk = current;
        siginfo_t info;

        __asm__ __volatile__("movl %%db6,%0" : "=r" (condition));

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                        SIGTRAP) == NOTIFY_STOP)
                return;
        /* It's safe to allow irq's after DR6 has been saved */
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg[7])
                        goto clear_dr7;
        }

        if (regs->eflags & VM_MASK)
                goto debug_vm86;

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg[6] = condition;

        /* Mask out spurious TF errors due to lazy TF clearing */
        if (condition & DR_STEP) {
                /*
                 * The TF error should be masked out only if the current
                 * process is not traced and if the TRAP flag has been set
                 * previously by a tracing process (condition detected by
                 * the PT_DTRACE flag); remember that the i386 TRAP flag
                 * can be modified by the process itself in user mode,
                 * allowing programs to debug themselves without the ptrace()
                 * interface.
                 */
                if ((regs->xcs & 3) == 0)
                        goto clear_TF_reenable;
                if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
                        goto clear_TF;
        }

        /* Ok, finally something we can handle */
        tsk->thread.trap_no = 1;
        tsk->thread.error_code = error_code;
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;

        /* If this is a kernel mode trap, save the user PC on entry to
         * the kernel, that's what the debugger can make sense of.
         */
        info.si_addr = ((regs->xcs & 3) == 0) ? (void __user *)tsk->thread.eip
                                              : (void __user *)regs->eip;
        force_sig_info(SIGTRAP, &info, tsk);

        /* Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        __asm__("movl %0,%%db7"
                : /* no output */
                : "r" (0));
        return;

debug_vm86:
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
clear_TF:
        regs->eflags &= ~TF_MASK;
        return;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short cwd, swd;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status. 0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit. We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                case 0x041: /* Stack Fault */
                case 0x241: /* Stack Fault | Direction */
                        info.si_code = FPE_FLTINV;
                        /* Should we clear the SF or let user space do it ???? */
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}
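/*
 * Worked example for the switch above: a divide by zero sets ZE (0x004)
 * in the status word. With the power-up control word 0x037f every
 * exception is masked and no trap is taken at all; if user space clears
 * the ZM bit (bit 2) in cwd, then ((~cwd) & swd & 0x3f) == 0x004 and
 * the process gets SIGFPE with si_code FPE_FLTDIV.
 */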
asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
        ignore_fpu_irq = 1;
        math_error((void __user *)regs->eip);
}
void simd_math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register. Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}
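/*
 * Worked example for the mxcsr decoding above: unmasking divide by zero
 * clears ZM (bit 9), so after such a fault mxcsr might read 0x1d84
 * (ZE flag 0x004 set). Then (mxcsr & 0x1f80) >> 7 == 0x3b, and
 * ~0x3b & (mxcsr & 0x3f) == 0x004, selecting FPE_FLTDIV exactly as in
 * the x87 case.
 */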
asmlinkage void do_simd_coprocessor_error(struct pt_regs * regs,
                                          long error_code)
{
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->eip);
        } else {
                /*
                 * Handle strange cache flush from user space exception
                 * in all other cases. This is undocumented behaviour.
                 */
                if (regs->eflags & VM_MASK) {
                        handle_vm86_fault((struct kernel_vm86_regs *)regs,
                                          error_code);
                        return;
                }
                die_if_kernel("cache flush denied", regs, error_code);
                current->thread.trap_no = 19;
                current->thread.error_code = error_code;
                force_sig(SIGSEGV, current);
        }
}
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs,
                                          long error_code)
{
#if 0
        /* No need to warn about this any longer. */
        printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        clts();         /* Allow maths ops (or we recurse) */
        if (!tsk->used_math)
                init_fpu(tsk);
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
}
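/*
 * Background for the clts() above: switching away from a task that used
 * the FPU leaves CR0.TS set, so the first FPU instruction in the new
 * context raises #NM (trap 7, device_not_available), whose handler
 * lands here. That is what makes the FPU switch lazy: state is only
 * reloaded for tasks that actually execute FPU instructions.
 */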
#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
        printk("math-emulation not enabled and no coprocessor found.\n");
        printk("killing %s.\n",current->comm);
        force_sig(SIGFPE,current);
        schedule();
}

#endif /* CONFIG_MATH_EMULATION */
#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
        __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

        /*
         * Update the IDT descriptor and reload the IDT so that
         * it uses the read-only mapped virtual address.
         */
        idt_descr.address = fix_to_virt(FIX_F00F_IDT);
        __asm__ __volatile__("lidt %0" : : "m" (idt_descr));
}
#endif
#define _set_gate(gate_addr,type,dpl,addr,seg) \
do { \
  int __d0, __d1; \
  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
        "movw %4,%%dx\n\t" \
        "movl %%eax,%0\n\t" \
        "movl %%edx,%1" \
        :"=m" (*((long *) (gate_addr))), \
         "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
        :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
         "3" ((char *) (addr)),"2" ((seg) << 16)); \
} while (0)
/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,14,0,addr,__KERNEL_CS);
}

/*
 * This routine sets up an interrupt gate at descriptor privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n, 14, 3, addr, __KERNEL_CS);
}

static void __init set_trap_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,15,0,addr,__KERNEL_CS);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,15,3,addr,__KERNEL_CS);
}

static void __init set_call_gate(void *a, void *addr)
{
        _set_gate(a,12,3,addr,__KERNEL_CS);
}

static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
{
        _set_gate(idt_table+n,5,0,0,(gdt_entry<<3));
}
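/*
 * Gate types used above: 14 is a 32-bit interrupt gate (IF cleared on
 * entry), 15 a 32-bit trap gate (IF left alone), 12 a call gate and 5 a
 * task gate. The "system" variants differ only in using DPL 3, which
 * lets user mode reach the vector via int $n or lcall.
 */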
void __init trap_init(void)
{
#ifdef CONFIG_EISA
        if (isa_readl(0x0FFFD9) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
                EISA_bus = 1;
        }
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        init_apic_mappings();
#endif

        set_trap_gate(0,&divide_error);
        set_intr_gate(1,&debug);
        set_intr_gate(2,&nmi);
        set_system_intr_gate(3, &int3); /* int3-5 can be called from all */
        set_system_gate(4,&overflow);
        set_system_gate(5,&bounds);
        set_trap_gate(6,&invalid_op);
        set_trap_gate(7,&device_not_available);
        set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
        set_trap_gate(9,&coprocessor_segment_overrun);
        set_trap_gate(10,&invalid_TSS);
        set_trap_gate(11,&segment_not_present);
        set_trap_gate(12,&stack_segment);
        set_trap_gate(13,&general_protection);
        set_intr_gate(14,&page_fault);
        set_trap_gate(15,&spurious_interrupt_bug);
        set_trap_gate(16,&coprocessor_error);
        set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
        set_trap_gate(18,&machine_check);
#endif
        set_trap_gate(19,&simd_coprocessor_error);

        set_system_gate(SYSCALL_VECTOR,&system_call);

        /*
         * default LDT is a single-entry callgate to lcall7 for iBCS
         * and a callgate to lcall27 for Solaris/x86 binaries
         */
        set_call_gate(&default_ldt[0],lcall7);
        set_call_gate(&default_ldt[4],lcall27);

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();

        trap_init_hook();
}