/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/arch_hooks.h>
#include <asm/kdebug.h>

#include <linux/irq.h>
#include <linux/module.h>

#include "mach_traps.h"
asmlinkage int system_call(void);

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;
/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void fixup_4gb_segment(void);
asmlinkage void machine_check(void);
static int kstack_depth_to_print = 24;
struct notifier_block *i386die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);
int register_die_notifier(struct notifier_block *nb)
{
	int err = 0;
	unsigned long flags;

	spin_lock_irqsave(&die_notifier_lock, flags);
	err = notifier_chain_register(&i386die_chain, nb);
	spin_unlock_irqrestore(&die_notifier_lock, flags);
	return err;
}
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
	/* p must lie within this task's stack; the "- 3" keeps the
	   4-byte load at p from running off the end of it. */
	return	p > (void *)tinfo &&
		p < (void *)tinfo + THREAD_SIZE - 3;
}
static inline unsigned long print_context_stack(struct thread_info *tinfo,
				unsigned long *stack, unsigned long ebp)
{
	unsigned long addr;

#ifdef	CONFIG_FRAME_POINTER
	while (valid_stack_ptr(tinfo, (void *)ebp)) {
		addr = *(unsigned long *)(ebp + 4);
		printk(" [<%08lx>] ", addr);
		print_symbol("%s", addr);
		printk("\n");
		ebp = *(unsigned long *)ebp;
	}
#else
	while (valid_stack_ptr(tinfo, stack)) {
		addr = *stack++;
		if (__kernel_text_address(addr)) {
			printk(" [<%08lx>]", addr);
			print_symbol(" %s", addr);
			printk("\n");
		}
	}
#endif
	return ebp;
}
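/*
 * For reference, the frame layout the CONFIG_FRAME_POINTER walk above
 * relies on is the standard gcc i386 prologue ("pushl %ebp; movl %esp,
 * %ebp"):
 *
 *	[ebp + 4]	return address into the caller
 *	[ebp + 0]	caller's saved ebp (the next link in the chain)
 *
 * hence *(ebp + 4) is printed and *ebp follows the chain outward until
 * valid_stack_ptr() reports that it has left this task's stack.
 */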
void show_trace(struct task_struct *task, unsigned long * stack)
{
	unsigned long ebp;

	if (!task)
		task = current;

	if (task == current) {
		/* Grab ebp right from our regs */
		asm ("movl %%ebp, %0" : "=r" (ebp) : );
	} else {
		/* ebp is the last reg pushed by switch_to */
		ebp = *(unsigned long *) task->thread.esp;
	}

	while (1) {
		struct thread_info *context;
		context = (struct thread_info *)
			((unsigned long)stack & (~(THREAD_SIZE - 1)));
		ebp = print_context_stack(context, stack, ebp);
		stack = (unsigned long*)context->previous_esp;
		if (!stack)
			break;
		printk(" =======================\n");
	}
}
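/*
 * Note on the loop in show_trace(): with separate interrupt stacks
 * (e.g. CONFIG_4KSTACKS), thread_info->previous_esp links each stack
 * back to the stack it interrupted, so the walk continues across
 * stacks until a NULL link ends it; the "=====" line marks each
 * stack transition.
 */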
void show_stack(struct task_struct *task, unsigned long *esp)
{
	unsigned long *stack;
	int i;

	if (esp == NULL) {
		if (task)
			esp = (unsigned long*)task->thread.esp;
		else
			esp = (unsigned long *)&esp;
	}

	stack = esp;
	for(i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if (i && ((i % 8) == 0))
			printk("\n       ");
		printk("%08lx ", *stack++);
	}
	printk("\nCall Trace:\n");
	show_trace(task, esp);
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long stack;

	show_trace(current, &stack);
}

EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = 1;
	unsigned long esp;
	unsigned short ss;

	esp = (unsigned long) (&regs->esp);
	ss = __KERNEL_DS;
	if (regs->xcs & 2) {
		in_kernel = 0;
		esp = regs->esp;
		ss = regs->xss & 0xffff;
	}
	print_modules();
	printk("CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\nEFLAGS: %08lx"
			"   (%s) \n",
		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
		print_tainted(), regs->eflags, system_utsname.release);
	print_symbol("EIP is at %s\n", regs->eip);
	printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
		regs->esi, regs->edi, regs->ebp, esp);
	printk("ds: %04x   es: %04x   ss: %04x\n",
		regs->xds & 0xffff, regs->xes & 0xffff, ss);
	printk("Process %s (pid: %d, threadinfo=%p task=%p)",
		current->comm, current->pid, current_thread_info(), current);
	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		u8 *eip;

		printk("\nStack: ");
		show_stack(NULL, (unsigned long*)esp);

		printk("Code: ");

		eip = (u8 *)regs->eip - 43;
		for (i = 0; i < 64; i++, eip++) {
			unsigned char c;

			if (eip < (u8 *)PAGE_OFFSET || __get_user(c, eip)) {
				printk(" Bad EIP value.");
				break;
			}
			if (eip == (u8 *)regs->eip)
				printk("<%02x> ", c);
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}
static void handle_BUG(struct pt_regs *regs)
{
	unsigned short ud2;
	unsigned short line;
	char *file;
	char c;
	unsigned long eip;

	if (regs->xcs & 2)
		goto no_bug;		/* Not in kernel */

	eip = regs->eip;

	if (eip < PAGE_OFFSET)
		goto no_bug;
	if (__get_user(ud2, (unsigned short *)eip))
		goto no_bug;
	if (ud2 != 0x0b0f)
		goto no_bug;
	if (__get_user(line, (unsigned short *)(eip + 2)))
		goto bug;
	if (__get_user(file, (char **)(eip + 4)) ||
		(unsigned long)file < PAGE_OFFSET || __get_user(c, file))
		file = "<bad filename>";

	printk("------------[ cut here ]------------\n");
	printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);

no_bug:
	return;

	/* Here we know it was a BUG but file-n-line is unavailable */
bug:
	printk("Kernel BUG\n");
}
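/*
 * The byte sequence handle_BUG() decodes is what the i386 BUG() macro
 * emits when CONFIG_DEBUG_BUGVERBOSE is set (a sketch of the layout,
 * not the exact asm):
 *
 *	ud2			; 0x0f 0x0b, read back as the word 0x0b0f
 *	.word	__LINE__	; at eip + 2
 *	.long	__FILE__	; at eip + 4
 */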
void die(const char * str, struct pt_regs * regs, long err)
{
	static struct {
		spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock =			SPIN_LOCK_UNLOCKED,
		.lock_owner =		-1,
		.lock_owner_depth =	0
	};
	static int die_counter;

	if (die.lock_owner != _smp_processor_id()) {
		console_verbose();
		spin_lock_irq(&die.lock);
		die.lock_owner = smp_processor_id();
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
	}

	if (++die.lock_owner_depth < 3) {
		int nl = 0;
		handle_BUG(regs);
		printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
		printk("PREEMPT ");
		nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
		printk("DEBUG_PAGEALLOC");
		nl = 1;
#endif
		if (nl)
			printk("\n");
		notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
		show_registers(regs);
	} else
		printk(KERN_ERR "Recursive die() failure, output suppressed\n");

	bust_spinlocks(0);
	die.lock_owner = -1;
	spin_unlock_irq(&die.lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(5 * HZ);
		panic("Fatal exception");
	}
	do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (!(regs->eflags & VM_MASK) && !(2 & regs->xcs))
		die(str, regs, err);
}
static void do_trap(int trapnr, int signr, char *str, int vm86,
		    struct pt_regs * regs, long error_code, siginfo_t *info)
{
	if (regs->eflags & VM_MASK) {
		if (vm86)
			goto vm86_trap;
		goto trap_signal;
	}

	if (!(regs->xcs & 2))
		goto kernel_trap;

trap_signal: {
		struct task_struct *tsk = current;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

kernel_trap: {
		if (!fixup_exception(regs))
			die(str, regs, error_code);
		return;
	}

vm86_trap: {
		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
		if (ret) goto trap_signal;
		return;
	}
}
#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
#ifdef CONFIG_X86_MCE
DO_ERROR(18, SIGBUS, "machine check", machine_check)
#endif
/*
 * the original non-exec stack patch was written by
 * Solar Designer <solar at openwall.com>. Thanks!
 */
fastcall void do_general_protection(struct pt_regs * regs, long error_code)
{
	/*
	 * If we trapped on an LDT access then ensure that the default_ldt is
	 * loaded, if nothing else. We load default_ldt lazily because LDT
	 * switching costs time and many applications don't need it.
	 */
	if (unlikely((error_code & 6) == 4)) {
		unsigned long ldt;

		__asm__ __volatile__ ("sldt %0" : "=r" (ldt));
		if (ldt == 0) {
			xen_set_ldt((unsigned long)&default_ldt[0], 5);
			return;
		}
	}

	if (regs->eflags & VM_MASK)
		goto gp_in_vm86;

	if (!(regs->xcs & 2))
		goto gp_in_kernel;

	/*
	 * lazy-check for CS validity on exec-shield binaries:
	 */
	if (current->mm) {
		int cpu = smp_processor_id();
		struct desc_struct *desc1, *desc2;
		struct vm_area_struct *vma;
		unsigned long limit = 0;

		spin_lock(&current->mm->page_table_lock);
		for (vma = current->mm->mmap; vma; vma = vma->vm_next)
			if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
				limit = vma->vm_end;
		spin_unlock(&current->mm->page_table_lock);

		current->mm->context.exec_limit = limit;
		set_user_cs(&current->mm->context.user_cs, limit);

		desc1 = &current->mm->context.user_cs;
		desc2 = &get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS];

		/*
		 * The CS was not in sync - reload it and retry the
		 * instruction. If the instruction still faults then
		 * we won't hit this branch next time around.
		 */
		if (desc1->a != desc2->a || desc1->b != desc2->b) {
			load_user_cs_desc(cpu, current->mm);
			return;
		}
	}

	current->thread.error_code = error_code;
	current->thread.trap_no = 13;
	force_sig(SIGSEGV, current);
	return;

gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;

gp_in_kernel:
	if (!fixup_exception(regs)) {
		if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
			return;
		die("general protection fault", regs, error_code);
	}
}
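/*
 * Selector error code layout used by the LDT check at the top of
 * do_general_protection():
 *
 *	bit 0 (EXT)	exception originated externally
 *	bit 1 (IDT)	selector index refers to the IDT
 *	bit 2 (TI)	selector index refers to the LDT, not the GDT
 *	bits 3-15	selector index
 *
 * so "(error_code & 6) == 4" means TI=1 and IDT=0: the fault came in
 * through an LDT selector, possibly one we have not loaded yet.
 */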
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
	printk("You probably have a hardware problem with your RAM chips\n");

	/* Clear and disable the memory parity error line. */
	clear_mem_error(reason);
}
static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
	unsigned long i;

	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	i = 2000;
	while (--i) udelay(1000);
	reason &= ~8;
	outb(reason, 0x61);
}
static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
	/* Might actually be able to figure out what the guilty party
	 * is. */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		reason, smp_processor_id());
	printk("Dazed and confused, but trying to continue\n");
	printk("Do you have a strange power saving mode enabled?\n");
}
static DEFINE_SPINLOCK(nmi_print_lock);

void die_nmi (struct pt_regs *regs, const char *msg)
{
	spin_lock(&nmi_print_lock);
	/*
	 * We are in trouble anyway, lets at least try
	 * to get a message out.
	 */
	bust_spinlocks(1);
	printk(msg);
	printk(" on CPU%d, eip %08lx, registers:\n",
		smp_processor_id(), regs->eip);
	show_registers(regs);
	printk("console shuts up ...\n");
	console_silent();
	spin_unlock(&nmi_print_lock);
	bust_spinlocks(0);
	do_exit(SIGSEGV);
}
static void default_do_nmi(struct pt_regs * regs)
{
	unsigned char reason = 0;

	/* Only the BSP gets external NMIs from the system.  */
	if (!smp_processor_id())
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
							== NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog) {
			nmi_watchdog_tick(regs);
			return;
		}
#endif
		unknown_nmi_error(reason, regs);
		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
		return;
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered.
	 */
	reassert_nmi();
}
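/*
 * The "reason" byte above is read from system control port B (0x61):
 * bit 7 (0x80) signals a memory parity error (PERR), bit 6 (0x40) an
 * I/O channel check (IOCHK); hence the 0xc0 mask. Anything else is
 * either an NMI IPI, the NMI watchdog, or unknown.
 */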
static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
	return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

fastcall void do_nmi(struct pt_regs * regs, long error_code)
{
	int cpu;

	nmi_enter();

	cpu = smp_processor_id();
	++nmi_count(cpu);

	if (!nmi_callback(regs, cpu))
		default_do_nmi(regs);

	nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
	nmi_callback = callback;
}

void unset_nmi_callback(void)
{
	nmi_callback = dummy_nmi_callback;
}
#ifdef CONFIG_KPROBES
fastcall int do_int3(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return 1;
	/* This is an interrupt gate, because kprobes wants interrupts
	   disabled.  Normal trap handlers don't. */
	restore_interrupts(regs);
	do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
	return 0;
}
#endif
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
fastcall void do_debug(struct pt_regs * regs, long error_code)
{
	unsigned int condition;
	struct task_struct *tsk = current;

	condition = HYPERVISOR_get_debugreg(6);

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
					SIGTRAP) == NOTIFY_STOP)
		return;

	/* It's safe to allow irq's after DR6 has been saved */
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg[7])
			goto clear_dr7;
	}

	if (regs->eflags & VM_MASK)
		goto debug_vm86;

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg[6] = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 * And if the event was due to a debugger (PT_DTRACE), clear the
	 * TF flag so that register information is correct.
	 */
	if (condition & DR_STEP) {
		/*
		 * We already checked v86 mode above, so we can
		 * check for kernel mode by just checking the CPL
		 * of CS.
		 */
		if ((regs->xcs & 2) == 0)
			goto clear_TF_reenable;

		if (likely(tsk->ptrace & PT_DTRACE)) {
			tsk->ptrace &= ~PT_DTRACE;
			regs->eflags &= ~TF_MASK;
		}
	}

	/* Ok, finally something we can handle */
	send_sigtrap(tsk, regs, error_code);

	/* Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	HYPERVISOR_set_debugreg(7, 0);
	return;

debug_vm86:
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->eflags &= ~TF_MASK;
	return;
}
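/*
 * DR6 bits consumed by do_debug() (see asm/debugreg.h):
 *
 *	DR_TRAP0..DR_TRAP3	(0x1, 0x2, 0x4, 0x8)	breakpoint 0-3 hit
 *	DR_STEP			(0x4000)		single-step trap (TF)
 *
 * A DR_TRAPx bit seen while thread.debugreg[7] is empty is a leftover
 * from lazy DR7 switching and is discarded via clear_dr7.
 */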
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
		case 0x041: /* Stack Fault */
		case 0x241: /* Stack Fault | Direction */
			info.si_code = FPE_FLTINV;
			/* Should we clear the SF or let user space do it ???? */
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}
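/*
 * Worked example for the switch above: a divide-by-zero sets ZE
 * (0x004) in the status word; if the control word leaves ZE unmasked
 * (bit 0x004 clear), then ((~cwd) & swd & 0x3f) == 0x004 and we pick
 * FPE_FLTDIV. The extra (swd & 0x240) folds in C1 (0x200) and the
 * stack-fault bit (0x040) so that stack faults land on the
 * 0x041/0x241 cases.
 */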
fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
	ignore_fpu_irq = 1;
	math_error((void __user *)regs->eip);
}
void simd_math_error(void __user *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}
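/*
 * Worked example for the MXCSR decode above: flag bits live at 0-5
 * (0x3f) and the corresponding mask bits at 7-12 (0x1f80), so shifting
 * the mask field right by 7 aligns it with the flags. An unmasked
 * divide-by-zero has flag 0x004 set and mask bit 0x200 clear, giving
 * 0x004 and FPE_FLTDIV.
 */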
fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
					  long error_code)
{
	if (cpu_has_xmm) {
		/* Handle SIMD FPU exceptions on PIII+ processors. */
		ignore_fpu_irq = 1;
		simd_math_error((void __user *)regs->eip);
	} else {
		/*
		 * Handle strange cache flush from user space exception
		 * in all other cases.  This is undocumented behaviour.
		 */
		if (regs->eflags & VM_MASK) {
			handle_vm86_fault((struct kernel_vm86_regs *)regs,
					  error_code);
			return;
		}
		die_if_kernel("cache flush denied", regs, error_code);
		current->thread.trap_no = 19;
		current->thread.error_code = error_code;
		force_sig(SIGSEGV, current);
	}
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	/* NB. 'clts' is done for us by Xen during virtual trap. */
	if (!tsk_used_math(tsk))
		init_fpu(tsk);
	restore_fpu(tsk);
	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
}

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
	printk("math-emulation not enabled and no coprocessor found.\n");
	printk("killing %s.\n",current->comm);
	force_sig(SIGFPE,current);
	schedule();
}

#endif /* CONFIG_MATH_EMULATION */
#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	__asm__ __volatile__("lidt %0" : : "m" (idt_descr));
}
#endif
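/*
 * Background on the workaround above: the Pentium "F0 0F" erratum lets
 * an unprivileged "lock cmpxchg8b %eax" (bytes F0 0F C7 C8) hang the
 * CPU while it tries to raise #UD. With the IDT remapped read-only,
 * delivering that fault takes a page fault instead, which the kernel
 * can recognize and recover from.
 */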
/* NB. All these are "trap gates" (i.e. events_mask isn't cleared). */
static trap_info_t trap_table[] = {
	{  0, 0, __KERNEL_CS, (unsigned long)divide_error		},
	{  1, 0, __KERNEL_CS, (unsigned long)debug			},
	{  3, 3, __KERNEL_CS, (unsigned long)int3			},
	{  4, 3, __KERNEL_CS, (unsigned long)overflow			},
	{  5, 3, __KERNEL_CS, (unsigned long)bounds			},
	{  6, 0, __KERNEL_CS, (unsigned long)invalid_op			},
	{  7, 0, __KERNEL_CS, (unsigned long)device_not_available	},
	{  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
	{ 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS		},
	{ 11, 0, __KERNEL_CS, (unsigned long)segment_not_present	},
	{ 12, 0, __KERNEL_CS, (unsigned long)stack_segment		},
	{ 13, 0, __KERNEL_CS, (unsigned long)general_protection	},
	{ 14, 0, __KERNEL_CS, (unsigned long)page_fault			},
	{ 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment		},
	{ 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error		},
	{ 17, 0, __KERNEL_CS, (unsigned long)alignment_check		},
#ifdef CONFIG_X86_MCE
	{ 18, 0, __KERNEL_CS, (unsigned long)machine_check		},
#endif
	{ 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error	},
	{ SYSCALL_VECTOR,  3, __KERNEL_CS, (unsigned long)system_call	},
	{  0, 0,	   0, 0						}
};
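/*
 * The second field of each entry above carries the privilege level a
 * context needs in order to raise the trap with "int $n": 3 for int3,
 * overflow, bounds and the system-call vector so user space may invoke
 * them, 0 everywhere else so only the kernel can (this is the Xen
 * trap_info_t "flags" field copied out in smp_trap_init() below).
 */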
void __init trap_init(void)
{
	HYPERVISOR_set_trap_table(trap_table);
	HYPERVISOR_set_fast_trap(SYSCALL_VECTOR);

	/*
	 * default LDT is a single-entry callgate to lcall7 for iBCS
	 * and a callgate to lcall27 for Solaris/x86 binaries
	 */
	make_lowmem_page_readonly(&default_ldt[0]);

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}
int smp_trap_init(trap_info_t *trap_ctxt)
{
	trap_info_t *t;

	for (t = trap_table; t->address; t++) {
		trap_ctxt[t->vector].flags = t->flags;
		trap_ctxt[t->vector].cs = t->cs;
		trap_ctxt[t->vector].address = t->address;
	}
	return SYSCALL_VECTOR;
}