/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/version.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/arch_hooks.h>

#include <linux/irq.h>
#include <linux/module.h>

#include "mach_traps.h"
struct desc_struct default_ldt[] __attribute__((__section__(".data.default_ldt"))) =
                { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } };
struct page *default_ldt_page;
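/*
 * Five zeroed LDT entries: trap_init() below turns slot 0 into the lcall7
 * call gate (iBCS) and slot 4 into the lcall27 call gate (Solaris/x86);
 * the remaining slots stay null.
 */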
/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;
/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
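/*
 * Rough sketch of why the F0 0F workaround needs this: the buggy
 * "lock cmpxchg8b %eax" encoding raises invalid-opcode with the bus
 * locked, hanging affected Pentiums while the CPU fetches the IDT entry
 * for vector 6.  Keeping the IDT on its own page lets
 * trap_init_virtual_IDT() below remap it through a protected fixmap, so
 * that fetch is commonly described as faulting into the page-fault path
 * instead, where the vector-6 case can be recognized and recovered.
 */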
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

/* entry points in entry.S, referenced by trap_init() below */
asmlinkage void system_call(void);
asmlinkage void lcall7(void);
asmlinkage void lcall27(void);
static int kstack_depth_to_print = 24;
static int valid_stack_ptr(struct task_struct *task, void *p)
{
        if (p <= (void *)task->thread_info)
                return 0;
        if (kstack_end(p))
                return 0;
        return 1;
}
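/*
 * With frame pointers enabled, gcc emits "push %ebp; mov %esp,%ebp" in
 * every prologue, so each frame starts with the saved caller %ebp at
 * (%ebp) and the return address just above it at 4(%ebp).  The walker
 * below relies on exactly that layout:
 *
 *      ...
 *      4(%ebp)         return address into the caller
 *      0(%ebp)         caller's saved %ebp  <- next link in the chain
 */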
#ifdef CONFIG_FRAME_POINTER
static void print_context_stack(struct task_struct *task, unsigned long *stack,
                         unsigned long ebp)
{
        unsigned long addr;

        while (valid_stack_ptr(task, (void *)ebp)) {
                addr = *(unsigned long *)(ebp + 4);
                printk(" [<%08lx>] ", addr);
                print_symbol("%s", addr);
                printk("\n");
                ebp = *(unsigned long *)ebp;
        }
}
#else
static void print_context_stack(struct task_struct *task, unsigned long *stack,
                         unsigned long ebp)
{
        unsigned long addr;

        while (!kstack_end(stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr)) {
                        printk(" [<%08lx>]", addr);
                        print_symbol(" %s", addr);
                        printk("\n");
                }
        }
}
#endif
void show_trace(struct task_struct *task, unsigned long * stack)
{
        unsigned long ebp;

        if (!task)
                task = current;

        if (!valid_stack_ptr(task, stack)) {
                printk("Stack pointer is garbage, not printing trace\n");
                return;
        }

        if (task == current) {
                /* Grab ebp right from our regs */
                asm ("movl %%ebp, %0" : "=r" (ebp) : );
        } else {
                /* ebp is the last reg pushed by switch_to */
                ebp = *(unsigned long *) task->thread.esp;
        }

        while (1) {
                struct thread_info *context;
                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                print_context_stack(task, stack, ebp);
                stack = (unsigned long*)context->previous_esp;
                if (!stack)
                        break;
                printk(" =======================\n");
        }
}
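/*
 * Note on the loop above: each kernel stack's thread_info records in
 * previous_esp the stack pointer of the context it interrupted (0 for
 * the process stack itself).  Following that chain lets the trace cross
 * from an interrupt stack back into the interrupted context, with the
 * "=======================" line marking each stack switch.
 */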
void show_stack(struct task_struct *task, unsigned long *esp)
{
        unsigned long *stack;
        int i;

        if (esp == NULL) {
                if (task)
                        esp = (unsigned long*)task->thread.esp;
                else
                        esp = (unsigned long *)&esp;
        }

        stack = esp;
        for(i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n       ");
                printk("%08lx ", *stack++);
        }
        printk("\nCall Trace:\n");
        show_trace(task, esp);
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, &stack);
}

EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = 1;
        unsigned long esp;
        unsigned short ss;

        esp = (unsigned long) (&regs->esp);
        ss = __KERNEL_DS;
        if (regs->xcs & 3) {
                in_kernel = 0;
                esp = regs->esp;
                ss = regs->xss & 0xffff;
        }
        print_modules();
        printk("CPU:    %d\nEIP:    %04x:[<%08lx>]    %s\nEFLAGS: %08lx"
                        "   (%s) \n",
                smp_processor_id(), 0xffff & regs->xcs, regs->eip,
                print_tainted(), regs->eflags, UTS_RELEASE);
        print_symbol("EIP is at %s\n", regs->eip);
        printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk("ds: %04x   es: %04x   ss: %04x\n",
                regs->xds & 0xffff, regs->xes & 0xffff, ss);
        printk("Process %s (pid: %d, threadinfo=%p task=%p)",
                current->comm, current->pid, current_thread_info(), current);
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                printk("\nStack: ");
                show_stack(NULL, (unsigned long*)esp);

                printk("Code: ");
                if (regs->eip < PAGE_OFFSET)
                        goto bad;

                for (i = 0; i < 20; i++) {
                        unsigned char c;
                        if ((user_mode(regs) && get_user(c, &((unsigned char*)regs->eip)[i])) ||
                            (!user_mode(regs) && __direct_get_user(c, &((unsigned char*)regs->eip)[i]))) {
bad:
                                printk(" Bad EIP value.");
                                break;
                        }
                        printk("%02x ", c);
                }
        }
        printk("\n");
}
static void handle_BUG(struct pt_regs *regs)
{
        unsigned short ud2;
        unsigned short line;
        char *file;
        char c;
        unsigned long eip;

        if (regs->xcs & 3)
                goto no_bug;            /* Not in kernel */

        eip = regs->eip;

        if (__direct_get_user(ud2, (unsigned short *)eip))
                goto no_bug;
        if (ud2 != 0x0b0f)
                goto no_bug;
        if (__direct_get_user(line, (unsigned short *)(eip + 2)))
                goto bug;
        if (__direct_get_user(file, (char **)(eip + 4)) ||
            __direct_get_user(c, file))
                file = "<bad filename>";

        printk("------------[ cut here ]------------\n");
        printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);

no_bug:
        return;

        /* Here we know it was a BUG but file-n-line is unavailable */
bug:
        printk("Kernel BUG\n");
}
spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
void die(const char * str, struct pt_regs * regs, long err)
{
        static int die_counter;
        int nl = 0;

        console_verbose();
        local_irq_disable();
        if (!spin_trylock(&die_lock)) {
                if (smp_processor_id() != die_owner)
                        spin_lock(&die_lock);
                /* allow recursive die to fall through */
        }
        die_owner = smp_processor_id();
        bust_spinlocks(1);
        handle_BUG(regs);
        printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
        nl = 1;
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
        nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC");
        nl = 1;
#endif
        if (nl)
                printk("\n");
        show_registers(regs);
        bust_spinlocks(0);
        die_owner = -1;
        spin_unlock_irq(&die_lock);
        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(5 * HZ);
                panic("Fatal exception");
        }
        do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs))
                die(str, regs, err);
}
static inline unsigned long get_cr2(void)
{
        unsigned long address;

        /* get the address */
        __asm__("movl %%cr2,%0":"=r" (address));
        return address;
}
static inline void do_trap(int trapnr, int signr, char *str, int vm86,
                           struct pt_regs * regs, long error_code, siginfo_t *info)
{
        if (regs->eflags & VM_MASK) {
                if (vm86)
                        goto vm86_trap;
                goto trap_signal;
        }

        if (!(regs->xcs & 3))
                goto kernel_trap;

        trap_signal: {
                struct task_struct *tsk = current;
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        kernel_trap: {
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }

        vm86_trap: {
                int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
                if (ret) goto trap_signal;
                return;
        }
}
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, get_cr2())
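/*
 * For reference, DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
 * above expands to nothing more than:
 *
 *      asmlinkage void do_stack_segment(struct pt_regs * regs, long error_code)
 *      {
 *              do_trap(12, SIGBUS, "stack segment", 0, regs, error_code, NULL);
 *      }
 */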
/*
 * the original non-exec stack patch was written by
 * Solar Designer <solar at openwall.com>. Thanks!
 */
asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();

        if (regs->eflags & VM_MASK)
                goto gp_in_vm86;

        if (!(regs->xcs & 3))
                goto gp_in_kernel;

        /*
         * lazy-check for CS validity on exec-shield binaries:
         */
        if (current->mm) {
                int cpu = smp_processor_id();
                struct desc_struct *desc1, *desc2;
                struct vm_area_struct *vma;
                unsigned long limit = 0;

                spin_lock(&current->mm->page_table_lock);
                for (vma = current->mm->mmap; vma; vma = vma->vm_next)
                        if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
                                limit = vma->vm_end;
                spin_unlock(&current->mm->page_table_lock);

                current->mm->context.exec_limit = limit;
                set_user_cs(&current->mm->context.user_cs, limit);

                desc1 = &current->mm->context.user_cs;
                desc2 = cpu_gdt_table[cpu] + GDT_ENTRY_DEFAULT_USER_CS;

                /*
                 * The CS was not in sync - reload it and retry the
                 * instruction. If the instruction still faults then
                 * we won't hit this branch next time around.
                 */
                if (desc1->a != desc2->a || desc1->b != desc2->b) {
                        if (print_fatal_signals >= 2) {
                                printk("#GPF fixup (%ld[seg:%lx]) at %08lx, CPU#%d.\n", error_code, error_code/8, regs->eip, smp_processor_id());
                                printk(" exec_limit: %08lx, user_cs: %08lx/%08lx, CPU_cs: %08lx/%08lx.\n", current->mm->context.exec_limit, desc1->a, desc1->b, desc2->a, desc2->b);
                        }
                        load_user_cs_desc(cpu, current->mm);
                        return;
                }
        }
        if (print_fatal_signals) {
                printk("#GPF(%ld[seg:%lx]) at %08lx, CPU#%d.\n", error_code, error_code/8, regs->eip, smp_processor_id());
                printk(" exec_limit: %08lx, user_cs: %08lx/%08lx.\n", current->mm->context.exec_limit, current->mm->context.user_cs.a, current->mm->context.user_cs.b);
        }

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;
        force_sig(SIGSEGV, current);
        return;

gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;

gp_in_kernel:
        if (!fixup_exception(regs))
                die("general protection fault", regs, error_code);
}
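/*
 * NMI reason decoding below: on PC/AT-compatible hardware get_nmi_reason()
 * (from mach_traps.h) reads System Control Port B, I/O port 0x61, where
 * bit 7 signals a memory parity/ECC error and bit 6 an I/O channel check
 * (IOCHK); hence the "reason & 0xc0" test in default_do_nmi().
 */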
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
        printk("You probably have a hardware problem with your RAM chips\n");

        /* Clear and disable the memory parity error line. */
        clear_mem_error(reason);
}
static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
        unsigned long i;

        printk("NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        i = 2000;
        while (--i) udelay(1000);
        reason &= ~8;
        outb(reason, 0x61);
}
static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
        /* Might actually be able to figure out what the guilty party
         * is. */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                reason, smp_processor_id());
        printk("Dazed and confused, but trying to continue\n");
        printk("Do you have a strange power saving mode enabled?\n");
}
static void default_do_nmi(struct pt_regs * regs)
{
        unsigned char reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog) {
                        nmi_watchdog_tick(regs);
                        return;
                }
#endif
                unknown_nmi_error(reason, regs);
                return;
        }
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered.
         */
        reassert_nmi();
}
static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
        return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;
asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
        int cpu;

        nmi_enter();

        cpu = smp_processor_id();
        ++nmi_count(cpu);

        if (!nmi_callback(regs, cpu))
                default_do_nmi(regs);

        nmi_exit();
}
void set_nmi_callback(nmi_callback_t callback)
{
        nmi_callback = callback;
}

void unset_nmi_callback(void)
{
        nmi_callback = dummy_nmi_callback;
}
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
asmlinkage void do_debug(struct pt_regs * regs, long error_code)
{
        unsigned int condition;
        struct task_struct *tsk = current;
        siginfo_t info;

        __asm__ __volatile__("movl %%db6,%0" : "=r" (condition));

        /* It's safe to allow irq's after DR6 has been saved */
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();

        /*
         * Mask out spurious debug traps due to lazy DR7 setting or
         * due to 4G/4G kernel mode:
         */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg[7])
                        goto clear_dr7;
                if (!user_mode(regs)) {
                        // restore upon return-to-userspace:
                        set_thread_flag(TIF_DB7);
                        goto clear_dr7;
                }
        }

        if (regs->eflags & VM_MASK)
                goto debug_vm86;

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg[6] = condition;

        /* Mask out spurious TF errors due to lazy TF clearing */
        if (condition & DR_STEP) {
                /*
                 * The TF error should be masked out only if the current
                 * process is not traced and if the TRAP flag has been set
                 * previously by a tracing process (condition detected by
                 * the PT_DTRACE flag); remember that the i386 TRAP flag
                 * can be modified by the process itself in user mode,
                 * allowing programs to debug themselves without the ptrace()
                 * interface.
                 */
                if ((regs->xcs & 3) == 0)
                        goto clear_TF_reenable;
                if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
                        goto clear_TF;
        }

        /* Ok, finally something we can handle */
        tsk->thread.trap_no = 1;
        tsk->thread.error_code = error_code;
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;

        /* If this is a kernel mode trap, save the user PC on entry to
         * the kernel, that's what the debugger can make sense of.
         */
        info.si_addr = ((regs->xcs & 3) == 0) ? (void __user *)tsk->thread.eip
                                              : (void __user *)regs->eip;
        force_sig_info(SIGTRAP, &info, tsk);

        /* Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        __asm__("movl %0,%%db7"
                : /* no output */
                : "r" (0));
        return;

debug_vm86:
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
clear_TF:
        regs->eflags &= ~TF_MASK;
        return;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short cwd, swd;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status.  0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit.  We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
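        /*
         * Example: cwd == 0x037b (the 0x037f default with the zero-divide
         * mask ZM, bit 2, cleared) and swd == 0x0004 (ZE set after a divide
         * by zero) give (~cwd & swd & 0x3f) == 0x004 and (swd & 0x240) == 0,
         * so the switch below selects FPE_FLTDIV.
         */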
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                case 0x041: /* Stack Fault */
                case 0x241: /* Stack Fault | Direction */
                        info.si_code = FPE_FLTINV;
                        /* Should we clear the SF or let user space do it ???? */
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}
asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
        ignore_fpu_irq = 1;
        math_error((void __user *)regs->eip);
}
void simd_math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register.  Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
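        /*
         * Example: mxcsr == 0x1f01 (the 0x1f80 default with the invalid-op
         * mask IM, bit 7, cleared and the IE flag, bit 0, set) gives
         * ~((0x1f00) >> 7) & 0x01 == 0x001, so the switch below selects
         * FPE_FLTINV.
         */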
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}
asmlinkage void do_simd_coprocessor_error(struct pt_regs * regs,
                                          long error_code)
{
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->eip);
        } else {
                /*
                 * Handle strange cache flush from user space exception
                 * in all other cases.  This is undocumented behaviour.
                 */
                if (regs->eflags & VM_MASK) {
                        handle_vm86_fault((struct kernel_vm86_regs *)regs,
                                          error_code);
                        return;
                }
                die_if_kernel("cache flush denied", regs, error_code);
                current->thread.trap_no = 19;
                current->thread.error_code = error_code;
                force_sig(SIGSEGV, current);
        }
}
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs,
                                          long error_code)
{
#if 0
        /* No need to warn about this any longer. */
        printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        clts();         /* Allow maths ops (or we recurse) */
        if (!tsk->used_math)
                init_fpu(tsk);
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
}
#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
        printk("math-emulation not enabled and no coprocessor found.\n");
        printk("killing %s.\n",current->comm);
        force_sig(SIGFPE,current);
        schedule();
}

#endif /* CONFIG_MATH_EMULATION */
void __init trap_init_virtual_IDT(void)
{
        /*
         * "idt" is magic - it overlaps the idt_descr
         * variable so that updating idt will automatically
         * update the idt descriptor..
         */
        __set_fixmap(FIX_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
        idt_descr.address = __fix_to_virt(FIX_IDT);

        __asm__ __volatile__("lidt %0" : : "m" (idt_descr));
}
void __init trap_init_virtual_GDT(void)
{
        int cpu = smp_processor_id();
        struct Xgt_desc_struct *gdt_desc = cpu_gdt_descr + cpu;
        struct Xgt_desc_struct tmp_desc = {0, 0};
        struct tss_struct * t;

        __asm__ __volatile__("sgdt %0": "=m" (tmp_desc): :"memory");

#ifdef CONFIG_X86_HIGH_ENTRY
        if (!cpu) {
                __set_fixmap(FIX_GDT_0, __pa(cpu_gdt_table), PAGE_KERNEL);
                __set_fixmap(FIX_GDT_1, __pa(cpu_gdt_table) + PAGE_SIZE, PAGE_KERNEL);
                __set_fixmap(FIX_TSS_0, __pa(init_tss), PAGE_KERNEL);
                __set_fixmap(FIX_TSS_1, __pa(init_tss) + 1*PAGE_SIZE, PAGE_KERNEL);
                __set_fixmap(FIX_TSS_2, __pa(init_tss) + 2*PAGE_SIZE, PAGE_KERNEL);
                __set_fixmap(FIX_TSS_3, __pa(init_tss) + 3*PAGE_SIZE, PAGE_KERNEL);
        }

        gdt_desc->address = __fix_to_virt(FIX_GDT_0) + sizeof(cpu_gdt_table[0]) * cpu;
#else
        gdt_desc->address = (unsigned long)cpu_gdt_table[cpu];
#endif
        __asm__ __volatile__("lgdt %0": "=m" (*gdt_desc));

#ifdef CONFIG_X86_HIGH_ENTRY
        t = (struct tss_struct *) __fix_to_virt(FIX_TSS_0) + cpu;
#else
        t = init_tss + cpu;
#endif
        set_tss_desc(cpu, t);
        /* clear the TSS busy bit (bit 9 of the high word) so ltr won't fault */
        cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff;
        load_TR_desc();
}
#define _set_gate(gate_addr,type,dpl,addr,seg) \
do { \
  int __d0, __d1; \
  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
        "movw %4,%%dx\n\t" \
        "movl %%eax,%0\n\t" \
        "movl %%edx,%1" \
        :"=m" (*((long *) (gate_addr))), \
         "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
        :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
         "3" ((char *) (addr)),"2" ((seg) << 16)); \
} while (0)
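/*
 * What the asm above constructs, for reference: an IDT gate descriptor
 * is two 32-bit words,
 *
 *      word 0: selector (31:16) | handler offset bits 15:0
 *      word 1: handler offset bits 31:16 | P=1 (0x8000) | DPL<<13 | type<<8
 *
 * so type 14 yields an interrupt gate, 15 a trap gate, 12 a call gate
 * and 5 a task gate, as used by the helpers below.
 */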
/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,14,0,addr,__KERNEL_CS);
}

void __init set_trap_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,15,0,addr,__KERNEL_CS);
}

void __init set_system_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,15,3,addr,__KERNEL_CS);
}

void __init set_call_gate(void *a, void *addr)
{
        _set_gate(a,12,3,addr,__KERNEL_CS);
}

static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
{
        _set_gate(idt_table+n,5,0,0,(gdt_entry<<3));
}
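/*
 * Note on set_task_gate(): a task gate holds a TSS selector instead of a
 * handler address (hence the gdt_entry<<3 in the segment slot and the 0
 * offset).  trap_init() uses it for the double-fault vector so that a
 * fault on a corrupt stack switches to the known-good state of the
 * GDT_ENTRY_DOUBLEFAULT_TSS task.
 */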
void __init trap_init(void)
{
#ifdef CONFIG_EISA
        if (isa_readl(0x0FFFD9) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
                EISA_bus = 1;
        }
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        init_apic_mappings();
#endif
        init_entry_mappings();

        set_trap_gate(0,&divide_error);
        set_intr_gate(1,&debug);
        set_intr_gate(2,&nmi);
        set_system_gate(3,&int3);       /* int3-5 can be called from all */
        set_system_gate(4,&overflow);
        set_system_gate(5,&bounds);
        set_trap_gate(6,&invalid_op);
        set_trap_gate(7,&device_not_available);
        set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
        set_trap_gate(9,&coprocessor_segment_overrun);
        set_trap_gate(10,&invalid_TSS);
        set_trap_gate(11,&segment_not_present);
        set_trap_gate(12,&stack_segment);
        set_trap_gate(13,&general_protection);
        set_intr_gate(14,&page_fault);
        set_trap_gate(15,&spurious_interrupt_bug);
        set_trap_gate(16,&coprocessor_error);
        set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
        set_trap_gate(18,&machine_check);
#endif
        set_trap_gate(19,&simd_coprocessor_error);

        set_system_gate(SYSCALL_VECTOR,&system_call);

        /*
         * default LDT is a single-entry callgate to lcall7 for iBCS
         * and a callgate to lcall27 for Solaris/x86 binaries
         */
        set_call_gate(&default_ldt[0],lcall7);
        set_call_gate(&default_ldt[4],lcall27);

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();

        trap_init_hook();
}