2 * linux/arch/x86_64/entry.S
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
12 * entry.S contains the system-call and fault low-level handling routines.
14 * NOTE: This code handles signal-recognition, which happens every time
15 * after an interrupt and after each system call.
17 * Normal syscalls and interrupts don't save a full stack frame, this is
18 * only done for syscall tracing, signals or fork/exec et al.
20 * A note on terminology:
21 * - top of stack: Architecture defined interrupt frame from SS to RIP
22 * at the top of the kernel process stack.
23 * - partial stack frame: partially saved registers up to R11.
24 * - full stack frame: Like partial stack frame, but all registers saved.
27 * - schedule it carefully for the final hardware.
31 #include <linux/config.h>
32 #include <linux/linkage.h>
33 #include <asm/segment.h>
35 #include <asm/cache.h>
36 #include <asm/errno.h>
37 #include <asm/dwarf2.h>
38 #include <asm/calling.h>
39 #include <asm/asm-offsets.h>
41 #include <asm/unistd.h>
42 #include <asm/thread_info.h>
43 #include <asm/hw_irq.h>
48 #ifndef CONFIG_PREEMPT
49 #define retint_kernel retint_restore_args
53 * C code is not supposed to know about undefined top of stack. Every time
54 * a C function with a pt_regs argument is called from the SYSCALL-based
55 * fast path FIXUP_TOP_OF_STACK is needed.
56 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
60 /* %rsp:at FRAMEEND */
/*
 * FIXUP_TOP_OF_STACK tmp:
 * The SYSCALL fast path saves only a partial frame.  Before C code may
 * look at pt_regs, synthesize the missing top-of-stack slots: the user
 * SS/CS selectors and EFLAGS (SYSCALL parked the user eflags in the
 * R11 slot).  The user RSP was stashed in %gs:pda_oldrsp at syscall
 * entry.  \tmp is clobbered.
 * NOTE(review): interior lines of this macro are elided in this extract.
 */
61 .macro FIXUP_TOP_OF_STACK tmp
62 movq %gs:pda_oldrsp,\tmp
64 movq $__USER_DS,SS(%rsp)
65 movq $__USER_CS,CS(%rsp)
67 movq R11(%rsp),\tmp /* get eflags */
68 movq \tmp,EFLAGS(%rsp)
/*
 * RESTORE_TOP_OF_STACK tmp,offset=0:
 * Inverse of FIXUP_TOP_OF_STACK — propagate a (possibly
 * ptrace-modified) pt_regs back into the SYSRET state: the user RSP
 * goes back to %gs:pda_oldrsp and EFLAGS back into the R11 slot that
 * SYSRET restores eflags from.  \tmp is clobbered.
 */
71 .macro RESTORE_TOP_OF_STACK tmp,offset=0
72 movq RSP-\offset(%rsp),\tmp
73 movq \tmp,%gs:pda_oldrsp
74 movq EFLAGS-\offset(%rsp),\tmp
75 movq \tmp,R11-\offset(%rsp)
/*
 * FAKE_STACK_FRAME child_rip:
 * Build a synthetic interrupt frame (ss, rsp, eflags, cs, rip) plus an
 * orig-rax slot, so a kernel-created context looks as if it entered
 * via an interrupt.  eflags is pushed as (1<<9), i.e. only IF set —
 * interrupts enabled.
 * NOTE(review): the ss/rsp pushes themselves are elided in this
 * extract; only their CFI adjustments remain visible.
 */
78 .macro FAKE_STACK_FRAME child_rip
79 /* push in order ss, rsp, eflags, cs, rip */
82 CFI_ADJUST_CFA_OFFSET 8
83 /*CFI_REL_OFFSET ss,0*/
85 CFI_ADJUST_CFA_OFFSET 8
87 pushq $(1<<9) /* eflags - interrupts on */
88 CFI_ADJUST_CFA_OFFSET 8
89 /*CFI_REL_OFFSET rflags,0*/
90 pushq $__KERNEL_CS /* cs */
91 CFI_ADJUST_CFA_OFFSET 8
92 /*CFI_REL_OFFSET cs,0*/
93 pushq \child_rip /* rip */
94 CFI_ADJUST_CFA_OFFSET 8
96 pushq %rax /* orig rax */
97 CFI_ADJUST_CFA_OFFSET 8
/*
 * UNFAKE_STACK_FRAME: drop the 6 words pushed by FAKE_STACK_FRAME.
 * NOTE(review): the actual %rsp adjustment is elided in this extract;
 * only the matching CFI note is visible.
 */
100 .macro UNFAKE_STACK_FRAME
102 CFI_ADJUST_CFA_OFFSET -(6*8)
/*
 * CFI_DEFAULT_STACK start=1:
 * Emit the dwarf2 unwind annotations for a full pt_regs save: the CFA
 * sits SS+8 above the frame, and each saved register slot is described
 * with CFI_REL_OFFSET at its pt_regs offset.  The cs/ss/rflags
 * annotations are deliberately left commented out.
 */
105 .macro CFI_DEFAULT_STACK start=1
110 CFI_DEF_CFA_OFFSET SS+8
112 CFI_REL_OFFSET r15,R15
113 CFI_REL_OFFSET r14,R14
114 CFI_REL_OFFSET r13,R13
115 CFI_REL_OFFSET r12,R12
116 CFI_REL_OFFSET rbp,RBP
117 CFI_REL_OFFSET rbx,RBX
118 CFI_REL_OFFSET r11,R11
119 CFI_REL_OFFSET r10,R10
122 CFI_REL_OFFSET rax,RAX
123 CFI_REL_OFFSET rcx,RCX
124 CFI_REL_OFFSET rdx,RDX
125 CFI_REL_OFFSET rsi,RSI
126 CFI_REL_OFFSET rdi,RDI
127 CFI_REL_OFFSET rip,RIP
128 /*CFI_REL_OFFSET cs,CS*/
129 /*CFI_REL_OFFSET rflags,EFLAGS*/
130 CFI_REL_OFFSET rsp,RSP
131 /*CFI_REL_OFFSET ss,SS*/
134 * A newly forked process directly context switches into this.
/*
 * ret_from_fork (entry label elided in this extract):
 * Reset kernel eflags from the saved kernel_eflags template, then pick
 * an exit path: if syscall tracing/audit is pending, go through the
 * slow syscall_trace_leave path; kernel threads (CS without user RPL
 * bits) and TIF_IA32 tasks leave via int_ret_from_sys_call (IRET);
 * everything else restores the top of stack and takes the SYSRET fast
 * path.  NOTE(review): the branch to the tracing path after the first
 * testl is elided here.
 */
139 push kernel_eflags(%rip)
140 CFI_ADJUST_CFA_OFFSET 4
141 popf # reset kernel eflags
142 CFI_ADJUST_CFA_OFFSET -4
144 GET_THREAD_INFO(%rcx)
145 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
149 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
150 je int_ret_from_sys_call
151 testl $_TIF_IA32,threadinfo_flags(%rcx)
152 jnz int_ret_from_sys_call
153 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
154 jmp ret_from_sys_call
157 call syscall_trace_leave
158 GET_THREAD_INFO(%rcx)
163 * System call entry. Upto 6 arguments in registers are supported.
165 * SYSCALL does not save anything on the stack and does not change the
171 * rax system call number
173 * rcx return address for syscall/sysret, C arg3
176 * r10 arg3 (--> moved to rcx for C)
179 * r11 eflags for syscall/sysret, temporary for C
180 * r12-r15,rbp,rbx saved by C code, not touched.
182 * Interrupts are off on entry.
183 * Only called from user space.
185 * XXX if we had a free scratch register we could save the RSP into the stack frame
186 * and report it properly in ps. Unfortunately we haven't.
188 * When user can change the frames always force IRET. That is because
189 * it deals with uncanonical addresses better. SYSRET has trouble
190 * with them due to bugs in both AMD and Intel CPUs.
197 /*CFI_REGISTER rflags,r11*/
/*
 * Stack switch: stash the user RSP in the per-cpu PDA and load the
 * per-task kernel stack, then save the syscall number (orig_rax slot)
 * and the user return address (%rcx, where SYSCALL put it) into the
 * partial frame.  Trace/audit/seccomp work flags divert to the tracing
 * path, and out-of-range syscall numbers get -ENOSYS (both branches
 * are elided in this extract).
 */
199 movq %rsp,%gs:pda_oldrsp
200 movq %gs:pda_kernelstack,%rsp
203 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
204 movq %rcx,RIP-ARGOFFSET(%rsp)
205 CFI_REL_OFFSET rip,RIP-ARGOFFSET
206 GET_THREAD_INFO(%rcx)
207 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
210 cmpq $__NR_syscall_max,%rax
213 call *sys_call_table(,%rax,8) # XXX: rip relative
214 movq %rax,RAX-ARGOFFSET(%rsp)
216 * Syscall return path ending with SYSRET (fast path)
217 * Has incomplete stack frame and undefined top of stack.
/*
 * ret_from_sys_call: if no _TIF_ALLWORK_MASK bits are set, reload the
 * user RIP from the frame into %rcx (SYSRET's return-address register),
 * restore the argument registers, and switch back to the user RSP kept
 * in %gs:pda_oldrsp.  The trailing push/pop pair belongs to the
 * "careful" reschedule loop (the schedule call itself is elided in
 * this extract).
 */
219 .globl ret_from_sys_call
221 movl $_TIF_ALLWORK_MASK,%edi
224 GET_THREAD_INFO(%rcx)
226 movl threadinfo_flags(%rcx),%edx
230 movq RIP-ARGOFFSET(%rsp),%rcx
232 RESTORE_ARGS 0,-ARG_SKIP,1
233 /*CFI_REGISTER rflags,r11*/
234 movq %gs:pda_oldrsp,%rsp
238 /* Handle reschedules */
239 /* edx: work, edi: workmask */
242 bt $TIF_NEED_RESCHED,%edx
246 CFI_ADJUST_CFA_OFFSET 8
249 CFI_ADJUST_CFA_OFFSET -8
252 /* Handle a signal */
/*
 * Pending signal on the SYSRET fast path: invoke do_notify_resume via
 * ptregscall_common so FIXUP_TOP_OF_STACK completes the frame first;
 * the subsequent return must use IRET because signal setup may have
 * rewritten the user frame.  The trailing -ENOSYS store is the
 * bad-syscall-number exit ("badsys"; label elided in this extract),
 * which returns through the normal fast path.
 */
255 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
258 /* Really a signal */
259 /* edx: work flags (arg3) */
260 leaq do_notify_resume(%rip),%rax
261 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
262 xorl %esi,%esi # oldset -> arg2
263 call ptregscall_common
264 1: movl $_TIF_NEED_RESCHED,%edi
265 /* Use IRET because user could have changed frame. This
266 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
271 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
272 jmp ret_from_sys_call
274 /* Do syscall tracing */
/*
 * tracesys (label elided in this extract): slow syscall entry for
 * traced tasks.  Pre-set -ENOSYS so a syscall the tracer suppresses
 * returns an error, complete the frame, let the tracer run
 * (syscall_trace_enter), then reload the argument registers — ptrace
 * may have modified them — and dispatch.  Exit via IRET, since the
 * tracer can also have changed the user frame.
 */
278 movq $-ENOSYS,RAX(%rsp)
279 FIXUP_TOP_OF_STACK %rdi
281 call syscall_trace_enter
282 LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
284 cmpq $__NR_syscall_max,%rax
286 movq %r10,%rcx /* fixup for C */
287 call *sys_call_table(,%rax,8)
288 1: movq %rax,RAX-ARGOFFSET(%rsp)
289 /* Use IRET because user could have changed frame */
290 jmp int_ret_from_sys_call
294 * Syscall return path ending with IRET.
295 * Has correct top of stack, but partial stack frame.
297 ENTRY(int_ret_from_sys_call)
299 CFI_DEF_CFA rsp,SS+8-ARGOFFSET
300 /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
301 CFI_REL_OFFSET rsp,RSP-ARGOFFSET
302 /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
303 /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
304 CFI_REL_OFFSET rip,RIP-ARGOFFSET
305 CFI_REL_OFFSET rdx,RDX-ARGOFFSET
306 CFI_REL_OFFSET rcx,RCX-ARGOFFSET
307 CFI_REL_OFFSET rax,RAX-ARGOFFSET
308 CFI_REL_OFFSET rdi,RDI-ARGOFFSET
309 CFI_REL_OFFSET rsi,RSI-ARGOFFSET
310 CFI_REL_OFFSET r8,R8-ARGOFFSET
311 CFI_REL_OFFSET r9,R9-ARGOFFSET
312 CFI_REL_OFFSET r10,R10-ARGOFFSET
313 CFI_REL_OFFSET r11,R11-ARGOFFSET
/*
 * If we came from kernel mode (CS RPL bits clear) there is no user
 * work to do — go straight to the register-restore/IRET path.  For a
 * user return, loop checking thread_info flags against the work mask;
 * TS_COMPAT is cleared since the 32-bit-syscall indication is no
 * longer needed.
 */
315 testl $3,CS-ARGOFFSET(%rsp)
316 je retint_restore_args
317 movl $_TIF_ALLWORK_MASK,%edi
318 /* edi: mask to check */
320 GET_THREAD_INFO(%rcx)
321 movl threadinfo_flags(%rcx),%edx
324 andl $~TS_COMPAT,threadinfo_status(%rcx)
327 /* Either reschedule or signal or syscall exit tracking needed. */
328 /* First do a reschedule test. */
329 /* edx: work, edi: workmask */
331 bt $TIF_NEED_RESCHED,%edx
335 CFI_ADJUST_CFA_OFFSET 8
338 CFI_ADJUST_CFA_OFFSET -8
/*
 * int_very_careful / int_signal (labels elided in this extract):
 * syscall-exit tracing runs syscall_trace_leave with &pt_regs as arg1
 * and then drops the trace bits from the work mask; pending signals go
 * to do_notify_resume(&pt_regs, oldset=NULL, flags), after which the
 * loop restarts with only NEED_RESCHED in the mask.
 */
342 /* handle signals and tracing -- both require a full stack frame */
346 /* Check for syscall exit trace */
347 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
350 CFI_ADJUST_CFA_OFFSET 8
351 leaq 8(%rsp),%rdi # &ptregs -> arg1
352 call syscall_trace_leave
354 CFI_ADJUST_CFA_OFFSET -8
355 andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
360 testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
362 movq %rsp,%rdi # &ptregs -> arg1
363 xorl %esi,%esi # oldset -> arg2
364 call do_notify_resume
365 1: movl $_TIF_NEED_RESCHED,%edi
373 * Certain special system calls that need to save a complete full stack frame.
/*
 * PTREGSCALL label,func,arg:
 * Generate a stub for a syscall that needs the full pt_regs: load \func
 * into %rax and a pointer to the register area into the handler's extra
 * \arg register ("+8" skips the return address pushed by the call into
 * the stub), then tail into ptregscall_common.
 */
376 .macro PTREGSCALL label,func,arg
379 leaq \func(%rip),%rax
380 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
381 jmp ptregscall_common
/* Stub instantiations; the extra arg register is the first free slot
 * after each syscall's own arguments. */
386 PTREGSCALL stub_clone, sys_clone, %r8
387 PTREGSCALL stub_fork, sys_fork, %rdi
388 PTREGSCALL stub_vfork, sys_vfork, %rdi
389 PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
390 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
391 PTREGSCALL stub_iopl, sys_iopl, %rsi
393 ENTRY(ptregscall_common)
/*
 * Common tail for the PTREGSCALL stubs: take the return address off the
 * stack, complete the frame with FIXUP_TOP_OF_STACK, invoke the handler
 * whose address the stub left in %rax, then restore the top of stack
 * and return.  The CFI_REGISTER notes track which register currently
 * holds the return address.
 * NOTE(review): the pop/push/call instructions themselves are elided
 * in this extract; only the CFI bookkeeping is visible.
 */
395 CFI_ADJUST_CFA_OFFSET -8
396 CFI_REGISTER rip, r11
399 CFI_REGISTER rip, r15
400 FIXUP_TOP_OF_STACK %r11
402 RESTORE_TOP_OF_STACK %r11
404 CFI_REGISTER rip, r11
407 CFI_ADJUST_CFA_OFFSET 8
408 CFI_REL_OFFSET rip, 0
/*
 * NOTE(review): entry label elided in this extract.  From the
 * FIXUP/RESTORE pairing and the forced-IRET exit this appears to be the
 * execve stub: it must rebuild the full frame before calling the
 * handler and leave via int_ret_from_sys_call, since execve replaces
 * the user context.  Confirm against the full source.
 */
415 CFI_ADJUST_CFA_OFFSET -8
416 CFI_REGISTER rip, r11
418 FIXUP_TOP_OF_STACK %r11
420 RESTORE_TOP_OF_STACK %r11
423 jmp int_ret_from_sys_call
427 * sigreturn is special because it needs to restore all registers on return.
428 * This cannot be done with SYSRET, so use the IRET return path instead.
430 ENTRY(stub_rt_sigreturn)
/*
 * Drop the stub's return address, complete the frame, let
 * sys_rt_sigreturn rebuild the register state from the signal frame,
 * and store its return value into the RAX slot so the IRET path
 * delivers it.
 */
433 CFI_ADJUST_CFA_OFFSET -8
436 FIXUP_TOP_OF_STACK %r11
437 call sys_rt_sigreturn
438 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
440 jmp int_ret_from_sys_call
444 * initial frame state for interrupts and exceptions
/*
 * _frame macro body (the .macro line itself is elided in this extract):
 * \ref is the pt_regs offset of the topmost hardware-pushed word, so
 * INTR_FRAME (ref=RIP) describes a frame with no error code, and
 * XCPT_FRAME (ref=ORIG_RAX) one where an error code / vector word has
 * already been pushed.
 */
448 CFI_DEF_CFA rsp,SS+8-\ref
449 /*CFI_REL_OFFSET ss,SS-\ref*/
450 CFI_REL_OFFSET rsp,RSP-\ref
451 /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
452 /*CFI_REL_OFFSET cs,CS-\ref*/
453 CFI_REL_OFFSET rip,RIP-\ref
456 /* initial frame state for interrupts (and exceptions without error code) */
457 #define INTR_FRAME _frame RIP
458 /* initial frame state for exceptions with error code (and interrupts with
459 vector already pushed) */
460 #define XCPT_FRAME _frame ORIG_RAX
463 * Interrupt entry/exit.
465 * Interrupt entry points save only callee clobbered registers in fast path.
467 * Entry runs with interrupts off.
470 /* 0(%rsp): interrupt number */
/*
 * interrupt \func:
 * Common interrupt bookkeeping — save the argument registers, under
 * CONFIG_DEBUG_INFO also set up an rbp frame pointer so gdb can unwind
 * to the interrupted stack, then bump pda_irqcount and conditionally
 * (cmoveq, predicated on flags from the increment) switch to the
 * per-cpu IRQ stack, saving the old stack pointer on it.  \func is
 * called with a pt_regs pointer in %rdi.
 */
471 .macro interrupt func
473 #ifdef CONFIG_DEBUG_INFO
477 * Setup a stack frame pointer. This allows gdb to trace
478 * back to the original stack.
481 CFI_DEF_CFA_REGISTER rbp
484 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
489 1: incl %gs:pda_irqcount # RED-PEN should check preempt count
490 movq %gs:pda_irqstackptr,%rax
491 cmoveq %rax,%rsp /*todo This needs CFI annotation! */
492 pushq %rdi # save old stack
493 #ifndef CONFIG_DEBUG_INFO
494 CFI_ADJUST_CFA_OFFSET 8
499 ENTRY(common_interrupt)
/*
 * Interrupt return: pop the saved pre-IRQ-stack pointer, drop
 * pda_irqcount, and switch %rsp back to the original stack
 * (ARGOFFSET above the saved pt_regs pointer in %rdi).  Then decide:
 * a return to kernel mode goes to retint_kernel/retint_restore_args,
 * a return to user space falls into the work-flag check loop below.
 */
502 /* 0(%rsp): oldrsp-ARGOFFSET */
505 #ifndef CONFIG_DEBUG_INFO
506 CFI_ADJUST_CFA_OFFSET -8
509 decl %gs:pda_irqcount
510 #ifdef CONFIG_DEBUG_INFO
512 CFI_DEF_CFA_REGISTER rsp
514 leaq ARGOFFSET(%rdi),%rsp /*todo This needs CFI annotation! */
516 GET_THREAD_INFO(%rcx)
517 testl $3,CS-ARGOFFSET(%rsp)
520 /* Interrupt came from user space */
522 * Has a correct top of stack, but a partial stack frame
523 * %rcx: thread info. Interrupts off.
525 retint_with_reschedule:
526 movl $_TIF_WORK_MASK,%edi
528 movl threadinfo_flags(%rcx),%edx
/*
 * Exception-table entry: if the final IRET faults (e.g. a stale or
 * non-canonical user frame), control is redirected to bad_iret, which
 * raises a fault-style exit with a dummy error value while still on
 * kernel gs.
 */
540 .section __ex_table,"a"
541 .quad iret_label,bad_iret
544 /* force a signal here? this matches i386 behaviour */
545 /* running with kernel gs */
547 movq $-9999,%rdi /* better code? */
/*
 * retint_careful / retint_signal (labels elided in this extract):
 * reschedule loop and signal delivery before returning to user space.
 * Signals force a full frame (ORIG_RAX poisoned with -1) and call
 * do_notify_resume(&pt_regs, oldset=NULL, ...); afterwards only
 * NEED_RESCHED remains in the work mask and the loop re-checks.
 */
552 /* edi: workmask, edx: work */
555 bt $TIF_NEED_RESCHED,%edx
559 CFI_ADJUST_CFA_OFFSET 8
562 CFI_ADJUST_CFA_OFFSET -8
563 GET_THREAD_INFO(%rcx)
568 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
572 movq $-1,ORIG_RAX(%rsp)
573 xorl %esi,%esi # oldset
574 movq %rsp,%rdi # &pt_regs
575 call do_notify_resume
578 movl $_TIF_NEED_RESCHED,%edi
579 GET_THREAD_INFO(%rcx)
582 #ifdef CONFIG_PREEMPT
583 /* Returning to kernel space. Check if we need preemption */
584 /* rcx: threadinfo. interrupts off. */
/*
 * retint_kernel (defined as retint_restore_args when !CONFIG_PREEMPT,
 * see the #define near the top of the file): preempt the kernel only
 * when preempt_count is zero, NEED_RESCHED is set, and the interrupted
 * context had interrupts enabled (EFLAGS bit 9 == IF).
 */
587 cmpl $0,threadinfo_preempt_count(%rcx)
588 jnz retint_restore_args
589 bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
590 jnc retint_restore_args
591 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
592 jnc retint_restore_args
593 call preempt_schedule_irq
/*
 * apicinterrupt num,func:
 * Entry stub for an APIC interrupt: push the vector number and fall
 * into the common "interrupt" machinery with \func as the handler
 * (the interior of the macro is partly elided in this extract).
 * Below: one ENTRY per APIC vector, each just an apicinterrupt
 * instantiation; INVALIDATE_ENTRY stamps out the per-CPU TLB-shootdown
 * vectors.
 */
601 .macro apicinterrupt num,func
604 CFI_ADJUST_CFA_OFFSET 8
610 ENTRY(thermal_interrupt)
611 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
613 ENTRY(threshold_interrupt)
614 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
617 ENTRY(reschedule_interrupt)
618 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
620 .macro INVALIDATE_ENTRY num
621 ENTRY(invalidate_interrupt\num)
622 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
634 ENTRY(call_function_interrupt)
635 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
638 #ifdef CONFIG_X86_LOCAL_APIC
639 ENTRY(apic_timer_interrupt)
640 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
642 ENTRY(error_interrupt)
643 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
645 ENTRY(spurious_interrupt)
646 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
650 * Exception entry points.
/*
 * zeroentry (below) fakes a zero error code so exceptions without one
 * reach the common error path with the same stack layout as errorentry,
 * which is used when the CPU already pushed a real error code.
 */
654 pushq $0 /* push error code/oldrax */
655 CFI_ADJUST_CFA_OFFSET 8
656 pushq %rax /* push real oldrax to the rdi slot */
657 CFI_ADJUST_CFA_OFFSET 8
663 .macro errorentry sym
666 CFI_ADJUST_CFA_OFFSET 8
672 /* error code is on the stack already */
673 /* handle NMI like exceptions that can happen everywhere */
/*
 * paranoidentry sym, ist=0:
 * For NMI-like exceptions that can fire with a user GS base still
 * loaded — it reads MSR_GS_BASE to decide whether swapgs is needed,
 * and for IST vectors temporarily moves the TSS IST pointer down by
 * EXCEPTION_STKSZ around the handler call so a nested exception gets a
 * fresh stack.  The error code is passed in %rsi and ORIG_RAX is
 * poisoned with -1.
 */
674 .macro paranoidentry sym, ist=0
678 movl $MSR_GS_BASE,%ecx
686 movq %gs:pda_data_offset, %rbp
689 movq ORIG_RAX(%rsp),%rsi
690 movq $-1,ORIG_RAX(%rsp)
692 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
696 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
702 * Exception entry point. This expects an error code/orig_rax on the stack
703 * and the exception handler in %rax.
/*
 * error_entry: allocate the full pt_regs area (14*8 bytes), save every
 * register with matching CFI annotations, and shuffle the early-saved
 * rax out of the rdi slot into its own (RAX) slot.  On exit toward the
 * handler, the error code is loaded into %rsi and ORIG_RAX poisoned
 * with -1; %ebx carries the no-swapgs flag for the paranoid/error exit
 * path.  NOTE(review): the register stores between the CFI lines are
 * elided in this extract.
 */
707 /* rdi slot contains rax, oldrax contains error code */
710 CFI_ADJUST_CFA_OFFSET (14*8)
712 CFI_REL_OFFSET rsi,RSI
713 movq 14*8(%rsp),%rsi /* load rax from rdi slot */
715 CFI_REL_OFFSET rdx,RDX
717 CFI_REL_OFFSET rcx,RCX
718 movq %rsi,10*8(%rsp) /* store rax */
719 CFI_REL_OFFSET rax,RAX
725 CFI_REL_OFFSET r10,R10
727 CFI_REL_OFFSET r11,R11
729 CFI_REL_OFFSET rbx,RBX
731 CFI_REL_OFFSET rbp,RBP
733 CFI_REL_OFFSET r12,R12
735 CFI_REL_OFFSET r13,R13
737 CFI_REL_OFFSET r14,R14
739 CFI_REL_OFFSET r15,R15
748 movq ORIG_RAX(%rsp),%rsi /* get error code */
749 movq $-1,ORIG_RAX(%rsp)
751 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
751 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
756 GET_THREAD_INFO(%rcx)
759 movl threadinfo_flags(%rcx),%edx
760 movl $_TIF_WORK_MASK,%edi
770 /* There are two places in the kernel that can potentially fault with
771 usergs. Handle them here. The exception handlers after
772 iret run with kernel gs again, so don't set the user space flag.
773 B stepping K8s sometimes report an truncated RIP for IRET
774 exceptions returning to compat mode. Check for these here too. */
775 leaq iret_label(%rip),%rbp
778 movl %ebp,%ebp /* zero extend */
781 cmpq $gs_change,RIP(%rsp)
785 /* Reload gs selector with exception handling */
786 /* edi: new selector */
790 CFI_ADJUST_CFA_OFFSET 8
795 2: mfence /* workaround */
798 CFI_ADJUST_CFA_OFFSET -8
802 .section __ex_table,"a"
804 .quad gs_change,bad_gs
807 /* running with kernelgs */
809 swapgs /* switch back to user gs */
816 * Create a kernel thread.
818 * C extern interface:
819 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
821 * asm input arguments:
822 * rdi: fn, rsi: arg, rdx: flags
/*
 * Build a fake interrupt frame whose rip slot is child_rip, OR the
 * caller's clone flags with the kernel_thread_flags template, and fork
 * (the do_fork call and register shuffling are elided in this extract).
 * The child wakes up in child_rip with the registers as set here.
 */
826 FAKE_STACK_FRAME $child_rip
829 # rdi: flags, rsi: usp, rdx: will be &pt_regs
831 orq kernel_thread_flags(%rip),%rdi
844 * It isn't worth to check for reschedule here,
845 * so internally to the x86_64 port you can rely on kernel_thread()
846 * not to reschedule the child before returning, this avoids the need
847 * of hacks for example to fork off the per-CPU idle tasks.
848 * [Hopefully no generic code relies on the reschedule -AK]
858 * Here we are in the child and the registers are set as they were
859 * at kernel_thread() invocation in the parent.
869 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
871 * C extern interface:
872 * extern long execve(char *name, char **argv, char **envp)
874 * asm input arguments:
875 * rdi: name, rsi: argv, rdx: envp
877 * We want to fallback into:
878 * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
880 * do_sys_execve asm fallback arguments:
881 * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
/*
 * Kernel-internal execve wrapper: builds the fake frame described above
 * and, on success, returns via the IRET path so the fully rebuilt user
 * state is honored (the frame setup and sys_execve call are elided in
 * this extract).
 */
891 je int_ret_from_sys_call
/*
 * Exception entry points.  errorentry is used where the CPU pushes an
 * error code (page_fault); zeroentry where it does not.  debug (and
 * nmi, below) run on dedicated IST exception stacks via paranoidentry
 * with DEBUG_STACK.  KPROBE_ENTRY places the handler in the kprobes
 * section.
 */
897 KPROBE_ENTRY(page_fault)
898 errorentry do_page_fault
901 ENTRY(coprocessor_error)
902 zeroentry do_coprocessor_error
904 ENTRY(simd_coprocessor_error)
905 zeroentry do_simd_coprocessor_error
907 ENTRY(device_not_available)
908 zeroentry math_state_restore
910 /* runs on exception stack */
914 CFI_ADJUST_CFA_OFFSET 8
915 paranoidentry do_debug, DEBUG_STACK
920 /* runs on exception stack */
924 CFI_ADJUST_CFA_OFFSET 8
927 * "Paranoid" exit path from exception stack.
928 * Paranoid because this is used by NMIs and cannot take
929 * any kernel state for granted.
930 * We don't do kernel preemption checks here, because only
931 * NMI should be common and it does not enable IRQs and
932 * cannot get reschedule ticks.
/*
 * %ebx carries the no-swapgs flag decided at entry (0 => swapgs needed
 * before iret).  paranoid_userspace loops over the thread_info work
 * bits: NEED_RESCHED goes to the schedule path, signal-type work to
 * do_notify_resume — in both cases switching off the IST exception
 * stack first (%rsp from %rax) before calling into C (several
 * labels/branches are elided in this extract).
 */
934 /* ebx: no swapgs flag */
936 testl %ebx,%ebx /* swapgs needed? */
939 jnz paranoid_userspace
946 GET_THREAD_INFO(%rcx)
947 movl threadinfo_flags(%rcx),%ebx
948 andl $_TIF_WORK_MASK,%ebx
950 movq %rsp,%rdi /* &pt_regs */
952 movq %rax,%rsp /* switch stack for scheduling */
953 testl $_TIF_NEED_RESCHED,%ebx
954 jnz paranoid_schedule
955 movl %ebx,%edx /* arg3: thread flags */
957 xorl %esi,%esi /* arg2: oldset */
958 movq %rsp,%rdi /* arg1: &pt_regs */
959 call do_notify_resume
961 jmp paranoid_userspace
966 jmp paranoid_userspace
/*
 * Remaining exception entry points.  int3 uses paranoidentry on
 * DEBUG_STACK like do_debug; double_fault, stack_segment and
 * machine_check run on their own exception stacks (paranoidentry);
 * the rest are plain zeroentry/errorentry stubs matching whether the
 * CPU pushes an error code.
 */
973 CFI_ADJUST_CFA_OFFSET 8
974 paranoidentry do_int3, DEBUG_STACK
980 zeroentry do_overflow
986 zeroentry do_invalid_op
988 ENTRY(coprocessor_segment_overrun)
989 zeroentry do_coprocessor_segment_overrun
992 zeroentry do_reserved
994 /* runs on exception stack */
997 paranoidentry do_double_fault
1002 errorentry do_invalid_TSS
1004 ENTRY(segment_not_present)
1005 errorentry do_segment_not_present
1007 /* runs on exception stack */
1008 ENTRY(stack_segment)
1010 paranoidentry do_stack_segment
1014 KPROBE_ENTRY(general_protection)
1015 errorentry do_general_protection
1018 ENTRY(alignment_check)
1019 errorentry do_alignment_check
1022 zeroentry do_divide_error
1024 ENTRY(spurious_interrupt_bug)
1025 zeroentry do_spurious_interrupt_bug
1027 #ifdef CONFIG_X86_MCE
1028 /* runs on exception stack */
1029 ENTRY(machine_check)
1032 CFI_ADJUST_CFA_OFFSET 8
1033 paranoidentry do_machine_check
/*
 * NOTE(review): entry label elided in this extract — from the
 * irqstack switch bracketed by pda_irqcount inc/dec this appears to be
 * call_softirq, which runs the softirq handler on the per-cpu IRQ
 * stack.  Confirm against the full source.
 */
1040 movq %gs:pda_irqstackptr,%rax
1042 CFI_DEF_CFA_REGISTER rdx
1043 incl %gs:pda_irqcount
1046 /*todo CFI_DEF_CFA_EXPRESSION ...*/
1049 CFI_DEF_CFA_REGISTER rsp
1050 decl %gs:pda_irqcount