2 * linux/arch/i386/entry.S
4 * Copyright (C) 1991, 1992 Linus Torvalds
8 * entry.S contains the system-call and fault low-level handling routines.
9 * This also contains the timer-interrupt handler, as well as all interrupts
10 * and faults that can result in a task-switch.
12 * NOTE: This code handles signal-recognition, which happens every time
13 * after a timer-interrupt and after each system call.
15 * I changed all the .align's to 4 (16 byte alignment), as that's faster
18 * Stack layout in 'ret_from_system_call':
19 * ptrace needs to have all regs on the stack.
20 * if the order here is changed, it needs to be
21 * updated in fork.c:copy_process, signal.c:do_signal,
22 * ptrace.c and ptrace.h
40 * "current" is in register %ebx during any slow entries.
43 #include <linux/linkage.h>
44 #include <asm/thread_info.h>
45 #include <asm/irqflags.h>
46 #include <asm/errno.h>
47 #include <asm/segment.h>
51 #include <asm/dwarf2.h>
52 #include "irq_vectors.h"
53 #include <xen/interface/xen.h>
/* Number of system calls: syscall_table_size (set at the end of this file,
 * right after syscall_table.S is included) is the table size in bytes, and
 * each table entry is a 4-byte function pointer. */
55 #define nr_syscalls ((syscall_table_size)/4)
/* Native (non-Xen) interrupt control: plain cli/sti. */
83 #define DISABLE_INTERRUPTS cli
84 #define ENABLE_INTERRUPTS sti
86 /* Offsets into shared_info_t. */
/* evtchn_upcall_pending is at offset 0, so this macro deliberately expands
 * to nothing: "evtchn_upcall_pending(%esi)" becomes just "(%esi)". */
87 #define evtchn_upcall_pending /* 0 */
88 #define evtchn_upcall_mask 1
/* Shift used below to scale a CPU number into a vcpu_info offset:
 * 1 << 6 = 64 bytes per vcpu_info entry in the shared info page. */
90 #define sizeof_vcpu_shift 6
/* SMP variant: leave %esi pointing at this CPU's vcpu_info, i.e.
 * HYPERVISOR_shared_info + (thread_info->cpu << sizeof_vcpu_shift).
 * Requires %ebp = current thread_info (see "current is in %ebx" note
 * in the file header; here TI_cpu is read via %ebp). Clobbers %esi. */
93 #define GET_VCPU_INFO movl TI_cpu(%ebp),%esi ; \
94 shl $sizeof_vcpu_shift,%esi ; \
95 addl HYPERVISOR_shared_info,%esi
/* UP variant: vcpu_info[0] sits at the very start of the shared page. */
97 #define GET_VCPU_INFO movl HYPERVISOR_shared_info,%esi
/* Mask/unmask event-channel upcalls ("virtual interrupts") for this vcpu.
 * %esi must already point at its vcpu_info (i.e. GET_VCPU_INFO first). */
100 #define __DISABLE_INTERRUPTS movb $1,evtchn_upcall_mask(%esi)
101 #define __ENABLE_INTERRUPTS movb $0,evtchn_upcall_mask(%esi)
102 #define DISABLE_INTERRUPTS GET_VCPU_INFO ; \
104 #define ENABLE_INTERRUPTS GET_VCPU_INFO ; \
/* Sets ZF iff no event upcall is pending for this vcpu (%esi = vcpu_info);
 * callers branch with jnz to process pending events. */
106 #define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(%esi)
/* With CONFIG_PREEMPT, exception-return paths must stop with interrupts
 * hard-disabled (and inform the irq-flags tracer) before inspecting
 * preempt_count / need_resched. */
109 #ifdef CONFIG_PREEMPT
110 #define preempt_stop cli; TRACE_IRQS_OFF
/* NOTE(review): this looks like the !CONFIG_PREEMPT fallback -- the #else
 * branch is elided here; non-preemptible kernels never reschedule on
 * return to kernel mode, so they jump straight to restore_nocheck. */
113 #define resume_kernel restore_nocheck
116 .macro TRACE_IRQS_IRET
117 #ifdef CONFIG_TRACE_IRQFLAGS
118 testl $IF_MASK,EFLAGS(%esp) # interrupts off?
/* Target used after signal delivery (do_signal) to resume userspace.
 * NOTE(review): the #ifdef choosing between these two definitions is
 * elided in this view -- presumably VM86 support requires redoing the
 * full user/kernel check (check_userspace); confirm against the full file. */
126 #define resume_userspace_sig check_userspace
128 #define resume_userspace_sig resume_userspace
134 CFI_ADJUST_CFA_OFFSET 4;\
135 /*CFI_REL_OFFSET es, 0;*/\
137 CFI_ADJUST_CFA_OFFSET 4;\
138 /*CFI_REL_OFFSET ds, 0;*/\
140 CFI_ADJUST_CFA_OFFSET 4;\
141 CFI_REL_OFFSET eax, 0;\
143 CFI_ADJUST_CFA_OFFSET 4;\
144 CFI_REL_OFFSET ebp, 0;\
146 CFI_ADJUST_CFA_OFFSET 4;\
147 CFI_REL_OFFSET edi, 0;\
149 CFI_ADJUST_CFA_OFFSET 4;\
150 CFI_REL_OFFSET esi, 0;\
152 CFI_ADJUST_CFA_OFFSET 4;\
153 CFI_REL_OFFSET edx, 0;\
155 CFI_ADJUST_CFA_OFFSET 4;\
156 CFI_REL_OFFSET ecx, 0;\
158 CFI_ADJUST_CFA_OFFSET 4;\
159 CFI_REL_OFFSET ebx, 0;\
160 movl $(__USER_DS), %edx; \
164 #define RESTORE_INT_REGS \
166 CFI_ADJUST_CFA_OFFSET -4;\
169 CFI_ADJUST_CFA_OFFSET -4;\
172 CFI_ADJUST_CFA_OFFSET -4;\
175 CFI_ADJUST_CFA_OFFSET -4;\
178 CFI_ADJUST_CFA_OFFSET -4;\
181 CFI_ADJUST_CFA_OFFSET -4;\
184 CFI_ADJUST_CFA_OFFSET -4;\
187 #define RESTORE_REGS \
190 CFI_ADJUST_CFA_OFFSET -4;\
193 CFI_ADJUST_CFA_OFFSET -4;\
195 .section .fixup,"ax"; \
201 .section __ex_table,"a";\
207 #define RING0_INT_FRAME \
208 CFI_STARTPROC simple;\
209 CFI_DEF_CFA esp, 3*4;\
210 /*CFI_OFFSET cs, -2*4;*/\
213 #define RING0_EC_FRAME \
214 CFI_STARTPROC simple;\
215 CFI_DEF_CFA esp, 4*4;\
216 /*CFI_OFFSET cs, -2*4;*/\
219 #define RING0_PTREGS_FRAME \
220 CFI_STARTPROC simple;\
221 CFI_DEF_CFA esp, OLDESP-EBX;\
222 /*CFI_OFFSET cs, CS-OLDESP;*/\
223 CFI_OFFSET eip, EIP-OLDESP;\
224 /*CFI_OFFSET es, ES-OLDESP;*/\
225 /*CFI_OFFSET ds, DS-OLDESP;*/\
226 CFI_OFFSET eax, EAX-OLDESP;\
227 CFI_OFFSET ebp, EBP-OLDESP;\
228 CFI_OFFSET edi, EDI-OLDESP;\
229 CFI_OFFSET esi, ESI-OLDESP;\
230 CFI_OFFSET edx, EDX-OLDESP;\
231 CFI_OFFSET ecx, ECX-OLDESP;\
232 CFI_OFFSET ebx, EBX-OLDESP
237 CFI_ADJUST_CFA_OFFSET 4
239 GET_THREAD_INFO(%ebp)
241 CFI_ADJUST_CFA_OFFSET -4
242 pushl $0x0202 # Reset kernel eflags
243 CFI_ADJUST_CFA_OFFSET 4
245 CFI_ADJUST_CFA_OFFSET -4
250 * Return to user mode is not as complex as all this looks,
251 * but we want the default path for a system call return to
252 * go as quickly as possible which is why some of this is
253 * less clear than it otherwise should be.
256 # userspace resumption stub bypassing syscall exit tracing
262 GET_THREAD_INFO(%ebp)
264 movl EFLAGS(%esp), %eax # mix EFLAGS and CS
266 testl $(VM_MASK | 2), %eax
268 ENTRY(resume_userspace)
269 DISABLE_INTERRUPTS # make sure we don't miss an interrupt
270 # setting need_resched or sigpending
271 # between sampling and the iret
272 movl TI_flags(%ebp), %ecx
273 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
274 # int/exception return?
278 #ifdef CONFIG_PREEMPT
281 cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
284 movl TI_flags(%ebp), %ecx # need_resched set ?
285 testb $_TIF_NEED_RESCHED, %cl
287 testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
289 call preempt_schedule_irq
294 /* SYSENTER_RETURN points to after the "sysenter" instruction in
295 the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
297 # sysenter call handler stub
298 ENTRY(sysenter_entry)
301 CFI_REGISTER esp, ebp
302 movl SYSENTER_stack_esp0(%esp),%esp
305 * No need to follow this irqs on/off section: the syscall
306 * disabled irqs and here we enable it straight after entry:
310 CFI_ADJUST_CFA_OFFSET 4
311 /*CFI_REL_OFFSET ss, 0*/
313 CFI_ADJUST_CFA_OFFSET 4
314 CFI_REL_OFFSET esp, 0
316 CFI_ADJUST_CFA_OFFSET 4
318 CFI_ADJUST_CFA_OFFSET 4
319 /*CFI_REL_OFFSET cs, 0*/
321 * Push current_thread_info()->sysenter_return to the stack.
322 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
323 * pushed above; +8 corresponds to copy_thread's esp0 setting.
325 pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
326 CFI_ADJUST_CFA_OFFSET 4
327 CFI_REL_OFFSET eip, 0
330 * Load the potential sixth argument from user stack.
331 * Careful about security.
333 cmpl $__PAGE_OFFSET-3,%ebp
336 .section __ex_table,"a"
338 .long 1b,syscall_fault
342 CFI_ADJUST_CFA_OFFSET 4
344 GET_THREAD_INFO(%ebp)
346 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
347 testw $(_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
348 jnz syscall_trace_entry
349 cmpl $(nr_syscalls), %eax
351 call *sys_call_table(,%eax,4)
355 movl TI_flags(%ebp), %ecx
356 testw $_TIF_ALLWORK_MASK, %cx
357 jne syscall_exit_work
358 /* if something modifies registers it must also disable sysexit */
360 movl OLDESP(%esp), %ecx
365 sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/
367 jnz 14f # process more events if necessary...
370 14: __DISABLE_INTERRUPTS
371 sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/
373 CFI_ADJUST_CFA_OFFSET 4
374 call evtchn_do_upcall
376 CFI_ADJUST_CFA_OFFSET -4
381 #endif /* !CONFIG_XEN */
385 # system call handler stub
387 RING0_INT_FRAME # can't unwind into user space anyway
388 pushl %eax # save orig_eax
389 CFI_ADJUST_CFA_OFFSET 4
391 GET_THREAD_INFO(%ebp)
392 testl $TF_MASK,EFLAGS(%esp)
394 orl $_TIF_SINGLESTEP,TI_flags(%ebp)
396 # system call tracing in operation / emulation
397 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
398 testw $(_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
399 jnz syscall_trace_entry
400 cmpl $(nr_syscalls), %eax
403 call *sys_call_table(,%eax,4)
404 movl %eax,EAX(%esp) # store the return value
406 DISABLE_INTERRUPTS # make sure we don't miss an interrupt
407 # setting need_resched or sigpending
408 # between sampling and the iret
410 movl TI_flags(%ebp), %ecx
411 testw $_TIF_ALLWORK_MASK, %cx # current->work
412 jne syscall_exit_work
416 movl EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
417 # Warning: OLDSS(%esp) contains the wrong/random values if we
418 # are returning to the kernel.
419 # See comments in process.c:copy_thread() for details.
420 movb OLDSS(%esp), %ah
422 andl $(VM_MASK | (4 << 8) | 3), %eax
423 cmpl $((4 << 8) | 3), %eax
424 je ldt_ss # returning to user-space with LDT SS
428 movl EFLAGS(%esp), %eax
429 testl $(VM_MASK|NMI_MASK), %eax
431 shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
433 andb evtchn_upcall_mask(%esi),%al
434 andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
435 jnz restore_all_enable_events # != 0 => enable event delivery
439 restore_nocheck_notrace:
442 CFI_ADJUST_CFA_OFFSET -4
450 pushl $0 # no error code
454 .section __ex_table,"a"
462 larl OLDSS(%esp), %eax
464 testl $0x00400000, %eax # returning to 32bit stack?
465 jnz restore_nocheck # allright, normal return
466 /* If returning to userspace with 16bit stack,
467 * try to fix the higher word of ESP, as the CPU
469 * This is an "official" bug of all the x86-compatible
470 * CPUs, which we can try to work around to make
471 * dosemu and wine happy. */
472 subl $8, %esp # reserve space for switch16 pointer
473 CFI_ADJUST_CFA_OFFSET 8
477 /* Set up the 16bit stack frame with switch32 pointer on top,
478 * and a switch16 pointer on top of the current frame. */
479 call setup_x86_bogus_stack
480 CFI_ADJUST_CFA_OFFSET -8 # frame has moved
483 lss 20+4(%esp), %esp # switch to 16bit stack
485 .section __ex_table,"a"
491 andl $~NMI_MASK, EFLAGS(%esp)
494 jmp hypercall_page + (__HYPERVISOR_iret * 32)
498 # perform work that needs to be done immediately before resumption
500 RING0_PTREGS_FRAME # can't unwind into user space anyway
502 testb $_TIF_NEED_RESCHED, %cl
506 DISABLE_INTERRUPTS # make sure we don't miss an interrupt
507 # setting need_resched or sigpending
508 # between sampling and the iret
510 movl TI_flags(%ebp), %ecx
511 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
512 # than syscall tracing?
514 testb $_TIF_NEED_RESCHED, %cl
517 work_notifysig: # deal with pending signals and
518 # notify-resume requests
519 testl $VM_MASK, EFLAGS(%esp)
521 jne work_notifysig_v86 # returning to kernel-space or
524 call do_notify_resume
525 jmp resume_userspace_sig
530 pushl %ecx # save ti_flags for do_notify_resume
531 CFI_ADJUST_CFA_OFFSET 4
532 call save_v86_state # %eax contains pt_regs pointer
534 CFI_ADJUST_CFA_OFFSET -4
537 call do_notify_resume
538 jmp resume_userspace_sig
541 # perform syscall exit tracing
544 movl $-ENOSYS,EAX(%esp)
547 call do_syscall_trace
548 movl ORIG_EAX(%esp), %eax
549 cmpl $(nr_syscalls), %eax
553 # perform syscall exit tracing
556 testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
559 ENABLE_INTERRUPTS # could let do_syscall_trace() call
563 call do_syscall_trace
567 RING0_INT_FRAME # can't unwind into user space anyway
569 pushl %eax # save orig_eax
570 CFI_ADJUST_CFA_OFFSET 4
572 GET_THREAD_INFO(%ebp)
573 movl $-EFAULT,EAX(%esp)
577 movl $-ENOSYS,EAX(%esp)
582 #define FIXUP_ESPFIX_STACK \
584 /* switch to 32bit stack using the pointer on top of 16bit stack */ \
585 lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
586 /* copy data from 16bit stack to 32bit stack */ \
587 call fixup_x86_bogus_stack; \
588 /* put ESP to the proper location */ \
590 #define UNWIND_ESPFIX_STACK \
592 CFI_ADJUST_CFA_OFFSET 4; \
594 /* see if on 16bit stack */ \
595 cmpw $__ESPFIX_SS, %ax; \
598 CFI_ADJUST_CFA_OFFSET -4; \
599 .section .fixup,"ax"; \
600 28: movl $__KERNEL_DS, %eax; \
603 /* switch to 32bit stack */ \
604 FIXUP_ESPFIX_STACK; \
609 * Build the entry stubs and pointer table with
610 * some assembler magic.
617 ENTRY(irq_entries_start)
622 CFI_ADJUST_CFA_OFFSET -4
625 CFI_ADJUST_CFA_OFFSET 4
634 * the CPU automatically disables interrupts when executing an IRQ vector,
635 * so IRQ-flags tracing has to follow that:
646 #define BUILD_INTERRUPT(name, nr) \
651 CFI_ADJUST_CFA_OFFSET 4; \
659 /* The include is where all of the SMP etc. interrupts come from */
660 #include "entry_arch.h"
662 #define UNWIND_ESPFIX_STACK
667 pushl $0 # no error code
668 CFI_ADJUST_CFA_OFFSET 4
669 pushl $do_divide_error
670 CFI_ADJUST_CFA_OFFSET 4
674 CFI_ADJUST_CFA_OFFSET 4
675 /*CFI_REL_OFFSET ds, 0*/
677 CFI_ADJUST_CFA_OFFSET 4
678 CFI_REL_OFFSET eax, 0
681 CFI_ADJUST_CFA_OFFSET 4
682 CFI_REL_OFFSET ebp, 0
684 CFI_ADJUST_CFA_OFFSET 4
685 CFI_REL_OFFSET edi, 0
687 CFI_ADJUST_CFA_OFFSET 4
688 CFI_REL_OFFSET esi, 0
690 CFI_ADJUST_CFA_OFFSET 4
691 CFI_REL_OFFSET edx, 0
694 CFI_ADJUST_CFA_OFFSET 4
695 CFI_REL_OFFSET ecx, 0
697 CFI_ADJUST_CFA_OFFSET 4
698 CFI_REL_OFFSET ebx, 0
701 CFI_ADJUST_CFA_OFFSET 4
702 /*CFI_REL_OFFSET es, 0*/
705 CFI_ADJUST_CFA_OFFSET -4
706 /*CFI_REGISTER es, ecx*/
707 movl ES(%esp), %edi # get the function address
708 movl ORIG_EAX(%esp), %edx # get the error code
709 movl %eax, ORIG_EAX(%esp)
711 /*CFI_REL_OFFSET es, ES*/
712 movl $(__USER_DS), %ecx
715 movl %esp,%eax # pt_regs pointer
717 jmp ret_from_exception
721 # A note on the "critical region" in our callback handler.
722 # We want to avoid stacking callback handlers due to events occurring
723 # during handling of the last event. To do this, we keep events disabled
724 # until we've done all processing. HOWEVER, we must enable events before
725 # popping the stack frame (can't be done atomically) and so it would still
726 # be possible to get enough handler activations to overflow the stack.
727 # Although unlikely, bugs of that kind are hard to track down, so we'd
728 # like to avoid the possibility.
729 # So, on entry to the handler we detect whether we interrupted an
730 # existing activation in its critical region -- if so, we pop the current
731 # activation and restart the handler using the previous one.
733 # The sysexit critical region is slightly different. sysexit
734 # atomically removes the entire stack frame. If we interrupt in the
735 # critical region we know that the entire frame is present and correct
736 # so we can simply throw away the new one.
737 ENTRY(hypervisor_callback)
740 CFI_ADJUST_CFA_OFFSET 4
746 jb critical_region_fixup
747 cmpl $sysexit_scrit,%eax
749 cmpl $sysexit_ecrit,%eax
751 # interrupted in sysexit critical
752 addl $0x34,%esp # Remove cs...ebx from stack frame.
753 # this popped off new frame to reuse the old one, therefore no
754 # CFI_ADJUST_CFA_OFFSET here
756 CFI_ADJUST_CFA_OFFSET 4
757 call evtchn_do_upcall
759 CFI_ADJUST_CFA_OFFSET -4
763 restore_all_enable_events:
765 scrit: /**** START OF CRITICAL REGION ****/
767 jnz 14f # process more events if necessary...
770 CFI_ADJUST_CFA_OFFSET -4
772 .section __ex_table,"a"
776 14: __DISABLE_INTERRUPTS
778 ecrit: /**** END OF CRITICAL REGION ****/
779 # [How we do the fixup]. We want to merge the current stack frame with the
780 # just-interrupted frame. How we do this depends on where in the critical
781 # region the interrupted handler was executing, and so how many saved
782 # registers are in each frame. We do this quickly using the lookup table
783 # 'critical_fixup_table'. For each byte offset in the critical region, it
784 # provides the number of bytes which have already been popped from the
785 # interrupted stack frame.
786 critical_region_fixup:
787 addl $critical_fixup_table-scrit,%eax
788 movzbl (%eax),%eax # %eax contains num bytes popped
789 cmpb $0xff,%al # 0xff => vcpu_info critical region
791 GET_THREAD_INFO(%ebp)
794 add %eax,%esi # %esi points at end of src region
796 add $0x34,%edi # %edi points at end of dst region
798 shr $2,%ecx # convert words to bytes
799 je 17f # skip loop if nothing to copy
800 16: subl $4,%esi # pre-decrementing copy loop
805 17: movl %edi,%esp # final %edi is top of merged stack
806 # this popped off new frame to reuse the old one, therefore no
807 # CFI_DEF_CFA_OFFSET here
811 critical_fixup_table:
812 .byte 0xff,0xff,0xff # testb $0xff,(%esi) = __TEST_PENDING
813 .byte 0xff,0xff # jnz 14f
814 .byte 0x00 # pop %ebx
815 .byte 0x04 # pop %ecx
816 .byte 0x08 # pop %edx
817 .byte 0x0c # pop %esi
818 .byte 0x10 # pop %edi
819 .byte 0x14 # pop %ebp
820 .byte 0x18 # pop %eax
823 .byte 0x24,0x24,0x24 # add $4,%esp
825 .byte 0xff,0xff,0xff,0xff # movb $1,1(%esi)
826 .byte 0x00,0x00 # jmp 11b
828 # Hypervisor uses this for application faults while it executes.
829 # We get here for two reasons:
830 # 1. Fault while reloading DS, ES, FS or GS
831 # 2. Fault while executing IRET
832 # Category 1 we fix up by reattempting the load, and zeroing the segment
833 # register if the load fails.
834 # Category 2 we fix up by jumping to do_iret_error. We cannot use the
835 # normal Linux return path in this case because if we use the IRET hypercall
836 # to pop the stack frame we end up in an infinite loop of failsafe callbacks.
837 # We distinguish between categories by maintaining a status value in EAX.
838 ENTRY(failsafe_callback)
841 CFI_ADJUST_CFA_OFFSET 4
849 CFI_ADJUST_CFA_OFFSET -4
851 addl $16,%esp # EAX != 0 => Category 2 (Bad IRET)
852 CFI_ADJUST_CFA_OFFSET -16
854 CFI_ADJUST_CFA_OFFSET 16
855 5: addl $16,%esp # EAX == 0 => Category 1 (Bad segment)
856 CFI_ADJUST_CFA_OFFSET -16
858 CFI_ADJUST_CFA_OFFSET 4
860 jmp ret_from_exception
861 .section .fixup,"ax"; \
869 movl %eax,12(%esp); \
872 movl %eax,16(%esp); \
875 .section __ex_table,"a"; \
885 ENTRY(coprocessor_error)
888 CFI_ADJUST_CFA_OFFSET 4
889 pushl $do_coprocessor_error
890 CFI_ADJUST_CFA_OFFSET 4
894 ENTRY(simd_coprocessor_error)
897 CFI_ADJUST_CFA_OFFSET 4
898 pushl $do_simd_coprocessor_error
899 CFI_ADJUST_CFA_OFFSET 4
903 ENTRY(device_not_available)
905 pushl $-1 # mark this as an int
906 CFI_ADJUST_CFA_OFFSET 4
910 testl $0x4, %eax # EM (math emulation bit)
911 je device_available_emulate
912 pushl $0 # temporary storage for ORIG_EIP
913 CFI_ADJUST_CFA_OFFSET 4
916 CFI_ADJUST_CFA_OFFSET -4
917 jmp ret_from_exception
918 device_available_emulate:
921 call math_state_restore
922 jmp ret_from_exception
927 * Debug traps and NMI can happen at the one SYSENTER instruction
928 * that sets up the real kernel stack. Check here, since we can't
929 * allow the wrong stack to be used.
931 * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
932 * already pushed 3 words if it hits on the sysenter instruction:
933 * eflags, cs and eip.
935 * We just load the right stack, and push the three (known) values
936 * by hand onto the new stack - while updating the return eip past
937 * the instruction that would have done it for sysenter.
939 #define FIX_STACK(offset, ok, label) \
940 cmpw $__KERNEL_CS,4(%esp); \
943 movl SYSENTER_stack_esp0+offset(%esp),%esp; \
945 pushl $__KERNEL_CS; \
946 pushl $sysenter_past_esp
947 #endif /* CONFIG_XEN */
952 cmpl $sysenter_entry,(%esp)
953 jne debug_stack_correct
954 FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
956 #endif /* !CONFIG_XEN */
957 pushl $-1 # mark this as an int
958 CFI_ADJUST_CFA_OFFSET 4
960 xorl %edx,%edx # error code 0
961 movl %esp,%eax # pt_regs pointer
963 jmp ret_from_exception
969 * NMI is doubly nasty. It can happen _while_ we're handling
970 * a debug fault, and the debug fault hasn't yet been able to
971 * clear up the stack. So we first check whether we got an
972 * NMI on the sysenter entry path, but after that we need to
973 * check whether we got an NMI on the debug path where the debug
974 * fault happened on the sysenter path.
979 CFI_ADJUST_CFA_OFFSET 4
981 cmpw $__ESPFIX_SS, %ax
983 CFI_ADJUST_CFA_OFFSET -4
985 cmpl $sysenter_entry,(%esp)
988 CFI_ADJUST_CFA_OFFSET 4
990 /* Do not access memory above the end of our stack page,
991 * it might not exist.
993 andl $(THREAD_SIZE-1),%eax
994 cmpl $(THREAD_SIZE-20),%eax
996 CFI_ADJUST_CFA_OFFSET -4
997 jae nmi_stack_correct
998 cmpl $sysenter_entry,12(%esp)
999 je nmi_debug_stack_check
1002 CFI_ADJUST_CFA_OFFSET 4
1004 xorl %edx,%edx # zero error code
1005 movl %esp,%eax # pt_regs pointer
1007 jmp restore_nocheck_notrace
1011 FIX_STACK(12,nmi_stack_correct, 1)
1012 jmp nmi_stack_correct
1013 nmi_debug_stack_check:
1014 cmpw $__KERNEL_CS,16(%esp)
1015 jne nmi_stack_correct
1017 jb nmi_stack_correct
1018 cmpl $debug_esp_fix_insn,(%esp)
1019 ja nmi_stack_correct
1020 FIX_STACK(24,nmi_stack_correct, 1)
1021 jmp nmi_stack_correct
1025 /* create the pointer to lss back */
1027 CFI_ADJUST_CFA_OFFSET 4
1029 CFI_ADJUST_CFA_OFFSET 4
1032 /* copy the iret frame of 12 bytes */
1035 CFI_ADJUST_CFA_OFFSET 4
1038 CFI_ADJUST_CFA_OFFSET 4
1040 FIXUP_ESPFIX_STACK # %eax == %esp
1041 CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
1042 xorl %edx,%edx # zero error code
1045 lss 12+4(%esp), %esp # back to 16bit stack
1048 .section __ex_table,"a"
1056 CFI_ADJUST_CFA_OFFSET 4
1058 xorl %edx,%edx # zero error code
1059 movl %esp,%eax # pt_regs pointer
1061 orl $NMI_MASK, EFLAGS(%esp)
1068 pushl $-1 # mark this as an int
1069 CFI_ADJUST_CFA_OFFSET 4
1071 xorl %edx,%edx # zero error code
1072 movl %esp,%eax # pt_regs pointer
1074 jmp ret_from_exception
1081 CFI_ADJUST_CFA_OFFSET 4
1083 CFI_ADJUST_CFA_OFFSET 4
1090 CFI_ADJUST_CFA_OFFSET 4
1092 CFI_ADJUST_CFA_OFFSET 4
1099 CFI_ADJUST_CFA_OFFSET 4
1100 pushl $do_invalid_op
1101 CFI_ADJUST_CFA_OFFSET 4
1105 ENTRY(coprocessor_segment_overrun)
1108 CFI_ADJUST_CFA_OFFSET 4
1109 pushl $do_coprocessor_segment_overrun
1110 CFI_ADJUST_CFA_OFFSET 4
1116 pushl $do_invalid_TSS
1117 CFI_ADJUST_CFA_OFFSET 4
1121 ENTRY(segment_not_present)
1123 pushl $do_segment_not_present
1124 CFI_ADJUST_CFA_OFFSET 4
1128 ENTRY(stack_segment)
1130 pushl $do_stack_segment
1131 CFI_ADJUST_CFA_OFFSET 4
1135 KPROBE_ENTRY(general_protection)
1137 pushl $do_general_protection
1138 CFI_ADJUST_CFA_OFFSET 4
1143 ENTRY(alignment_check)
1145 pushl $do_alignment_check
1146 CFI_ADJUST_CFA_OFFSET 4
1150 KPROBE_ENTRY(page_fault)
1152 pushl $do_page_fault
1153 CFI_ADJUST_CFA_OFFSET 4
1158 #ifdef CONFIG_X86_MCE
1159 ENTRY(machine_check)
1162 CFI_ADJUST_CFA_OFFSET 4
1163 pushl machine_check_vector
1164 CFI_ADJUST_CFA_OFFSET 4
1169 ENTRY(fixup_4gb_segment)
1171 pushl $do_fixup_4gb_segment
1172 CFI_ADJUST_CFA_OFFSET 4
1176 #ifdef CONFIG_STACK_UNWIND
1177 ENTRY(arch_unwind_init_running)
1182 movl %ebx, EBX(%edx)
1184 movl %ebx, ECX(%edx)
1185 movl %ebx, EDX(%edx)
1186 movl %esi, ESI(%edx)
1187 movl %edi, EDI(%edx)
1188 movl %ebp, EBP(%edx)
1189 movl %ebx, EAX(%edx)
1190 movl $__USER_DS, DS(%edx)
1191 movl $__USER_DS, ES(%edx)
1192 movl %ebx, ORIG_EAX(%edx)
1193 movl %ecx, EIP(%edx)
1195 movl $__KERNEL_CS, CS(%edx)
1196 movl %ebx, EFLAGS(%edx)
1197 movl %eax, OLDESP(%edx)
1200 movl EBX(%edx), %ebx
1201 movl $__KERNEL_DS, OLDSS(%edx)
1204 ENDPROC(arch_unwind_init_running)
1207 .section .rodata,"a"
1208 #include "syscall_table.S"
/* Byte size of sys_call_table ("." is the current location counter);
 * nr_syscalls near the top of this file divides this by 4. */
1210 syscall_table_size=(.-sys_call_table)