/*
 *  linux/arch/i386/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 * "current" is in register %ebx during any slow entries.
 */
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"
#include <xen/interface/xen.h>

#define nr_syscalls ((syscall_table_size)/4)
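/*
 * Each sys_call_table entry is a 4-byte pointer -- the dispatch below is
 * "call *sys_call_table(,%eax,4)" -- so dividing the table's byte size
 * (syscall_table_size, computed at the end of this file) by 4 yields the
 * number of entries.
 */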
#ifndef CONFIG_XEN
#define DISABLE_INTERRUPTS	cli
#define ENABLE_INTERRUPTS	sti
#else
/* Offsets into shared_info_t. */
#define evtchn_upcall_pending	/* 0 */
#define evtchn_upcall_mask	1

#define sizeof_vcpu_shift	6

#ifdef CONFIG_SMP
#define GET_VCPU_INFO		movl TI_cpu(%ebp),%esi			; \
				shl  $sizeof_vcpu_shift,%esi		; \
				addl HYPERVISOR_shared_info,%esi
#else
#define GET_VCPU_INFO		movl HYPERVISOR_shared_info,%esi
#endif
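/*
 * GET_VCPU_INFO leaves %esi pointing at this CPU's vcpu_info inside the
 * shared info page: with sizeof_vcpu_shift == 6, the SMP variant computes
 * %esi = HYPERVISOR_shared_info + cpu * 64, i.e. one 64-byte vcpu_info
 * slot per CPU.  The evtchn_upcall_* byte offsets above are then applied
 * relative to %esi.
 */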
#define __DISABLE_INTERRUPTS	movb $1,evtchn_upcall_mask(%esi)
#define __ENABLE_INTERRUPTS	movb $0,evtchn_upcall_mask(%esi)
#define DISABLE_INTERRUPTS	GET_VCPU_INFO				; \
				__DISABLE_INTERRUPTS
#define ENABLE_INTERRUPTS	GET_VCPU_INFO				; \
				__ENABLE_INTERRUPTS
#define __TEST_PENDING		testb $0xFF,evtchn_upcall_pending(%esi)
#endif
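/*
 * A sketch of how these primitives pair up on the exit paths below (see
 * restore_all_enable_events and sysexit_scrit): event delivery is
 * re-enabled and the pending flag is tested immediately afterwards, so an
 * upcall that arrives in that window is delivered before returning:
 *
 *	__ENABLE_INTERRUPTS
 *	__TEST_PENDING
 *	jnz  14f		# pending event => deliver it first
 */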
#ifdef CONFIG_PREEMPT
#define preempt_stop		cli; TRACE_IRQS_OFF
#else
#define preempt_stop
#define resume_kernel		restore_nocheck
#endif
.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif
#define SAVE_ALL \
	cld; \
	pushl %es; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET es, 0;*/\
	pushl %ds; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET ds, 0;*/\
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET eax, 0;\
	pushl %ebp; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebp, 0;\
	pushl %edi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edi, 0;\
	pushl %esi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET esi, 0;\
	pushl %edx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edx, 0;\
	pushl %ecx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ecx, 0;\
	pushl %ebx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebx, 0;\
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es;
#define RESTORE_INT_REGS \
	popl %ebx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	popl %ecx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	popl %edx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	popl %esi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	popl %edi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	popl %ebp;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	popl %eax;	\
	CFI_ADJUST_CFA_OFFSET -4
#define RESTORE_REGS	\
	RESTORE_INT_REGS; \
1:	popl %ds;	\
	CFI_ADJUST_CFA_OFFSET -4;\
2:	popl %es;	\
	CFI_ADJUST_CFA_OFFSET -4;\
.section .fixup,"ax";	\
3:	movl $0,(%esp);	\
	jmp 1b;		\
4:	movl $0,(%esp);	\
	jmp 2b;		\
.previous;		\
.section __ex_table,"a";\
	.align 4;	\
	.long 1b,3b;	\
	.long 2b,4b;	\
.previous
#define RING0_INT_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, 3*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_EC_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, 4*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_PTREGS_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, OLDESP-EBX;\
	/*CFI_OFFSET cs, CS-OLDESP;*/\
	CFI_OFFSET eip, EIP-OLDESP;\
	/*CFI_OFFSET es, ES-OLDESP;*/\
	/*CFI_OFFSET ds, DS-OLDESP;*/\
	CFI_OFFSET eax, EAX-OLDESP;\
	CFI_OFFSET ebp, EBP-OLDESP;\
	CFI_OFFSET edi, EDI-OLDESP;\
	CFI_OFFSET esi, ESI-OLDESP;\
	CFI_OFFSET edx, EDX-OLDESP;\
	CFI_OFFSET ecx, ECX-OLDESP;\
	CFI_OFFSET ebx, EBX-OLDESP
ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */
	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb CS(%esp), %al
	testl $(VM_MASK | 2), %eax
	jz resume_kernel
ENTRY(resume_userspace)
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
#endif
/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	CFI_STARTPROC simple
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl SYSENTER_stack_esp0(%esp),%esp
sysenter_past_esp:
	/*
	 * No need to follow this irqs on/off section: the syscall
	 * disabled irqs and here we enable them straight after entry:
	 */
	ENABLE_INTERRUPTS
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0
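	/*
	 * Worked example of the offset above: copy_thread sets esp0 to 8
	 * bytes below the top of the thread stack, and the 4 words just
	 * pushed lower %esp by another 16 bytes.  Subtracting THREAD_SIZE
	 * from %esp and adding back 8+4*4 therefore lands exactly on
	 * current_thread_info()+TI_sysenter_return, with no need for
	 * GET_THREAD_INFO's masking trick.
	 */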
	/*
	 * Load the potential sixth argument from user stack.
	 * Careful about security.
	 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
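	/*
	 * Why __PAGE_OFFSET-3: the 4-byte load at 1: touches bytes
	 * %ebp..%ebp+3, so any pointer above __PAGE_OFFSET-4 would let the
	 * load reach kernel addresses.  Pointers that pass the check but
	 * still fault (e.g. an unmapped user page) are caught by the
	 * __ex_table entry, which redirects the load to syscall_fault.
	 */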
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)		# store the return value
	DISABLE_INTERRUPTS
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
	movl EIP(%esp), %edx
	movl OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	__ENABLE_INTERRUPTS
sysexit_scrit:	/**** START OF SYSEXIT CRITICAL REGION ****/
	__TEST_PENDING
	jnz  14f			# process more events if necessary...
	sysexit
14:	__DISABLE_INTERRUPTS
sysexit_ecrit:	/**** END OF SYSEXIT CRITICAL REGION ****/
	push %esp
	CFI_ADJUST_CFA_OFFSET 4
	call evtchn_do_upcall
	add  $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp  ret_from_intr
#endif /* !CONFIG_XEN */
	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	testl $TF_MASK,EFLAGS(%esp)
	jz no_singlestep
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)		# store the return value
syscall_exit:
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work
restore_all:
	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	# Warning: OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb OLDSS(%esp), %ah
	movb CS(%esp), %al
	andl $(VM_MASK | (4 << 8) | 3), %eax
	cmpl $((4 << 8) | 3), %eax
	je ldt_ss			# returning to user-space with LDT SS
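	# How the mix works: after the two movb's, %al holds the saved CS
	# and %ah the saved SS.  The mask keeps the VM bit, SS's TI bit
	# (bit 2 of the selector, i.e. bit 10 of %eax, set for LDT
	# selectors) and CS's RPL; only a return to ring 3 through an LDT
	# stack segment matches (4 << 8) | 3 exactly.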
	movl EFLAGS(%esp), %eax
	testl $(VM_MASK|NMI_MASK), %eax
	jnz hypervisor_iret
	shr $9, %eax			# EAX[0] == IRET_EFLAGS.IF
	GET_VCPU_INFO
	andb evtchn_upcall_mask(%esi),%al
	andb $1,%al			# EAX[0] == IRET_EFLAGS.IF & event_mask
	jnz restore_all_enable_events	#        != 0 => enable event delivery

restore_nocheck:
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
1:	iret
.section .fixup,"ax"
iret_exc:
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
ldt_ss:
	larl OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return
	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
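	/* Concretely: on an iret to a 16-bit SS, the CPU restores only the
	 * low 16 bits of ESP, leaving the high word as whatever was on the
	 * kernel stack -- both a leak of kernel stack contents and a broken
	 * ESP for the application.  The code below therefore returns
	 * through a staging stack whose high ESP word is harmless. */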
	subl $8, %esp			# reserve space for switch16 pointer
	CFI_ADJUST_CFA_OFFSET 8
	movl %esp, %eax
	/* Set up the 16bit stack frame with switch32 pointer on top,
	 * and a switch16 pointer on top of the current frame. */
	call setup_x86_bogus_stack
	CFI_ADJUST_CFA_OFFSET -8	# frame has moved
	RESTORE_REGS
	lss 20+4(%esp), %esp		# switch to 16bit stack
1:	iret
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

hypervisor_iret:
	andl $~NMI_MASK, EFLAGS(%esp)
	RESTORE_REGS
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp hypercall_page + (__HYPERVISOR_iret * 32)
	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $VM_MASK, EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	cmpl $0, %eax
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx
	call do_syscall_trace
	jmp resume_userspace
	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,EAX(%esp)
	jmp resume_userspace

syscall_badsys:
	movl $-ENOSYS,EAX(%esp)
	jmp resume_userspace
	CFI_ENDPROC
#define FIXUP_ESPFIX_STACK \
	movl %esp, %eax; \
	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
	/* copy data from 16bit stack to 32bit stack */ \
	call fixup_x86_bogus_stack; \
	/* put ESP to the proper location */ \
	movl %eax, %esp;
#define UNWIND_ESPFIX_STACK \
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4; \
	movl %ss, %eax; \
	/* see if on 16bit stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	je 28f; \
27:	popl %eax; \
	CFI_ADJUST_CFA_OFFSET -4; \
.section .fixup,"ax"; \
28:	movl $__KERNEL_DS, %eax; \
	movl %eax, %ds; \
	movl %eax, %es; \
	/* switch to 32bit stack */ \
	FIXUP_ESPFIX_STACK; \
	jmp 27b; \
.previous
/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
vector=0
ENTRY(irq_entries_start)
.rept NR_IRQS
	ALIGN
 .if vector
	CFI_ADJUST_CFA_OFFSET -4
 .endif
1:	pushl $~(vector)
	CFI_ADJUST_CFA_OFFSET 4
	jmp common_interrupt
vector=vector+1
.endr

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	movl %esp,%eax;			\
	call smp_/**/name;		\
	jmp ret_from_intr;		\
	CFI_ENDPROC

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

#define UNWIND_ESPFIX_STACK
ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	xorl %eax, %eax
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	decl %eax			# eax = -1
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl ES(%esp), %edi		# get the function address
	movl ORIG_EAX(%esp), %edx	# get the error code
	movl %eax, ORIG_EAX(%esp)
	movl %ecx, ES(%esp)
	/*CFI_REL_OFFSET es, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
#
# The sysexit critical region is slightly different. sysexit
# atomically removes the entire stack frame. If we interrupt in the
# critical region we know that the entire frame is present and correct
# so we can simply throw away the new one.
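# Concretely: critical-region membership is just a range check on the
# saved EIP (scrit <= EIP < ecrit, and likewise for the sysexit pair), as
# done at the top of the handler below.  Example: if an event arrives
# after __TEST_PENDING saw nothing but before the final iret, the nested
# handler sees EIP inside [scrit,ecrit), discards its own frame via
# critical_region_fixup, and restarts the interrupted exit path instead
# of stacking a second activation.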
ENTRY(hypervisor_callback)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	movl EIP(%esp),%eax
	cmpl $scrit,%eax
	jb   11f
	cmpl $ecrit,%eax
	jb   critical_region_fixup
	cmpl $sysexit_scrit,%eax
	jb   11f
	cmpl $sysexit_ecrit,%eax
	ja   11f
	# interrupted in sysexit critical
	addl $0x34,%esp			# Remove cs...ebx from stack frame.
	# this popped off the new frame to reuse the old one, therefore no
	# CFI_ADJUST_CFA_OFFSET here
11:	push %esp
	CFI_ADJUST_CFA_OFFSET 4
	call evtchn_do_upcall
	add  $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp  ret_from_intr
	ALIGN
restore_all_enable_events:
	__ENABLE_INTERRUPTS
scrit:	/**** START OF CRITICAL REGION ****/
	__TEST_PENDING
	jnz  14f			# process more events if necessary...
	RESTORE_REGS
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
1:	iret
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
14:	__DISABLE_INTERRUPTS
	jmp  11b
ecrit:	/**** END OF CRITICAL REGION ****/
# [How we do the fixup]. We want to merge the current stack frame with the
# just-interrupted frame. How we do this depends on where in the critical
# region the interrupted handler was executing, and so how many saved
# registers are in each frame. We do this quickly using the lookup table
# 'critical_fixup_table'. For each byte offset in the critical region, it
# provides the number of bytes which have already been popped from the
# interrupted stack frame.
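# Worked example: if the upcall arrived just before the interrupted
# handler's "pop %edx" (%ebx and %ecx already popped), the table entry at
# that EIP offset is 0x08: 8 bytes are gone from the interrupted frame.
# The copy loop below then slides the first 8 bytes of the new frame up
# against the remnant of the old one and points %esp at the result, so a
# single complete frame is restarted.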
critical_region_fixup:
	addl $critical_fixup_table-scrit,%eax
	movzbl (%eax),%eax		# %eax contains num bytes popped
	cmpb $0xff,%al			# 0xff => vcpu_info critical region
	jne  15f
	GET_THREAD_INFO(%ebp)
	xorl %eax,%eax
15:	mov  %esp,%esi
	add  %eax,%esi			# %esi points at end of src region
	mov  %esp,%edi
	add  $0x34,%edi			# %edi points at end of dst region
	mov  %eax,%ecx
	shr  $2,%ecx			# convert bytes to words
	je   17f			# skip loop if nothing to copy
16:	subl $4,%esi			# pre-decrementing copy loop
	subl $4,%edi
	movl (%esi),%eax
	movl %eax,(%edi)
	jmp  16b
17:	movl %edi,%esp			# final %edi is top of merged stack
	# this popped off the new frame to reuse the old one, therefore no
	# CFI_DEF_CFA_OFFSET here
	jmp  11b
critical_fixup_table:
	.byte 0xff,0xff,0xff		# testb $0xff,(%esi) = __TEST_PENDING
	.byte 0xff,0xff			# jnz  14f
	.byte 0x00			# pop  %ebx
	.byte 0x04			# pop  %ecx
	.byte 0x08			# pop  %edx
	.byte 0x0c			# pop  %esi
	.byte 0x10			# pop  %edi
	.byte 0x14			# pop  %ebp
	.byte 0x18			# pop  %eax
	.byte 0x1c			# pop  %ds
	.byte 0x20			# pop  %es
	.byte 0x24,0x24,0x24		# add  $4,%esp
	.byte 0x28			# iret
	.byte 0xff,0xff,0xff,0xff	# movb $1,1(%esi)
	.byte 0x00,0x00			# jmp  11b
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
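# How the EAX status protocol plays out: %eax starts non-zero, and each of
# the segment reloads has a fixup (below) that zeroes %eax and the saved
# selector if the reload faults, then retries.  A non-zero %eax after all
# the reloads therefore means every segment was fine and the original
# fault must have been the IRET itself (Category 2); %eax == 0 means a bad
# segment was hit and has been zeroed (Category 1).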
ENTRY(failsafe_callback)
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl $1,%eax
1:	mov  4(%esp),%ds
2:	mov  8(%esp),%es
3:	mov  12(%esp),%fs
4:	mov  16(%esp),%gs
	testl %eax,%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jz   5f
	addl $16,%esp			# EAX != 0 => Category 2 (Bad IRET)
	CFI_ADJUST_CFA_OFFSET -16
	jmp  iret_exc
	CFI_ADJUST_CFA_OFFSET 16
5:	addl $16,%esp			# EAX == 0 => Category 1 (Bad segment)
	CFI_ADJUST_CFA_OFFSET -16
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	jmp  ret_from_exception
.section .fixup,"ax";	\
8:	xorl %eax,%eax;	\
	movl %eax,12(%esp); \
	jmp  3b;	\
9:	xorl %eax,%eax;	\
	movl %eax,16(%esp); \
	jmp  4b;	\
.previous;		\
.section __ex_table,"a";\
	.align 4;	\
	.long 3b,8b;	\
	.long 4b,9b;	\
.previous
ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_CR0_INTO_EAX
	testl $0x4, %eax		# EM (math emulation bit)
	je device_available_emulate
	pushl $0			# temporary storage for ORIG_EIP
	CFI_ADJUST_CFA_OFFSET 4
	call math_emulate
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_exception
device_available_emulate:
	preempt_stop
	call math_state_restore
	jmp ret_from_exception
	CFI_ENDPROC
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
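/*
 * Offset arithmetic for the FIX_STACK uses below: the debug handler calls
 * it with offset 12, skipping the 3 words its own trap pushed, while the
 * NMI path uses offset 24 when it nested on top of a debug trap that had
 * itself hit the sysenter instruction: 3 words from the debug trap plus
 * 3 more from the NMI.
 */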
#define FIX_STACK(offset, ok, label)		\
	cmpw $__KERNEL_CS,4(%esp);		\
	jne ok;					\
label:						\
	movl SYSENTER_stack_esp0+offset(%esp),%esp;	\
	pushfl;					\
	pushl $__KERNEL_CS;			\
	pushl $sysenter_past_esp
#endif /* CONFIG_XEN */
KPROBE_ENTRY(debug)
	RING0_INT_FRAME
#ifndef CONFIG_XEN
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
#endif /* !CONFIG_XEN */
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
KPROBE_ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_16bit_stack
	cmpl $sysenter_entry,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $sysenter_entry,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_nocheck_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	FIX_STACK(12,nmi_stack_correct, 1)
	jmp nmi_stack_correct
nmi_debug_stack_check:
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK(24,nmi_stack_correct, 1)
	jmp nmi_stack_correct
nmi_16bit_stack:
	RING0_INT_FRAME
	/* create the pointer to lss back */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	movzwl %sp, %esp
	addw $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	CFI_ADJUST_CFA_OFFSET -20	# the frame has now moved
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to 16bit stack
1:	iret
	CFI_ENDPROC
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	orl  $NMI_MASK, EFLAGS(%esp)
	jmp restore_all
KPROBE_ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
KPROBE_ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

KPROBE_ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
#endif

ENTRY(fixup_4gb_segment)
	RING0_EC_FRAME
	pushl $do_fixup_4gb_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
	CFI_STARTPROC
	movl	4(%esp), %edx
	movl	(%esp), %ecx
	leal	4(%esp), %eax
	movl	%ebx, EBX(%edx)
	xorl	%ebx, %ebx
	movl	%ebx, ECX(%edx)
	movl	%ebx, EDX(%edx)
	movl	%esi, ESI(%edx)
	movl	%edi, EDI(%edx)
	movl	%ebp, EBP(%edx)
	movl	%ebx, EAX(%edx)
	movl	$__USER_DS, DS(%edx)
	movl	$__USER_DS, ES(%edx)
	movl	%ebx, ORIG_EAX(%edx)
	movl	%ecx, EIP(%edx)
	movl	12(%esp), %ecx
	movl	$__KERNEL_CS, CS(%edx)
	movl	%ebx, EFLAGS(%edx)
	movl	%eax, OLDESP(%edx)
	movl	8(%esp), %eax
	movl	%ecx, 8(%esp)
	movl	%ebx, 4(%esp)
	movl	%ebx, (%esp)
	movl	EBX(%edx), %ebx
	movl	$__KERNEL_DS, OLDSS(%edx)
	jmpl	*%eax
	CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif
.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)