#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
-#include <asm/offset.h>
+#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
-#include <asm/errno.h>
+#include <asm/page.h>
.code64
-#ifdef CONFIG_PREEMPT
-#define preempt_stop cli
-#else
-#define preempt_stop
+#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif
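/*
 * Without CONFIG_PREEMPT there is nothing to do before returning to the
 * kernel, so retint_kernel can simply alias retint_restore_args and the
 * preemption check drops out of the return path.
 */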
.macro FAKE_STACK_FRAME child_rip
/* push in order ss, rsp, eflags, cs, rip */
- xorq %rax, %rax
+ xorl %eax, %eax
pushq %rax /* ss */
CFI_ADJUST_CFA_OFFSET 8
+ /*CFI_REL_OFFSET ss,0*/
pushq %rax /* rsp */
CFI_ADJUST_CFA_OFFSET 8
- CFI_OFFSET rip,0
+ CFI_REL_OFFSET rsp,0
pushq $(1<<9) /* eflags - interrupts on */
CFI_ADJUST_CFA_OFFSET 8
+ /*CFI_REL_OFFSET rflags,0*/
pushq $__KERNEL_CS /* cs */
CFI_ADJUST_CFA_OFFSET 8
+ /*CFI_REL_OFFSET cs,0*/
pushq \child_rip /* rip */
CFI_ADJUST_CFA_OFFSET 8
- CFI_OFFSET rip,0
+ CFI_REL_OFFSET rip,0
pushq %rax /* orig rax */
CFI_ADJUST_CFA_OFFSET 8
.endm
CFI_ADJUST_CFA_OFFSET -(6*8)
.endm
- .macro CFI_DEFAULT_STACK
- CFI_ADJUST_CFA_OFFSET (SS)
- CFI_OFFSET r15,R15-SS
- CFI_OFFSET r14,R14-SS
- CFI_OFFSET r13,R13-SS
- CFI_OFFSET r12,R12-SS
- CFI_OFFSET rbp,RBP-SS
- CFI_OFFSET rbx,RBX-SS
- CFI_OFFSET r11,R11-SS
- CFI_OFFSET r10,R10-SS
- CFI_OFFSET r9,R9-SS
- CFI_OFFSET r8,R8-SS
- CFI_OFFSET rax,RAX-SS
- CFI_OFFSET rcx,RCX-SS
- CFI_OFFSET rdx,RDX-SS
- CFI_OFFSET rsi,RSI-SS
- CFI_OFFSET rdi,RDI-SS
- CFI_OFFSET rsp,RSP-SS
- CFI_OFFSET rip,RIP-SS
+ .macro CFI_DEFAULT_STACK start=1
+ .if \start
+ CFI_STARTPROC simple
+ CFI_DEF_CFA rsp,SS+8
+ .else
+ CFI_DEF_CFA_OFFSET SS+8
+ .endif
+ CFI_REL_OFFSET r15,R15
+ CFI_REL_OFFSET r14,R14
+ CFI_REL_OFFSET r13,R13
+ CFI_REL_OFFSET r12,R12
+ CFI_REL_OFFSET rbp,RBP
+ CFI_REL_OFFSET rbx,RBX
+ CFI_REL_OFFSET r11,R11
+ CFI_REL_OFFSET r10,R10
+ CFI_REL_OFFSET r9,R9
+ CFI_REL_OFFSET r8,R8
+ CFI_REL_OFFSET rax,RAX
+ CFI_REL_OFFSET rcx,RCX
+ CFI_REL_OFFSET rdx,RDX
+ CFI_REL_OFFSET rsi,RSI
+ CFI_REL_OFFSET rdi,RDI
+ CFI_REL_OFFSET rip,RIP
+ /*CFI_REL_OFFSET cs,CS*/
+ /*CFI_REL_OFFSET rflags,EFLAGS*/
+ CFI_REL_OFFSET rsp,RSP
+ /*CFI_REL_OFFSET ss,SS*/
.endm
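/*
 * With start=1 (the default) CFI_DEFAULT_STACK opens a fresh CFI region via
 * CFI_STARTPROC simple and puts the CFA at SS+8 above %rsp, i.e. just above
 * a full pt_regs frame; with start=0 it only updates the CFA offset of an
 * already-open region and re-states the register save slots.
 */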
/*
* A newly forked process directly context switches into this.
*/
/* rdi: prev */
ENTRY(ret_from_fork)
- CFI_STARTPROC
CFI_DEFAULT_STACK
+ push kernel_eflags(%rip)
+ CFI_ADJUST_CFA_OFFSET 8
+ popf # reset kernel eflags
+ CFI_ADJUST_CFA_OFFSET -8
call schedule_tail
GET_THREAD_INFO(%rcx)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
*
* XXX if we had a free scratch register we could save the RSP into the stack frame
* and report it properly in ps. Unfortunately we haven't.
+ *
+ * When the user can change the frames, always force IRET. That is because
+ * IRET deals with non-canonical addresses better; SYSRET has trouble
+ * with them due to bugs in both AMD and Intel CPUs.
*/
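/*
 * ("Canonical" above means bits 63..48 replicate bit 47.  Purely as an
 * illustrative sketch, not taken from this file: with the return address
 * in %rcx and %r11 free as scratch, such a check could look like
 *
 *	movq	%rcx,%r11
 *	sarq	$47,%r11	# canonical -> 0 or -1
 *	incq	%r11		# canonical -> 1 or 0
 *	cmpq	$1,%r11
 *	ja	force_iret	# anything else is non-canonical
 *
 * where force_iret is a hypothetical label for the IRET path.)
 */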
ENTRY(system_call)
- CFI_STARTPROC
+ CFI_STARTPROC simple
+ CFI_DEF_CFA rsp,0
+ CFI_REGISTER rip,rcx
+ /*CFI_REGISTER rflags,r11*/
swapgs
movq %rsp,%gs:pda_oldrsp
movq %gs:pda_kernelstack,%rsp
sti
SAVE_ARGS 8,1
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
- movq %rcx,RIP-ARGOFFSET(%rsp)
+ movq %rcx,RIP-ARGOFFSET(%rsp)
+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
GET_THREAD_INFO(%rcx)
- testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
+ CFI_REMEMBER_STATE
jnz tracesys
cmpq $__NR_syscall_max,%rax
ja badsys
*/
.globl ret_from_sys_call
ret_from_sys_call:
- movl $_TIF_WORK_MASK,%edi
+ movl $_TIF_ALLWORK_MASK,%edi
/* edi: flagmask */
sysret_check:
GET_THREAD_INFO(%rcx)
cli
movl threadinfo_flags(%rcx),%edx
andl %edi,%edx
+ CFI_REMEMBER_STATE
jnz sysret_careful
movq RIP-ARGOFFSET(%rsp),%rcx
+ CFI_REGISTER rip,rcx
RESTORE_ARGS 0,-ARG_SKIP,1
+ /*CFI_REGISTER rflags,r11*/
movq %gs:pda_oldrsp,%rsp
swapgs
sysretq
/* Handle reschedules */
/* edx: work, edi: workmask */
sysret_careful:
+ CFI_RESTORE_STATE
bt $TIF_NEED_RESCHED,%edx
jnc sysret_signal
sti
pushq %rdi
+ CFI_ADJUST_CFA_OFFSET 8
call schedule
popq %rdi
+ CFI_ADJUST_CFA_OFFSET -8
jmp sysret_check
/* Handle a signal */
xorl %esi,%esi # oldset -> arg2
call ptregscall_common
1: movl $_TIF_NEED_RESCHED,%edi
- jmp sysret_check
+ /* Use IRET because user could have changed frame. This
+ works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
+ cli
+ jmp int_with_check
+badsys:
+ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
+ jmp ret_from_sys_call
+
/* Do syscall tracing */
tracesys:
+ CFI_RESTORE_STATE
SAVE_REST
movq $-ENOSYS,RAX(%rsp)
FIXUP_TOP_OF_STACK %rdi
ja 1f
movq %r10,%rcx /* fixup for C */
call *sys_call_table(,%rax,8)
- movq %rax,RAX-ARGOFFSET(%rsp)
-1: SAVE_REST
- movq %rsp,%rdi
- call syscall_trace_leave
- RESTORE_TOP_OF_STACK %rbx
- RESTORE_REST
- jmp ret_from_sys_call
+1: movq %rax,RAX-ARGOFFSET(%rsp)
+ /* Use IRET because user could have changed frame */
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
-badsys:
- movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
- jmp ret_from_sys_call
-
/*
* Syscall return path ending with IRET.
* Has correct top of stack, but partial stack frame.
*/
-ENTRY(int_ret_from_sys_call)
+ENTRY(int_ret_from_sys_call)
+ CFI_STARTPROC simple
+ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
+ /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
+ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
+ /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
+ /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
+ CFI_REL_OFFSET rdx,RDX-ARGOFFSET
+ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
+ CFI_REL_OFFSET rax,RAX-ARGOFFSET
+ CFI_REL_OFFSET rdi,RDI-ARGOFFSET
+ CFI_REL_OFFSET rsi,RSI-ARGOFFSET
+ CFI_REL_OFFSET r8,R8-ARGOFFSET
+ CFI_REL_OFFSET r9,R9-ARGOFFSET
+ CFI_REL_OFFSET r10,R10-ARGOFFSET
+ CFI_REL_OFFSET r11,R11-ARGOFFSET
cli
testl $3,CS-ARGOFFSET(%rsp)
je retint_restore_args
movl threadinfo_flags(%rcx),%edx
andl %edi,%edx
jnz int_careful
+ andl $~TS_COMPAT,threadinfo_status(%rcx)
jmp retint_swapgs
/* Either reschedule or signal or syscall exit tracking needed. */
jnc int_very_careful
sti
pushq %rdi
+ CFI_ADJUST_CFA_OFFSET 8
call schedule
popq %rdi
+ CFI_ADJUST_CFA_OFFSET -8
+ cli
jmp int_with_check
/* handle signals and tracing -- both require a full stack frame */
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
jz int_signal
pushq %rdi
+ CFI_ADJUST_CFA_OFFSET 8
leaq 8(%rsp),%rdi # &ptregs -> arg1
call syscall_trace_leave
popq %rdi
- btr $TIF_SYSCALL_TRACE,%edi
- btr $TIF_SYSCALL_AUDIT,%edi
- btr $TIF_SINGLESTEP,%edi
+ CFI_ADJUST_CFA_OFFSET -8
+ andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
+ cli
jmp int_restore_rest
int_signal:
1: movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
RESTORE_REST
+ cli
jmp int_with_check
CFI_ENDPROC
jmp ptregscall_common
.endm
+ CFI_STARTPROC
+
PTREGSCALL stub_clone, sys_clone, %r8
PTREGSCALL stub_fork, sys_fork, %rdi
PTREGSCALL stub_vfork, sys_vfork, %rdi
PTREGSCALL stub_iopl, sys_iopl, %rsi
ENTRY(ptregscall_common)
- CFI_STARTPROC
popq %r11
- CFI_ADJUST_CFA_OFFSET -8
+ CFI_ADJUST_CFA_OFFSET -8
+ CFI_REGISTER rip, r11
SAVE_REST
movq %r11, %r15
+ CFI_REGISTER rip, r15
FIXUP_TOP_OF_STACK %r11
call *%rax
RESTORE_TOP_OF_STACK %r11
movq %r15, %r11
+ CFI_REGISTER rip, r11
RESTORE_REST
pushq %r11
- CFI_ADJUST_CFA_OFFSET 8
+ CFI_ADJUST_CFA_OFFSET 8
+ CFI_REL_OFFSET rip, 0
ret
CFI_ENDPROC
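/*
 * The CFI_REGISTER rip annotations above track the saved return address as
 * it is popped into %r11, parked in %r15 across the call and pushed back,
 * and CFI_REL_OFFSET rip,0 marks it once it is on the stack again, so
 * unwinding through ptregscall_common stays correct at every step.
 */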
ENTRY(stub_execve)
CFI_STARTPROC
popq %r11
- CFI_ADJUST_CFA_OFFSET -8
+ CFI_ADJUST_CFA_OFFSET -8
+ CFI_REGISTER rip, r11
SAVE_REST
- movq %r11, %r15
FIXUP_TOP_OF_STACK %r11
call sys_execve
- GET_THREAD_INFO(%rcx)
- bt $TIF_IA32,threadinfo_flags(%rcx)
- jc exec_32bit
RESTORE_TOP_OF_STACK %r11
- movq %r15, %r11
- RESTORE_REST
- push %r11
- ret
-
-exec_32bit:
- CFI_ADJUST_CFA_OFFSET REST_SKIP
movq %rax,RAX(%rsp)
RESTORE_REST
jmp int_ret_from_sys_call
*/
ENTRY(stub_rt_sigreturn)
CFI_STARTPROC
- addq $8, %rsp
+ addq $8, %rsp
+ CFI_ADJUST_CFA_OFFSET -8
SAVE_REST
movq %rsp,%rdi
FIXUP_TOP_OF_STACK %r11
jmp int_ret_from_sys_call
CFI_ENDPROC
+/*
+ * initial frame state for interrupts and exceptions
+ */
+ .macro _frame ref
+ CFI_STARTPROC simple
+ CFI_DEF_CFA rsp,SS+8-\ref
+ /*CFI_REL_OFFSET ss,SS-\ref*/
+ CFI_REL_OFFSET rsp,RSP-\ref
+ /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
+ /*CFI_REL_OFFSET cs,CS-\ref*/
+ CFI_REL_OFFSET rip,RIP-\ref
+ .endm
+
+/* initial frame state for interrupts (and exceptions without error code) */
+#define INTR_FRAME _frame RIP
+/* initial frame state for exceptions with error code (and interrupts with
+ vector already pushed) */
+#define XCPT_FRAME _frame ORIG_RAX
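/*
 * A quick check of the arithmetic (assuming the usual pt_regs layout with
 * ORIG_RAX, RIP, CS, EFLAGS, RSP and SS as consecutive 8-byte slots):
 * SS+8-RIP = 5*8, exactly the ss/rsp/rflags/cs/rip words the CPU pushes for
 * a plain interrupt, and SS+8-ORIG_RAX = 6*8 accounts for the extra error
 * code, so in both cases the CFA lands just above the hardware frame.
 */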
+
/*
* Interrupt entry/exit.
*
/* 0(%rsp): interrupt number */
.macro interrupt func
- CFI_STARTPROC simple
- CFI_DEF_CFA rsp,(SS-RDI)
- CFI_REL_OFFSET rsp,(RSP-ORIG_RAX)
- CFI_REL_OFFSET rip,(RIP-ORIG_RAX)
cld
#ifdef CONFIG_DEBUG_INFO
SAVE_ALL
testl $3,CS(%rdi)
je 1f
swapgs
-1: addl $1,%gs:pda_irqcount # RED-PEN should check preempt count
+1: incl %gs:pda_irqcount # RED-PEN should check preempt count
movq %gs:pda_irqstackptr,%rax
- cmoveq %rax,%rsp
+ cmoveq %rax,%rsp /*todo This needs CFI annotation! */
pushq %rdi # save old stack
+#ifndef CONFIG_DEBUG_INFO
+ CFI_ADJUST_CFA_OFFSET 8
+#endif
call \func
.endm
ENTRY(common_interrupt)
+ XCPT_FRAME
interrupt do_IRQ
/* 0(%rsp): oldrsp-ARGOFFSET */
-ret_from_intr:
+ret_from_intr:
popq %rdi
+#ifndef CONFIG_DEBUG_INFO
+ CFI_ADJUST_CFA_OFFSET -8
+#endif
cli
- subl $1,%gs:pda_irqcount
+ decl %gs:pda_irqcount
#ifdef CONFIG_DEBUG_INFO
movq RBP(%rdi),%rbp
+ CFI_DEF_CFA_REGISTER rsp
#endif
- leaq ARGOFFSET(%rdi),%rsp
-exit_intr:
+ leaq ARGOFFSET(%rdi),%rsp /*todo This needs CFI annotation! */
+exit_intr:
GET_THREAD_INFO(%rcx)
testl $3,CS-ARGOFFSET(%rsp)
je retint_kernel
*/
retint_with_reschedule:
movl $_TIF_WORK_MASK,%edi
-retint_check:
+retint_check:
movl threadinfo_flags(%rcx),%edx
andl %edi,%edx
+ CFI_REMEMBER_STATE
jnz retint_careful
retint_swapgs:
- cli
swapgs
retint_restore_args:
cli
/* running with kernel gs */
bad_iret:
movq $-9999,%rdi /* better code? */
+ sti
jmp do_exit
.previous
- /* edi: workmask, edx: work */
+ /* edi: workmask, edx: work */
retint_careful:
+ CFI_RESTORE_STATE
bt $TIF_NEED_RESCHED,%edx
jnc retint_signal
sti
pushq %rdi
+ CFI_ADJUST_CFA_OFFSET 8
call schedule
popq %rdi
+ CFI_ADJUST_CFA_OFFSET -8
GET_THREAD_INFO(%rcx)
cli
jmp retint_check
sti
SAVE_REST
movq $-1,ORIG_RAX(%rsp)
- xorq %rsi,%rsi # oldset
+ xorl %esi,%esi # oldset
movq %rsp,%rdi # &pt_regs
call do_notify_resume
RESTORE_REST
cli
movl $_TIF_NEED_RESCHED,%edi
- GET_THREAD_INFO(%rcx)
+ GET_THREAD_INFO(%rcx)
jmp retint_check
#ifdef CONFIG_PREEMPT
bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
jnc retint_restore_args
bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
- jc retint_restore_args
- movl $PREEMPT_ACTIVE,threadinfo_preempt_count(%rcx)
- sti
- call schedule
- cli
- GET_THREAD_INFO(%rcx)
- movl $0,threadinfo_preempt_count(%rcx)
+ jnc retint_restore_args
+ call preempt_schedule_irq
jmp exit_intr
#endif
CFI_ENDPROC
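/*
 * preempt_schedule_irq replaces the open-coded PREEMPT_ACTIVE/schedule
 * sequence: it is meant to be called with interrupts disabled and handles
 * PREEMPT_ACTIVE and interrupt re-enabling internally.
 */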
* APIC interrupts.
*/
.macro apicinterrupt num,func
+ INTR_FRAME
pushq $\num-256
+ CFI_ADJUST_CFA_OFFSET 8
interrupt \func
jmp ret_from_intr
CFI_ENDPROC
.endm
+ENTRY(thermal_interrupt)
+ apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
+
+ENTRY(threshold_interrupt)
+ apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
+
#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
-ENTRY(invalidate_interrupt)
- apicinterrupt INVALIDATE_TLB_VECTOR,smp_invalidate_interrupt
+ .macro INVALIDATE_ENTRY num
+ENTRY(invalidate_interrupt\num)
+ apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
+ .endm
+
+ INVALIDATE_ENTRY 0
+ INVALIDATE_ENTRY 1
+ INVALIDATE_ENTRY 2
+ INVALIDATE_ENTRY 3
+ INVALIDATE_ENTRY 4
+ INVALIDATE_ENTRY 5
+ INVALIDATE_ENTRY 6
+ INVALIDATE_ENTRY 7
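/*
 * One stub per vector from INVALIDATE_TLB_VECTOR_START upwards, so TLB
 * shootdown IPIs from different senders can arrive on distinct vectors and
 * be processed in parallel rather than serializing on a single vector.
 */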
ENTRY(call_function_interrupt)
apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
* Exception entry points.
*/
.macro zeroentry sym
+ INTR_FRAME
pushq $0 /* push error code/oldrax */
+ CFI_ADJUST_CFA_OFFSET 8
pushq %rax /* push real oldrax to the rdi slot */
+ CFI_ADJUST_CFA_OFFSET 8
leaq \sym(%rip),%rax
jmp error_entry
+ CFI_ENDPROC
.endm
.macro errorentry sym
+ XCPT_FRAME
pushq %rax
+ CFI_ADJUST_CFA_OFFSET 8
leaq \sym(%rip),%rax
jmp error_entry
+ CFI_ENDPROC
.endm
/* error code is on the stack already */
/* handle NMI like exceptions that can happen everywhere */
- .macro paranoidentry sym
+ .macro paranoidentry sym, ist=0
SAVE_ALL
cld
movl $1,%ebx
js 1f
swapgs
xorl %ebx,%ebx
-1: movq %rsp,%rdi
+1:
+ .if \ist
+ movq %gs:pda_data_offset, %rbp
+ .endif
+ movq %rsp,%rdi
movq ORIG_RAX(%rsp),%rsi
movq $-1,ORIG_RAX(%rsp)
+ .if \ist
+ subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+ .endif
call \sym
+ .if \ist
+ addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+ .endif
+ cli
.endm
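/*
 * The \ist bookkeeping above lowers the TSS IST slot by EXCEPTION_STKSZ for
 * the duration of the handler, so a nested exception on the same IST entry
 * gets a fresh stack area instead of clobbering the current frame.
 */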
/*
* and the exception handler in %rax.
*/
ENTRY(error_entry)
- CFI_STARTPROC simple
- CFI_DEF_CFA rsp,(SS-RDI)
- CFI_REL_OFFSET rsp,(RSP-RDI)
- CFI_REL_OFFSET rip,(RIP-RDI)
+ _frame RDI
/* rdi slot contains rax, oldrax contains error code */
cld
subq $14*8,%rsp
jnz retint_careful
swapgs
RESTORE_ARGS 0,8,0
- iretq
+ jmp iret_label
CFI_ENDPROC
error_kernelspace:
/* Reload gs selector with exception handling */
/* edi: new selector */
ENTRY(load_gs_index)
+ CFI_STARTPROC
pushf
+ CFI_ADJUST_CFA_OFFSET 8
cli
swapgs
gs_change:
2: mfence /* workaround */
swapgs
popf
+ CFI_ADJUST_CFA_OFFSET -8
ret
+ CFI_ENDPROC
.section __ex_table,"a"
.align 8
movq %rsi, %rdi
call *%rax
# exit
- xorq %rdi, %rdi
+ xorl %edi, %edi
call do_exit
/*
ret
CFI_ENDPROC
-ENTRY(page_fault)
+KPROBE_ENTRY(page_fault)
errorentry do_page_fault
+ .previous .text
ENTRY(coprocessor_error)
zeroentry do_coprocessor_error
zeroentry math_state_restore
/* runs on exception stack */
-ENTRY(debug)
- CFI_STARTPROC
+KPROBE_ENTRY(debug)
+ INTR_FRAME
pushq $0
CFI_ADJUST_CFA_OFFSET 8
- paranoidentry do_debug
- /* switch back to process stack to restore the state ptrace touched */
- movq %rax,%rsp
- testl $3,CS(%rsp)
- jnz paranoid_userspace
+ paranoidentry do_debug, DEBUG_STACK
jmp paranoid_exit
CFI_ENDPROC
+ .previous .text
/* runs on exception stack */
-ENTRY(nmi)
- CFI_STARTPROC
+KPROBE_ENTRY(nmi)
+ INTR_FRAME
pushq $-1
- CFI_ADJUST_CFA_OFFSET 8
+ CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_nmi
+ /*
+ * "Paranoid" exit path from exception stack.
+ * Paranoid because this is used by NMIs and cannot take
+ * any kernel state for granted.
+ * We don't do kernel preemption checks here, because only
+ * NMI should be common and it does not enable IRQs and
+ * cannot get reschedule ticks.
+ */
/* ebx: no swapgs flag */
paranoid_exit:
testl %ebx,%ebx /* swapgs needed? */
jnz paranoid_restore
+ testl $3,CS(%rsp)
+ jnz paranoid_userspace
paranoid_swapgs:
- cli
swapgs
paranoid_restore:
RESTORE_ALL 8
iretq
paranoid_userspace:
- cli
GET_THREAD_INFO(%rcx)
- movl threadinfo_flags(%rcx),%edx
- testl $_TIF_NEED_RESCHED,%edx
- jnz paranoid_resched
- testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
- jnz paranoid_signal
- jmp paranoid_swapgs
-paranoid_resched:
+ movl threadinfo_flags(%rcx),%ebx
+ andl $_TIF_WORK_MASK,%ebx
+ jz paranoid_swapgs
+ movq %rsp,%rdi /* &pt_regs */
+ call sync_regs
+ movq %rax,%rsp /* switch stack for scheduling */
+ testl $_TIF_NEED_RESCHED,%ebx
+ jnz paranoid_schedule
+ movl %ebx,%edx /* arg3: thread flags */
sti
- call schedule
- jmp paranoid_exit
-paranoid_signal:
- sti
- xorl %esi,%esi /* oldset */
- movq %rsp,%rdi /* &pt_regs */
+ xorl %esi,%esi /* arg2: oldset */
+ movq %rsp,%rdi /* arg1: &pt_regs */
call do_notify_resume
- jmp paranoid_exit
+ cli
+ jmp paranoid_userspace
+paranoid_schedule:
+ sti
+ call schedule
+ cli
+ jmp paranoid_userspace
CFI_ENDPROC
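/*
 * Note that paranoid_userspace starts out on the exception (IST) stack;
 * before anything that may sleep runs, sync_regs copies pt_regs onto the
 * process stack and %rsp is switched there, then the thread flags are
 * re-checked in a loop until no work bits remain.
 */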
-
-ENTRY(int3)
- zeroentry do_int3
+ .previous .text
+
+KPROBE_ENTRY(int3)
+ INTR_FRAME
+ pushq $0
+ CFI_ADJUST_CFA_OFFSET 8
+ paranoidentry do_int3, DEBUG_STACK
+ jmp paranoid_exit
+ CFI_ENDPROC
+ .previous .text
ENTRY(overflow)
zeroentry do_overflow
/* runs on exception stack */
ENTRY(double_fault)
- CFI_STARTPROC
+ XCPT_FRAME
paranoidentry do_double_fault
- movq %rax,%rsp
- testl $3,CS(%rsp)
- jnz paranoid_userspace
jmp paranoid_exit
CFI_ENDPROC
/* runs on exception stack */
ENTRY(stack_segment)
- CFI_STARTPROC
+ XCPT_FRAME
paranoidentry do_stack_segment
- movq %rax,%rsp
- testl $3,CS(%rsp)
- jnz paranoid_userspace
jmp paranoid_exit
CFI_ENDPROC
-ENTRY(general_protection)
+KPROBE_ENTRY(general_protection)
errorentry do_general_protection
+ .previous .text
ENTRY(alignment_check)
errorentry do_alignment_check
#ifdef CONFIG_X86_MCE
/* runs on exception stack */
ENTRY(machine_check)
- CFI_STARTPROC
+ INTR_FRAME
pushq $0
CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_machine_check
CFI_ENDPROC
#endif
-ENTRY(call_debug)
- zeroentry do_call_debug
-
+ENTRY(call_softirq)
+ CFI_STARTPROC
+ movq %gs:pda_irqstackptr,%rax
+ movq %rsp,%rdx
+ CFI_DEF_CFA_REGISTER rdx
+ incl %gs:pda_irqcount
+ cmove %rax,%rsp
+ pushq %rdx
+ /*todo CFI_DEF_CFA_EXPRESSION ...*/
+ call __do_softirq
+ popq %rsp
+ CFI_DEF_CFA_REGISTER rsp
+ decl %gs:pda_irqcount
+ ret
+ CFI_ENDPROC
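/*
 * call_softirq runs __do_softirq on the per-CPU interrupt stack: %rsp is
 * switched only when the incremented pda_irqcount shows this is the
 * outermost entry (the cmove), the old stack pointer is pushed so that
 * popq %rsp can restore it, and pda_irqcount is dropped again on return.
 */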