# EFLAGS bits tested by the entry code: NT (nested task, bit 14)
# and VM (virtual-8086 mode, bit 17).
NT_MASK = 0x00004000
VM_MASK = 0x00020000
-/*
- * ESP0 is at offset 4. 0x200 is the size of the TSS, and
- * also thus the top-of-stack pointer offset of SYSENTER_ESP
- */
-TSS_ESP0_OFFSET = (4 - 0x200)
-
# NOTE(review): this hunk removes the hand-computed TSS esp0 offset;
# the code below switches to TSS_sysenter_esp0, presumably a generated
# asm-offsets constant -- confirm it exists in asm-offsets.c.
#ifdef CONFIG_PREEMPT
#define preempt_stop cli
#else
# (hunk boundary: the !CONFIG_PREEMPT definition of preempt_stop and the
#  start of the lcall7 path are not visible in this fragment)
movl %edx,EIP(%ebp) # Now we move them to their "normal" places
movl %ecx,CS(%ebp) #
GET_THREAD_INFO_WITH_ESP(%ebp) # GET_THREAD_INFO
- movl TI_EXEC_DOMAIN(%ebp), %edx # Get the execution domain
- call *4(%edx) # Call the lcall7 handler for the domain
+ movl TI_exec_domain(%ebp), %edx # Get the execution domain
+ call *EXEC_DOMAIN_handler(%edx) # Call the handler for the domain
# NOTE(review): hard-coded offset 4 into the exec_domain struct is
# replaced by the symbolic EXEC_DOMAIN_handler offset -- same field,
# now named instead of magic.
addl $4, %esp
popl %eax
jmp resume_userspace
cli # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
# NOTE(review): TI_FLAGS -> TI_flags matches the thread_info field
# name; offset presumably generated by asm-offsets -- confirm.
- movl TI_FLAGS(%ebp), %ecx
+ movl TI_flags(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
# int/exception return?
jne work_pending
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
# Kernel preemption on return to kernel mode: only preempt when
# preempt_count is zero and the interrupted context had irqs enabled.
- cmpl $0,TI_PRE_COUNT(%ebp) # non-zero preempt_count ?
+ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
jnz restore_all
need_resched:
- movl TI_FLAGS(%ebp), %ecx # need_resched set ?
+ movl TI_flags(%ebp), %ecx # need_resched set ?
testb $_TIF_NEED_RESCHED, %cl
jz restore_all
testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all
# Mark the preemption in progress (PREEMPT_ACTIVE) so schedule()
# recognises it, then loop to re-test need_resched with irqs off.
- movl $PREEMPT_ACTIVE,TI_PRE_COUNT(%ebp)
+ movl $PREEMPT_ACTIVE,TI_preempt_count(%ebp)
sti
call schedule
- movl $0,TI_PRE_COUNT(%ebp)
+ movl $0,TI_preempt_count(%ebp)
cli
jmp need_resched
#endif
# sysenter call handler stub
ENTRY(sysenter_entry)
# At SYSENTER, %esp holds SYSENTER_ESP, which points into the TSS;
# load the real kernel stack pointer from the TSS sysenter_esp0 slot.
- movl TSS_ESP0_OFFSET(%esp),%esp
+ movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
sti
pushl $(__USER_DS)
# (hunk boundary: register save / user-esp handling not shown)
cmpl $(nr_syscalls), %eax
jae syscall_badsys
- testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_FLAGS(%ebp)
+ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
jnz syscall_trace_entry
call *sys_call_table(,%eax,4) # dispatch: 4-byte entries, nr in %eax
movl %eax,EAX(%esp) # store return value into the saved regs
cli
- movl TI_FLAGS(%ebp), %ecx
+ movl TI_flags(%ebp), %ecx
testw $_TIF_ALLWORK_MASK, %cx
jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
# int 0x80 path: bounds-check the syscall number, divert to the
# tracer/auditor when requested, then dispatch through the table.
cmpl $(nr_syscalls), %eax
jae syscall_badsys
# system call tracing in operation
- testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_FLAGS(%ebp)
+ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
jnz syscall_trace_entry
syscall_call:
call *sys_call_table(,%eax,4)
cli # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
- movl TI_FLAGS(%ebp), %ecx
+ movl TI_flags(%ebp), %ecx
testw $_TIF_ALLWORK_MASK, %cx # current->work
jne syscall_exit_work
restore_all:
cli # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
# NOTE(review): two hunks are fused here -- the code between the
# restore_all label and this re-check is not visible in this fragment.
- movl TI_FLAGS(%ebp), %ecx
+ movl TI_flags(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
# than syscall tracing?
jz restore_all
* that sets up the real kernel stack. Check here, since we can't
* allow the wrong stack to be used.
*
- * "TSS_ESP0_OFFSET+12" is because the NMI/debug handler will have
+ * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
* already pushed 3 words if it hits on the sysenter instruction:
* eflags, cs and eip.
*
* NOTE(review): comment text updated in step with the rename of the
* hand-computed TSS_ESP0_OFFSET to the generated TSS_sysenter_esp0;
* the offset arithmetic (+12 = 3 pushed words) is unchanged.
*
* (hunk boundary: the macro head above and its closing lines are not
* visible; the body lines below are \-continued, so no comments can be
* inserted among them without breaking the macro.)
cmpw $__KERNEL_CS,4(%esp); \
jne ok; \
label: \
- movl TSS_ESP0_OFFSET+offset(%esp),%esp; \
+ movl TSS_sysenter_esp0+offset(%esp),%esp; \
pushfl; \
pushl $__KERNEL_CS; \
pushl $sysenter_past_esp
# Tail of the i386 sys_call_table: one 4-byte pointer per syscall
# number. This hunk wires up the NUMA memory-policy syscalls (formerly
# sys_ni_syscall placeholders) and reserves a new slot for kexec.
.long sys_utimes
.long sys_fadvise64_64
.long sys_vserver
- .long sys_ni_syscall /* sys_mbind */
- .long sys_ni_syscall /* 275 sys_get_mempolicy */
- .long sys_ni_syscall /* sys_set_mempolicy */
+ .long sys_mbind
+ .long sys_get_mempolicy
+ .long sys_set_mempolicy
.long sys_mq_open
.long sys_mq_unlink
.long sys_mq_timedsend
.long sys_mq_timedreceive /* 280 */
.long sys_mq_notify
.long sys_mq_getsetattr
+ .long sys_ni_syscall /* reserved for kexec */
# Table size derived from the current location counter, so new entries
# must be appended above this line.
syscall_table_size=(.-sys_call_table)