#include <asm/thread_info.h>
#include <asm/segment.h>
#include <asm/vsyscall32.h>
+#include <asm/irqflags.h>
#include <linux/linkage.h>
#define __XEN_X86_64 1
/*
 * 32-bit SYSENTER entry, fast path (fragment: the ENTRY(ia32_sysenter_target)
 * line is outside this hunk).  Switches to the per-CPU kernel stack, re-enables
 * event delivery, saves the 32-bit syscall arguments, and on the no-work fast
 * path returns to userspace via SYSEXIT.
 */
CFI_REGISTER rsp,rbp
__swapgs
/* Load this CPU's kernel stack pointer from the PDA (gs-relative). */
movq %gs:pda_kernelstack, %rsp
- addq $(PDA_STACKOFFSET),%rsp
+ addq $(PDA_STACKOFFSET),%rsp
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs, here we enable it straight after entry:
+ */
/* Now on the kernel stack: re-enable Xen event delivery and interrupts. */
XEN_UNBLOCK_EVENTS(%r11)
__sti
movl %ebp,%ebp /* zero extension */
pushq %rax
CFI_ADJUST_CFA_OFFSET 8
cld
- SAVE_ARGS 0,0,1
+ SAVE_ARGS 0,0,0
/* no need to do an access_ok check here because rbp has been
32bit zero extended */
/* Load arg into %r9d from the user pointer in %rbp (6th-arg convention for
   the 32-bit sysenter path — confirm against the full file; faults here are
   presumably caught by an exception-table entry outside this hunk). */
1: movl (%rbp),%r9d
GET_THREAD_INFO(%r10)
/* Mask events / disable irqs before testing for pending work flags. */
XEN_BLOCK_EVENTS(%r11)
__cli
+ TRACE_IRQS_OFF
/* Any pending work (signals, tracing, resched) => take the slow exit path. */
testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
jnz int_ret_from_sys_call
andl $~TS_COMPAT,threadinfo_status(%r10)
CFI_REGISTER rsp,rcx
movl $VSYSCALL32_SYSEXIT,%edx /* User %eip */
CFI_REGISTER rip,rdx
+ TRACE_IRQS_ON
__swapgs
XEN_UNBLOCK_EVENTS(%r11)
__sti /* sti only takes effect after the next instruction */
/* sysexit */
/* 0x0f 0x35 is the raw SYSEXIT opcode, emitted as bytes. */
- .byte 0xf, 0x35 /* TBD */
+ .byte 0xf, 0x35
/* Slow path when syscall tracing is active (body elided in this hunk). */
sysenter_tracesys:
CFI_RESTORE_STATE
.previous
jmp sysenter_do_call
CFI_ENDPROC
+ENDPROC(ia32_sysenter_target)
/*
 * 32bit SYSCALL instruction entry.
 */
/*
 * Entry for the AMD-style 32-bit SYSCALL instruction (fragment: interior
 * of the routine is elided in this hunk).  User %rip arrives in %rcx and
 * user %rsp is saved in %r8 before switching to the kernel stack; the
 * no-work fast path exits with SYSRETL.
 */
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
- CFI_DEF_CFA rsp,0
+ CFI_DEF_CFA rsp,PDA_STACKOFFSET
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
__swapgs
/* Preserve the user stack pointer (32-bit) in %r8 before switching stacks. */
movl %esp,%r8d
CFI_REGISTER rsp,r8
movq %gs:pda_kernelstack,%rsp
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs and here we enable it straight after entry:
+ */
XEN_UNBLOCK_EVENTS(%r11)
__sti
SAVE_ARGS 8,1,1
GET_THREAD_INFO(%r10)
/* Mask events / disable irqs before testing for pending work flags. */
XEN_BLOCK_EVENTS(%r11)
__cli
+ TRACE_IRQS_OFF
/* Any pending work (signals, tracing, resched) => take the slow exit path. */
testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
jnz int_ret_from_sys_call
andl $~TS_COMPAT,threadinfo_status(%r10)
CFI_REGISTER rip,rcx
/* Restore user rflags (into %r11, as SYSRET expects) from the saved frame. */
movl EFLAGS-ARGOFFSET(%rsp),%r11d
/*CFI_REGISTER rflags,r11*/
+ TRACE_IRQS_ON
/* Restore the user stack pointer saved by SAVE_ARGS. */
movl RSP-ARGOFFSET(%rsp),%esp
CFI_RESTORE rsp
__swapgs
sysretl
/* Slow path when syscall tracing is active (body elided in this hunk);
   the .quad below is an exception-table entry pairing a faulting insn
   with the ia32_badarg fixup. */
cstar_tracesys:
CFI_RESTORE_STATE
.quad 1b,ia32_badarg
.previous
jmp cstar_do_call
+END(ia32_cstar_target)
/*
 * Fault fixup: a user-pointer access in an entry path faulted; return
 * -EFAULT to the caller.  (Fragment: the code between this stub and the
 * ia32_syscall tail below is elided in this hunk.)
 */
ia32_badarg:
movq $-EFAULT,%rax
/*CFI_REL_OFFSET cs,CS-RIP*/
CFI_REL_OFFSET rip,RIP-RIP
__swapgs
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs and here we enable it straight after entry:
+ */
XEN_UNBLOCK_EVENTS(%r11)
__sti
/* Fetch the value at the top of the stack (presumably the saved user %rip
   from the int 0x80 frame — confirm against the full file). */
movq (%rsp),%rcx
pushq %rax
CFI_ADJUST_CFA_OFFSET 8
cld
-/* 1: jmp 1b */
/* note the registers are not zero extended to the sf.
this could be a problem. */
SAVE_ARGS 0,0,1
LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
jmp ia32_do_syscall
+END(ia32_syscall)
/* Invalid syscall number: store -ENOSYS as the return value in the saved
   register frame and take the common slow exit path. */
ia32_badsys:
movq $0,ORIG_RAX-ARGOFFSET(%rsp)
movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
jmp int_ret_from_sys_call
-ni_syscall:
- movq %rax,%rdi
- jmp sys32_ni_syscall
-
/* Stub for removed/unimplemented syscalls: return -ENOSYS without logging. */
quiet_ni_syscall:
movq $-ENOSYS,%rax
ret
/* Tail of ia32_ptregs_common (head elided in this hunk): restore the
   remaining registers and rejoin the common sysret path. */
RESTORE_REST
jmp ia32_sysret /* misbalances the return cache */
CFI_ENDPROC
+END(ia32_ptregs_common)
/*
 * 32-bit compat syscall table: one .quad handler pointer per ia32 syscall
 * number.  Read-only data; entries here are a sparse fragment of the full
 * table (most rows are elided in this hunk), so the visible rows are NOT
 * consecutive syscall numbers.
 */
.section .rodata,"a"
.align 8
- .globl ia32_sys_call_table
ia32_sys_call_table:
.quad sys_restart_syscall
.quad sys_exit
.quad sys_setuid16
.quad sys_getuid16
.quad compat_sys_stime /* stime */ /* 25 */
- .quad sys32_ptrace /* ptrace */
+ .quad compat_sys_ptrace /* ptrace */
.quad sys_alarm
.quad sys_fstat /* (old)fstat */
.quad sys_pause
.quad quiet_ni_syscall /* old lock syscall holder */
.quad compat_sys_ioctl
.quad compat_sys_fcntl64 /* 55 */
/* Slot for the old mpx holder, repurposed for TUX when configured
   (built-in, module hook, or -ENOSYS stub). */
-  .quad quiet_ni_syscall /* old mpx syscall holder */
+#ifdef CONFIG_TUX
+ .quad __sys_tux
+#else
+# ifdef CONFIG_TUX_MODULE
+ .quad sys_tux
+# else
+ .quad quiet_ni_syscall
+# endif
+#endif
.quad sys_setpgid
.quad quiet_ni_syscall /* old ulimit syscall holder */
.quad sys32_olduname
.quad sys_tgkill /* 270 */
.quad compat_sys_utimes
.quad sys32_fadvise64_64
- .quad quiet_ni_syscall /* sys_vserver */
+ .quad sys32_vserver
.quad sys_mbind
.quad compat_sys_get_mempolicy /* 275 */
.quad sys_set_mempolicy
.quad sys_readlinkat /* 305 */
.quad sys_fchmodat
.quad sys_faccessat
- .quad quiet_ni_syscall /* pselect6 for now */
- .quad quiet_ni_syscall /* ppoll for now */
+ .quad compat_sys_pselect6
+ .quad compat_sys_ppoll
.quad sys_unshare /* 310 */
.quad compat_sys_set_robust_list
.quad compat_sys_get_robust_list
.quad sys_sync_file_range
.quad sys_tee
.quad compat_sys_vmsplice
+ .quad compat_sys_move_pages
+ .quad sys_getcpu
/* End-of-table marker label (used elsewhere to compute the table size). */
ia32_syscall_end: