diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 9729191b8..8e66d36ea 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -3,13 +3,14 @@
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle
+ * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle
 * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2004 Thiemo Seufer
 */
-#include <linux/config.h>
 #include <linux/errno.h>
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
+#include <asm/irqflags.h>
 #include <asm/mipsregs.h>
 #include <asm/regdef.h>
 #include <asm/stackframe.h>
@@ -17,7 +18,8 @@
 #include <asm/sysmips.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
-#include <asm/offset.h>
+#include <asm/war.h>
+#include <asm/asm-offsets.h>
 
 /* Highest syscall used of any syscall flavour */
 #define MAX_SYSCALL_NO __NR_O32_Linux + __NR_O32_Linux_syscalls
@@ -26,31 +28,36 @@
 NESTED(handle_sys, PT_SIZE, sp)
 .set noat
 SAVE_SOME
+ TRACE_IRQS_ON_RELOAD
 STI
 .set at
 
 lw t1, PT_EPC(sp) # skip syscall on return
 
+#if defined(CONFIG_BINFMT_IRIX)
 sltiu t0, v0, MAX_SYSCALL_NO + 1 # check syscall number
+#else
+ subu v0, v0, __NR_O32_Linux # check syscall number
+ sltiu t0, v0, __NR_O32_Linux_syscalls + 1
+#endif
 addiu t1, 4 # skip to next instruction
- beqz t0, illegal_syscall
 sw t1, PT_EPC(sp)
+ beqz t0, illegal_syscall
 
- /* XXX Put both in one cacheline, should save a bit. */
- sll t0, v0, 2
- lw t2, sys_call_table(t0) # syscall routine
- lbu t3, sys_narg_table(v0) # number of arguments
- beqz t2, illegal_syscall;
+ sll t0, v0, 3
+ la t1, sys_call_table
+ addu t1, t0
+ lw t2, (t1) # syscall routine
+ lw t3, 4(t1) # >= 0 if we need stack arguments
+ beqz t2, illegal_syscall
 
- subu t0, t3, 5 # 5 or more arguments?
 sw a3, PT_R26(sp) # save a3 for syscall restarting
- bgez t0, stackargs
+ bgez t3, stackargs
 
 stack_done:
- sw a3, PT_R26(sp) # save for syscall restart
- LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
- li t1, _TIF_SYSCALL_TRACE
- and t0, t1, t0
+ lw t0, TI_FLAGS($28) # syscall tracing enabled?
+ li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ and t0, t1
 bnez t0, syscall_trace_entry # -> yes
 
 jalr t2 # Do The Real Thing (TM)
@@ -65,13 +72,13 @@ stack_done:
 # restarting
 1: sw v0, PT_R2(sp) # result
 
-EXPORT(o32_syscall_exit)
+o32_syscall_exit:
 local_irq_disable # make sure need_resched and
 # signals dont change between
 # sampling and return
- LONG_L a2, TI_FLAGS($28) # current->work
+ lw a2, TI_FLAGS($28) # current->work
 li t0, _TIF_ALLWORK_MASK
- and t0, a2, t0
+ and t0, a2
 bnez t0, o32_syscall_exit_work
 
 j restore_partial
@@ -83,15 +90,18 @@ o32_syscall_exit_work:
 
 syscall_trace_entry:
 SAVE_STATIC
- sw t2, PT_R1(sp)
+ move s0, t2
+ move a0, sp
+ li a1, 0
 jal do_syscall_trace
- lw t2, PT_R1(sp)
+ move t0, s0
+ RESTORE_STATIC
 lw a0, PT_R4(sp) # Restore argument registers
 lw a1, PT_R5(sp)
 lw a2, PT_R6(sp)
 lw a3, PT_R7(sp)
- jalr t2
+ jalr t0
 
 li t0, -EMAXERRNO - 1 # error?
 sltu t0, t0, v0
@@ -114,49 +124,48 @@ syscall_trace_entry:
 */
 stackargs:
 lw t0, PT_R29(sp) # get old user stack pointer
- subu t3, 4
- sll t1, t3, 2 # stack valid?
-
- addu t1, t0 # end address
- or t0, t1
- bltz t0, bad_stack # -> sp is bad
- lw t0, PT_R29(sp) # get old user stack pointer
- PTR_LA t1, 4f # copy 1 to 3 arguments
- sll t3, t3, 4
- subu t1, t3
- jr t1
-
- /* Ok, copy the args from the luser stack to the kernel stack */
 /*
- * I know Ralf doesn't like nops but this avoids code
- * duplication for R3000 targets (and this is the
- * only place where ".set reorder" doesn't help).
- * Harald.
+ * We intentionally keep the kernel stack a little below the top of
+ * userspace so we don't have to do a slower byte accurate check here.
 */
+ lw t5, TI_ADDR_LIMIT($28)
+ addu t4, t0, 32
+ and t5, t4
+ bltz t5, bad_stack # -> sp is bad
+
+ /* Ok, copy the args from the luser stack to the kernel stack.
+ * t3 is the precomputed number of instruction bytes needed to
+ * load or store arguments 6-8.
+ */
+
+ la t1, 5f # load up to 3 arguments
+ subu t1, t3
+1: lw t5, 16(t0) # argument #5 from usp
 .set push
 .set noreorder
 .set nomacro
-1: lw t1, 24(t0) # argument #7 from usp
- nop
- sw t1, 24(sp)
- nop
-2: lw t1, 20(t0) # argument #5 from usp
- nop
- sw t1, 20(sp)
- nop
-3: lw t1, 16(t0) # argument #5 from usp
- nop
- sw t1, 16(sp)
- nop
-4: .set pop
-
- j stack_done # go back
+ jr t1
+ addiu t1, 6f - 5f
+
+2: lw t8, 28(t0) # argument #8 from usp
+3: lw t7, 24(t0) # argument #7 from usp
+4: lw t6, 20(t0) # argument #6 from usp
+5: jr t1
+ sw t5, 16(sp) # argument #5 to ksp
+
+ sw t8, 28(sp) # argument #8 to ksp
+ sw t7, 24(sp) # argument #7 to ksp
+ sw t6, 20(sp) # argument #6 to ksp
+6: j stack_done # go back
+ nop
+ .set pop
 
 .section __ex_table,"a"
 PTR 1b,bad_stack
 PTR 2b,bad_stack
 PTR 3b,bad_stack
+ PTR 4b,bad_stack
 .previous
 
 /*
@@ -175,7 +184,7 @@ bad_stack:
 * The system call does not exist in this kernel
 */
 illegal_syscall:
- li v0, ENOSYS # error
+ li v0, -ENOSYS # error
 sw v0, PT_R2(sp)
 li t0, 1 # set error flag
 sw t0, PT_R7(sp)
@@ -197,7 +206,11 @@ illegal_syscall:
 1: ll v0, (a1)
 move a0, a2
 2: sc a0, (a1)
+#if R10000_LLSC_WAR
+ beqzl a0, 1b
+#else
 beqz a0, 1b
+#endif
 
 .section __ex_table,"a"
 PTR 1b, bad_stack
@@ -231,17 +244,7 @@ illegal_syscall:
 sw zero, PT_R7(sp) # success
 sw v0, PT_R2(sp) # result
 
- /* Success, so skip usual error handling garbage. */
- LONG_L a2, TI_FLAGS($28) # syscall tracing enabled?
- li t0, _TIF_SYSCALL_TRACE
- and t0, a2, t0
- bnez t0, 1f
-
- b o32_syscall_exit
-
-1: SAVE_STATIC
- jal do_syscall_trace
- j syscall_exit
+ j o32_syscall_exit # continue like a normal syscall
 
 no_mem: li v0, -ENOMEM
 jr ra
@@ -261,69 +264,49 @@ bad_alignment:
 END(sys_sysmips)
 
 LEAF(sys_syscall)
- lw t0, PT_R29(sp) # user sp
-
- sltu v0, a0, __NR_O32_Linux + __NR_O32_Linux_syscalls + 1
- beqz v0, enosys
-
- sll v0, a0, 2
- la v1, sys_syscall
- lw t2, sys_call_table(v0) # function pointer
- lbu t4, sys_narg_table(a0) # number of arguments
-
- li v0, -EINVAL
- beq t2, v1, out # do not recurse
+#if defined(CONFIG_BINFMT_IRIX)
+ sltiu v0, a0, MAX_SYSCALL_NO + 1 # check syscall number
+#else
+ subu t0, a0, __NR_O32_Linux # check syscall number
+ sltiu v0, t0, __NR_O32_Linux_syscalls + 1
+#endif
+ sll t1, t0, 3
+ beqz v0, einval
 
- beqz t2, enosys # null function pointer?
+ lw t2, sys_call_table(t1) # syscall routine
 
- andi v0, t0, 0x3 # unaligned stack pointer?
- bnez v0, sigsegv
+#if defined(CONFIG_BINFMT_IRIX)
+ li v1, 4000 # nr of sys_syscall
+#else
+ li v1, 4000 - __NR_O32_Linux # index of sys_syscall
+#endif
+ beq t0, v1, einval # do not recurse
 
- addu v0, t0, 16 # v0 = usp + 16
- addu t1, v0, 12 # 3 32-bit arguments
- lw v1, TI_ADDR_LIMIT($28)
- or v0, v0, t1
- and v1, v1, v0
- bltz v1, efault
+ /* Some syscalls like execve get their arguments from struct pt_regs
+ and claim zero arguments in the syscall table. Thus we have to
+ assume the worst case and shuffle around all potential arguments.
+ If you want performance, don't use indirect syscalls. */
 
 move a0, a1 # shift argument registers
 move a1, a2
 move a2, a3
-
-1: lw a3, 16(t0)
-2: lw t3, 20(t0)
-3: lw t4, 24(t0)
-
- .section __ex_table, "a"
- .word 1b, efault
- .word 2b, efault
- .word 3b, efault
- .previous
-
- sw t3, 16(sp) # put into new stackframe
- sw t4, 20(sp)
-
- bnez t4, 1f # zero arguments?
- addu a0, sp, 32 # then pass sp in a0
-1:
-
- sw t3, 16(sp)
- sw v1, 20(sp)
+ lw a3, 16(sp)
+ lw t4, 20(sp)
+ lw t5, 24(sp)
+ lw t6, 28(sp)
+ sw t4, 16(sp)
+ sw t5, 20(sp)
+ sw t6, 24(sp)
+ sw a0, PT_R4(sp) # .. and push back a0 - a3, some
+ sw a1, PT_R5(sp) # syscalls expect them there
+ sw a2, PT_R6(sp)
+ sw a3, PT_R7(sp)
+ sw a3, PT_R26(sp) # update a3 for syscall restarting
 jr t2
 /* Unreached */
 
-enosys: li v0, -ENOSYS
- b out
-
-sigsegv:
- li a0, _SIGSEGV
- move a1, $28
- jal force_sig
- /* Fall through */
-
-efault: li v0, -EFAULT
-
-out: jr ra
+einval: li v0, -EINVAL
+ jr ra
 END(sys_syscall)
 
 .macro fifty ptr, nargs, from=1, to=50
@@ -341,12 +324,14 @@ out: jr ra
 .endm
 
 .macro syscalltable
+#if defined(CONFIG_BINFMT_IRIX)
 mille sys_ni_syscall 0 /* 0 - 999 SVR4 flavour */
- #include "irix5sys.h" /* 1000 - 1999 32-bit IRIX */
+ mille sys_ni_syscall 0 /* 1000 - 1999 32-bit IRIX */
 mille sys_ni_syscall 0 /* 2000 - 2999 BSD43 flavour */
 mille sys_ni_syscall 0 /* 3000 - 3999 POSIX flavour */
+#endif
 
- sys sys_syscall 0 /* 4000 */
+ sys sys_syscall 8 /* 4000 */
 sys sys_exit 1
 sys sys_fork 0
 sys sys_read 3
@@ -397,7 +382,7 @@ out: jr ra
 sys sys_ni_syscall 0 /* was signal(2) */
 sys sys_geteuid 0
 sys sys_getegid 0 /* 4050 */
- sys sys_acct 0
+ sys sys_acct 1
 sys sys_umount 2
 sys sys_ni_syscall 0
 sys sys_ioctl 3
@@ -477,7 +462,7 @@ out: jr ra
 sys sys_init_module 5
 sys sys_delete_module 1
 sys sys_ni_syscall 0 /* 4130 was get_kernel_syms */
- sys sys_quotactl 0
+ sys sys_quotactl 4
 sys sys_getpgid 1
 sys sys_fchdir 1
 sys sys_bdflush 2
@@ -498,7 +483,7 @@ out: jr ra
 sys sys_sysmips 4
 sys sys_ni_syscall 0 /* 4150 */
 sys sys_getsid 1
- sys sys_fdatasync 0
+ sys sys_fdatasync 1
 sys sys_sysctl 1
 sys sys_mlock 2
 sys sys_munlock 2 /* 4155 */
@@ -513,7 +498,7 @@ out: jr ra
 sys sys_sched_get_priority_min 1
 sys sys_sched_rr_get_interval 2 /* 4165 */
 sys sys_nanosleep, 2
- sys sys_mremap, 4
+ sys sys_mremap, 5
 sys sys_accept 3
 sys sys_bind 3
 sys sys_connect 3 /* 4170 */
@@ -584,16 +569,27 @@ out: jr ra
 sys sys_fremovexattr 2 /* 4235 */
 sys sys_tkill 2
 sys sys_sendfile64 5
- sys sys_futex 2
+ sys sys_futex 6
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /*
+ * For FPU affinity scheduling on MIPS MT processors, we need to
+ * intercept sys_sched_xxxaffinity() calls until we get a proper hook
+ * in kernel/sched.c. Considered only temporary we only support these
+ * hooks for the 32-bit kernel - there is no MIPS64 MT processor atm.
+ */ + sys mipsmt_sys_sched_setaffinity 3 + sys mipsmt_sys_sched_getaffinity 3 +#else sys sys_sched_setaffinity 3 sys sys_sched_getaffinity 3 /* 4240 */ +#endif /* CONFIG_MIPS_MT_FPAFF */ sys sys_io_setup 2 sys sys_io_destroy 1 sys sys_io_getevents 5 sys sys_io_submit 3 sys sys_io_cancel 3 /* 4245 */ sys sys_exit_group 1 - sys sys_lookup_dcookie 3 + sys sys_lookup_dcookie 4 sys sys_epoll_create 1 sys sys_epoll_ctl 4 sys sys_epoll_wait 3 /* 4250 */ @@ -614,22 +610,64 @@ out: jr ra sys sys_clock_nanosleep 4 /* 4265 */ sys sys_tgkill 3 sys sys_utimes 2 - + sys sys_mbind 4 + sys sys_ni_syscall 0 /* sys_get_mempolicy */ + sys sys_ni_syscall 0 /* 4270 sys_set_mempolicy */ + sys sys_mq_open 4 + sys sys_mq_unlink 1 + sys sys_mq_timedsend 5 + sys sys_mq_timedreceive 5 + sys sys_mq_notify 2 /* 4275 */ + sys sys_mq_getsetattr 3 + sys sys_vserver 3 + sys sys_waitid 5 + sys sys_ni_syscall 0 /* available, was setaltroot */ + sys sys_add_key 5 /* 4280 */ + sys sys_request_key 4 + sys sys_keyctl 5 + sys sys_set_thread_area 1 + sys sys_inotify_init 0 + sys sys_inotify_add_watch 3 /* 4285 */ + sys sys_inotify_rm_watch 2 + sys sys_migrate_pages 4 + sys sys_openat 4 + sys sys_mkdirat 3 + sys sys_mknodat 4 /* 4290 */ + sys sys_fchownat 5 + sys sys_futimesat 3 + sys sys_fstatat64 4 + sys sys_unlinkat 3 + sys sys_renameat 4 /* 4295 */ + sys sys_linkat 5 + sys sys_symlinkat 3 + sys sys_readlinkat 4 + sys sys_fchmodat 3 + sys sys_faccessat 3 /* 4300 */ + sys sys_pselect6 6 + sys sys_ppoll 5 + sys sys_unshare 1 + sys sys_splice 4 + sys sys_sync_file_range 7 /* 4305 */ + sys sys_tee 4 + sys sys_vmsplice 4 + sys sys_move_pages 6 + sys sys_set_robust_list 2 + sys sys_get_robust_list 3 /* 4310 */ + sys sys_kexec_load 4 + sys sys_getcpu 3 + sys sys_epoll_pwait 6 .endm + /* We pre-compute the number of _instruction_ bytes needed to + load or store the arguments 6-8. Negative values are ignored. */ + .macro sys function, nargs PTR \function + LONG (\nargs << 2) - (5 << 2) .endm .align 3 -sys_call_table: + .type sys_call_table,@object +EXPORT(sys_call_table) syscalltable .size sys_call_table, . - sys_call_table - - .macro sys function, nargs - .byte \nargs - .endm - -sys_narg_table: - syscalltable - .size sys_narg_table, . - sys_narg_table