X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fi386%2Fkernel%2Fentry.S;h=a14218c9ec6cf208d78fb9e1f15c877c5a84d8df;hb=987b0145d94eecf292d8b301228356f44611ab7c;hp=061a60f597464696a4830dafc3e9c42240f58c9b;hpb=70790a4b5cd6c0291e5b1a2836e2832d46036ac6;p=linux-2.6.git

diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 061a60f59..a14218c9e 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -43,12 +43,11 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
-#include 
 #include 
 #include 
+#include 
 #include "irq_vectors.h"

 #define nr_syscalls ((syscall_table_size)/4)
@@ -80,105 +79,10 @@ VM_MASK		= 0x00020000
 #define preempt_stop		cli
 #else
 #define preempt_stop
-#define resume_kernel		restore_all
+#define resume_kernel		restore_nocheck
 #endif

-#ifdef CONFIG_X86_HIGH_ENTRY
-
-#ifdef CONFIG_X86_SWITCH_PAGETABLES
-
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
-/*
- * If task is preempted in __SWITCH_KERNELSPACE, and moved to another cpu,
- * __switch_to repoints %esp to the appropriate virtual stack; but %ebp is
- * left stale, so we must check whether to repeat the real stack calculation.
- */
-#define repeat_if_esp_changed \
-	xorl %esp, %ebp; \
-	testl $-THREAD_SIZE, %ebp; \
-	jnz 0b
-#else
-#define repeat_if_esp_changed
-#endif
-
-/* clobbers ebx, edx and ebp */
-
-#define __SWITCH_KERNELSPACE \
-	cmpl $0xff000000, %esp; \
-	jb 1f; \
-	\
-	/* \
-	 * switch pagetables and load the real stack, \
-	 * keep the stack offset: \
-	 */ \
-	\
-	movl $swapper_pg_dir-__PAGE_OFFSET, %edx; \
-	\
-	/* GET_THREAD_INFO(%ebp) intermixed */ \
-0: \
-	movl %esp, %ebp; \
-	movl %esp, %ebx; \
-	andl $(-THREAD_SIZE), %ebp; \
-	andl $(THREAD_SIZE-1), %ebx; \
-	orl TI_real_stack(%ebp), %ebx; \
-	repeat_if_esp_changed; \
-	\
-	movl %edx, %cr3; \
-	movl %ebx, %esp; \
-1:
-
-#endif
-
-
-#define __SWITCH_USERSPACE \
-	/* interrupted any of the user return paths? */ \
-	\
-	movl EIP(%esp), %eax; \
-	\
-	cmpl $int80_ret_start_marker, %eax; \
-	jb 33f; /* nope - continue with sysexit check */\
-	cmpl $int80_ret_end_marker, %eax; \
-	jb 22f; /* yes - switch to virtual stack */ \
-33: \
-	cmpl $sysexit_ret_start_marker, %eax; \
-	jb 44f; /* nope - continue with user check */ \
-	cmpl $sysexit_ret_end_marker, %eax; \
-	jb 22f; /* yes - switch to virtual stack */ \
-	/* return to userspace? */ \
-44: \
-	movl EFLAGS(%esp),%ecx; \
-	movb CS(%esp),%cl; \
-	testl $(VM_MASK | 3),%ecx; \
-	jz 2f; \
-22: \
-	/* \
-	 * switch to the virtual stack, then switch to \
-	 * the userspace pagetables. \
-	 */ \
-	\
-	GET_THREAD_INFO(%ebp); \
-	movl TI_virtual_stack(%ebp), %edx; \
-	movl TI_user_pgd(%ebp), %ecx; \
-	\
-	movl %esp, %ebx; \
-	andl $(THREAD_SIZE-1), %ebx; \
-	orl %ebx, %edx; \
-int80_ret_start_marker: \
-	movl %edx, %esp; \
-	movl %ecx, %cr3; \
-	\
-	__RESTORE_ALL; \
-int80_ret_end_marker: \
-2:
-
-#else /* !CONFIG_X86_HIGH_ENTRY */
-
-#define __SWITCH_KERNELSPACE
-#define __SWITCH_USERSPACE
-
-#endif
-
-#define __SAVE_ALL \
+#define SAVE_ALL \
 	cld; \
 	pushl %es; \
 	pushl %ds; \
@@ -193,7 +97,7 @@ int80_ret_end_marker: \
 	movl %edx, %ds; \
 	movl %edx, %es;

-#define __RESTORE_INT_REGS \
+#define RESTORE_INT_REGS \
 	popl %ebx; \
 	popl %ecx; \
 	popl %edx; \
@@ -202,87 +106,30 @@ int80_ret_end_marker: \
 	popl %ebp; \
 	popl %eax

-#define __RESTORE_REGS \
-	__RESTORE_INT_REGS; \
-111:	popl %ds; \
-222:	popl %es; \
+#define RESTORE_REGS \
+	RESTORE_INT_REGS; \
+1:	popl %ds; \
+2:	popl %es; \
 .section .fixup,"ax"; \
-444:	movl $0,(%esp); \
-	jmp 111b; \
-555:	movl $0,(%esp); \
-	jmp 222b; \
-.previous; \
-.section __ex_table,"a";\
-	.align 4; \
-	.long 111b,444b;\
-	.long 222b,555b;\
-.previous
-
-#define __RESTORE_ALL \
-	__RESTORE_REGS \
-	addl $4, %esp; \
-333:	iret; \
-.section .fixup,"ax"; \
-666:	sti; \
-	movl $(__USER_DS), %edx; \
-	movl %edx, %ds; \
-	movl %edx, %es; \
-	pushl $11; \
-	call do_exit; \
+3:	movl $0,(%esp); \
+	jmp 1b; \
+4:	movl $0,(%esp); \
+	jmp 2b; \
 .previous; \
 .section __ex_table,"a";\
 	.align 4; \
-	.long 333b,666b;\
+	.long 1b,3b; \
+	.long 2b,4b; \
 .previous

-#define SAVE_ALL \
-	__SAVE_ALL; \
-	__SWITCH_KERNELSPACE; \
-
-#define RESTORE_ALL \
-	__SWITCH_USERSPACE; \
-	__RESTORE_ALL; \
-
-.section .entry.text,"ax"
-
-ENTRY(lcall7)
-	pushfl			# We get a different stack layout with call
-				# gates, which has to be cleaned up later..
-	pushl %eax
-	SAVE_ALL
-	movl %esp, %ebp
-	pushl %ebp
-	pushl $0x7
-do_lcall:
-	movl EIP(%ebp), %eax	# due to call gates, this is eflags, not eip..
-	movl CS(%ebp), %edx	# this is eip..
-	movl EFLAGS(%ebp), %ecx	# and this is cs..
-	movl %eax,EFLAGS(%ebp)	#
-	movl %edx,EIP(%ebp)	# Now we move them to their "normal" places
-	movl %ecx,CS(%ebp)	#
-	GET_THREAD_INFO_WITH_ESP(%ebp)	# GET_THREAD_INFO
-	movl TI_exec_domain(%ebp), %edx	# Get the execution domain
-	call *EXEC_DOMAIN_handler(%edx)	# Call the handler for the domain
-	addl $4, %esp
-	popl %eax
-	jmp resume_userspace
-
-ENTRY(lcall27)
-	pushfl			# We get a different stack layout with call
-				# gates, which has to be cleaned up later..
-	pushl %eax
-	SAVE_ALL
-	movl %esp, %ebp
-	pushl %ebp
-	pushl $0x27
-	jmp do_lcall
-
 ENTRY(ret_from_fork)
 	pushl %eax
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
 	popl %eax
+	pushl $0x0202			# Reset kernel eflags
+	popfl
 	jmp syscall_exit

 /*
@@ -301,7 +148,7 @@ ret_from_intr:
 	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
 	movb CS(%esp), %al
 	testl $(VM_MASK | 3), %eax
-	jz resume_kernel		# returning to kernel or vm86-space
+	jz resume_kernel
 ENTRY(resume_userspace)
 	cli				# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
@@ -314,19 +161,16 @@ ENTRY(resume_userspace)

 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
+	cli
 	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
-	jnz restore_all
+	jnz restore_nocheck
 need_resched:
 	movl TI_flags(%ebp), %ecx	# need_resched set ?
 	testb $_TIF_NEED_RESCHED, %cl
 	jz restore_all
 	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (exception path) ?
 	jz restore_all
-	movl $PREEMPT_ACTIVE,TI_preempt_count(%ebp)
-	sti
-	call schedule
-	movl $0,TI_preempt_count(%ebp)
-	cli
+	call preempt_schedule_irq
 	jmp need_resched
 #endif
@@ -342,68 +186,54 @@ sysenter_past_esp:
 	pushl %ebp
 	pushfl
 	pushl $(__USER_CS)
-	/*
-	 * Push current_thread_info()->sysenter_return to the stack.
-	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
-	 * pushed above, and the word being pushed now:
-	 */
-	pushl (TI_sysenter_return-THREAD_SIZE+4*4)(%esp)
-	/*
-	 * No six-argument syscall is ever used with sysenter.
-	 */
+	pushl $SYSENTER_RETURN
+
+/*
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
+	cmpl $__PAGE_OFFSET-3,%ebp
+	jae syscall_fault
+1:	movl (%ebp),%ebp
+.section __ex_table,"a"
+	.align 4
+	.long 1b,syscall_fault
+.previous

 	pushl %eax
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)

-	cmpl $(nr_syscalls), %eax
-	jae syscall_badsys
-	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
+	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
 	jnz syscall_trace_entry
+	cmpl $(nr_syscalls), %eax
+	jae syscall_badsys
 	call *sys_call_table(,%eax,4)
 	movl %eax,EAX(%esp)
 	cli
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx
 	jne syscall_exit_work
-
-#ifdef CONFIG_X86_SWITCH_PAGETABLES
-
-	GET_THREAD_INFO(%ebp)
-	movl TI_virtual_stack(%ebp), %edx
-	movl TI_user_pgd(%ebp), %ecx
-	movl %esp, %ebx
-	andl $(THREAD_SIZE-1), %ebx
-	orl %ebx, %edx
-sysexit_ret_start_marker:
-	movl %edx, %esp
-	movl %ecx, %cr3
-	/*
-	 * only ebx is not restored by the userspace sysenter vsyscall
-	 * code, it assumes it to be callee-saved.
-	 */
-	movl EBX(%esp), %ebx
-#endif
-
 /* if something modifies registers it must also disable sysexit */
 	movl EIP(%esp), %edx
 	movl OLDESP(%esp), %ecx
+	xorl %ebp,%ebp
 	sti
 	sysexit
-#ifdef CONFIG_X86_SWITCH_PAGETABLES
-sysexit_ret_end_marker:
-	nop
-#endif
+

 	# system call handler stub
 ENTRY(system_call)
 	pushl %eax			# save orig_eax
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
+	# system call tracing in operation / emulation
+	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
+	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+	jnz syscall_trace_entry
 	cmpl $(nr_syscalls), %eax
 	jae syscall_badsys
-	# system call tracing in operation
-	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
-	jnz syscall_trace_entry
 syscall_call:
 	call *sys_call_table(,%eax,4)
 	movl %eax,EAX(%esp)		# store the return value
@@ -414,8 +244,57 @@ syscall_exit:
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx	# current->work
 	jne syscall_exit_work
+
 restore_all:
-	RESTORE_ALL
+	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
+	# Warning: OLDSS(%esp) contains the wrong/random values if we
+	# are returning to the kernel.
+	# See comments in process.c:copy_thread() for details.
+	movb OLDSS(%esp), %ah
+	movb CS(%esp), %al
+	andl $(VM_MASK | (4 << 8) | 3), %eax
+	cmpl $((4 << 8) | 3), %eax
+	je ldt_ss			# returning to user-space with LDT SS
+restore_nocheck:
+	RESTORE_REGS
+	addl $4, %esp
+1:	iret
+.section .fixup,"ax"
+iret_exc:
+	sti
+	pushl $0			# no error code
+	pushl $do_iret_error
+	jmp error_code
+.previous
+.section __ex_table,"a"
+	.align 4
+	.long 1b,iret_exc
+.previous
+
+ldt_ss:
+	larl OLDSS(%esp), %eax
+	jnz restore_nocheck
+	testl $0x00400000, %eax		# returning to 32bit stack?
+	jnz restore_nocheck		# allright, normal return
+	/* If returning to userspace with 16bit stack,
+	 * try to fix the higher word of ESP, as the CPU
+	 * won't restore it.
+	 * This is an "official" bug of all the x86-compatible
+	 * CPUs, which we can try to work around to make
+	 * dosemu and wine happy. */
+	subl $8, %esp		# reserve space for switch16 pointer
+	cli
+	movl %esp, %eax
+	/* Set up the 16bit stack frame with switch32 pointer on top,
+	 * and a switch16 pointer on top of the current frame. */
+	call setup_x86_bogus_stack
+	RESTORE_REGS
+	lss 20+4(%esp), %esp	# switch to 16bit stack
+1:	iret
+.section __ex_table,"a"
+	.align 4
+	.long 1b,iret_exc
+.previous

 	# perform work that needs to be done immediately before resumption
 	ALIGN
@@ -442,33 +321,19 @@ work_notifysig:			# deal with pending signals and
 					# vm86-space
 	xorl %edx, %edx
 	call do_notify_resume
-
-#if CONFIG_X86_HIGH_ENTRY
-	/*
-	 * Reload db7 if necessary:
-	 */
-	movl TI_flags(%ebp), %ecx
-	testb $_TIF_DB7, %cl
-	jnz work_db7
-
-	jmp restore_all
-
-work_db7:
-	movl TI_task(%ebp), %edx;
-	movl task_thread_db7(%edx), %edx;
-	movl %edx, %db7;
-#endif
-	jmp restore_all
+	jmp resume_userspace

 	ALIGN
 work_notifysig_v86:
-	pushl %ecx
-	call save_v86_state
+#ifdef CONFIG_VM86
+	pushl %ecx			# save ti_flags for do_notify_resume
+	call save_v86_state		# %eax contains pt_regs pointer
 	popl %ecx
 	movl %eax, %esp
 	xorl %edx, %edx
 	call do_notify_resume
-	jmp restore_all
+	jmp resume_userspace
+#endif

 	# perform syscall exit tracing
 	ALIGN
@@ -477,6 +342,9 @@ syscall_trace_entry:
 	movl %esp, %eax
 	xorl %edx,%edx
 	call do_syscall_trace
+	cmpl $0, %eax
+	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
+					# so must skip actual syscall
 	movl ORIG_EAX(%esp), %eax
 	cmpl $(nr_syscalls), %eax
 	jnae syscall_call
@@ -485,7 +353,7 @@ syscall_trace_entry:
 	# perform syscall exit tracing
 	ALIGN
syscall_exit_work:
-	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT), %cl
+	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
 	jz work_pending
 	sti				# could let do_syscall_trace() call
 					# schedule() instead
@@ -494,18 +362,47 @@ syscall_exit_work:
 	call do_syscall_trace
 	jmp resume_userspace

+	ALIGN
+syscall_fault:
+	pushl %eax			# save orig_eax
+	SAVE_ALL
+	GET_THREAD_INFO(%ebp)
+	movl $-EFAULT,EAX(%esp)
+	jmp resume_userspace
+
 	ALIGN
 syscall_badsys:
 	movl $-ENOSYS,EAX(%esp)
 	jmp resume_userspace

+#define FIXUP_ESPFIX_STACK \
+	movl %esp, %eax; \
+	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
+	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
+	/* copy data from 16bit stack to 32bit stack */ \
+	call fixup_x86_bogus_stack; \
+	/* put ESP to the proper location */ \
+	movl %eax, %esp;
+#define UNWIND_ESPFIX_STACK \
+	pushl %eax; \
+	movl %ss, %eax; \
+	/* see if on 16bit stack */ \
+	cmpw $__ESPFIX_SS, %ax; \
+	jne 28f; \
+	movl $__KERNEL_DS, %edx; \
+	movl %edx, %ds; \
+	movl %edx, %es; \
+	/* switch to 32bit stack */ \
+	FIXUP_ESPFIX_STACK \
+28:	popl %eax;
+
 /*
  * Build the entry stubs and pointer table with
  * some assembler magic.
  */
 .data
 ENTRY(interrupt)
-.previous
+.text

 vector=0
 ENTRY(irq_entries_start)
@@ -515,13 +412,14 @@ ENTRY(irq_entries_start)
 	jmp common_interrupt
 .data
 	.long 1b
-.previous
+.text
 vector=vector+1
 .endr

 	ALIGN
 common_interrupt:
 	SAVE_ALL
+	movl %esp,%eax
 	call do_IRQ
 	jmp ret_from_intr

@@ -529,7 +427,8 @@ common_interrupt:
 ENTRY(name) \
 	pushl $nr-256; \
 	SAVE_ALL \
-	call smp_/**/name; \
+	movl %esp,%eax; \
+	call smp_/**/name; \
 	jmp ret_from_intr;

 /* The include is where all of the SMP etc. interrupts come from */
@@ -551,24 +450,18 @@ error_code:
 	pushl %ecx
 	pushl %ebx
 	cld
-	movl %es, %ecx
-	movl ORIG_EAX(%esp), %esi	# get the error code
+	pushl %es
+	UNWIND_ESPFIX_STACK
+	popl %ecx
 	movl ES(%esp), %edi		# get the function address
+	movl ORIG_EAX(%esp), %edx	# get the error code
 	movl %eax, ORIG_EAX(%esp)
 	movl %ecx, ES(%esp)
-	pushl %esi			# push the error code
-	movl $(__USER_DS), %edx
-	movl %edx, %ds
-	movl %edx, %es
-
-/* clobbers edx, ebx and ebp */
-	__SWITCH_KERNELSPACE
-
-	leal 4(%esp), %edx		# prepare pt_regs
-	pushl %edx			# push pt_regs
-
+	movl $(__USER_DS), %ecx
+	movl %ecx, %ds
+	movl %ecx, %es
+	movl %esp,%eax			# pt_regs pointer
 	call *%edi
-	addl $8, %esp
 	jmp ret_from_exception

 ENTRY(coprocessor_error)
@@ -618,15 +511,18 @@ label: \
 	pushl $__KERNEL_CS; \
 	pushl $sysenter_past_esp

-ENTRY(debug)
+KPROBE_ENTRY(debug)
 	cmpl $sysenter_entry,(%esp)
 	jne debug_stack_correct
 	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
 debug_stack_correct:
-	pushl $0
-	pushl $do_debug
-	jmp error_code
-
+	pushl $-1			# mark this as an int
+	SAVE_ALL
+	xorl %edx,%edx			# error code 0
+	movl %esp,%eax			# pt_regs pointer
+	call do_debug
+	jmp ret_from_exception
+	.previous .text
 /*
  * NMI is doubly nasty. It can happen _while_ we're handling
  * a debug fault, and the debug fault hasn't yet been able to
@@ -636,6 +532,11 @@ debug_stack_correct:
  * fault happened on the sysenter path.
  */
 ENTRY(nmi)
+	pushl %eax
+	movl %ss, %eax
+	cmpw $__ESPFIX_SS, %ax
+	popl %eax
+	je nmi_16bit_stack
 	cmpl $sysenter_entry,(%esp)
 	je nmi_stack_fixup
 	pushl %eax
@@ -652,11 +553,9 @@ ENTRY(nmi)
 nmi_stack_correct:
 	pushl %eax
 	SAVE_ALL
-	movl %esp, %edx
-	pushl $0
-	pushl %edx
+	xorl %edx,%edx		# zero error code
+	movl %esp,%eax		# pt_regs pointer
 	call do_nmi
-	addl $8, %esp
 	jmp restore_all

 nmi_stack_fixup:
@@ -665,18 +564,44 @@ nmi_stack_fixup:
 nmi_debug_stack_check:
 	cmpw $__KERNEL_CS,16(%esp)
 	jne nmi_stack_correct
-	cmpl $debug - 1,(%esp)
-	jle nmi_stack_correct
+	cmpl $debug,(%esp)
+	jb nmi_stack_correct
 	cmpl $debug_esp_fix_insn,(%esp)
-	jle nmi_debug_stack_fixup
-nmi_debug_stack_fixup:
+	ja nmi_stack_correct
 	FIX_STACK(24,nmi_stack_correct, 1)
 	jmp nmi_stack_correct

-ENTRY(int3)
-	pushl $0
-	pushl $do_int3
-	jmp error_code
+nmi_16bit_stack:
+	/* create the pointer to lss back */
+	pushl %ss
+	pushl %esp
+	movzwl %sp, %esp
+	addw $4, (%esp)
+	/* copy the iret frame of 12 bytes */
+	.rept 3
+	pushl 16(%esp)
+	.endr
+	pushl %eax
+	SAVE_ALL
+	FIXUP_ESPFIX_STACK		# %eax == %esp
+	xorl %edx,%edx			# zero error code
+	call do_nmi
+	RESTORE_REGS
+	lss 12+4(%esp), %esp		# back to 16bit stack
+1:	iret
+.section __ex_table,"a"
+	.align 4
+	.long 1b,iret_exc
+.previous
+
+KPROBE_ENTRY(int3)
+	pushl $-1			# mark this as an int
+	SAVE_ALL
+	xorl %edx,%edx			# zero error code
+	movl %esp,%eax			# pt_regs pointer
+	call do_int3
+	jmp ret_from_exception
+	.previous .text

 ENTRY(overflow)
 	pushl $0
@@ -710,17 +635,19 @@ ENTRY(stack_segment)
 	pushl $do_stack_segment
 	jmp error_code

-ENTRY(general_protection)
+KPROBE_ENTRY(general_protection)
 	pushl $do_general_protection
 	jmp error_code
+	.previous .text

 ENTRY(alignment_check)
 	pushl $do_alignment_check
 	jmp error_code

-ENTRY(page_fault)
+KPROBE_ENTRY(page_fault)
 	pushl $do_page_fault
 	jmp error_code
+	.previous .text

 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
@@ -734,293 +661,7 @@ ENTRY(spurious_interrupt_bug)
 	pushl $do_spurious_interrupt_bug
 	jmp error_code

-.previous
-
-.data
-ENTRY(sys_call_table)
-	.long sys_restart_syscall	/* 0 - old "setup()" system call, used for restarting */
-	.long sys_exit
-	.long sys_fork
-	.long sys_read
-	.long sys_write
-	.long sys_open		/* 5 */
-	.long sys_close
-	.long sys_waitpid
-	.long sys_creat
-	.long sys_link
-	.long sys_unlink	/* 10 */
-	.long sys_execve
-	.long sys_chdir
-	.long sys_time
-	.long sys_mknod
-	.long sys_chmod		/* 15 */
-	.long sys_lchown16
-	.long sys_ni_syscall	/* old break syscall holder */
-	.long sys_stat
-	.long sys_lseek
-	.long sys_getpid	/* 20 */
-	.long sys_mount
-	.long sys_oldumount
-	.long sys_setuid16
-	.long sys_getuid16
-	.long sys_stime		/* 25 */
-	.long sys_ptrace
-	.long sys_alarm
-	.long sys_fstat
-	.long sys_pause
-	.long sys_utime		/* 30 */
-	.long sys_ni_syscall	/* old stty syscall holder */
-	.long sys_ni_syscall	/* old gtty syscall holder */
-	.long sys_access
-	.long sys_nice
-	.long sys_ni_syscall	/* 35 - old ftime syscall holder */
-	.long sys_sync
-	.long sys_kill
-	.long sys_rename
-	.long sys_mkdir
-	.long sys_rmdir		/* 40 */
-	.long sys_dup
-	.long sys_pipe
-	.long sys_times
-	.long sys_ni_syscall	/* old prof syscall holder */
-	.long sys_brk		/* 45 */
-	.long sys_setgid16
-	.long sys_getgid16
-	.long sys_signal
-	.long sys_geteuid16
-	.long sys_getegid16	/* 50 */
-	.long sys_acct
-	.long sys_umount	/* recycled never used phys() */
-	.long sys_ni_syscall	/* old lock syscall holder */
-	.long sys_ioctl
-	.long sys_fcntl		/* 55 */
-	.long sys_ni_syscall	/* old mpx syscall holder */
-	.long sys_setpgid
-	.long sys_ni_syscall	/* old ulimit syscall holder */
-	.long sys_olduname
-	.long sys_umask		/* 60 */
-	.long sys_chroot
-	.long sys_ustat
-	.long sys_dup2
-	.long sys_getppid
-	.long sys_getpgrp	/* 65 */
-	.long sys_setsid
-	.long sys_sigaction
-	.long sys_sgetmask
-	.long sys_ssetmask
-	.long sys_setreuid16	/* 70 */
-	.long sys_setregid16
-	.long sys_sigsuspend
-	.long sys_sigpending
-	.long sys_sethostname
-	.long sys_setrlimit	/* 75 */
-	.long sys_old_getrlimit
-	.long sys_getrusage
-	.long sys_gettimeofday
-	.long sys_settimeofday
-	.long sys_getgroups16	/* 80 */
-	.long sys_setgroups16
-	.long old_select
-	.long sys_symlink
-	.long sys_lstat
-	.long sys_readlink	/* 85 */
-	.long sys_uselib
-	.long sys_swapon
-	.long sys_reboot
-	.long old_readdir
-	.long old_mmap		/* 90 */
-	.long sys_munmap
-	.long sys_truncate
-	.long sys_ftruncate
-	.long sys_fchmod
-	.long sys_fchown16	/* 95 */
-	.long sys_getpriority
-	.long sys_setpriority
-	.long sys_ni_syscall	/* old profil syscall holder */
-	.long sys_statfs
-	.long sys_fstatfs	/* 100 */
-	.long sys_ioperm
-	.long sys_socketcall
-	.long sys_syslog
-	.long sys_setitimer
-	.long sys_getitimer	/* 105 */
-	.long sys_newstat
-	.long sys_newlstat
-	.long sys_newfstat
-	.long sys_uname
-	.long sys_iopl		/* 110 */
-	.long sys_vhangup
-	.long sys_ni_syscall	/* old "idle" system call */
-	.long sys_vm86old
-	.long sys_wait4
-	.long sys_swapoff	/* 115 */
-	.long sys_sysinfo
-	.long sys_ipc
-	.long sys_fsync
-	.long sys_sigreturn
-	.long sys_clone		/* 120 */
-	.long sys_setdomainname
-	.long sys_newuname
-	.long sys_modify_ldt
-	.long sys_adjtimex
-	.long sys_mprotect	/* 125 */
-	.long sys_sigprocmask
-	.long sys_ni_syscall	/* old "create_module" */
-	.long sys_init_module
-	.long sys_delete_module
-	.long sys_ni_syscall	/* 130: old "get_kernel_syms" */
-	.long sys_quotactl
-	.long sys_getpgid
-	.long sys_fchdir
-	.long sys_bdflush
-	.long sys_sysfs		/* 135 */
-	.long sys_personality
-	.long sys_ni_syscall	/* reserved for afs_syscall */
-	.long sys_setfsuid16
-	.long sys_setfsgid16
-	.long sys_llseek	/* 140 */
-	.long sys_getdents
-	.long sys_select
-	.long sys_flock
-	.long sys_msync
-	.long sys_readv		/* 145 */
-	.long sys_writev
-	.long sys_getsid
-	.long sys_fdatasync
-	.long sys_sysctl
-	.long sys_mlock		/* 150 */
-	.long sys_munlock
-	.long sys_mlockall
-	.long sys_munlockall
-	.long sys_sched_setparam
-	.long sys_sched_getparam	/* 155 */
-	.long sys_sched_setscheduler
-	.long sys_sched_getscheduler
-	.long sys_sched_yield
-	.long sys_sched_get_priority_max
-	.long sys_sched_get_priority_min	/* 160 */
-	.long sys_sched_rr_get_interval
-	.long sys_nanosleep
-	.long sys_mremap
-	.long sys_setresuid16
-	.long sys_getresuid16	/* 165 */
-	.long sys_vm86
-	.long sys_ni_syscall	/* Old sys_query_module */
-	.long sys_poll
-	.long sys_nfsservctl
-	.long sys_setresgid16	/* 170 */
-	.long sys_getresgid16
-	.long sys_prctl
-	.long sys_rt_sigreturn
-	.long sys_rt_sigaction
-	.long sys_rt_sigprocmask	/* 175 */
-	.long sys_rt_sigpending
-	.long sys_rt_sigtimedwait
-	.long sys_rt_sigqueueinfo
-	.long sys_rt_sigsuspend
-	.long sys_pread64	/* 180 */
-	.long sys_pwrite64
-	.long sys_chown16
-	.long sys_getcwd
-	.long sys_capget
-	.long sys_capset	/* 185 */
-	.long sys_sigaltstack
-	.long sys_sendfile
-	.long sys_ni_syscall	/* reserved for streams1 */
-	.long sys_ni_syscall	/* reserved for streams2 */
-	.long sys_vfork		/* 190 */
-	.long sys_getrlimit
-	.long sys_mmap2
-	.long sys_truncate64
-	.long sys_ftruncate64
-	.long sys_stat64	/* 195 */
-	.long sys_lstat64
-	.long sys_fstat64
-	.long sys_lchown
-	.long sys_getuid
-	.long sys_getgid	/* 200 */
-	.long sys_geteuid
-	.long sys_getegid
-	.long sys_setreuid
-	.long sys_setregid
-	.long sys_getgroups	/* 205 */
-	.long sys_setgroups
-	.long sys_fchown
-	.long sys_setresuid
-	.long sys_getresuid
-	.long sys_setresgid	/* 210 */
-	.long sys_getresgid
-	.long sys_chown
-	.long sys_setuid
-	.long sys_setgid
-	.long sys_setfsuid	/* 215 */
-	.long sys_setfsgid
-	.long sys_pivot_root
-	.long sys_mincore
-	.long sys_madvise
-	.long sys_getdents64	/* 220 */
-	.long sys_fcntl64
-	.long sys_ni_syscall	/* reserved for TUX */
-	.long sys_ni_syscall
-	.long sys_gettid
-	.long sys_readahead	/* 225 */
-	.long sys_setxattr
-	.long sys_lsetxattr
-	.long sys_fsetxattr
-	.long sys_getxattr
-	.long sys_lgetxattr	/* 230 */
-	.long sys_fgetxattr
-	.long sys_listxattr
-	.long sys_llistxattr
-	.long sys_flistxattr
-	.long sys_removexattr	/* 235 */
-	.long sys_lremovexattr
-	.long sys_fremovexattr
-	.long sys_tkill
-	.long sys_sendfile64
-	.long sys_futex		/* 240 */
-	.long sys_sched_setaffinity
-	.long sys_sched_getaffinity
-	.long sys_set_thread_area
-	.long sys_get_thread_area
-	.long sys_io_setup	/* 245 */
-	.long sys_io_destroy
-	.long sys_io_getevents
-	.long sys_io_submit
-	.long sys_io_cancel
-	.long sys_fadvise64	/* 250 */
-	.long sys_ni_syscall
-	.long sys_exit_group
-	.long sys_lookup_dcookie
-	.long sys_epoll_create
-	.long sys_epoll_ctl	/* 255 */
-	.long sys_epoll_wait
-	.long sys_remap_file_pages
-	.long sys_set_tid_address
-	.long sys_timer_create
-	.long sys_timer_settime		/* 260 */
-	.long sys_timer_gettime
-	.long sys_timer_getoverrun
-	.long sys_timer_delete
-	.long sys_clock_settime
-	.long sys_clock_gettime		/* 265 */
-	.long sys_clock_getres
-	.long sys_clock_nanosleep
-	.long sys_statfs64
-	.long sys_fstatfs64
-	.long sys_tgkill	/* 270 */
-	.long sys_utimes
-	.long sys_fadvise64_64
-	.long sys_vserver
-	.long sys_mbind
-	.long sys_get_mempolicy
-	.long sys_set_mempolicy
-	.long sys_mq_open
-	.long sys_mq_unlink
-	.long sys_mq_timedsend
-	.long sys_mq_timedreceive	/* 280 */
-	.long sys_mq_notify
-	.long sys_mq_getsetattr
-	.long sys_ni_syscall		/* reserved for kexec */
+.section .rodata,"a"
+#include "syscall_table.S"

 syscall_table_size=(.-sys_call_table)
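
A note for readers of the new sysenter path above: the handler fetches the sixth syscall argument from the user stack through %ebp before SAVE_ALL runs, so the bound check cmpl $__PAGE_OFFSET-3,%ebp / jae syscall_fault rejects any pointer whose 4-byte load could reach kernel addresses, and the __ex_table entry catches pointers that pass the bound but hit an unmapped page. A minimal C model of just the bound follows (the PAGE_OFFSET value assumes the default 3G/1G split; the helper name is illustrative, not kernel code):

	#include <stdint.h>
	#include <stdbool.h>

	#define PAGE_OFFSET 0xc0000000u		/* assumed 3G/1G split */

	/* A 4-byte load at p touches p..p+3, so it stays entirely below
	 * PAGE_OFFSET only when p < PAGE_OFFSET - 3: exactly the jae test. */
	static bool sixth_arg_load_is_user_only(uint32_t p)
	{
		return p < PAGE_OFFSET - 3u;
	}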
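
The repeated comment that _TIF_SECCOMP is bit number 8 explains the switch from testb to testw: a byte test can only observe bits 0 through 7 of the flags word, so any flag at bit 8 or above would be silently missed. A sketch of the difference (the bit position is taken from the comment in the diff; the helpers are illustrative):

	#include <stdint.h>
	#include <stdbool.h>

	#define TIF_SECCOMP_BIT 8	/* per the comment in the diff */

	static bool byte_test_sees_seccomp(uint16_t ti_flags)
	{
		/* models testb: only the low 8 bits are visible */
		return ((ti_flags & 0xffu) & (1u << TIF_SECCOMP_BIT)) != 0;	/* always false */
	}

	static bool word_test_sees_seccomp(uint16_t ti_flags)
	{
		/* models testw: the full 16-bit flags word is visible */
		return (ti_flags & (1u << TIF_SECCOMP_BIT)) != 0;
	}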
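
The new restore_all decides whether the slow ldt_ss exit is needed by packing three facts into one register: the VM bit of the saved EFLAGS (bit 17), bit 2 of the saved SS selector (its table-indicator bit, which lands on bit 10 after the movb into %ah), and the two RPL bits of the saved CS. The same predicate in C (a sketch; the function name is mine, the constants are from the diff):

	#include <stdint.h>
	#include <stdbool.h>

	#define VM_MASK 0x00020000u

	/* mirrors: movl EFLAGS,%eax; movb OLDSS,%ah; movb CS,%al;
	 * andl $(VM_MASK | (4 << 8) | 3); cmpl $((4 << 8) | 3) */
	static bool returning_to_user_with_ldt_ss(uint32_t eflags,
						  uint8_t ss_low, uint8_t cs_low)
	{
		uint32_t eax = (eflags & ~0xffffu)	/* high EFLAGS bits kept */
			     | ((uint32_t)ss_low << 8)	/* movb OLDSS(%esp),%ah */
			     | cs_low;			/* movb CS(%esp),%al */
		eax &= VM_MASK | (4u << 8) | 3u;	/* keep VM flag, SS.TI, CS.RPL */
		return eax == ((4u << 8) | 3u);		/* not vm86, RPL 3, LDT SS */
	}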
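
Finally, why ldt_ss and nmi_16bit_stack exist at all: an iret to a 16-bit stack segment restores only the low word of ESP, so the high word keeps whatever the kernel stack pointer held, which both leaks kernel address bits and breaks programs such as dosemu and wine. The workaround irets from a small dedicated stack whose high ESP bits are harmless constants. A self-contained C model of the effect (every value here is hypothetical, for illustration only):

	#include <stdint.h>
	#include <stdio.h>

	/* With a 16-bit SS, only the low 16 bits of ESP come from the
	 * iret frame; the high 16 bits survive from the previous context. */
	static uint32_t esp_after_16bit_iret(uint32_t prev_esp, uint16_t frame_sp)
	{
		return (prev_esp & 0xffff0000u) | frame_sp;
	}

	int main(void)
	{
		uint32_t kernel_esp = 0xc1234f00u;	/* hypothetical kernel ESP */
		uint16_t frame_sp   = 0x8ffeu;		/* SP image in the iret frame */

		/* iret straight off the kernel stack leaks address bits: */
		printf("leaked: %#010x\n",
		       (unsigned)esp_after_16bit_iret(kernel_esp, frame_sp));

		/* iret from a dedicated per-CPU stack exposes only a constant: */
		uint32_t espfix_esp = 0x00420000u;	/* hypothetical fixed mapping */
		printf("fixed:  %#010x\n",
		       (unsigned)esp_after_16bit_iret(espfix_esp, frame_sp));
		return 0;
	}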