Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index a991d4e..840d990 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -184,8 +184,12 @@ sysenter_past_esp:
        pushl %ebp
        pushfl
        pushl $(__USER_CS)
-       pushl $SYSENTER_RETURN
-
+       /*
+        * Push current_thread_info()->sysenter_return to the stack.
+        * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+        * pushed above; +8 corresponds to copy_thread's esp0 setting.
+        */
+       pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
 /*
  * Load the potential sixth argument from user stack.
  * Careful about security.
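
The pushl above reads current_thread_info()->sysenter_return straight off the kernel stack: thread_info sits at the bottom of the THREAD_SIZE-sized stack, copy_thread leaves esp0 eight bytes below the top, and four words have just been pushed. A minimal C sketch of that address arithmetic; THREAD_SIZE and TI_sysenter_return are placeholder values here, the real ones come from the kernel config and asm-offsets:

    #include <stdint.h>
    #include <assert.h>

    #define THREAD_SIZE        8192u  /* assumed: two 4 KiB pages on i386         */
    #define TI_sysenter_return 0x18u  /* placeholder; real value from asm-offsets */

    /* Address that (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) resolves to,
     * given %esp after the four pushes above. */
    static uint32_t sysenter_return_slot(uint32_t esp)
    {
            uint32_t stack_top   = esp + 4*4 + 8;           /* undo pushes, undo esp0 bias  */
            uint32_t thread_info = stack_top - THREAD_SIZE; /* thread_info sits at the base */
            return thread_info + TI_sysenter_return;
    }

    int main(void)
    {
            uint32_t esp = 0xc1002000u - 8 - 4*4;           /* example: stack top 0xc1002000 */
            assert(sysenter_return_slot(esp) ==
                   0xc1002000u - THREAD_SIZE + TI_sysenter_return);
            return 0;
    }
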
@@ -203,7 +207,7 @@ sysenter_past_esp:
        GET_THREAD_INFO(%ebp)
 
        /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-       testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
+       testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
@@ -226,9 +230,13 @@ ENTRY(system_call)
        pushl %eax                      # save orig_eax
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
-                                       # system call tracing in operation
+       testl $TF_MASK,EFLAGS(%esp)
+       jz no_singlestep
+       orl $_TIF_SINGLESTEP,TI_flags(%ebp)
+no_singlestep:
+                                       # system call tracing in operation / emulation
        /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-       testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
+       testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
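
The int 0x80 path now also checks the saved EFLAGS for the trap flag and records TIF_SINGLESTEP before the trace test, and both work masks gain _TIF_SYSCALL_EMU so a PTRACE_SYSEMU tracee always enters syscall_trace_entry (see the tracer sketch after the trace hunk below). A C sketch of the single-step check, with stand-in struct and flag definitions; TF_MASK is the real EFLAGS.TF value, while the TIF_SINGLESTEP bit number is illustrative:

    #include <stdio.h>

    #define TF_MASK         0x00000100u   /* EFLAGS.TF, as in this kernel */
    #define TIF_SINGLESTEP  4             /* illustrative bit number      */

    struct pt_regs_sketch     { unsigned long eflags; };
    struct thread_info_sketch { unsigned long flags; };

    /* Mirror of the new entry-path check: if user space issued int 0x80 with
     * the trap flag set, remember to deliver the single-step trap afterwards. */
    static void note_singlestep(struct pt_regs_sketch *regs,
                                struct thread_info_sketch *ti)
    {
            if (regs->eflags & TF_MASK)
                    ti->flags |= 1ul << TIF_SINGLESTEP;
    }

    int main(void)
    {
            struct pt_regs_sketch regs = { .eflags = TF_MASK };
            struct thread_info_sketch ti = { 0 };

            note_singlestep(&regs, &ti);
            printf("TIF_SINGLESTEP set: %d\n",
                   !!(ti.flags & (1ul << TIF_SINGLESTEP)));
            return 0;
    }
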
@@ -319,17 +327,19 @@ work_notifysig:                           # deal with pending signals and
                                        # vm86-space
        xorl %edx, %edx
        call do_notify_resume
-       jmp restore_all
+       jmp resume_userspace
 
        ALIGN
 work_notifysig_v86:
+#ifdef CONFIG_VM86
        pushl %ecx                      # save ti_flags for do_notify_resume
        call save_v86_state             # %eax contains pt_regs pointer
        popl %ecx
        movl %eax, %esp
        xorl %edx, %edx
        call do_notify_resume
-       jmp restore_all
+       jmp resume_userspace
+#endif
 
        # perform syscall exit tracing
        ALIGN
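
Returning through resume_userspace instead of restore_all makes the exit path re-read the work flags after do_notify_resume, so work raised while a signal frame was being set up (another pending signal, a reschedule request) is not silently dropped before the iret. A rough, user-space-compilable rendering of that loop; every name with a _sketch suffix and the flag values are invented for illustration:

    #include <stdio.h>

    /* Stand-ins for the real _TIF_* bits; illustrative only. */
    #define TIF_NEED_RESCHED  (1u << 0)
    #define TIF_SIGPENDING    (1u << 1)
    #define TIF_WORK_MASK     (TIF_NEED_RESCHED | TIF_SIGPENDING)

    static unsigned int thread_flags = TIF_SIGPENDING | TIF_NEED_RESCHED;

    static void schedule_sketch(void)
    {
            printf("schedule()\n");
            thread_flags &= ~TIF_NEED_RESCHED;
    }

    static void do_notify_resume_sketch(unsigned int flags)
    {
            printf("do_notify_resume(flags=%#x)\n", flags);
            thread_flags &= ~TIF_SIGPENDING;
    }

    /* The loop that "jmp resume_userspace" re-enters: re-check the work flags
     * after every piece of work instead of returning straight to user space. */
    static void resume_userspace_sketch(void)
    {
            unsigned int flags;

            while ((flags = thread_flags) & TIF_WORK_MASK) {
                    if (flags & TIF_NEED_RESCHED)
                            schedule_sketch();
                    else
                            do_notify_resume_sketch(flags);
            }
            printf("restore registers, return to user space\n");
    }

    int main(void) { resume_userspace_sketch(); return 0; }
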
@@ -338,6 +348,9 @@ syscall_trace_entry:
        movl %esp, %eax
        xorl %edx,%edx
        call do_syscall_trace
+       cmpl $0, %eax
+       jne resume_userspace            # ret != 0 -> running under PTRACE_SYSEMU,
+                                       # so must skip actual syscall
        movl ORIG_EAX(%esp), %eax
        cmpl $(nr_syscalls), %eax
        jnae syscall_call
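
do_syscall_trace() now reports through its return value that the tracee runs under PTRACE_SYSEMU, in which case the entry code skips the real system call and heads straight back toward user space. A small tracer sketch showing that mode from user space; it assumes an i386 target (orig_eax in user_regs_struct) and defines PTRACE_SYSEMU by hand in case the headers of the day do not:

    #include <stdio.h>
    #include <signal.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <sys/syscall.h>
    #include <sys/user.h>
    #include <sys/wait.h>

    #ifndef PTRACE_SYSEMU
    #define PTRACE_SYSEMU 31                     /* request number used on i386 */
    #endif

    int main(void)
    {
            pid_t pid = fork();

            if (pid == 0) {                              /* tracee */
                    ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                    raise(SIGSTOP);                      /* wait for the tracer      */
                    for (;;)
                            syscall(SYS_getpid);         /* emulated, never executed */
            }

            waitpid(pid, NULL, 0);                       /* child stopped at SIGSTOP */
            for (int i = 0; i < 4; i++) {
                    struct user_regs_struct regs;

                    ptrace(PTRACE_SYSEMU, pid, NULL, NULL);  /* run to next syscall entry */
                    waitpid(pid, NULL, 0);
                    ptrace(PTRACE_GETREGS, pid, NULL, &regs);
                    /* the kernel stops here and will NOT execute the syscall */
                    printf("suppressed syscall nr %ld\n", (long)regs.orig_eax);
            }
            kill(pid, SIGKILL);
            waitpid(pid, NULL, 0);
            return 0;
    }
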
@@ -504,7 +517,7 @@ label:                                              \
        pushl $__KERNEL_CS;                     \
        pushl $sysenter_past_esp
 
-ENTRY(debug)
+KPROBE_ENTRY(debug)
        cmpl $sysenter_entry,(%esp)
        jne debug_stack_correct
        FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
@@ -515,7 +528,7 @@ debug_stack_correct:
        movl %esp,%eax                  # pt_regs pointer
        call do_debug
        jmp ret_from_exception
-
+       .previous .text
 /*
  * NMI is doubly nasty. It can happen _while_ we're handling
  * a debug fault, and the debug fault hasn't yet been able to
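
KPROBE_ENTRY differs from ENTRY in that the handler is emitted into the .kprobes.text section, which the kprobes core refuses to probe, so a breakpoint can never land inside the trap handlers kprobes itself depends on; the trailing ".previous .text" returns the assembler to the regular text section. The same treatment is applied to int3, general_protection and page_fault further down. A sketch of the registration-time address check this enables; the section bounds here are stand-in arrays rather than the kernel's linker-script symbols:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the linker-script symbols that bound .kprobes.text. */
    static char kprobes_text_sketch[64];
    static char normal_text_sketch[64];

    /* Registration-time check: refuse probes that fall inside the protected
     * section, so handlers moved there by KPROBE_ENTRY cannot be probed. */
    static bool address_is_probeable(uintptr_t addr, uintptr_t start, uintptr_t end)
    {
            return addr < start || addr >= end;
    }

    int main(void)
    {
            uintptr_t start = (uintptr_t)kprobes_text_sketch;
            uintptr_t end   = start + sizeof(kprobes_text_sketch);

            printf("%d %d\n",
                   address_is_probeable((uintptr_t)&kprobes_text_sketch[8], start, end),
                   address_is_probeable((uintptr_t)&normal_text_sketch[0], start, end));
            return 0;
    }
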
@@ -557,11 +570,10 @@ nmi_stack_fixup:
 nmi_debug_stack_check:
        cmpw $__KERNEL_CS,16(%esp)
        jne nmi_stack_correct
-       cmpl $debug - 1,(%esp)
-       jle nmi_stack_correct
+       cmpl $debug,(%esp)
+       jb nmi_stack_correct
        cmpl $debug_esp_fix_insn,(%esp)
-       jle nmi_debug_stack_fixup
-nmi_debug_stack_fixup:
+       ja nmi_stack_correct
        FIX_STACK(24,nmi_stack_correct, 1)
        jmp nmi_stack_correct
 
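
The old test compared against "$debug - 1" with signed jle and then branched to a label that immediately followed it, so once EIP was above debug - 1 the fixup ran unconditionally; the replacement is a plain unsigned range check that applies the stack fixup only when the interrupted EIP lies within [debug, debug_esp_fix_insn]. The same check in C, as a sketch with the two label addresses passed in as numbers:

    #include <stdbool.h>
    #include <assert.h>

    /* Unsigned range check equivalent to the new cmpl/jb ... cmpl/ja pair. */
    static bool nmi_hit_debug_window(unsigned long eip,
                                     unsigned long debug_lbl,
                                     unsigned long debug_esp_fix_insn_lbl)
    {
            return eip >= debug_lbl && eip <= debug_esp_fix_insn_lbl;
    }

    int main(void)
    {
            assert(!nmi_hit_debug_window(0x0fff, 0x1000, 0x1020));
            assert( nmi_hit_debug_window(0x1010, 0x1000, 0x1020));
            assert(!nmi_hit_debug_window(0x1021, 0x1000, 0x1020));
            return 0;
    }
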
@@ -588,13 +600,14 @@ nmi_16bit_stack:
        .long 1b,iret_exc
 .previous
 
-ENTRY(int3)
+KPROBE_ENTRY(int3)
        pushl $-1                       # mark this as an int
        SAVE_ALL
        xorl %edx,%edx          # zero error code
        movl %esp,%eax          # pt_regs pointer
        call do_int3
        jmp ret_from_exception
+       .previous .text
 
 ENTRY(overflow)
        pushl $0
@@ -628,17 +641,19 @@ ENTRY(stack_segment)
        pushl $do_stack_segment
        jmp error_code
 
-ENTRY(general_protection)
+KPROBE_ENTRY(general_protection)
        pushl $do_general_protection
        jmp error_code
+       .previous .text
 
 ENTRY(alignment_check)
        pushl $do_alignment_check
        jmp error_code
 
-ENTRY(page_fault)
+KPROBE_ENTRY(page_fault)
        pushl $do_page_fault
        jmp error_code
+       .previous .text
 
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
@@ -652,6 +667,7 @@ ENTRY(spurious_interrupt_bug)
        pushl $do_spurious_interrupt_bug
        jmp error_code
 
+.section .rodata,"a"
 #include "syscall_table.S"
 
 syscall_table_size=(.-sys_call_table)
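
The .section .rodata,"a" directive makes the included sys_call_table live in read-only data, so once rodata is write-protected the dispatch table can no longer be patched at run time. Conceptually it is the const array below; the function-pointer type, the stub and the slot contents are all illustrative, not the real table:

    /* Illustrative only: the real table is generated by syscall_table.S. */
    typedef long (*syscall_fn)(void);

    static long sys_ni_syscall_sketch(void) { return -38; }    /* -ENOSYS */

    /* "const" plays the role of .rodata here: the table itself is read-only. */
    static const syscall_fn sys_call_table_sketch[] = {
            sys_ni_syscall_sketch,   /* slot 0 */
            sys_ni_syscall_sketch,   /* slot 1 */
            /* ... one slot per syscall number ... */
    };

    int main(void)
    {
            return sys_call_table_sketch[0]() == -38 ? 0 : 1;
    }
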