vserver 1.9.5.x5
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 6703e7f..0ad38f6 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -3,7 +3,7 @@
  *
  * Kernel entry points.
  *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  * Copyright (C) 1999, 2002-2003
  *     Asit Mallick <Asit.K.Mallick@intel.com>
         * setup a null register window frame.
         */
 ENTRY(ia64_execve)
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3)
-       alloc loc1=ar.pfs,3,2,4,0
+       /*
+        * Allocate 8 input registers since ptrace() may clobber them
+        */
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc loc1=ar.pfs,8,2,4,0
        mov loc0=rp
        .body
        mov out0=in0                    // filename
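
Why every entry point in this patch grows to eight input registers: ia64 passes up to eight syscall arguments in r32..r39, and ptrace() may touch any of those slots through the register backing store whether or not the syscall consumes them. A minimal sketch of that invariant (names illustrative):

    enum { IA64_SYSCALL_MAX_ARGS = 8 };     /* r32..r39 become in0..in7 */

    /* ptrace() may index any of the 8 argument slots, so the register frame
     * must allocate all of them even when, as for execve(), only 3 are used. */
    static unsigned long syscall_arg(const unsigned long in[IA64_SYSCALL_MAX_ARGS], int n)
    {
        return in[n];
    }
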
@@ -113,8 +116,11 @@ END(ia64_execve)
  *           u64 tls)
  */
 GLOBAL_ENTRY(sys_clone2)
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(6)
-       alloc r16=ar.pfs,6,2,6,0
+       /*
+        * Allocate 8 input registers since ptrace() may clobber them
+        */
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc r16=ar.pfs,8,2,6,0
        DO_SAVE_SWITCH_STACK
        adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
        mov loc0=rp
@@ -128,7 +134,7 @@ GLOBAL_ENTRY(sys_clone2)
 (p6)   st8 [r2]=in5                            // store TLS in r16 for copy_thread()
        mov out5=in4    // child_tidptr:  valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
        adds out2=IA64_SWITCH_STACK_SIZE+16,sp  // out2 = &regs
-       dep out0=0,in0,CLONE_IDLETASK_BIT,1     // out0 = clone_flags & ~CLONE_IDLETASK
+       mov out0=in0                            // out0 = clone_flags
        br.call.sptk.many rp=do_fork
 .ret1: .restore sp
        adds sp=IA64_SWITCH_STACK_SIZE,sp       // pop the switch stack
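
For reference, the dropped `dep` deposited a one-bit field of zeros at CLONE_IDLETASK_BIT, i.e. it masked CLONE_IDLETASK out of the flags before calling do_fork(); with that flag gone from the kernel, the flags now pass through untouched. The same change recurs in sys_clone below. In C terms (the bit value is illustrative):

    #define CLONE_IDLETASK_BIT 12                /* illustrative value */

    static unsigned long out0_before(unsigned long clone_flags)
    {
        /* dep out0=0,in0,CLONE_IDLETASK_BIT,1 == clear that single bit */
        return clone_flags & ~(1UL << CLONE_IDLETASK_BIT);
    }

    static unsigned long out0_after(unsigned long clone_flags)
    {
        return clone_flags;                      /* mov out0=in0 */
    }
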
@@ -142,8 +148,11 @@ END(sys_clone2)
  *     Deprecated.  Use sys_clone2() instead.
  */
 GLOBAL_ENTRY(sys_clone)
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
-       alloc r16=ar.pfs,5,2,6,0
+       /*
+        * Allocate 8 input registers since ptrace() may clobber them
+        */
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc r16=ar.pfs,8,2,6,0
        DO_SAVE_SWITCH_STACK
        adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
        mov loc0=rp
@@ -157,7 +166,7 @@ GLOBAL_ENTRY(sys_clone)
 (p6)   st8 [r2]=in4                            // store TLS in r13 (tp)
        mov out5=in3    // child_tidptr:  valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
        adds out2=IA64_SWITCH_STACK_SIZE+16,sp  // out2 = &regs
-       dep out0=0,in0,CLONE_IDLETASK_BIT,1     // out0 = clone_flags & ~CLONE_IDLETASK
+       mov out0=in0                            // out0 = clone_flags
        br.call.sptk.many rp=do_fork
 .ret2: .restore sp
        adds sp=IA64_SWITCH_STACK_SIZE,sp       // pop the switch stack
@@ -179,21 +188,23 @@ GLOBAL_ENTRY(ia64_switch_to)
        .body
 
        adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
+       movl r25=init_task
        mov r27=IA64_KR(CURRENT_STACK)
-       dep r20=0,in0,61,3              // physical address of "current"
+       adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
+       dep r20=0,in0,61,3              // physical address of "next"
        ;;
        st8 [r22]=sp                    // save kernel stack pointer of old task
        shr.u r26=r20,IA64_GRANULE_SHIFT
-       adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
+       cmp.eq p7,p6=r25,in0
        ;;
        /*
         * If we've already mapped this task's page, we can skip doing it again.
         */
-       cmp.eq p7,p6=r26,r27
+(p6)   cmp.eq p7,p6=r26,r27
 (p6)   br.cond.dpnt .map
        ;;
 .done:
-(p6)   ssm psr.ic                      // if we we had to map, renable the psr.ic bit FIRST!!!
+(p6)   ssm psr.ic                      // if we had to map, reenable the psr.ic bit FIRST!!!
        ;;
 (p6)   srlz.d
        ld8 sp=[r21]                    // load kernel stack pointer of new task
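
The added compare against init_task short-circuits the stack-mapping check: p7 is set when the incoming task is init_task, so both the granule compare and the branch to .map are skipped. A rough C rendering (the helper names are hypothetical):

    extern struct task_struct init_task;

    static void switch_stack_mapping(struct task_struct *next)
    {
        if (next == &init_task)
            return;                      /* covered by the pinned kernel mapping */
        if (granule_of(next) != current_mapped_granule())
            map_task_stack(next);        /* the .map path; psr.ic is reenabled after */
    }
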
@@ -506,7 +517,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
        ;;
        stf.spill [r16]=f10
        stf.spill [r17]=f11
-       br.call.sptk.many rp=syscall_trace // give parent a chance to catch syscall args
+       br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
        adds r16=PT(F6)+16,sp
        adds r17=PT(F7)+16,sp
        ;;
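
The rename reflects splitting the old syscall_trace() hook into entry and exit halves so that syscall auditing can share the same slow path. A shape sketch (the two names come from the diff; the bodies are assumed):

    void syscall_trace_enter(struct pt_regs *regs)
    {
        if (test_thread_flag(TIF_SYSCALL_TRACE))
            syscall_trace();             /* ptrace stop with arguments visible */
        /* the audit entry hook sits here */
    }

    void syscall_trace_leave(struct pt_regs *regs)
    {
        /* the audit exit hook sits here */
        if (test_thread_flag(TIF_SYSCALL_TRACE))
            syscall_trace();             /* ptrace stop with return value visible */
    }
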
@@ -546,8 +557,8 @@ GLOBAL_ENTRY(ia64_trace_syscall)
 .strace_save_retval:
 .mem.offset 0,0; st8.spill [r2]=r8             // store return value in slot for r8
 .mem.offset 8,0; st8.spill [r3]=r10            // clear error indication in slot for r10
-       br.call.sptk.many rp=syscall_trace // give parent a chance to catch return value
-.ret3: br.cond.sptk ia64_leave_syscall
+       br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
+.ret3: br.cond.sptk .work_pending_syscall_end
 
 strace_error:
        ld8 r3=[r2]                             // load pt_regs.r8
@@ -573,7 +584,7 @@ GLOBAL_ENTRY(ia64_strace_leave_kernel)
         */
        nop.m 0
        nop.i 0
-       br.call.sptk.many rp=syscall_trace // give parent a chance to catch return value
+       br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
 }
 .ret4: br.cond.sptk ia64_leave_kernel
 END(ia64_strace_leave_kernel)
@@ -599,7 +610,9 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
        ld4 r2=[r2]
        ;;
        mov r8=0
-       tbit.nz p6,p0=r2,TIF_SYSCALL_TRACE
+       and r2=_TIF_SYSCALL_TRACEAUDIT,r2
+       ;;
+       cmp.ne p6,p0=r2,r0
 (p6)   br.cond.spnt .strace_check_retval
        ;;                                      // added stop bits to prevent r8 dependency
 END(ia64_ret_from_clone)
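
tbit.nz can test only a single bit, so covering both tracing and auditing requires the and/cmp pair added above. In C terms (the mask composition is an assumption about thread_info.h):

    #define _TIF_SYSCALL_TRACEAUDIT  (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)

    static int strace_check_needed(unsigned long flags)
    {
        return (flags & _TIF_SYSCALL_TRACEAUDIT) != 0;   /* and r2=...; cmp.ne p6 */
    }
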
@@ -608,10 +621,7 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
        PT_REGS_UNWIND_INFO(0)
        cmp.ge p6,p7=r8,r0                      // syscall executed successfully?
        adds r2=PT(R8)+16,sp                    // r2 = &pt_regs.r8
-       adds r3=PT(R10)+16,sp                   // r3 = &pt_regs.r10
-       ;;
-.mem.offset 0,0; (p6) st8.spill [r2]=r8        // store return value in slot for r8 and set unat bit
-.mem.offset 8,0; (p6) st8.spill [r3]=r0        // clear error indication in slot for r10 and set unat bit
+       mov r10=r0                              // clear error indication in r10
 (p7)   br.cond.spnt handle_syscall_error       // handle potential syscall failure
 END(ia64_ret_from_syscall)
        // fall through
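
The success path no longer spills r8/r10 back into the pt_regs slots; it simply clears r10 in the live register file and leaves r8 alone. The convention this maintains, as a sketch:

    /* ia64 syscall return convention: r8 holds the return value (a positive
     * errno on failure), r10 is the error flag (0 = success, -1 = failure). */
    static void ret_from_syscall(long r8, long *r10)
    {
        if (r8 >= 0)
            *r10 = 0;                    /* mov r10=r0: clear error indication */
        /* r8 < 0 branches to handle_syscall_error instead (see below) */
    }
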
@@ -629,10 +639,12 @@ END(ia64_ret_from_syscall)
  *              r13: restored (user-level thread pointer)
  *              r14: cleared
  *              r15: restored (syscall #)
- *          r16-r19: cleared
+ *          r16-r17: cleared
+ *              r18: user-level b6
+ *              r19: cleared
  *              r20: user-level ar.fpsr
  *              r21: user-level b0
- *              r22: user-level b6
+ *              r22: cleared
  *              r23: user-level ar.bspstore
  *              r24: user-level ar.rnat
  *              r25: user-level ar.unat
@@ -657,102 +669,105 @@ END(ia64_ret_from_syscall)
  *           ar.csd: cleared
  *           ar.ssd: cleared
  */
-GLOBAL_ENTRY(ia64_leave_syscall)
+ENTRY(ia64_leave_syscall)
        PT_REGS_UNWIND_INFO(0)
        /*
         * work.need_resched etc. mustn't get changed by this CPU before it returns to
-        * user- or fsys-mode, hence we disable interrupts early on:
+        * user- or fsys-mode, hence we disable interrupts early on.
+        *
+        * p6 controls whether current_thread_info()->flags needs to be checked for
+        * extra work.  We always check for extra work when returning to user-level.
+        * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
+        * is 0.  After extra work processing has been completed, execution
+        * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
+        * needs to be redone.
         */
 #ifdef CONFIG_PREEMPT
        rsm psr.i                               // disable interrupts
-#else
-(pUStk)        rsm psr.i
-#endif
        cmp.eq pLvSys,p0=r0,r0                  // pLvSys=1: leave from syscall
-(pUStk)        cmp.eq.unc p6,p0=r0,r0                  // p6 <- pUStk
-.work_processed_syscall:
-#ifdef CONFIG_PREEMPT
 (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
        ;;
        .pred.rel.mutex pUStk,pKStk
 (pKStk) ld4 r21=[r20]                  // r21 <- preempt_count
 (pUStk)        mov r21=0                       // r21 <- 0
        ;;
-(p6)   cmp.eq.unc p6,p0=r21,r0         // p6 <- p6 && (r21 == 0)
-#endif /* CONFIG_PREEMPT */
-       adds r16=PT(LOADRS)+16,r12
-       adds r17=PT(AR_BSPSTORE)+16,r12
+       cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
+#else /* !CONFIG_PREEMPT */
+(pUStk)        rsm psr.i
+       cmp.eq pLvSys,p0=r0,r0          // pLvSys=1: leave from syscall
+(pUStk)        cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
+#endif
+.work_processed_syscall:
+       adds r2=PT(LOADRS)+16,r12
+       adds r3=PT(AR_BSPSTORE)+16,r12
        adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
        ;;
 (p6)   ld4 r31=[r18]                           // load current_thread_info()->flags
-       ld8 r19=[r16],PT(B6)-PT(LOADRS)         // load ar.rsc value for "loadrs"
-       nop.i 0
+       ld8 r19=[r2],PT(B6)-PT(LOADRS)          // load ar.rsc value for "loadrs"
+       mov b7=r0               // clear b7
        ;;
-       ld8 r23=[r17],PT(R9)-PT(AR_BSPSTORE)    // load ar.bspstore (may be garbage)
-       ld8 r22=[r16],PT(R8)-PT(B6)             // load b6
+       ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)    // load ar.bspstore (may be garbage)
+       ld8 r18=[r2],PT(R9)-PT(B6)              // load b6
 (p6)   and r15=TIF_WORK_MASK,r31               // any work other than TIF_SYSCALL_TRACE?
        ;;
-
-       mov.m ar.ccv=r0         // clear ar.ccv
+       mov r16=ar.bsp                          // M2  get existing backing store pointer
 (p6)   cmp4.ne.unc p6,p0=r15, r0               // any special work pending?
-(p6)   br.cond.spnt .work_pending
+(p6)   br.cond.spnt .work_pending_syscall
        ;;
        // start restoring the state saved on the kernel stack (struct pt_regs):
-       ld8.fill r8=[r16],16
-       ld8.fill r9=[r17],16
+       ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
+       ld8 r11=[r3],PT(CR_IIP)-PT(R11)
        mov f6=f0               // clear f6
        ;;
-       ld8.fill r10=[r16],16
-       ld8.fill r11=[r17],16
-       mov f7=f0               // clear f7
-       ;;
-       ld8 r29=[r16],16        // load cr.ipsr
-       ld8 r28=[r17],16        // load cr.iip
+       invala                  // M0|1 invalidate ALAT
+       rsm psr.i | psr.ic      // M2 initiate turning off of interrupt and interruption collection
+       mov f9=f0               // clear f9
+
+       ld8 r29=[r2],16         // load cr.ipsr
+       ld8 r28=[r3],16                 // load cr.iip
        mov f8=f0               // clear f8
        ;;
-       ld8 r30=[r16],16        // load cr.ifs
-       ld8 r25=[r17],16        // load ar.unat
+       ld8 r30=[r2],16         // M0|1 load cr.ifs
+       mov.m ar.ssd=r0         // M2 clear ar.ssd
        cmp.eq p9,p0=r0,r0      // set p9 to indicate that we should restore cr.ifs
        ;;
-       rsm psr.i | psr.ic      // initiate turning off of interrupt and interruption collection
-       invala                  // invalidate ALAT
-       mov f9=f0               // clear f9
-
-       mov.m ar.ssd=r0         // clear ar.ssd
-       mov.m ar.csd=r0         // clear ar.csd
+       ld8 r25=[r3],16         // M0|1 load ar.unat
+       mov.m ar.csd=r0         // M2 clear ar.csd
+       mov r22=r0              // clear r22
+       ;;
+       ld8 r26=[r2],PT(B0)-PT(AR_PFS)  // M0|1 load ar.pfs
+(pKStk)        mov r22=psr             // M2 read PSR now that interrupts are disabled
        mov f10=f0              // clear f10
        ;;
-       ld8 r26=[r16],16        // load ar.pfs
-       ld8 r27=[r17],PT(PR)-PT(AR_RSC) // load ar.rsc
+       ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // load b0
+       ld8 r27=[r3],PT(PR)-PT(AR_RSC)  // load ar.rsc
        mov f11=f0              // clear f11
        ;;
-       ld8 r24=[r16],PT(B0)-PT(AR_RNAT)        // load ar.rnat (may be garbage)
-       ld8 r31=[r17],PT(R1)-PT(PR)             // load predicates
+       ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)    // load ar.rnat (may be garbage)
+       ld8 r31=[r3],PT(R1)-PT(PR)              // load predicates
 (pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
        ;;
-       ld8 r21=[r16],PT(R12)-PT(B0) // load b0
-       ld8.fill r1=[r17],16    // load r1
-(pUStk) mov r3=1
+       ld8 r20=[r2],PT(R12)-PT(AR_FPSR)        // load ar.fpsr
+       ld8.fill r1=[r3],16     // load r1
+(pUStk) mov r17=1
        ;;
-       ld8.fill r12=[r16],16
-       ld8.fill r13=[r17],16
-       mov r2=r0               // clear r2
+       srlz.d                  // M0  ensure interruption collection is off
+       ld8.fill r13=[r3],16
+       mov f7=f0               // clear f7
        ;;
-       ld8 r20=[r16]           // load ar.fpsr
-       ld8.fill r15=[r17]      // load r15
-       mov b7=r0               // clear b7
+       ld8.fill r12=[r2]       // restore r12 (sp)
+       ld8.fill r15=[r3]       // restore r15
+       addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
        ;;
-(pUStk) st1 [r14]=r3
-       addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
+(pUStk)        ld4 r3=[r3]             // r3 = cpu_data->phys_stacked_size_p8
+(pUStk) st1 [r14]=r17
+       mov b6=r18              // I0  restore b6
        ;;
-       mov r16=ar.bsp          // get existing backing store pointer
-       srlz.i                  // ensure interruption collection is off
        mov r14=r0              // clear r14
-       ;;
-       ld4 r17=[r17]           // r17 = cpu_data->phys_stacked_size_p8
-       mov b6=r22                              // restore b6
-       shr.u r18=r19,16        // get byte size of existing "dirty" partition
+       shr.u r18=r19,16        // I0|1 get byte size of existing "dirty" partition
 (pKStk) br.cond.dpnt.many skip_rbs_switch
+
+       mov.m ar.ccv=r0         // clear ar.ccv
 (pNonSys) br.cond.dpnt.many dont_preserve_current_frame
        br.cond.sptk.many rbs_switch
 END(ia64_leave_syscall)
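
The predicate setup the new comment block describes boils down to this; pUStk/pKStk play the role of returning_to_user:

    /* C rendering of the p6 computation (sketch): */
    static int need_extra_work_check(int returning_to_user, int preempt_count)
    {
    #ifdef CONFIG_PREEMPT
        return returning_to_user || preempt_count == 0;
    #else
        return returning_to_user;
    #endif
    }
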
@@ -774,26 +789,31 @@ GLOBAL_ENTRY(ia64_leave_kernel)
        PT_REGS_UNWIND_INFO(0)
        /*
         * work.need_resched etc. mustn't get changed by this CPU before it returns to
-        * user- or fsys-mode, hence we disable interrupts early on:
+        * user- or fsys-mode, hence we disable interrupts early on.
+        *
+        * p6 controls whether current_thread_info()->flags needs to be checked for
+        * extra work.  We always check for extra work when returning to user-level.
+        * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
+        * is 0.  After extra work processing has been completed, execution
+        * resumes at .work_processed_kernel with p6 set to 1 if the extra-work-check
+        * needs to be redone.
         */
 #ifdef CONFIG_PREEMPT
        rsm psr.i                               // disable interrupts
-#else
-(pUStk)        rsm psr.i
-#endif
        cmp.eq p0,pLvSys=r0,r0                  // pLvSys=0: leave from kernel
-(pUStk)        cmp.eq.unc p6,p0=r0,r0                  // p6 <- pUStk
-       ;;
-.work_processed_kernel:
-#ifdef CONFIG_PREEMPT
-       adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
+(pKStk)        adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
        ;;
        .pred.rel.mutex pUStk,pKStk
 (pKStk)        ld4 r21=[r20]                   // r21 <- preempt_count
 (pUStk)        mov r21=0                       // r21 <- 0
        ;;
-(p6)   cmp.eq.unc p6,p0=r21,r0         // p6 <- p6 && (r21 == 0)
-#endif /* CONFIG_PREEMPT */
+       cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
+#else
+(pUStk)        rsm psr.i
+       cmp.eq p0,pLvSys=r0,r0          // pLvSys=0: leave from kernel
+(pUStk)        cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
+#endif
+.work_processed_kernel:
        adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
        ;;
 (p6)   ld4 r31=[r17]                           // load current_thread_info()->flags
@@ -868,12 +888,16 @@ GLOBAL_ENTRY(ia64_leave_kernel)
        srlz.i                  // ensure interruption collection is off
        mov ar.ccv=r15
        ;;
+       ldf.fill f11=[r2]
        bsw.0                   // switch back to bank 0 (no stop bit required beforehand...)
        ;;
-       ldf.fill f11=[r2]
-(pUStk)        mov r18=IA64_KR(CURRENT)        // Itanium 2: 12 cycle read latency
+(pUStk)        mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
        adds r16=PT(CR_IPSR)+16,r12
        adds r17=PT(CR_IIP)+16,r12
+
+(pKStk)        mov r22=psr             // M2 read PSR now that interrupts are disabled
+       nop.i 0
+       nop.i 0
        ;;
        ld8 r29=[r16],16        // load cr.ipsr
        ld8 r28=[r17],16        // load cr.iip
@@ -886,7 +910,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
        cmp.eq p9,p0=r0,r0      // set p9 to indicate that we should restore cr.ifs
        ;;
        ld8 r24=[r16],16        // load ar.rnat (may be garbage)
-       ld8 r23=[r17],16// load ar.bspstore (may be garbage)
+       ld8 r23=[r17],16        // load ar.bspstore (may be garbage)
        ;;
        ld8 r31=[r16],16        // load predicates
        ld8 r21=[r17],16        // load b0
@@ -956,6 +980,7 @@ dont_preserve_current_frame:
        shladd in0=loc1,3,r17
        mov in1=0
        ;;
+       TEXT_ALIGN(32)
 rse_clear_invalid:
 #ifdef CONFIG_ITANIUM
        // cycle 0
@@ -1016,23 +1041,33 @@ rse_clear_invalid:
        loadrs
        ;;
 skip_rbs_switch:
-(pLvSys)       mov r19=r0              // clear r19 for leave_syscall, no-op otherwise
-       mov b0=r21
-       mov ar.pfs=r26
-(pUStk)        mov ar.bspstore=r23
-(p9)   mov cr.ifs=r30
-(pLvSys)mov r16=r0             // clear r16 for leave_syscall, no-op otherwise
-       mov cr.ipsr=r29
-       mov ar.fpsr=r20
-(pLvSys)mov r17=r0             // clear r17 for leave_syscall, no-op otherwise
-       mov cr.iip=r28
-       ;;
-(pUStk)        mov ar.rnat=r24         // must happen with RSE in lazy mode
-(pLvSys)mov r18=r0             // clear r18 for leave_syscall, no-op otherwise
-       mov ar.rsc=r27
-       mov ar.unat=r25
-       mov pr=r31,-1
-       rfi
+       mov ar.unat=r25         // M2
+(pKStk)        extr.u r22=r22,21,1     // I0 extract current value of psr.pp from r22
+(pLvSys)mov r19=r0             // A  clear r19 for leave_syscall, no-op otherwise
+       ;;
+(pUStk)        mov ar.bspstore=r23     // M2
+(pKStk)        dep r29=r22,r29,21,1    // I0 update ipsr.pp with psr.pp
+(pLvSys)mov r16=r0             // A  clear r16 for leave_syscall, no-op otherwise
+       ;;
+       mov cr.ipsr=r29         // M2
+       mov ar.pfs=r26          // I0
+(pLvSys)mov r17=r0             // A  clear r17 for leave_syscall, no-op otherwise
+
+(p9)   mov cr.ifs=r30          // M2
+       mov b0=r21              // I0
+(pLvSys)mov r18=r0             // A  clear r18 for leave_syscall, no-op otherwise
+
+       mov ar.fpsr=r20         // M2
+       mov cr.iip=r28          // M2
+       nop 0
+       ;;
+(pUStk)        mov ar.rnat=r24         // M2 must happen with RSE in lazy mode
+       nop 0
+(pLvSys)mov r2=r0
+
+       mov ar.rsc=r27          // M2
+       mov pr=r31,-1           // I0
+       rfi                     // B
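
The r22 dance added here preserves the live psr.pp bit across the rfi on kernel returns: cr.ipsr still holds the value captured at interruption time, and psr.pp may have changed since, so bit 21 is re-deposited from the PSR read while interrupts were disabled. Equivalent C, with bit 21 taken from the extr/dep operands:

    /* refresh ipsr.pp (bit 21) from the current PSR before rfi (sketch) */
    static unsigned long fixup_ipsr_pp(unsigned long ipsr, unsigned long psr_now)
    {
        unsigned long pp = (psr_now >> 21) & 1;        /* extr.u r22=r22,21,1 */
        return (ipsr & ~(1UL << 21)) | (pp << 21);     /* dep r29=r22,r29,21,1 */
    }
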
 
        /*
         * On entry:
@@ -1041,7 +1076,16 @@ skip_rbs_switch:
         * On exit:
         *      p6 = TRUE if work-pending-check needs to be redone
         */
+.work_pending_syscall:
+       add r2=-8,r2
+       add r3=-8,r3
+       ;;
+       st8 [r2]=r8
+       st8 [r3]=r10
 .work_pending:
+       tbit.nz p6,p0=r31,TIF_SIGDELAYED                // signal delayed from MCA/INIT/NMI/PMI context?
+(p6)   br.cond.sptk.few .sigdelayed
+       ;;
        tbit.z p6,p0=r31,TIF_NEED_RESCHED               // current_thread_info()->need_resched==0?
 (p6)   br.cond.sptk.few .notify
 #ifdef CONFIG_PREEMPT
@@ -1059,14 +1103,34 @@ skip_rbs_switch:
        ;;
 (pKStk)        st4 [r20]=r0            // preempt_count() <- 0
 #endif
-(pLvSys)br.cond.sptk.many .work_processed_syscall      // re-check
+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
        br.cond.sptk.many .work_processed_kernel        // re-check
 
 .notify:
-       br.call.spnt.many rp=notify_resume_user
+(pUStk)        br.call.spnt.many rp=notify_resume_user
 .ret10:        cmp.ne p6,p0=r0,r0                              // p6 <- 0
-(pLvSys)br.cond.sptk.many .work_processed_syscall      // don't re-check
+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
        br.cond.sptk.many .work_processed_kernel        // don't re-check
+
+// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
+// it could not be delivered.  Deliver it now.  The signal might be for us and
+// may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed
+// signal.
+
+.sigdelayed:
+       br.call.sptk.many rp=do_sigdelayed
+       cmp.eq p6,p0=r0,r0                              // p6 <- 1, always re-check
+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
+       br.cond.sptk.many .work_processed_kernel        // re-check
+
+.work_pending_syscall_end:
+       adds r2=PT(R8)+16,r12
+       adds r3=PT(R10)+16,r12
+       ;;
+       ld8 r8=[r2]
+       ld8 r10=[r3]
+       br.cond.sptk.many .work_processed_syscall       // re-check
+
 END(ia64_leave_kernel)
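
Taken together, the labels added in this hunk give the work-pending path the following shape; this is a control-flow sketch, not literal code (do_sigdelayed and the TIF names come from the diff; the helpers are illustrative):

    for (;;) {                                   /* .work_pending */
        unsigned long flags = current_thread_flags();
        if (flags & (1UL << TIF_SIGDELAYED)) {
            do_sigdelayed();                     /* .sigdelayed */
            continue;                            /* always re-check */
        }
        if (flags & (1UL << TIF_NEED_RESCHED)) {
            schedule();
            continue;                            /* re-check */
        }
        notify_resume_user();                    /* .notify (user return only) */
        break;                                   /* don't re-check */
    }
    /* syscall leavers detour through .work_pending_syscall{,_end} to save and
     * restore r8/r10 around the C calls above */
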
 
 ENTRY(handle_syscall_error)
@@ -1078,17 +1142,11 @@ ENTRY(handle_syscall_error)
         */
        PT_REGS_UNWIND_INFO(0)
        ld8 r3=[r2]             // load pt_regs.r8
-       sub r9=0,r8             // negate return value to get errno
        ;;
-       mov r10=-1              // return -1 in pt_regs.r10 to indicate error
        cmp.eq p6,p7=r3,r0      // is pt_regs.r8==0?
-       adds r3=16,r2           // r3=&pt_regs.r10
-       ;;
-(p6)   mov r9=r8
-(p6)   mov r10=0
        ;;
-.mem.offset 0,0; st8.spill [r2]=r9     // store errno in pt_regs.r8 and set unat bit
-.mem.offset 8,0; st8.spill [r3]=r10    // store error indication in pt_regs.r10 and set unat bit
+(p7)   mov r10=-1
+(p7)   sub r8=0,r8             // negate return value to get errno
        br.cond.sptk ia64_leave_syscall
 END(handle_syscall_error)
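
The slimmed-down handler relies on the in-register convention sketched earlier: a saved pt_regs.r8 of zero marks a syscall that forced success, so negation and the r10 flag apply only otherwise. A C rendering (the forced-success reading is an assumption about the surrounding kernel):

    static void handle_syscall_error(struct pt_regs *regs, long *r8, long *r10)
    {
        if (regs->r8 != 0) {     /* p7: a genuine error, not a forced success */
            *r10 = -1;           /* error indication for user space */
            *r8  = -*r8;         /* negate to obtain the positive errno */
        }
    }
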
 
@@ -1168,7 +1226,10 @@ END(sys_rt_sigsuspend)
 
 ENTRY(sys_rt_sigreturn)
        PT_REGS_UNWIND_INFO(0)
-       alloc r2=ar.pfs,0,0,1,0
+       /*
+        * Allocate 8 input registers since ptrace() may clobber them
+        */
+       alloc r2=ar.pfs,8,0,1,0
        .prologue
        PT_REGS_SAVES(16)
        adds sp=-16,sp
@@ -1512,10 +1573,10 @@ sys_call_table:
        data8 sys_mq_getsetattr
        data8 sys_ni_syscall                    // reserved for kexec_load
        data8 sys_vserver
-       data8 sys_ni_syscall                    // 1270
-       data8 sys_ni_syscall
-       data8 sys_ni_syscall
-       data8 sys_ni_syscall
+       data8 sys_waitid                        // 1270
+       data8 sys_add_key
+       data8 sys_request_key
+       data8 sys_keyctl
        data8 sys_ni_syscall
        data8 sys_ni_syscall                    // 1275
        data8 sys_ni_syscall