vserver 1.9.5.x5
[linux-2.6.git] / arch / s390 / kernel / entry64.S
index e94e694..51527ab 100644 (file)
 #include <asm/thread_info.h>
 #include <asm/offsets.h>
 #include <asm/unistd.h>
+#include <asm/page.h>
 
 /*
  * Stack layout for the system_call stack entry.
  * The first few entries are identical to the user_regs_struct.
  */
-SP_PTREGS    =  STACK_FRAME_OVERHEAD 
+SP_PTREGS    =  STACK_FRAME_OVERHEAD
+SP_ARGS      =  STACK_FRAME_OVERHEAD + __PT_ARGS
 SP_PSW       =  STACK_FRAME_OVERHEAD + __PT_PSW
 SP_R0        =  STACK_FRAME_OVERHEAD + __PT_GPRS
 SP_R1        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 8
@@ -47,9 +49,30 @@ SP_ILC       =  STACK_FRAME_OVERHEAD + __PT_ILC
 SP_TRAP      =  STACK_FRAME_OVERHEAD + __PT_TRAP
 SP_SIZE      =  STACK_FRAME_OVERHEAD + __PT_SIZE
 
-_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_RESTART_SVC)
+STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
+STACK_SIZE  = 1 << STACK_SHIFT
+
+_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+                _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
 
+#define BASED(name) name-system_call(%r13)
+
+       .macro  STORE_TIMER lc_offset
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+       stpt    \lc_offset
+#endif
+       .endm
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+       .macro  UPDATE_VTIME lc_from,lc_to,lc_sum
+       lg      %r10,\lc_from
+       slg     %r10,\lc_to
+       alg     %r10,\lc_sum
+       stg     %r10,\lc_sum
+       .endm
+#endif
+
 /*
  * Register usage in interrupt handlers:
  *    R9  - pointer to current task structure
@@ -58,99 +81,59 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
  *    R15 - kernel stack pointer
  */
 
+        .macro  SAVE_ALL_BASE savearea
+       stmg    %r12,%r15,\savearea
+       larl    %r13,system_call
+       .endm
+
         .macro  SAVE_ALL psworg,savearea,sync
-        stmg    %r13,%r15,\savearea
+       la      %r12,\psworg
        .if     \sync
-        tm      \psworg+1,0x01           # test problem state bit
-        jz      1f                       # skip stack setup save
-       lg      %r15,__LC_KERNEL_STACK   # problem state -> load ksp
+       tm      \psworg+1,0x01          # test problem state bit
+       jz      2f                      # skip stack setup save
+       lg      %r15,__LC_KERNEL_STACK  # problem state -> load ksp
        .else
-        tm      \psworg+1,0x01           # test problem state bit
-       jnz     0f                       # from user -> load kernel stack
-       lg      %r14,__LC_ASYNC_STACK    # are we already on the async. stack ?
+       tm      \psworg+1,0x01          # test problem state bit
+       jnz     1f                      # from user -> load kernel stack
+       clc     \psworg+8(8),BASED(.Lcritical_end)
+       jhe     0f
+       clc     \psworg+8(8),BASED(.Lcritical_start)
+       jl      0f
+       brasl   %r14,cleanup_critical
+       tm      0(%r12),0x01            # retest problem state after cleanup
+       jnz     1f
+0:     lg      %r14,__LC_ASYNC_STACK   # are we already on the async. stack ?
        slgr    %r14,%r15
-       srag    %r14,%r14,14
-       jz      1f
-0:     lg      %r15,__LC_ASYNC_STACK    # load async stack
+       srag    %r14,%r14,STACK_SHIFT
+       jz      2f
+1:     lg      %r15,__LC_ASYNC_STACK   # load async stack
        .endif
-1:      aghi    %r15,-SP_SIZE            # make room for registers & psw
-       lghi    %r14,\psworg
-       slgr    %r13,%r13
-       icm     %r14,12,__LC_SVC_ILC
-        stmg    %r0,%r12,SP_R0(%r15)     # store gprs 0-13 to kernel stack
-        stg     %r2,SP_ORIG_R2(%r15)     # store original content of gpr 2
-        mvc     SP_R13(24,%r15),\savearea # move r13, r14 and r15 to stack
-        mvc     SP_PSW(16,%r15),\psworg  # move user PSW to stack
-       st      %r14,SP_ILC(%r15)
-       stg     %r13,0(%r15)
+#ifdef CONFIG_CHECK_STACK
+       j       3f
+2:     tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
+       jz      stack_overflow
+3:
+#endif
+2:     aghi    %r15,-SP_SIZE           # make room for registers & psw
+       mvc     SP_PSW(16,%r15),0(%r12) # move user PSW to stack
+       la      %r12,\psworg
+       stg     %r2,SP_ORIG_R2(%r15)    # store original content of gpr 2
+       icm     %r12,12,__LC_SVC_ILC
+       stmg    %r0,%r11,SP_R0(%r15)    # store gprs %r0-%r11 to kernel stack
+       st      %r12,SP_ILC(%r15)
+       mvc     SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
+       la      %r12,0
+       stg     %r12,__SF_BACKCHAIN(%r15)
         .endm
 
-       .macro  CLEANUP_SAVE_ALL psworg,savearea,sync
-       lg      %r1,SP_PSW+8(%r15)
-       cli     1(%r1),0xdf
-       jne     2f
-       mvc     \savearea(24),SP_R13(%r15)
-2:     lg      %r1,\savearea+16
-       .if     \sync
-       tm      \psworg+1,0x01
-       jz      1f
-       lg      %r1,__LC_KERNEL_STACK
-       .else
-       tm      \psworg+1,0x01
-       jnz     0f
-       lg      %r0,__LC_ASYNC_STACK
-       slgr    %r0,%r1
-       srag    %r0,%r0,14
-       jz      1f
-0:     lg      %r1,__LC_ASYNC_STACK
+       .macro  RESTORE_ALL sync
+       mvc     __LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore
+       .if !\sync
+       ni      __LC_RETURN_PSW+1,0xfd  # clear wait state bit
        .endif
-1:     aghi    %r1,-SP_SIZE
-       stg     %r1,SP_R15(%r15)
-       lghi    %r0,\psworg
-       xc      SP_R13(8,%r15),SP_R13(%r15)
-       icm     %r0,12,__LC_SVC_ILC
-       stg     %r0,SP_R14(%r15)
-       mvc     SP_R0(104,%r1),SP_R0(%r15)
-       mvc     SP_ORIG_R2(8,%r1),SP_R2(%r15)
-       mvc     SP_R13(24,%r1),\savearea
-       mvc     SP_PSW(16,%r1),\psworg
-       st      %r0,SP_ILC(%r1)
-       xc      0(8,%r1),0(%r1)
-       .endm
-
-        .macro  RESTORE_ALL              # system exit macro
-        mvc     __LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore
-        ni      __LC_RETURN_PSW+1,0xfd   # clear wait state bit
-        lmg     %r0,%r15,SP_R0(%r15)     # load gprs 0-15 of user
-        lpswe   __LC_RETURN_PSW          # back to caller
-        .endm
-
-       .macro  CLEANUP_RESTORE_ALL
-       lg      %r1,SP_PSW+8(%r15)
-       cli     0(%r1),0xb2
-       jne     0f
-       mvc     SP_PSW(16,%r15),__LC_RETURN_PSW
-       j       1f
-0:     lg      %r1,SP_R15(%r15)
-       mvc     SP_PSW(16,%r15),SP_PSW(%r1)
-       mvc     SP_R0(128,%r15),SP_R0(%r1)
-1:
-       .endm
-
-        .macro  GET_THREAD_INFO
-       lg      %r9,__LC_THREAD_INFO     # load pointer to thread_info struct
-        .endm
-
-       .macro  CHECK_CRITICAL
-        tm      SP_PSW+1(%r15),0x01      # test problem state bit
-       jnz     0f                       # from user -> not critical
-       larl    %r1,.Lcritical_start
-       clc     SP_PSW+8(8,%r15),8(%r1)  # compare ip with __critical_end
-       jnl     0f
-       clc     SP_PSW+8(8,%r15),0(%r1)  # compare ip with __critical_start
-       jl      0f
-       brasl   %r14,cleanup_critical
-0:
+       lmg     %r0,%r15,SP_R0(%r15)    # load gprs 0-15 of user
+       STORE_TIMER __LC_EXIT_TIMER
+       lpswe   __LC_RETURN_PSW         # back to caller
        .endm
 
 /*
@@ -164,43 +147,23 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
 __switch_to:
        tm      __THREAD_per+4(%r3),0xe8 # is the new process using per ?
        jz      __switch_to_noper               # if not we're fine
-        stctg   %c9,%c11,48(%r15)       # We are using per stuff
-        clc     __THREAD_per(24,%r3),48(%r15)
+        stctg   %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
+        clc     __THREAD_per(24,%r3),__SF_EMPTY(%r15)
         je      __switch_to_noper            # we got away without bashing TLB's
         lctlg   %c9,%c11,__THREAD_per(%r3)     # Nope we didn't
 __switch_to_noper:
-        stmg    %r6,%r15,48(%r15)       # store __switch_to registers of prev task
+        stmg    %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
        stg     %r15,__THREAD_ksp(%r2)  # store kernel stack to prev->tss.ksp
        lg      %r15,__THREAD_ksp(%r3)  # load kernel stack from next->tss.ksp
-        lmg     %r6,%r15,48(%r15)       # load __switch_to registers of next task
+        lmg     %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
        stg     %r3,__LC_CURRENT        # __LC_CURRENT = current task struct
+       lctl    %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
        lg      %r3,__THREAD_info(%r3)  # load thread_info from task struct
        stg     %r3,__LC_THREAD_INFO
-       aghi    %r3,16384
+       aghi    %r3,STACK_SIZE
        stg     %r3,__LC_KERNEL_STACK   # __LC_KERNEL_STACK = new kernel stack
        br      %r14
 
-/*
- * do_softirq calling function. We want to run the softirq functions on the
- * asynchronous interrupt stack.
- */
-       .global do_call_softirq
-do_call_softirq:
-       stnsm   48(%r15),0xfc
-       stmg    %r12,%r15,56(%r15)
-       lgr     %r12,%r15
-       lg      %r0,__LC_ASYNC_STACK
-       slgr    %r0,%r15
-       srag    %r0,%r0,14
-       je      0f
-       lg      %r15,__LC_ASYNC_STACK
-0:     aghi    %r15,-STACK_FRAME_OVERHEAD
-       stg     %r12,0(%r15)            # store back chain
-       brasl   %r14,do_softirq
-       lmg     %r12,%r15,56(%r12)
-       ssm     48(%r15)
-       br      %r14
-
 __critical_start:
 /*
  * SVC interrupt handler routine. System calls are synchronous events and
@@ -209,18 +172,32 @@ __critical_start:
 
        .globl  system_call
 system_call:
+       STORE_TIMER __LC_SYNC_ENTER_TIMER
+sysc_saveall:
+       SAVE_ALL_BASE __LC_SAVE_AREA
         SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
        llgh    %r7,__LC_SVC_INT_CODE # get svc number from lowcore
-sysc_enter:
-        GET_THREAD_INFO           # load pointer to task_struct to R9
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+sysc_vtime:
+       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
+       jz      sysc_do_svc
+       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+sysc_stime:
+       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+sysc_update:
+       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+#endif
+sysc_do_svc:
+       lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
         slag    %r7,%r7,2         # *4 and test for svc 0
-       jnz     sysc_do_restart
+       jnz     sysc_nr_ok
        # svc 0: system call number in %r1
-       lghi    %r0,NR_syscalls
-       clr     %r1,%r0
-       jnl     sysc_do_restart
+       cl      %r1,BASED(.Lnr_syscalls)
+       jnl     sysc_nr_ok
        lgfr    %r7,%r1           # clear high word in r1
        slag    %r7,%r7,2         # svc 0: system call number in %r1
+sysc_nr_ok:
+       mvc     SP_ARGS(8,%r15),SP_R7(%r15)
 sysc_do_restart:
        larl    %r10,sys_call_table
 #ifdef CONFIG_S390_SUPPORT
@@ -231,7 +208,7 @@ sysc_noemu:
 #endif
        tm      __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
         lgf     %r8,0(%r7,%r10)   # load address of system call routine
-        j     sysc_tracesys
+        jnz     sysc_tracesys
         basr    %r14,%r8          # call sys_xxxx
         stg     %r2,SP_R2(%r15)   # store return value (change R2 on stack)
                                   # ATTENTION: check sys_execve_glue before
@@ -243,18 +220,16 @@ sysc_return:
        tm      __TI_flags+7(%r9),_TIF_WORK_SVC
        jnz     sysc_work         # there is work to do (signals etc.)
 sysc_leave:
-        RESTORE_ALL
+        RESTORE_ALL 1
 
 #
 # recheck if there is more work to do
 #
 sysc_work_loop:
-        GET_THREAD_INFO           # load pointer to task_struct to R9
        tm      __TI_flags+7(%r9),_TIF_WORK_SVC
        jz      sysc_leave        # there is no work to do
 #
 # One of the work bits is on. Find out which one.
-# Checked are: _TIF_SIGPENDING and _TIF_NEED_RESCHED
 #
 sysc_work:
        tm      __TI_flags+7(%r9),_TIF_NEED_RESCHED
@@ -263,6 +238,8 @@ sysc_work:
        jo      sysc_sigpending
        tm      __TI_flags+7(%r9),_TIF_RESTART_SVC
        jo      sysc_restart
+       tm      __TI_flags+7(%r9),_TIF_SINGLE_STEP
+       jo      sysc_singlestep
        j       sysc_leave
 
 #
@@ -276,11 +253,14 @@ sysc_reschedule:
 # _TIF_SIGPENDING is set, call do_signal
 #
 sysc_sigpending:     
+       ni      __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
         la      %r2,SP_PTREGS(%r15) # load pt_regs
         sgr     %r3,%r3           # clear *oldset
        brasl   %r14,do_signal    # call do_signal
        tm      __TI_flags+7(%r9),_TIF_RESTART_SVC
        jo      sysc_restart
+       tm      __TI_flags+7(%r9),_TIF_SINGLE_STEP
+       jo      sysc_singlestep
        j       sysc_leave        # out of here, do NOT recheck
 
 #
@@ -294,6 +274,18 @@ sysc_restart:
        lmg     %r2,%r6,SP_R2(%r15)    # load svc arguments
        j       sysc_do_restart        # restart svc
 
+#
+# _TIF_SINGLE_STEP is set, call do_single_step
+#
+sysc_singlestep:
+       ni      __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+       lhi     %r0,__LC_PGM_OLD_PSW
+       sth     %r0,SP_TRAP(%r15)       # set trap indication to pgm check
+       la      %r2,SP_PTREGS(%r15)     # address of register-save area
+       larl    %r14,sysc_return        # load adr. of system return
+       jg      do_single_step          # branch to do_sigtrap
+
+
 __critical_end:
 
 #
@@ -319,7 +311,7 @@ sysc_tracego:
         stg     %r2,SP_R2(%r15)     # store return value
 sysc_tracenogo:
        tm      __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
-        jno     sysc_return
+        jz     sysc_return             # no trace/audit flag set -> skip exit trace
        la      %r2,SP_PTREGS(%r15)    # load pt_regs
        la      %r3,1
        larl    %r14,sysc_return    # return point is sysc_return
@@ -329,9 +321,13 @@ sysc_tracenogo:
 # a new process exits the kernel with ret_from_fork
 #
         .globl  ret_from_fork
-ret_from_fork:  
-        GET_THREAD_INFO           # load pointer to task_struct to R9
-        brasl   %r14,schedule_tail
+ret_from_fork:
+       lg      %r13,__LC_SVC_NEW_PSW+8
+       lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
+       tm      SP_PSW+1(%r15),0x01     # forking a kernel thread ?
+       jo      0f
+       stg     %r15,SP_R15(%r15)       # store stack pointer for new kthread
+0:     brasl   %r14,schedule_tail
         stosm   24(%r15),0x03     # reenable interrupts
        j       sysc_return
 
@@ -473,14 +469,25 @@ pgm_check_handler:
  * we just ignore the PER event (FIXME: is there anything we have to do
  * for LPSW?).
  */
+       STORE_TIMER __LC_SYNC_ENTER_TIMER
+       SAVE_ALL_BASE __LC_SAVE_AREA
         tm      __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
         jnz     pgm_per                  # got per exception -> special case
        SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
+       jz      pgm_no_vtime
+       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime:
+#endif
+       lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        lgf     %r3,__LC_PGM_ILC         # load program interruption code
        lghi    %r8,0x7f
        ngr     %r8,%r3
+pgm_do_call:
         sll     %r8,3
-       GET_THREAD_INFO
         larl    %r1,pgm_check_table
         lg      %r1,0(%r8,%r1)          # load address of handler routine
         la      %r2,SP_PTREGS(%r15)     # address of register-save area
@@ -497,6 +504,7 @@ pgm_per:
         clc     __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
         je      pgm_svcper
 # no interesting special case, ignore PER event
+       lmg     %r12,%r15,__LC_SAVE_AREA
        lpswe   __LC_PGM_OLD_PSW
 
 #
@@ -504,107 +512,67 @@ pgm_per:
 #
 pgm_per_std:
        SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
-       GET_THREAD_INFO
-       mvc     __THREAD_per+__PER_atmid(2,%r9),__LC_PER_ATMID
-       mvc     __THREAD_per+__PER_address(8,%r9),__LC_PER_ADDRESS
-       mvc     __THREAD_per+__PER_access_id(1,%r9),__LC_PER_ACCESS_ID
-       lghi    %r4,0x7f
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
+       jz      pgm_no_vtime2
+       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime2:
+#endif
+       lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
+       lg      %r1,__TI_task(%r9)
+       mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
+       mvc     __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
+       mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
+       oi      __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
        lgf     %r3,__LC_PGM_ILC         # load program interruption code
-        nr      %r4,%r3                         # clear per-event-bit and ilc
-        je      pgm_per_only            # only per of per+check ?
-        sll     %r4,3
-        larl    %r1,pgm_check_table
-        lg      %r1,0(%r4,%r1)          # load address of handler routine
-        la      %r2,SP_PTREGS(%r15)     # address of register-save area
-        basr    %r14,%r1                # branch to interrupt-handler
-pgm_per_only:
-        la      %r2,SP_PTREGS(15)       # address of register-save area
-        larl    %r14,sysc_return        # load adr. of system return
-        jg      do_debugger_trap
+       lghi    %r8,0x7f
+       ngr     %r8,%r3                  # clear per-event-bit and ilc
+       je      sysc_return
+       j       pgm_do_call
 
 #
 # it was a single stepped SVC that is causing all the trouble
 #
 pgm_svcper:
        SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
-       llgh    %r7,__LC_SVC_INT_CODE # get svc number from lowcore
-       stosm   48(%r15),0x03     # reenable interrupts
-        GET_THREAD_INFO           # load pointer to task_struct to R9
-       mvc     __THREAD_per+__PER_atmid(2,%r9),__LC_PER_ATMID
-       mvc     __THREAD_per+__PER_address(8,%r9),__LC_PER_ADDRESS
-       mvc     __THREAD_per+__PER_access_id(1,%r9),__LC_PER_ACCESS_ID
-       slag    %r7,%r7,2         # *4 and test for svc 0
-       jnz     pgm_svcstd
-       # svc 0: system call number in %r1
-       lghi    %r0,NR_syscalls
-       clr     %r1,%r0
-       slag    %r7,%r1,2
-pgm_svcstd:
-       larl    %r10,sys_call_table
-#ifdef CONFIG_S390_SUPPORT
-        tm      SP_PSW+3(%r15),0x01  # are we running in 31 bit mode ?
-        jo      pgm_svcper_noemu
-       larl    %r10,sys_call_table_emu # use 31 bit emulation system calls
-pgm_svcper_noemu:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
+       jz      pgm_no_vtime3
+       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime3:
 #endif
-       tm      __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
-        lgf     %r8,0(%r7,%r10)   # load address of system call routine
-        jo      pgm_tracesys
-        basr    %r14,%r8          # call sys_xxxx
-        stg     %r2,SP_R2(%r15)   # store return value (change R2 on stack)
-                                  # ATTENTION: check sys_execve_glue before
-                                  # changing anything here !!
-
-pgm_svcret:
-       tm      __TI_flags+7(%r9),_TIF_SIGPENDING
-       jno     pgm_svcper_nosig
-        la      %r2,SP_PTREGS(%r15) # load pt_regs
-        sgr     %r3,%r3             # clear *oldset
-       brasl   %r14,do_signal
-       
-pgm_svcper_nosig:
-       lhi     %r0,__LC_PGM_OLD_PSW     # set trap indication back to pgm_chk
-       st      %r0,SP_TRAP(%r15)
-        la      %r2,SP_PTREGS(15) # address of register-save area
-        larl    %r14,sysc_return  # load adr. of system return
-        jg      do_debugger_trap
-#
-# call trace before and after sys_call
-#
-pgm_tracesys:
-       la      %r2,SP_PTREGS(%r15)    # load pt_regs
-       la      %r3,0
-       srlg    %r7,%r7,2
-       stg     %r7,SP_R2(%r15)
-        brasl   %r14,syscall_trace
-       lghi    %r0,NR_syscalls
-       clg     %r0,SP_R2(%r15)
-       jnh     pgm_svc_nogo
-       lg      %r7,SP_R2(%r15)
-       sllg    %r7,%r7,2           # strace wants to change the syscall
-       lgf     %r8,0(%r7,%r10)
-pgm_svc_go:
-       lmg     %r3,%r6,SP_R3(%r15)
-       lg      %r2,SP_ORIG_R2(%r15)
-        basr    %r14,%r8            # call sys_xxx
-        stg     %r2,SP_R2(%r15)     # store return value
-pgm_svc_nogo:
-       tm      __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
-        jno     pgm_svcret
-       la      %r2,SP_PTREGS(%r15)    # load pt_regs
-       la      %r3,1
-       larl    %r14,pgm_svcret     # return point is sysc_return
-       jg      syscall_trace
+       llgh    %r7,__LC_SVC_INT_CODE   # get svc number from lowcore
+       lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
+       lg      %r1,__TI_task(%r9)
+       mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
+       mvc     __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
+       mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
+       oi      __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       j       sysc_do_svc
 
 /*
  * IO interrupt handler routine
  */
         .globl io_int_handler
 io_int_handler:
-        SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
+       STORE_TIMER __LC_ASYNC_ENTER_TIMER
        stck    __LC_INT_CLOCK
-       CHECK_CRITICAL
-        GET_THREAD_INFO                # load pointer to task_struct to R9
+       SAVE_ALL_BASE __LC_SAVE_AREA+32
+        SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
+       jz      io_no_vtime
+       UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+io_no_vtime:
+#endif
+       lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
         la      %r2,SP_PTREGS(%r15)    # address of register-save area
        brasl   %r14,do_IRQ            # call standard irq handler
 
@@ -618,7 +586,7 @@ io_return:
        tm      __TI_flags+7(%r9),_TIF_WORK_INT
        jnz     io_work                # there is work to do (signals etc.)
 io_leave:
-        RESTORE_ALL
+        RESTORE_ALL 0
 
 #ifdef CONFIG_PREEMPT
 io_preempt:
@@ -628,17 +596,16 @@ io_preempt:
        lg      %r1,SP_R15(%r15)
        aghi    %r1,-SP_SIZE
        mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-        xc      0(8,%r1),0(%r1)        # clear back chain
+        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
        lgr     %r15,%r1
 io_resume_loop:
        tm      __TI_flags+7(%r9),_TIF_NEED_RESCHED
        jno     io_leave
        larl    %r1,.Lc_pactive
        mvc     __TI_precount(4,%r9),0(%r1)
-        stosm   48(%r15),0x03          # reenable interrupts
+        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        brasl   %r14,schedule          # call schedule
-        stnsm   48(%r15),0xfc          # disable I/O and ext. interrupts
-        GET_THREAD_INFO                # load pointer to task_struct to R9
+        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
        xc      __TI_precount(4,%r9),__TI_precount(%r9)
        j       io_resume_loop
 #endif
@@ -650,7 +617,7 @@ io_work:
        lg      %r1,__LC_KERNEL_STACK
        aghi    %r1,-SP_SIZE
        mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-        xc      0(8,%r1),0(%r1)        # clear back chain
+        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
        lgr     %r15,%r1
 #
 # One of the work bits is on. Find out which one.
@@ -667,34 +634,42 @@ io_work_loop:
 # _TIF_NEED_RESCHED is set, call schedule
 #      
 io_reschedule:        
-        stosm   48(%r15),0x03       # reenable interrupts
-        brasl   %r14,schedule       # call scheduler
-        stnsm   48(%r15),0xfc       # disable I/O and ext. interrupts
-        GET_THREAD_INFO             # load pointer to task_struct to R9
+       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       brasl   %r14,schedule           # call scheduler
+       stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
        tm      __TI_flags+7(%r9),_TIF_WORK_INT
-       jz      io_leave               # there is no work to do
+       jz      io_leave                # there is no work to do
        j       io_work_loop
 
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
 io_sigpending:     
-        stosm   48(%r15),0x03       # reenable interrupts
-        la      %r2,SP_PTREGS(%r15) # load pt_regs
-        slgr    %r3,%r3             # clear *oldset
-       brasl   %r14,do_signal      # call do_signal
-        stnsm   48(%r15),0xfc       # disable I/O and ext. interrupts
-       j       sysc_leave          # out of here, do NOT recheck
+       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       la      %r2,SP_PTREGS(%r15)     # load pt_regs
+       slgr    %r3,%r3                 # clear *oldset
+       brasl   %r14,do_signal          # call do_signal
+       stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
+       j       sysc_leave              # out of here, do NOT recheck
 
 /*
  * External interrupt handler routine
  */
         .globl  ext_int_handler
 ext_int_handler:
-        SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
-       CHECK_CRITICAL
-        GET_THREAD_INFO                # load pointer to task_struct to R9
+       STORE_TIMER __LC_ASYNC_ENTER_TIMER
        stck    __LC_INT_CLOCK
+       SAVE_ALL_BASE __LC_SAVE_AREA+32
+        SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
+       jz      ext_no_vtime
+       UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ext_no_vtime:
+#endif
+       lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        la      %r2,SP_PTREGS(%r15)    # address of register-save area
        llgh    %r3,__LC_EXT_INT_CODE  # get interruption code
        brasl   %r14,do_extint
@@ -705,10 +680,20 @@ ext_int_handler:
  */
         .globl mcck_int_handler
 mcck_int_handler:
+       STORE_TIMER __LC_ASYNC_ENTER_TIMER
+       SAVE_ALL_BASE __LC_SAVE_AREA+64
         SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
+       jz      mcck_no_vtime
+       UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+mcck_no_vtime:
+#endif
        brasl   %r14,s390_do_machine_check
 mcck_return:
-        RESTORE_ALL
+        RESTORE_ALL 0
 
 #ifdef CONFIG_SMP
 /*
@@ -721,8 +706,8 @@ restart_int_handler:
         lctlg   %c0,%c15,0(%r10) # get new ctl regs
         lghi    %r10,__LC_AREGS_SAVE_AREA
         lam     %a0,%a15,0(%r10)
-        stosm   0(%r15),0x04           # now we can turn dat on
-        lmg     %r6,%r15,48(%r15)      # load registers from clone
+        lmg     %r6,%r15,__SF_GPRS(%r15) # load registers from clone
+        stosm   __SF_EMPTY(%r15),0x04    # now we can turn dat on
        jg      start_secondary
 #else
 /*
@@ -739,46 +724,129 @@ restart_crash:
 restart_go:
 #endif
 
-cleanup_table:
-       .quad   system_call, sysc_enter, cleanup_sysc_enter
-       .quad   sysc_return, sysc_leave, cleanup_sysc_return
-       .quad   sysc_leave, sysc_work_loop, cleanup_sysc_leave
-       .quad   sysc_work_loop, sysc_reschedule, cleanup_sysc_return
-cleanup_table_entries=(.-cleanup_table) / 24
+#ifdef CONFIG_CHECK_STACK
+/*
+ * The synchronous or the asynchronous stack overflowed. We are dead.
+ * No need to properly save the registers, we are going to panic anyway.
+ * Setup a pt_regs so that show_trace can provide a good call trace.
+ */
+stack_overflow:
+       lg      %r15,__LC_PANIC_STACK   # change to panic stack
+       aghi    %r15,-SP_SIZE           # make room for registers & psw on panic stack
+       mvc     SP_PSW(16,%r15),0(%r12) # move user PSW to stack
+       stmg    %r0,%r11,SP_R0(%r15)    # store gprs %r0-%r11 to kernel stack
+       la      %r1,__LC_SAVE_AREA
+       chi     %r12,__LC_SVC_OLD_PSW
+       je      0f
+       chi     %r12,__LC_PGM_OLD_PSW
+       je      0f
+       la      %r1,__LC_SAVE_AREA+16
+0:     mvc     SP_R12(32,%r15),0(%r1)  # move %r12-%r15 to stack
+        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+       jg      kernel_stack_overflow
+#endif
+
+cleanup_table_system_call:
+       .quad   system_call, sysc_do_svc
+cleanup_table_sysc_return:
+       .quad   sysc_return, sysc_leave
+cleanup_table_sysc_leave:
+       .quad   sysc_leave, sysc_work_loop
+cleanup_table_sysc_work_loop:
+       .quad   sysc_work_loop, sysc_reschedule
 
 cleanup_critical:
-       lghi    %r0,cleanup_table_entries
-       larl    %r1,cleanup_table
-       lg      %r2,SP_PSW+8(%r15)
-cleanup_loop:
-       clg     %r2,0(%r1)
-       jl      cleanup_cont
-       clg     %r2,8(%r1)
-       jl      cleanup_found
-cleanup_cont:
-       la      %r1,24(%r1)
-       brct    %r0,cleanup_loop
+       clc     8(8,%r12),BASED(cleanup_table_system_call)
+       jl      0f
+       clc     8(8,%r12),BASED(cleanup_table_system_call+8)
+       jl      cleanup_system_call
+0:
+       clc     8(8,%r12),BASED(cleanup_table_sysc_return)
+       jl      0f
+       clc     8(8,%r12),BASED(cleanup_table_sysc_return+8)
+       jl      cleanup_sysc_return
+0:
+       clc     8(8,%r12),BASED(cleanup_table_sysc_leave)
+       jl      0f
+       clc     8(8,%r12),BASED(cleanup_table_sysc_leave+8)
+       jl      cleanup_sysc_leave
+0:
+       clc     8(8,%r12),BASED(cleanup_table_sysc_work_loop)
+       jl      0f
+       clc     8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
+       jl      cleanup_sysc_leave
+0:
        br      %r14
-cleanup_found:
-       lg      %r1,16(%r1)
-       br      %r1
-
-cleanup_sysc_enter:
-       CLEANUP_SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
-       llgh    %r0,0x8a
-       stg     %r0,SP_R7(%r15)
-       larl    %r1,sysc_enter
-       stg     %r1,SP_PSW+8(%r15)
+
+cleanup_system_call:
+       mvc     __LC_RETURN_PSW(16),0(%r12)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+       clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
+       jh      0f
+       mvc     __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0:     clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
+       jhe     cleanup_vtime
+#endif
+       clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
+       jh      0f
+       mvc     __LC_SAVE_AREA(32),__LC_SAVE_AREA+32
+0:     stg     %r13,__LC_SAVE_AREA+40
+       SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+       stg     %r15,__LC_SAVE_AREA+56
+       llgh    %r7,__LC_SVC_INT_CODE
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+cleanup_vtime:
+       clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
+       jhe     cleanup_stime
+       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
+       jz      cleanup_novtime
+       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+cleanup_stime:
+       clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
+       jh      cleanup_update
+       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+cleanup_update:
+       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+cleanup_novtime:
+#endif
+       mvc     __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
+       la      %r12,__LC_RETURN_PSW
        br      %r14
+cleanup_system_call_insn:
+       .quad   sysc_saveall
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+       .quad   system_call
+       .quad   sysc_vtime
+       .quad   sysc_stime
+       .quad   sysc_update
+#endif
 
 cleanup_sysc_return:
-       larl    %r1,sysc_return
-       stg     %r1,SP_PSW+8(%r15)
+       mvc     __LC_RETURN_PSW(8),0(%r12)
+       mvc     __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return)
+       la      %r12,__LC_RETURN_PSW
        br      %r14
 
 cleanup_sysc_leave:
-       CLEANUP_RESTORE_ALL
+       clc     8(8,%r12),BASED(cleanup_sysc_leave_insn)
+       je      0f
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+       mvc     __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+       clc     8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
+       je      0f
+#endif
+       mvc     __LC_RETURN_PSW(16),SP_PSW(%r15)
+       mvc     __LC_SAVE_AREA+32(32),SP_R12(%r15)
+       lmg     %r0,%r11,SP_R0(%r15)
+       lg      %r15,SP_R15(%r15)
+0:     la      %r12,__LC_RETURN_PSW
        br      %r14
+cleanup_sysc_leave_insn:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+       .quad   sysc_leave + 16
+#endif
+       .quad   sysc_leave + 12
 
 /*
  * Integer constants
@@ -786,6 +854,12 @@ cleanup_sysc_leave:
                .align 4
 .Lconst:
 .Lc_pactive:   .long  PREEMPT_ACTIVE
+.Lnr_syscalls: .long  NR_syscalls
+.L0x0130:      .short 0x130
+.L0x0140:      .short 0x140
+.L0x0150:      .short 0x150
+.L0x0160:      .short 0x160
+.L0x0170:      .short 0x170
 .Lcritical_start:
                .quad  __critical_start
 .Lcritical_end: