/*
 * Alternate kernel routines for Xen. Heavily leveraged from
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *      Dan Magenheimer <dan.magenheimer@hp.com>
 */
#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include "xenminstate.h"
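
/*
 * Note: "xenminstate.h" is assumed here to supply the Xen-specific
 * minimal-state save/restore macros used by these entry paths (the
 * paravirtual counterparts of the native minstate.h macros).
 */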
/*
 * prev_task <- ia64_switch_to(struct task_struct *next)
 *      With Ingo's new scheduler, interrupts are disabled when this routine gets
 *      called.  The code starting at .map relies on this.  The rest of the code
 *      doesn't care about the interrupt masking status.
 */
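/*
 * Each xen_* entry point below tests the running_on_xen flag at run
 * time: on bare metal it branches straight to the corresponding native
 * __ia64_* routine, so one kernel image serves both environments.
 */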
GLOBAL_ENTRY(xen_switch_to)
        alloc r16=ar.pfs,1,0,0,0
        movl r22=running_on_xen;;
        ld4 r22=[r22];;
        cmp.eq p7,p0=r22,r0             // p7 <- !running_on_xen
(p7)    br.cond.sptk.many __ia64_switch_to;;
GLOBAL_ENTRY(ia64_switch_to)
        alloc r16=ar.pfs,1,0,0,0
        adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
        mov r27=IA64_KR(CURRENT_STACK)
        adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
        dep r20=0,in0,61,3              // physical address of "next"
        st8 [r22]=sp                    // save kernel stack pointer of old task
        shr.u r26=r20,IA64_GRANULE_SHIFT
        st4 [r8]=r0                     // force psr.ic off for hyperprivop(s)
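        // Under Xen, psr.ic is virtualized: r8 presumably holds the
        // address of the interruption-collection flag in the shared
        // page (set up in an earlier, elided instruction), so a plain
        // store stands in for the privileged rsm psr.ic.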
        /*
         * If we've already mapped this task's page, we can skip doing it again.
         */
(p6)    cmp.eq p7,p6=r26,r27
(p6)    br.cond.dpnt .map
        // update "current" application register
        mov r8=IA64_KR_CURRENT
        ld8 sp=[r21]                    // load kernel stack pointer of new task
        st4 [r27]=r8                    // psr.ic back on
        ld8 sp=[r21]                    // load kernel stack pointer of new task
        mov IA64_KR(CURRENT)=in0        // update "current" application register
        mov r8=r13                      // return pointer to previously running task
        mov r13=in0                     // set "current" pointer
        sync.i                          // ensure "fc"s done by this CPU are visible on other CPUs
        br.ret.sptk.many rp             // boogie on out in new context
.map:
        // psr.ic already off
        rsm psr.ic                      // interrupts (psr.i) are already disabled here
        or r23=r25,r20                  // construct PA | page properties
        mov r25=IA64_GRANULE_SHIFT<<2
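        // The shift by 2 places the granule size in the itir.ps field
        // (bits 7:2), forming the itir value for the insertion below.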
        st8 [r8]=in0                    // VA of next task...
        mov r25=IA64_TR_CURRENT_STACK
        // remember last page we mapped...
        mov r8=IA64_KR_CURRENT_STACK
        mov cr.ifa=in0                  // VA of next task...
        mov r25=IA64_TR_CURRENT_STACK
        mov IA64_KR(CURRENT_STACK)=r26  // remember last page we mapped...
        itr.d dtr[r25]=r23              // wire in new mapping...
        ssm psr.ic                      // reenable the psr.ic bit
/*
 * Invoke a system call, but do some tracing before and after the call.
 * We MUST preserve the current register frame throughout this routine
 * because some system calls (such as ia64_execve) directly
 * manipulate ar.pfs.
 */
GLOBAL_ENTRY(xen_trace_syscall)
        PT_REGS_UNWIND_INFO(0)
        movl r16=running_on_xen;;
        ld4 r16=[r16];;
        cmp.eq p7,p0=r16,r0             // p7 <- !running_on_xen
(p7)    br.cond.sptk.many __ia64_trace_syscall;;
GLOBAL_ENTRY(ia64_trace_syscall)
        PT_REGS_UNWIND_INFO(0)
        /*
         * We need to preserve the scratch registers f6-f11 in case the system
         * call is sigreturn.
         */
        adds r16=PT(F6)+16,sp
        adds r17=PT(F7)+16,sp
        stf.spill [r16]=f6,32
        stf.spill [r17]=f7,32
        stf.spill [r16]=f8,32
        stf.spill [r17]=f9,32
        br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
        adds r16=PT(F6)+16,sp
        adds r17=PT(F7)+16,sp
        // the syscall number may have changed, so re-load it and re-calculate the
        // syscall entry-point:
        adds r15=PT(R15)+16,sp          // r15 = &pt_regs.r15 (syscall #)
        mov r3=NR_syscalls - 1
        movl r16=sys_call_table
        shladd r20=r15,3,r16            // r20 = sys_call_table + 8*(syscall-1024)
(p6)    ld8 r20=[r20]                   // load address of syscall entry point
(p7)    movl r20=sys_ni_syscall
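        // p6/p7 are assumed to be set by an earlier (elided) bounds
        // check against NR_syscalls: in-range numbers take their
        // handler from the table, out-of-range ones get sys_ni_syscall.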
        br.call.sptk.many rp=b6         // do the syscall
.strace_check_retval:
        cmp.lt p6,p0=r8,r0              // syscall failed?
        adds r2=PT(R8)+16,sp            // r2 = &pt_regs.r8
        adds r3=PT(R10)+16,sp           // r3 = &pt_regs.r10
(p6)    br.cond.sptk strace_error       // syscall failed ->
        ;;                              // avoid RAW on r10
.strace_save_retval:
.mem.offset 0,0; st8.spill [r2]=r8      // store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10     // clear error indication in slot for r10
        br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
(pUStk) cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
        br.cond.sptk .work_pending_syscall_end
strace_error:
        ld8 r3=[r2]                     // load pt_regs.r8
        sub r9=0,r8                     // negate return value to get errno value
        cmp.ne p6,p0=r3,r0              // is pt_regs.r8!=0?
        adds r3=16,r2                   // r3=&pt_regs.r10
        ;;
(p6)    mov r10=-1
(p6)    mov r8=r9
        br.cond.sptk .strace_save_retval
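        // ia64 syscall convention: a failing handler returns a negative
        // value; the error path above leaves the positive errno in r8
        // and -1 in r10, which .strace_save_retval then spills back into
        // the pt_regs slots for user space to see.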
END(xen_trace_syscall)
END(ia64_trace_syscall)
/*
 * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
 *      need to switch to bank 0 and doesn't restore the scratch registers.
 *      To avoid leaking kernel bits, the scratch registers are set to
 *      the following known-to-be-safe values:
 *
 *                r1: restored (global pointer)
 *                r3: 1 (when returning to user-level)
 *            r8-r11: restored (syscall return value(s))
 *               r12: restored (user-level stack pointer)
 *               r13: restored (user-level thread pointer)
 *               r14: set to __kernel_syscall_via_epc
 *               r15: restored (syscall #)
 *               r20: user-level ar.fpsr
 *               r23: user-level ar.bspstore
 *               r24: user-level ar.rnat
 *               r25: user-level ar.unat
 *               r26: user-level ar.pfs
 *               r27: user-level ar.rsc
 *               r29: user-level psr
 *               r30: user-level cfm
 *                pr: restored (user-level pr)
 *                b0: restored (user-level rp)
 *                b7: set to __kernel_syscall_via_epc
 *           ar.unat: restored (user-level ar.unat)
 *            ar.pfs: restored (user-level ar.pfs)
 *            ar.rsc: restored (user-level ar.rsc)
 *           ar.rnat: restored (user-level ar.rnat)
 *       ar.bspstore: restored (user-level ar.bspstore)
 *           ar.fpsr: restored (user-level ar.fpsr)
 */
GLOBAL_ENTRY(xen_leave_syscall)
        PT_REGS_UNWIND_INFO(0)
        movl r22=running_on_xen;;
        ld4 r22=[r22];;
        cmp.eq p7,p0=r22,r0             // p7 <- !running_on_xen
(p7)    br.cond.sptk.many __ia64_leave_syscall;;
ENTRY(ia64_leave_syscall)
        PT_REGS_UNWIND_INFO(0)
        /*
         * work.need_resched etc. mustn't get changed by this CPU before it returns to
         * user- or fsys-mode, hence we disable interrupts early on.
         *
         * p6 controls whether current_thread_info()->flags needs to be checked for
         * extra work.  We always check for extra work when returning to user-level.
         * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
         * is 0.  After extra work processing has been completed, execution
         * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
         * needs to be redone.
         */
#ifdef CONFIG_PREEMPT
        rsm psr.i                       // disable interrupts
        cmp.eq pLvSys,p0=r0,r0          // pLvSys=1: leave from syscall
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
        .pred.rel.mutex pUStk,pKStk
(pKStk) ld4 r21=[r20]                   // r21 <- preempt_count
(pUStk) mov r21=0                       // r21 <- 0
        cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
#else /* !CONFIG_PREEMPT */
        movl r2=XSI_PSR_I_ADDR
        cmp.eq pLvSys,p0=r0,r0          // pLvSys=1: leave from syscall
(pUStk) cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
.work_processed_syscall:
        adds r2=PT(LOADRS)+16,r12
        adds r3=PT(AR_BSPSTORE)+16,r12
        adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
(p6)    ld4 r31=[r18]                           // load current_thread_info()->flags
        ld8 r19=[r2],PT(B6)-PT(LOADRS)          // load ar.rsc value for "loadrs"
        mov r16=ar.bsp                          // M2  get existing backing store pointer
        ld8 r18=[r2],PT(R9)-PT(B6)              // load b6
(p6)    and r15=TIF_WORK_MASK,r31               // any work other than TIF_SYSCALL_TRACE?
        ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)    // load ar.bspstore (may be garbage)
(p6)    cmp4.ne.unc p6,p0=r15,r0                // any special work pending?
(p6)    br.cond.spnt .work_pending_syscall
        // start restoring the state saved on the kernel stack (struct pt_regs):
        ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
        ld8 r11=[r3],PT(CR_IIP)-PT(R11)
(pNonSys) break 0       // bug check: we shouldn't be here if pNonSys is TRUE!
        invala                          // M0|1 invalidate ALAT
        movl r28=XSI_PSR_I_ADDR
        st4 [r29]=r0                    // note: clears both vpsr.i and vpsr.ic!
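        // Xen keeps the virtual psr.i and psr.ic in the shared page, so
        // one store clears both; it is the paravirtual stand-in for the
        // native rsm psr.i | psr.ic on the following line.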
        rsm psr.i | psr.ic              // M2   turn off interrupts and interruption collection
        cmp.eq p9,p0=r0,r0              // A    set p9 to indicate that we should restore cr.ifs
        ld8 r29=[r2],16                 // M0|1 load cr.ipsr
        ld8 r28=[r3],16                 // M0|1 load cr.iip
        mov r22=r0                      // A    clear r22
        ld8 r30=[r2],16                 // M0|1 load cr.ifs
        ld8 r25=[r3],16                 // M0|1 load ar.unat
(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
        ld8 r26=[r2],PT(B0)-PT(AR_PFS)  // M0|1 load ar.pfs
(pKStk) mov r22=psr                     // M2   read PSR now that interrupts are disabled
        ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
        ld8 r27=[r3],PT(PR)-PT(AR_RSC)  // M0|1 load ar.rsc
        mov f6=f0                       // F    clear f6
        ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)    // M0|1 load ar.rnat (may be garbage)
        ld8 r31=[r3],PT(R1)-PT(PR)              // M0|1 load predicates
        mov f7=f0                       // F    clear f7
        ld8 r20=[r2],PT(R12)-PT(AR_FPSR)        // M0|1 load ar.fpsr
        ld8.fill r1=[r3],16             // M0|1 load r1
(pUStk) mov r17=1                       // A
(pUStk) st1 [r14]=r17                   // M2|3
        ld8.fill r13=[r3],16            // M0|1
        mov f8=f0                       // F    clear f8
        ld8.fill r12=[r2]               // M0|1 restore r12 (sp)
        ld8.fill r15=[r3]               // M0|1 restore r15
        mov b6=r18                      // I0   restore b6
        addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
        mov f9=f0                       // F    clear f9
(pKStk) br.cond.dpnt.many skip_rbs_switch       // B
        srlz.d                  // M0   ensure interruption collection is off (for cover)
        shr.u r18=r19,16        // I0|1 get byte size of existing "dirty" partition
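        // ar.rsc's "loadrs" field occupies bits 29:16, so the value
        // saved in PT(LOADRS) doubles as the loadrs image and, shifted
        // right by 16, as the dirty-partition size in bytes.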
        cover                   // B    add current frame into dirty partition & set cr.ifs
(pUStk) ld4 r17=[r17]           // M0|1 r17 = cpu_data->phys_stacked_size_p8
        mov r19=ar.bsp          // M2   get new backing store pointer
        mov f10=f0              // F    clear f10
        movl r14=__kernel_syscall_via_epc // X
        mov.m ar.csd=r0         // M2   clear ar.csd
        mov.m ar.ccv=r0         // M2   clear ar.ccv
        mov b7=r14              // I0   clear b7 (hint with __kernel_syscall_via_epc)
        mov.m ar.ssd=r0         // M2   clear ar.ssd
        mov f11=f0              // F    clear f11
        br.cond.sptk.many rbs_switch    // B
END(xen_leave_syscall)
END(ia64_leave_syscall)
GLOBAL_ENTRY(xen_leave_kernel)
        PT_REGS_UNWIND_INFO(0)
        movl r22=running_on_xen;;
        ld4 r22=[r22];;
        cmp.eq p7,p0=r22,r0             // p7 <- !running_on_xen
(p7)    br.cond.sptk.many __ia64_leave_kernel;;
GLOBAL_ENTRY(ia64_leave_kernel)
        PT_REGS_UNWIND_INFO(0)
        /*
         * work.need_resched etc. mustn't get changed by this CPU before it returns to
         * user- or fsys-mode, hence we disable interrupts early on.
         *
         * p6 controls whether current_thread_info()->flags needs to be checked for
         * extra work.  We always check for extra work when returning to user-level.
         * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
         * is 0.  After extra work processing has been completed, execution
         * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
         * needs to be redone.
         */
#ifdef CONFIG_PREEMPT
        rsm psr.i                       // disable interrupts
        cmp.eq p0,pLvSys=r0,r0          // pLvSys=0: leave from kernel
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
        .pred.rel.mutex pUStk,pKStk
(pKStk) ld4 r21=[r20]                   // r21 <- preempt_count
(pUStk) mov r21=0                       // r21 <- 0
        cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
(pUStk) movl r17=XSI_PSR_I_ADDR
(pUStk) ld8 r17=[r17]
(pUStk) st1 [r17]=r31
        cmp.eq p0,pLvSys=r0,r0          // pLvSys=0: leave from kernel
(pUStk) cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
.work_processed_kernel:
        adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
(p6)    ld4 r31=[r17]                   // load current_thread_info()->flags
        adds r21=PT(PR)+16,r12
        lfetch [r21],PT(CR_IPSR)-PT(PR)
        adds r2=PT(B6)+16,r12
        adds r3=PT(R16)+16,r12
        ld8 r28=[r2],8                  // load b6
        adds r29=PT(R24)+16,r12
        ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
        adds r30=PT(AR_CCV)+16,r12
(p6)    and r19=TIF_WORK_MASK,r31       // any work other than TIF_SYSCALL_TRACE?
        ld8 r15=[r30]                   // load ar.ccv
(p6)    cmp4.ne.unc p6,p0=r19,r0        // any special work pending?
        ld8 r29=[r2],16                 // load b7
        ld8 r30=[r3],16                 // load ar.csd
(p6)    br.cond.spnt .work_pending
        ld8 r31=[r2],16                 // load ar.ssd
        ld8.fill r10=[r3],PT(R17)-PT(R10)
        ld8.fill r11=[r2],PT(R18)-PT(R11)
        movl r23=XSI_PSR_I_ADDR
        st4 [r22]=r0                    // note: clears both vpsr.i and vpsr.ic!
        rsm psr.i | psr.ic              // initiate turning off of interrupt and interruption collection
        invala                          // invalidate ALAT
        ld8.fill r31=[r2],PT(F9)-PT(R31)
        adds r3=PT(F10)-PT(F6),r3
        ldf.fill f9=[r2],PT(F6)-PT(F9)
        ldf.fill f10=[r3],PT(F8)-PT(F10)
        ldf.fill f6=[r2],PT(F7)-PT(F6)
        ldf.fill f7=[r2],PT(F11)-PT(F7)
        srlz.d  // ensure that inter. collection is off (VHPT is don't care, since text is pinned)
        // r16-r31 all now hold bank1 values
        movl r2=XSI_BANK1_R16
        movl r3=XSI_BANK1_R16+8
.mem.offset 0,0; st8.spill [r2]=r16,16
.mem.offset 8,0; st8.spill [r3]=r17,16
.mem.offset 0,0; st8.spill [r2]=r18,16
.mem.offset 8,0; st8.spill [r3]=r19,16
.mem.offset 0,0; st8.spill [r2]=r20,16
.mem.offset 8,0; st8.spill [r3]=r21,16
.mem.offset 0,0; st8.spill [r2]=r22,16
.mem.offset 8,0; st8.spill [r3]=r23,16
.mem.offset 0,0; st8.spill [r2]=r24,16
.mem.offset 8,0; st8.spill [r3]=r25,16
.mem.offset 0,0; st8.spill [r2]=r26,16
.mem.offset 8,0; st8.spill [r3]=r27,16
.mem.offset 0,0; st8.spill [r2]=r28,16
.mem.offset 8,0; st8.spill [r3]=r29,16
.mem.offset 0,0; st8.spill [r2]=r30,16
.mem.offset 8,0; st8.spill [r3]=r31,16
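        // The bank-1 registers are staged in the shared page's
        // XSI_BANK1_R16 area, presumably so the hypervisor's emulation
        // of the bank switch below can restore them from there.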
        movl r2=XSI_BANKNUM;;
        bsw.0                   // switch back to bank 0 (no stop bit required beforehand...)
(pUStk) mov r18=IA64_KR(CURRENT)        // M2 (12 cycle read latency)
        adds r16=PT(CR_IPSR)+16,r12
        adds r17=PT(CR_IIP)+16,r12
(pKStk) mov r22=psr             // M2 read PSR now that interrupts are disabled
        ld8 r29=[r16],16        // load cr.ipsr
        ld8 r28=[r17],16        // load cr.iip
        ld8 r30=[r16],16        // load cr.ifs
        ld8 r25=[r17],16        // load ar.unat
        ld8 r26=[r16],16        // load ar.pfs
        ld8 r27=[r17],16        // load ar.rsc
        cmp.eq p9,p0=r0,r0      // set p9 to indicate that we should restore cr.ifs
        ld8 r24=[r16],16        // load ar.rnat (may be garbage)
        ld8 r23=[r17],16        // load ar.bspstore (may be garbage)
        ld8 r31=[r16],16        // load predicates
        ld8 r21=[r17],16        // load b0
        ld8 r19=[r16],16        // load ar.rsc value for "loadrs"
        ld8.fill r1=[r17],16    // load r1
        ld8.fill r12=[r16],16
        ld8.fill r13=[r17],16
(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
        ld8 r20=[r16],16        // ar.fpsr
        ld8.fill r15=[r17],16
        ld8.fill r14=[r16],16
(pUStk) st1 [r18]=r17           // restore current->thread.on_ustack
        shr.u r18=r19,16        // get byte size of existing "dirty" partition
        mov r16=ar.bsp          // get existing backing store pointer
        addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
        ld4 r17=[r17]           // r17 = cpu_data->phys_stacked_size_p8
(pKStk) br.cond.dpnt skip_rbs_switch
        /*
         * Restore user backing store.
         *
         * NOTE: alloc, loadrs, and cover can't be predicated.
         */
(pNonSys) br.cond.dpnt dont_preserve_current_frame
rbs_switch:
        cover                   // add current frame into dirty partition and set cr.ifs
        mov r19=ar.bsp          // get new backing store pointer
        sub r16=r16,r18         // krbs = old bsp - size of dirty partition
        cmp.ne p9,p0=r0,r0      // clear p9 to skip restore of cr.ifs
        sub r19=r19,r16         // calculate total byte size of dirty partition
        add r18=64,r18          // don't force in0-in7 into memory...
        shl r19=r19,16          // shift size of dirty partition into loadrs position
dont_preserve_current_frame:
        /*
         * To prevent leaking bits between the kernel and user-space,
         * we must clear the stacked registers in the "invalid" partition here.
         * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
         * 5 registers/cycle on McKinley).
         */
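        /*
         * Sketch of how the clearing works: rse_clear_invalid calls
         * itself, each activation allocating a frame of Nregs registers
         * and zeroing its locals, until fewer than Nregs*8 bytes of the
         * invalid partition remain; the chain of br.ret's then unwinds,
         * leaving the zeroed frames behind.
         */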
#       define pRecurse p6
#       define pReturn  p7
#ifdef CONFIG_ITANIUM
#       define Nregs    10
#else
#       define Nregs    14
#endif
        alloc loc0=ar.pfs,2,Nregs-2,2,0
        shr.u loc1=r18,9                // RNaTslots <= floor(dirtySize / (64*8))
        sub r17=r17,r18                 // r17 = (physStackedSize + 8) - dirtySize
        mov ar.rsc=r19                  // load ar.rsc to be used for "loadrs"
        shladd in0=loc1,3,r17
rse_clear_invalid:
#ifdef CONFIG_ITANIUM
        alloc loc0=ar.pfs,2,Nregs-2,2,0
        cmp.lt pRecurse,p0=Nregs*8,in0  // if more than Nregs regs left to clear, (re)curse
        add out0=-Nregs*8,in0
        add out1=1,in1                  // increment recursion count
        nop.b 0                         // can't do br.call here because of alloc (WAW on CFM)
(pRecurse) br.call.sptk.many b0=rse_clear_invalid
        cmp.ne pReturn,p0=r0,in1        // if recursion count != 0, we need to do a br.ret
(pReturn) br.ret.sptk.many b0
#else /* !CONFIG_ITANIUM */
        alloc loc0=ar.pfs,2,Nregs-2,2,0
        cmp.lt pRecurse,p0=Nregs*8,in0  // if more than Nregs regs left to clear, (re)curse
        add out0=-Nregs*8,in0
        add out1=1,in1                  // increment recursion count
(pRecurse) br.call.dptk.few b0=rse_clear_invalid
        cmp.ne pReturn,p0=r0,in1        // if recursion count != 0, we need to do a br.ret
(pReturn) br.ret.dptk.many b0
#endif /* !CONFIG_ITANIUM */
        alloc r17=ar.pfs,0,0,0,0        // drop current register frame
        ;;
        loadrs                          // restore the dirty partition from the kernel RBS
        ;;
skip_rbs_switch:
        mov ar.unat=r25                 // M2
(pKStk) extr.u r22=r22,21,1             // I0 extract current value of psr.pp from r22
(pLvSys)mov r19=r0                      // A  clear r19 for leave_syscall, no-op otherwise
(pUStk) mov ar.bspstore=r23             // M2
(pKStk) dep r29=r22,r29,21,1            // I0 update ipsr.pp with psr.pp
(pLvSys)mov r16=r0                      // A  clear r16 for leave_syscall, no-op otherwise
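        // psr.pp (PSR bit 21) is the privileged performance-monitor
        // enable; when returning within the kernel its live value is
        // folded into the to-be-restored ipsr so it isn't lost across
        // the return.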
        st8 [r25]=r29,XSI_IFS_OFS-XSI_IPSR_OFS
        mov cr.ipsr=r29                 // M2
(pLvSys)mov r17=r0                      // A  clear r17 for leave_syscall, no-op otherwise
        adds r25=XSI_IIP_OFS-XSI_IFS_OFS,r25
(p9)    mov cr.ifs=r30                  // M2
(pLvSys)mov r18=r0                      // A  clear r18 for leave_syscall, no-op otherwise
        mov ar.fpsr=r20                 // M2
(pUStk) mov ar.rnat=r24                 // M2 must happen with RSE in lazy mode
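        // ar.rnat and ar.bspstore may only be written while ar.rsc.mode
        // is "enforced lazy" (0); otherwise the RSE owns them and the
        // writes would raise an illegal-operation fault.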
/*
 * On entry:
 *      r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
 *      r31 = current->thread_info->flags
 * On exit:
 *      p6 = TRUE if work-pending-check needs to be redone
 */
.work_pending_syscall:
.work_pending:
        tbit.z p6,p0=r31,TIF_NEED_RESCHED       // current_thread_info()->need_resched==0?
(p6)    br.cond.sptk.few .notify
#ifdef CONFIG_PREEMPT
(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
(pKStk) st4 [r20]=r21
        ssm psr.i               // enable interrupts
#endif
        br.call.spnt.many rp=schedule
.ret9:  cmp.eq p6,p0=r0,r0      // p6 <- 1
        movl r2=XSI_PSR_I_ADDR
        rsm psr.i               // disable interrupts
#ifdef CONFIG_PREEMPT
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
(pKStk) st4 [r20]=r0            // preempt_count() <- 0
#endif
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
        br.cond.sptk.many .work_processed_kernel        // re-check
.notify:
(pUStk) br.call.spnt.many rp=notify_resume_user
.ret10: cmp.ne p6,p0=r0,r0      // p6 <- 0
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
        br.cond.sptk.many .work_processed_kernel        // don't re-check
.work_pending_syscall_end:
        adds r2=PT(R8)+16,r12
        adds r3=PT(R10)+16,r12
        ;;
        ld8 r8=[r2]
        ld8 r10=[r3]
        br.cond.sptk.many .work_processed_syscall       // re-check
END(xen_leave_kernel)
END(ia64_leave_kernel)