/*
 *  arch/ppc64/kernel/entry.S
 *
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/cputable.h>
#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

.SYS_CALL_TABLE32:
	.tc .sys_call_table32[TC],.sys_call_table32

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
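/*
 * Note on the magic value: 0x7265677368657265 is simply the ASCII
 * bytes of the string "regshere" (0x72='r', 0x65='e', ...).  Stack
 * unwinders look for this doubleword just below the pt_regs area of
 * a frame (see the "regshere" marker store in SystemCall_common
 * below) to recognize frames that hold a saved register set.
 */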
	.section	".text"
	.align	7

	.globl SystemCall_common
SystemCall_common:
	addi	r1,r1,-INT_FRAME_SIZE
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#ifdef CONFIG_PPC_ISERIES
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	beq	HardwareInterrupt_entry
	lbz	r10,PACAPROCENABLED(r13)
	std	r10,SOFTE(r1)
#endif
	addi	r9,r1,STACK_FRAME_OVERHEAD

	clrrdi	r11,r1,THREAD_SHIFT
	li	r12,0
	ld	r10,TI_FLAGS(r11)
	stb	r12,TI_SC_NOERR(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpli	0,r0,NR_syscalls
	bge-	syscall_enosys
system_call:			/* label this so stack traces look sane */
	/*
	 * Need to vector to 32 Bit or default sys_call_table here,
	 * based on caller's run-mode / personality.
	 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	ld	r11,.SYS_CALL_TABLE32@toc(2)
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,3
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */
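	/*
	 * Roughly, in C terms (a sketch only -- the table entries are
	 * the text addresses of the handlers, not full function
	 * descriptors):
	 *
	 *	handler = sys_call_table[r0];		// r0: syscall number
	 *	r3 = handler(r3, r4, r5, r6, r7, r8);	// args in r3-r8
	 *
	 * The "slwi r0,r0,3" scales the syscall number by 8, the size
	 * of one table entry (a 64-bit pointer).
	 */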
syscall_exit:
#ifdef SHOW_SYSCALLS
	std	r3,GPR3(r1)
	bl	.do_show_syscall_exit
	ld	r3,GPR3(r1)
#endif
	std	r3,RESULT(r1)
	ld	r5,_CCR(r1)
	li	r10,-_LAST_ERRNO
	cmpld	r3,r10
	clrrdi	r12,r1,THREAD_SHIFT
	bge-	syscall_error
syscall_error_cont:

	/* check for syscall tracing or audit */
	ld	r9,TI_FLAGS(r12)
	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
	bne-	syscall_exit_trace
syscall_exit_trace_cont:
	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	mfmsr	r10
	rldicl	r10,r10,48,1	/* clear MSR_EE */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne-	syscall_exit_work
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	beq-	1f			/* only restore r13 if */
	ld	r13,GPR13(r1)		/* returning to usermode */
1:	ld	r2,GPR2(r1)
	li	r12,MSR_RI
	andc	r10,r10,r12
	mtmsrd	r10,1			/* clear MSR.RI */
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SRR0,r7
	mtspr	SRR1,r8
	rfid
	b	.	/* prevent speculative execution */
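	/*
	 * rfid atomically restores NIP from SRR0 and the MSR from SRR1.
	 * The "b ." after it is never executed; it only keeps the CPU
	 * from speculatively executing whatever bytes happen to follow
	 * the rfid in the instruction stream.
	 */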
syscall_enosys:
	li	r3,-ENOSYS
	std	r3,RESULT(r1)
	clrrdi	r12,r1,THREAD_SHIFT
	ld	r5,_CCR(r1)

syscall_error:
	lbz	r11,TI_SC_NOERR(r12)
	cmpi	0,r11,0
	bne-	syscall_error_cont
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
	b	syscall_error_cont
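	/*
	 * Error return convention: the handler returns a negative errno
	 * in r3; the code above negates it back to a positive errno and
	 * sets the summary-overflow (SO) bit of CR0 in the saved CR
	 * image (0x1000 shifted up 16 bits is CR0.SO).  Userspace libc
	 * tests SO, not the sign of r3, to decide whether a syscall
	 * failed.
	 */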
/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont
syscall_exit_trace:
	std	r3,GPR3(r1)
	bl	.save_nvgprs
	bl	.do_syscall_trace_leave
	REST_NVGPRS(r1)
	ld	r3,GPR3(r1)
	ld	r5,_CCR(r1)
	clrrdi	r12,r1,THREAD_SHIFT
	b	syscall_exit_trace_cont
/* Stuff to do on exit from a system call. */
syscall_exit_work:
	std	r3,GPR3(r1)
	std	r5,_CCR(r1)
	b	.ret_from_except_lite
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-			/* nvgprs already saved */
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1	/* clear the low bit: nvgprs now saved */
	std	r0,_TRAP(r1)
	blr
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */
_GLOBAL(ppc32_sigsuspend)
	bl	.save_nvgprs
	bl	.sys32_sigsuspend
	b	70f

_GLOBAL(ppc64_rt_sigsuspend)
	bl	.save_nvgprs
	bl	.sys_rt_sigsuspend
	b	70f

_GLOBAL(ppc32_rt_sigsuspend)
	bl	.save_nvgprs
	bl	.sys32_rt_sigsuspend

70:	cmpdi	0,r3,0
	/* If it returned an error, we need to return via syscall_exit to set
	   the SO bit in cr0 and potentially stop for ptrace. */
	bne	syscall_exit
	/* If sigsuspend() returned zero we are headed for a signal handler;
	   check for syscall tracing as on the normal exit path. */
	b	80f
_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.sys32_swapcontext
	b	80f

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	80f

_GLOBAL(ppc32_sigreturn)
	bl	.sys32_sigreturn
	b	80f

_GLOBAL(ppc32_rt_sigreturn)
	bl	.sys32_rt_sigreturn
	b	80f

_GLOBAL(ppc64_rt_sigreturn)
	bl	.sys_rt_sigreturn

80:	clrrdi	r4,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r4)
	andi.	r4,r4,_TIF_SYSCALL_T_OR_A
	beq+	81f
	bl	.do_syscall_trace_leave
81:	b	.ret_from_except
_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc64/kernel/process.c
 */
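/*
 * Ignoring the SLB, VRSAVE and iSeries details handled below, the
 * effect of _switch is roughly the following pseudo-C (a sketch only;
 * "task_of()" just stands for the addi reg,reg,-THREAD conversion
 * between a thread_struct and its containing task_struct):
 *
 *	struct task_struct *_switch(struct thread_struct *prev,  // r3
 *				    struct thread_struct *next)  // r4
 *	{
 *		prev->ksp = r1;			// save old stack pointer
 *		paca->current = task_of(next);	// switch 'current'
 *		r1 = next->ksp;			// adopt new kernel stack
 *		return task_of(prev);		// old task back in r3
 *	}
 */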
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */
	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */
	oris	r6,r6,0x0800	/* set C (class) bit */
	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
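	/*
	 * About the slbie above: once we move to the new stack, the SLB
	 * entry covering the old stack's segment may go stale, so it is
	 * invalidated -- unless the segment is the bolted kernel region
	 * (ESID 0xC...) or is the same segment the new stack uses, in
	 * which case the entry must stay.  slbie has to be fed the same
	 * class (C) bit the entry was created with, which is what the
	 * oris sets up; the doubled slbie works around the POWER5
	 * pre-DD2.1 erratum noted above.
	 */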
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

#ifdef CONFIG_PPC_ISERIES
	clrrdi	r7,r1,THREAD_SHIFT	/* get current_thread_info() */
	ld	r7,TI_FLAGS(r7)		/* Get run light flag */
	mfspr	r9,CTRLF
	srdi	r7,r7,TIF_RUN_LIGHT
	insrdi	r9,r7,1,63		/* Insert run light into CTRL */
	mtspr	CTRLT,r9
#endif
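	/*
	 * The "run light" is the RUN bit (bit 63) of the CTRL register,
	 * which the iSeries hypervisor uses to show whether this
	 * partition is doing useful work.  CTRLF and CTRLT are the read
	 * and write ports of that SPR; the srdi moves the TIF_RUN_LIGHT
	 * flag down to bit 0, and insrdi splices that single bit into
	 * bit 63 of the value written back.
	 */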
	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)
_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */
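	/*
	 * The rldicl/rotldi pair is the standard trick for clearing
	 * MSR_EE without a mask register: rotate left by 48 so the EE
	 * bit (bit 15 from the LSB) lands in the most-significant bit,
	 * let rldicl's mask clear that bit, then rotate left by the
	 * remaining 16 so every other bit returns to its home position.
	 */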
#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif
restore:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACA+LPPACAANYINT(r13)
	cmpdi	r3,0
	beq+	4f			/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACAPROCENABLED(r13)	/* ensure we are soft-disabled */
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except	/* loop back and handle more */

4:	stb	r5,PACAPROCENABLED(r13)
#endif
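	/*
	 * This is the DO_SOFT_DISABLE scheme from the top of the file:
	 * PACAPROCENABLED is iSeries' software interrupt-enable flag.
	 * Before really returning with interrupts on, any interrupt the
	 * hypervisor has queued in the lppaca is replayed via do_IRQ,
	 * and only then is the soft-enable state saved in the frame
	 * (SOFTE) written back.
	 */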
	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	REST_GPR(13, r1)
1:
	stdcx.	r0,0,r1		/* to clear the reservation */

	rfid
	b	.	/* prevent speculative execution */
/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
#ifdef CONFIG_PPC_ISERIES
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
#else
	andi.	r0,r3,MSR_EE
#endif
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:	lis	r0,PREEMPT_ACTIVE@h
	stw	r0,TI_PREEMPT(r9)
#ifdef CONFIG_PPC_ISERIES
	li	r0,1
	stb	r0,PACAPROCENABLED(r13)
#endif
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	li	r0,0
	stw	r0,TI_PREEMPT(r9)
	b	restore
user_work:
#endif
	/* Enable interrupts */
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore
#ifdef CONFIG_PPC_PSERIES
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */
	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */
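	/*
	 * The SAVE_* macros (from ppc_asm.h) expand to runs of std
	 * instructions into the frame's GPR slots; SAVE_8GPRS(14, r1),
	 * for instance, stores r14-r21 at GPR14(r1)...GPR21(r1).  Only
	 * r2, r13 and the non-volatiles need this treatment: r0 and
	 * r3-r12 are caller-saved, and RTAS only corrupts the high
	 * halves of what it keeps.
	 */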
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	mfmsr	r6
	andi.	r0,r6,MSR_EE
1:	tdnei	r0,0
	.section __bug_table,"a"
	.llong	1b,__LINE__ + 0x1000000, 1f, 2f
	.previous
	.section .rodata,"a"
1:	.asciz	__FILE__
2:	.asciz	"enter_rtas"
	.previous
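	/*
	 * This is a hand-rolled WARN_ON: tdnei traps if r0 (the MSR_EE
	 * bit extracted above) is non-zero, i.e. if we arrived here
	 * with interrupts hard-enabled.  The __bug_table entry ties the
	 * trapping address (1b) to __FILE__ and the name "enter_rtas",
	 * and the 0x1000000 added to __LINE__ marks the entry as a
	 * warning rather than a fatal BUG.
	 */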
	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)
	/* Setup our real return addr */
	SET_REG_TO_LABEL(r4,.rtas_return_loc)
	SET_REG_TO_CONST(r9,KERNELBASE)
	sub	r4,r4,r9
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */
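	/*
	 * Two MSR values are built above: r0 is the current MSR with
	 * EE/SE/BE/RI cleared, installed immediately so nothing can
	 * interrupt us while SRR0/SRR1 are live; r6 additionally clears
	 * SF (64-bit mode), IR/DR (translation) and the FP bits, and is
	 * the MSR that RTAS itself will run with once the rfid below
	 * takes effect.
	 */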
	SET_REG_TO_LABEL(r4,rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SRR0,r5
	mtspr	SRR1,r6
	rfid
	b	.	/* prevent speculative execution */
_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRG3	/* Get PACA */
	SET_REG_TO_CONST(r5, KERNELBASE)
	sub	r4,r4,r5	/* RELOC the PACA base pointer */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOADADDR(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */
	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */
_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	/* Get the PROM entrypoint */
	SET_REG_TO_LABEL(r12,prom)
	ld	r12,PROMENTRY(r12)
	mtlr	r12

	/* Switch MSR to 32 bits mode
	 */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync
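	/* The two li/rldicr/andc rounds above build one-bit masks for
	 * MSR_SF (the CPU's 64-bit mode) and MSR_ISF (the mode used
	 * when an interrupt is taken) and clear both bits; they sit too
	 * high in the MSR to be reached with a 16-bit immediate.  Only
	 * these two bits change; the rest of the MSR is left exactly as
	 * the caller had it.
	 */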
	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl
	/* Just make sure that r1 top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32
	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync
	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#endif	/* defined(CONFIG_PPC_PSERIES) */