/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS_TASK
/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
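/*
 * Example expansion: LOAD_MSR_KERNEL(r10,MSR_KERNEL) becomes
 * "lis r10,MSR_KERNEL@h; ori r10,r10,MSR_KERNEL@l" when the value
 * needs the upper halfword, or just "li r10,MSR_KERNEL" when it fits
 * in a signed 16-bit immediate.
 */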
#ifdef CONFIG_BOOKE
#define COR	r8	/* Critical Offset Register (COR) */
#define BOOKE_LOAD_COR	lis COR,crit_save@ha
#define BOOKE_REST_COR	mfspr COR,SPRG2
#define BOOKE_SAVE_COR	mtspr SPRG2,COR
#else
#define BOOKE_LOAD_COR
#define BOOKE_REST_COR
#define BOOKE_SAVE_COR
#endif
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	lis	r8,mcheck_save@ha
	lwz	r0,mcheck_r10@l(r8)
	stw	r0,GPR10(r11)
	lwz	r0,mcheck_r11@l(r8)
	stw	r0,GPR11(r11)
	b	transfer_to_handler_full
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(COR)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(COR)
	stw	r0,GPR11(r11)
	b	transfer_to_handler_full
#endif
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
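/*
 * (How callers establish that, a sketch: the exception prologue
 * typically tests the saved MSR with "andi. rX,r9,MSR_PR", which sets
 * cr0.eq exactly when the interrupted context had MSR:PR = 0, i.e.
 * was the kernel.)
 */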
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	.globl	transfer_to_handler
transfer_to_handler:
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
#ifdef CONFIG_6xx
	mfspr	r11,SPRN_HID0
	mtcr	r11
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
	lwz	r11,THREAD_INFO-THREAD(r12)
	cmplw	r1,r11			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	mtspr	SRR0,r11
	mtspr	SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r11,_end@h
	ori	r11,r11,_end@l
	cmplw	r1,r11
	ble	3b			/* r1 <= &_end is OK */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
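/*
 * (Note: r3 is pointed at the overflowed frame's pt_regs before r1 is
 * switched onto the init task's stack, init_thread_union + THREAD_SIZE,
 * so StackOverflow(regs) runs on a known-good stack.)
 */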
/*
 * Handle a system call.
 */
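/*
 * ABI sketch (the standard PPC32 Linux syscall convention, assumed
 * rather than spelled out here): the syscall number arrives in r0 and
 * the arguments in r3-r8; the result is returned in r3, with cr0.SO
 * flagging an error back to user space (see the "Set SO bit in CR"
 * path in ret_from_syscall below).
 */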
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"entry.S",N_SO,0,0,0f
0:
	stw	r0,THREAD+LAST_SYSCALL(r2)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,18	/* current_thread_info() */
	lwz	r11,TI_LOCAL_FLAGS(r10)
	rlwinm	r11,r11,0,~_TIFL_FORCE_NOERROR
	stw	r11,TI_LOCAL_FLAGS(r10)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_TRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2		/* scale index: entries are one word */
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl			/* Call handler */
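/*
 * In rough C terms (a sketch): r3 = sys_call_table[r0](r3, r4, r5, r6,
 * r7, r8); r9 is left pointing at the saved pt_regs for handlers that
 * take the register frame as an argument.
 */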
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r11,TI_LOCAL_FLAGS(r12)
	andi.	r11,r11,_TIFL_FORCE_NOERROR
	bne-	30f
	lwz	r10,_CCR(r1)	/* Set SO bit in CR */
	oris	r10,r10,0x1000
	stw	r10,_CCR(r1)
	/* disable interrupts so current_thread_info()->flags can't change */
30:	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_TRACE|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne-	syscall_exit_work
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	stwcx.	r0,0,r1		/* to clear the reservation */
/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	bl	do_syscall_trace
	lwz	r0,GPR0(r1)	/* Restore original registers */
	b	syscall_dotrace_cont
syscall_exit_work:
	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
	andi.	r0,r9,_TIF_SYSCALL_TRACE
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* re-enable interrupts */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)		/* disable interrupts again */
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	syscall_exit_cont
	andi.	r0,r9,_TIF_SIGPENDING
	beq	syscall_exit_cont
	MTMSRD(r10)		/* re-enable interrupts */
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	stw	r3,RESULT(r1)	/* Save result */
7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
#ifdef SHOW_SYSCALLS_TASK
	.globl	show_syscalls_task
show_syscalls_task:
#endif /* SHOW_SYSCALLS_TASK */
#endif /* SHOW_SYSCALLS */
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r13 - r31) before calling
 * the C code.
 */
	.globl	ppc_sigsuspend
ppc_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */
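/*
 * Convention sketch: the saved TRAP value keeps its low bit set while
 * the non-volatile GPRs are absent from the exception frame; doing
 * SAVE_NVGPRS and then clearing that bit (the rlwinm above) marks the
 * frame as a full register image for ptrace and the signal code.
 */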
	.globl	ppc_rt_sigsuspend
ppc_rt_sigsuspend:
	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */
	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */
	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */
	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */
/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	bad_page_fault
	b	ret_from_except_full
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
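/*
 * Caller sketch (assumed, from the scheduler side): something like
 *	last = _switch(&prev->thread, &next->thread);
 * so r3/r4 carry the two THREAD pointers on entry, and r3 on return
 * holds the old 'current', saved below for the 'last' return value.
 */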
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	stw	r1,KSP(r3)	/* Set old stack pointer */
#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */
	mtspr	SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */
	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0	/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)
	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr
	.globl	sigreturn_exit
sigreturn_exit:
	subi	r1,r3,STACK_FRAME_OVERHEAD
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,_TIF_SYSCALL_TRACE
	bnel-	do_syscall_trace
	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel
user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne	do_work
restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	b	restore
#ifdef CONFIG_PREEMPT
resume_kernel:
	/* N.B. the only way to get here is from the beq following ret_from_except. */
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,18
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,18
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */
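/*
 * Summary of the checks above: kernel preemption only happens when the
 * interrupted context had preempt_count == 0, TIF_NEED_RESCHED set,
 * and MSR_EE on in the saved MSR; anything else falls straight through
 * to restore.
 */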
restore:
	/* interrupts are hard-disabled at this point */
	stwcx.	r0,0,r1		/* to clear the reservation */
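/*
 * Why the dummy stwcx.: the interrupted code may still hold a lwarx
 * reservation; storing back to our own stack word cancels it, so a
 * later stwcx. in the resumed context can't succeed spuriously.
 */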
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI	/* check if this exception occurred */
	beql	nonrecoverable	/* at a bad place (MSR:RI = 0) */
/*
 * Once we put values in SRR0 and SRR1, we are in a state
 * where exceptions are not recoverable, since taking an
 * exception will trash SRR0 and SRR1.  Therefore we clear the
 * MSR:RI bit to indicate this.  If we do take an exception,
 * we can't return to the point of the exception but we
 * can restart the exception exit path at the label
 * exc_exit_restart below.  -- paulus
 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	.globl exc_exit_restart_end
exc_exit_restart_end:
#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	.globl exc_exit_restart
exc_exit_restart:
	.globl exc_exit_restart_end
exc_exit_restart_end:
	rfi
	b	.		/* prevent prefetch past rfi */
/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 * Note that SPRG6 is used for machine check on CONFIG_BOOKE parts and
 * thus is not saved in the critical handler.
 */
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	stwcx.	r0,0,r1		/* to clear the reservation */
	/* avoid any possible TLB misses here by turning off MSR.DR; we
	 * assume the instructions here are mapped by a pinned TLB entry */
	lwz	r10,crit_sprg0@l(COR)
	mtspr	SPRG0,r10
	lwz	r10,crit_sprg1@l(COR)
	mtspr	SPRG1,r10
	lwz	r10,crit_sprg4@l(COR)
	mtspr	SPRG4,r10
	lwz	r10,crit_sprg5@l(COR)
	mtspr	SPRG5,r10
	lwz	r10,crit_sprg6@l(COR)
	mtspr	SPRG6,r10
	lwz	r10,crit_sprg7@l(COR)
	mtspr	SPRG7,r10
	lwz	r10,crit_srr0@l(COR)
	mtspr	SRR0,r10
	lwz	r10,crit_srr1@l(COR)
	mtspr	SRR1,r10
	lwz	r10,crit_pid@l(COR)
	mtspr	SPRN_PID,r10
	rfci
	b	.		/* prevent prefetch past rfci */
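/*
 * (rfci returns from a critical-class interrupt via its own
 * save/restore SPRs, SRR2/SRR3 on 40x and CSRR0/CSRR1 on Book-E,
 * which is why the normal rfi path can't be used here.)
 */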
/*
 * Return from a machine check interrupt, similar to a critical
 * interrupt.
 */
	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	stwcx.	r0,0,r1		/* to clear the reservation */
	lis	r8,mcheck_save@ha
	lwz	r10,mcheck_sprg0@l(r8)
	mtspr	SPRG0,r10
	lwz	r10,mcheck_sprg1@l(r8)
	mtspr	SPRG1,r10
	lwz	r10,mcheck_sprg4@l(r8)
	mtspr	SPRG4,r10
	lwz	r10,mcheck_sprg5@l(r8)
	mtspr	SPRG5,r10
	lwz	r10,mcheck_sprg7@l(r8)
	mtspr	SPRG7,r10
	lwz	r10,mcheck_srr0@l(r8)
	mtspr	SRR0,r10
	lwz	r10,mcheck_srr1@l(r8)
	mtspr	SRR1,r10
	lwz	r10,mcheck_csrr0@l(r8)
	mtspr	SPRN_CSRR0,r10
	lwz	r10,mcheck_csrr1@l(r8)
	mtspr	SPRN_CSRR1,r10
	lwz	r10,mcheck_pid@l(r8)
	mtspr	SPRN_PID,r10
#endif /* CONFIG_BOOKE */
/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	SYNC
	mtmsr	r10
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal
do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b
/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 */
#ifdef CONFIG_PPC_OF
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
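/*
 * Call sketch (assumed from the code below, not spelled out here): r3
 * carries the physical address of the RTAS argument block; rtas_entry
 * and rtas_data come from the firmware, and we enter RTAS through
 * SRR0/SRR1 with MSR_IR/MSR_DR cleared so translation is off.
 */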
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	lis	r4,rtas_data@ha
	lwz	r4,rtas_data@l(r4)
	lis	r6,1f@ha	/* physical return address for rtas */
	lis	r8,rtas_entry@ha
	lwz	r8,rtas_entry@l(r8)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	mtspr	SRR0,r8
	mtspr	SRR1,r9
	RFI			/* return to caller */
	.globl	machine_check_in_rtas
machine_check_in_rtas:
	/* XXX load up BATs and panic */
#endif /* CONFIG_PPC_OF */