/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS_TASK
/*
 * MSR_KERNEL is > 0x10000 on 4xx since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
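
/*
 * Illustrative expansion (hypothetical value, for the comment above): if
 * MSR_KERNEL were 0x00021000, LOAD_MSR_KERNEL(r10,MSR_KERNEL) would emit
 *	lis r10,0x0002 ; ori r10,r10,0x1000
 * whereas a value fitting in a signed 16-bit immediate needs only the
 * single li form.
 */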

#ifdef CONFIG_BOOKE
#define BOOKE_LOAD_COR		lis COR,crit_save@ha
#define BOOKE_REST_COR		mfspr COR,SPRG2
#define BOOKE_SAVE_COR		mtspr SPRG2,COR
#else
#define BOOKE_LOAD_COR
#define BOOKE_REST_COR
#define BOOKE_SAVE_COR
#endif
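
/*
 * Summary of the macros above: on BookE, COR is a scratch GPR used to
 * address the crit_save area; BOOKE_SAVE_COR parks its previous contents
 * in SPRG2 and BOOKE_REST_COR brings them back, so the critical-exception
 * path does not lose a register.  On other configurations the macros
 * compile away to nothing.
 */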

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	lis	r8,mcheck_save@ha
	lwz	r0,mcheck_r10@l(r8)
	stw	r0,GPR10(r11)
	lwz	r0,mcheck_r11@l(r8)
	stw	r0,GPR11(r11)
	b	transfer_to_handler_full

#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(COR)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(COR)
	stw	r0,GPR11(r11)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
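
/*
 * A note on the dispatch mechanism used below (a restatement of the
 * visible code, not an extra contract): the exception prologue leaves LR
 * pointing at a two-word transfer table entry, where word 0 is the
 * handler's virtual address and word 1 is the address to continue at when
 * the handler returns; see the two lwz's off r9 below.
 */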
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	.globl	transfer_to_handler
transfer_to_handler:
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	lwz	r12,PTRACE-THREAD(r12)
	andi.	r12,r12,PT_PTRACED
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
#ifdef CONFIG_6xx
	mfspr	r11,SPRN_HID0
	mtcr	r11
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
	.globl	transfer_to_handler_cont
transfer_to_handler_cont:
	lwz	r11,THREAD_INFO-THREAD(r12)
	cmplw	r1,r11			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
3:	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	mtspr	SRR0,r11
	mtspr	SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r9,_end@h
	ori	r9,r9,_end@l
	cmplw	r1,r9
	ble	3b			/* r1 <= &_end is OK */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)

/*
 * Handle a system call.
 */
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"entry.S",N_SO,0,0,0f
0:
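/*
 * Syscall calling convention, as assumed by the code below: the syscall
 * number arrives in r0 and the arguments in r3-r8; the result comes back
 * in r3, with the SO bit of the saved CCR set to signal an error return
 * to the C library.
 */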
_GLOBAL(DoSyscall)
	stw	r0,THREAD+LAST_SYSCALL(r2)
	lwz	r11,_CCR(r1)		/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
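	/*
	 * The rlwinm below computes current_thread_info() without a load:
	 * the thread_info sits at the base of the 8KB-aligned kernel stack,
	 * so clearing the low 13 bits of r1 (keeping bits 0..18 in IBM bit
	 * numbering) yields its address.  (8KB assumes the usual two-page
	 * 32-bit THREAD_SIZE.)
	 */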
	rlwinm	r10,r1,0,0,18		/* current_thread_info() */
	lwz	r11,TI_LOCAL_FLAGS(r10)
	rlwinm	r11,r11,0,~_TIFL_FORCE_NOERROR
	stw	r11,TI_LOCAL_FLAGS(r10)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_TRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
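	/*
	 * Dispatch sketch: sys_call_table is an array of 4-byte function
	 * pointers, so the handler lives at sys_call_table + 4*r0; for a
	 * hypothetical syscall number 4, the pointer would be fetched from
	 * byte offset 16.  The slwi below forms that byte offset before the
	 * indexed load.
	 */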
	cmpli	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	rlwinm	r12,r1,0,0,18		/* current_thread_info() */
	lwz	r11,TI_LOCAL_FLAGS(r12)
	andi.	r11,r11,_TIFL_FORCE_NOERROR
	bne-	30f
	lwz	r10,_CCR(r1)		/* Set SO bit in CR */
	oris	r10,r10,0x1000
	stw	r10,_CCR(r1)
	/* disable interrupts so current_thread_info()->flags can't change */
30:	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_TRACE|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne-	syscall_exit_work
	/* If the process has its own DBCR0 value, load it up */
	andi.	r0,r0,PT_PTRACED
	stwcx.	r0,0,r1			/* to clear the reservation */

/* Traced system call support */
syscall_dotrace:
	lwz	r0,GPR0(r1)	/* Restore original registers */
	b	syscall_dotrace_cont
syscall_exit_work:
	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
	andi.	r0,r9,_TIF_SYSCALL_TRACE
	ori	r10,r10,MSR_EE
	MTMSRD(r10)		/* re-enable interrupts */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)		/* disable interrupts again */
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	syscall_exit_cont
	andi.	r0,r9,_TIF_SIGPENDING
	beq	syscall_exit_cont
	MTMSRD(r10)		/* re-enable interrupts */
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	stw	r3,RESULT(r1)	/* Save result */
7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
#ifdef SHOW_SYSCALLS_TASK
	.globl	show_syscalls_task
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r13 - r31) before calling
 * the C code.
 */
	.globl	ppc_sigsuspend
ppc_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */
	b	sys_sigsuspend

	.globl	ppc_rt_sigsuspend
ppc_rt_sigsuspend:
	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */

	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */

	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
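
/*
 * Rough C-level view (a sketch, not the kernel's exact code): the
 * scheduler's switch_to() ends up doing something like
 *	last = _switch(&prev->thread, &next->thread);
 * so a task "returns" from its own earlier call into _switch when it is
 * next scheduled in.
 */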
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r11	/* FP or altivec enabled? */
	stw	r1,KSP(r3)	/* Set old stack pointer */
#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */
	tophys(r0,r4)
	mtspr	SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */
	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)
	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	sigreturn_exit
sigreturn_exit:
	subi	r1,r3,STACK_FRAME_OVERHEAD
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,_TIF_SYSCALL_TRACE
	bnel-	do_syscall_trace

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */
	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne	do_work
	/* Check whether this process has its own DBCR0 value */
	andi.	r0,r0,PT_PTRACED

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,18
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	lis	r0,PREEMPT_ACTIVE@h
	stw	r0,TI_PREEMPT(r9)
	ori	r10,r10,MSR_EE
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	MTMSRD(r10)		/* disable interrupts */
	li	r0,0
	stw	r0,TI_PREEMPT(r9)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
	stwcx.	r0,0,r1		/* to clear the reservation */
	andi.	r10,r9,MSR_RI	/* check if this exception occurred */
	beql	nonrecoverable	/* at a bad place (MSR:RI = 0) */
/*
 * Once we put values in SRR0 and SRR1, we are in a state
 * where exceptions are not recoverable, since taking an
 * exception will trash SRR0 and SRR1.  Therefore we clear the
 * MSR:RI bit to indicate this.  If we do take an exception,
 * we can't return to the point of the exception but we
 * can restart the exception exit path at the label
 * exc_exit_restart below.  -- paulus
 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl	exc_exit_restart
exc_exit_restart:
	.globl	exc_exit_restart_end
exc_exit_restart_end:

#else /* CONFIG_4xx */
/*
 * This is a bit different on 4xx because 4xx doesn't have
 * the RI bit in the MSR.
 * The TLB miss handler checks if we have interrupted
 * the exception exit path and restarts it if so
 * (well maybe one day it will... :).
 */
	.globl	exc_exit_restart
exc_exit_restart:
	.globl	exc_exit_restart_end
exc_exit_restart_end:
	rfi
	b	.		/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	stwcx.	r0,0,r1		/* to clear the reservation */
	/* avoid any possible TLB misses here by turning off MSR.DR, we
	 * assume the instructions here are mapped by a pinned TLB entry */
	lwz	r10,crit_sprg0@l(COR)
	mtspr	SPRG0,r10
	lwz	r10,crit_sprg1@l(COR)
	mtspr	SPRG1,r10
	lwz	r10,crit_sprg4@l(COR)
	mtspr	SPRG4,r10
	lwz	r10,crit_sprg5@l(COR)
	mtspr	SPRG5,r10
	lwz	r10,crit_sprg6@l(COR)
	mtspr	SPRG6,r10
	lwz	r10,crit_sprg7@l(COR)
	mtspr	SPRG7,r10
	lwz	r10,crit_srr0@l(COR)
	mtspr	SRR0,r10
	lwz	r10,crit_srr1@l(COR)
	mtspr	SRR1,r10
	lwz	r10,crit_pid@l(COR)
	mtspr	SPRN_PID,r10
	rfci
	b	.		/* prevent prefetch past rfci */

/*
 * Return from a machine check interrupt, similar to a critical
 * interrupt.
 */
	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	stwcx.	r0,0,r1		/* to clear the reservation */
	lis	r8,mcheck_save@ha
	lwz	r10,mcheck_sprg0@l(r8)
	mtspr	SPRG0,r10
	lwz	r10,mcheck_sprg1@l(r8)
	mtspr	SPRG1,r10
	lwz	r10,mcheck_sprg4@l(r8)
	mtspr	SPRG4,r10
	lwz	r10,mcheck_sprg5@l(r8)
	mtspr	SPRG5,r10
	lwz	r10,mcheck_sprg7@l(r8)
	mtspr	SPRG7,r10
	lwz	r10,mcheck_srr0@l(r8)
	mtspr	SRR0,r10
	lwz	r10,mcheck_srr1@l(r8)
	mtspr	SRR1,r10
	lwz	r10,mcheck_csrr0@l(r8)
	mtspr	CSRR0,r10
	lwz	r10,mcheck_csrr1@l(r8)
	mtspr	CSRR1,r10
	lwz	r10,mcheck_pid@l(r8)
	mtspr	SPRN_PID,r10
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.
 */
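/*
 * Sketch of the flow, inferred from the code below: debug exceptions are
 * masked via MSR_DE first, the previous DBCR0 contents are parked in
 * global_dbcr0, the task's own value from THREAD_DBCR0 is installed, and
 * stale events are cleared out of DBSR.
 */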
	mfmsr	r0		/* first disable debug exceptions */
	rlwinm	r0,r0,0,~MSR_DE
	mtmsr	r0
	isync
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
#endif /* CONFIG_4xx */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING

do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
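/*
 * The test below restates that idea: if the interrupted PC fell inside
 * [exc_exit_restart, exc_exit_restart_end) the exception frame is still
 * intact, so we count the event in ee_restarts and resume at
 * exc_exit_restart; anywhere else the state is unrecoverable and the
 * process must be killed.
 */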
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 */
#ifdef CONFIG_PPC_OF
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
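/*
 * Calling convention sketch, as reflected in the code below: interrupts
 * are disabled so SRR0/SRR1 survive, an MSR value with IR/DR cleared is
 * built in r9, r6 carries a physical return address (the 1: label), and
 * control reaches rtas_entry untranslated; rtas_data supplies the
 * argument buffer address in r4.
 */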
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	lis	r4,rtas_data@ha
	lwz	r4,rtas_data@l(r4)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	lis	r8,rtas_entry@ha
	lwz	r8,rtas_entry@l(r8)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	mtspr	SRR0,r8
	mtspr	SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_OF */