/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS_TASK
/*
 * MSR_KERNEL is > 0x10000 on 4xx since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
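/*
 * For reference: li takes a 16-bit signed immediate, so the single
 * instruction form is only safe while MSR_KERNEL fits in the low 15 bits;
 * the wider 4xx value needs the lis/ori pair.  Assuming, purely for
 * illustration, MSR_KERNEL = 0x11000:
 *
 *	LOAD_MSR_KERNEL(r10, 0x11000)
 *	expands to:	lis	r10,0x11000@h		# r10 = 0x00010000
 *			ori	r10,r10,0x11000@l	# r10 = 0x00011000
 */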
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
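/*
 * One way a caller can establish that convention (the actual prologue is
 * not shown here, so this is only a sketch): a recording AND against
 * MSR_PR from the saved SRR1 leaves cr0.eq set exactly when PR was 0:
 *
 *	mfspr	r9,SRR1
 *	andi.	r0,r9,MSR_PR		# cr0.eq set iff kernel mode
 */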
	.globl	transfer_to_handler_full
transfer_to_handler_full:

	.globl	transfer_to_handler
transfer_to_handler:
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	lwz	r12,PTRACE-THREAD(r12)
	andi.	r12,r12,PT_PTRACED
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
#ifdef CONFIG_6xx
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
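/*
 * The BEGIN_FTR_SECTION/END_FTR_SECTION_IFSET pairs are feature sections:
 * at boot, CPUs whose cputable entry lacks CPU_FTR_CAN_DOZE or
 * CPU_FTR_CAN_NAP get the corresponding bt- instruction patched to a nop,
 * so the checks are free on processors without those power-save states.
 */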
	.globl	transfer_to_handler_cont
transfer_to_handler_cont:
	lwz	r11,THREAD_INFO-THREAD(r12)
	cmplw	r1,r11			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	RFI				/* jump to handler, enable MMU */
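/*
 * RFI resumes execution at SRR0 with the MSR taken from SRR1.  The code
 * above this point (partly not shown) loads SRR0 with the handler's
 * virtual address and SRR1 with an MSR that has MSR_IR/MSR_DR set, which
 * is how translation gets enabled on entry to the handler.
 */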
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	ble	3b			/* r1 <= &_end is OK */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
/*
 * Handle a system call.
 */
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"entry.S",N_SO,0,0,0f
	stw	r0,THREAD+LAST_SYSCALL(r2)
	lwz	r11,_CCR(r1)		/* Clear SO bit in CR */
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,18		/* current_thread_info() */
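/*
 * The rlwinm keeps bits 0-18 (IBM numbering) of the stack pointer and
 * clears the low 13 bits, rounding r1 down to the 8KB (THREAD_SIZE)
 * boundary where thread_info lives.  As a C sketch:
 *
 *	struct thread_info *ti = (void *)(sp & ~(THREAD_SIZE - 1));
 */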
	lwz	r11,TI_LOCAL_FLAGS(r10)
	rlwinm	r11,r11,0,~_TIFL_FORCE_NOERROR
	stw	r11,TI_LOCAL_FLAGS(r10)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_TRACE
syscall_dotrace_cont:
	cmpli	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	lwzx	r10,r10,r0		/* Fetch system call handler [ptr] */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl				/* Call handler */
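/*
 * Dispatch sketch: r0 holds the syscall number and sys_call_table is an
 * array of 32-bit function pointers, so the omitted lines around the
 * lwzx presumably scale the index by 4 and move the loaded handler
 * address into LR, letting blrl both call it and record the return
 * address.
 */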
	.globl	ret_from_syscall
ret_from_syscall:
	bl	do_show_syscall_exit
	rlwinm	r12,r1,0,0,18		/* current_thread_info() */
	lwz	r11,TI_LOCAL_FLAGS(r12)
	andi.	r11,r11,_TIFL_FORCE_NOERROR
	lwz	r10,_CCR(r1)		/* Set SO bit in CR */
208 /* disable interrupts so current_thread_info()->flags can't change */
209 30: LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
213 andi. r0,r9,(_TIF_SYSCALL_TRACE|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
214 bne- syscall_exit_work
	/* If the process has its own DBCR0 value, load it up */
	andi.	r0,r0,PT_PTRACED
	stwcx.	r0,0,r1			/* to clear the reservation */
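/*
 * The dummy stwcx. matters: if we interrupted a task mid lwarx/stwcx.
 * sequence, a reservation could still be held, and a stwcx. executed
 * after we return might succeed spuriously.  A conditional store to a
 * harmless stack location drops any such reservation.
 */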
/* Traced system call support */
syscall_dotrace:
	lwz	r0,GPR0(r1)		/* Restore original registers */
	b	syscall_dotrace_cont
syscall_exit_work:
	stw	r6,RESULT(r1)		/* Save result */
	stw	r3,GPR3(r1)		/* Update return value */
	andi.	r0,r9,_TIF_SYSCALL_TRACE
	MTMSRD(r10)			/* re-enable interrupts */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	MTMSRD(r10)			/* disable interrupts again */
	rlwinm	r12,r1,0,0,18		/* current_thread_info() */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	syscall_exit_cont
	andi.	r0,r9,_TIF_SIGPENDING
	beq	syscall_exit_cont
	MTMSRD(r10)			/* re-enable interrupts */
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	stw	r3,RESULT(r1)		/* Save result */
7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
#ifdef SHOW_SYSCALLS_TASK
	.globl	show_syscalls_task
#endif /* SHOW_SYSCALLS_TASK */
#endif /* SHOW_SYSCALLS */
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r13 - r31) before calling
 * the C code.
 */
	.globl	ppc_sigsuspend
ppc_sigsuspend:
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
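/*
 * TRAP-value convention: the LSB of the saved trap number flags a frame
 * holding only the volatile registers.  rlwinm with mask 0-30 clears
 * exactly that bit, recording that r13-r31 are now saved as well; the
 * same idiom recurs at each entry point below.
 */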
	.globl	ppc_rt_sigsuspend
ppc_rt_sigsuspend:
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */

	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	.globl	ppc_swapcontext
ppc_swapcontext:
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork", which must set up an environment equivalent
 * to the "_switch" path. If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c.
 */
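/*
 * Viewed from C, the contract above is roughly (names illustrative,
 * not from this file):
 *
 *	last = _switch(&prev->thread, &next->thread);
 *
 * r3/r4 carry thread_struct pointers in; r3 carries the previous task
 * back out, executing in the new task's context.
 */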
_switch:
	stwu	r1,-INT_FRAME_SIZE(r1)
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	stw	r0,_NIP(r1)		/* Return to switch caller */
	li	r0,MSR_FP		/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h		/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE		/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r11		/* FP or altivec enabled? */
	stw	r1,KSP(r3)		/* Set old stack pointer */
#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
#endif /* CONFIG_SMP */
	mtspr	SPRG3,r0		/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)		/* Load new stack pointer */

	/* save the old current 'last' for return value */
	addi	r2,r4,-THREAD		/* Update current */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE register */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	/* r3-r12 are destroyed -- Cort */
	lwz	r4,_NIP(r1)		/* Return to _switch caller in new task */
	addi	r1,r1,INT_FRAME_SIZE
	.globl	sigreturn_exit
sigreturn_exit:
	subi	r1,r3,STACK_FRAME_OVERHEAD
	rlwinm	r12,r1,0,0,18		/* current_thread_info() */
	andi.	r0,r9,_TIF_SYSCALL_TRACE
	bnel-	do_syscall_trace
	.globl	ret_from_except_full
ret_from_except_full:

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)

	/* Check whether this process has its own DBCR0 value */
	andi.	r0,r0,PT_PTRACED
#ifdef CONFIG_PREEMPT

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	andi.	r0,r0,_TIF_NEED_RESCHED
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	lis	r0,PREEMPT_ACTIVE@h
	stw	r0,TI_PREEMPT(r9)
	MTMSRD(r10)		/* hard-enable interrupts */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	MTMSRD(r10)		/* disable interrupts */
	stw	r0,TI_PREEMPT(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
#endif /* CONFIG_PREEMPT */
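/*
 * Storing PREEMPT_ACTIVE into preempt_count around the (omitted) call to
 * schedule() tells the scheduler this is an involuntary kernel
 * preemption; afterwards the count is cleared and TIF_NEED_RESCHED is
 * re-tested with interrupts hard-disabled, looping back to 1: if another
 * reschedule became pending meanwhile.
 */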
	/* interrupts are hard-disabled at this point */
restore:
	stwcx.	r0,0,r1		/* to clear the reservation */
	andi.	r10,r9,MSR_RI	/* check if this exception occurred */
	beql	nonrecoverable	/* at a bad place (MSR:RI = 0) */
/*
 * Once we put values in SRR0 and SRR1, we are in a state
 * where exceptions are not recoverable, since taking an
 * exception will trash SRR0 and SRR1. Therefore we clear the
 * MSR:RI bit to indicate this. If we do take an exception,
 * we can't return to the point of the exception but we
 * can restart the exception exit path at the label
 * exc_exit_restart below. -- paulus
 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	MTMSRD(r10)		/* clear the RI bit */
	.globl	exc_exit_restart
exc_exit_restart:

	.globl	exc_exit_restart_end
exc_exit_restart_end:
#else /* CONFIG_4xx */
/*
 * This is a bit different on 4xx because 4xx doesn't have
 * the RI bit in the MSR.
 * The TLB miss handler checks if we have interrupted
 * the exception exit path and restarts it if so
 * (well, maybe one day it will... :).
 */
	.globl	exc_exit_restart
exc_exit_restart:

	.globl	exc_exit_restart_end
exc_exit_restart_end:
	b	.		/* prevent prefetch past rfi */
/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception. For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	stwcx.	r0,0,r1		/* to clear the reservation */
	/* avoid any possible TLB misses here by turning off MSR.DR, we
	 * assume the instructions here are mapped by a pinned TLB entry */
	lwz	r10,crit_sprg0@l(0)
	lwz	r10,crit_sprg1@l(0)
	lwz	r10,crit_sprg4@l(0)
	lwz	r10,crit_sprg5@l(0)
	lwz	r10,crit_sprg6@l(0)
	lwz	r10,crit_sprg7@l(0)
	lwz	r10,crit_srr0@l(0)
	lwz	r10,crit_srr1@l(0)
	lwz	r10,crit_pid@l(0)
	b	.		/* prevent prefetch past rfci */
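/*
 * On 4xx, critical-class interrupts save state in SRR2/SRR3 rather than
 * SRR0/SRR1, so this path ends in rfci instead of rfi; the final "b ."
 * keeps the prefetcher from running past the return, mirroring the rfi
 * case above.
 */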
/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.
 */
	mfmsr	r0		/* first disable debug exceptions */
	rlwinm	r0,r0,0,~MSR_DE
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
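/*
 * DBSR status bits are write-one-to-clear, so writing an all-ones value
 * (the li r12,-1 idiom seen earlier) wipes every pending debug event
 * before the traced task's own DBCR0 takes effect.
 */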
#endif /* CONFIG_4xx */
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED

do_resched:			/* r10 contains MSR_KERNEL here */
	MTMSRD(r10)		/* hard-enable interrupts */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	MTMSRD(r10)		/* disable interrupts */
	andi.	r0,r9,_TIF_NEED_RESCHED
	andi.	r0,r9,_TIF_SIGPENDING

do_user_signal:			/* r10 contains MSR_KERNEL here */
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	addi	r4,r1,STACK_FRAME_OVERHEAD
/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
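/*
 * The comparisons omitted above bound-check the interrupted address
 * (in r12) against [exc_exit_restart, exc_exit_restart_end); only a
 * fault inside that window is restartable, so only then do we count it
 * in ee_restarts and aim r12 back at exc_exit_restart.
 */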
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
/*
 * PROM code for specific machines follows. Put it
 * here so it's easy to add arch-specific sections later.
 */
#ifdef CONFIG_PPC_OF
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
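/*
 * Because RTAS runs with translation off (note the MSR built without
 * MSR_IR/MSR_DR below), everything handed to it must be physical: the
 * argument block at rtas_data, the entry point in rtas_entry, and the
 * "physical return address" placed in r6.
 */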
	stwu	r1,-INT_FRAME_SIZE(r1)
	stw	r0,INT_FRAME_SIZE+4(r1)
	lwz	r4,rtas_data@l(r4)
	lis	r6,1f@ha	/* physical return address for rtas */
	lwz	r8,rtas_entry@l(r8)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	RFI			/* return to caller */
	.globl	machine_check_in_rtas
machine_check_in_rtas:
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_OF */