/*
 * arch/ppc64/kernel/head.S
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the low-level support and setup for the
 * PowerPC-64 platform, including trap and interrupt dispatch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define SECONDARY_PROCESSORS

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/systemcfg.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * hcall interface to pSeries LPAR
 */
#define HVSC		.long 0x44000022
#define H_SET_ASR	0x30
/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x3fff : Interrupt support
 * 0x4000 - 0x4fff : NACA
 * 0x5000 - 0x5fff : SystemCfg
 * 0x6000          : iSeries and common interrupt prologs
 * 0x9000 - 0x9fff : Initial segment table
 */
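/*
 * The same map, sketched as C constants for reference (illustrative
 * only; the authoritative values are NACA_PHYS_ADDR, SYSTEMCFG_PHYS_ADDR
 * and STAB0_PHYS_ADDR from the headers included above):
 *
 *	enum phys_layout {
 *		SECONDARY_SPIN_PHYS = 0x0000,	// secondary processor spin code
 *		PSERIES_PROLOG_PHYS = 0x0100,	// pSeries interrupt prologs
 *		NACA_PHYS           = 0x4000,	// NACA_PHYS_ADDR
 *		SYSTEMCFG_PHYS      = 0x5000,	// SYSTEMCFG_PHYS_ADDR
 *		STAB0_PHYS          = 0x9000,	// STAB0_PHYS_ADDR
 *	};
 */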
/*
 * SPRG0 reserved for hypervisor
 * SPRG1 temp - used to save gpr
 * SPRG2 temp - used to save gpr
 * SPRG3 virt addr of paca
 */
/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & Open Firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at SystemReset_Iseries
 */
#ifdef CONFIG_PPC_PSERIES
	/* NOP this out unconditionally */
	b .__start_initialization_pSeries
#endif

	/* Catch branch to 0 in real mode */

#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor.
	 */
	.llong hvReleaseData-KERNELBASE

	/*
	 * At offset 0x28 and 0x30 are offsets to the msChunks
	 * array (used by the iSeries LPAR debugger to do translation
	 * between physical addresses and absolute addresses) and
	 * to the pidhash table (also used by the debugger).
	 */
	.llong msChunks-KERNELBASE
	.llong 0	/* pidhash-KERNELBASE SFRXXX */

	/* Offset 0x38 - Pointer to start of embedded System.map */
	.globl embedded_sysmap_start
embedded_sysmap_start:

	/* Offset 0x40 - Pointer to end of embedded System.map */
	.globl embedded_sysmap_end
embedded_sysmap_end:
	/* Secondary processors spin on this value until it goes to 1. */
	.globl __secondary_hold_spinloop
__secondary_hold_spinloop:

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	   */
	.globl __secondary_hold_acknowledge
__secondary_hold_acknowledge:

/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated. This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
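/*
 * Rough C model of the hold protocol implemented below (a sketch, not
 * the real code; the master releases the secondaries by storing a
 * nonzero value to __secondary_hold_spinloop):
 *
 *	extern volatile unsigned long __secondary_hold_spinloop;
 *	extern volatile unsigned long __secondary_hold_acknowledge;
 *
 *	void __secondary_hold(unsigned long cpu)
 *	{
 *		__secondary_hold_acknowledge = cpu;	// tell the master we're here
 *		while (__secondary_hold_spinloop == 0)
 *			;				// spin until released
 *		pseries_secondary_smp_init(cpu);	// does not return
 *	}
 */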
_GLOBAL(__secondary_hold)
	mtmsrd r24	/* RI on */

	/* Grab our linux cpu number */

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less	*/
	/* than 0x100, so we only need to grab the low-order offset.	*/
	std r24,__secondary_hold_acknowledge@l(0)

	/* All secondary CPUs wait here until told to start. */
100:	ld r4,__secondary_hold_spinloop@l(0)

	b .pseries_secondary_smp_init
	/* This value is used to mark exception frames on the stack. */
	.tc ID_72656773_68657265[TC],0x7265677368657265

/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers. They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 * LOL. One day... - paulus
 *
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 */
#define EXCEPTION_PROLOG_PSERIES(area, label) \
	mfspr r13,SPRG3;	/* get paca address into r13 */ \
	std r9,area+EX_R9(r13);	/* save r9 - r12 */ \
	std r10,area+EX_R10(r13); \
	std r11,area+EX_R11(r13); \
	std r12,area+EX_R12(r13); \
	std r9,area+EX_R13(r13); \
	clrrdi r12,r13,32;	/* get high part of &label */ \
	mfspr r11,SRR0;		/* save SRR0 */ \
	ori r12,r12,(label)@l;	/* virt addr of handler */ \
	ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
	mfspr r12,SRR1;		/* and SRR1 */ \

/*
 * This is the start of the interrupt handlers for iSeries.
 * This code runs with relocation on.
 */
#define EXCEPTION_PROLOG_ISERIES_1(area) \
	mfspr r13,SPRG3;	/* get paca address into r13 */ \
	std r9,area+EX_R9(r13);	/* save r9 - r12 */ \
	std r10,area+EX_R10(r13); \
	std r11,area+EX_R11(r13); \
	std r12,area+EX_R12(r13); \
	std r9,area+EX_R13(r13); \

#define EXCEPTION_PROLOG_ISERIES_2 \
	ld r11,PACALPPACA+LPPACASRR0(r13); \
	ld r12,PACALPPACA+LPPACASRR1(r13); \
	ori r10,r10,MSR_RI; \

/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address. We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area) \
	andi. r10,r12,MSR_PR;		/* See if coming from user */ \
	mr r10,r1;			/* Save r1 */ \
	subi r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */ \
	ld r1,PACAKSAVE(r13);		/* kernel stack to use */ \
1:	cmpdi cr1,r1,0;			/* check if r1 is in userspace */ \
	bge- cr1,bad_stack;		/* abort if it is */ \
	std r9,_CCR(r1);		/* save CR in stackframe */ \
	std r11,_NIP(r1);		/* save SRR0 in stackframe */ \
	std r12,_MSR(r1);		/* save SRR1 in stackframe */ \
	std r10,0(r1);			/* make stack chain pointer */ \
	std r0,GPR0(r1);		/* save r0 in stackframe */ \
	std r10,GPR1(r1);		/* save r1 in stackframe */ \
	std r2,GPR2(r1);		/* save r2 in stackframe */ \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */ \
	ld r9,area+EX_R9(r13);		/* move r9, r10 to stackframe */ \
	ld r10,area+EX_R10(r13); \
	ld r9,area+EX_R11(r13);		/* move r11 - r13 to stackframe */ \
	ld r10,area+EX_R12(r13); \
	ld r11,area+EX_R13(r13); \
	ld r2,PACATOC(r13);		/* get kernel TOC into r2 */ \
	mflr r9;			/* save LR in stackframe */ \
	mfctr r10;			/* save CTR in stackframe */ \
	mfspr r11,XER;			/* save XER in stackframe */ \
	std r9,_TRAP(r1);		/* set trap number */ \
	ld r11,exception_marker@toc(r2); \
	std r10,RESULT(r1);		/* clear regs->result */ \
	std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
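/*
 * Loosely, EXCEPTION_PROLOG_COMMON builds a struct pt_regs frame on the
 * kernel stack. A C sketch of the effect (illustrative only):
 *
 *	struct pt_regs *regs;
 *	sp   = from_user ? paca->kstack : r1;	// PACAKSAVE if from user
 *	regs = (struct pt_regs *)(sp - INT_FRAME_SIZE);
 *	regs->gpr[...] = saved GPRs;		// r9-r13 come from the paca save area
 *	regs->nip  = srr0;			// interrupted PC
 *	regs->msr  = srr1;			// interrupted MSR
 *	regs->trap = n;				// vector number
 *	regs->result = 0;
 *	// CR, LR, CTR, XER and the "regshere" marker word round it out
 */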
#define STD_EXCEPTION_PSERIES(n, label) \
	.globl label##_Pseries; \
	mtspr SPRG1,r13;	/* save r13 */ \
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)

#define STD_EXCEPTION_ISERIES(n, label, area) \
	.globl label##_Iseries; \
	mtspr SPRG1,r13;	/* save r13 */ \
	EXCEPTION_PROLOG_ISERIES_1(area); \
	EXCEPTION_PROLOG_ISERIES_2; \

#define MASKABLE_EXCEPTION_ISERIES(n, label) \
	.globl label##_Iseries; \
	mtspr SPRG1,r13;	/* save r13 */ \
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
	lbz r10,PACAPROFENABLED(r13); \
	bne- label##_Iseries_profile; \
label##_Iseries_prof_ret: \
	lbz r10,PACAPROCENABLED(r13); \
	beq- label##_Iseries_masked; \
	EXCEPTION_PROLOG_ISERIES_2; \
label##_Iseries_profile: \
	ld r12,PACALPPACA+LPPACASRR1(r13); \
	andi. r12,r12,MSR_PR;		/* Test if in kernel */ \
	bne label##_Iseries_prof_ret; \
	ld r11,PACALPPACA+LPPACASRR0(r13); \
	ld r12,PACAPROFSTEXT(r13);	/* _stext */ \
	subf r11,r12,r11;		/* offset into kernel */ \
	lwz r12,PACAPROFSHIFT(r13); \
	lwz r12,PACAPROFLEN(r13);	/* profile table length - 1 */ \
	cmpd r11,r12;			/* off end? */ \
	mr r11,r12;			/* force into last entry */ \
1:	sldi r11,r11,2;			/* convert to offset */ \
	ld r12,PACAPROFBUFFER(r13);	/* profile buffer */ \
2:	lwarx r11,0,r12;		/* atomically increment */ \
	b label##_Iseries_prof_ret

#ifdef DO_SOFT_DISABLE
#define DISABLE_INTS \
	lbz r10,PACAPROCENABLED(r13); \
	stb r11,PACAPROCENABLED(r13); \
	ori r10,r10,MSR_EE; \

#define ENABLE_INTS \
	lbz r10,PACAPROCENABLED(r13); \
	ori r11,r11,MSR_EE; \

#else	/* hard enable/disable interrupts */
#define ENABLE_INTS \
	rlwimi r11,r12,0,MSR_EE; \

#endif

#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
	.globl label##_common; \
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
	addi r3,r1,STACK_FRAME_OVERHEAD; \

#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
	.globl label##_common; \
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
	addi r3,r1,STACK_FRAME_OVERHEAD; \
	b .ret_from_except_lite
/*
 * Start of pSeries system interrupt routines
 */
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, SystemReset)

_MachineCheckPseries:
	mtspr SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, MachineCheck_common)

	.globl DataAccess_Pseries
DataAccess_Pseries:
	rlwimi r13,r12,16,0x20
	beq .do_stab_bolted_Pseries
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, DataAccess_common)

	.globl DataAccessSLB_Pseries
DataAccessSLB_Pseries:
	beq .do_slb_bolted_Pseries
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, DataAccessSLB_common)

	STD_EXCEPTION_PSERIES(0x400, InstructionAccess)
	STD_EXCEPTION_PSERIES(0x480, InstructionAccessSLB)
	STD_EXCEPTION_PSERIES(0x500, HardwareInterrupt)
	STD_EXCEPTION_PSERIES(0x600, Alignment)
	STD_EXCEPTION_PSERIES(0x700, ProgramCheck)
	STD_EXCEPTION_PSERIES(0x800, FPUnavailable)
	STD_EXCEPTION_PSERIES(0x900, Decrementer)
	STD_EXCEPTION_PSERIES(0xa00, Trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, Trap_0b)

	.globl SystemCall_Pseries
SystemCall_Pseries:
	oris r12,r12,SystemCall_common@h
	ori r12,r12,SystemCall_common@l
	ori r10,r10,MSR_IR|MSR_DR|MSR_RI

	STD_EXCEPTION_PSERIES(0xd00, SingleStep)
	STD_EXCEPTION_PSERIES(0xe00, Trap_0e)

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary.
	 */
	b PerformanceMonitor_Pseries
	STD_EXCEPTION_PSERIES(0xf20, AltivecUnavailable)

	STD_EXCEPTION_PSERIES(0x1300, InstructionBreakpoint)
	STD_EXCEPTION_PSERIES(0x1700, AltivecAssist)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(0x3000, PerformanceMonitor)
_GLOBAL(do_stab_bolted_Pseries)
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

_GLOBAL(do_slb_bolted_Pseries)
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_slb_bolted)

	/* Space for the naca. Architected to be located at real address
	 * NACA_PHYS_ADDR. Various tools rely on this location being fixed.
	 * The first dword of the naca is required by iSeries LPAR to
	 * point to itVpdAreas. On pSeries native, this value is not used.
	 */

	.globl __end_interrupts
__end_interrupts:

#ifdef CONFIG_PPC_ISERIES

	. = SYSTEMCFG_PHYS_ADDR
	.globl __start_systemcfg
__start_systemcfg:
	. = (SYSTEMCFG_PHYS_ADDR + PAGE_SIZE)
	.globl __end_systemcfg
__end_systemcfg:

#ifdef CONFIG_PPC_ISERIES
	/*
	 * The iSeries LPAR map is at this fixed address
	 * so that the HvReleaseData structure can address
	 * it with a 32-bit offset.
	 *
	 * The VSID values below are dependent on the
	 * VSID generation algorithm. See include/asm/mmu_context.h.
	 */
	.llong 1		/* # ESIDs to be mapped by hypervisor */
	.llong 1		/* # memory ranges to be mapped by hypervisor */
	.llong STAB0_PAGE	/* Page # of segment table within load area */
	.llong 0		/* Reserved */
	.llong 0		/* Reserved */
	.llong 0		/* Reserved */
	.llong 0		/* Reserved */
	.llong 0		/* Reserved */
	.llong 0x0c00000000	/* ESID to map (Kernel at EA = 0xC000000000000000) */
	.llong 0x06a99b4b14	/* VSID to map (Kernel at VA = 0x6a99b4b140000000) */
	.llong 8192		/* # pages to map (32 MB) */
	.llong 0		/* Offset from start of loadarea to start of map */
	.llong 0x0006a99b4b140000	/* VPN of first page to map */
/*** ISeries-LPAR interrupt handlers ***/

	STD_EXCEPTION_ISERIES(0x200, MachineCheck, PACA_EXMC)

	.globl DataAccess_Iseries
DataAccess_Iseries:
	rlwimi r13,r12,16,0x20
	beq .do_stab_bolted_Iseries
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
	EXCEPTION_PROLOG_ISERIES_2

.do_stab_bolted_Iseries:
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	EXCEPTION_PROLOG_ISERIES_2

	.globl DataAccessSLB_Iseries
DataAccessSLB_Iseries:
	mtspr SPRG1,r13		/* save r13 */
	beq .do_slb_bolted_Iseries
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
	EXCEPTION_PROLOG_ISERIES_2
	b DataAccessSLB_common

.do_slb_bolted_Iseries:
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	EXCEPTION_PROLOG_ISERIES_2

	STD_EXCEPTION_ISERIES(0x400, InstructionAccess, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x480, InstructionAccessSLB, PACA_EXGEN)
	MASKABLE_EXCEPTION_ISERIES(0x500, HardwareInterrupt)
	STD_EXCEPTION_ISERIES(0x600, Alignment, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x700, ProgramCheck, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x800, FPUnavailable, PACA_EXGEN)
	MASKABLE_EXCEPTION_ISERIES(0x900, Decrementer)
	STD_EXCEPTION_ISERIES(0xa00, Trap_0a, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xb00, Trap_0b, PACA_EXGEN)

	.globl SystemCall_Iseries
SystemCall_Iseries:
	EXCEPTION_PROLOG_ISERIES_2

	STD_EXCEPTION_ISERIES(0xd00, SingleStep, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xe00, Trap_0e, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xf00, PerformanceMonitor, PACA_EXGEN)

	.globl SystemReset_Iseries
SystemReset_Iseries:
	mfspr r13,SPRG3		/* Get paca address */
	mtmsrd r24		/* RI on */
	lhz r24,PACAPACAINDEX(r13)	/* Get processor # */
	cmpwi 0,r24,0		/* Are we processor 0? */
	beq .__start_initialization_iSeries	/* Start up the first processor */
	li r5,RUNLATCH		/* Turn off the run light */

	lbz r23,PACAPROCSTART(r13)	/* Test if this processor
					 * should start */
	LOADADDR(r3,current_set)
	sldi r28,r24,3		/* get current_set[cpu#] */
	addi r1,r3,THREAD_SIZE
	subi r1,r1,STACK_FRAME_OVERHEAD

	beq iseries_secondary_smp_loop	/* Loop until told to go */
#ifdef SECONDARY_PROCESSORS
	bne .__secondary_start		/* Loop until told to go */
#endif

iseries_secondary_smp_loop:
	/* Let the Hypervisor know we are alive */
	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
	rldicr r3,r3,32,15	/* r3 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
	/* Yield the processor. This is required for non-SMP kernels
	 * which are running on multi-threaded machines. */
	rldicr r3,r3,32,15	/* r3 = (r3 << 32) & 0xffff000000000000 */
	addi r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
	li r4,0			/* "yield timed" */
	li r5,-1		/* "yield forever" */
#endif /* CONFIG_SMP */
	li r0,-1		/* r0=-1 indicates a Hypervisor call */
	sc			/* Invoke the hypervisor via a system call */
	mfspr r13,SPRG3		/* Put r13 back ???? */
	b 1b			/* If SMP not configured, secondaries
				 * loop forever */
	.globl Decrementer_Iseries_masked
Decrementer_Iseries_masked:
	stb r11,PACALPPACA+LPPACADECRINT(r13)
	lwz r12,PACADEFAULTDECR(r13)

	.globl HardwareInterrupt_Iseries_masked
HardwareInterrupt_Iseries_masked:
	mtcrf 0x80,r9		/* Restore regs */
	ld r11,PACALPPACA+LPPACASRR0(r13)
	ld r12,PACALPPACA+LPPACASRR1(r13)
	ld r9,PACA_EXGEN+EX_R9(r13)
	ld r10,PACA_EXGEN+EX_R10(r13)
	ld r11,PACA_EXGEN+EX_R11(r13)
	ld r12,PACA_EXGEN+EX_R12(r13)
	ld r13,PACA_EXGEN+EX_R13(r13)

/*
 * Data area reserved for FWNMI option.
 */
	.globl fwnmi_data_area
fwnmi_data_area:

/*
 * Vectors for the FWNMI option. Share common code.
 */
	.globl SystemReset_FWNMI
SystemReset_FWNMI:
	mtspr SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, SystemReset_common)
	.globl MachineCheck_FWNMI
MachineCheck_FWNMI:
	mtspr SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, MachineCheck_common)

/*
 * Space for the initial segment table.
 * For LPAR, the hypervisor must fill in at least one entry
 * before we get control (with relocate on).
 */
	. = (STAB0_PHYS_ADDR + PAGE_SIZE)
/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, SystemReset, .SystemResetException)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.globl MachineCheck_common
MachineCheck_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	addi r3,r1,STACK_FRAME_OVERHEAD
	bl .MachineCheckException

	STD_EXCEPTION_COMMON_LITE(0x900, Decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, Trap_0a, .UnknownException)
	STD_EXCEPTION_COMMON(0xb00, Trap_0b, .UnknownException)
	STD_EXCEPTION_COMMON(0xd00, SingleStep, .SingleStepException)
	STD_EXCEPTION_COMMON(0xe00, Trap_0e, .UnknownException)
	STD_EXCEPTION_COMMON(0xf00, PerformanceMonitor, .PerformanceMonitorException)
	STD_EXCEPTION_COMMON(0x1300, InstructionBreakpoint, .InstructionBreakpointException)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, AltivecAssist, .AltivecAssistException)
#else
	STD_EXCEPTION_COMMON(0x1700, AltivecAssist, .UnknownException)
#endif
/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using the paca guard page as an emergency stack,
 * save the registers there, and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld r1,PACAEMERGSP(r13)
	subi r1,r1,64+INT_FRAME_SIZE
	addi r11,r1,INT_FRAME_SIZE
1:	addi r3,r1,STACK_FRAME_OVERHEAD

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exception_return:
	andi. r3,r12,MSR_RI	/* check if RI is set */
	clrrdi r10,r10,2	/* clear RI (LE is 0 already) */
1:	addi r3,r1,STACK_FRAME_OVERHEAD
	bl .unrecoverable_exception
/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.globl DataAccess_common
DataAccess_common:
	std r10,PACA_EXGEN+EX_DAR(r13)
	stw r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld r3,PACA_EXGEN+EX_DAR(r13)
	lwz r4,PACA_EXGEN+EX_DSISR(r13)
	b .do_hash_page		/* Try to handle as hpte fault */
	.globl DataAccessSLB_common
DataAccessSLB_common:
	std r10,PACA_EXGEN+EX_DAR(r13)
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld r3,PACA_EXGEN+EX_DAR(r13)
	cmpdi r3,0		/* Check return code */
	beq fast_exception_return	/* Return if we succeeded */

	.globl InstructionAccess_common
InstructionAccess_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	b .do_hash_page		/* Try to handle as hpte fault */

	.globl InstructionAccessSLB_common
InstructionAccessSLB_common:
	EXCEPTION_PROLOG_COMMON(0x480, PACA_EXGEN)
	ld r3,_NIP(r1)		/* SRR0 = NIA */
	or. r3,r3,r3		/* Check return code */
	beq+ fast_exception_return	/* Return if we succeeded */

	.globl HardwareInterrupt_common
	.globl HardwareInterrupt_entry
HardwareInterrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
HardwareInterrupt_entry:
	addi r3,r1,STACK_FRAME_OVERHEAD
	b .ret_from_except_lite

	.globl Alignment_common
Alignment_common:
	std r10,PACA_EXGEN+EX_DAR(r13)
	stw r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld r3,PACA_EXGEN+EX_DAR(r13)
	lwz r4,PACA_EXGEN+EX_DSISR(r13)
	addi r3,r1,STACK_FRAME_OVERHEAD
	bl .AlignmentException

	.globl ProgramCheck_common
ProgramCheck_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	addi r3,r1,STACK_FRAME_OVERHEAD
	bl .ProgramCheckException

	.globl FPUnavailable_common
FPUnavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne .load_up_fpu	/* if from user, just load it up */
	addi r3,r1,STACK_FRAME_OVERHEAD
	bl .KernelFPUnavailableException

	.globl AltivecUnavailable_common
AltivecUnavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
	bne .load_up_altivec	/* if from user, just load it up */
#endif
	addi r3,r1,STACK_FRAME_OVERHEAD
	bl .AltivecUnavailableException
_GLOBAL(do_hash_page)
	andis. r0,r4,0xa450	/* weird error? */
	bne- .handle_page_fault	/* if not, try to insert a HPTE */
	andis. r0,r4,0x0020	/* Is it a segment table fault? */
	bne- .do_ste_alloc	/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm r4,r4,32-23,29,29	/* DSISR_STORE -> _PAGE_RW */
	rotldi r0,r3,15		/* Move high bit into MSR_PR posn */
	orc r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori r4,r4,1		/* add _PAGE_PRESENT */
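	/*
	 * In C terms, the four instructions above compute the access
	 * mask (a sketch, assuming the usual _PAGE_* bit definitions):
	 *
	 *	access  = (dsisr & DSISR_STORE) ? _PAGE_RW : 0;
	 *	if ((msr & MSR_PR) || !(ea >> 63))	// from user, or non-kernel EA
	 *		access |= _PAGE_USER;
	 *	access |= _PAGE_PRESENT;
	 */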
	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
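	/*
	 * i.e. the call below behaves like the C prototype
	 * (a sketch of the contract, not a declaration from this tree):
	 *
	 *	int hash_page(unsigned long ea, unsigned long access,
	 *		      unsigned long trap);	// 0 on success
	 */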
	bl .hash_page		/* build HPTE if possible */
	cmpdi r3,0		/* see if hash_page succeeded */

#ifdef DO_SOFT_DISABLE
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * we need to handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq .ret_from_except_lite

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap. Note that .local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	bl .local_irq_restore
	beq+ fast_exception_return	/* Return from exception on success */

	/* Here we have a page fault that hash_page can't handle. */
_GLOBAL(handle_page_fault)
	addi r3,r1,STACK_FRAME_OVERHEAD
	beq+ .ret_from_except_lite
	addi r3,r1,STACK_FRAME_OVERHEAD

	/* here we have a segment miss */
_GLOBAL(do_ste_alloc)
	bl .ste_allocate	/* try to insert stab entry */
	beq+ fast_exception_return
	b .handle_page_fault
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
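/*
 * What follows computes, in C terms (a sketch; VSID_RANDOMIZER and
 * VSID_MASK as defined in the kernel headers, 16-byte STEs in groups
 * of 8, one page of STEs in total):
 *
 *	ordinal = (((ea >> 28) & 0x1fff) << 15) | (ea >> 60);
 *	vsid    = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
 *	group   = stab + (((ea >> 28) & 0x1f) << 7);	// first STE of primary group
 */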
_GLOBAL(do_stab_bolted)
	stw r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld r10,PACASTABVIRT(r13)
	rldimi r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */

	/* VSID_RANDOMIZER */
	rldic r9,r9,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld r11,0(r10)		/* Test valid bit of the current ste */

	/* Stick to searching only the primary group for now.		 */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number; OR in 1 to avoid entry 0	 */
	rldic r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry			  */
	or r10,r10,r11		/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	rldicl r11,r11,56,1	/* clear the valid bit */

	clrrdi r11,r11,28	/* Get the esid part of the ste */

2:	std r9,8(r10)		/* Store the vsid part of the ste */

	mfspr r11,DAR		/* Get the new esid */
	clrrdi r11,r11,28	/* Permits a full 32b of ESID */
	ori r11,r11,0x90	/* Turn on valid and kp */
	std r11,0(r10)		/* Put new entry back into the stab */
	/* All done -- return from exception. */
	lwz r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
	andi. r10,r12,MSR_RI
	mtcrf 0x80,r9		/* restore CR */

	ld r9,PACA_EXSLB+EX_R9(r13)
	ld r10,PACA_EXSLB+EX_R10(r13)
	ld r11,PACA_EXSLB+EX_R11(r13)
	ld r12,PACA_EXSLB+EX_R12(r13)
	ld r13,PACA_EXSLB+EX_R13(r13)
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 */
	/* XXX note fix masking in get_kernel_vsid to match */
_GLOBAL(do_slb_bolted)
	stw r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/*
	 * We take the next entry, round robin. Previously we tried
	 * to find a free slot first but that took too long. Unfortunately
	 * we don't have any LRU information to help us choose a slot.
	 */
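	/*
	 * The round-robin choice, as a C sketch (illustrative; the
	 * counter lives in the paca, and slots 0 and 1 are never reused):
	 *
	 *	entry = paca->stab_rr;		// slot we will use
	 *	next  = entry + 1;
	 *	if (next >= SLB_NUM_ENTRIES)
	 *		next = 2;		// don't touch slot 0 or 1
	 *	paca->stab_rr = next;
	 */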
1:	ld r10,PACASTABRR(r13)
	cmpdi r9,SLB_NUM_ENTRIES
	li r9,2			/* don't touch slot 0 or 1 */
2:	std r9,PACASTABRR(r13)

	/* r13 = paca, r10 = entry */

	/*
	 * Never cast out the segment for our kernel stack. Since we
	 * don't invalidate the ERAT we could have a valid translation
	 * for the kernel stack during the first part of exception exit
	 * which gets invalidated due to a tlbie from another cpu at a
	 * non-recoverable point (after setting srr0/1) - Anton
	 */

	/*
	 * Use paca->ksave as the value of the kernel stack pointer,
	 * because this is valid at all times.
	 * The >> 27 (rather than >> 28) is so that the LSB is the
	 * valid bit - this way we check valid and ESID in one compare.
	 * In order to completely close the tiny race in the context
	 * switch (between updating r1 and updating paca->ksave),
	 * we check against both r1 and paca->ksave.
	 */
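	/*
	 * In C terms (a sketch of the check, not the exact code;
	 * "esid word" here means SLB word0 of the candidate slot):
	 *
	 *	cand = esid_word(entry) >> 27;		// [ ESID | valid ]
	 *	if (cand == ((r1 >> 27) | 1) ||
	 *	    cand == ((paca->ksave >> 27) | 1))
	 *		goto pick_next_slot;	// never cast out the kernel stack
	 */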
	ld r11,PACAKSAVE(r13)

	/* r13 = paca, r10 = entry */

	/* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */

	/* VSID_RANDOMIZER */

	/* vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK */

	/* r13 = paca, r10 = entry, r11 = vsid */

	/* Put together slb word1 */

	/* set kp and c bits */
END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
	/* set kp, l and c bits */
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)

	/* r13 = paca, r10 = entry, r11 = slb word1 */

	/* Put together slb word0 */
	clrrdi r9,r9,28		/* get the new esid */
	oris r9,r9,0x800	/* set valid bit */
	rldimi r9,r10,0,52	/* insert entry */

	/* r13 = paca, r9 = slb word0, r11 = slb word1 */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */

	/* All done -- return from exception. */
	lwz r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
	andi. r10,r12,MSR_RI	/* check for unrecoverable exception */

	/*
	 * Until everyone updates binutils, hardwire the POWER4-optimised
	 * single-field mtcrf.
	 */

	ld r9,PACA_EXSLB+EX_R9(r13)
	ld r10,PACA_EXSLB+EX_R10(r13)
	ld r11,PACA_EXSLB+EX_R11(r13)
	ld r12,PACA_EXSLB+EX_R12(r13)
	ld r13,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1:	addi r3,r1,STACK_FRAME_OVERHEAD
	bl .unrecoverable_exception
/*
 * On pSeries, secondary processors spin in the following code.
 * At entry, r3 = this processor's number (in Linux terms, not hardware).
 */
_GLOBAL(pseries_secondary_smp_init)
	/* turn on 64-bit mode */

	/* Set up a paca value for this processor. */
	LOADADDR(r24, paca)	/* Get base vaddr of paca array */
	mulli r13,r3,PACA_SIZE	/* Calculate vaddr of right paca */
	add r13,r13,r24		/* for this processor. */
	mtspr SPRG3,r13		/* Save vaddr of paca in SPRG3 */
	mr r24,r3		/* __secondary_start needs cpu# */

	lbz r23,PACAPROCSTART(r13)	/* Test if this processor should
					 * start */

	/* Create a temp kernel stack for use before relocation is on. */
	ld r1,PACAEMERGSP(r13)
	subi r1,r1,STACK_FRAME_OVERHEAD

#ifdef SECONDARY_PROCESSORS
	bne .__secondary_start
#endif
	b 1b			/* Loop until told to go */
#ifdef CONFIG_PPC_ISERIES
_GLOBAL(__start_initialization_iSeries)
	/* Clear out the BSS */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub r11,r11,r8		/* bss size */
	addi r11,r11,7		/* round up to an even double word */
	rldicl. r11,r11,61,3	/* shift right by 3 */
	mtctr r11		/* zero this many doublewords */

	LOADADDR(r1,init_thread_union)
	addi r1,r1,THREAD_SIZE
	stdu r0,-STACK_FRAME_OVERHEAD(r1)

	LOADADDR(r3,cpu_specs)
	LOADADDR(r4,cur_cpu_spec)

	LOADADDR(r2,__toc_start)

	LOADADDR(r9,systemcfg)
	SET_REG_TO_CONST(r4, SYSTEMCFG_VIRT_ADDR)
	std r4,0(r9)		/* set the systemcfg pointer */

	SET_REG_TO_CONST(r4, NACA_VIRT_ADDR)
	std r4,0(r9)		/* set the naca pointer */

	/* Get the pointer to the segment table */
	ld r6,PACA(r4)		/* Get the base paca pointer */
	ld r4,PACASTABVIRT(r6)

	bl .iSeries_fixup_klimit

	/* relocation is on at this point */
	b .start_here_common
#ifdef CONFIG_PPC_PSERIES
	andi. r0,r3,MSR_IR|MSR_DR

_GLOBAL(__start_initialization_pSeries)
	mr r31,r3		/* save parameters */

	/* put a relocation offset into r3 */

	LOADADDR(r2,__toc_start)

	/* Relocate the TOC from a virt addr to a real addr */

	/* Save parameters */

	/* Do all of the interaction with OF client interface */
	mr r23,r3		/* Save phys address we are running at */

	/* Setup some critical 970 SPRs before switching MMU off */
	bl .__970_cpu_preinit

	li r24,0		/* cpu # */

	/* Switch off MMU if not already */
	LOADADDR(r4, .__after_prom_start - KERNELBASE)
/*
 * At this point, r3 contains the physical address we are running at,
 * returned by prom_init()
 */
_STATIC(__after_prom_start)

/*
 * We need to run with __start at physical address 0.
 * This will leave some code in the first 256B of
 * real memory, which is reserved for software use.
 * The remainder of the first page is loaded with the fixed
 * interrupt vectors. The next two pages are filled with
 * unknown exception placeholders.
 *
 * Note: This process overwrites the OF exception vectors.
 *	r26 == relocation offset
 */
	SET_REG_TO_CONST(r27,KERNELBASE)

	li r3,0			/* target addr */

	// XXX FIXME: Use phys returned by OF (r23)
	sub r4,r27,r26		/* source addr */
				/* current address of _start */
				/* i.e. where we are running */
				/* the source addr */

	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy */

	li r6,0x100		/* Start offset, the first 0x100 */
				/* bytes were copied earlier.	 */

	bl .copy_and_flush	/* copy the first n bytes */
				/* this includes the code being */
				/* executed here.		*/

	LOADADDR(r0, 4f)	/* Jump to the copy of this code */
	mtctr r0		/* that we just made/relocated */

4:	LOADADDR(r5,klimit)
	ld r5,0(r5)		/* get the value of klimit */
	bl .copy_and_flush	/* copy the rest */
	b .start_here_pSeries
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
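/*
 * A rough C model of copy_and_flush (a sketch, not the real loop;
 * dcbst/icbi below are the actual cache ops used):
 *
 *	void copy_and_flush(char *dest, const char *src,
 *			    unsigned long limit, unsigned long offset)
 *	{
 *		while (offset < limit) {
 *			// one "line" = 16 doublewords copied 8 bytes at a time
 *			memcpy(dest + offset, src + offset, 16 * 8);
 *			dcbst(dest + offset);	// push the line out to memory
 *			icbi(dest + offset);	// toss any stale icache copy
 *			offset += 16 * 8;
 *		}
 *	}
 */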
_GLOBAL(copy_and_flush)
4:	li r0,16		/* Use the least common		*/
				/* denominator cache line	*/
				/* size. This results in	*/
				/* extra cache line flushes	*/
				/* but operation is correct.	*/
				/* Can't get cache line size	*/
				/* from NACA as it is being	*/
				/* moved around.		*/
	mtctr r0		/* put # words/line in ctr */
3:	addi r6,r6,8		/* copy a cache line */
	dcbst r6,r3		/* write it to memory */
	icbi r6,r3		/* flush the icache line */
/*
 * load_up_fpu(unused, unused, tsk)
 * Disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 * On SMP we know the fpu is free, since we give it up every
 * switch (i.e., no lazy save of the FP registers).
 * On entry: r13 == 'current' && last_task_used_math != 'current'
 */
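/*
 * The lazy-switch policy, sketched in C for the UP case (field names
 * here are illustrative, not the exact ones):
 *
 *	if (last_task_used_math) {
 *		save_fprs_and_fpscr(last_task_used_math);	// stfd/mffs below
 *		last_task_used_math->thread.regs->msr &=
 *			~(MSR_FP | MSR_FE0 | MSR_FE1);	// FP traps again for it
 *	}
 *	current->thread.regs->msr |= MSR_FP | current->thread.fpexc_mode;
 *	restore_fprs_and_fpscr(current);
 *	last_task_used_math = current;		// skipped on SMP
 */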
_STATIC(load_up_fpu)
	mfmsr r5		/* grab the current MSR */
	mtmsrd r5		/* enable use of fpu now */

	/*
	 * For SMP, we don't do lazy FPU switching because it just gets too
	 * horrendously complex, especially when a task switches from one CPU
	 * to another. Instead we call giveup_fpu in switch_to.
	 */
#ifndef CONFIG_SMP
	ld r3,last_task_used_math@got(r2)

	/* Save FP state to last_task_used_math's THREAD struct */
	stfd fr0,THREAD_FPSCR(r4)

	/* Disable FP for last_task_used_math */
	ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li r6,MSR_FP|MSR_FE0|MSR_FE1
	std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#endif /* CONFIG_SMP */

	/* enable use of FP after return */
	ld r4,PACACURRENT(r13)
	addi r5,r4,THREAD	/* Get THREAD */
	ld r4,THREAD_FPEXC_MODE(r5)
	lfd fr0,THREAD_FPSCR(r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_math to 'current' */
	subi r4,r5,THREAD	/* Back to 'current' */
#endif /* CONFIG_SMP */

	/* restore registers and return */
	b fast_exception_return

/*
 * disable_kernel_fp()
 */
_GLOBAL(disable_kernel_fp)
	rldicl r0,r3,(63-MSR_FP_LG),1
	rldicl r3,r0,(MSR_FP_LG+1),0
	mtmsrd r3		/* disable use of fpu now */

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
_GLOBAL(giveup_fpu)
	mtmsrd r5		/* enable use of fpu now */
	beqlr-			/* if no previous owner, done */
	addi r3,r3,THREAD	/* want THREAD of task */
	stfd fr0,THREAD_FPSCR(r3)
	ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li r3,MSR_FP|MSR_FE0|MSR_FE1
	andc r4,r4,r3		/* disable FP for previous task */
	std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifndef CONFIG_SMP
	ld r4,last_task_used_math@got(r2)
#endif /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC

/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (i.e., no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
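/*
 * Same lazy-switch idea as load_up_fpu above, in C (a sketch; names
 * are illustrative):
 *
 *	if (last_task_used_altivec) {
 *		save_vrs_and_vscr(last_task_used_altivec);
 *		last_task_used_altivec->thread.regs->msr &= ~MSR_VEC;
 *	}
 *	current->thread.regs->msr |= MSR_VEC;
 *	restore_vrs_and_vscr(current);
 *	last_task_used_altivec = current;	// skipped on SMP
 */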
_STATIC(load_up_altivec)
	mfmsr r5		/* grab the current MSR */
	oris r5,r5,MSR_VEC@h
	mtmsrd r5		/* enable use of VMX now */

	/*
	 * For SMP, we don't do lazy VMX switching because it just gets too
	 * horrendously complex, especially when a task switches from one CPU
	 * to another. Instead we call giveup_altivec in switch_to.
	 * VRSAVE isn't dealt with here, that is done in the normal context
	 * switch code. Note that we could rely on vrsave value to eventually
	 * avoid saving all of the VREGs here...
	 */
#ifndef CONFIG_SMP
	ld r3,last_task_used_altivec@got(r2)

	/* Save VMX state to last_task_used_altivec's THREAD struct */

	/* Disable VMX for last_task_used_altivec */
	ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#endif /* CONFIG_SMP */

	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr r4,SPRN_VRSAVE
	mtspr SPRN_VRSAVE,r4

	/* enable use of VMX after return */
	ld r4,PACACURRENT(r13)
	addi r5,r4,THREAD	/* Get THREAD */
	oris r12,r12,MSR_VEC@h
	stw r4,THREAD_USED_VR(r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi r4,r5,THREAD	/* Back to 'current' */
#endif /* CONFIG_SMP */

	/* restore registers and return */
	b fast_exception_return

/*
 * disable_kernel_altivec()
 */
_GLOBAL(disable_kernel_altivec)
	rldicl r0,r3,(63-MSR_VEC_LG),1
	rldicl r3,r0,(MSR_VEC_LG+1),0
	mtmsrd r3		/* disable use of VMX now */

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	oris r5,r5,MSR_VEC@h
	mtmsrd r5		/* enable use of VMX now */
	beqlr-			/* if no previous owner, done */
	addi r3,r3,THREAD	/* want THREAD of task */
	ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	andc r4,r4,r3		/* disable VMX for previous task */
	std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifndef CONFIG_SMP
	ld r4,last_task_used_altivec@got(r2)
#endif /* CONFIG_SMP */

#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.globl pmac_secondary_start_1
pmac_secondary_start_1:
	b .pmac_secondary_start

	.globl pmac_secondary_start_2
pmac_secondary_start_2:
	b .pmac_secondary_start

	.globl pmac_secondary_start_3
pmac_secondary_start_3:
	b .pmac_secondary_start

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */

	/* Copy some CPU settings from CPU 0 */
	bl .__restore_cpu_setup

	/* pSeries does that early though I don't think we really need it */
	mtmsrd r3		/* RI on */

	/* Set up a paca value for this processor. */
	LOADADDR(r4, paca)	/* Get base vaddr of paca array */
	mulli r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
	add r13,r13,r4		/* for this processor. */
	mtspr SPRG3,r13		/* Save vaddr of paca in SPRG3 */

	/* Create a temp kernel stack for use before relocation is on. */
	ld r1,PACAEMERGSP(r13)
	subi r1,r1,STACK_FRAME_OVERHEAD

	b .__secondary_start

#endif /* CONFIG_PPC_PMAC */
/*
 * This function is called after the master CPU has released the
 * secondary processors. The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1    = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
_GLOBAL(__secondary_start)
	HMT_MEDIUM		/* Set thread priority to MEDIUM */
	stb r6,PACAPROCENABLED(r13)

#ifndef CONFIG_PPC_ISERIES
	/* Initialize the page table pointer register. */
	ld r6,0(r6)		/* get the value of _SDR1 */
	mtspr SDR1,r6		/* set the htab location */
#endif

	/* Initialize the first segment table (or SLB) entry */
	ld r3,PACASTABVIRT(r13)	/* get addr of segment table */

	/* Initialize the kernel stack. Just a repeat for iSeries. */
	LOADADDR(r3,current_set)
	sldi r28,r24,3		/* get current_set[cpu#] */
	addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std r1,PACAKSAVE(r13)

	ld r3,PACASTABREAL(r13)	/* get raddr of segment table */
	ori r4,r3,1		/* turn on valid bit */

#ifdef CONFIG_PPC_ISERIES
	li r0,-1		/* hypervisor call */
	sldi r3,r3,63		/* 0x8000000000000000 */
	ori r3,r3,4		/* 0x8000000000000004 */
	sc			/* HvCall_setASR */
#else
	li r3,SYSTEMCFG_PHYS_ADDR	/* r3 = ptr to systemcfg */
	lwz r3,PLATFORM(r3)	/* r3 = platform flags */
	cmpldi r3,PLATFORM_PSERIES_LPAR
	cmpwi r3,0x37		/* SStar */
	cmpwi r3,0x36		/* IStar */
	cmpwi r3,0x34		/* Pulsar */
97:	li r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HVSC			/* Invoking hcall */

98:	/* !(rpa hypervisor) || !(star) */
	mtasr r4		/* set the stab location */
#endif

	/* enable MMU and jump to start_secondary */
	LOADADDR(r3,.start_secondary_prolog)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE

/*
 * Running with relocation on at this point. All we want to do is
 * zero the stack back-chain pointer before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	std r3,0(r1)		/* Zero the stack frame pointer */

/*
 * This subroutine clobbers r11 and r12.
 */
_GLOBAL(enable_64b_mode)
	mfmsr r11		/* grab the current MSR */
	rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
#ifdef CONFIG_PPC_PSERIES
/*
 * This is where the main kernel code starts.
 */
_STATIC(start_here_pSeries)
	/* get a new offset, now that the kernel has moved. */
	mtmsrd r6		/* RI on */

	/* setup the systemcfg pointer which is needed by *tab_initialize */
	LOADADDR(r6,systemcfg)
	sub r6,r6,r26		/* addr of the variable systemcfg */
	li r27,SYSTEMCFG_PHYS_ADDR
	std r27,0(r6)		/* set the value of systemcfg */

	/* setup the naca pointer which is needed by *tab_initialize */
	sub r6,r6,r26		/* addr of the variable naca */
	li r27,NACA_PHYS_ADDR
	std r27,0(r6)		/* set the value of naca */

	/* Start up the second thread on cpu 0 */
	cmpwi r3,0x34		/* Pulsar */
	cmpwi r3,0x36		/* Icestar */
	cmpwi r3,0x37		/* SStar */
	b 91f			/* HMT not supported */
	bl .hmt_start_secondary

	/*
	 * All secondary cpus are now spinning on a common
	 * spinloop, release them all now so they can start
	 * to spin on their individual paca spinloops.
	 * For non-SMP kernels, the secondary cpus never
	 * get out of the common spinloop.
	 */
	LOADADDR(r5,__secondary_hold_spinloop)

	/* The following gets the stack and TOC set up with the regs */
	/* pointing to the real addr of the kernel stack. This is    */
	/* all done to support the C function call below which sets  */
	/* up the htab. This is done because we have relocated the   */
	/* kernel but are still running in real mode.		      */
	LOADADDR(r3,init_thread_union)

	/* set up a stack pointer (physical address) */
	addi r1,r3,THREAD_SIZE
	stdu r0,-STACK_FRAME_OVERHEAD(r1)

	/* set up the TOC (physical address) */
	LOADADDR(r2,__toc_start)

	LOADADDR(r3,cpu_specs)
	LOADADDR(r4,cur_cpu_spec)

	/* Get the pointer to the segment table which is used by */
	/* stab_initialize					  */
	LOADADDR(r27, boot_cpuid)

	LOADADDR(r24, paca)	/* Get base vaddr of paca array */
	mulli r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
	add r13,r13,r24		/* for this processor. */
	sub r13,r13,r26		/* convert to physical addr */
	mtspr SPRG3,r13		/* PPPBBB: Temp... -Peter */
	ld r3,PACASTABREAL(r13)
	ori r4,r3,1		/* turn on valid bit */

	li r3,SYSTEMCFG_PHYS_ADDR	/* r3 = ptr to systemcfg */
	lwz r3,PLATFORM(r3)	/* r3 = platform flags */
	cmpldi r3,PLATFORM_PSERIES_LPAR
	cmpwi r3,0x37		/* SStar */
	cmpwi r3,0x36		/* IStar */
	cmpwi r3,0x34		/* Pulsar */
97:	li r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HVSC			/* Invoking hcall */

98:	/* !(rpa hypervisor) || !(star) */
	mtasr r4		/* set the stab location */

	ld r3,PACASTABREAL(r6)	/* restore r3 for stab_initialize */

	/* Initialize an initial memory mapping and turn on relocation. */

	li r3,SYSTEMCFG_PHYS_ADDR	/* r3 = ptr to systemcfg */
	lwz r3,PLATFORM(r3)	/* r3 = platform flags */
	/* Test if bit 0 is set (LPAR bit) */
	LOADADDR(r6,_SDR1)	/* Only if NOT LPAR */
	ld r6,0(r6)		/* get the value of _SDR1 */
	mtspr SDR1,r6		/* set the htab location */
	LOADADDR(r3,.start_here_common)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
#endif /* CONFIG_PPC_PSERIES */
	/* This is where all platforms converge execution */
_STATIC(start_here_common)
	/* relocation is on at this point */

	/* The following code sets up the SP and TOC now that we are */
	/* running with translation enabled. */
	LOADADDR(r3,init_thread_union)

	/* set up the stack */
	addi r1,r3,THREAD_SIZE
	stdu r0,-STACK_FRAME_OVERHEAD(r1)

	/* Apply the CPU-specific fixups (nop out sections not relevant
	 * to this CPU) */
	bl .do_cpu_ftr_fixups

	/* setup the systemcfg pointer */
	LOADADDR(r9,systemcfg)
	SET_REG_TO_CONST(r8, SYSTEMCFG_VIRT_ADDR)

	/* setup the naca pointer */
	SET_REG_TO_CONST(r8, NACA_VIRT_ADDR)
	std r8,0(r9)		/* set the value of the naca ptr */

	LOADADDR(r26, boot_cpuid)

	LOADADDR(r24, paca)	/* Get base vaddr of paca array */
	mulli r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
	add r13,r13,r24		/* for this processor. */

	/* ptr to current */
	LOADADDR(r4,init_task)
	std r4,PACACURRENT(r13)

	std r1,PACAKSAVE(r13)

	/* Restore the parms passed in from the bootloader. */

	/* Load up the kernel context */
#ifdef DO_SOFT_DISABLE
	stb r5,PACAPROCENABLED(r13)	/* Soft Disabled */
	ori r5,r5,MSR_EE	/* Hard Enabled */
#endif
_GLOBAL(__setup_cpu_power3)

	LOADADDR(r5, hmt_thread_data)
	cmpwi r7,0x34		/* Pulsar */
	cmpwi r7,0x36		/* Icestar */
	cmpwi r7,0x37		/* SStar */
	bl .hmt_start_secondary

__hmt_secondary_hold:
	LOADADDR(r5, hmt_thread_data)
93:	andi. r6,r6,0x3f
	b .pseries_secondary_smp_init

_GLOBAL(hmt_start_secondary)
	LOADADDR(r4,__hmt_secondary_hold)
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.globl empty_zero_page
empty_zero_page:

	.globl swapper_pg_dir
swapper_pg_dir:

	/* 1 page segment table per cpu (max 48, cpu0 allocated at STAB0_PHYS_ADDR) */

/*
 * This space gets a copy of optional info passed to us by the bootstrap.
 * It is used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.space COMMAND_LINE_SIZE