/*
 * turn_on_mmu: enable address translation and jump to start_here.
 * Sets MSR_IR|MSR_DR in a copy of the MSR, points SRR0 at
 * start_here, and uses rfi so translation comes on atomically
 * with the jump.
 * Diff hunk: '-' lines are the old bare SPR names, '+' lines the
 * new SPRN_-prefixed ones; only the symbolic names change, the
 * instruction stream is identical.
 */
turn_on_mmu:
mfmsr r0
ori r0,r0,MSR_DR|MSR_IR
- mtspr SRR1,r0
+ mtspr SPRN_SRR1,r0
lis r0,start_here@h
ori r0,r0,start_here@l
- mtspr SRR0,r0
+ mtspr SPRN_SRR0,r0
SYNC
rfi /* enables MMU */
* task's thread_struct.
*/
/*
 * EXCEPTION_PROLOG: common exception entry. Stashes r10/r11 in
 * SPRG0/SPRG1 and the CR in r10 before the stack-selection and
 * register-save stages (PROLOG_1/PROLOG_2).
 * Diff hunk: SPRG0/SPRG1 -> SPRN_SPRG0/SPRN_SPRG1 rename only.
 */
#define EXCEPTION_PROLOG \
- mtspr SPRG0,r10; \
- mtspr SPRG1,r11; \
+ mtspr SPRN_SPRG0,r10; \
+ mtspr SPRN_SPRG1,r11; \
mfcr r10; \
EXCEPTION_PROLOG_1; \
EXCEPTION_PROLOG_2
/*
 * EXCEPTION_PROLOG_1: decide which kernel stack to use.  SRR1[PR]
 * says whether we came from user or kernel: kernel -> tophys(r1),
 * user -> find the task's kernel stack via SPRG3 (phys pointer to
 * the thread_struct) -> thread_info + THREAD_SIZE.
 * NOTE(review): this diff hunk elides context lines; the
 * register-save sequence from "stw r10,_CCR(r11)" onward belongs
 * to EXCEPTION_PROLOG_2 in the full file (the intervening
 * "#define EXCEPTION_PROLOG_2" and the 1: label are not shown) --
 * confirm against the unpatched source.
 * Changes are rename-only: SRRn/SPRGn -> SPRN_ forms.
 */
#define EXCEPTION_PROLOG_1 \
- mfspr r11,SRR1; /* check whether user or kernel */ \
+ mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
andi. r11,r11,MSR_PR; \
tophys(r11,r1); /* use tophys(r1) if kernel */ \
beq 1f; \
- mfspr r11,SPRG3; \
+ mfspr r11,SPRN_SPRG3; \
lwz r11,THREAD_INFO-THREAD(r11); \
addi r11,r11,THREAD_SIZE; \
tophys(r11,r11); \
stw r10,_CCR(r11); /* save registers */ \
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
- mfspr r10,SPRG0; \
+ mfspr r10,SPRN_SPRG0; \
stw r10,GPR10(r11); \
- mfspr r12,SPRG1; \
+ mfspr r12,SPRN_SPRG1; \
stw r12,GPR11(r11); \
mflr r10; \
stw r10,_LINK(r11); \
- mfspr r12,SRR0; \
- mfspr r9,SRR1; \
+ mfspr r12,SPRN_SRR0; \
+ mfspr r9,SPRN_SRR1; \
stw r1,GPR1(r11); \
stw r1,0(r11); \
tovirt(r1,r11); /* set new kernel sp */ \
/*
 * 0x200 Machine check exception vector: standard prolog, then
 * save DAR/DSISR into the exception frame and transfer to
 * MachineCheckException() via EXC_XFER_STD.
 * Diff hunk: DAR/DSISR -> SPRN_DAR/SPRN_DSISR rename only.
 */
. = 0x200
MachineCheck:
EXCEPTION_PROLOG
- mfspr r4,DAR
+ mfspr r4,SPRN_DAR
stw r4,_DAR(r11)
- mfspr r5,DSISR
+ mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_STD(0x200, MachineCheckException)
/*
 * 0x300 Data access exception vector: save DSISR in the frame,
 * pass DSISR in r5 and the faulting address (DAR) in r4 to
 * handle_page_fault() via EXC_XFER_EE_LITE.
 * Diff hunk: DSISR/DAR -> SPRN_ rename only.
 */
. = 0x300
DataAccess:
EXCEPTION_PROLOG
- mfspr r10,DSISR
+ mfspr r10,SPRN_DSISR
stw r10,_DSISR(r11)
mr r5,r10
- mfspr r4,DAR
+ mfspr r4,SPRN_DAR
EXC_XFER_EE_LITE(0x300, handle_page_fault)
/* Instruction access exception.
/*
 * 0x600 Alignment exception vector: save DAR/DSISR into the
 * frame and transfer to AlignmentException() via EXC_XFER_EE.
 * Diff hunk: DAR/DSISR -> SPRN_ rename only.
 */
. = 0x600
Alignment:
EXCEPTION_PROLOG
- mfspr r4,DAR
+ mfspr r4,SPRN_DAR
stw r4,_DAR(r11)
- mfspr r5,DSISR
+ mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE(0x600, AlignmentException)
/*
 * NOTE(review): label-elided fragment.  Reading SRR0 as the fault
 * address and ending with a store to MI_RPN, this is presumably
 * the MPC8xx InstructionTLBMiss software-reload handler --
 * confirm against the full file.  The leading "stw r3, 8(r0)" /
 * "#endif" is the tail of an elided CPU6-errata #ifdef block.
 * Diff hunk: M_TW/SRR0/MD_EPN/M_TWB/MI_TWC/MD_TWC/MI_RPN renamed
 * to SPRN_ forms; DO_8xx_CPU6() wraps each mtspr for the CPU6
 * silicon errata workaround.
 */
stw r3, 8(r0)
#endif
DO_8xx_CPU6(0x3f80, r3)
- mtspr M_TW, r10 /* Save a couple of working registers */
+ mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
mfcr r10
stw r10, 0(r0)
stw r11, 4(r0)
- mfspr r10, SRR0 /* Get effective address of fault */
+ mfspr r10, SPRN_SRR0 /* Get effective address of fault */
DO_8xx_CPU6(0x3780, r3)
- mtspr MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */
- mfspr r10, M_TWB /* Get level 1 table entry address */
+ mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */
+ mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
/* If we are faulting a kernel address, we have to use the
 * kernel page tables.
 */
ori r11,r11,1 /* Set valid bit */
DO_8xx_CPU6(0x2b80, r3)
- mtspr MI_TWC, r11 /* Set segment attributes */
+ mtspr SPRN_MI_TWC, r11 /* Set segment attributes */
DO_8xx_CPU6(0x3b80, r3)
- mtspr MD_TWC, r11 /* Load pte table base address */
- mfspr r11, MD_TWC /* ....and get the pte address */
+ mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
+ mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
lwz r10, 0(r11) /* Get the pte */
ori r10, r10, _PAGE_ACCESSED
2: li r11, 0x00f0
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
DO_8xx_CPU6(0x2d80, r3)
- mtspr MI_RPN, r10 /* Update TLB entry */
+ mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
/* Restore the registers saved at entry (CR, r11, then r10). */
- mfspr r10, M_TW /* Restore registers */
+ mfspr r10, SPRN_M_TW /* Restore registers */
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
/*
 * NOTE(review): label-elided fragment.  Walking via M_TWB and
 * finishing with a store to MD_RPN, this is presumably the 8xx
 * DataTLBMiss handler -- confirm against the full file.  It also
 * merges the Linux PTE's Guarded bit (bit 27) back into MD_TWC
 * before reloading the entry.
 * Diff hunk: SPRN_ rename only.
 */
stw r3, 8(r0)
#endif
DO_8xx_CPU6(0x3f80, r3)
- mtspr M_TW, r10 /* Save a couple of working registers */
+ mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
mfcr r10
stw r10, 0(r0)
stw r11, 4(r0)
- mfspr r10, M_TWB /* Get level 1 table entry address */
+ mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
/* If we are faulting a kernel address, we have to use the
 * kernel page tables.
 */
ori r11, r11, 1 /* Set valid bit in physical L2 page */
DO_8xx_CPU6(0x3b80, r3)
- mtspr MD_TWC, r11 /* Load pte table base address */
- mfspr r10, MD_TWC /* ....and get the pte address */
+ mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
+ mfspr r10, SPRN_MD_TWC /* ....and get the pte address */
lwz r10, 0(r10) /* Get the pte */
/* Insert the Guarded flag into the TWC from the Linux PTE.
 */
rlwimi r11, r10, 0, 27, 27
DO_8xx_CPU6(0x3b80, r3)
- mtspr MD_TWC, r11
+ mtspr SPRN_MD_TWC, r11
- mfspr r11, MD_TWC /* get the pte address again */
+ mfspr r11, SPRN_MD_TWC /* get the pte address again */
ori r10, r10, _PAGE_ACCESSED
stw r10, 0(r11)
2: li r11, 0x00f0
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
DO_8xx_CPU6(0x3d80, r3)
- mtspr MD_RPN, r10 /* Update TLB entry */
+ mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
/* Restore the registers saved at entry (CR, r11, then r10). */
- mfspr r10, M_TW /* Restore registers */
+ mfspr r10, SPRN_M_TW /* Restore registers */
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
/*
 * NOTE(review): label-elided fragment.  Checking DSISR bit 6
 * (0x0200 in the high halfword = store fault) and then setting
 * DIRTY/ACCESSED/HWWRITE in the PTE, this is presumably the 8xx
 * DataTLBError handler doing the software dirty-bit update --
 * confirm against the full file.  Non-store faults branch to 2:
 * which just restores registers (the rfi at that path's end is
 * elided here except for the one at the main path's "#endif").
 * Diff hunk: SPRN_ rename only.
 */
stw r3, 8(r0)
#endif
DO_8xx_CPU6(0x3f80, r3)
- mtspr M_TW, r10 /* Save a couple of working registers */
+ mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
mfcr r10
stw r10, 0(r0)
stw r11, 4(r0)
/* First, make sure this was a store operation.
 */
- mfspr r10, DSISR
+ mfspr r10, SPRN_DSISR
andis. r11, r10, 0x0200 /* If set, indicates store op */
beq 2f
* are initialized in mapin_ram(). This will avoid the problem,
* assuming we only use the dcbi instruction on kernel addresses.
*/
- mfspr r10, DAR
+ mfspr r10, SPRN_DAR
rlwinm r11, r10, 0, 0, 19
ori r11, r11, MD_EVALID
- mfspr r10, M_CASID
+ mfspr r10, SPRN_M_CASID
rlwimi r11, r10, 0, 28, 31
DO_8xx_CPU6(0x3780, r3)
- mtspr MD_EPN, r11
+ mtspr SPRN_MD_EPN, r11
- mfspr r10, M_TWB /* Get level 1 table entry address */
+ mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
/* If we are faulting a kernel address, we have to use the
 * kernel page tables.
 */
ori r11, r11, 1 /* Set valid bit in physical L2 page */
DO_8xx_CPU6(0x3b80, r3)
- mtspr MD_TWC, r11 /* Load pte table base address */
- mfspr r11, MD_TWC /* ....and get the pte address */
+ mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
+ mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
lwz r10, 0(r11) /* Get the pte */
andi. r11, r10, _PAGE_RW /* Is it writeable? */
/* Update 'changed', among others.
 */
ori r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
- mfspr r11, MD_TWC /* Get pte address again */
+ mfspr r11, SPRN_MD_TWC /* Get pte address again */
stw r10, 0(r11) /* and update pte in table */
/* The Linux PTE won't go exactly into the MMU TLB.
li r11, 0x00f0
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
DO_8xx_CPU6(0x3d80, r3)
- mtspr MD_RPN, r10 /* Update TLB entry */
+ mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
- mfspr r10, M_TW /* Restore registers */
+ mfspr r10, SPRN_M_TW /* Restore registers */
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
#endif
rfi
/* Not-a-store path: just restore the saved registers and return. */
2:
- mfspr r10, M_TW /* Restore registers */
+ mfspr r10, SPRN_M_TW /* Restore registers */
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
/*
 * NOTE(review): fragment of the post-MMU kernel entry path
 * (start_here-style code, label elided).  It publishes the init
 * task's phys thread_struct in SPRG3, zeroes SPRG2 ("r1 holds a
 * kernel sp"), loads the page-table walk base into M_TWB, then
 * uses two rfi sequences: first to jump to the 2: label with
 * translation per MSR_KERNEL&~(MSR_IR|MSR_DR), then to enter
 * start_kernel with the full kernel MSR.  The stray
 * "stw/lwz r3, 12(r4)" pair is the tail of an elided CPU6-errata
 * block -- confirm.
 * Diff hunk: SPRGn/SRRn/M_TWB -> SPRN_ rename only.
 */
/* ptr to phys current thread */
tophys(r4,r2)
addi r4,r4,THREAD /* init task's THREAD */
- mtspr SPRG3,r4
+ mtspr SPRN_SPRG3,r4
li r3,0
- mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
+ mtspr SPRN_SPRG2,r3 /* 0 => r1 has kernel sp */
/* stack */
lis r1,init_thread_union@ha
stw r3, 12(r4)
lwz r3, 12(r4)
#endif
- mtspr M_TWB, r6
+ mtspr SPRN_M_TWB, r6
lis r4,2f@h
ori r4,r4,2f@l
tophys(r4,r4)
li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
- mtspr SRR0,r4
- mtspr SRR1,r3
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r3
rfi
/* Load up the kernel context */
2:
li r4,MSR_KERNEL
lis r3,start_kernel@h
ori r3,r3,start_kernel@l
- mtspr SRR0,r3
- mtspr SRR1,r4
+ mtspr SPRN_SRR0,r3
+ mtspr SPRN_SRR1,r4
rfi /* enable MMU and jump to start_kernel */
/*
 * NOTE(review): fragment of the initial MMU setup (initial_mmu-
 * style routine, label elided).  Programs MI_CTR/MD_CTR, pins the
 * first 8M of KERNELBASE into both I- and D-TLBs, maps 8M at the
 * IMMR (cache-inhibited), and under CONFIG_PIN_TLB maps two more
 * 8M kernel data pages.
 * Diff hunk: SPRN_ rename only.
 */
/* Set up the initial MMU state so we can do the first level of
#else
li r8, 0
#endif
- mtspr MI_CTR, r8 /* Set instruction MMU control */
+ mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
#ifdef CONFIG_PIN_TLB
lis r10, (MD_RSV4I | MD_RESETVAL)@h
#ifndef CONFIG_8xx_COPYBACK
oris r10, r10, MD_WTDEF@h
#endif
- mtspr MD_CTR, r10 /* Set data TLB control */
+ mtspr SPRN_MD_CTR, r10 /* Set data TLB control */
/* Now map the lower 8 Meg into the TLBs. For this quick hack,
 * we can load the instruction and data TLB registers with the
 */
lis r8, KERNELBASE@h /* Create vaddr for TLB */
ori r8, r8, MI_EVALID /* Mark it valid */
- mtspr MI_EPN, r8
- mtspr MD_EPN, r8
+ mtspr SPRN_MI_EPN, r8
+ mtspr SPRN_MD_EPN, r8
li r8, MI_PS8MEG /* Set 8M byte page */
ori r8, r8, MI_SVALID /* Make it valid */
- mtspr MI_TWC, r8
- mtspr MD_TWC, r8
+ mtspr SPRN_MI_TWC, r8
+ mtspr SPRN_MD_TWC, r8
li r8, MI_BOOTINIT /* Create RPN for address 0 */
- mtspr MI_RPN, r8 /* Store TLB entry */
- mtspr MD_RPN, r8
+ mtspr SPRN_MI_RPN, r8 /* Store TLB entry */
+ mtspr SPRN_MD_RPN, r8
lis r8, MI_Kp@h /* Set the protection mode */
- mtspr MI_AP, r8
- mtspr MD_AP, r8
+ mtspr SPRN_MI_AP, r8
+ mtspr SPRN_MD_AP, r8
/* Map another 8 MByte at the IMMR to get the processor
 * internal registers (among other things).
 */
#ifdef CONFIG_PIN_TLB
addi r10, r10, 0x0100
- mtspr MD_CTR, r10
+ mtspr SPRN_MD_CTR, r10
#endif
/* NOTE(review): raw SPR number 638 is IMMR -- since this patch
 * introduces SPRN_ symbols, SPRN_IMMR would be clearer here. */
mfspr r9, 638 /* Get current IMMR */
andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */
mr r8, r9 /* Create vaddr for TLB */
ori r8, r8, MD_EVALID /* Mark it valid */
- mtspr MD_EPN, r8
+ mtspr SPRN_MD_EPN, r8
li r8, MD_PS8MEG /* Set 8M byte page */
ori r8, r8, MD_SVALID /* Make it valid */
- mtspr MD_TWC, r8
+ mtspr SPRN_MD_TWC, r8
mr r8, r9 /* Create paddr for TLB */
ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
- mtspr MD_RPN, r8
+ mtspr SPRN_MD_RPN, r8
#ifdef CONFIG_PIN_TLB
/* Map two more 8M kernel data pages.
 */
addi r10, r10, 0x0100
- mtspr MD_CTR, r10
+ mtspr SPRN_MD_CTR, r10
lis r8, KERNELBASE@h /* Create vaddr for TLB */
addis r8, r8, 0x0080 /* Add 8M */
ori r8, r8, MI_EVALID /* Mark it valid */
- mtspr MD_EPN, r8
+ mtspr SPRN_MD_EPN, r8
li r9, MI_PS8MEG /* Set 8M byte page */
ori r9, r9, MI_SVALID /* Make it valid */
- mtspr MD_TWC, r9
+ mtspr SPRN_MD_TWC, r9
li r11, MI_BOOTINIT /* Create RPN for address 0 */
addis r11, r11, 0x0080 /* Add 8M */
/* NOTE(review): r11 is computed as the RPN just above but the
 * store below writes r8 (the EPN/vaddr) into MD_RPN; r11 is
 * dead.  This looks like a pre-existing bug (should presumably
 * be r11) -- not touched by this rename-only patch; confirm
 * upstream. */
- mtspr MD_RPN, r8
+ mtspr SPRN_MD_RPN, r8
addis r8, r8, 0x0080 /* Add 8M */
- mtspr MD_EPN, r8
- mtspr MD_TWC, r9
+ mtspr SPRN_MD_EPN, r8
+ mtspr SPRN_MD_TWC, r9
addis r11, r11, 0x0080 /* Add 8M */
/* NOTE(review): same r8-vs-r11 concern as the MD_RPN store
 * above -- confirm. */
- mtspr MD_RPN, r8
+ mtspr SPRN_MD_RPN, r8
#endif
/*
 * NOTE(review): cache-enable tail of the MMU init routine (label
 * elided).  Invalidates both caches via IDC_INVALL, enables the
 * I-cache, then enables the D-cache in copy-back mode under
 * CONFIG_8xx_COPYBACK or (debug path) forces write-through via
 * DC_SFWT before enabling.  Ends with blr.
 * Diff hunk: IC_CST/DC_CST -> SPRN_ rename only.
 */
/* Since the cache is enabled according to the information we
 * We should probably check/set other modes....later.
 */
lis r8, IDC_INVALL@h
- mtspr IC_CST, r8
- mtspr DC_CST, r8
+ mtspr SPRN_IC_CST, r8
+ mtspr SPRN_DC_CST, r8
lis r8, IDC_ENABLE@h
- mtspr IC_CST, r8
+ mtspr SPRN_IC_CST, r8
#ifdef CONFIG_8xx_COPYBACK
- mtspr DC_CST, r8
+ mtspr SPRN_DC_CST, r8
#else
/* For a debug option, I left this here to easily enable
 * the write through cache mode
 */
lis r8, DC_SFWT@h
- mtspr DC_CST, r8
+ mtspr SPRN_DC_CST, r8
lis r8, IDC_ENABLE@h
- mtspr DC_CST, r8
+ mtspr SPRN_DC_CST, r8
#endif
blr
/*
 * NOTE(review): fragment of the context-switch helper (label
 * elided; presumably set_context(r3=context, r4=pgd) -- confirm).
 * First branch is the CPU6-errata path: each mtspr is preceded by
 * a store/load of the SPR's errata code (0x3980 = M_TWB,
 * 0x3380 = M_CASID) through the dummy buffer in r6.  The #else
 * path programs M_CASID and the phys pgd into M_TWB directly.
 * Ends with SYNC; blr.
 * Diff hunk: M_TWB/M_CASID -> SPRN_ rename only.
 */
li r7, 0x3980
stw r7, 12(r6)
lwz r7, 12(r6)
- mtspr M_TWB, r4 /* Update MMU base address */
+ mtspr SPRN_M_TWB, r4 /* Update MMU base address */
li r7, 0x3380
stw r7, 12(r6)
lwz r7, 12(r6)
- mtspr M_CASID, r3 /* Update context */
+ mtspr SPRN_M_CASID, r3 /* Update context */
#else
- mtspr M_CASID,r3 /* Update context */
+ mtspr SPRN_M_CASID,r3 /* Update context */
tophys (r4, r4)
- mtspr M_TWB, r4 /* and pgd */
+ mtspr SPRN_M_TWB, r4 /* and pgd */
#endif
SYNC
blr