/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004 by Thiemo Seufer
 */
#include <stdarg.h>

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/smp.h>
#include <asm/war.h>

/* #define DEBUG_TLB */
static __init int __attribute__((unused)) r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static __init int __attribute__((unused)) r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static __init int __attribute__((unused)) bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}
/*
 * A little micro-assembler, intended for TLB refill handler
 * synthesizing. It is intentionally kept simple: it supports only a
 * subset of instructions, and does not try to hide pipeline effects
 * like branch delay slots.
 */

enum fields
{
	RS	= 0x001,
	RT	= 0x002,
	RD	= 0x004,
	RE	= 0x008,
	SIMM	= 0x010,
	UIMM	= 0x020,
	BIMM	= 0x040,
	JIMM	= 0x080,
	FUNC	= 0x100
};

#define OP_MASK		0x3f
#define OP_SH		26
#define RS_MASK		0x1f
#define RS_SH		21
#define RT_MASK		0x1f
#define RT_SH		16
#define RD_MASK		0x1f
#define RD_SH		11
#define RE_MASK		0x1f
#define RE_SH		6
#define IMM_MASK	0xffff
#define IMM_SH		0
#define JIMM_MASK	0x3ffffff
#define JIMM_SH		0
#define FUNC_MASK	0x3f	/* the function field is 6 bits wide */
#define FUNC_SH		0
enum opcode {
	insn_invalid,
	insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
	insn_bgez, insn_bgezl, insn_bltz, insn_bltzl, insn_bne,
	insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0,
	insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32,
	insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld,
	insn_lui, insn_lw, insn_mfc0, insn_mtc0, insn_ori, insn_rfe,
	insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
	insn_tlbp, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori
};

struct insn {
	enum opcode opcode;
	u32 match;
	enum fields fields;
};
/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f)					\
	((a) << OP_SH						\
	 | (b) << RS_SH						\
	 | (c) << RT_SH						\
	 | (d) << RD_SH						\
	 | (e) << RE_SH						\
	 | (f) << FUNC_SH)
static __initdata struct insn insn_table[] = {
	{ insn_addiu, M(addiu_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_addu, M(spec_op,0,0,0,0,addu_op), RS | RT | RD },
	{ insn_and, M(spec_op,0,0,0,0,and_op), RS | RT | RD },
	{ insn_andi, M(andi_op,0,0,0,0,0), RS | RT | UIMM },
	{ insn_beq, M(beq_op,0,0,0,0,0), RS | RT | BIMM },
	{ insn_bgez, M(bcond_op,0,bgez_op,0,0,0), RS | BIMM },
	{ insn_bgezl, M(bcond_op,0,bgezl_op,0,0,0), RS | BIMM },
	{ insn_bltz, M(bcond_op,0,bltz_op,0,0,0), RS | BIMM },
	{ insn_bltzl, M(bcond_op,0,bltzl_op,0,0,0), RS | BIMM },
	{ insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM },
	{ insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD },
	{ insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD },
	{ insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD },
	{ insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE },
	{ insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE },
	{ insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE },
	{ insn_dsrl, M(spec_op,0,0,0,0,dsrl_op), RT | RD | RE },
	{ insn_dsrl32, M(spec_op,0,0,0,0,dsrl32_op), RT | RD | RE },
	{ insn_dsubu, M(spec_op,0,0,0,0,dsubu_op), RS | RT | RD },
	{ insn_eret, M(cop0_op,cop_op,0,0,0,eret_op), 0 },
	{ insn_j, M(j_op,0,0,0,0,0), JIMM },
	{ insn_jal, M(jal_op,0,0,0,0,0), JIMM },
	{ insn_jr, M(spec_op,0,0,0,0,jr_op), RS },
	{ insn_ld, M(ld_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM },
	{ insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD },
	{ insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD },
	{ insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM },
	{ insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 },
	{ insn_sd, M(sd_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_sll, M(spec_op,0,0,0,0,sll_op), RT | RD | RE },
	{ insn_sra, M(spec_op,0,0,0,0,sra_op), RT | RD | RE },
	{ insn_srl, M(spec_op,0,0,0,0,srl_op), RT | RD | RE },
	{ insn_subu, M(spec_op,0,0,0,0,subu_op), RS | RT | RD },
	{ insn_sw, M(sw_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_tlbp, M(cop0_op,cop_op,0,0,0,tlbp_op), 0 },
	{ insn_tlbwi, M(cop0_op,cop_op,0,0,0,tlbwi_op), 0 },
	{ insn_tlbwr, M(cop0_op,cop_op,0,0,0,tlbwr_op), 0 },
	{ insn_xor, M(spec_op,0,0,0,0,xor_op), RS | RT | RD },
	{ insn_xori, M(xori_op,0,0,0,0,0), RS | RT | UIMM },
	{ insn_invalid, 0, 0 }
};
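
/*
 * Example (added for illustration): to emit "addu k1, k1, k0" the
 * table row for insn_addu contributes the fixed bits
 * M(spec_op,0,0,0,0,addu_op), and build_insn() below ORs in
 * build_rs(27) | build_rt(26) | build_rd(27) from its variable
 * arguments, yielding the finished 32-bit instruction word.
 */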
static __init u32 build_rs(u32 arg)
{
	if (arg & ~RS_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RS_MASK) << RS_SH;
}

static __init u32 build_rt(u32 arg)
{
	if (arg & ~RT_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RT_MASK) << RT_SH;
}

static __init u32 build_rd(u32 arg)
{
	if (arg & ~RD_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RD_MASK) << RD_SH;
}

static __init u32 build_re(u32 arg)
{
	if (arg & ~RE_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RE_MASK) << RE_SH;
}

static __init u32 build_simm(s32 arg)
{
	if (arg > 0x7fff || arg < -0x8000)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & IMM_MASK;
}

static __init u32 build_uimm(u32 arg)
{
	if (arg & ~IMM_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & IMM_MASK;
}

static __init u32 build_bimm(s32 arg)
{
	if (arg > 0x1ffff || arg < -0x20000)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	if (arg & 0x3)
		printk(KERN_WARNING "Invalid TLB synthesizer branch target\n");

	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}

static __init u32 build_jimm(u32 arg)
{
	if (arg & ~((JIMM_MASK) << 2))
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg >> 2) & JIMM_MASK;
}

static __init u32 build_func(u32 arg)
{
	if (arg & ~FUNC_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & FUNC_MASK;
}
/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.
 */
static void __init build_insn(u32 **buf, enum opcode opc, ...)
{
	struct insn *ip = NULL;
	unsigned int i;
	va_list ap;
	u32 op;

	for (i = 0; insn_table[i].opcode != insn_invalid; i++)
		if (insn_table[i].opcode == opc) {
			ip = &insn_table[i];
			break;
		}

	if (!ip)
		panic("Unsupported TLB synthesizer instruction %d", opc);

	op = ip->match;
	va_start(ap, opc);
	if (ip->fields & RS) op |= build_rs(va_arg(ap, u32));
	if (ip->fields & RT) op |= build_rt(va_arg(ap, u32));
	if (ip->fields & RD) op |= build_rd(va_arg(ap, u32));
	if (ip->fields & RE) op |= build_re(va_arg(ap, u32));
	if (ip->fields & SIMM) op |= build_simm(va_arg(ap, s32));
	if (ip->fields & UIMM) op |= build_uimm(va_arg(ap, u32));
	if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32));
	if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32));
	if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32));
	va_end(ap);

	**buf = op;
	(*buf)++;
}
#define I_u1u2u3(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		unsigned int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, a, b, c);		\
	}

#define I_u2u1u3(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		unsigned int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, b, a, c);		\
	}

#define I_u3u1u2(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		unsigned int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, b, c, a);		\
	}

#define I_u1u2s3(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		unsigned int b, signed int c)			\
	{							\
		build_insn(buf, insn##op, a, b, c);		\
	}

#define I_u2s3u1(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		signed int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, c, a, b);		\
	}

#define I_u2u1s3(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		unsigned int b, signed int c)			\
	{							\
		build_insn(buf, insn##op, b, a, c);		\
	}

#define I_u1u2(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		unsigned int b)					\
	{							\
		build_insn(buf, insn##op, a, b);		\
	}

#define I_u1s2(op)						\
	static inline void i##op(u32 **buf, unsigned int a,	\
		signed int b)					\
	{							\
		build_insn(buf, insn##op, a, b);		\
	}

#define I_u1(op)						\
	static inline void i##op(u32 **buf, unsigned int a)	\
	{							\
		build_insn(buf, insn##op, a);			\
	}

#define I_0(op)							\
	static inline void i##op(u32 **buf)			\
	{							\
		build_insn(buf, insn##op);			\
	}

I_u2u1s3(_addiu);
I_u3u1u2(_addu);
I_u3u1u2(_and);
I_u2u1u3(_andi);
I_u1u2s3(_beq);
I_u1s2(_bgez);
I_u1s2(_bgezl);
I_u1s2(_bltz);
I_u1s2(_bltzl);
I_u1u2s3(_bne);
I_u2u1s3(_daddiu);
I_u3u1u2(_daddu);
I_u1u2(_dmfc0);
I_u1u2(_dmtc0);
I_u2u1u3(_dsll);
I_u2u1u3(_dsll32);
I_u2u1u3(_dsra);
I_u2u1u3(_dsrl);
I_u2u1u3(_dsrl32);
I_u3u1u2(_dsubu);
I_0(_eret);
I_u1(_j);
I_u1(_jal);
I_u1(_jr);
I_u2s3u1(_ld);
I_u1s2(_lui);
I_u2s3u1(_lw);
I_u1u2(_mfc0);
I_u1u2(_mtc0);
I_u2u1u3(_ori);
I_0(_rfe);
I_u2s3u1(_sd);
I_u2u1u3(_sll);
I_u2u1u3(_sra);
I_u2u1u3(_srl);
I_u3u1u2(_subu);
I_u2s3u1(_sw);
I_0(_tlbp);
I_0(_tlbwi);
I_0(_tlbwr);
I_u3u1u2(_xor);
I_u2u1u3(_xori);
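
/*
 * Note (added): the argument reordering in the I_* macros maps
 * assembler operand order onto the RS-first encoding order of
 * build_insn().  I_u3u1u2(_addu), for example, defines
 * i_addu(buf, rd, rs, rt) but passes (rs, rt, rd) along, matching
 * the RS | RT | RD field list of the insn_table entry while keeping
 * the destination register first in the C call.
 */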
enum label_id {
	label_invalid,
	label_leave,
	label_vmalloc,
	label_vmalloc_done,
	label_tlbwr_hazard,
	label_split
};

struct label {
	u32 *addr;
	enum label_id lab;
};

static __init void build_label(struct label **lab, u32 *addr,
			       enum label_id l)
{
	(*lab)->addr = addr;
	(*lab)->lab = l;
	(*lab)++;
}

#define L_LA(lb)						\
	static inline void l##lb(struct label **lab, u32 *addr) \
	{							\
		build_label(lab, addr, label##lb);		\
	}

L_LA(_leave)
L_LA(_vmalloc)
L_LA(_vmalloc_done)
L_LA(_tlbwr_hazard)
L_LA(_split)

struct reloc {
	u32 *addr;
	unsigned int type;
	enum label_id lab;
};
/* convenience macros for instructions */
#ifdef CONFIG_MIPS64
# define i_LW(buf, rs, rt, off) i_ld(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sd(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh)
# define i_MFC0(buf, rt, rd) i_dmfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd) i_dmtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd)
#else
# define i_LW(buf, rs, rt, off) i_lw(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sw(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh)
# define i_MFC0(buf, rt, rd) i_mfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd) i_mtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd)
#endif

#define i_b(buf, off) i_beq(buf, 0, 0, off)
#define i_bnez(buf, rs, off) i_bne(buf, rs, 0, off)
#define i_move(buf, a, b) i_ADDU(buf, a, 0, b)
#define i_nop(buf) i_sll(buf, 0, 0, 0)
#define i_ssnop(buf) i_sll(buf, 0, 0, 1)
#define i_ehb(buf) i_sll(buf, 0, 0, 3)
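
/*
 * Note (added): these pseudo-ops reuse canonical MIPS encodings: an
 * unconditional branch is beq $0, $0, nop is sll $0, $0, 0, and
 * ssnop/ehb are sll $0, $0, 1 and sll $0, $0, 3, which CPUs without
 * superscalar issue or hazard barriers execute as plain nops.
 */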
static __init int in_compat_space_p(long addr)
{
	/* Is this address in 32bit compat space? */
	return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
}

static __init int rel_highest(long val)
{
	return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
}

static __init int rel_higher(long val)
{
	return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
}

static __init int rel_hi(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

static __init int rel_lo(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
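
/*
 * Worked example (added): for addr == 0x12348765, rel_lo() returns
 * (0x8765 ^ 0x8000) - 0x8000 = -0x789b, anticipating the CPU's sign
 * extension of the 16-bit immediate; rel_hi() compensates by
 * returning 0x1235, so "lui reg, 0x1235; addiu reg, reg, -0x789b"
 * yields 0x12350000 - 0x789b = 0x12348765 exactly.
 */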
static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
	if (!in_compat_space_p(addr)) {
		i_lui(buf, rs, rel_highest(addr));
		if (rel_higher(addr))
			i_daddiu(buf, rs, rs, rel_higher(addr));
		if (rel_hi(addr)) {
			i_dsll(buf, rs, rs, 16);
			i_daddiu(buf, rs, rs, rel_hi(addr));
			i_dsll(buf, rs, rs, 16);
		} else
			i_dsll32(buf, rs, rs, 0);
	} else
		i_lui(buf, rs, rel_hi(addr));
}

static __init void __attribute__((unused)) i_LA(u32 **buf, unsigned int rs,
						long addr)
{
	i_LA_mostly(buf, rs, addr);
	if (rel_lo(addr))
		i_ADDIU(buf, rs, rs, rel_lo(addr));
}
static __init void r_mips_pc16(struct reloc **rel, u32 *addr,
			       enum label_id l)
{
	(*rel)->addr = addr;
	(*rel)->type = R_MIPS_PC16;
	(*rel)->lab = l;
	(*rel)++;
}
static inline void __resolve_relocs(struct reloc *rel, struct label *lab)
{
	long laddr = (long)lab->addr;
	long raddr = (long)rel->addr;

	switch (rel->type) {
	case R_MIPS_PC16:
		*rel->addr |= build_bimm(laddr - (raddr + 4));
		break;

	default:
		panic("Unsupported TLB synthesizer relocation %d",
		      rel->type);
	}
}

static __init void resolve_relocs(struct reloc *rel, struct label *lab)
{
	struct label *l;

	for (; rel->lab != label_invalid; rel++)
		for (l = lab; l->lab != label_invalid; l++)
			if (rel->lab == l->lab)
				__resolve_relocs(rel, l);
}
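
/*
 * Example (added): for a branch at raddr targeting a label at laddr,
 * the byte offset passed to build_bimm() is laddr - (raddr + 4); the
 * "+ 4" reflects that MIPS branch offsets are relative to the delay
 * slot, and build_bimm() performs the >> 2 and the range check.
 */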
static __init void copy_handler(struct reloc *rel, struct label *lab,
				u32 *first, u32 *end, u32 *target)
{
	long off = (long)(target - first);

	memcpy(target, first, (end - first) * sizeof(u32));

	for (; rel->lab != label_invalid; rel++)
		if (rel->addr >= first && rel->addr < end)
			rel->addr += off;

	for (; lab->lab != label_invalid; lab++)
		if (lab->addr >= first && lab->addr < end)
			lab->addr += off;
}
static __init int __attribute__((unused)) insn_has_bdelay(struct reloc *rel,
							  u32 *addr)
{
	for (; rel->lab != label_invalid; rel++) {
		if (rel->addr == addr
		    && (rel->type == R_MIPS_PC16
			|| rel->type == R_MIPS_26))
			return 1;
	}

	return 0;
}
/* convenience functions for labeled branches */
static void __attribute__((unused)) il_bltz(u32 **p, struct reloc **r,
					    unsigned int reg, enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bltz(p, reg, 0);
}

static void __attribute__((unused)) il_b(u32 **p, struct reloc **r,
					 enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_b(p, 0);
}

static void il_bnez(u32 **p, struct reloc **r, unsigned int reg,
		    enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bnez(p, reg, 0);
}

static void il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
		     enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bgezl(p, reg, 0);
}
/* The only registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0
#define C0_ENTRYLO0	2
#define C0_ENTRYLO1	3
#define C0_CONTEXT	4
#define C0_BADVADDR	8
#define C0_ENTRYHI	10
#define C0_EPC		14
#define C0_XCONTEXT	20

#ifdef CONFIG_MIPS64
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_CONTEXT)
#endif
/* The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static __initdata u32 tlb_handler[128];

/* simply assume worst case size for labels and relocs */
static __initdata struct label labels[128];
static __initdata struct reloc relocs[128];
#ifdef CONFIG_MIPS32
/*
 * The R3000 TLB handler is simple.
 */
static void __init build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p = tlb_handler;

	memset(tlb_handler, 0, sizeof(tlb_handler));

	i_mfc0(&p, K0, C0_BADVADDR);
	i_lui(&p, K1, rel_hi(pgdc)); /* cp0 delay */
	i_lw(&p, K1, rel_lo(pgdc), K1);
	i_srl(&p, K0, K0, 22); /* load delay */
	i_sll(&p, K0, K0, 2);
	i_addu(&p, K1, K1, K0);
	i_mfc0(&p, K0, C0_CONTEXT);
	i_lw(&p, K1, 0, K1); /* cp0 delay */
	i_andi(&p, K0, K0, 0xffc); /* load delay */
	i_addu(&p, K1, K1, K0);
	i_lw(&p, K0, 0, K1); /* get pte */
	i_nop(&p); /* load delay */
	i_mtc0(&p, K0, C0_ENTRYLO0);
	i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	i_tlbwr(&p); /* cp0 delay */
	i_jr(&p, K1);
	i_rfe(&p); /* branch delay */
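
	/*
	 * For reference (assembly listing added, derived from the
	 * emitter calls above):
	 *
	 *	mfc0	k0, c0_badvaddr
	 *	lui	k1, %hi(pgd_current)
	 *	lw	k1, %lo(pgd_current)(k1)
	 *	srl	k0, k0, 22
	 *	sll	k0, k0, 2
	 *	addu	k1, k1, k0
	 *	mfc0	k0, c0_context
	 *	lw	k1, 0(k1)
	 *	andi	k0, k0, 0xffc
	 *	addu	k1, k1, k0
	 *	lw	k0, 0(k1)
	 *	nop
	 *	mtc0	k0, c0_entrylo0
	 *	mfc0	k1, c0_epc
	 *	tlbwr
	 *	jr	k1
	 *	 rfe			# in the branch delay slot
	 */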

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	printk("Synthesized TLB handler (%u instructions).\n",
	       (unsigned int)(p - tlb_handler));

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - tlb_handler); i++)
			printk("%08x\n", tlb_handler[i]);
	}
#endif

	memcpy((void *)CAC_BASE, tlb_handler, 0x80);
	flush_icache_range(CAC_BASE, CAC_BASE + 0x80);
}
#endif /* CONFIG_MIPS32 */
/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow into the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static __initdata u32 final_handler[64];
/*
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB. The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed. This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static __init void __attribute__((unused)) build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_data.cputype) {
	/* Found by experiment: R4600 v2.0 needs this, too. */
	case CPU_R4600:
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
		i_nop(p);
		i_tlbp(p);
		break;

	default:
		i_tlbp(p);
		break;
	}
}
/*
 * Write random TLB entry, and care about the hazards from the
 * preceding mtc0 and for the following eret.
 */
static __init void build_tlb_write_random_entry(u32 **p, struct label **l,
						struct reloc **r)
{
	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbwr.
		 */
		il_bgezl(p, r, 0, label_tlbwr_hazard);
		i_tlbwr(p);
		l_tlbwr_hazard(l, *p);
		i_nop(p);
		break;

	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
		i_nop(p);
		i_tlbwr(p);
		i_nop(p);
		break;

	case CPU_NEVADA:
		i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbwr.
		 */
		il_bgezl(p, r, 0, label_tlbwr_hazard);
		i_tlbwr(p);
		l_tlbwr_hazard(l, *p);
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_4KC:
	case CPU_SB1:
	case CPU_20KC:
	case CPU_25KF:
		i_tlbwr(p);
		break;

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		i_tlbwr(p);
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}
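
/*
 * Note (added): bgezl with rs == $0 compares 0 >= 0 and is therefore
 * always taken, so the tlbwr placed in its delay slot executes
 * exactly once while the branch itself occupies the mtc0-to-tlbwr
 * hazard slot; since the branch target is the instruction right
 * after the tlbwr, the taken-branch pipeline bubble also covers
 * hazard cycles that would otherwise need explicit nops.
 */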
#ifdef CONFIG_MIPS64
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static __init void
build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	i_dmfc0(p, tmp, C0_BADVADDR);
	il_bltz(p, r, tmp, label_vmalloc);
	/* No i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_SMP
	/*
	 * 64 bit SMP has the lower part of &pgd_current[smp_processor_id()]
	 * stored in CONTEXT.
	 */
	if (in_compat_space_p(pgdc)) {
		i_dmfc0(p, ptr, C0_CONTEXT);
		i_dsra(p, ptr, ptr, 23);
	} else {
		i_dmfc0(p, ptr, C0_CONTEXT);
		i_lui(p, tmp, rel_highest(pgdc));
		i_dsll(p, ptr, ptr, 9);
		i_daddiu(p, tmp, tmp, rel_higher(pgdc));
		i_dsrl32(p, ptr, ptr, 0);
		i_and(p, ptr, ptr, tmp);
		i_dmfc0(p, tmp, C0_BADVADDR);
	}
	i_ld(p, ptr, 0, ptr);
#else
	i_LA_mostly(p, ptr, pgdc);
	i_ld(p, ptr, rel_lo(pgdc), ptr);
#endif

	l_vmalloc_done(l, *p);
	i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3); /* get pgd offset in bytes */
	i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
	i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
	i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
	i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
	i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
}
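
/*
 * Note (added): with 8-byte entries, shifting the faulting address
 * right by PGDIR_SHIFT-3 instead of PGDIR_SHIFT leaves the pgd index
 * pre-multiplied by 8, and the (PTRS_PER_PGD - 1)<<3 mask keeps
 * exactly that byte offset, saving a separate scaling shift; the
 * same trick is repeated for the pmd level.
 */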
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static __init void
build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
			unsigned int bvaddr, unsigned int ptr)
{
	long swpd = (long)swapper_pg_dir;

	l_vmalloc(l, *p);
	i_LA(p, ptr, VMALLOC_START);
	i_dsubu(p, bvaddr, bvaddr, ptr);

	if (in_compat_space_p(swpd) && !rel_lo(swpd)) {
		il_b(p, r, label_vmalloc_done);
		i_lui(p, ptr, rel_hi(swpd));
	} else {
		i_LA_mostly(p, ptr, swpd);
		il_b(p, r, label_vmalloc_done);
		i_daddiu(p, ptr, ptr, rel_lo(swpd));
	}
}
#else /* CONFIG_MIPS32 */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static __init void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
	i_mfc0(p, ptr, C0_CONTEXT);
	i_LA_mostly(p, tmp, pgdc);
	i_srl(p, ptr, ptr, 23);
	i_sll(p, ptr, ptr, 2);
	i_addu(p, ptr, tmp, ptr);
#else
	i_LA_mostly(p, ptr, pgdc);
#endif
	i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	i_lw(p, ptr, rel_lo(pgdc), ptr);
	i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	i_sll(p, tmp, tmp, PGD_T_LOG2);
	i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}
#endif /* CONFIG_MIPS32 */
static __init void build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 0;
	unsigned int mask = 0xff0;

#if !defined(CONFIG_MIPS64) && !defined(CONFIG_64BIT_PHYS_ADDR)
	/* 32-bit PTEs halve the PTE pair size, so drop one offset bit. */
	shift++;
	mask |= 0x8;
#endif

	switch (current_cpu_data.cputype) {
	default:
		break;
	}

	if (shift)
		i_SRL(p, ctx, ctx, shift);
	i_andi(p, ctx, ctx, mask);
}
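
/*
 * Worked example (added): with 4K pages the context register holds
 * BadVPN2, the faulting PTE-pair number, pre-scaled by 16 in bits
 * 22..4.  With 8-byte PTEs a pair is exactly 16 bytes, so no shift
 * is needed and the 0xff0 mask clips the value to a pair-aligned
 * byte offset within one page-table page.
 */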
static __init void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_data.cputype) {
	case CPU_NEVADA:
		i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
static __init void build_update_entries(u32 **p, unsigned int tmp,
					unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bit_gp_regs) {
		i_ld(p, tmp, 0, ptep); /* get even pte */
		i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
		i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
		i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	i_LW(p, tmp, 0, ptep); /* get even pte */
	i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
	if (r4k_250MHZhwbug())
		i_mtc0(p, 0, C0_ENTRYLO0);
	i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
	i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
	if (r45k_bvahwbug())
		i_mfc0(p, tmp, C0_INDEX);
	if (r4k_250MHZhwbug())
		i_mtc0(p, 0, C0_ENTRYLO1);
	i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}
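
/*
 * Note (added): the SRL/DSRL by 6 converts a page table entry to
 * EntryLo format by discarding the low software bits; the extra
 * mtc0 of $0 and the mfc0 from C0_INDEX are workarounds for the
 * hardware bugs assumed (not probed, per the XXX comments above) by
 * r4k_250MHZhwbug() and r45k_bvahwbug().
 */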
static void __init build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct label *l = labels;
	struct reloc *r = relocs;
	u32 *f;
	unsigned int final_len;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
		i_MFC0(&p, K0, C0_BADVADDR);
		i_MFC0(&p, K1, C0_ENTRYHI);
		i_xor(&p, K0, K0, K1);
		i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		il_bnez(&p, &r, K0, label_leave);
		/* No need for i_nop */
	}

#ifdef CONFIG_MIPS64
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd ptr in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd ptr in K1 */
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_random_entry(&p, &l, &r);
	l_leave(&l, *p);
	i_eret(&p); /* return from trap */

#ifdef CONFIG_MIPS64
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif
	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
#ifdef CONFIG_MIPS32
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > 63)
	    || (((p - tlb_handler) > 61)
		&& insn_has_bdelay(relocs, tlb_handler + 29)))
		panic("TLB refill handler space exceeded");
#endif
	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#ifdef CONFIG_MIPS32
	f = final_handler;
	/* Simplest case, just copy the handler. */
	copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_MIPS64 */
	f = final_handler + 32;
	if ((p - tlb_handler) <= 32) {
		/* Just copy the handler. */
		copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
		u32 *split = tlb_handler + 30;

		/*
		 * Find the split point.
		 */
		if (insn_has_bdelay(relocs, split - 1))
			split--;

		/* Copy first part of the handler. */
		copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		/* Insert branch. */
		l_split(&l, final_handler);
		il_b(&f, &r, label_split);
		if (insn_has_bdelay(relocs, split))
			i_nop(&f);
		else {
			copy_handler(relocs, labels, split, split + 1, f);
			f++;
			split++;
		}

		/* Copy the rest of the handler. */
		copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + 32)) + (p - split);
	}
#endif /* CONFIG_MIPS64 */
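
	/*
	 * Resulting layout (added illustration): final_handler + 32
	 * corresponds to the 64-bit XTLB refill vector at
	 * CAC_BASE + 0x80 and holds the first 32 instructions; when
	 * the handler is longer, it branches to label_split at
	 * final_handler[0], i.e. into the 32-instruction area of the
	 * (then unused) 32-bit refill vector at CAC_BASE.
	 */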
	resolve_relocs(relocs, labels);
	printk("Synthesized TLB handler (%u instructions).\n", final_len);

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < 64; i++)
			printk("%08x\n", final_handler[i]);
	}
#endif

	memcpy((void *)CAC_BASE, final_handler, 0x100);
	flush_icache_range(CAC_BASE, CAC_BASE + 0x100);
}
void __init build_tlb_refill_handler(void)
{
	switch (current_cpu_data.cputype) {
#ifdef CONFIG_MIPS32
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
		build_r3000_tlb_refill_handler();
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;
#endif

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
	}
}