*
* Synthesize TLB refill handlers at runtime.
*
- * Copyright (C) 2004 by Thiemo Seufer
+ * Copyright (C) 2004,2005 by Thiemo Seufer
+ * Copyright (C) 2005 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * ... and the days got worse and worse and now you see
+ * I've gone completely out of my mind.
+ *
+ * They're coming to take me away haha
+ * they're coming to take me away hoho hihi haha
+ * to the funny farm where code is beautiful all the time ...
+ *
+ * (Condolences to Napoleon XIV)
*/
#include <stdarg.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
-#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/smp.h>
+#include <asm/war.h>
/* #define DEBUG_TLB */
return BCM1250_M3_WAR;
}
+static __init int __attribute__((unused)) r10000_llsc_war(void)
+{
+ return R10000_LLSC_WAR;
+}
+
/*
* A little micro-assembler, intended for TLB refill handler
 * synthesizing. It is intentionally kept simple and only supports
BIMM = 0x040,
JIMM = 0x080,
FUNC = 0x100,
+ SET = 0x200
};
#define OP_MASK 0x3f
#define JIMM_SH 0
#define FUNC_MASK 0x3f
#define FUNC_SH 0
+#define SET_MASK 0x7
+#define SET_SH 0
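+/* SET is the 3-bit coprocessor 0 register select ("sel") field. */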
enum opcode {
insn_invalid,
insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
- insn_bgez, insn_bgezl, insn_bltz, insn_bltzl, insn_bne,
- insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0,
- insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32,
+	insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
+	insn_bne, insn_bnel, insn_daddu, insn_daddiu, insn_dmfc0,
+	insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld,
- insn_lui, insn_lw, insn_mfc0, insn_mtc0, insn_ori, insn_rfe,
- insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
- insn_tlbp, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori
+ insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0,
+ insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
+ insn_sra, insn_srl, insn_subu, insn_sw, insn_tlbp, insn_tlbwi,
+ insn_tlbwr, insn_xor, insn_xori
};
struct insn {
{ insn_and, M(spec_op,0,0,0,0,and_op), RS | RT | RD },
{ insn_andi, M(andi_op,0,0,0,0,0), RS | RT | UIMM },
{ insn_beq, M(beq_op,0,0,0,0,0), RS | RT | BIMM },
+ { insn_beql, M(beql_op,0,0,0,0,0), RS | RT | BIMM },
{ insn_bgez, M(bcond_op,0,bgez_op,0,0,0), RS | BIMM },
{ insn_bgezl, M(bcond_op,0,bgezl_op,0,0,0), RS | BIMM },
{ insn_bltz, M(bcond_op,0,bltz_op,0,0,0), RS | BIMM },
	{ insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM },
+	{ insn_bnel, M(bnel_op,0,0,0,0,0), RS | RT | BIMM },
{ insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM },
{ insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD },
- { insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD },
- { insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD },
+ { insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD | SET},
+ { insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD | SET},
{ insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE },
{ insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE },
{ insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE },
{ insn_dsrl, M(spec_op,0,0,0,0,dsrl_op), RT | RD | RE },
- { insn_dsrl32, M(spec_op,0,0,0,0,dsrl32_op), RT | RD | RE },
{ insn_dsubu, M(spec_op,0,0,0,0,dsubu_op), RS | RT | RD },
{ insn_eret, M(cop0_op,cop_op,0,0,0,eret_op), 0 },
{ insn_j, M(j_op,0,0,0,0,0), JIMM },
{ insn_jal, M(jal_op,0,0,0,0,0), JIMM },
{ insn_jr, M(spec_op,0,0,0,0,jr_op), RS },
{ insn_ld, M(ld_op,0,0,0,0,0), RS | RT | SIMM },
+ { insn_ll, M(ll_op,0,0,0,0,0), RS | RT | SIMM },
+ { insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM },
{ insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM },
{ insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM },
- { insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD },
- { insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD },
+ { insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD | SET},
+ { insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD | SET},
{ insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM },
{ insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 },
+ { insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM },
+ { insn_scd, M(scd_op,0,0,0,0,0), RS | RT | SIMM },
{ insn_sd, M(sd_op,0,0,0,0,0), RS | RT | SIMM },
{ insn_sll, M(spec_op,0,0,0,0,sll_op), RT | RD | RE },
{ insn_sra, M(spec_op,0,0,0,0,sra_op), RT | RD | RE },
return arg & FUNC_MASK;
}
+static __init u32 build_set(u32 arg)
+{
+ if (arg & ~SET_MASK)
+ printk(KERN_WARNING "TLB synthesizer field overflow\n");
+
+ return arg & SET_MASK;
+}
+
/*
* The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM.
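+ *
+ * For instance, build_insn(buf, insn_addu, 26, 27, 27) encodes
+ * "addu $27, $26, $27" by OR-ing rs=26 << 21, rt=27 << 16 and
+ * rd=27 << 11 into the addu template, giving 0x035bd821.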
if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32));
if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32));
if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32));
+ if (ip->fields & SET) op |= build_set(va_arg(ap, u32));
va_end(ap);
**buf = op;
}
#define I_u1u2u3(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
unsigned int b, unsigned int c) \
{ \
build_insn(buf, insn##op, a, b, c); \
}
#define I_u2u1u3(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
unsigned int b, unsigned int c) \
{ \
build_insn(buf, insn##op, b, a, c); \
}
#define I_u3u1u2(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
unsigned int b, unsigned int c) \
{ \
build_insn(buf, insn##op, b, c, a); \
}
#define I_u1u2s3(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
unsigned int b, signed int c) \
{ \
build_insn(buf, insn##op, a, b, c); \
}
#define I_u2s3u1(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
signed int b, unsigned int c) \
{ \
build_insn(buf, insn##op, c, a, b); \
}
#define I_u2u1s3(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
unsigned int b, signed int c) \
{ \
build_insn(buf, insn##op, b, a, c); \
}
#define I_u1u2(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
unsigned int b) \
{ \
build_insn(buf, insn##op, a, b); \
}
#define I_u1s2(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
signed int b) \
{ \
build_insn(buf, insn##op, a, b); \
}
#define I_u1(op) \
- static inline void i##op(u32 **buf, unsigned int a) \
+ static inline void __init i##op(u32 **buf, unsigned int a) \
{ \
build_insn(buf, insn##op, a); \
}
#define I_0(op) \
- static inline void i##op(u32 **buf) \
+ static inline void __init i##op(u32 **buf) \
{ \
build_insn(buf, insn##op); \
}
I_u2u1u3(_andi);
I_u3u1u2(_and);
I_u1u2s3(_beq);
+I_u1u2s3(_beql);
I_u1s2(_bgez);
I_u1s2(_bgezl);
I_u1s2(_bltz);
I_u1s2(_bltzl);
I_u1u2s3(_bne);
+I_u1u2s3(_bnel);
-I_u1u2(_dmfc0);
-I_u1u2(_dmtc0);
+I_u1u2u3(_dmfc0);
+I_u1u2u3(_dmtc0);
I_u2u1s3(_daddiu);
I_u3u1u2(_daddu);
I_u2u1u3(_dsll);
I_u2u1u3(_dsll32);
I_u2u1u3(_dsra);
I_u2u1u3(_dsrl);
-I_u2u1u3(_dsrl32);
I_u3u1u2(_dsubu);
I_0(_eret);
I_u1(_j);
I_u1(_jal);
I_u1(_jr);
I_u2s3u1(_ld);
+I_u2s3u1(_ll);
+I_u2s3u1(_lld);
I_u1s2(_lui);
I_u2s3u1(_lw);
-I_u1u2(_mfc0);
-I_u1u2(_mtc0);
+I_u1u2u3(_mfc0);
+I_u1u2u3(_mtc0);
I_u2u1u3(_ori);
I_0(_rfe);
+I_u2s3u1(_sc);
+I_u2s3u1(_scd);
I_u2s3u1(_sd);
I_u2u1u3(_sll);
I_u2u1u3(_sra);
label_leave,
label_vmalloc,
label_vmalloc_done,
- label_tlbwr_hazard,
- label_split
+ label_tlbw_hazard,
+ label_split,
+ label_nopage_tlbl,
+ label_nopage_tlbs,
+ label_nopage_tlbm,
+ label_smp_pgtable_change,
+ label_r3000_write_probe_fail,
};
struct label {
L_LA(_leave)
L_LA(_vmalloc)
L_LA(_vmalloc_done)
-L_LA(_tlbwr_hazard)
+L_LA(_tlbw_hazard)
L_LA(_split)
+L_LA(_nopage_tlbl)
+L_LA(_nopage_tlbs)
+L_LA(_nopage_tlbm)
+L_LA(_smp_pgtable_change)
+L_LA(_r3000_write_probe_fail)
/* convenience macros for instructions */
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
# define i_LW(buf, rs, rt, off) i_ld(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sd(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh)
-# define i_MFC0(buf, rt, rd) i_dmfc0(buf, rt, rd)
-# define i_MTC0(buf, rt, rd) i_dmtc0(buf, rt, rd)
+# define i_MFC0(buf, rt, rd...) i_dmfc0(buf, rt, rd)
+# define i_MTC0(buf, rt, rd...) i_dmtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd)
+# define i_LL(buf, rs, rt, off) i_lld(buf, rs, rt, off)
+# define i_SC(buf, rs, rt, off) i_scd(buf, rs, rt, off)
#else
# define i_LW(buf, rs, rt, off) i_lw(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sw(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh)
-# define i_MFC0(buf, rt, rd) i_mfc0(buf, rt, rd)
-# define i_MTC0(buf, rt, rd) i_mtc0(buf, rt, rd)
+# define i_MFC0(buf, rt, rd...) i_mfc0(buf, rt, rd)
+# define i_MTC0(buf, rt, rd...) i_mtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd)
+# define i_LL(buf, rs, rt, off) i_ll(buf, rs, rt, off)
+# define i_SC(buf, rs, rt, off) i_sc(buf, rs, rt, off)
#endif
#define i_b(buf, off) i_beq(buf, 0, 0, off)
+#define i_beqz(buf, rs, off) i_beq(buf, rs, 0, off)
+#define i_beqzl(buf, rs, off) i_beql(buf, rs, 0, off)
#define i_bnez(buf, rs, off) i_bne(buf, rs, 0, off)
+#define i_bnezl(buf, rs, off) i_bnel(buf, rs, 0, off)
#define i_move(buf, a, b) i_ADDU(buf, a, 0, b)
#define i_nop(buf) i_sll(buf, 0, 0, 0)
#define i_ssnop(buf) i_sll(buf, 0, 0, 1)
#define i_ehb(buf) i_sll(buf, 0, 0, 3)
-#if CONFIG_MIPS64
-static __init int in_compat_space_p(long addr)
+#ifdef CONFIG_64BIT
+static __init int __attribute__((unused)) in_compat_space_p(long addr)
{
/* Is this address in 32bit compat space? */
- return (((addr) & 0xffffffff00000000) == 0xffffffff00000000);
+ return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
}
-static __init int rel_highest(long val)
+static __init int __attribute__((unused)) rel_highest(long val)
{
return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
}
-static __init int rel_higher(long val)
+static __init int __attribute__((unused)) rel_higher(long val)
{
return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
}
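+
+/*
+ * The bias constants above pre-compensate for the sign extension
+ * that happens when the lower 16-bit chunks (rel_higher, rel_hi,
+ * rel_lo) are added in later, and the xor/subtract with 0x8000
+ * sign-extends the extracted chunk itself.
+ */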
static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
-#if CONFIG_MIPS64
+#ifdef CONFIG_64BIT
if (!in_compat_space_p(addr)) {
i_lui(buf, rs, rel_highest(addr));
if (rel_higher(addr))
__resolve_relocs(rel, l);
}
-static __init void copy_handler(struct reloc *rel, struct label *lab,
- u32 *first, u32 *end, u32* target)
+static __init void move_relocs(struct reloc *rel, u32 *first, u32 *end,
+ long off)
{
- long off = (long)(target - first);
-
- memcpy(target, first, (end - first) * sizeof(u32));
-
for (; rel->lab != label_invalid; rel++)
if (rel->addr >= first && rel->addr < end)
rel->addr += off;
+}
+static __init void move_labels(struct label *lab, u32 *first, u32 *end,
+ long off)
+{
for (; lab->lab != label_invalid; lab++)
if (lab->addr >= first && lab->addr < end)
lab->addr += off;
}
+static __init void copy_handler(struct reloc *rel, struct label *lab,
+ u32 *first, u32 *end, u32 *target)
+{
+ long off = (long)(target - first);
+
+ memcpy(target, first, (end - first) * sizeof(u32));
+
+ move_relocs(rel, first, end, off);
+ move_labels(lab, first, end, off);
+}
+
static __init int __attribute__((unused)) insn_has_bdelay(struct reloc *rel,
u32 *addr)
{
}
/* convenience functions for labeled branches */
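+/*
+ * Each helper emits its branch with a zero offset and records a PC16
+ * relocation against the label; resolve_relocs() patches the real
+ * displacement in once the label's address is known.
+ */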
-static void __attribute__((unused)) il_bltz(u32 **p, struct reloc **r,
- unsigned int reg, enum label_id l)
+static void __init __attribute__((unused))
+ il_bltz(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
r_mips_pc16(r, *p, l);
i_bltz(p, reg, 0);
}
-static void __attribute__((unused)) il_b(u32 **p, struct reloc **r,
+static void __init __attribute__((unused)) il_b(u32 **p, struct reloc **r,
enum label_id l)
{
r_mips_pc16(r, *p, l);
i_b(p, 0);
}
-static void il_bnez(u32 **p, struct reloc **r, unsigned int reg,
+static void __init il_beqz(u32 **p, struct reloc **r, unsigned int reg,
+ enum label_id l)
+{
+ r_mips_pc16(r, *p, l);
+ i_beqz(p, reg, 0);
+}
+
+static void __init __attribute__((unused))
+il_beqzl(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
+{
+ r_mips_pc16(r, *p, l);
+ i_beqzl(p, reg, 0);
+}
+
+static void __init il_bnez(u32 **p, struct reloc **r, unsigned int reg,
enum label_id l)
{
r_mips_pc16(r, *p, l);
i_bnez(p, reg, 0);
}
-static void il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
+static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
enum label_id l)
{
r_mips_pc16(r, *p, l);
i_bgezl(p, reg, 0);
}
-/* The only registers allowed in TLB handlers. */
+/* The only general purpose registers allowed in TLB handlers. */
#define K0 26
#define K1 27
/* Some CP0 registers */
-#define C0_INDEX 0
-#define C0_ENTRYLO0 2
-#define C0_ENTRYLO1 3
-#define C0_CONTEXT 4
-#define C0_BADVADDR 8
-#define C0_ENTRYHI 10
-#define C0_EPC 14
-#define C0_XCONTEXT 20
-
-#ifdef CONFIG_MIPS64
+#define C0_INDEX 0, 0
+#define C0_ENTRYLO0 2, 0
+#define C0_TCBIND 2, 2
+#define C0_ENTRYLO1 3, 0
+#define C0_CONTEXT 4, 0
+#define C0_BADVADDR 8, 0
+#define C0_ENTRYHI 10, 0
+#define C0_EPC 14, 0
+#define C0_XCONTEXT 20, 0
+
+#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_CONTEXT)
static __initdata struct label labels[128];
static __initdata struct reloc relocs[128];
-#ifdef CONFIG_MIPS32
/*
* The R3000 TLB handler is simple.
*/
if (p > tlb_handler + 32)
panic("TLB refill handler space exceeded");
- printk("Synthesized TLB handler (%u instructions).\n",
- p - tlb_handler);
+ printk("Synthesized TLB refill handler (%u instructions).\n",
+ (unsigned int)(p - tlb_handler));
#ifdef DEBUG_TLB
{
int i;
+
for (i = 0; i < (p - tlb_handler); i++)
printk("%08x\n", tlb_handler[i]);
}
#endif
- memcpy((void *)CAC_BASE, tlb_handler, 0x80);
- flush_icache_range(CAC_BASE, CAC_BASE + 0x80);
+ memcpy((void *)ebase, tlb_handler, 0x80);
}
-#endif /* CONFIG_MIPS32 */
/*
* The R4000 TLB handler is much more complicated. We have two
static __init void __attribute__((unused)) build_tlb_probe_entry(u32 **p)
{
switch (current_cpu_data.cputype) {
+ /* Found by experiment: R4600 v2.0 needs this, too. */
+ case CPU_R4600:
case CPU_R5000:
case CPU_R5000A:
case CPU_NEVADA:
}
/*
- * Write random TLB entry, and care about the hazards from the
- * preceeding mtc0 and for the following eret.
+ * Write random or indexed TLB entry, and care about the hazards from
+ * the preceding mtc0 and for the following eret.
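+ *
+ * Some CPUs below use a branch-likely idiom to hide the hazard:
+ *
+ *	bgezl	$0, 1f		# always taken, soaks up one hazard slot
+ *	 tlbwr			# executes in the branch delay slot
+ * 1: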
*/
-static __init void build_tlb_write_random_entry(u32 **p, struct label **l,
- struct reloc **r)
+enum tlb_write_entry { tlb_random, tlb_indexed };
+
+static __init void build_tlb_write_entry(u32 **p, struct label **l,
+ struct reloc **r,
+ enum tlb_write_entry wmode)
{
+ void(*tlbw)(u32 **) = NULL;
+
+ switch (wmode) {
+ case tlb_random: tlbw = i_tlbwr; break;
+ case tlb_indexed: tlbw = i_tlbwi; break;
+ }
+
switch (current_cpu_data.cputype) {
case CPU_R4000PC:
case CPU_R4000SC:
case CPU_R4400MC:
/*
* This branch uses up a mtc0 hazard nop slot and saves
- * two nops after the tlbwr.
+ * two nops after the tlbw instruction.
*/
- il_bgezl(p, r, 0, label_tlbwr_hazard);
- i_tlbwr(p);
- l_tlbwr_hazard(l, *p);
+ il_bgezl(p, r, 0, label_tlbw_hazard);
+ tlbw(p);
+ l_tlbw_hazard(l, *p);
i_nop(p);
break;
case CPU_R4700:
case CPU_R5000:
case CPU_R5000A:
+ i_nop(p);
+ tlbw(p);
+ i_nop(p);
+ break;
+
+ case CPU_R4300:
case CPU_5KC:
+ case CPU_TX49XX:
case CPU_AU1000:
case CPU_AU1100:
case CPU_AU1500:
case CPU_AU1550:
+ case CPU_AU1200:
+ case CPU_PR4450:
i_nop(p);
- i_tlbwr(p);
+ tlbw(p);
break;
case CPU_R10000:
case CPU_R12000:
+ case CPU_R14000:
case CPU_4KC:
case CPU_SB1:
+ case CPU_SB1A:
case CPU_4KSC:
case CPU_20KC:
case CPU_25KF:
- i_tlbwr(p);
+ tlbw(p);
break;
case CPU_NEVADA:
i_nop(p); /* QED specifies 2 nops hazard */
/*
* This branch uses up a mtc0 hazard nop slot and saves
- * a nop after the tlbwr.
+ * a nop after the tlbw instruction.
*/
- il_bgezl(p, r, 0, label_tlbwr_hazard);
- i_tlbwr(p);
- l_tlbwr_hazard(l, *p);
+ il_bgezl(p, r, 0, label_tlbw_hazard);
+ tlbw(p);
+ l_tlbw_hazard(l, *p);
+ break;
+
+ case CPU_RM7000:
+ i_nop(p);
+ i_nop(p);
+ i_nop(p);
+ i_nop(p);
+ tlbw(p);
break;
case CPU_4KEC:
case CPU_24K:
+ case CPU_34K:
+ case CPU_74K:
i_ehb(p);
- i_tlbwr(p);
+ tlbw(p);
break;
case CPU_RM9000:
i_ssnop(p);
i_ssnop(p);
i_ssnop(p);
- i_tlbwr(p);
+ tlbw(p);
i_ssnop(p);
i_ssnop(p);
i_ssnop(p);
i_ssnop(p);
break;
+ case CPU_VR4111:
+ case CPU_VR4121:
+ case CPU_VR4122:
+ case CPU_VR4181:
+ case CPU_VR4181A:
+ i_nop(p);
+ i_nop(p);
+ tlbw(p);
+ i_nop(p);
+ i_nop(p);
+ break;
+
+ case CPU_VR4131:
+ case CPU_VR4133:
+ case CPU_R5432:
+ i_nop(p);
+ i_nop(p);
+ tlbw(p);
+ break;
+
default:
panic("No TLB refill handler yet (CPU type: %d)",
current_cpu_data.cputype);
}
}
-#if CONFIG_MIPS64
+#ifdef CONFIG_64BIT
/*
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pmd entry.
il_bltz(p, r, tmp, label_vmalloc);
/* No i_nop needed here, since the next insn doesn't touch TMP. */
-# ifdef CONFIG_SMP
+#ifdef CONFIG_SMP
+# ifdef CONFIG_MIPS_MT_SMTC
/*
- * 64 bit SMP has the lower part of &pgd_current[smp_processor_id()]
- * stored in CONTEXT.
+ * SMTC uses TCBind value as "CPU" index
*/
- if (in_compat_space_p(pgdc)) {
- i_dmfc0(p, ptr, C0_CONTEXT);
- i_dsra(p, ptr, ptr, 23);
- } else {
- i_dmfc0(p, ptr, C0_CONTEXT);
- i_lui(p, tmp, rel_highest(pgdc));
- i_dsll(p, ptr, ptr, 9);
- i_daddiu(p, tmp, tmp, rel_higher(pgdc));
- i_dsrl32(p, ptr, ptr, 0);
- i_and(p, ptr, ptr, tmp);
- i_dmfc0(p, tmp, C0_BADVADDR);
- }
- i_ld(p, ptr, 0, ptr);
+ i_mfc0(p, ptr, C0_TCBIND);
+ i_dsrl(p, ptr, ptr, 19);
# else
+ /*
+ * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
+ * stored in CONTEXT.
+ */
+ i_dmfc0(p, ptr, C0_CONTEXT);
+ i_dsrl(p, ptr, ptr, 23);
+#endif
+ i_LA_mostly(p, tmp, pgdc);
+ i_daddu(p, ptr, ptr, tmp);
+ i_dmfc0(p, tmp, C0_BADVADDR);
+ i_ld(p, ptr, rel_lo(pgdc), ptr);
+#else
i_LA_mostly(p, ptr, pgdc);
i_ld(p, ptr, rel_lo(pgdc), ptr);
-# endif
+#endif
l_vmalloc_done(l, *p);
i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3); /* get pgd offset in bytes */
}
}
-#else /* CONFIG_MIPS32 */
+#else /* !CONFIG_64BIT */
/*
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pgd entry.
*/
-static __init void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
+static __init void __attribute__((unused))
+build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
long pgdc = (long)pgd_current;
/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC uses TCBind value as "CPU" index
+ */
+ i_mfc0(p, ptr, C0_TCBIND);
+ i_LA_mostly(p, tmp, pgdc);
+ i_srl(p, ptr, ptr, 19);
+#else
+ /*
+ * smp_processor_id() << 3 is stored in CONTEXT.
+ */
i_mfc0(p, ptr, C0_CONTEXT);
i_LA_mostly(p, tmp, pgdc);
i_srl(p, ptr, ptr, 23);
- i_sll(p, ptr, ptr, 2);
+#endif
i_addu(p, ptr, tmp, ptr);
#else
i_LA_mostly(p, ptr, pgdc);
i_sll(p, tmp, tmp, PGD_T_LOG2);
i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}
-#endif /* CONFIG_MIPS32 */
+
+#endif /* !CONFIG_64BIT */
static __init void build_adjust_context(u32 **p, unsigned int ctx)
{
- unsigned int shift = 0;
- unsigned int mask = 0xff0;
-
-#if !defined(CONFIG_MIPS64) && !defined(CONFIG_64BIT_PHYS_ADDR)
- shift++;
- mask |= 0x008;
-#endif
+ unsigned int shift = 4 - (PTE_T_LOG2 + 1);
+ unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
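+
+	/*
+	 * CONTEXT holds BadVPN2 starting at bit 4; rescale that to the
+	 * byte offset of an even/odd pte pair and mask it so we stay
+	 * inside the page table.
+	 */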
switch (current_cpu_data.cputype) {
case CPU_VR41XX:
* Kernel is a special case. Only a few CPUs use it.
*/
#ifdef CONFIG_64BIT_PHYS_ADDR
- if (cpu_has_64bit_gp_regs) {
+ if (cpu_has_64bits) {
i_ld(p, tmp, 0, ptep); /* get even pte */
i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
i_MFC0(&p, K0, C0_BADVADDR);
i_MFC0(&p, K1, C0_ENTRYHI);
i_xor(&p, K0, K0, K1);
- i_SRL(&p, K0, K0, PAGE_SHIFT+1);
+ i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
il_bnez(&p, &r, K0, label_leave);
/* No need for i_nop */
}
-#ifdef CONFIG_MIPS64
- build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd ptr in K1 */
+#ifdef CONFIG_64BIT
+ build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
- build_get_pgde32(&p, K0, K1); /* get pgd ptr in K1 */
+ build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif
build_get_ptep(&p, K0, K1);
build_update_entries(&p, K0, K1);
- build_tlb_write_random_entry(&p, &l, &r);
+ build_tlb_write_entry(&p, &l, &r, tlb_random);
l_leave(&l, p);
i_eret(&p); /* return from trap */
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif
 * need three, with the second nop'ed and the third being
* unused.
*/
-#ifdef CONFIG_MIPS32
+#ifdef CONFIG_32BIT
if ((p - tlb_handler) > 64)
panic("TLB refill handler space exceeded");
#else
/*
* Now fold the handler in the TLB refill handler space.
*/
-#ifdef CONFIG_MIPS32
+#ifdef CONFIG_32BIT
f = final_handler;
/* Simplest case, just copy the handler. */
copy_handler(relocs, labels, tlb_handler, p, f);
final_len = p - tlb_handler;
-#else /* CONFIG_MIPS64 */
+#else /* CONFIG_64BIT */
f = final_handler + 32;
if ((p - tlb_handler) <= 32) {
/* Just copy the handler. */
i_nop(&f);
else {
copy_handler(relocs, labels, split, split + 1, f);
+ move_labels(labels, f, f + 1, -1);
f++;
split++;
}
copy_handler(relocs, labels, split, p, final_handler);
final_len = (f - (final_handler + 32)) + (p - split);
}
-#endif /* CONFIG_MIPS64 */
+#endif /* CONFIG_64BIT */
+
+ resolve_relocs(relocs, labels);
+ printk("Synthesized TLB refill handler (%u instructions).\n",
+ final_len);
+
+#ifdef DEBUG_TLB
+ {
+ int i;
+
+ f = final_handler;
+#ifdef CONFIG_64BIT
+ if (final_len > 32)
+ final_len = 64;
+ else
+ f = final_handler + 32;
+#endif /* CONFIG_64BIT */
+ for (i = 0; i < final_len; i++)
+ printk("%08x\n", f[i]);
+ }
+#endif
+
+ memcpy((void *)ebase, final_handler, 0x100);
+}
+
+/*
+ * TLB load/store/modify handlers.
+ *
+ * Only the fastpath gets synthesized at runtime; the slowpath for
+ * do_page_fault remains normal asm.
+ */
+extern void tlb_do_page_fault_0(void);
+extern void tlb_do_page_fault_1(void);
+
+#define __tlb_handler_align \
+ __attribute__((__aligned__(1 << CONFIG_MIPS_L1_CACHE_SHIFT)))
+
+/*
+ * 128 instructions for the fastpath handler is generous and should
+ * never be exceeded.
+ */
+#define FASTPATH_SIZE 128
+
+u32 __tlb_handler_align handle_tlbl[FASTPATH_SIZE];
+u32 __tlb_handler_align handle_tlbs[FASTPATH_SIZE];
+u32 __tlb_handler_align handle_tlbm[FASTPATH_SIZE];
+
+static void __init
+iPTE_LW(u32 **p, struct label **l, unsigned int pte, unsigned int ptr)
+{
+#ifdef CONFIG_SMP
+# ifdef CONFIG_64BIT_PHYS_ADDR
+ if (cpu_has_64bits)
+ i_lld(p, pte, 0, ptr);
+ else
+# endif
+ i_LL(p, pte, 0, ptr);
+#else
+# ifdef CONFIG_64BIT_PHYS_ADDR
+ if (cpu_has_64bits)
+ i_ld(p, pte, 0, ptr);
+ else
+# endif
+ i_LW(p, pte, 0, ptr);
+#endif
+}
+
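+/*
+ * Store an updated PTE.  Under SMP the sc/scd may fail, in which
+ * case we branch back to label_smp_pgtable_change and retry from
+ * the locked load.  Without 64-bit operations a 64-bit PTE is
+ * updated as two 32-bit halves.
+ */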
+static void __init
+iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, unsigned int ptr,
+ unsigned int mode)
+{
+#ifdef CONFIG_64BIT_PHYS_ADDR
+ unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
+#endif
+
+ i_ori(p, pte, pte, mode);
+#ifdef CONFIG_SMP
+# ifdef CONFIG_64BIT_PHYS_ADDR
+ if (cpu_has_64bits)
+ i_scd(p, pte, 0, ptr);
+ else
+# endif
+ i_SC(p, pte, 0, ptr);
+
+ if (r10000_llsc_war())
+ il_beqzl(p, r, pte, label_smp_pgtable_change);
+ else
+ il_beqz(p, r, pte, label_smp_pgtable_change);
+
+# ifdef CONFIG_64BIT_PHYS_ADDR
+ if (!cpu_has_64bits) {
+ /* no i_nop needed */
+ i_ll(p, pte, sizeof(pte_t) / 2, ptr);
+ i_ori(p, pte, pte, hwmode);
+ i_sc(p, pte, sizeof(pte_t) / 2, ptr);
+ il_beqz(p, r, pte, label_smp_pgtable_change);
+ /* no i_nop needed */
+ i_lw(p, pte, 0, ptr);
+ } else
+ i_nop(p);
+# else
+ i_nop(p);
+# endif
+#else
+# ifdef CONFIG_64BIT_PHYS_ADDR
+ if (cpu_has_64bits)
+ i_sd(p, pte, 0, ptr);
+ else
+# endif
+ i_SW(p, pte, 0, ptr);
+
+# ifdef CONFIG_64BIT_PHYS_ADDR
+ if (!cpu_has_64bits) {
+ i_lw(p, pte, sizeof(pte_t) / 2, ptr);
+ i_ori(p, pte, pte, hwmode);
+ i_sw(p, pte, sizeof(pte_t) / 2, ptr);
+ i_lw(p, pte, 0, ptr);
+ }
+# endif
+#endif
+}
+
+/*
+ * Check if PTE is present; if not, jump to LABEL.  PTR points to
+ * the page table where this PTE is located.  PTE will be reloaded
+ * with its original value.
+ */
+static void __init
+build_pte_present(u32 **p, struct label **l, struct reloc **r,
+ unsigned int pte, unsigned int ptr, enum label_id lid)
+{
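+	/*
+	 * The andi/xori pair leaves PTE nonzero iff _PAGE_PRESENT or
+	 * _PAGE_READ is clear; the reload in the branch delay slot
+	 * restores the original PTE either way.
+	 */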
+ i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+ i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+ il_bnez(p, r, pte, lid);
+ iPTE_LW(p, l, pte, ptr);
+}
+
+/* Make PTE valid, store result in PTR. */
+static void __init
+build_make_valid(u32 **p, struct reloc **r, unsigned int pte,
+ unsigned int ptr)
+{
+ unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
+
+ iPTE_SW(p, r, pte, ptr, mode);
+}
+
+/*
+ * Check if PTE can be written to; if not, branch to LABEL.
+ * Regardless, restore PTE with the value from PTR when done.
+ */
+static void __init
+build_pte_writable(u32 **p, struct label **l, struct reloc **r,
+ unsigned int pte, unsigned int ptr, enum label_id lid)
+{
+ i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
+ i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
+ il_bnez(p, r, pte, lid);
+ iPTE_LW(p, l, pte, ptr);
+}
+
+/*
+ * Make PTE writable, update software status bits as well, then store
+ * at PTR.
+ */
+static void __init
+build_make_write(u32 **p, struct reloc **r, unsigned int pte,
+ unsigned int ptr)
+{
+ unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
+ | _PAGE_DIRTY);
+
+ iPTE_SW(p, r, pte, ptr, mode);
+}
+
+/*
+ * Check if PTE can be modified; if not, branch to LABEL.
+ * Regardless, restore PTE with the value from PTR when done.
+ */
+static void __init
+build_pte_modifiable(u32 **p, struct label **l, struct reloc **r,
+ unsigned int pte, unsigned int ptr, enum label_id lid)
+{
+ i_andi(p, pte, pte, _PAGE_WRITE);
+ il_beqz(p, r, pte, lid);
+ iPTE_LW(p, l, pte, ptr);
+}
+
+/*
+ * R3000 style TLB load/store/modify handlers.
+ */
+
+/*
+ * This places the pte into ENTRYLO0 and writes it with tlbwi.
+ * Then it returns.
+ */
+static void __init
+build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
+{
+ i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
+ i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
+ i_tlbwi(p);
+ i_jr(p, tmp);
+ i_rfe(p); /* branch delay */
+}
+
+/*
+ * This places the pte into ENTRYLO0 and writes it with tlbwi
+ * or tlbwr as appropriate. This is because the index register
+ * may have the probe fail bit set as a result of a trap on a
+ * kseg2 access, i.e. without refill. Then it returns.
+ */
+static void __init
+build_r3000_tlb_reload_write(u32 **p, struct label **l, struct reloc **r,
+ unsigned int pte, unsigned int tmp)
+{
+ i_mfc0(p, tmp, C0_INDEX);
+ i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
+ il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
+ i_mfc0(p, tmp, C0_EPC); /* branch delay */
+ i_tlbwi(p); /* cp0 delay */
+ i_jr(p, tmp);
+ i_rfe(p); /* branch delay */
+ l_r3000_write_probe_fail(l, *p);
+ i_tlbwr(p); /* cp0 delay */
+ i_jr(p, tmp);
+ i_rfe(p); /* branch delay */
+}
+
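+/*
+ * On the R3000 cp0 reads and loads take effect one instruction
+ * late; the CONTEXT and pgd computations below are interleaved so
+ * that every delay slot does useful work.
+ */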
+static void __init
+build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
+ unsigned int ptr)
+{
+ long pgdc = (long)pgd_current;
+
+ i_mfc0(p, pte, C0_BADVADDR);
+ i_lui(p, ptr, rel_hi(pgdc)); /* cp0 delay */
+ i_lw(p, ptr, rel_lo(pgdc), ptr);
+ i_srl(p, pte, pte, 22); /* load delay */
+ i_sll(p, pte, pte, 2);
+ i_addu(p, ptr, ptr, pte);
+ i_mfc0(p, pte, C0_CONTEXT);
+ i_lw(p, ptr, 0, ptr); /* cp0 delay */
+ i_andi(p, pte, pte, 0xffc); /* load delay */
+ i_addu(p, ptr, ptr, pte);
+ i_lw(p, pte, 0, ptr);
+ i_tlbp(p); /* load delay */
+}
+
+static void __init build_r3000_tlb_load_handler(void)
+{
+ u32 *p = handle_tlbl;
+ struct label *l = labels;
+ struct reloc *r = relocs;
+
+ memset(handle_tlbl, 0, sizeof(handle_tlbl));
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ build_r3000_tlbchange_handler_head(&p, K0, K1);
+ build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
+ i_nop(&p); /* load delay */
+ build_make_valid(&p, &r, K0, K1);
+ build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
+
+ l_nopage_tlbl(&l, p);
+ i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
+ i_nop(&p);
+
+ if ((p - handle_tlbl) > FASTPATH_SIZE)
+ panic("TLB load handler fastpath space exceeded");
+
+ resolve_relocs(relocs, labels);
+ printk("Synthesized TLB load handler fastpath (%u instructions).\n",
+ (unsigned int)(p - handle_tlbl));
+
+#ifdef DEBUG_TLB
+ {
+ int i;
+
+ for (i = 0; i < (p - handle_tlbl); i++)
+ printk("%08x\n", handle_tlbl[i]);
+ }
+#endif
+}
+
+static void __init build_r3000_tlb_store_handler(void)
+{
+ u32 *p = handle_tlbs;
+ struct label *l = labels;
+ struct reloc *r = relocs;
+
+ memset(handle_tlbs, 0, sizeof(handle_tlbs));
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ build_r3000_tlbchange_handler_head(&p, K0, K1);
+ build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
+ i_nop(&p); /* load delay */
+ build_make_write(&p, &r, K0, K1);
+ build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
+
+ l_nopage_tlbs(&l, p);
+ i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
+ i_nop(&p);
+
+ if ((p - handle_tlbs) > FASTPATH_SIZE)
+ panic("TLB store handler fastpath space exceeded");
+
+ resolve_relocs(relocs, labels);
+ printk("Synthesized TLB store handler fastpath (%u instructions).\n",
+ (unsigned int)(p - handle_tlbs));
+
+#ifdef DEBUG_TLB
+ {
+ int i;
+
+ for (i = 0; i < (p - handle_tlbs); i++)
+ printk("%08x\n", handle_tlbs[i]);
+ }
+#endif
+}
+
+static void __init build_r3000_tlb_modify_handler(void)
+{
+ u32 *p = handle_tlbm;
+ struct label *l = labels;
+ struct reloc *r = relocs;
+
+ memset(handle_tlbm, 0, sizeof(handle_tlbm));
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ build_r3000_tlbchange_handler_head(&p, K0, K1);
+ build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
+ i_nop(&p); /* load delay */
+ build_make_write(&p, &r, K0, K1);
+ build_r3000_pte_reload_tlbwi(&p, K0, K1);
+
+ l_nopage_tlbm(&l, p);
+ i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
+ i_nop(&p);
+
+ if ((p - handle_tlbm) > FASTPATH_SIZE)
+ panic("TLB modify handler fastpath space exceeded");
+
+ resolve_relocs(relocs, labels);
+ printk("Synthesized TLB modify handler fastpath (%u instructions).\n",
+ (unsigned int)(p - handle_tlbm));
+
+#ifdef DEBUG_TLB
+ {
+ int i;
+
+ for (i = 0; i < (p - handle_tlbm); i++)
+ printk("%08x\n", handle_tlbm[i]);
+ }
+#endif
+}
+
+/*
+ * R4000 style TLB load/store/modify handlers.
+ */
+static void __init
+build_r4000_tlbchange_handler_head(u32 **p, struct label **l,
+ struct reloc **r, unsigned int pte,
+ unsigned int ptr)
+{
+#ifdef CONFIG_64BIT
+ build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
+#else
+ build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
+#endif
+
+ i_MFC0(p, pte, C0_BADVADDR);
+ i_LW(p, ptr, 0, ptr);
+ i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
+ i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
+ i_ADDU(p, ptr, ptr, pte);
+
+#ifdef CONFIG_SMP
+ l_smp_pgtable_change(l, *p);
+#endif
+ iPTE_LW(p, l, pte, ptr); /* get even pte */
+ build_tlb_probe_entry(p);
+}
+
+static void __init
+build_r4000_tlbchange_handler_tail(u32 **p, struct label **l,
+ struct reloc **r, unsigned int tmp,
+ unsigned int ptr)
+{
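+	/*
+	 * The ori/xori pair clears the sizeof(pte_t) bit of ptr,
+	 * pointing it back at the even PTE of the pair without needing
+	 * a scratch register.
+	 */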
+ i_ori(p, ptr, ptr, sizeof(pte_t));
+ i_xori(p, ptr, ptr, sizeof(pte_t));
+ build_update_entries(p, tmp, ptr);
+ build_tlb_write_entry(p, l, r, tlb_indexed);
+ l_leave(l, *p);
+ i_eret(p); /* return from trap */
+
+#ifdef CONFIG_64BIT
+ build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
+#endif
+}
+
+static void __init build_r4000_tlb_load_handler(void)
+{
+ u32 *p = handle_tlbl;
+ struct label *l = labels;
+ struct reloc *r = relocs;
+
+ memset(handle_tlbl, 0, sizeof(handle_tlbl));
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ if (bcm1250_m3_war()) {
+ i_MFC0(&p, K0, C0_BADVADDR);
+ i_MFC0(&p, K1, C0_ENTRYHI);
+ i_xor(&p, K0, K0, K1);
+ i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+ il_bnez(&p, &r, K0, label_leave);
+ /* No need for i_nop */
+ }
+
+ build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
+ build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
+ build_make_valid(&p, &r, K0, K1);
+ build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+
+ l_nopage_tlbl(&l, p);
+ i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
+ i_nop(&p);
+
+ if ((p - handle_tlbl) > FASTPATH_SIZE)
+ panic("TLB load handler fastpath space exceeded");
resolve_relocs(relocs, labels);
- printk("Synthesized TLB handler (%u instructions).\n", final_len);
+ printk("Synthesized TLB load handler fastpath (%u instructions).\n",
+ (unsigned int)(p - handle_tlbl));
#ifdef DEBUG_TLB
{
int i;
- for (i = 0; i < 64; i++)
- printk("%08x\n", final_handler[i]);
+ for (i = 0; i < (p - handle_tlbl); i++)
+ printk("%08x\n", handle_tlbl[i]);
}
#endif
+}
+
+static void __init build_r4000_tlb_store_handler(void)
+{
+ u32 *p = handle_tlbs;
+ struct label *l = labels;
+ struct reloc *r = relocs;
+
+ memset(handle_tlbs, 0, sizeof(handle_tlbs));
+ memset(labels, 0, sizeof(labels));
+	memset(relocs, 0, sizeof(relocs));
+
- memcpy((void *)CAC_BASE, final_handler, 0x100);
- flush_icache_range(CAC_BASE, CAC_BASE + 0x100);
+ build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
+ build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
+ build_make_write(&p, &r, K0, K1);
+ build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+
+ l_nopage_tlbs(&l, p);
+ i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
+ i_nop(&p);
+
+ if ((p - handle_tlbs) > FASTPATH_SIZE)
+ panic("TLB store handler fastpath space exceeded");
+
+ resolve_relocs(relocs, labels);
+ printk("Synthesized TLB store handler fastpath (%u instructions).\n",
+ (unsigned int)(p - handle_tlbs));
+
+#ifdef DEBUG_TLB
+ {
+ int i;
+
+ for (i = 0; i < (p - handle_tlbs); i++)
+ printk("%08x\n", handle_tlbs[i]);
+ }
+#endif
+}
+
+static void __init build_r4000_tlb_modify_handler(void)
+{
+ u32 *p = handle_tlbm;
+ struct label *l = labels;
+ struct reloc *r = relocs;
+
+ memset(handle_tlbm, 0, sizeof(handle_tlbm));
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
+ build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
+ /* Present and writable bits set, set accessed and dirty bits. */
+ build_make_write(&p, &r, K0, K1);
+ build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+
+ l_nopage_tlbm(&l, p);
+ i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
+ i_nop(&p);
+
+ if ((p - handle_tlbm) > FASTPATH_SIZE)
+ panic("TLB modify handler fastpath space exceeded");
+
+ resolve_relocs(relocs, labels);
+ printk("Synthesized TLB modify handler fastpath (%u instructions).\n",
+ (unsigned int)(p - handle_tlbm));
+
+#ifdef DEBUG_TLB
+ {
+ int i;
+
+ for (i = 0; i < (p - handle_tlbm); i++)
+ printk("%08x\n", handle_tlbm[i]);
+ }
+#endif
}
void __init build_tlb_refill_handler(void)
{
+ /*
+	 * The refill handler is generated per-CPU; multi-node systems
+	 * may have local storage for it.  The other handlers are only
+	 * needed once.
+ */
+ static int run_once = 0;
+
switch (current_cpu_data.cputype) {
-#ifdef CONFIG_MIPS32
case CPU_R2000:
case CPU_R3000:
case CPU_R3000A:
case CPU_TX3922:
case CPU_TX3927:
build_r3000_tlb_refill_handler();
+ if (!run_once) {
+ build_r3000_tlb_load_handler();
+ build_r3000_tlb_store_handler();
+ build_r3000_tlb_modify_handler();
+ run_once++;
+ }
break;
case CPU_R6000:
case CPU_R6000A:
panic("No R6000 TLB refill handler yet");
break;
-#endif
case CPU_R8000:
panic("No R8000 TLB refill handler yet");
default:
build_r4000_tlb_refill_handler();
+ if (!run_once) {
+ build_r4000_tlb_load_handler();
+ build_r4000_tlb_store_handler();
+ build_r4000_tlb_modify_handler();
+ run_once++;
+ }
}
}
+
+void __init flush_tlb_handlers(void)
+{
+ flush_icache_range((unsigned long)handle_tlbl,
+ (unsigned long)handle_tlbl + sizeof(handle_tlbl));
+ flush_icache_range((unsigned long)handle_tlbs,
+ (unsigned long)handle_tlbs + sizeof(handle_tlbs));
+ flush_icache_range((unsigned long)handle_tlbm,
+ (unsigned long)handle_tlbm + sizeof(handle_tlbm));
+}