X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fmips%2Fmm%2Ftlbex.c;h=492c518e7ba5d68b3676fbd7c18c572e7e4ca405;hb=a2f44b27303a5353859d77a3e96a1d3f33f56ab7;hp=ac4f4bfaae50af41ddf5184cd4e87e4c3752536f;hpb=134734d875a0a48d994ef20b9905209b4b8b6f75;p=linux-2.6.git diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index ac4f4bfaa..492c518e7 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -5,13 +5,22 @@ * * Synthesize TLB refill handlers at runtime. * - * Copyright (C) 2004,2005 by Thiemo Seufer + * Copyright (C) 2004,2005,2006 by Thiemo Seufer * Copyright (C) 2005 Maciej W. Rozycki + * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) + * + * ... and the days got worse and worse and now you see + * I've gone completly out of my mind. + * + * They're coming to take me a away haha + * they're coming to take me a away hoho hihi haha + * to the funny farm where code is beautiful all the time ... + * + * (Condolences to Napoleon XIV) */ #include -#include #include #include #include @@ -26,8 +35,6 @@ #include #include -/* #define DEBUG_TLB */ - static __init int __attribute__((unused)) r45k_bvahwbug(void) { /* XXX: We should probe for the presence of this bug, but we don't. */ @@ -68,6 +75,7 @@ enum fields BIMM = 0x040, JIMM = 0x080, FUNC = 0x100, + SET = 0x200 }; #define OP_MASK 0x2f @@ -86,13 +94,15 @@ enum fields #define JIMM_SH 0 #define FUNC_MASK 0x2f #define FUNC_SH 0 +#define SET_MASK 0x7 +#define SET_SH 0 enum opcode { insn_invalid, insn_addu, insn_addiu, insn_and, insn_andi, insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl, insn_bne, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0, - insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, + insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0, insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll, @@ -129,12 +139,13 @@ static __initdata struct insn insn_table[] = { { insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM }, { insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM }, { insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD }, - { insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD }, - { insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD }, + { insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD | SET}, + { insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD | SET}, { insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE }, { insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE }, { insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE }, { insn_dsrl, M(spec_op,0,0,0,0,dsrl_op), RT | RD | RE }, + { insn_dsrl32, M(spec_op,0,0,0,0,dsrl32_op), RT | RD | RE }, { insn_dsubu, M(spec_op,0,0,0,0,dsubu_op), RS | RT | RD }, { insn_eret, M(cop0_op,cop_op,0,0,0,eret_op), 0 }, { insn_j, M(j_op,0,0,0,0,0), JIMM }, @@ -145,8 +156,8 @@ static __initdata struct insn insn_table[] = { { insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM }, { insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM }, { insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM }, - { insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD }, - { insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD }, + { insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD | SET}, + { insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD | SET}, { insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM }, { insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 }, { insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM }, @@ -242,6 +253,14 @@ static __init u32 build_func(u32 arg) return arg & FUNC_MASK; } +static __init u32 
build_set(u32 arg) +{ + if (arg & ~SET_MASK) + printk(KERN_WARNING "TLB synthesizer field overflow\n"); + + return arg & SET_MASK; +} + /* * The order of opcode arguments is implicitly left to right, * starting with RS and ending with FUNC or IMM. @@ -273,6 +292,7 @@ static void __init build_insn(u32 **buf, enum opcode opc, ...) if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32)); if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32)); if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32)); + if (ip->fields & SET) op |= build_set(va_arg(ap, u32)); va_end(ap); **buf = op; @@ -358,14 +378,15 @@ I_u1s2(_bgezl); I_u1s2(_bltz); I_u1s2(_bltzl); I_u1u2s3(_bne); -I_u1u2(_dmfc0); -I_u1u2(_dmtc0); +I_u1u2u3(_dmfc0); +I_u1u2u3(_dmtc0); I_u2u1s3(_daddiu); I_u3u1u2(_daddu); I_u2u1u3(_dsll); I_u2u1u3(_dsll32); I_u2u1u3(_dsra); I_u2u1u3(_dsrl); +I_u2u1u3(_dsrl32); I_u3u1u2(_dsubu); I_0(_eret); I_u1(_j); @@ -376,8 +397,8 @@ I_u2s3u1(_ll); I_u2s3u1(_lld); I_u1s2(_lui); I_u2s3u1(_lw); -I_u1u2(_mfc0); -I_u1u2(_mtc0); +I_u1u2u3(_mfc0); +I_u1u2u3(_mtc0); I_u2u1u3(_ori); I_0(_rfe); I_u2s3u1(_sc); @@ -402,6 +423,9 @@ enum label_id { label_invalid, label_second_part, label_leave, +#ifdef MODULE_START + label_module_alloc, +#endif label_vmalloc, label_vmalloc_done, label_tlbw_hazard, @@ -434,6 +458,9 @@ static __init void build_label(struct label **lab, u32 *addr, L_LA(_second_part) L_LA(_leave) +#ifdef MODULE_START +L_LA(_module_alloc) +#endif L_LA(_vmalloc) L_LA(_vmalloc_done) L_LA(_tlbw_hazard) @@ -451,8 +478,8 @@ L_LA(_r3000_write_probe_fail) # define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh) # define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh) # define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh) -# define i_MFC0(buf, rt, rd) i_dmfc0(buf, rt, rd) -# define i_MTC0(buf, rt, rd) i_dmtc0(buf, rt, rd) +# define i_MFC0(buf, rt, rd...) i_dmfc0(buf, rt, rd) +# define i_MTC0(buf, rt, rd...) i_dmtc0(buf, rt, rd) # define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val) # define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd) # define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd) @@ -464,8 +491,8 @@ L_LA(_r3000_write_probe_fail) # define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh) # define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh) # define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh) -# define i_MFC0(buf, rt, rd) i_mfc0(buf, rt, rd) -# define i_MTC0(buf, rt, rd) i_mtc0(buf, rt, rd) +# define i_MFC0(buf, rt, rd...) i_mfc0(buf, rt, rd) +# define i_MTC0(buf, rt, rd...) i_mtc0(buf, rt, rd) # define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val) # define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd) # define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd) @@ -665,19 +692,27 @@ static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg, i_bgezl(p, reg, 0); } +static void __init __attribute__((unused)) +il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l) +{ + r_mips_pc16(r, *p, l); + i_bgez(p, reg, 0); +} + /* The only general purpose registers allowed in TLB handlers. 
*/ #define K0 26 #define K1 27 /* Some CP0 registers */ -#define C0_INDEX 0 -#define C0_ENTRYLO0 2 -#define C0_ENTRYLO1 3 -#define C0_CONTEXT 4 -#define C0_BADVADDR 8 -#define C0_ENTRYHI 10 -#define C0_EPC 14 -#define C0_XCONTEXT 20 +#define C0_INDEX 0, 0 +#define C0_ENTRYLO0 2, 0 +#define C0_TCBIND 2, 2 +#define C0_ENTRYLO1 3, 0 +#define C0_CONTEXT 4, 0 +#define C0_BADVADDR 8, 0 +#define C0_ENTRYHI 10, 0 +#define C0_EPC 14, 0 +#define C0_XCONTEXT 20, 0 #ifdef CONFIG_64BIT # define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT) @@ -706,6 +741,7 @@ static void __init build_r3000_tlb_refill_handler(void) { long pgdc = (long)pgd_current; u32 *p; + int i; memset(tlb_handler, 0, sizeof(tlb_handler)); p = tlb_handler; @@ -731,18 +767,16 @@ static void __init build_r3000_tlb_refill_handler(void) if (p > tlb_handler + 32) panic("TLB refill handler space exceeded"); - printk("Synthesized TLB refill handler (%u instructions).\n", - (unsigned int)(p - tlb_handler)); -#ifdef DEBUG_TLB - { - int i; + pr_info("Synthesized TLB refill handler (%u instructions).\n", + (unsigned int)(p - tlb_handler)); - for (i = 0; i < (p - tlb_handler); i++) - printk("%08x\n", tlb_handler[i]); - } -#endif + pr_debug("\t.set push\n"); + pr_debug("\t.set noreorder\n"); + for (i = 0; i < (p - tlb_handler); i++) + pr_debug("\t.word 0x%08x\n", tlb_handler[i]); + pr_debug("\t.set pop\n"); - memcpy((void *)CAC_BASE, tlb_handler, 0x80); + memcpy((void *)ebase, tlb_handler, 0x80); } /* @@ -852,6 +886,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l, case CPU_R10000: case CPU_R12000: + case CPU_R14000: case CPU_4KC: case CPU_SB1: case CPU_SB1A: @@ -883,6 +918,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l, case CPU_4KEC: case CPU_24K: case CPU_34K: + case CPU_74K: i_ehb(p); tlbw(p); break; @@ -947,40 +983,44 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r, * The vmalloc handling is not in the hotpath. */ i_dmfc0(p, tmp, C0_BADVADDR); +#ifdef MODULE_START + il_bltz(p, r, tmp, label_module_alloc); +#else il_bltz(p, r, tmp, label_vmalloc); +#endif /* No i_nop needed here, since the next insn doesn't touch TMP. */ #ifdef CONFIG_SMP -# ifdef CONFIG_BUILD_ELF64 +# ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC uses TCBind value as "CPU" index + */ + i_mfc0(p, ptr, C0_TCBIND); + i_dsrl(p, ptr, ptr, 19); +# else /* * 64 bit SMP running in XKPHYS has smp_processor_id() << 3 * stored in CONTEXT. */ i_dmfc0(p, ptr, C0_CONTEXT); i_dsrl(p, ptr, ptr, 23); +#endif i_LA_mostly(p, tmp, pgdc); i_daddu(p, ptr, ptr, tmp); i_dmfc0(p, tmp, C0_BADVADDR); i_ld(p, ptr, rel_lo(pgdc), ptr); -# else - /* - * 64 bit SMP running in compat space has the lower part of - * &pgd_current[smp_processor_id()] stored in CONTEXT. 
- */ - if (!in_compat_space_p(pgdc)) - panic("Invalid page directory address!"); - - i_dmfc0(p, ptr, C0_CONTEXT); - i_dsra(p, ptr, ptr, 23); - i_ld(p, ptr, 0, ptr); -# endif #else i_LA_mostly(p, ptr, pgdc); i_ld(p, ptr, rel_lo(pgdc), ptr); #endif l_vmalloc_done(l, *p); - i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3); /* get pgd offset in bytes */ + + if (PGDIR_SHIFT - 3 < 32) /* get pgd offset in bytes */ + i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3); + else + i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32); + i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3); i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */ i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */ @@ -1000,8 +1040,46 @@ build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r, { long swpd = (long)swapper_pg_dir; +#ifdef MODULE_START + long modd = (long)module_pg_dir; + + l_module_alloc(l, *p); + /* + * Assumption: + * VMALLOC_START >= 0xc000000000000000UL + * MODULE_START >= 0xe000000000000000UL + */ + i_SLL(p, ptr, bvaddr, 2); + il_bgez(p, r, ptr, label_vmalloc); + + if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START)) { + i_lui(p, ptr, rel_hi(MODULE_START)); /* delay slot */ + } else { + /* unlikely configuration */ + i_nop(p); /* delay slot */ + i_LA(p, ptr, MODULE_START); + } + i_dsubu(p, bvaddr, bvaddr, ptr); + + if (in_compat_space_p(modd) && !rel_lo(modd)) { + il_b(p, r, label_vmalloc_done); + i_lui(p, ptr, rel_hi(modd)); + } else { + i_LA_mostly(p, ptr, modd); + il_b(p, r, label_vmalloc_done); + i_daddiu(p, ptr, ptr, rel_lo(modd)); + } + + l_vmalloc(l, *p); + if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START) && + MODULE_START << 32 == VMALLOC_START) + i_dsll32(p, ptr, ptr, 0); /* typical case */ + else + i_LA(p, ptr, VMALLOC_START); +#else l_vmalloc(l, *p); i_LA(p, ptr, VMALLOC_START); +#endif i_dsubu(p, bvaddr, bvaddr, ptr); if (in_compat_space_p(swpd) && !rel_lo(swpd)) { @@ -1027,9 +1105,21 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */ #ifdef CONFIG_SMP +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC uses TCBind value as "CPU" index + */ + i_mfc0(p, ptr, C0_TCBIND); + i_LA_mostly(p, tmp, pgdc); + i_srl(p, ptr, ptr, 19); +#else + /* + * smp_processor_id() << 3 is stored in CONTEXT. + */ i_mfc0(p, ptr, C0_CONTEXT); i_LA_mostly(p, tmp, pgdc); i_srl(p, ptr, ptr, 23); +#endif i_addu(p, ptr, tmp, ptr); #else i_LA_mostly(p, ptr, pgdc); @@ -1045,7 +1135,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) static __init void build_adjust_context(u32 **p, unsigned int ctx) { - unsigned int shift = 4 - (PTE_T_LOG2 + 1); + unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); switch (current_cpu_data.cputype) { @@ -1144,6 +1234,7 @@ static void __init build_r4000_tlb_refill_handler(void) struct reloc *r = relocs; u32 *f; unsigned int final_len; + int i; memset(tlb_handler, 0, sizeof(tlb_handler)); memset(labels, 0, sizeof(labels)); @@ -1182,7 +1273,7 @@ static void __init build_r4000_tlb_refill_handler(void) * Overflow check: For the 64bit handler, we need at least one * free instruction slot for the wrap-around branch. In worst * case, if the intended insertion point is a delay slot, we - * need three, with the the second nop'ed and the third being + * need three, with the second nop'ed and the third being * unused. 
*/ #ifdef CONFIG_32BIT @@ -1241,26 +1332,23 @@ static void __init build_r4000_tlb_refill_handler(void) #endif /* CONFIG_64BIT */ resolve_relocs(relocs, labels); - printk("Synthesized TLB refill handler (%u instructions).\n", - final_len); - -#ifdef DEBUG_TLB - { - int i; + pr_info("Synthesized TLB refill handler (%u instructions).\n", + final_len); - f = final_handler; + f = final_handler; #ifdef CONFIG_64BIT - if (final_len > 32) - final_len = 64; - else - f = final_handler + 32; + if (final_len > 32) + final_len = 64; + else + f = final_handler + 32; #endif /* CONFIG_64BIT */ - for (i = 0; i < final_len; i++) - printk("%08x\n", f[i]); - } -#endif + pr_debug("\t.set push\n"); + pr_debug("\t.set noreorder\n"); + for (i = 0; i < final_len; i++) + pr_debug("\t.word 0x%08x\n", f[i]); + pr_debug("\t.set pop\n"); - memcpy((void *)CAC_BASE, final_handler, 0x100); + memcpy((void *)ebase, final_handler, 0x100); } /* @@ -1491,6 +1579,7 @@ static void __init build_r3000_tlb_load_handler(void) u32 *p = handle_tlbl; struct label *l = labels; struct reloc *r = relocs; + int i; memset(handle_tlbl, 0, sizeof(handle_tlbl)); memset(labels, 0, sizeof(labels)); @@ -1510,17 +1599,14 @@ static void __init build_r3000_tlb_load_handler(void) panic("TLB load handler fastpath space exceeded"); resolve_relocs(relocs, labels); - printk("Synthesized TLB load handler fastpath (%u instructions).\n", - (unsigned int)(p - handle_tlbl)); - -#ifdef DEBUG_TLB - { - int i; + pr_info("Synthesized TLB load handler fastpath (%u instructions).\n", + (unsigned int)(p - handle_tlbl)); - for (i = 0; i < (p - handle_tlbl); i++) - printk("%08x\n", handle_tlbl[i]); - } -#endif + pr_debug("\t.set push\n"); + pr_debug("\t.set noreorder\n"); + for (i = 0; i < (p - handle_tlbl); i++) + pr_debug("\t.word 0x%08x\n", handle_tlbl[i]); + pr_debug("\t.set pop\n"); } static void __init build_r3000_tlb_store_handler(void) @@ -1528,6 +1614,7 @@ static void __init build_r3000_tlb_store_handler(void) u32 *p = handle_tlbs; struct label *l = labels; struct reloc *r = relocs; + int i; memset(handle_tlbs, 0, sizeof(handle_tlbs)); memset(labels, 0, sizeof(labels)); @@ -1547,17 +1634,14 @@ static void __init build_r3000_tlb_store_handler(void) panic("TLB store handler fastpath space exceeded"); resolve_relocs(relocs, labels); - printk("Synthesized TLB store handler fastpath (%u instructions).\n", - (unsigned int)(p - handle_tlbs)); - -#ifdef DEBUG_TLB - { - int i; + pr_info("Synthesized TLB store handler fastpath (%u instructions).\n", + (unsigned int)(p - handle_tlbs)); - for (i = 0; i < (p - handle_tlbs); i++) - printk("%08x\n", handle_tlbs[i]); - } -#endif + pr_debug("\t.set push\n"); + pr_debug("\t.set noreorder\n"); + for (i = 0; i < (p - handle_tlbs); i++) + pr_debug("\t.word 0x%08x\n", handle_tlbs[i]); + pr_debug("\t.set pop\n"); } static void __init build_r3000_tlb_modify_handler(void) @@ -1565,6 +1649,7 @@ static void __init build_r3000_tlb_modify_handler(void) u32 *p = handle_tlbm; struct label *l = labels; struct reloc *r = relocs; + int i; memset(handle_tlbm, 0, sizeof(handle_tlbm)); memset(labels, 0, sizeof(labels)); @@ -1584,17 +1669,14 @@ static void __init build_r3000_tlb_modify_handler(void) panic("TLB modify handler fastpath space exceeded"); resolve_relocs(relocs, labels); - printk("Synthesized TLB modify handler fastpath (%u instructions).\n", - (unsigned int)(p - handle_tlbm)); - -#ifdef DEBUG_TLB - { - int i; + pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n", + (unsigned int)(p - handle_tlbm)); - for (i = 0; i 
< (p - handle_tlbm); i++) - printk("%08x\n", handle_tlbm[i]); - } -#endif + pr_debug("\t.set push\n"); + pr_debug("\t.set noreorder\n"); + for (i = 0; i < (p - handle_tlbm); i++) + pr_debug("\t.word 0x%08x\n", handle_tlbm[i]); + pr_debug("\t.set pop\n"); } /* @@ -1646,6 +1728,7 @@ static void __init build_r4000_tlb_load_handler(void) u32 *p = handle_tlbl; struct label *l = labels; struct reloc *r = relocs; + int i; memset(handle_tlbl, 0, sizeof(handle_tlbl)); memset(labels, 0, sizeof(labels)); @@ -1673,17 +1756,14 @@ static void __init build_r4000_tlb_load_handler(void) panic("TLB load handler fastpath space exceeded"); resolve_relocs(relocs, labels); - printk("Synthesized TLB load handler fastpath (%u instructions).\n", - (unsigned int)(p - handle_tlbl)); + pr_info("Synthesized TLB load handler fastpath (%u instructions).\n", + (unsigned int)(p - handle_tlbl)); -#ifdef DEBUG_TLB - { - int i; - - for (i = 0; i < (p - handle_tlbl); i++) - printk("%08x\n", handle_tlbl[i]); - } -#endif + pr_debug("\t.set push\n"); + pr_debug("\t.set noreorder\n"); + for (i = 0; i < (p - handle_tlbl); i++) + pr_debug("\t.word 0x%08x\n", handle_tlbl[i]); + pr_debug("\t.set pop\n"); } static void __init build_r4000_tlb_store_handler(void) @@ -1691,6 +1771,7 @@ static void __init build_r4000_tlb_store_handler(void) u32 *p = handle_tlbs; struct label *l = labels; struct reloc *r = relocs; + int i; memset(handle_tlbs, 0, sizeof(handle_tlbs)); memset(labels, 0, sizeof(labels)); @@ -1709,17 +1790,14 @@ static void __init build_r4000_tlb_store_handler(void) panic("TLB store handler fastpath space exceeded"); resolve_relocs(relocs, labels); - printk("Synthesized TLB store handler fastpath (%u instructions).\n", - (unsigned int)(p - handle_tlbs)); + pr_info("Synthesized TLB store handler fastpath (%u instructions).\n", + (unsigned int)(p - handle_tlbs)); -#ifdef DEBUG_TLB - { - int i; - - for (i = 0; i < (p - handle_tlbs); i++) - printk("%08x\n", handle_tlbs[i]); - } -#endif + pr_debug("\t.set push\n"); + pr_debug("\t.set noreorder\n"); + for (i = 0; i < (p - handle_tlbs); i++) + pr_debug("\t.word 0x%08x\n", handle_tlbs[i]); + pr_debug("\t.set pop\n"); } static void __init build_r4000_tlb_modify_handler(void) @@ -1727,6 +1805,7 @@ static void __init build_r4000_tlb_modify_handler(void) u32 *p = handle_tlbm; struct label *l = labels; struct reloc *r = relocs; + int i; memset(handle_tlbm, 0, sizeof(handle_tlbm)); memset(labels, 0, sizeof(labels)); @@ -1746,17 +1825,14 @@ static void __init build_r4000_tlb_modify_handler(void) panic("TLB modify handler fastpath space exceeded"); resolve_relocs(relocs, labels); - printk("Synthesized TLB modify handler fastpath (%u instructions).\n", - (unsigned int)(p - handle_tlbm)); - -#ifdef DEBUG_TLB - { - int i; - - for (i = 0; i < (p - handle_tlbm); i++) - printk("%08x\n", handle_tlbm[i]); - } -#endif + pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n", + (unsigned int)(p - handle_tlbm)); + + pr_debug("\t.set push\n"); + pr_debug("\t.set noreorder\n"); + for (i = 0; i < (p - handle_tlbm); i++) + pr_debug("\t.word 0x%08x\n", handle_tlbm[i]); + pr_debug("\t.set pop\n"); } void __init build_tlb_refill_handler(void)
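Reviewer note (appended commentary, not part of the patch): the hunks above teach the run-time micro-assembler about the CP0 "select" field, so that mfc0/mtc0/dmfc0/dmtc0 can address registers such as C0_TCBIND (CP0 register 2, select 2), which the SMTC path reads as its per-TC "CPU" index. The select value occupies bits 2..0 of the instruction word, which is what the new SET_MASK/SET_SH pair and build_set() encode, and the C0_* macros are redefined as "register, select" pairs so a single i_MTC0(buf, rt, C0_TCBIND) supplies all three operands through the new variadic "rd..." wrappers. The standalone sketch below illustrates that field-packing scheme outside the kernel; the names (build_field, encode_mtc0, MTC0_TEMPLATE) and the RT/RD shift constants are invented for the example and are not taken from tlbex.c.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/*
 * Field layout of a MIPS32 MTC0 instruction: rt in bits 20..16,
 * rd (the CP0 register number) in bits 15..11, sel in bits 2..0.
 */
#define RT_MASK		0x1f
#define RT_SH		16
#define RD_MASK		0x1f
#define RD_SH		11
#define SEL_MASK	0x7	/* the field the patch adds as SET */
#define SEL_SH		0

/*
 * Opcode template: COP0 major opcode (010000) in bits 31..26 with the
 * MT sub-opcode (00100) in the rs slot, everything else zero.
 */
#define MTC0_TEMPLATE	0x40800000u

/* Mask a field and warn on overflow, like build_set()/build_func(). */
static uint32_t build_field(uint32_t arg, uint32_t mask, unsigned int shift)
{
	if (arg & ~mask)
		fprintf(stderr, "field overflow\n");

	return (arg & mask) << shift;
}

/* Encode "mtc0 rt, rd, sel" by OR-ing the fields into the template. */
static uint32_t encode_mtc0(unsigned int rt, unsigned int rd, unsigned int sel)
{
	return MTC0_TEMPLATE |
	       build_field(rt, RT_MASK, RT_SH) |
	       build_field(rd, RD_MASK, RD_SH) |
	       build_field(sel, SEL_MASK, SEL_SH);
}

int main(void)
{
	/*
	 * "mtc0 $26, $2, 2": write k0 to C0_TCBIND, the SMTC case the
	 * patch adds; prints ".word 0x409a1002".
	 */
	printf("\t.word 0x%08" PRIx32 "\n", encode_mtc0(26, 2, 2));
	return 0;
}

A related detail worth noting for readers of the 64-bit refill path: dsrl encodes only a 5-bit shift amount, so when PGDIR_SHIFT - 3 is 32 or larger the patch has to switch to dsrl32, which shifts by the encoded amount plus 32 — hence the new PGDIR_SHIFT - 3 < 32 test and the insn_dsrl32 table entry.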