2 * TLB exception handling code for r4k.
4 * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse
6 * Multi-cpu abstraction and reworking:
7 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
9 * Carsten Langgaard, carstenl@mips.com
10 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
12 #include <linux/init.h>
13 #include <linux/config.h>
16 #include <asm/offset.h>
17 #include <asm/cachectl.h>
18 #include <asm/fpregdef.h>
19 #include <asm/mipsregs.h>
21 #include <asm/pgtable-bits.h>
22 #include <asm/regdef.h>
23 #include <asm/stackframe.h>
26 #define TLB_OPTIMIZE /* If you are paranoid, disable this. */
28 #ifdef CONFIG_64BIT_PHYS_ADDR
/* 64-bit physical address build: wider (presumably 8-byte) PTEs, so the
 * Context/BadVAddr-derived table indices use these masks/shift. */
34 #define PTEP_INDX_MSK 0xff0
35 #define PTE_INDX_MSK 0xff8
36 #define PTE_INDX_SHIFT 9
/* NOTE(review): the #else separating the two arms is missing from this
 * extraction (numbering jumps 36 -> 43); the values below are the
 * 32-bit physical address variants — verify against pristine source. */
43 #define PTEP_INDX_MSK 0xff8
44 #define PTE_INDX_MSK 0xffc
45 #define PTE_INDX_SHIFT 10
/*
49 * ABUSE of CPP macros 101.
51 * After this macro runs, the pte faulted on is
52 * in register PTE, a ptr into the table in which
53 * the pte belongs is in PTR.
 *
 * GET_PGD(scratch, ptr): derive a pointer into the current pgd from
 * CP0_CONTEXT and the pgd_current variable.
 * NOTE(review): both GET_PGD variants below are truncated in this
 * extraction (numbering jumps 59 -> 62 and 62 -> 65) — verify against
 * the pristine source before assembling.
 */
57 #define GET_PGD(scratch, ptr) \
58 mfc0 ptr, CP0_CONTEXT; \
59 la scratch, pgd_current;\
62 addu ptr, scratch, ptr; \
65 #define GET_PGD(scratch, ptr) \
/*
 * LOAD_PTE(pte, ptr): fetch the PTE for the address in CP0_BADVADDR
 * into PTE, leaving a pointer to its page-table slot in PTR.
 * NOTE(review): interior lines are missing from this extraction
 * (numbering jumps 72 -> 75); the two mfc0/srl sequences appear to be
 * the two halves of #ifdef'd variants — verify against pristine source.
 */
69 #define LOAD_PTE(pte, ptr) \
71 mfc0 pte, CP0_BADVADDR; \
72 srl pte, pte, _PGDIR_SHIFT; \
75 mfc0 pte, CP0_BADVADDR; \
77 srl pte, pte, PTE_INDX_SHIFT; \
78 and pte, pte, PTE_INDX_MSK; \
82 /* This places the even/odd pte pair in the page
83 * table at PTR into ENTRYLO0 and ENTRYLO1 using
84 * TMP as a scratch register.
 *
 * The ori/xori pair clears the PTE_SIZE bit, aligning PTR down onto the
 * even PTE of the pair; the odd PTE is then at PTE_SIZE(ptr).  Each PTE
 * is shifted right by 6 to convert it to EntryLo register format.
 * NOTE(review): a line is missing between 89 and 91 in this extraction
 * (presumably the even-PTE load into ptr) — verify against pristine
 * source.
 */
86 #define PTE_RELOAD(ptr, tmp) \
87 ori ptr, ptr, PTE_SIZE; \
88 xori ptr, ptr, PTE_SIZE; \
89 PTE_L tmp, PTE_SIZE(ptr); \
91 PTE_SRL tmp, tmp, 6; \
92 P_MTC0 tmp, CP0_ENTRYLO1; \
93 PTE_SRL ptr, ptr, 6; \
94 P_MTC0 ptr, CP0_ENTRYLO0
/*
 * DO_FAULT(write): bail out of the fast path — put the faulting address
 * (CP0_BADVADDR) in a2 and exit via ret_from_exception, presumably after
 * calling the C page-fault handler.
 * NOTE(review): heavily truncated in this extraction (original lines
 * 97-98 and 100-104 missing) — verify against pristine source.
 */
96 #define DO_FAULT(write) \
99 mfc0 a2, CP0_BADVADDR; \
105 j ret_from_exception; \
109 /* Check if PTE is present, if not then jump to LABEL.
110 * PTR points to the page table where this PTE is located,
111 * when the macro is done executing PTE will be restored
112 * with its original value.
 *
 * The andi/xori pair tests that both _PAGE_PRESENT and _PAGE_READ are
 * set: the result is zero only when both bits were set.
 * NOTE(review): the branch to LABEL and the PTE-restoring reload are
 * missing from this extraction (numbering jumps 116 -> 120) — verify
 * against pristine source.
 */
114 #define PTE_PRESENT(pte, ptr, label) \
115 andi pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
116 xori pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
120 /* Make PTE valid, store result in PTR. */
/* Sets the valid and accessed software/hardware bits in PTE.
 * NOTE(review): the store back through PTR is missing from this
 * extraction (numbering jumps 122 -> 125) — verify against pristine
 * source. */
121 #define PTE_MAKEVALID(pte, ptr) \
122 ori pte, pte, (_PAGE_VALID | _PAGE_ACCESSED); \
125 /* Check if PTE can be written to, if not branch to LABEL.
126 * Regardless restore PTE with value from PTR when done.
 *
 * The andi/xori pair yields zero only when both _PAGE_PRESENT and
 * _PAGE_WRITE were set.
 * NOTE(review): the branch and the restoring reload are missing from
 * this extraction (numbering jumps 130 -> 134) — verify against
 * pristine source.
 */
128 #define PTE_WRITABLE(pte, ptr, label) \
129 andi pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
130 xori pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
134 /* Make PTE writable, update software status bits as well,
 * i.e. sets accessed/modified (software) and valid/dirty (hardware)
 * bits in one go.
 * NOTE(review): the store back through PTR is missing from this
 * extraction (numbering jumps 139 -> 144) — verify against pristine
 * source. */
137 #define PTE_MAKEWRITE(pte, ptr) \
138 ori pte, pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | \
139 _PAGE_VALID | _PAGE_DIRTY); \
/*
 * GET_PTE_OFF(reg): scale the raw CP0_CONTEXT-derived value in REG into
 * a byte offset appropriate for this configuration's PTE size.
 * NOTE(review): the #else before the final arm and the #endif are
 * missing from this extraction (numbering jumps 147 -> 149) — verify
 * against pristine source.
 */
144 #ifdef CONFIG_64BIT_PHYS_ADDR
145 #define GET_PTE_OFF(reg)
146 #elif CONFIG_CPU_VR41XX
147 #define GET_PTE_OFF(reg) srl reg, reg, 3
149 #define GET_PTE_OFF(reg) srl reg, reg, 1
153 * These handlers must be written in a relocatable manner
154 * because based upon the cpu type an arbitrary one of the
155 * following pieces of code will be copied to the KSEG0
158 /* TLB refill, EXL == 0, R4xx0, non-R4600 version */
/*
 * TLB refill handler, EXL == 0, for R4xx0 (non-R4600) CPUs.
 * Using only the kernel scratch registers k0/k1: walk pgd -> even/odd
 * PTE pair for the address in CP0_BADVADDR, convert both PTEs to
 * EntryLo format (srl 6), and write a random TLB entry.  Must be
 * relocatable — it is copied to the KSEG0 exception vector.
 * NOTE(review): original numbering is non-contiguous here (165 -> 168,
 * 179 -> 181, 181 -> 183); at least the pgd-index shift appears to be
 * missing — verify against pristine source before assembling.
 */
161 LEAF(except_vec0_r4000)
163 GET_PGD(k0, k1) # get pgd pointer
164 mfc0 k0, CP0_BADVADDR # Get faulting address
165 srl k0, k0, _PGDIR_SHIFT # get pgd only bits
168 addu k1, k1, k0 # add in pgd offset
169 mfc0 k0, CP0_CONTEXT # get context reg
171 GET_PTE_OFF(k0) # get pte offset
172 and k0, k0, PTEP_INDX_MSK # mask pte-pair index
173 addu k1, k1, k0 # add in offset
174 PTE_L k0, 0(k1) # get even pte
175 PTE_L k1, PTE_SIZE(k1) # get odd pte
176 PTE_SRL k0, k0, 6 # convert to entrylo0
177 P_MTC0 k0, CP0_ENTRYLO0 # load it
178 PTE_SRL k1, k1, 6 # convert to entrylo1
179 P_MTC0 k1, CP0_ENTRYLO1 # load it
181 tlbwr # write random tlb entry
183 eret # return from trap
184 END(except_vec0_r4000)
186 /* TLB refill, EXL == 0, R4600 version */
/*
 * TLB refill handler, EXL == 0, R4600 variant.
 * Same pgd -> PTE-pair -> EntryLo0/1 -> tlbwr walk as the r4000
 * handler, using only k0/k1.
 * NOTE(review): several interior lines (pgd add, context read, even-PTE
 * load/shift, tlbwr/eret) are missing from this extraction — verify
 * against pristine source before assembling.
 */
187 LEAF(except_vec0_r4600)
189 GET_PGD(k0, k1) # get pgd pointer
190 mfc0 k0, CP0_BADVADDR # get faulting address
191 srl k0, k0, _PGDIR_SHIFT # get pgd only bits
192 sll k0, k0, 2 # log2(sizeof(pgd_t))
196 GET_PTE_OFF(k0) # get pte offset
197 and k0, k0, PTEP_INDX_MSK # mask pte-pair index
200 PTE_L k1, PTE_SIZE(k1) # get odd pte
202 P_MTC0 k0, CP0_ENTRYLO0 # load even half
204 P_MTC0 k1, CP0_ENTRYLO1 # load odd half
209 END(except_vec0_r4600)
211 /* TLB refill, EXL == 0, R52x0 "Nevada" version */
213 * This version has a bug workaround for the Nevada. It seems
214 * as if under certain circumstances the move from cp0_context
215 * might produce a bogus result when the mfc0 instruction and
216 * its consumer are in a different cacheline or a load instruction,
217 * probably any memory reference, is between them. This is
218 * potentially slower than the R4000 version, so we use this
/*
 * TLB refill handler, EXL == 0, R52x0 "Nevada" variant.
 * Reads CP0_BADVADDR and pgd_current BEFORE CP0_CONTEXT (see the errata
 * comment above): no memory reference may sit between the mfc0 from
 * Context and its consumer.  QED-specified nops pad around tlbwr.
 * NOTE(review): numbering jumps 229 -> 231 and 241 -> 243 — lines are
 * missing from this extraction; verify against pristine source.
 */
223 LEAF(except_vec0_nevada)
225 mfc0 k0, CP0_BADVADDR # Get faulting address
226 srl k0, k0, _PGDIR_SHIFT # get pgd only bits
227 lw k1, pgd_current # get pgd pointer
228 sll k0, k0, 2 # log2(sizeof(pgd_t))
229 addu k1, k1, k0 # add in pgd offset
231 mfc0 k0, CP0_CONTEXT # get context reg
232 GET_PTE_OFF(k0) # get pte offset
233 and k0, k0, PTEP_INDX_MSK # mask pte-pair index
234 addu k1, k1, k0 # add in offset
235 PTE_L k0, 0(k1) # get even pte
236 PTE_L k1, PTE_SIZE(k1) # get odd pte
237 PTE_SRL k0, k0, 6 # convert to entrylo0
238 P_MTC0 k0, CP0_ENTRYLO0 # load it
239 PTE_SRL k1, k1, 6 # convert to entrylo1
240 P_MTC0 k1, CP0_ENTRYLO1 # load it
241 nop # QED specified nops
243 tlbwr # write random tlb entry
244 nop # traditional nop
245 eret # return from trap
246 END(except_vec0_nevada)
248 /* TLB refill, EXL == 0, SB1 with M3 errata handling version */
/*
 * TLB refill handler, EXL == 0, SB1 with M3 errata handling.
 * The leading BadVAddr read and PAGE_SHIFT+1 shift feed the M3 errata
 * check (its compare/branch to label 1 is missing from this extraction);
 * the remainder is the standard pgd -> PTE pair -> EntryLo0/1 -> tlbwr
 * refill walk using only k0/k1.
 * NOTE(review): numbering is non-contiguous (251 -> 254 -> 257) and the
 * END(except_vec0_sb1) marker is absent here — verify against pristine
 * source before assembling.
 */
249 LEAF(except_vec0_sb1)
251 mfc0 k0, CP0_BADVADDR # fetch faulting address for errata test
254 srl k0, k0, PAGE_SHIFT+1 # page-pair number for M3 check
257 GET_PGD(k0, k1) # get pgd pointer
258 mfc0 k0, CP0_BADVADDR # Get faulting address
259 srl k0, k0, _PGDIR_SHIFT # get pgd only bits
261 addu k1, k1, k0 # add in pgd offset
262 mfc0 k0, CP0_CONTEXT # get context reg
264 GET_PTE_OFF(k0) # get pte offset
265 and k0, k0, PTEP_INDX_MSK # mask pte-pair index
266 addu k1, k1, k0 # add in offset
267 PTE_L k0, 0(k1) # get even pte
268 PTE_L k1, PTE_SIZE(k1) # get odd pte
269 PTE_SRL k0, k0, 6 # convert to entrylo0
270 P_MTC0 k0, CP0_ENTRYLO0 # load it
271 PTE_SRL k1, k1, 6 # convert to entrylo1
272 P_MTC0 k1, CP0_ENTRYLO1 # load it
273 tlbwr # write random tlb entry
274 1: eret # return from trap
277 /* TLB refill, EXL == 0, R4[40]00/R5000 badvaddr hwbug version */
/*
 * TLB refill handler, EXL == 0, R4[40]00/R5000 BadVAddr hardware-bug
 * workaround variant.  Same refill walk shape as the r4000 handler.
 * NOTE(review): this extraction drops many interior lines (the #ifndef
 * at 287 has no visible #endif, numbering jumps 283 -> 287, 293 -> 297,
 * 300 -> 306) — verify against pristine source before assembling.
 */
278 LEAF(except_vec0_r45k_bvahwbug)
280 GET_PGD(k0, k1) # get pgd pointer
281 mfc0 k0, CP0_BADVADDR # get faulting address
282 srl k0, k0, _PGDIR_SHIFT # get pgd only bits
283 sll k0, k0, 2 # log2(sizeof(pgd_t))
287 #ifndef CONFIG_64BIT_PHYS_ADDR
290 and k0, k0, PTEP_INDX_MSK # mask pte-pair index
293 PTE_L k1, PTE_SIZE(k1) # get odd pte
297 P_MTC0 k0, CP0_ENTRYLO0 # load even half
300 P_MTC0 k1, CP0_ENTRYLO1 # load odd half
306 END(except_vec0_r45k_bvahwbug)
309 /* TLB refill, EXL == 0, R4000 MP badvaddr hwbug version */
/*
 * TLB refill handler, EXL == 0, R4000 MP BadVAddr hardware-bug variant.
 * Same refill walk shape as the r4000 handler.
 * NOTE(review): interior lines missing from this extraction (the
 * #ifndef at 319 has no visible #endif; numbering jumps 315 -> 319,
 * 325 -> 329, 332 -> 338) — verify against pristine source.
 */
310 LEAF(except_vec0_r4k_mphwbug)
312 GET_PGD(k0, k1) # get pgd pointer
313 mfc0 k0, CP0_BADVADDR # get faulting address
314 srl k0, k0, _PGDIR_SHIFT # get pgd only bits
315 sll k0, k0, 2 # log2(sizeof(pgd_t))
319 #ifndef CONFIG_64BIT_PHYS_ADDR
322 and k0, k0, PTEP_INDX_MSK # mask pte-pair index
325 PTE_L k1, PTE_SIZE(k1) # get odd pte
329 P_MTC0 k0, CP0_ENTRYLO0 # load even half
332 P_MTC0 k1, CP0_ENTRYLO1 # load odd half
338 END(except_vec0_r4k_mphwbug)
341 /* TLB refill, EXL == 0, R4000 UP 250MHZ entrylo[01] hwbug version */
/*
 * TLB refill handler, EXL == 0, R4000 UP 250MHz EntryLo[01] hardware-bug
 * variant: each EntryLo register is first written with zero, then with
 * the real value, to work around the erratum.
 * NOTE(review): interior lines missing from this extraction (the
 * #ifndef at 351 has no visible #endif; numbering jumps 347 -> 351,
 * 357 -> 359, 363 -> 369) — verify against pristine source.
 */
342 LEAF(except_vec0_r4k_250MHZhwbug)
344 GET_PGD(k0, k1) # get pgd pointer
345 mfc0 k0, CP0_BADVADDR # get faulting address
346 srl k0, k0, _PGDIR_SHIFT # get pgd only bits
347 sll k0, k0, 2 # log2(sizeof(pgd_t))
351 #ifndef CONFIG_64BIT_PHYS_ADDR
354 and k0, k0, PTEP_INDX_MSK # mask pte-pair index
357 PTE_L k1, PTE_SIZE(k1) # get odd pte
359 P_MTC0 zero, CP0_ENTRYLO0 # 250MHz erratum: clear first
360 P_MTC0 k0, CP0_ENTRYLO0 # then write the real value
362 P_MTC0 zero, CP0_ENTRYLO1 # 250MHz erratum: clear first
363 P_MTC0 k1, CP0_ENTRYLO1 # then write the real value
369 END(except_vec0_r4k_250MHZhwbug)
372 /* TLB refill, EXL == 0, R4000 MP 250MHZ entrylo[01]+badvaddr bug version */
/*
 * TLB refill handler, EXL == 0, R4000 MP 250MHz EntryLo[01]+BadVAddr
 * combined hardware-bug variant: zero-then-real double writes to each
 * EntryLo register, on top of the MP BadVAddr workaround.
 * NOTE(review): interior lines missing from this extraction (the
 * #ifndef at 382 has no visible #endif; numbering jumps 378 -> 382,
 * 388 -> 392, 397 -> 403) — verify against pristine source.
 */
373 LEAF(except_vec0_r4k_MP250MHZhwbug)
375 GET_PGD(k0, k1) # get pgd pointer
376 mfc0 k0, CP0_BADVADDR # get faulting address
377 srl k0, k0, _PGDIR_SHIFT # get pgd only bits
378 sll k0, k0, 2 # log2(sizeof(pgd_t))
382 #ifndef CONFIG_64BIT_PHYS_ADDR
385 and k0, k0, PTEP_INDX_MSK # mask pte-pair index
388 PTE_L k1, PTE_SIZE(k1) # get odd pte
392 P_MTC0 zero, CP0_ENTRYLO0 # 250MHz erratum: clear first
393 P_MTC0 k0, CP0_ENTRYLO0 # then write the real value
396 P_MTC0 zero, CP0_ENTRYLO1 # 250MHz erratum: clear first
397 P_MTC0 k1, CP0_ENTRYLO1 # then write the real value
403 END(except_vec0_r4k_MP250MHZhwbug)
411 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
412 * 2. A timing hazard exists for the TLBP instruction.
414 * stalling_instruction
417 * The JTLB is being read for the TLBP throughout the stall generated by the
418 * previous instruction. This is not really correct as the stalling instruction
419 * can modify the address used to access the JTLB. The failure symptom is that
420 * the TLBP instruction will use an address created for the stalling instruction
421 * and not the address held in C0_ENTRYHI and thus report the wrong results.
423 * The software work-around is to not allow the instruction preceding the TLBP
424 * to stall - make it an NOP or some other instruction guaranteed not to stall.
426 * Errata 2 will not be fixed. This errata is also on the R5000.
428 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
/* Workaround for the R5000/Nevada TLBP timing hazard described in the
 * errata comment above: the instruction immediately preceding a tlbp
 * must not stall, so pad with a nop. */
430 #define R5K_HAZARD nop
/*
433 * Note for many R4k variants tlb probes cannot be executed out
434 * of the instruction cache else you get bogus results.
 */
/*
 * TLB load-miss handler (non-refill path): probes for the faulting
 * entry, tests the present/read bits of the PTE and, if present, marks
 * it valid+accessed before reloading the TLB; otherwise branches to
 * nopage_tlbl for the C fault path.
 * NOTE(review): this NESTED body is heavily truncated in this
 * extraction (numbering jumps 440 -> 443 -> 454; no END visible) —
 * verify against pristine source before assembling.
 */
437 NESTED(handle_tlbl, PT_SIZE, sp)
440 mfc0 k0, CP0_BADVADDR # get faulting address
443 srl k0, k0, PAGE_SHIFT+1 # page-pair number
454 /* Test present bit in entry. */
458 PTE_PRESENT(k0, k1, nopage_tlbl)
459 PTE_MAKEVALID(k0, k1)
/*
 * TLB store-miss handler: probe for the faulting entry, check the PTE
 * is present and writable (else branch to nopage_tlbs for the C fault
 * path), then set accessed/modified/valid/dirty bits.
 * NOTE(review): heavily truncated in this extraction (numbering jumps
 * 474 -> 481; no END visible) — verify against pristine source.
 */
474 NESTED(handle_tlbs, PT_SIZE, sp)
481 tlbp # find faulting entry
482 PTE_WRITABLE(k0, k1, nopage_tlbs)
483 PTE_MAKEWRITE(k0, k1)
498 NESTED(handle_mod, PT_SIZE, sp)
504 tlbp # find faulting entry
505 andi k0, k0, _PAGE_WRITE
509 /* Present and writable bits set, set accessed and dirty bits. */
510 PTE_MAKEWRITE(k0, k1)
512 /* Now reload the entry into the tlb. */