2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 2000 Silicon Graphics, Inc.
7 * Written by Ulf Carlsson (ulfc@engr.sgi.com)
8 * Copyright (C) 2002 Maciej W. Rozycki
10 #include <linux/config.h>
11 #include <linux/init.h>
12 #include <linux/threads.h>
15 #include <asm/hazards.h>
16 #include <asm/regdef.h>
17 #include <asm/mipsregs.h>
18 #include <asm/stackframe.h>
21 #define _VMALLOC_START 0xc000000000000000
24 * After this macro runs we have a pointer to the pte of the address
25 * that caused the fault in PTR.
27 .macro LOAD_PTE2, ptr, tmp, kaddr
29 dmfc0 \ptr, CP0_CONTEXT
30 dmfc0 \tmp, CP0_BADVADDR
31 dsra \ptr, 23 # get pgd_current[cpu]
33 dmfc0 \tmp, CP0_BADVADDR
38 dsrl \tmp, (_PGDIR_SHIFT-3) # get pgd offset in bytes
39 andi \tmp, ((_PTRS_PER_PGD - 1)<<3)
40 daddu \ptr, \tmp # add in pgd offset
41 dmfc0 \tmp, CP0_BADVADDR
42 ld \ptr, (\ptr) # get pmd pointer
43 dsrl \tmp, (_PMD_SHIFT-3) # get pmd offset in bytes
44 andi \tmp, ((_PTRS_PER_PMD - 1)<<3)
45 daddu \ptr, \tmp # add in pmd offset
46 dmfc0 \tmp, CP0_XCONTEXT
47 ld \ptr, (\ptr) # get pte pointer
48 andi \tmp, 0xff0 # get pte offset
54 * Ditto for the kernel table.
56 .macro LOAD_KPTE2, ptr, tmp, not_vmalloc
58 * First, determine that the address is in/above vmalloc range.
60 dmfc0 \tmp, CP0_BADVADDR
61 dli \ptr, _VMALLOC_START
64 * Now find offset into kptbl.
66 dsubu \tmp, \tmp, \ptr
68 dsrl \tmp, (_PAGE_SHIFT+1) # get vpn2
69 dsll \tmp, 4 # byte offset of pte
70 daddu \ptr, \ptr, \tmp
73 * Determine that fault address is within vmalloc range.
77 beqz \tmp, \not_vmalloc # not vmalloc
83 * This places the even/odd pte pair in the page table at the pte
84 * entry pointed to by PTE into ENTRYLO0 and ENTRYLO1.
86 .macro PTE_RELOAD, pte0, pte1
87 dsrl \pte0, 6 # convert to entrylo0
88 dmtc0 \pte0, CP0_ENTRYLO0 # load it
89 dsrl \pte1, 6 # convert to entrylo1
90 dmtc0 \pte1, CP0_ENTRYLO1 # load it
101 * TLB refill handlers for the R4000 and SB1.
102 * Attention: We may only use 32 instructions / 128 bytes.
105 LEAF(except_vec1_r4k)
107 dla k0, handle_vec1_r4k
112 LEAF(except_vec1_sb1)
114 dmfc0 k0, CP0_BADVADDR
115 dmfc0 k1, CP0_ENTRYHI
117 dsrl k0, k0, _PAGE_SHIFT+1
121 dla k0, handle_vec1_r4k
132 LEAF(handle_vec1_r4k)
135 ld k0, 0(k1) # get even pte
136 ld k1, 8(k1) # get odd pte
143 9: # handle the vmalloc range
144 LOAD_KPTE2 k1 k0 invalid_vmalloc_address
145 ld k0, 0(k1) # get even pte
146 ld k1, 8(k1) # get odd pte
158 * TLB refill handler for the R10000.
159 * Attention: We may only use 32 instructions / 128 bytes.
162 LEAF(except_vec1_r10k)
# TLB refill exception vector stub for the R10000. Copied into the
# exception vector area, so it must stay tiny (<= 32 insns / 128 bytes,
# per the comment above): it only materializes the address of the full
# handler and transfers control there.
# NOTE(review): the actual jump (jr k0) is not visible in this excerpt
# of the file — confirm against the complete source.
164 	dla	k0, handle_vec1_r10k	# k0 = address of full refill handler
167 	END(except_vec1_r10k)
172 LEAF(handle_vec1_r10k)
# Full TLB refill handler for the R10000. On entry k1 is expected to
# point at the even/odd pte pair for the faulting address (set up by
# the LOAD_PTE2 path — that setup code is not visible in this excerpt;
# confirm against the complete source). The pair is then fed to
# PTE_RELOAD to fill ENTRYLO0/ENTRYLO1 before the TLB write.
175 	ld	k0, 0(k1)			# get even pte
176 	ld	k1, 8(k1)			# get odd pte
# Fallback path taken when the faulting address was not a user mapping:
182 9:					# handle the vmalloc range
# LOAD_KPTE2 recomputes k1 from the kernel page table (kptbl); it
# branches to invalid_vmalloc_address if the address is below
# _VMALLOC_START, i.e. not a valid kernel vmalloc address.
183 	LOAD_KPTE2 k1 k0 invalid_vmalloc_address
184 	ld	k0, 0(k1)			# get even pte
185 	ld	k1, 8(k1)			# get odd pte
190 	END(handle_vec1_r10k)
194 LEAF(invalid_vmalloc_address)
# Terminal error path: reached from LOAD_KPTE2 when a TLB refill fault
# address falls outside the vmalloc range handled by the kernel page
# table. There is no way to recover a bogus kernel access, so fetch the
# faulting address for the report and panic.
198 	dmfc0	t0, CP0_BADVADDR	# t0 = faulting virtual address
202 	PANIC("Invalid kernel address")
203 	END(invalid_vmalloc_address)