/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/mm/tlbmiss.c
 *
 * Original code from fault.c
 * Copyright (C) 2000, 2001  Paolo Alberelli
 *
 * Fast PTE->TLB refill path
 * Copyright (C) 2003 Richard.Curnow@superh.com
 *
 * IMPORTANT NOTES :
 * The do_fast_page_fault function is called from a context in entry.S where very few registers
 * have been saved.  In particular, the code in this file must be compiled not to use ANY
 * caller-save registers that are not part of the restricted save set.  Also, it means that
 * code in this file must not make calls to functions elsewhere in the kernel, or else the
 * excepting context will see corruption in its caller-save registers.  Plus, the entry.S save
 * area is non-reentrant, so this code has to run with SR.BL==1, i.e. no interrupts taken inside
 * it and panic on any exception.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/registers.h>		/* required by inline asm statements */

/* Callable from fault.c, so not static */
inline void __do_tlb_refill(unsigned long address,
			    unsigned long long is_text_not_data, pte_t *pte)
{
	unsigned long long ptel;
	unsigned long long pteh = 0;
	struct tlb_info *tlbp;
	unsigned long long next;

	/* Get PTEL first: the PTE's hardware flags seed the TLB entry. */
	ptel = pte_val(*pte);

	/* Set PTEH register: virtual page number of the faulting address. */
	pteh = address & MMU_VPN_MASK;

	/* Sign extend based on neff. */
#if (NEFF == 32)
	/* Faster sign extension for the common NEFF == 32 case. */
	pteh = (unsigned long long)(signed long long)(signed long)pteh;
#else
	/* General case. */
	pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh;
#endif
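
	/*
	 * Worked example (illustrative only): with NEFF == 32, an address
	 * such as 0x80001000 has bit 31 set, so the cast chain above
	 * replicates that bit upwards and yields pteh == 0xffffffff80001000,
	 * while 0x00401000 is left unchanged.  The NEFF_SIGN/NEFF_MASK
	 * variant computes the same result for an arbitrary NEFF.
	 */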

	/* Set the ASID and mark the entry valid. */
	pteh |= get_asid() << PTEH_ASID_SHIFT;
	pteh |= PTEH_VALID;

	/* Set PTEL register, set_pte has performed the sign extension */
	ptel &= _PAGE_FLAGS_HARDWARE_MASK;	/* drop software flags */
	ptel |= _PAGE_FLAGS_HARDWARE_DEFAULT;	/* add default flags */

	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
	next = tlbp->next;
	__flush_tlb_slot(next);

	/* Load PTEL (config offset 1) before PTEH (config offset 0): PTEH
	   carries the valid bit, so the slot only goes live once both
	   halves have been written. */
	asm volatile ("putcfg %0,1,%2\n\n\t"
		      "putcfg %0,0,%1\n"
		      :  : "r" (next), "r" (pteh), "r" (ptel) );

	/* Round-robin replacement: advance to the next slot, wrapping back
	   to the first one once the end of the refill range is passed. */
	next += TLB_STEP;
	if (next > tlbp->last) next = tlbp->first;
	tlbp->next = next;
}

static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long protection_flags,
				unsigned long long textaccess,
				unsigned long address)
{
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset_k(address);
	pmd = pmd_offset(dir, address);
	if (pmd_none(*pmd)) {
		return 0;
	}

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry)) {
		return 0;
	}
	if ((pte_val(entry) & protection_flags) != protection_flags) {
		return 0;
	}

	__do_tlb_refill(address, textaccess, pte);
	return 1;
}

static int handle_tlbmiss(struct mm_struct *mm, unsigned long long protection_flags,
			  unsigned long long textaccess,
			  unsigned long address)
{
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/* NB. The PGD currently only contains a single entry - there is no
	   page table tree stored for the top half of the address space since
	   virtual pages in that region should never be mapped in user mode.
	   (In kernel mode, the only things in that region are the 512Mb super
	   page (locked in), and vmalloc (modules) + I/O device pages (handled
	   by handle_vmalloc_fault), so no PGD for the upper half is required
	   by kernel mode either).

	   See how mm->pgd is allocated and initialised in pgd_alloc to see why
	   the next test is necessary. - RPC */
	if (address >= (unsigned long) TASK_SIZE) {
		/* upper half - never has page table entries. */
		return 0;
	}
	dir = pgd_offset(mm, address);
	if (pgd_none(*dir)) {
		return 0;
	}
	if (!pgd_present(*dir)) {
		return 0;
	}

	pmd = pmd_offset(dir, address);
	if (pmd_none(*pmd)) {
		return 0;
	}
	if (!pmd_present(*pmd)) {
		return 0;
	}

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (pte_none(entry)) {
		return 0;
	}
	if (!pte_present(entry)) {
		return 0;
	}

	/* If the page doesn't have sufficient protection bits set to service the
	   kind of fault being handled, there's not much point doing the TLB refill.
	   Punt the fault to the general handler. */
	if ((pte_val(entry) & protection_flags) != protection_flags) {
		return 0;
	}

	__do_tlb_refill(address, textaccess, pte);
	return 1;
}

/* Put all this information into one structure so that everything is just arithmetic
   relative to a single base address.  This reduces the number of movi/shori pairs
   needed just to load addresses of static data. */
struct expevt_lookup {
	unsigned short protection_flags[8];
	unsigned char  is_text_access[8];
	unsigned char  is_write_access[8];
};

#define DIRTY	(_PAGE_DIRTY | _PAGE_ACCESSED)
#define YOUNG	(_PAGE_ACCESSED)

/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
   the fault happened in user mode or privileged mode. */
static struct expevt_lookup expevt_lookup_table = {
	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
	.is_text_access   = {1, 1, 0, 0, 0, 0, 0, 0},
	.is_write_access  = {0, 0, 0, 0, 0, 0, 1, 1},
};

/*
   This routine handles page faults that can be serviced just by refilling a
   TLB entry from an existing page table entry.  (This case represents a very
   large majority of page faults.)  Return 1 if the fault was successfully
   handled.  Return 0 if the fault could not be handled.  (This leads into the
   general fault handling in fault.c which deals with mapping file-backed
   pages, stack growth, segmentation faults, swapping, etc.)
 */
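
/*
 * Illustrative sketch only - the real dispatch lives in entry.S and fault.c,
 * which are not shown here.  A caller would use the return value to decide
 * whether to fall back on the general handler, along the lines of:
 *
 *	if (!do_fast_page_fault(ssr_md, expevt, address))
 *		call_general_fault_handler();	(a hypothetical helper name)
 */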

asmlinkage int do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
				  unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long long textaccess;
	unsigned long long protection_flags;
	unsigned long long index;
	unsigned long long expevt4;

	/* The next few lines implement a way of hashing EXPEVT into a small array index
	   which can be used to look up parameters specific to the type of TLBMISS being
	   handled.  Note:
	   ITLBMISS has EXPEVT==0xa40
	   RTLBMISS has EXPEVT==0x040
	   WTLBMISS has EXPEVT==0x060
	*/
	expevt4 = (expevt >> 4);
	/* TODO : xor ssr_md into this expression too.  Then we can check that PRU is set
	   when it needs to be. */
	index = expevt4 ^ (expevt4 >> 5);
	index &= 7;
	protection_flags = expevt_lookup_table.protection_flags[index];
	textaccess       = expevt_lookup_table.is_text_access[index];
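
	/* Worked example (illustrative only): ITLBMISS gives expevt4 == 0xa4,
	   so index == (0xa4 ^ 0x05) & 7 == 1, which selects PRX and flags a
	   text access.  RTLBMISS gives index 4 (PRR, data read) and WTLBMISS
	   gives index 6 (PRW, data write), matching the table above. */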

#ifdef CONFIG_SH64_PROC_TLB
	++calls_to_do_fast_page_fault;
#endif

	/*
	 * Note this is now called with interrupts still disabled.
	 * This is to cope with being called for a missing IO port
	 * address with interrupts disabled.  This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try and debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */

	tsk = current;
	mm = tsk->mm;

	if ((address >= VMALLOC_START && address < VMALLOC_END) ||
	    (address >= IOBASE_VADDR  && address < IOBASE_END)) {
		if (ssr_md) {
			/* Process-contexts can never have this address range mapped */
			if (handle_vmalloc_fault(mm, protection_flags, textaccess, address)) {
				return 1;
			}
		}
	} else if (!in_interrupt() && mm) {
		if (handle_tlbmiss(mm, protection_flags, textaccess, address)) {