/* $Id: fault.c,v 1.59 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */
#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/interrupt.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))

extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
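/* For example, ELEMENTS(sp_banks) evaluates to SPARC_PHYS_BANKS, the
 * number of entries in the physical memory bank table declared above.
 */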
/*
 * To debug kernel during syscall entry.
 */
void syscall_trace_entry(struct pt_regs *regs)
{
	printk("scall entry: %s[%d]/cpu%d: %d\n", current->comm, current->pid, smp_processor_id(), (int) regs->u_regs[UREG_G1]);
}
/*
 * To debug kernel during syscall exit.
 */
void syscall_trace_exit(struct pt_regs *regs)
{
	printk("scall exit: %s[%d]/cpu%d: %d\n", current->comm, current->pid, smp_processor_id(), (int) regs->u_regs[UREG_G1]);
}
/*
 * To debug kernel to catch accesses to certain virtual/physical addresses.
 * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
 * flags = VM_READ watches memread accesses, flags = VM_WRITE watches memwrite accesses.
 * Caller passes in a 64-bit aligned addr, with mask set to the bytes that need to be
 * watched.  This is only useful on a single cpu machine for now.  After the watchpoint
 * is detected, the process causing it will be killed, thus preventing an infinite loop.
 */
void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode)
{
	unsigned long lsubits;

	/* Read the LSU control register and clear any watchpoint
	 * enable bits that are currently set.
	 */
	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (lsubits)
			     : "i" (ASI_LSU_CONTROL));
	lsubits &= ~(LSU_CONTROL_PM | LSU_CONTROL_VM |
		     LSU_CONTROL_PR | LSU_CONTROL_VR |
		     LSU_CONTROL_PW | LSU_CONTROL_VW);

	/* Program the watchpoint address into the D-MMU. */
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (addr), "r" (mode ? VIRT_WATCHPOINT : PHYS_WATCHPOINT),
			       "i" (ASI_DMMU));

	/* Set the byte mask and the requested read/write enables, then
	 * write the new LSU control value back.
	 */
	lsubits |= ((unsigned long)mask << (mode ? 25 : 33));
	if (flags & VM_READ)
		lsubits |= (mode ? LSU_CONTROL_VR : LSU_CONTROL_PR);
	if (flags & VM_WRITE)
		lsubits |= (mode ? LSU_CONTROL_VW : LSU_CONTROL_PW);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (lsubits), "i" (ASI_LSU_CONTROL)
			     : "memory");
}
/* Nice, simple, prom library does all the sweating for us. ;) */
unsigned long __init prom_probe_memory (void)
{
	register struct linux_mlist_p1275 *mlist;
	register unsigned long bytes, base_paddr, tally;
	register int i;

	i = 0;
	mlist = *prom_meminfo()->p1275_available;
	bytes = tally = mlist->num_bytes;
	base_paddr = mlist->start_adr;

	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != (void *) 0) {
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		tally += bytes;
		if (i >= SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}

		sp_banks[i].base_addr = mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	/* Terminate the table with a sentinel entry. */
	i++;
	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
	sp_banks[i].num_bytes = 0;

	/* Now mask all bank sizes on a page boundary, it is all we can
	 * use anyways.
	 */
	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;

	return tally;
}
static void unhandled_fault(unsigned long address, struct task_struct *tsk,
			    struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT "Unable to handle kernel NULL "
		       "pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %016lx\n", (unsigned long)address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
	       (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
		(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}
static void bad_kernel_pc(struct pt_regs *regs)
{
	unsigned long *ksp;

	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
	       regs->tpc);
	__asm__("mov %%sp, %0" : "=r" (ksp));
	show_stack(current, ksp);
	unhandled_fault(regs->tpc, current, regs);
}
/*
 * We now make sure that mmap_sem is held in all paths that call
 * this.  Additionally, to prevent kswapd from ripping ptes from
 * under us, disable interrupts around the time that we look at the
 * pte; kswapd will then have to wait for its smp ipi response from
 * us before it can free the page.  This saves us having to get
 * page_table_lock.
 */
static unsigned int get_user_insn(unsigned long tpc)
{
	pgd_t *pgdp = pgd_offset(current->mm, tpc);
	pmd_t *pmdp;
	pte_t *ptep, pte;
	unsigned long pa;
	u32 insn = 0;
	unsigned long pstate;

	if (pgd_none(*pgdp))
		goto outret;
	pmdp = pmd_offset(pgdp, tpc);
	if (pmd_none(*pmdp))
		goto outret;

	/* This disables preemption for us as well. */
	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
				: : "r" (pstate), "i" (PSTATE_IE));
	ptep = pte_offset_map(pmdp, tpc);
	pte = *ptep;
	if (!pte_present(pte))
		goto out;

	pa  = (pte_val(pte) & _PAGE_PADDR);
	pa += (tpc & ~PAGE_MASK);

	/* Use phys bypass so we don't pollute dtlb/dcache. */
	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=r" (insn)
			     : "r" (pa), "i" (ASI_PHYS_USE_EC));

out:
	pte_unmap(ptep);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
outret:
	return insn;
}
extern unsigned long compute_effective_address(struct pt_regs *, unsigned int, unsigned int);

static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			     unsigned int insn, int fault_code)
{
	siginfo_t info;

	info.si_code = code;
	info.si_signo = sig;
	info.si_errno = 0;

	/* For instruction faults the faulting address is the PC itself;
	 * for data faults it must be recomputed from the instruction.
	 */
	if (fault_code & FAULT_CODE_ITLB)
		info.si_addr = (void __user *) regs->tpc;
	else
		info.si_addr = (void __user *)
			compute_effective_address(regs, insn, 0);
	info.si_trapno = 0;
	force_sig_info(sig, &info, current);
}
extern int handle_ldf_stq(u32, struct pt_regs *);
extern int handle_ld_nf(u32, struct pt_regs *);

static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
	if (!insn) {
		if (!regs->tpc || (regs->tpc & 0x3))
			return 0;
		if (regs->tstate & TSTATE_PRIV)
			insn = *(unsigned int *) regs->tpc;
		else
			insn = get_user_insn(regs->tpc);
	}
	return insn;
}
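/* Illustrative sketch (not in the original source): the opcode tests in
 * do_kernel_fault() below decode SPARC v9 load/store instructions by hand.
 * Bits 31:30 == 3 plus bit 23 set ((insn & 0xc0800000) == 0xc0800000)
 * select an alternate-space load/store; the i-bit (0x2000) says whether
 * the ASI comes from the %asi register (held in TSTATE bits 31:24) or
 * from the immediate ASI field in bits 12:5; the non-faulting ASIs
 * (ASI_PNF/ASI_SNF/ASI_PNFL/ASI_SNFL, 0x82/0x83/0x8a/0x8b) are matched
 * by (asi & 0xf2) == 0x82.  The helper name below is hypothetical.
 */
#if 0
static int insn_is_nofault_asi_ldst(u32 insn, struct pt_regs *regs)
{
	unsigned char asi;

	if ((insn & 0xc0800000) != 0xc0800000)
		return 0;			/* not an alternate-space load/store */
	if (insn & 0x2000)
		asi = (regs->tstate >> 24);	/* ASI taken from %asi register */
	else
		asi = (insn >> 5);		/* immediate ASI field, bits 12:5 */
	return (asi & 0xf2) == 0x82;		/* one of the no-fault ASIs */
}
#endif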
static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
			    unsigned int insn, unsigned long address)
{
	unsigned long g2;
	unsigned char asi = ASI_P;

	if ((!insn) && (regs->tstate & TSTATE_PRIV))
		goto cannot_handle;

	/* If the user insn could not be read (thus insn is zero), that
	 * is fine.  We will just gun down the process with a signal
	 * in that case.
	 */

	if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
	    (insn & 0xc0800000) == 0xc0800000) {
		if (insn & 0x2000)
			asi = (regs->tstate >> 24);
		else
			asi = (insn >> 5);
		if ((asi & 0xf2) == 0x82) {
			if (insn & 0x1000000) {
				handle_ldf_stq(insn, regs);
			} else {
				/* This was a non-faulting load. Just clear the
				 * destination register(s) and continue with the next
				 * instruction. -jj
				 */
				handle_ld_nf(insn, regs);
			}
			return;
		}
	}

	g2 = regs->u_regs[UREG_G2];

	/* Is this in ex_table? */
	if (regs->tstate & TSTATE_PRIV) {
		unsigned long fixup;

		if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) {
			if (insn & 0x2000)
				asi = (regs->tstate >> 24);
			else
				asi = (insn >> 5);
		}

		/* Look in asi.h: All _S asis have LS bit set */
		if ((asi & 0x2) &&
		    (fixup = search_extables_range(regs->tpc, &g2))) {
			regs->tpc = fixup;
			regs->tnpc = regs->tpc + 4;
			regs->u_regs[UREG_G2] = g2;
			return;
		}
	} else {
		/* The si_code was set to make clear whether
		 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
		 */
		do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
		return;
	}

cannot_handle:
	unhandled_fault (address, current, regs);
}
asmlinkage void do_sparc64_fault(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned int insn = 0;
	int si_code, fault_code;
	unsigned long address;

	si_code = SEGV_MAPERR;
	fault_code = get_thread_fault_code();
	address = current_thread_info()->fault_address;

	/* A fault cannot be both an ITLB miss and a DTLB miss. */
	if ((fault_code & FAULT_CODE_ITLB) &&
	    (fault_code & FAULT_CODE_DTLB))
		BUG();
	if (regs->tstate & TSTATE_PRIV) {
		unsigned long tpc = regs->tpc;

		/* Sanity check the PC. */
		if ((tpc >= KERNBASE && tpc < (unsigned long) _etext) ||
		    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
			/* Valid, no problems... */
		} else {
			bad_kernel_pc(regs);
			return;
		}
	}
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (in_interrupt() || !mm)
		goto intr_or_no_mm;

	if (test_thread_flag(TIF_32BIT)) {
		if (!(regs->tstate & TSTATE_PRIV))
			regs->tpc &= 0xffffffff;
		address &= 0xffffffff;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	/* Pure DTLB misses do not tell us whether the fault causing
	 * load/store/atomic was a write or not, it only says that there
	 * was no match.  So in such a case we (carefully) read the
	 * instruction to try and figure this out.  It's an optimization
	 * so it's ok if we can't do this.
	 *
	 * Special hack, window spill/fill knows the exact fault type.
	 */
	if (((fault_code &
	      (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
	    (vma->vm_flags & VM_WRITE) != 0) {
		insn = get_fault_insn(regs, 0);
		if (!insn)
			goto continue_fault;
		/* Encoding check: a load/store with the store bit set,
		 * but not a prefetch(a) instruction.
		 */
		if ((insn & 0xc0200000) == 0xc0200000 &&
		    (insn & 0x1780000) != 0x1680000) {
			/* Don't bother updating thread struct value,
			 * because update_mmu_cache only cares which tlb
			 * the access came from.
			 */
			fault_code |= FAULT_CODE_WRITE;
		}
	}
continue_fault:
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (!(fault_code & FAULT_CODE_WRITE)) {
		/* Non-faulting loads shouldn't expand stack. */
		insn = get_fault_insn(regs, insn);
		if ((insn & 0xc0800000) == 0xc0800000) {
			unsigned char asi;

			if (insn & 0x2000)
				asi = (regs->tstate >> 24);
			else
				asi = (insn >> 5);
			if ((asi & 0xf2) == 0x82)
				goto bad_area;
		}
	}
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	si_code = SEGV_ACCERR;

	/* If we took an ITLB miss on a non-executable page, catch
	 * that here.
	 */
	if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
		BUG_ON(address != regs->tpc);
		BUG_ON(regs->tstate & TSTATE_PRIV);
		goto bad_area;
	}
	if (fault_code & FAULT_CODE_WRITE) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;

		/* Spitfire has an icache which does not snoop
		 * processor stores.  Later processors do...
		 */
		if (tlb_type == spitfire &&
		    (vma->vm_flags & VM_EXEC) != 0 &&
		    vma->vm_file != NULL)
			set_thread_flag(TIF_BLKCOMMIT);
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/* 1 == minor fault, 2 == major fault, 0 == SIGBUS,
	 * negative == out of memory.
	 */
	switch (handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE))) {
	case 1:
		current->min_flt++;
		break;
	case 2:
		current->maj_flt++;
		break;
	case 0:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}
	up_read(&mm->mmap_sem);
	goto fault_done;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);

handle_kernel_fault:
	do_kernel_fault(regs, si_code, fault_code, insn, address);

	goto fault_done;
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", current->comm);
	if (!(regs->tstate & TSTATE_PRIV))
		do_exit(SIGKILL);
	goto handle_kernel_fault;
intr_or_no_mm:
	insn = get_fault_insn(regs, 0);
	goto handle_kernel_fault;
do_sigbus:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);

	/* Kernel mode? Handle exceptions or die */
	if (regs->tstate & TSTATE_PRIV)
		goto handle_kernel_fault;
fault_done:
	/* These values are no longer needed, clear them. */
	set_thread_fault_code(0);
	clear_thread_flag(TIF_BLKCOMMIT);
	current_thread_info()->fault_address = 0;
}