2 * linux/arch/cris/mm/fault.c
4 * Low level bus fault handler
7 * Copyright (C) 2000, 2001 Axis Communications AB
14 #include <asm/uaccess.h>
15 #include <asm/pgtable.h>
16 #include <asm/arch/svinto.h>
18 /* debug of low-level TLB reload */
27 extern volatile pgd_t *current_pgd;
29 extern const struct exception_table_entry
30 *search_exception_tables(unsigned long addr);
32 asmlinkage void do_page_fault(unsigned long address, struct pt_regs *regs,
35 /* fast TLB-fill fault handler
36  * this is called from entry.S with interrupts disabled
/* NOTE(review): this is a sampled view of the file — intermediate lines
 * (some locals, braces, and if-conditions) are not visible here. The
 * comments added below describe only what the visible statements show;
 * anything marked "presumably" should be confirmed against the full file.
 */
40 handle_mmu_bus_fault(struct pt_regs *regs)
48 	int miss, we, writeac;
52 	unsigned long address;
/* Latch the TLB select register; its index field (extracted below)
 * identifies which TLB entry the hardware picked for the reload. */
55 	select = *R_TLB_SELECT;
57 	address = cause & PAGE_MASK; /* get faulting address */
/* Decode the individual exception-cause fields out of R_MMU_CAUSE and
 * the chosen TLB index out of R_TLB_SELECT. */
60 	page_id = IO_EXTRACT(R_MMU_CAUSE, page_id, cause);
61 	acc = IO_EXTRACT(R_MMU_CAUSE, acc_excp, cause);
62 	inv = IO_EXTRACT(R_MMU_CAUSE, inv_excp, cause);
63 	index = IO_EXTRACT(R_TLB_SELECT, index, select);
65 	miss = IO_EXTRACT(R_MMU_CAUSE, miss_excp, cause);
66 	we = IO_EXTRACT(R_MMU_CAUSE, we_excp, cause);
67 	writeac = IO_EXTRACT(R_MMU_CAUSE, wr_rd, cause);
69 	/* ETRAX 100LX TR89 bugfix: if the second half of an unaligned
70 	 * write causes a MMU-fault, it will not be restarted correctly.
71 	 * This could happen if a write crosses a page-boundary and the
72 	 * second page is not yet COW'ed or even loaded. The workaround
73 	 * is to clear the unaligned bit in the CPU status record, so
74 	 * that the CPU will rerun both the first and second halves of
75 	 * the instruction. This will not have any sideeffects unless
76 	 * the first half goes to any device or memory that can't be
77 	 * written twice, and which is mapped through the MMU.
79 	 * We only need to do this for writes.
/* Clear bit 5 of the saved CPU status record — presumably the
 * "unaligned" flag per the TR89 workaround described above; confirm
 * bit position against the ETRAX 100LX manual. */
83 	regs->csrinstr &= ~(1 << 5);
85 	/* Set errcode's R/W flag according to the mode which caused the
/* errcode bit 1 = write access (wr_rd field); bit 0 (protection) is
 * left clear here and set on the protection-fault path below. */
89 	errcode = writeac << 1;
91 	D(printk("bus_fault from IRP 0x%lx: addr 0x%lx, miss %d, inv %d, we %d, acc %d, dx %d pid %d\n",
92 		regs->irp, address, miss, inv, we, acc, index, page_id));
94 	/* for a miss, we need to reload the TLB entry */
97 	/* see if the pte exists at all
98 	 * refer through current_pgd, dont use mm->pgd
/* Walk the page table by hand via the per-CPU current_pgd; the pmd is
 * derived directly from the pgd slot (two-level layout — the visible
 * cast folds pgd and pmd together). */
101 	pmd = (pmd_t *)(current_pgd + pgd_index(address));
/* Bad/invalid pgd entry: report and (presumably) fall through to the
 * slow-path fault handler — the branch itself is not visible here. */
105 	printk("bad pgdir entry 0x%lx at 0x%p\n", *(unsigned long*)pmd, pmd);
/* Fetch the pte by value; a non-present pte means the fast path cannot
 * satisfy the fault and it must go to do_page_fault(). */
109 	pte = *pte_offset_kernel(pmd, address);
110 	if (!pte_present(pte))
/* Debug-only dump of the pte and its flag bits (guarded elsewhere —
 * the D() wrapper lines are not all visible in this view). */
114 	printk(" found pte %lx pg %p ", pte_val(pte), pte_page(pte));
115 	if (pte_val(pte) & _PAGE_SILENT_WRITE)
117 	if (pte_val(pte) & _PAGE_KERNEL)
119 	if (pte_val(pte) & _PAGE_SILENT_READ)
121 	if (pte_val(pte) & _PAGE_GLOBAL)
123 	if (pte_val(pte) & _PAGE_PRESENT)
125 	if (pte_val(pte) & _PAGE_ACCESSED)
127 	if (pte_val(pte) & _PAGE_MODIFIED)
129 	if (pte_val(pte) & _PAGE_READ)
131 	if (pte_val(pte) & _PAGE_WRITE)
132 		printk("Writeable ");
136 	/* load up the chosen TLB entry
137 	 * this assumes the pte format is the same as the TLB_LO layout.
139 	 * the write to R_TLB_LO also writes the vpn and page_id fields from
140 	 * R_MMU_CAUSE, which we in this case obviously want to keep
143 	*R_TLB_LO = pte_val(pte);
/* Protection-fault path: errcode bit 0 = protection violation,
 * bit 1 = it was a write-enable exception. */
148 	errcode = 1 | (we << 1);
151 	/* leave it to the MM system fault handler below */
152 	D(printk("do_page_fault %lx errcode %d\n", address, errcode));
/* Slow path: hand the fault to the generic handler declared above. */
153 	do_page_fault(address, regs, errcode);
156 /* Called from arch/cris/mm/fault.c to find fixup code. */
158 find_fixup_code(struct pt_regs *regs)
160 const struct exception_table_entry *fixup;
162 if ((fixup = search_exception_tables(regs->irp)) != 0) {
163 /* Adjust the instruction pointer in the stackframe. */
164 regs->irp = fixup->fixup;
167 * Don't return by restoring the CPU state, so switch
170 regs->frametype = CRIS_FRAME_NORMAL;