2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1997, 1998, 1999 Ralf Baechle (ralf@gnu.org)
7 * Copyright (C) 1999 Silicon Graphics, Inc.
8 * Copyright (C) 2000 Kanoj Sarcar (kanoj@sgi.com)
10 #include <linux/config.h>
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
16 #include <asm/pgtable.h>
17 #include <asm/system.h>
18 #include <asm/mmu_context.h>
/* TLB refill exception handlers, implemented in assembly elsewhere;
 * one of each pair is copied to the exception vector in tlb_init(). */
20 extern void except_vec0_generic(void);
21 extern void except_vec0_r4000(void);
22 extern void except_vec1_generic(void);
23 extern void except_vec1_r10k(void);
/* Total TLB entries on this CPU class, and half of it — the threshold
 * below which a per-page probe loop beats a full TLB flush. */
25 #define NTLB_ENTRIES 64
26 #define NTLB_ENTRIES_HALF 32
/*
 * Invalidate every non-wired TLB entry on the local CPU.
 * Runs with interrupts disabled; EntryHi is parked on a CKSEG0 address
 * ("impossible" VPN2 that can never be matched) while each indexed slot
 * from the wired boundary upward is overwritten.
 */
28 void local_flush_tlb_all(void)
31 unsigned long old_ctx;
34 local_irq_save(flags);
35 /* Save old context and create impossible VPN2 value */
/* NOTE(review): only the ASID bits of EntryHi are preserved and later
 * restored; the old VPN2 bits are discarded — confirm intended. */
36 old_ctx = read_c0_entryhi() & ASID_MASK;
37 write_c0_entryhi(CKSEG0);
/* Start above the wired (pinned) entries; those must survive. */
41 entry = read_c0_wired();
43 /* Blast 'em all away. */
44 while (entry < NTLB_ENTRIES) {
45 write_c0_index(entry);
/* (loop body continues: entrylo writes / tlbwi / entry++ not visible
 * in this excerpt) */
49 write_c0_entryhi(old_ctx);
50 local_irq_restore(flags);
/*
 * Flush all TLB state belonging to address space `mm` on the local CPU.
 * No entries are walked: the mm is simply handed a fresh ASID via
 * drop_mmu_context(), so stale entries tagged with the old ASID can
 * never match again.
 */
53 void local_flush_tlb_mm(struct mm_struct *mm)
55 int cpu = smp_processor_id();
/* cpu_context == 0 means mm never ran on this CPU — nothing to drop. */
56 if (cpu_context(cpu, mm) != 0) {
57 drop_mmu_context(mm,cpu);
/*
 * Flush the TLB entries covering [start, end) of vma's address space on
 * the local CPU.  MIPS TLB entries map an even/odd page pair, so the
 * range is rounded to double-page granularity and probed one pair at a
 * time under the mm's ASID.  If the range spans more than half the TLB
 * it is cheaper to retire the whole context via drop_mmu_context().
 */
61 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
64 struct mm_struct *mm = vma->vm_mm;
65 int cpu = smp_processor_id();
/* mm with no ASID on this CPU has nothing in the TLB. */
67 if (cpu_context(cpu, mm) != 0) {
71 local_irq_save(flags);
/* Page count, then number of even/odd double-page TLB pairs. */
72 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
73 size = (size + 1) >> 1;
74 if (size <= NTLB_ENTRIES_HALF) {
75 int oldpid = (read_c0_entryhi() & ASID_MASK);
76 int newpid = (cpu_context(smp_processor_id(), mm)
/* Align start/end to the double-page granularity of a TLB pair. */
79 start &= (PAGE_MASK << 1);
80 end += ((PAGE_SIZE << 1) - 1);
81 end &= (PAGE_MASK << 1);
/* Probe each pair under mm's ASID; invalidate on hit (the tlbp/tlbwi
 * and entrylo-clearing lines are not visible in this excerpt). */
85 write_c0_entryhi(start | newpid);
86 start += (PAGE_SIZE << 1);
88 idx = read_c0_index();
/* Park EntryHi on an unmapped address so entries stay distinct.
 * NOTE(review): KSEG0 here vs CKSEG0 in local_flush_tlb_all — verify
 * which is correct for this kernel's 32/64-bit configuration. */
91 write_c0_entryhi(KSEG0);
96 write_c0_entryhi(oldpid);
/* Range too large: cheaper to give mm a fresh ASID instead. */
98 drop_mmu_context(mm, cpu);
100 local_irq_restore(flags);
/*
 * Flush TLB entries for a kernel virtual address range on the local CPU.
 * Same double-page probe loop as local_flush_tlb_range(), but without
 * per-mm ASID handling; falls back to local_flush_tlb_all() when the
 * range covers more than half the TLB.
 */
104 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
/* Page count, then number of even/odd double-page TLB pairs. */
109 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
110 size = (size + 1) >> 1;
112 local_irq_save(flags);
113 if (size <= NTLB_ENTRIES_HALF) {
/* Full EntryHi (including ASID bits) is saved and restored here. */
114 int pid = read_c0_entryhi();
116 start &= (PAGE_MASK << 1);
117 end += ((PAGE_SIZE << 1) - 1);
118 end &= (PAGE_MASK << 1);
120 while (start < end) {
/* Probe for this pair (tlbp instruction not visible in excerpt). */
123 write_c0_entryhi(start);
124 start += (PAGE_SIZE << 1);
126 idx = read_c0_index();
127 write_c0_entrylo0(0);
128 write_c0_entrylo1(0);
/* Unique per-index unmapped address keeps all entries distinct. */
129 write_c0_entryhi(KSEG0 + (idx << (PAGE_SHIFT+1)));
134 write_c0_entryhi(pid);
136 local_flush_tlb_all();
138 local_irq_restore(flags);
/*
 * Flush the single TLB entry (even/odd page pair) mapping `page` in
 * vma's address space on the local CPU.  No-op if the mm has never
 * been assigned an ASID on this CPU.
 */
141 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
143 if (cpu_context(smp_processor_id(), vma->vm_mm) != 0) {
145 int oldpid, newpid, idx;
147 newpid = (cpu_context(smp_processor_id(), vma->vm_mm) &
/* A TLB entry maps an even/odd page pair; align accordingly. */
149 page &= (PAGE_MASK << 1);
150 local_irq_save(flags);
151 oldpid = (read_c0_entryhi() & ASID_MASK);
/* Probe for page under mm's ASID (tlbp and the index-valid check are
 * not visible in this excerpt). */
152 write_c0_entryhi(page | newpid);
154 idx = read_c0_index();
155 write_c0_entrylo0(0);
156 write_c0_entrylo1(0);
/* Park EntryHi on an unmapped address before rewriting the slot. */
157 write_c0_entryhi(KSEG0);
163 write_c0_entryhi(oldpid);
164 local_irq_restore(flags);
169 * This one is only used for pages with the global bit set so we don't care
170 * much about the ASID.
172 void local_flush_tlb_one(unsigned long page)
177 local_irq_save(flags);
/* TLB entries map even/odd page pairs; align to pair granularity. */
178 page &= (PAGE_MASK << 1);
/* NOTE(review): magic 0xff here where the sibling functions use
 * ASID_MASK — verify they agree on this CPU's ASID width. */
179 oldpid = read_c0_entryhi() & 0xff;
/* Probe without an ASID: the target entry is global, so VPN2 alone
 * matches (tlbp and index check not visible in this excerpt). */
180 write_c0_entryhi(page);
182 idx = read_c0_index();
183 write_c0_entrylo0(0);
184 write_c0_entrylo1(0);
186 /* Make sure all entries differ. */
187 write_c0_entryhi(KSEG0+(idx<<(PAGE_SHIFT+1)));
190 write_c0_entryhi(oldpid);
192 local_irq_restore(flags);
195 /* XXX Simplify this. On the R10000 writing a TLB entry for an virtual
196 address that already exists will overwrite the old entry and not result
197 in TLB malfunction or TLB shutdown. */
/*
 * Install/refresh the TLB entry for `address` in vma's address space
 * after a page-table update: probes for the existing pair, then loads
 * EntryLo0/EntryLo1 from the two PTEs of the even/odd pair.
 */
198 void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
207 * Handle debugger faulting in for debugee.
/* Only touch the TLB if this mm is the one currently active here. */
209 if (current->active_mm != vma->vm_mm)
212 pid = read_c0_entryhi() & ASID_MASK;
/* Sanity check: the hardware ASID must match mm's ASID on this CPU. */
214 if ((pid != (cpu_context(smp_processor_id(), vma->vm_mm) & ASID_MASK))
215 || (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
217 "%s: Wheee, bogus tlbpid mmpid=%d tlbpid=%d\n",
218 __FUNCTION__, (int) (cpu_context(smp_processor_id(),
219 vma->vm_mm) & ASID_MASK), pid);
222 local_irq_save(flags);
/* Align to the even/odd pair and probe (tlbp not visible here). */
223 address &= (PAGE_MASK << 1);
224 write_c0_entryhi(address | (pid));
225 pgdp = pgd_offset(vma->vm_mm, address);
227 pmdp = pmd_offset(pgdp, address);
228 idx = read_c0_index();
229 ptep = pte_offset_map(pmdp, address);
/* NOTE(review): the >> 6 shift presumably converts pte_val() to the
 * EntryLo PFN/flag layout for this CPU — confirm against the CPU's
 * EntryLo register format. */
230 write_c0_entrylo0(pte_val(*ptep++) >> 6);
231 write_c0_entrylo1(pte_val(*ptep) >> 6);
232 write_c0_entryhi(address | pid);
/* (tlbwi/tlbwr write-back not visible in this excerpt.) */
238 write_c0_entryhi(pid);
239 local_irq_restore(flags);
242 void __init tlb_init(void)
245 * You should never change this register:
246 * - On R4600 1.7 the tlbp never hits for pages smaller than
247 * the value in the c0_pagemask register.
248 * - The entire mm handling assumes the c0_pagemask register to
249 * be set for 4kb pages.
251 write_c0_pagemask(PM_4K);
253 write_c0_framemask(0);
255 /* From this point on the ARC firmware is dead. */
256 local_flush_tlb_all();
258 /* Did I tell you that ARC SUCKS? */
261 memcpy((void *)KSEG0, &except_vec0_r4000, 0x80);
262 memcpy((void *)(KSEG0 + 0x080), &except_vec1_generic, 0x80);
263 flush_icache_range(KSEG0, KSEG0 + 0x100);
266 memcpy((void *)(CKSEG0 + 0x000), &except_vec0_generic, 0x80);
267 memcpy((void *)(CKSEG0 + 0x080), except_vec1_r10k, 0x80);
268 flush_icache_range(CKSEG0 + 0x80, CKSEG0 + 0x100);