2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1997, 1998, 1999 Ralf Baechle (ralf@gnu.org)
7 * Copyright (C) 1999 Silicon Graphics, Inc.
8 * Copyright (C) 2000 Kanoj Sarcar (kanoj@sgi.com)
10 #include <linux/config.h>
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
16 #include <asm/pgtable.h>
17 #include <asm/system.h>
18 #include <asm/mmu_context.h>
/* TLB refill exception handler generator; invoked once from tlb_init(). */
20 extern void build_tlb_refill_handler(void);
/*
 * Number of TLB entries on this CPU.  Each entry maps an even/odd page
 * pair (see the PAGE_MASK << 1 arithmetic below), hence the HALF
 * constant used when deciding whether a ranged flush fits in the TLB.
 * NOTE(review): 64 matches the R10000 family (per the SGI copyright
 * above) -- confirm for every CPU this file is built for.
 */
22 #define NTLB_ENTRIES 64
23 #define NTLB_ENTRIES_HALF 32
/*
 * Flush every non-wired entry from this CPU's TLB.
 * Runs with interrupts disabled; saves and restores the current ASID
 * in c0_entryhi around the walk.
 */
25 void local_flush_tlb_all(void)
28 unsigned long old_ctx;
31 local_irq_save(flags);
32 /* Save old context and create impossible VPN2 value */
33 old_ctx = read_c0_entryhi() & ASID_MASK;
34 write_c0_entryhi(CKSEG0);
/* Start past the wired entries: indices below c0_wired are left intact. */
38 entry = read_c0_wired();
40 /* Blast 'em all away. */
41 while (entry < NTLB_ENTRIES) {
42 write_c0_index(entry);
/*
 * NOTE(review): the per-entry invalidation (entrylo clears and the
 * indexed TLB write) is not visible in this chunk -- confirm against
 * the full file.
 */
/* Restore the ASID that was live on entry. */
46 write_c0_entryhi(old_ctx);
47 local_irq_restore(flags);
/*
 * Flush all TLB entries belonging to address space @mm on this CPU.
 * If @mm has a live context here (non-zero cpu_context), simply drop
 * its MMU context rather than probing entries one by one; an mm with
 * no context has nothing in the TLB to flush.
 */
50 void local_flush_tlb_mm(struct mm_struct *mm)
52 int cpu = smp_processor_id();
53 if (cpu_context(cpu, mm) != 0) {
54 drop_mmu_context(mm,cpu);
/*
 * Flush the TLB entries covering [start, end) of @vma's address space
 * on this CPU.  Small ranges are probed and invalidated entry by
 * entry; large ranges fall back to dropping the whole MMU context,
 * which is cheaper than probing more than half the TLB.
 */
58 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
61 struct mm_struct *mm = vma->vm_mm;
62 int cpu = smp_processor_id();
/* Nothing to do unless @mm has a live context on this CPU. */
64 if (cpu_context(cpu, mm) != 0) {
68 local_irq_save(flags);
/* Range size in pages, then in even/odd double-page pairs. */
69 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
70 size = (size + 1) >> 1;
71 if (size <= NTLB_ENTRIES_HALF) {
72 int oldpid = (read_c0_entryhi() & ASID_MASK);
73 int newpid = (cpu_context(smp_processor_id(), mm)
/* Round the range out to double-page (VPN2) boundaries. */
76 start &= (PAGE_MASK << 1);
77 end += ((PAGE_SIZE << 1) - 1);
78 end &= (PAGE_MASK << 1);
/* Probe for each VPN2|ASID in turn. */
82 write_c0_entryhi(start | newpid);
83 start += (PAGE_SIZE << 1);
/*
 * Index < 0 means the probe missed; the tlbp instruction and the
 * indexed invalidation write are not visible in this chunk --
 * NOTE(review): confirm against the full file.
 */
85 idx = read_c0_index();
/* CKSEG0 serves as an "impossible" VPN2 for the dead entry. */
88 write_c0_entryhi(CKSEG0);
/* Restore the ASID that was live on entry. */
93 write_c0_entryhi(oldpid);
/* Too large: retire the whole context instead. */
95 drop_mmu_context(mm, cpu);
97 local_irq_restore(flags);
/*
 * Flush the TLB entries covering the kernel range [start, end) on this
 * CPU.  Kernel mappings carry no meaningful ASID, so the probes use
 * the bare VPN2.  Large ranges fall back to a full TLB flush.
 */
101 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
/* Range size in pages, then in even/odd double-page pairs. */
106 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
107 size = (size + 1) >> 1;
109 local_irq_save(flags);
110 if (size <= NTLB_ENTRIES_HALF) {
/* Save the whole EntryHi (VPN2 + ASID) for restoration below. */
111 int pid = read_c0_entryhi();
/* Round the range out to double-page (VPN2) boundaries. */
113 start &= (PAGE_MASK << 1);
114 end += ((PAGE_SIZE << 1) - 1);
115 end &= (PAGE_MASK << 1);
117 while (start < end) {
/* Probe for this VPN2 (no ASID for kernel mappings). */
120 write_c0_entryhi(start);
121 start += (PAGE_SIZE << 1);
/*
 * NOTE(review): the tlbp and the indexed write retiring the entry
 * are not visible in this chunk -- confirm against the full file.
 */
123 idx = read_c0_index();
124 write_c0_entrylo0(0);
125 write_c0_entrylo1(0);
/* Unique impossible VPN2 per index so no two dead entries match. */
126 write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT+1)));
131 write_c0_entryhi(pid);
/* Too large: nuke everything. */
133 local_flush_tlb_all();
135 local_irq_restore(flags);
/*
 * Flush the single TLB entry mapping @page in @vma's address space on
 * this CPU.  Probes for the page's VPN2 under the mm's ASID and, on a
 * hit, retires the entry with zero EntryLo values and an impossible
 * VPN2.  A no-op when the mm has no live context here.
 */
138 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
140 if (cpu_context(smp_processor_id(), vma->vm_mm) != 0) {
142 int oldpid, newpid, idx;
144 newpid = (cpu_context(smp_processor_id(), vma->vm_mm) &
/* Round down to the double-page (VPN2) boundary. */
146 page &= (PAGE_MASK << 1);
147 local_irq_save(flags);
148 oldpid = (read_c0_entryhi() & ASID_MASK);
149 write_c0_entryhi(page | newpid);
/*
 * NOTE(review): the tlbp preceding this read and the index < 0
 * miss check are not visible in this chunk -- confirm.
 */
151 idx = read_c0_index();
152 write_c0_entrylo0(0);
153 write_c0_entrylo1(0);
/* CKSEG0 serves as an "impossible" VPN2 for the dead entry. */
154 write_c0_entryhi(CKSEG0);
/* Restore the ASID that was live on entry. */
160 write_c0_entryhi(oldpid);
161 local_irq_restore(flags);
166 * This one is only used for pages with the global bit set so we don't care
167 * much about the ASID.
169 void local_flush_tlb_one(unsigned long page)
174 local_irq_save(flags);
/* Round down to the double-page (VPN2) boundary. */
175 page &= (PAGE_MASK << 1);
/*
 * NOTE(review): 0xff hard-codes the ASID mask; the sibling functions
 * use ASID_MASK -- unify when the full file is in view.
 */
176 oldpid = read_c0_entryhi() & 0xff;
/* Probe with the bare VPN2; global entries match regardless of ASID. */
177 write_c0_entryhi(page);
179 idx = read_c0_index();
180 write_c0_entrylo0(0);
181 write_c0_entrylo1(0);
183 /* Make sure all entries differ. */
184 write_c0_entryhi(CKSEG0+(idx<<(PAGE_SHIFT+1)));
/* Restore the ASID that was live on entry. */
187 write_c0_entryhi(oldpid);
189 local_irq_restore(flags);
192 /* XXX Simplify this. On the R10000 writing a TLB entry for an virtual
193 address that already exists will overwrite the old entry and not result
194 in TLB malfunction or TLB shutdown. */
/*
 * Install (or refresh) the TLB entry for @address after a fault.
 * Walks the page tables for @vma's mm and loads the even/odd PTE pair
 * into EntryLo0/EntryLo1 under the mm's current ASID.
 */
195 void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
204 * Handle debugger faulting in for debugee.
/* Fault for a foreign mm (e.g. ptrace): do not touch our TLB. */
206 if (current->active_mm != vma->vm_mm)
209 pid = read_c0_entryhi() & ASID_MASK;
/* Sanity check: the hardware ASID must match the mm's context. */
211 if ((pid != (cpu_context(smp_processor_id(), vma->vm_mm) & ASID_MASK))
212 || (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
214 "%s: Wheee, bogus tlbpid mmpid=%d tlbpid=%d\n",
215 __FUNCTION__, (int) (cpu_context(smp_processor_id(),
216 vma->vm_mm) & ASID_MASK), pid);
219 local_irq_save(flags);
/* Round down to the double-page (VPN2) boundary and probe. */
220 address &= (PAGE_MASK << 1);
221 write_c0_entryhi(address | (pid));
222 pgdp = pgd_offset(vma->vm_mm, address);
224 pmdp = pmd_offset(pgdp, address);
225 idx = read_c0_index();
226 ptep = pte_offset_map(pmdp, address);
/*
 * Load the even/odd PTE pair.  NOTE(review): the >> 6 presumably
 * strips software PTE bits to produce the EntryLo format -- confirm
 * against this configuration's pgtable definitions.
 */
227 write_c0_entrylo0(pte_val(*ptep++) >> 6);
228 write_c0_entrylo1(pte_val(*ptep) >> 6);
229 write_c0_entryhi(address | pid);
/*
 * NOTE(review): the tlbwi/tlbwr write selected by idx is not visible
 * in this chunk -- confirm against the full file.
 */
235 write_c0_entryhi(pid);
236 local_irq_restore(flags);
/*
 * Boot-time TLB initialisation: fix the page mask at 4kB, clear the
 * frame mask, flush whatever the firmware left in the TLB, and install
 * the generated TLB refill handler.  __init: discarded after boot.
 */
239 void __init tlb_init(void)
242 * You should never change this register:
243 * - On R4600 1.7 the tlbp never hits for pages smaller than
244 * the value in the c0_pagemask register.
245 * - The entire mm handling assumes the c0_pagemask register to
246 * be set for 4kb pages.
248 write_c0_pagemask(PM_4K);
250 write_c0_framemask(0);
252 /* From this point on the ARC firmware is dead. */
253 local_flush_tlb_all();
255 /* Did I tell you that ARC SUCKS? */
/* Generate and install the TLB refill exception handler. */
257 build_tlb_refill_handler();