linux-2.6.6: arch/mips/mm/tlb-andes.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1997, 1998, 1999 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000 Kanoj Sarcar (kanoj@sgi.com)
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mmu_context.h>

extern void except_vec0_generic(void);
extern void except_vec0_r4000(void);
extern void except_vec1_generic(void);
extern void except_vec1_r10k(void);

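/*
 * The R10000 has a 64-entry fully associative TLB; each entry maps an
 * even/odd pair of virtual pages (one VPN2 tag, two PFNs).  Range
 * flushes that cover no more than half the TLB are done entry by
 * entry; anything larger is cheaper to handle wholesale.
 */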
#define NTLB_ENTRIES       64
#define NTLB_ENTRIES_HALF  32

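/*
 * Flush the whole TLB.  Entries below the c0_wired index hold fixed
 * kernel mappings and are skipped; every other slot is overwritten
 * with an invalid entry whose VPN2 lies in unmapped CKSEG0 space, so
 * nothing can ever match it.
 */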
void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        unsigned long entry;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi() & ASID_MASK;
        write_c0_entryhi(CKSEG0);
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = read_c0_wired();

        /* Blast 'em all away. */
        while (entry < NTLB_ENTRIES) {
                write_c0_index(entry);
                tlb_write_indexed();
                entry++;
        }
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);
}

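/*
 * Flushing a whole address space needs no TLB writes at all: dropping
 * the mm's context assigns it a fresh ASID, which makes every entry
 * still tagged with the old ASID unreachable.
 */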
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0)
                drop_mmu_context(mm, cpu);
}

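/*
 * Flush a range of user addresses.  Small ranges are probed and
 * invalidated pair by pair under the mm's ASID; ranges spanning more
 * than half the TLB just drop the context instead.
 */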
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long flags;
                int size;

                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                size = (size + 1) >> 1;         /* pages -> even/odd pairs */
                if (size <= NTLB_ENTRIES_HALF) {
                        int oldpid = read_c0_entryhi() & ASID_MASK;
                        int newpid = cpu_context(cpu, mm) & ASID_MASK;

                        start &= (PAGE_MASK << 1);
                        end += ((PAGE_SIZE << 1) - 1);
                        end &= (PAGE_MASK << 1);
                        while (start < end) {
                                int idx;

                                write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                tlb_probe();
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                write_c0_entryhi(CKSEG0);
                                if (idx < 0)    /* miss: nothing to kill */
                                        continue;
                                tlb_write_indexed();
                        }
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
                local_irq_restore(flags);
        }
}

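/*
 * Same approach for kernel addresses, except there is no ASID to
 * drop, so an oversized range falls back to a full flush.
 */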
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long flags;
        int size;

        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;

        local_irq_save(flags);
        if (size <= NTLB_ENTRIES_HALF) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        tlb_probe();
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        /* Make sure all replacement entries differ. */
                        write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
                        if (idx < 0)
                                continue;
                        tlb_write_indexed();
                }
                write_c0_entryhi(pid);
        } else {
                local_flush_tlb_all();
        }
        local_irq_restore(flags);
}

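/*
 * Flush a single user page: probe for the even/odd pair covering the
 * address under the mm's ASID and, on a hit, overwrite the entry with
 * an invalid one.
 */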
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        if (cpu_context(smp_processor_id(), vma->vm_mm) != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;

                newpid = cpu_context(smp_processor_id(), vma->vm_mm) &
                         ASID_MASK;
                page &= (PAGE_MASK << 1);
                local_irq_save(flags);
                oldpid = read_c0_entryhi() & ASID_MASK;
                write_c0_entryhi(page | newpid);
                tlb_probe();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                write_c0_entryhi(CKSEG0);
                if (idx >= 0)
                        tlb_write_indexed();
                write_c0_entryhi(oldpid);
                local_irq_restore(flags);
        }
}

/*
 * This one is only used for pages with the global bit set, so we don't
 * care much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        local_irq_save(flags);
        page &= (PAGE_MASK << 1);
        oldpid = read_c0_entryhi() & ASID_MASK;
        write_c0_entryhi(page);
        tlb_probe();
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
                tlb_write_indexed();
        }
        write_c0_entryhi(oldpid);

        local_irq_restore(flags);
}

/* XXX Simplify this.  On the R10000 writing a TLB entry for a virtual
   address that already exists will overwrite the old entry and not result
   in TLB malfunction or TLB shutdown.  */
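/*
 * Called via update_mmu_cache() once the page tables have been
 * updated after a fault: look up the new even/odd PTE pair and load
 * it into the TLB, replacing the stale entry if one is present or a
 * random slot otherwise.
 */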
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle debugger faulting in for debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        pid = read_c0_entryhi() & ASID_MASK;

        if ((pid != (cpu_context(smp_processor_id(), vma->vm_mm) & ASID_MASK))
            || (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
                printk(KERN_WARNING
                       "%s: Wheee, bogus tlbpid mmpid=%d tlbpid=%d\n",
                       __FUNCTION__, (int) (cpu_context(smp_processor_id(),
                       vma->vm_mm) & ASID_MASK), pid);
        }

        local_irq_save(flags);
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        /* The page table walk is interleaved with the probe so that the
           pointer loads can overlap the tlbp latency. */
        pgdp = pgd_offset(vma->vm_mm, address);
        tlb_probe();
        pmdp = pmd_offset(pgdp, address);
        idx = read_c0_index();
        ptep = pte_offset_map(pmdp, address);
        /* Reload the even/odd PTE pair for this VPN2. */
        write_c0_entrylo0(pte_val(*ptep++) >> 6);
        write_c0_entrylo1(pte_val(*ptep) >> 6);
        write_c0_entryhi(address | pid);
        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        write_c0_entryhi(pid);
        local_irq_restore(flags);
}

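/*
 * Early TLB setup: pin the page size, un-wire and flush whatever the
 * ARC firmware left behind, clear the R10000 framemask, and install
 * the TLB/XTLB refill handlers at the low exception vectors.
 */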
void __init tlb_init(void)
{
        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set for 4kB pages.
         */
        write_c0_pagemask(PM_4K);
        write_c0_wired(0);
        write_c0_framemask(0);

        /* From this point on the ARC firmware is dead.  */
        local_flush_tlb_all();

        /* Did I tell you that ARC SUCKS?  */

#ifdef CONFIG_MIPS32
        memcpy((void *)KSEG0, &except_vec0_r4000, 0x80);
        memcpy((void *)(KSEG0 + 0x080), &except_vec1_generic, 0x80);
        flush_icache_range(KSEG0, KSEG0 + 0x100);
#endif
#ifdef CONFIG_MIPS64
        memcpy((void *)(CKSEG0 + 0x000), &except_vec0_generic, 0x80);
        memcpy((void *)(CKSEG0 + 0x080), &except_vec1_r10k, 0x80);
        flush_icache_range(CKSEG0, CKSEG0 + 0x100);
#endif
}