/* Source: linux-2.6.10, arch/mips/mm/tlb-andes.c */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1997, 1998, 1999 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000 Kanoj Sarcar (kanoj@sgi.com)
 */
10 #include <linux/config.h>
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
14 #include <linux/mm.h>
15 #include <asm/page.h>
16 #include <asm/pgtable.h>
17 #include <asm/system.h>
18 #include <asm/mmu_context.h>
19
20 extern void build_tlb_refill_handler(void);
21
22 #define NTLB_ENTRIES       64
23 #define NTLB_ENTRIES_HALF  32
24
25 void local_flush_tlb_all(void)
26 {
27         unsigned long flags;
28         unsigned long old_ctx;
29         unsigned long entry;
30
31         local_irq_save(flags);
32         /* Save old context and create impossible VPN2 value */
33         old_ctx = read_c0_entryhi() & ASID_MASK;
34         write_c0_entryhi(CKSEG0);
35         write_c0_entrylo0(0);
36         write_c0_entrylo1(0);
37
38         entry = read_c0_wired();
39
40         /* Blast 'em all away. */
41         while (entry < NTLB_ENTRIES) {
42                 write_c0_index(entry);
43                 tlb_write_indexed();
44                 entry++;
45         }
46         write_c0_entryhi(old_ctx);
47         local_irq_restore(flags);
48 }
49
/*
 * Flush all TLB entries belonging to an address space on this CPU.
 *
 * If the mm has never been given an ASID on this CPU there is nothing
 * in the TLB for it; otherwise dropping the MMU context forces a new
 * ASID to be allocated, which invalidates the stale entries lazily.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) == 0)
                return;

        drop_mmu_context(mm, cpu);
}
57
/*
 * Flush the TLB entries covering [start, end) of a user address space
 * on this CPU.
 *
 * For small ranges each double-page (TLB entries map an even/odd page
 * pair) is probed and, if present, overwritten with an invalid entry.
 * For ranges larger than half the TLB it is cheaper to drop the whole
 * MMU context and let a new ASID invalidate everything lazily.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        /* No ASID on this CPU means nothing of this mm is in the TLB. */
        if (cpu_context(cpu, mm) != 0) {
                unsigned long flags;
                int size;

                local_irq_save(flags);
                /* Number of pages, rounded up ... */
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                /* ... then number of even/odd page pairs (TLB entries). */
                size = (size + 1) >> 1;
                if (size <= NTLB_ENTRIES_HALF) {
                        int oldpid = (read_c0_entryhi() & ASID_MASK);
                        int newpid = (cpu_context(smp_processor_id(), mm)
                                      & ASID_MASK);

                        /* Align the range to double-page boundaries. */
                        start &= (PAGE_MASK << 1);
                        end += ((PAGE_SIZE << 1) - 1);
                        end &= (PAGE_MASK << 1);
                        while(start < end) {
                                int idx;

                                /* Probe for this VPN2 under the mm's ASID. */
                                write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                tlb_probe();
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                /* Impossible VPN2 so the replacement entry
                                   can never hit. */
                                write_c0_entryhi(CKSEG0);
                                /* Negative index: probe missed, nothing to
                                   invalidate for this pair. */
                                if(idx < 0)
                                        continue;
                                tlb_write_indexed();
                        }
                        write_c0_entryhi(oldpid);
                } else {
                        /* Range too big: cheaper to retire the ASID. */
                        drop_mmu_context(mm, cpu);
                }
                local_irq_restore(flags);
        }
}
100
/*
 * Flush the TLB entries covering the kernel range [start, end) on this
 * CPU.  Kernel mappings are not tagged with a user ASID, so the probe
 * uses the bare virtual address.  Ranges larger than half the TLB fall
 * back to a full flush.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long flags;
        int size;

        /* Pages, rounded up, then even/odd page pairs (TLB entries). */
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;

        local_irq_save(flags);
        if (size <= NTLB_ENTRIES_HALF) {
                /* Full EntryHi (VPN2 + ASID) is saved and restored here,
                   unlike the user-range path which keeps only the ASID. */
                int pid = read_c0_entryhi();

                /* Align the range to double-page boundaries. */
                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        tlb_probe();
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        /* Unique unmapped VPN2 per index so the invalid
                           replacement entries never alias each other.
                           NOTE(review): written even when idx < 0 (probe
                           miss), which is harmless since no tlb write
                           follows in that case. */
                        write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT+1)));
                        if (idx < 0)
                                continue;
                        tlb_write_indexed();
                }
                write_c0_entryhi(pid);
        } else {
                local_flush_tlb_all();
        }
        local_irq_restore(flags);
}
137
138 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
139 {
140         if (cpu_context(smp_processor_id(), vma->vm_mm) != 0) {
141                 unsigned long flags;
142                 int oldpid, newpid, idx;
143
144                 newpid = (cpu_context(smp_processor_id(), vma->vm_mm) &
145                           ASID_MASK);
146                 page &= (PAGE_MASK << 1);
147                 local_irq_save(flags);
148                 oldpid = (read_c0_entryhi() & ASID_MASK);
149                 write_c0_entryhi(page | newpid);
150                 tlb_probe();
151                 idx = read_c0_index();
152                 write_c0_entrylo0(0);
153                 write_c0_entrylo1(0);
154                 write_c0_entryhi(CKSEG0);
155                 if (idx < 0)
156                         goto finish;
157                 tlb_write_indexed();
158
159         finish:
160                 write_c0_entryhi(oldpid);
161                 local_irq_restore(flags);
162         }
163 }
164
165 /*
166  * This one is only used for pages with the global bit set so we don't care
167  * much about the ASID.
168  */
169 void local_flush_tlb_one(unsigned long page)
170 {
171         unsigned long flags;
172         int oldpid, idx;
173
174         local_irq_save(flags);
175         page &= (PAGE_MASK << 1);
176         oldpid = read_c0_entryhi() & 0xff;
177         write_c0_entryhi(page);
178         tlb_probe();
179         idx = read_c0_index();
180         write_c0_entrylo0(0);
181         write_c0_entrylo1(0);
182         if (idx >= 0) {
183                 /* Make sure all entries differ. */
184                 write_c0_entryhi(CKSEG0+(idx<<(PAGE_SHIFT+1)));
185                 tlb_write_indexed();
186         }
187         write_c0_entryhi(oldpid);
188
189         local_irq_restore(flags);
190 }
191
/* XXX Simplify this.  On the R10000 writing a TLB entry for an virtual
   address that already exists will overwrite the old entry and not result
   in TLB malfunction or TLB shutdown.  */
/*
 * Install or refresh the TLB entry for @address after a page-table
 * update.  Probes for an existing entry and either overwrites it in
 * place (indexed write) or lets the hardware pick a slot (random
 * write).
 *
 * NOTE(review): the page-table walk (pgd/pmd/pte lookups) is
 * interleaved with the CP0 probe/index reads — presumably to hide the
 * tlbp hazard delay; do not reorder these statements.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle debugger faulting in for debugee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        pid = read_c0_entryhi() & ASID_MASK;

        /* Sanity check: the live ASID should match the mm's context. */
        if ((pid != (cpu_context(smp_processor_id(), vma->vm_mm) & ASID_MASK))
            || (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
                printk(KERN_WARNING
                       "%s: Wheee, bogus tlbpid mmpid=%d tlbpid=%d\n",
                       __FUNCTION__, (int) (cpu_context(smp_processor_id(),
                       vma->vm_mm) & ASID_MASK), pid);
        }

        local_irq_save(flags);
        /* Double-page (even/odd pair) alignment for the VPN2. */
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | (pid));
        pgdp = pgd_offset(vma->vm_mm, address);
        tlb_probe();
        pmdp = pmd_offset(pgdp, address);
        idx = read_c0_index();
        ptep = pte_offset_map(pmdp, address);
        /* Even page into EntryLo0, odd page into EntryLo1.  The >> 6
           converts the software pte into EntryLo format — TODO confirm
           shift against this platform's pte layout. */
        write_c0_entrylo0(pte_val(*ptep++) >> 6);
        write_c0_entrylo1(pte_val(*ptep) >> 6);
        write_c0_entryhi(address | pid);
        if (idx < 0) {
                /* Probe missed: no existing entry, let hardware choose. */
                tlb_write_random();
        } else {
                /* Overwrite the stale entry in place. */
                tlb_write_indexed();
        }
        write_c0_entryhi(pid);
        local_irq_restore(flags);
}
238
/*
 * One-time TLB initialisation at boot: fix the page size at 4kb,
 * unwire all entries, clear the frame mask, flush whatever the
 * firmware left in the TLB, and install the TLB refill exception
 * handler.
 */
void __init tlb_init(void)
{
        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set for 4kb pages.
         */
        write_c0_pagemask(PM_4K);
        write_c0_wired(0);
        write_c0_framemask(0);

        /* From this point on the ARC firmware is dead.  */
        local_flush_tlb_all();

        /* Did I tell you that ARC SUCKS?  */

        build_tlb_refill_handler();
}