/* $Id: spitfire.h,v 1.18 2001/11/29 16:42:10 kanoj Exp $
 * spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC64_SPITFIRE_H
#define _SPARC64_SPITFIRE_H

#include <asm/asi.h>

/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU, that is, there is a distinct and unique copy of
 * each of these registers for each TLB.
 */
#define TSB_TAG_TARGET          0x0000000000000000 /* All chips                         */
#define TLB_SFSR                0x0000000000000018 /* All chips                         */
#define TSB_REG                 0x0000000000000028 /* All chips                         */
#define TLB_TAG_ACCESS          0x0000000000000030 /* All chips                         */
#define VIRT_WATCHPOINT         0x0000000000000038 /* All chips                         */
#define PHYS_WATCHPOINT         0x0000000000000040 /* All chips                         */
#define TSB_EXTENSION_P         0x0000000000000048 /* Ultra-III and later               */
#define TSB_EXTENSION_S         0x0000000000000050 /* Ultra-III and later, D-TLB only   */
#define TSB_EXTENSION_N         0x0000000000000058 /* Ultra-III and later               */
#define TLB_TAG_ACCESS_EXT      0x0000000000000060 /* Ultra-III+ and later              */

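/* Illustrative note (not part of the original header): TLB_SFSR, for
 * example, names both the I-MMU and the D-MMU synchronous fault status
 * register; which copy is accessed depends on whether ASI_IMMU or
 * ASI_DMMU is used, see spitfire_get_isfsr()/spitfire_get_dsfsr() below.
 */
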
/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 */
#define PRIMARY_CONTEXT         0x0000000000000008
#define SECONDARY_CONTEXT       0x0000000000000010
#define DMMU_SFAR               0x0000000000000020
#define VIRT_WATCHPOINT         0x0000000000000038
#define PHYS_WATCHPOINT         0x0000000000000040

#define SPITFIRE_HIGHEST_LOCKED_TLBENT  (64 - 1)
#define CHEETAH_HIGHEST_LOCKED_TLBENT   (16 - 1)

#define L1DCACHE_SIZE           0x4000

#ifndef __ASSEMBLY__

enum ultra_tlb_layout {
        spitfire = 0,
        cheetah = 1,
        cheetah_plus = 2,
};

extern enum ultra_tlb_layout tlb_type;

extern int cheetah_pcache_forced_on;
extern void cheetah_enable_pcache(void);

#define sparc64_highest_locked_tlbent() \
        (tlb_type == spitfire ? \
         SPITFIRE_HIGHEST_LOCKED_TLBENT : \
         CHEETAH_HIGHEST_LOCKED_TLBENT)
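
/* Illustrative note (not in the original header): tlb_type is set during
 * early boot, after which this evaluates to 63 on Spitfire-class chips
 * and 15 on Cheetah-class chips, e.g.:
 *
 *      int slot = sparc64_highest_locked_tlbent();
 *
 * for code that needs to pin a translation in a known locked TLB slot.
 */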

static __inline__ unsigned long spitfire_get_isfsr(void)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (ret)
                             : "r" (TLB_SFSR), "i" (ASI_IMMU));
        return ret;
}

static __inline__ unsigned long spitfire_get_dsfsr(void)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (ret)
                             : "r" (TLB_SFSR), "i" (ASI_DMMU));
        return ret;
}

static __inline__ unsigned long spitfire_get_sfar(void)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (ret)
                             : "r" (DMMU_SFAR), "i" (ASI_DMMU));
        return ret;
}

static __inline__ void spitfire_put_isfsr(unsigned long sfsr)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* no outputs */
                             : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
}

static __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* no outputs */
                             : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
}

/* The data cache is write-through, so this just invalidates the
 * specified line.
 */
static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
        __asm__ __volatile__ ("membar #Sync" : : : "memory");
}

/* The instruction cache lines are flushed with this, but note that
 * it does not flush the pipeline.  It is possible for a line to be
 * flushed while stale instructions are still in the pipeline; a flush
 * instruction (to any address) is sufficient to handle this issue
 * after the line is invalidated.
 */
static __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
}
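
/* Illustrative sketch (not part of the original header): after
 * invalidating an I-cache line, a single "flush" of any address drains
 * stale instructions from the pipeline, per the comment above.  The
 * function name is made up for illustration; %g6 is used here purely as
 * an example operand.
 */
static __inline__ void spitfire_example_pipeline_flush(void)
{
        __asm__ __volatile__("flush     %%g6" : : : "memory");
}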

static __inline__ unsigned long spitfire_get_dtlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (data)
                             : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));

        /* Clear TTE diag bits. */
        data &= ~0x0003fe0000000000UL;

        return data;
}

static __inline__ unsigned long spitfire_get_dtlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (tag)
                             : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
        return tag;
}

static __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (data), "r" (entry << 3),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ unsigned long spitfire_get_itlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (data)
                             : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));

        /* Clear TTE diag bits. */
        data &= ~0x0003fe0000000000UL;

        return data;
}

static __inline__ unsigned long spitfire_get_itlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (tag)
                             : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
        return tag;
}

static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (data), "r" (entry << 3),
                               "i" (ASI_ITLB_DATA_ACCESS));
}

/* Spitfire hardware assisted TLB flushes. */

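/* Explanatory note (added, not in the original header; per the UltraSPARC
 * MMU demap address format as used by the helpers below): bits 5:4 of the
 * demap address select the context (00 = primary, 01 = secondary,
 * 10 = nucleus) and bit 6 selects the demap type (0 = page, 1 = context),
 * hence the 0x40/0x50/0x60 constants for context flushes and the
 * 0x10/0x20 offsets OR'd into the page flushes.  Cheetah additionally
 * implements a "demap all" type, encoded as 0x80 further below.
 */
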
/* Context level flushes. */
static __inline__ void spitfire_flush_dtlb_primary_context(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x40), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_primary_context(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x40), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_secondary_context(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x50), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_secondary_context(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x50), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_nucleus_context(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x60), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_nucleus_context(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x60), "i" (ASI_IMMU_DEMAP));
}

/* Page level flushes. */
static __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (page), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (page), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
}
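
/* Illustrative sketch (not part of the original header): kernel mappings
 * live in the nucleus context, so dropping a single kernel page from the
 * TLBs on a Spitfire-class chip touches both the D- and I-TLB.  The
 * function name is made up for illustration.
 */
static __inline__ void spitfire_example_flush_kernel_page(unsigned long page)
{
        spitfire_flush_dtlb_nucleus_page(page);
        spitfire_flush_itlb_nucleus_page(page);
}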

/* Cheetah has "all non-locked" tlb flushes. */
static __inline__ void cheetah_flush_dtlb_all(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x80), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void cheetah_flush_itlb_all(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x80), "i" (ASI_IMMU_DEMAP));
}

/* Cheetah has a 4-TLB layout, so direct access is a bit different.
 * The first two TLBs are fully associative, hold 16 entries, and are
 * used only for locked and >8K sized translations.  One exists for
 * data accesses and one for instruction accesses.
 *
 * The third TLB is for data accesses to 8K non-locked translations, is
 * 2 way associative, and holds 512 entries.  The fourth TLB is for
 * instruction accesses to 8K non-locked translations, is 2 way
 * associative, and holds 128 entries.
 *
 * Cheetah has some bug where bogus data can be returned from
 * ASI_{D,I}TLB_DATA_ACCESS loads; doing the load twice fixes
 * the problem for me. -DaveM
 */
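
/* Explanatory note (added, not in the original header): in the diagnostic
 * accesses below, the TLB number is placed at bit 16 of the ASI address
 * and the entry index at bit 3 (each entry occupies 8 bytes), i.e.
 * (tlb << 16) | (entry << 3).  TLB number 0 selects the small locked
 * TLB; the larger unlocked TLBs live at higher numbers (the I-side
 * accessors below use 2).
 */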
static __inline__ unsigned long cheetah_get_ldtlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa      [%1] %2, %%g0\n\t"
                             "ldxa      [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_DTLB_DATA_ACCESS));

        return data;
}

static __inline__ unsigned long cheetah_get_litlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa      [%1] %2, %%g0\n\t"
                             "ldxa      [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));

        return data;
}

static __inline__ unsigned long cheetah_get_ldtlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_DTLB_TAG_READ));

        return tag;
}

static __inline__ unsigned long cheetah_get_litlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_TAG_READ));

        return tag;
}
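
/* Illustrative sketch (not part of the original header): walking the
 * 16-entry locked D-TLB with the accessors above.  The function name
 * is made up for illustration.
 */
static __inline__ void cheetah_example_scan_ldtlb(unsigned long *tags,
                                                  unsigned long *datas)
{
        int i;

        for (i = 0; i <= CHEETAH_HIGHEST_LOCKED_TLBENT; i++) {
                tags[i]  = cheetah_get_ldtlb_tag(i);
                datas[i] = cheetah_get_ldtlb_data(i);
        }
}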

static __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ void cheetah_put_litlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));
}

static __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb)
{
        unsigned long data;

        __asm__ __volatile__("ldxa      [%1] %2, %%g0\n\t"
                             "ldxa      [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS));

        return data;
}

static __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));
        return tag;
}

static __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((tlb << 16) | (entry << 3)),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ unsigned long cheetah_get_itlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa      [%1] %2, %%g0\n\t"
                             "ldxa      [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((2 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));

        return data;
}

static __inline__ unsigned long cheetah_get_itlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
        return tag;
}

static __inline__ void cheetah_put_itlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (data), "r" ((2 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));
}

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_SPITFIRE_H) */