ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2
[linux-2.6.git] / arch / parisc / kernel / cache.c
1 /* $Id: cache.c,v 1.4 2000/01/25 00:11:38 prumpf Exp $
2  *
3  * This file is subject to the terms and conditions of the GNU General Public
4  * License.  See the file "COPYING" in the main directory of this archive
5  * for more details.
6  *
7  * Copyright (C) 1999 Helge Deller (07-13-1999)
8  * Copyright (C) 1999 SuSE GmbH Nuernberg
9  * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
10  *
11  * Cache and TLB management
12  *
13  */
14  
15 #include <linux/init.h>
16 #include <linux/kernel.h>
17 #include <linux/mm.h>
18 #include <linux/module.h>
19 #include <linux/seq_file.h>
20
21 #include <asm/pdc.h>
22 #include <asm/cache.h>
23 #include <asm/cacheflush.h>
24 #include <asm/tlbflush.h>
25 #include <asm/system.h>
26 #include <asm/page.h>
27 #include <asm/pgalloc.h>
28 #include <asm/processor.h>
29
/* Non-zero when the CPU has separate I and D TLBs and must flush them
 * individually (set up in parisc_cache_init() from PDC cache info). */
int split_tlb;
/* Byte strides used by the assembly flush loops in pacache.S; computed
 * from the PDC-reported block size and line size in parisc_cache_init(). */
int dcache_stride;
int icache_stride;
EXPORT_SYMBOL(dcache_stride);

/* Cache/TLB geometry as reported by firmware (PDC_CACHE call). */
struct pdc_cache_info cache_info;
#ifndef CONFIG_PA20
/* Block TLB info — PA1.x only; zeroed if the PDC call fails. */
static struct pdc_btlb_info btlb_info;
#endif
39
#ifdef CONFIG_SMP
/* Flush the data cache on every CPU by running the local flush routine
 * on each of them.  The trailing (1, 1) arguments are presumably the
 * retry/wait flags of this kernel's on_each_cpu() — i.e. wait until all
 * CPUs have completed the flush before returning.  (Verify against the
 * on_each_cpu() prototype in this tree.) */
void
flush_data_cache(void)
{
        on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
}
#endif
47
/* Flush both the instruction and data caches on the local CPU only.
 * Caches only — the TLB is untouched (see flush_all_caches() for both). */
void
flush_cache_all_local(void)
{
        flush_instruction_cache_local();
        flush_data_cache_local();
}
EXPORT_SYMBOL(flush_cache_all_local);
55
/* flushes EVERYTHING (tlb & cache) */

/* Heavyweight flush: all caches on all CPUs, then the whole TLB. */
void
flush_all_caches(void)
{
        flush_cache_all();
        flush_tlb_all();
}
EXPORT_SYMBOL(flush_all_caches);
65
66 void
67 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
68 {
69         struct page *page = pte_page(pte);
70
71         if (VALID_PAGE(page) && page_mapping(page) &&
72             test_bit(PG_dcache_dirty, &page->flags)) {
73
74                 flush_kernel_dcache_page(page_address(page));
75                 clear_bit(PG_dcache_dirty, &page->flags);
76         }
77 }
78
/* Emit the cache/TLB summary lines for /proc/cpuinfo via @m.
 * Sizes come from the firmware-populated cache_info; on PA1.x the
 * Block-TLB section is appended from btlb_info. */
void
show_cache_info(struct seq_file *m)
{
        seq_printf(m, "I-cache\t\t: %ld KB\n", 
                cache_info.ic_size/1024 );
        seq_printf(m, "D-cache\t\t: %ld KB (%s)%s\n", 
                cache_info.dc_size/1024,
                (cache_info.dc_conf.cc_wt ? "WT":"WB"),
                (cache_info.dc_conf.cc_sh ? " - shared I/D":"")
        );

        seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
                cache_info.it_size,
                cache_info.dt_size,
                cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
        );
                
#ifndef CONFIG_PA20
        /* BTLB - Block TLB */
        if (btlb_info.max_size==0) {
                /* max_size stays 0 when pdc_btlb_info() failed in init. */
                seq_printf(m, "BTLB\t\t: not supported\n" );
        } else {
                seq_printf(m, 
                "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
                "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
                "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
                btlb_info.max_size, (int)4096,
                btlb_info.max_size>>8,
                btlb_info.fixed_range_info.num_i,
                btlb_info.fixed_range_info.num_d,
                btlb_info.fixed_range_info.num_comb, 
                btlb_info.variable_range_info.num_i,
                btlb_info.variable_range_info.num_d,
                btlb_info.variable_range_info.num_comb
                );
        }
#endif
}
117
/* Query firmware (PDC) for cache/TLB geometry and derive the values the
 * flush code depends on: split_tlb, dcache_stride and icache_stride.
 * Panics if the machine's aliasing model cannot be handled.  Called once
 * at boot. */
void __init 
parisc_cache_init(void)
{
        if (pdc_cache_info(&cache_info) < 0)
                panic("parisc_cache_init: pdc_cache_info failed");

#if 0
        printk(KERN_DEBUG "ic_size %lx dc_size %lx it_size %lx pdc_cache_info %d*long pdc_cache_cf %d\n",
            cache_info.ic_size,
            cache_info.dc_size,
            cache_info.it_size,
            sizeof (struct pdc_cache_info) / sizeof (long),
            sizeof (struct pdc_cache_cf)
        );

        printk(KERN_DEBUG "dc base %x dc stride %x dc count %x dc loop %d\n",
            cache_info.dc_base,
            cache_info.dc_stride,
            cache_info.dc_count,
            cache_info.dc_loop);

        printk(KERN_DEBUG "dc conf: alias %d block %d line %d wt %d sh %d cst %d assoc %d\n",
            cache_info.dc_conf.cc_alias,
            cache_info.dc_conf.cc_block,
            cache_info.dc_conf.cc_line,
            cache_info.dc_conf.cc_wt,
            cache_info.dc_conf.cc_sh,
            cache_info.dc_conf.cc_cst,
            cache_info.dc_conf.cc_assoc);

        printk(KERN_DEBUG "ic conf: alias %d block %d line %d wt %d sh %d cst %d assoc %d\n",
            cache_info.ic_conf.cc_alias,
            cache_info.ic_conf.cc_block,
            cache_info.ic_conf.cc_line,
            cache_info.ic_conf.cc_wt,
            cache_info.ic_conf.cc_sh,
            cache_info.ic_conf.cc_cst,
            cache_info.ic_conf.cc_assoc);

        printk(KERN_DEBUG "dt conf: sh %d page %d cst %d aid %d pad1 %d \n",
            cache_info.dt_conf.tc_sh,
            cache_info.dt_conf.tc_page,
            cache_info.dt_conf.tc_cst,
            cache_info.dt_conf.tc_aid,
            cache_info.dt_conf.tc_pad1);

        printk(KERN_DEBUG "it conf: sh %d page %d cst %d aid %d pad1 %d \n",
            cache_info.it_conf.tc_sh,
            cache_info.it_conf.tc_page,
            cache_info.it_conf.tc_cst,
            cache_info.it_conf.tc_aid,
            cache_info.it_conf.tc_pad1);
#endif

        /* tc_sh == 0 means the D-TLB is not shared with the I-TLB, so
         * the two must be flushed separately; tc_sh == 2 is treated the
         * same way, conservatively. */
        split_tlb = 0;
        if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
                if (cache_info.dt_conf.tc_sh == 2)
                        printk(KERN_WARNING "Unexpected TLB configuration. "
                        "Will flush I/D separately (could be optimized).\n");

                split_tlb = 1;
        }

        /* Stride (in bytes) for the flush loops: block size scaled by
         * line size.  The "+ 3" shift presumably converts the encoded
         * cc_block field into a byte count — confirm against the PDC
         * cache-config documentation. */
        dcache_stride = (1 << (cache_info.dc_conf.cc_block + 3)) *
                                                cache_info.dc_conf.cc_line;
        icache_stride = (1 << (cache_info.ic_conf.cc_block + 3)) *
                                                cache_info.ic_conf.cc_line;
#ifndef CONFIG_PA20
        /* Missing BTLB support is not fatal: zero btlb_info so
         * show_cache_info() reports "not supported". */
        if (pdc_btlb_info(&btlb_info) < 0) {
                memset(&btlb_info, 0, sizeof btlb_info);
        }
#endif

        /* Non-equivalent virtual aliasing can only be handled by the SMP
         * kernel's flush strategy; refuse to continue otherwise. */
        if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                                PDC_MODEL_NVA_UNSUPPORTED) {
                printk(KERN_WARNING "Only equivalent aliasing supported\n");
#ifndef CONFIG_SMP
                panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
        }
}
199
200 void disable_sr_hashing(void)
201 {
202         int srhash_type;
203
204         switch (boot_cpu_data.cpu_type) {
205         case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
206                 BUG();
207                 return;
208
209         case pcxs:
210         case pcxt:
211         case pcxt_:
212                 srhash_type = SRHASH_PCXST;
213                 break;
214
215         case pcxl:
216                 srhash_type = SRHASH_PCXL;
217                 break;
218
219         case pcxl2: /* pcxl2 doesn't support space register hashing */
220                 return;
221
222         default: /* Currently all PA2.0 machines use the same ins. sequence */
223                 srhash_type = SRHASH_PA20;
224                 break;
225         }
226
227         disable_sr_hashing_asm(srhash_type);
228 }
229
230 void __flush_dcache_page(struct page *page)
231 {
232         struct address_space *mapping = page_mapping(page);
233         struct mm_struct *mm = current->active_mm;
234         struct list_head *l;
235
236         flush_kernel_dcache_page(page_address(page));
237
238         if (!mapping)
239                 return;
240         /* check shared list first if it's not empty...it's usually
241          * the shortest */
242         list_for_each(l, &mapping->i_mmap_shared) {
243                 struct vm_area_struct *mpnt;
244                 unsigned long off;
245
246                 mpnt = list_entry(l, struct vm_area_struct, shared);
247
248                 /*
249                  * If this VMA is not in our MM, we can ignore it.
250                  */
251                 if (mpnt->vm_mm != mm)
252                         continue;
253
254                 if (page->index < mpnt->vm_pgoff)
255                         continue;
256
257                 off = page->index - mpnt->vm_pgoff;
258                 if (off >= (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT)
259                         continue;
260
261                 flush_cache_page(mpnt, mpnt->vm_start + (off << PAGE_SHIFT));
262
263                 /* All user shared mappings should be equivalently mapped,
264                  * so once we've flushed one we should be ok
265                  */
266                 return;
267         }
268
269         /* then check private mapping list for read only shared mappings
270          * which are flagged by VM_MAYSHARE */
271         list_for_each(l, &mapping->i_mmap) {
272                 struct vm_area_struct *mpnt;
273                 unsigned long off;
274
275                 mpnt = list_entry(l, struct vm_area_struct, shared);
276
277
278                 if (mpnt->vm_mm != mm || !(mpnt->vm_flags & VM_MAYSHARE))
279                         continue;
280
281                 if (page->index < mpnt->vm_pgoff)
282                         continue;
283
284                 off = page->index - mpnt->vm_pgoff;
285                 if (off >= (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT)
286                         continue;
287
288                 flush_cache_page(mpnt, mpnt->vm_start + (off << PAGE_SHIFT));
289
290                 /* All user shared mappings should be equivalently mapped,
291                  * so once we've flushed one we should be ok
292                  */
293                 break;
294         }
295 }
296 EXPORT_SYMBOL(__flush_dcache_page);
297
/* Defined in arch/parisc/kernel/pacache.S */
/* Exported so modules can flush kernel-virtual ranges and pages. */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);