1 /* $Id: cache.c,v 1.4 2000/01/25 00:11:38 prumpf Exp $
3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
7 * Copyright (C) 1999 Helge Deller (07-13-1999)
8 * Copyright (C) 1999 SuSE GmbH Nuernberg
9 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
11 * Cache and TLB management
15 #include <linux/init.h>
16 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/seq_file.h>
22 #include <asm/cache.h>
23 #include <asm/cacheflush.h>
24 #include <asm/tlbflush.h>
25 #include <asm/system.h>
27 #include <asm/pgalloc.h>
28 #include <asm/processor.h>
/* Export the data-cache flush stride for modules; the variable itself is
 * defined elsewhere (not visible in this excerpt). */
33 EXPORT_SYMBOL(dcache_stride);
/* Cache geometry as reported by PDC firmware; filled in by
 * parisc_cache_init() via pdc_cache_info(). */
35 struct pdc_cache_info cache_info;
/* Block-TLB capability info from PDC; zeroed when pdc_btlb_info() fails,
 * in which case show_cache_info() reports BTLB as unsupported. */
37 static struct pdc_btlb_info btlb_info;
/* Flush the data cache on every CPU by running the per-CPU local flush
 * on each of them.
 * NOTE(review): this listing is missing lines here (return type and
 * braces are not visible) — confirm against the full file. */
42 flush_data_cache(void)
44 	on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
/* Flush both the instruction and data caches on the *current* CPU only.
 * NOTE(review): listing is missing lines here (return type and braces
 * not visible) — confirm against the full file. */
49 flush_cache_all_local(void)
51 	flush_instruction_cache_local();
52 	flush_data_cache_local();
54 EXPORT_SYMBOL(flush_cache_all_local);
56 /* flushes EVERYTHING (tlb & cache) */
/* NOTE(review): the function body is entirely absent from this listing —
 * only the name and the export are visible. Do not infer behavior beyond
 * the comment above; consult the full file. */
59 flush_all_caches(void)
64 EXPORT_SYMBOL(flush_all_caches);
/* MM hook called after a PTE has been installed for @address in @vma.
 * If the backing page was marked dirty in the D-cache while mapped only
 * through its kernel address (PG_dcache_dirty set, e.g. by a deferred
 * flush_dcache_page), flush it through the kernel mapping now and clear
 * the flag, so the new user mapping sees coherent data.
 * NOTE(review): listing is missing lines (braces not visible). */
67 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
69 	struct page *page = pte_page(pte);
	/* Only act on valid pagecache pages with the deferred-dirty bit set. */
71 	if (VALID_PAGE(page) && page_mapping(page) &&
72 	    test_bit(PG_dcache_dirty, &page->flags)) {
74 		flush_kernel_dcache_page(page_address(page));
75 		clear_bit(PG_dcache_dirty, &page->flags);
/* Emit human-readable cache/TLB geometry into @m (used by /proc/cpuinfo
 * style seq_file output): I/D cache sizes, D-cache write policy (WT/WB),
 * whether caches and TLBs are shared between I and D sides, and the
 * Block-TLB entry counts (or "not supported" when btlb_info is empty).
 * NOTE(review): this listing is missing lines (some seq_printf arguments
 * and the closing braces are not visible). */
80 show_cache_info(struct seq_file *m)
82 	seq_printf(m, "I-cache\t\t: %ld KB\n",
83 		cache_info.ic_size/1024 );
84 	seq_printf(m, "D-cache\t\t: %ld KB (%s)%s\n",
85 		cache_info.dc_size/1024,
		/* cc_wt: write-through vs. write-back D-cache. */
86 		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		/* cc_sh: single cache shared by instruction and data sides. */
87 		(cache_info.dc_conf.cc_sh ? " - shared I/D":"")
90 	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
93 		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
97 	/* BTLB - Block TLB */
	/* max_size == 0 means pdc_btlb_info() failed (struct was zeroed). */
98 	if (btlb_info.max_size==0) {
99 		seq_printf(m, "BTLB\t\t: not supported\n" );
102 			"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
103 			"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
104 			"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		/* max_size is in 4 KB pages; >>8 converts pages to MB
		 * (pages * 4096 / 2^20 == pages >> 8). */
105 			btlb_info.max_size, (int)4096,
106 			btlb_info.max_size>>8,
107 			btlb_info.fixed_range_info.num_i,
108 			btlb_info.fixed_range_info.num_d,
109 			btlb_info.fixed_range_info.num_comb,
110 			btlb_info.variable_range_info.num_i,
111 			btlb_info.variable_range_info.num_d,
112 			btlb_info.variable_range_info.num_comb
/* Boot-time cache initialization: query PDC firmware for cache/TLB
 * geometry, dump it at KERN_DEBUG, derive the D/I-cache flush strides,
 * fetch BTLB info, and sanity-check the aliasing model.  Panics if the
 * firmware call fails or (on SMP-incapable kernels) if the CPU cannot
 * guarantee equivalent aliasing.
 * NOTE(review): this listing is missing lines (printk arguments, braces,
 * and some statements are not visible) — confirm against the full file. */
119 parisc_cache_init(void)
	/* Firmware is the only source of cache geometry; fatal if absent. */
121 	if (pdc_cache_info(&cache_info) < 0)
122 		panic("parisc_cache_init: pdc_cache_info failed");
125 	printk(KERN_DEBUG "ic_size %lx dc_size %lx it_size %lx pdc_cache_info %d*long pdc_cache_cf %d\n",
129 		sizeof (struct pdc_cache_info) / sizeof (long),
130 		sizeof (struct pdc_cache_cf)
133 	printk(KERN_DEBUG "dc base %x dc stride %x dc count %x dc loop %d\n",
135 		cache_info.dc_stride,
139 	printk(KERN_DEBUG "dc conf: alias %d block %d line %d wt %d sh %d cst %d assoc %d\n",
140 		cache_info.dc_conf.cc_alias,
141 		cache_info.dc_conf.cc_block,
142 		cache_info.dc_conf.cc_line,
143 		cache_info.dc_conf.cc_wt,
144 		cache_info.dc_conf.cc_sh,
145 		cache_info.dc_conf.cc_cst,
146 		cache_info.dc_conf.cc_assoc);
148 	printk(KERN_DEBUG "ic conf: alias %d block %d line %d wt %d sh %d cst %d assoc %d\n",
149 		cache_info.ic_conf.cc_alias,
150 		cache_info.ic_conf.cc_block,
151 		cache_info.ic_conf.cc_line,
152 		cache_info.ic_conf.cc_wt,
153 		cache_info.ic_conf.cc_sh,
154 		cache_info.ic_conf.cc_cst,
155 		cache_info.ic_conf.cc_assoc);
157 	printk(KERN_DEBUG "dt conf: sh %d page %d cst %d aid %d pad1 %d \n",
158 		cache_info.dt_conf.tc_sh,
159 		cache_info.dt_conf.tc_page,
160 		cache_info.dt_conf.tc_cst,
161 		cache_info.dt_conf.tc_aid,
162 		cache_info.dt_conf.tc_pad1);
164 	printk(KERN_DEBUG "it conf: sh %d page %d cst %d aid %d pad1 %d \n",
165 		cache_info.it_conf.tc_sh,
166 		cache_info.it_conf.tc_page,
167 		cache_info.it_conf.tc_cst,
168 		cache_info.it_conf.tc_aid,
169 		cache_info.it_conf.tc_pad1);
	/* tc_sh == 0: separate I/D TLBs; tc_sh == 2 is unexpected but
	 * handled by flushing I and D sides separately (see warning). */
173 	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
174 		if (cache_info.dt_conf.tc_sh == 2)
175 			printk(KERN_WARNING "Unexpected TLB configuration. "
176 				"Will flush I/D separately (could be optimized).\n");
	/* Stride between cache lines that the flush loops must advance by:
	 * (1 << (cc_block + 3)) * cc_line bytes. */
181 	dcache_stride = (1 << (cache_info.dc_conf.cc_block + 3)) *
182 						cache_info.dc_conf.cc_line;
183 	icache_stride = (1 << (cache_info.ic_conf.cc_block + 3)) *
184 						cache_info.ic_conf.cc_line;
	/* BTLB support is optional: on failure, zero the struct so later
	 * code (show_cache_info) treats it as "not supported". */
186 	if (pdc_btlb_info(&btlb_info) < 0) {
187 		memset(&btlb_info, 0, sizeof btlb_info);
	/* CPUs that cannot do non-equivalent virtual aliasing need special
	 * handling; on such machines only equivalent aliasing works. */
191 	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
192 				PDC_MODEL_NVA_UNSUPPORTED) {
193 		printk(KERN_WARNING "Only equivalent aliasing supported\n");
195 		panic("SMP kernel required to avoid non-equivalent aliasing");
/* Disable space-register hashing on this CPU.  Selects the CPU-family-
 * specific instruction sequence ID and hands it to the assembly helper
 * disable_sr_hashing_asm().
 * NOTE(review): this listing is missing lines (the srhash_type
 * declaration, several case labels, and break statements are not
 * visible) — confirm against the full file. */
200 void disable_sr_hashing(void)
204 	switch (boot_cpu_data.cpu_type) {
205 	case pcx:	 /* We shouldn't get this far.  setup.c should prevent it. */
212 		srhash_type = SRHASH_PCXST;
216 		srhash_type = SRHASH_PCXL;
219 	case pcxl2: /* pcxl2 doesn't support space register hashing */
222 	default: /* Currently all PA2.0 machines use the same ins. sequence */
223 		srhash_type = SRHASH_PA20;
	/* The actual disable is done in assembly (pacache.S). */
227 	disable_sr_hashing_asm(srhash_type);
/* Flush @page from the D-cache: first through its kernel mapping, then
 * through at most one user mapping per mapping list.  Walks the address
 * space's shared-mapping list, then the private list (where read-only
 * shared mappings are flagged VM_MAYSHARE), looking for a VMA in the
 * current MM that covers the page; user mappings are assumed to be
 * equivalently aliased, so flushing one suffices (see inline comments).
 * NOTE(review): this listing is missing lines (the declarations of `l`
 * and `off`, `continue`/`break` statements, and braces are not
 * visible) — confirm against the full file. */
230 void __flush_dcache_page(struct page *page)
232 	struct address_space *mapping = page_mapping(page);
233 	struct mm_struct *mm = current->active_mm;
	/* Always flush via the kernel virtual address first. */
236 	flush_kernel_dcache_page(page_address(page));
240 	/* check shared list first if it's not empty...it's usually
242 	list_for_each(l, &mapping->i_mmap_shared) {
243 		struct vm_area_struct *mpnt;
246 		mpnt = list_entry(l, struct vm_area_struct, shared);
249 		 * If this VMA is not in our MM, we can ignore it.
251 		if (mpnt->vm_mm != mm)
		/* Page lies before this VMA's file window — skip it. */
254 		if (page->index < mpnt->vm_pgoff)
		/* Page offset within the VMA; skip if past the VMA's end. */
257 		off = page->index - mpnt->vm_pgoff;
258 		if (off >= (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT)
261 		flush_cache_page(mpnt, mpnt->vm_start + (off << PAGE_SHIFT));
263 		/* All user shared mappings should be equivalently mapped,
264 		 * so once we've flushed one we should be ok
269 	/* then check private mapping list for read only shared mappings
270 	 * which are flagged by VM_MAYSHARE */
271 	list_for_each(l, &mapping->i_mmap) {
272 		struct vm_area_struct *mpnt;
275 		mpnt = list_entry(l, struct vm_area_struct, shared);
		/* Only care about VM_MAYSHARE VMAs belonging to our MM. */
278 		if (mpnt->vm_mm != mm || !(mpnt->vm_flags & VM_MAYSHARE))
281 		if (page->index < mpnt->vm_pgoff)
284 		off = page->index - mpnt->vm_pgoff;
285 		if (off >= (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT)
288 		flush_cache_page(mpnt, mpnt->vm_start + (off << PAGE_SHIFT));
290 		/* All user shared mappings should be equivalently mapped,
291 		 * so once we've flushed one we should be ok
296 EXPORT_SYMBOL(__flush_dcache_page);
298 /* Defined in arch/parisc/kernel/pacache.S */
/* Export the assembly cache-flush primitives for use by modules. */
299 EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
300 EXPORT_SYMBOL(flush_kernel_dcache_page);
301 EXPORT_SYMBOL(flush_data_cache_local);
302 EXPORT_SYMBOL(flush_kernel_icache_range_asm);