X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fparisc%2Fkernel%2Fcache.c;h=0be51e92a2fc62f1e82387249d3dbead462606d7;hb=refs%2Fheads%2Fvserver;hp=9fc0614401223e72cc94726b92f654b51d85d29b;hpb=9213980e6a70d8473e0ffd4b39ab5b6caaba9ff5;p=linux-2.6.git

diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 9fc061440..0be51e92a 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -4,7 +4,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1999 Helge Deller (07-13-1999)
+ * Copyright (C) 1999-2006 Helge Deller (07-13-1999)
  * Copyright (C) 1999 SuSE GmbH Nuernberg
  * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
  *
@@ -27,30 +27,44 @@
 #include 
 #include 
 #include 
+#include 
 
-int split_tlb;
-int dcache_stride;
-int icache_stride;
+int split_tlb __read_mostly;
+int dcache_stride __read_mostly;
+int icache_stride __read_mostly;
 EXPORT_SYMBOL(dcache_stride);
 
-struct pdc_cache_info cache_info;
+
+/* On some machines (e.g. ones with the Merced bus), there can be
+ * only a single PxTLB broadcast at a time; this must be guaranteed
+ * by software. We put a spinlock around all TLB flushes to
+ * ensure this.
+ */
+DEFINE_SPINLOCK(pa_tlb_lock);
+
+struct pdc_cache_info cache_info __read_mostly;
 #ifndef CONFIG_PA20
-static struct pdc_btlb_info btlb_info;
+static struct pdc_btlb_info btlb_info __read_mostly;
 #endif
 
 #ifdef CONFIG_SMP
 void
 flush_data_cache(void)
 {
-        on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
+        on_each_cpu(flush_data_cache_local, NULL, 1, 1);
+}
+void
+flush_instruction_cache(void)
+{
+        on_each_cpu(flush_instruction_cache_local, NULL, 1, 1);
 }
 #endif
 
 void
 flush_cache_all_local(void)
 {
-        flush_instruction_cache_local();
-        flush_data_cache_local();
+        flush_instruction_cache_local(NULL);
+        flush_data_cache_local(NULL);
 }
 EXPORT_SYMBOL(flush_cache_all_local);
 
@@ -69,25 +83,29 @@
 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
         struct page *page = pte_page(pte);
 
-        if (VALID_PAGE(page) && page_mapping(page) &&
+        if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
             test_bit(PG_dcache_dirty, &page->flags)) {
 
-                flush_kernel_dcache_page(page_address(page));
+                flush_kernel_dcache_page(page);
                 clear_bit(PG_dcache_dirty, &page->flags);
-        }
+        } else if (parisc_requires_coherency())
+                flush_kernel_dcache_page(page);
 }
 
 void
 show_cache_info(struct seq_file *m)
 {
+        char buf[32];
+
         seq_printf(m, "I-cache\t\t: %ld KB\n",
                 cache_info.ic_size/1024 );
-        seq_printf(m, "D-cache\t\t: %ld KB (%s)%s\n",
+        if (cache_info.dc_loop == 1)
+                snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
+        seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
                 cache_info.dc_size/1024,
                 (cache_info.dc_conf.cc_wt ? "WT":"WB"),
-                (cache_info.dc_conf.cc_sh ? " - shared I/D":"")
-        );
-
+                (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
+                ((cache_info.dc_loop == 1) ? "direct mapped" : buf));
         seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
                 cache_info.it_size,
                 cache_info.dt_size,
@@ -123,51 +141,60 @@ parisc_cache_init(void)
                 panic("parisc_cache_init: pdc_cache_info failed");
 
 #if 0
-        printk(KERN_DEBUG "ic_size %lx dc_size %lx it_size %lx pdc_cache_info %d*long pdc_cache_cf %d\n",
-                cache_info.ic_size,
-                cache_info.dc_size,
-                cache_info.it_size,
-                sizeof (struct pdc_cache_info) / sizeof (long),
-                sizeof (struct pdc_cache_cf)
-        );
-
-        printk(KERN_DEBUG "dc base %x dc stride %x dc count %x dc loop %d\n",
-                cache_info.dc_base,
-                cache_info.dc_stride,
-                cache_info.dc_count,
-                cache_info.dc_loop);
-
-        printk(KERN_DEBUG "dc conf: alias %d block %d line %d wt %d sh %d cst %d assoc %d\n",
-                cache_info.dc_conf.cc_alias,
-                cache_info.dc_conf.cc_block,
-                cache_info.dc_conf.cc_line,
-                cache_info.dc_conf.cc_wt,
-                cache_info.dc_conf.cc_sh,
-                cache_info.dc_conf.cc_cst,
-                cache_info.dc_conf.cc_assoc);
-
-        printk(KERN_DEBUG "ic conf: alias %d block %d line %d wt %d sh %d cst %d assoc %d\n",
-                cache_info.ic_conf.cc_alias,
-                cache_info.ic_conf.cc_block,
-                cache_info.ic_conf.cc_line,
-                cache_info.ic_conf.cc_wt,
-                cache_info.ic_conf.cc_sh,
-                cache_info.ic_conf.cc_cst,
-                cache_info.ic_conf.cc_assoc);
-
-        printk(KERN_DEBUG "dt conf: sh %d page %d cst %d aid %d pad1 %d \n",
-                cache_info.dt_conf.tc_sh,
-                cache_info.dt_conf.tc_page,
-                cache_info.dt_conf.tc_cst,
-                cache_info.dt_conf.tc_aid,
-                cache_info.dt_conf.tc_pad1);
-
-        printk(KERN_DEBUG "it conf: sh %d page %d cst %d aid %d pad1 %d \n",
-                cache_info.it_conf.tc_sh,
-                cache_info.it_conf.tc_page,
-                cache_info.it_conf.tc_cst,
-                cache_info.it_conf.tc_aid,
-                cache_info.it_conf.tc_pad1);
+        printk("ic_size %lx dc_size %lx it_size %lx\n",
+                cache_info.ic_size,
+                cache_info.dc_size,
+                cache_info.it_size);
+
+        printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
+                cache_info.dc_base,
+                cache_info.dc_stride,
+                cache_info.dc_count,
+                cache_info.dc_loop);
+
+        printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
+                *(unsigned long *) (&cache_info.dc_conf),
+                cache_info.dc_conf.cc_alias,
+                cache_info.dc_conf.cc_block,
+                cache_info.dc_conf.cc_line,
+                cache_info.dc_conf.cc_shift);
+        printk(" wt %d sh %d cst %d hv %d\n",
+                cache_info.dc_conf.cc_wt,
+                cache_info.dc_conf.cc_sh,
+                cache_info.dc_conf.cc_cst,
+                cache_info.dc_conf.cc_hv);
+
+        printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
+                cache_info.ic_base,
+                cache_info.ic_stride,
+                cache_info.ic_count,
+                cache_info.ic_loop);
+
+        printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
+                *(unsigned long *) (&cache_info.ic_conf),
+                cache_info.ic_conf.cc_alias,
+                cache_info.ic_conf.cc_block,
+                cache_info.ic_conf.cc_line,
+                cache_info.ic_conf.cc_shift);
+        printk(" wt %d sh %d cst %d hv %d\n",
+                cache_info.ic_conf.cc_wt,
+                cache_info.ic_conf.cc_sh,
+                cache_info.ic_conf.cc_cst,
+                cache_info.ic_conf.cc_hv);
+
+        printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d \n",
+                cache_info.dt_conf.tc_sh,
+                cache_info.dt_conf.tc_page,
+                cache_info.dt_conf.tc_cst,
+                cache_info.dt_conf.tc_aid,
+                cache_info.dt_conf.tc_pad1);
+
+        printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d \n",
+                cache_info.it_conf.tc_sh,
+                cache_info.it_conf.tc_page,
+                cache_info.it_conf.tc_cst,
+                cache_info.it_conf.tc_aid,
+                cache_info.it_conf.tc_pad1);
 #endif
 
         split_tlb = 0;
@@ -179,10 +206,17 @@ parisc_cache_init(void)
                 split_tlb = 1;
         }
 
-        dcache_stride = (1 << (cache_info.dc_conf.cc_block + 3)) *
-                        cache_info.dc_conf.cc_line;
-        icache_stride = (1 << (cache_info.ic_conf.cc_block + 3)) *
-                        cache_info.ic_conf.cc_line;
+        /* "New and Improved" version from Jim Hull
+         *      (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
+         * The following CAFL_STRIDE is an optimized version, see
+         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
+         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
+         */
+#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
+        dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
+        icache_stride = CAFL_STRIDE(cache_info.ic_conf);
+#undef CAFL_STRIDE
+
 #ifndef CONFIG_PA20
         if (pdc_btlb_info(&btlb_info) < 0) {
                 memset(&btlb_info, 0, sizeof btlb_info);
@@ -191,8 +225,8 @@
 
         if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                                 PDC_MODEL_NVA_UNSUPPORTED) {
-                printk(KERN_WARNING "Only equivalent aliasing supported\n");
-#ifndef CONFIG_SMP
+                printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
+#if 0
                 panic("SMP kernel required to avoid non-equivalent aliasing");
 #endif
         }
@@ -200,7 +234,8 @@
 void disable_sr_hashing(void)
 {
-        int srhash_type;
+        int srhash_type, retval;
+        unsigned long space_bits;
 
         switch (boot_cpu_data.cpu_type) {
         case pcx: /* We shouldn't get this far. setup.c should prevent it. */
@@ -226,18 +261,32 @@ void disable_sr_hashing(void)
         }
 
         disable_sr_hashing_asm(srhash_type);
+
+        retval = pdc_spaceid_bits(&space_bits);
+        /* If this procedure isn't implemented, don't panic. */
+        if (retval < 0 && retval != PDC_BAD_OPTION)
+                panic("pdc_spaceid_bits call failed.\n");
+        if (space_bits != 0)
+                panic("SpaceID hashing is still on!\n");
 }
 
-void __flush_dcache_page(struct page *page)
+void flush_dcache_page(struct page *page)
 {
         struct address_space *mapping = page_mapping(page);
-        struct vm_area_struct *mpnt = NULL;
+        struct vm_area_struct *mpnt;
         struct prio_tree_iter iter;
         unsigned long offset;
         unsigned long addr;
         pgoff_t pgoff;
+        unsigned long pfn = page_to_pfn(page);
+
 
-        flush_kernel_dcache_page(page_address(page));
+        if (mapping && !mapping_mapped(mapping)) {
+                set_bit(PG_dcache_dirty, &page->flags);
+                return;
+        }
+
+        flush_kernel_dcache_page(page);
 
         if (!mapping)
                 return;
@@ -250,8 +299,7 @@ void __flush_dcache_page(struct page *page)
          * to flush one address here for them all to become coherent */
 
         flush_dcache_mmap_lock(mapping);
-        while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
-                                        &iter, pgoff, pgoff)) != NULL) {
+        vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
                 offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                 addr = mpnt->vm_start + offset;
 
@@ -260,21 +308,105 @@ void __flush_dcache_page(struct page *page)
                  * taking a page fault if the pte doesn't exist.
                  * This is just for speed. If the page translation
                  * isn't there, there's no point exciting the
-                 * nadtlb handler into a nullification frenzy */
-
-                if (!translation_exists(mpnt, addr))
-                        continue;
-
-                __flush_cache_page(mpnt, addr);
-
-                break;
+                 * nadtlb handler into a nullification frenzy.
+                 *
+                 * Make sure we really have this page: the private
+                 * mappings may cover this area but have COW'd this
+                 * particular page.
+                 */
+                if (translation_exists(mpnt, addr, pfn)) {
+                        __flush_cache_page(mpnt, addr);
+                        break;
+                }
         }
         flush_dcache_mmap_unlock(mapping);
 }
-EXPORT_SYMBOL(__flush_dcache_page);
+EXPORT_SYMBOL(flush_dcache_page);
 
 /* Defined in arch/parisc/kernel/pacache.S */
 EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
-EXPORT_SYMBOL(flush_kernel_dcache_page);
+EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
 EXPORT_SYMBOL(flush_data_cache_local);
 EXPORT_SYMBOL(flush_kernel_icache_range_asm);
+
+void clear_user_page_asm(void *page, unsigned long vaddr)
+{
+        /* This function is implemented in assembly in pacache.S */
+        extern void __clear_user_page_asm(void *page, unsigned long vaddr);
+
+        purge_tlb_start();
+        __clear_user_page_asm(page, vaddr);
+        purge_tlb_end();
+}
+
+#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
+int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+
+void parisc_setup_cache_timing(void)
+{
+        unsigned long rangetime, alltime;
+        unsigned long size;
+
+        alltime = mfctl(16);
+        flush_data_cache();
+        alltime = mfctl(16) - alltime;
+
+        size = (unsigned long)(_end - _text);
+        rangetime = mfctl(16);
+        flush_kernel_dcache_range((unsigned long)_text, size);
+        rangetime = mfctl(16) - rangetime;
+
+        printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
+                alltime, size, rangetime);
+
+        /* Racy, but if we see an intermediate value, it's ok too... */
+        parisc_cache_flush_threshold = size * alltime / rangetime;
+
+        parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
+        if (!parisc_cache_flush_threshold)
+                parisc_cache_flush_threshold = FLUSH_THRESHOLD;
+
+        printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
+}
+
+extern void purge_kernel_dcache_page(unsigned long);
+extern void clear_user_page_asm(void *page, unsigned long vaddr);
+
+void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
+{
+        purge_kernel_dcache_page((unsigned long)page);
+        purge_tlb_start();
+        pdtlb_kernel(page);
+        purge_tlb_end();
+        clear_user_page_asm(page, vaddr);
+}
+EXPORT_SYMBOL(clear_user_page);
+
+void flush_kernel_dcache_page_addr(void *addr)
+{
+        flush_kernel_dcache_page_asm(addr);
+        purge_tlb_start();
+        pdtlb_kernel(addr);
+        purge_tlb_end();
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
+
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+        struct page *pg)
+{
+        /* no coherency needed (all in kmap/kunmap) */
+        copy_user_page_asm(vto, vfrom);
+        if (!parisc_requires_coherency())
+                flush_kernel_dcache_page_asm(vto);
+}
+EXPORT_SYMBOL(copy_user_page);
+
+#ifdef CONFIG_PA8X00
+
+void kunmap_parisc(void *addr)
+{
+        if (parisc_requires_coherency())
+                flush_kernel_dcache_page_addr(addr);
+}
+EXPORT_SYMBOL(kunmap_parisc);
+#endif
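
Note on the stride computation in the hunk at line 206: the CAFL_STRIDE macro is an algebraic simplification of the Jim Hull formula quoted in the patch comment, since (1 << (cc_block - 1)) * (cc_line << (4 + cc_shift)) equals cc_line << (3 + cc_block + cc_shift). The standalone C sketch below, which is not part of the patch (the struct and example field values are illustrative only), checks that equivalence:

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for the cc_* bit-fields of the PDC cache config. */
struct cache_cf {
        unsigned long cc_block;
        unsigned long cc_line;
        unsigned long cc_shift;
};

int main(void)
{
        /* Example values only; real values come from pdc_cache_info(). */
        struct cache_cf cnf = { .cc_block = 1, .cc_line = 2, .cc_shift = 0 };

        /* Jim Hull's original formula, as quoted in the patch comment. */
        unsigned long stride_orig =
                (1UL << (cnf.cc_block - 1)) * (cnf.cc_line << (4 + cnf.cc_shift));

        /* The optimized CAFL_STRIDE form: the power of two folds into the shift. */
        unsigned long stride_cafl =
                cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift);

        assert(stride_orig == stride_cafl);
        printf("cache stride = %lu bytes\n", stride_cafl);
        return 0;
}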