X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fmips%2Fmm%2Fc-sb1.c;h=3a8afd47feaab8b3f1e98e5be73b563acd612282;hb=refs%2Fheads%2Fvserver;hp=2f08b535f20e8e9e6adc8a287398277598b36a5d;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git

diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
index 2f08b535f..3a8afd47f 100644
--- a/arch/mips/mm/c-sb1.c
+++ b/arch/mips/mm/c-sb1.c
@@ -18,8 +18,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
-#include <linux/config.h>
 #include <linux/init.h>
+#include <linux/hardirq.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -50,6 +50,15 @@ static unsigned short dcache_sets;
 static unsigned int icache_range_cutoff;
 static unsigned int dcache_range_cutoff;
 
+static inline void sb1_on_each_cpu(void (*func) (void *info), void *info,
+                                   int retry, int wait)
+{
+	preempt_disable();
+	smp_call_function(func, info, retry, wait);
+	func(info);
+	preempt_enable();
+}
+
 /*
  * The dcache is fully coherent to the system, with one
  * big caveat: the instruction stream.  In other words,
@@ -155,6 +164,26 @@ static inline void __sb1_flush_icache_all(void)
 	}
 }
 
+/*
+ * Invalidate a range of the icache. The addresses are virtual, and
+ * the cache is virtually indexed and tagged. However, we don't
+ * necessarily have the right ASID context, so use index ops instead
+ * of hit ops.
+ */
+static inline void __sb1_flush_icache_range(unsigned long start,
+	unsigned long end)
+{
+	start &= ~(icache_line_size - 1);
+	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
+
+	while (start != end) {
+		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
+		start += icache_line_size;
+	}
+	mispredict();
+	sync();
+}
+
 /*
  * Flush the icache for a given physical page. Need to writeback the
  * dcache first, then invalidate the icache. If the page isn't
@@ -174,8 +203,11 @@ static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long
 	/*
 	 * Bumping the ASID is probably cheaper than the flush ...
 	 */
-	if (cpu_context(cpu, vma->vm_mm) != 0)
-		drop_mmu_context(vma->vm_mm, cpu);
+	if (vma->vm_mm == current->active_mm) {
+		if (cpu_context(cpu, vma->vm_mm) != 0)
+			drop_mmu_context(vma->vm_mm, cpu);
+	} else
+		__sb1_flush_icache_range(addr, addr + PAGE_SIZE);
 }
 
 #ifdef CONFIG_SMP
@@ -204,33 +236,32 @@ static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
 	args.vma = vma;
 	args.addr = addr;
 	args.pfn = pfn;
-	on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
+	sb1_on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
 }
 #else
 void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
 	unsigned long pfn)
 	__attribute__((alias("local_sb1_flush_cache_page")));
 #endif
 
-/*
- * Invalidate a range of the icache. The addresses are virtual, and
- * the cache is virtually indexed and tagged. However, we don't
- * necessarily have the right ASID context, so use index ops instead
- * of hit ops.
- */
-static inline void __sb1_flush_icache_range(unsigned long start,
-	unsigned long end)
+#ifdef CONFIG_SMP
+static void sb1_flush_cache_data_page_ipi(void *info)
 {
-	start &= ~(icache_line_size - 1);
-	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
+	unsigned long start = (unsigned long)info;
 
-	while (start != end) {
-		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
-		start += icache_line_size;
-	}
-	mispredict();
-	sync();
+	__sb1_writeback_inv_dcache_range(start, start + PAGE_SIZE);
 }
 
+static void sb1_flush_cache_data_page(unsigned long addr)
+{
+	if (in_atomic())
+		__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);
+	else
+		on_each_cpu(sb1_flush_cache_data_page_ipi, (void *) addr, 1, 1);
+}
+#else
+void sb1_flush_cache_data_page(unsigned long)
+	__attribute__((alias("local_sb1_flush_cache_data_page")));
+#endif
 
 /*
  * Invalidate all caches on this CPU
@@ -247,7 +278,7 @@ void sb1___flush_cache_all_ipi(void *ignored)
 
 static void sb1___flush_cache_all(void)
 {
-	on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
+	sb1_on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
 }
 #else
 void sb1___flush_cache_all(void)
@@ -297,70 +328,13 @@ void sb1_flush_icache_range(unsigned long start, unsigned long end)
 
 	args.start = start;
 	args.end = end;
-	on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
+	sb1_on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
 }
 #else
 void sb1_flush_icache_range(unsigned long start, unsigned long end)
 	__attribute__((alias("local_sb1_flush_icache_range")));
 #endif
 
-/*
- * Flush the icache for a given physical page. Need to writeback the
- * dcache first, then invalidate the icache. If the page isn't
- * executable, nothing is required.
- */
-static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
-	struct page *page)
-{
-	unsigned long start;
-	int cpu = smp_processor_id();
-
-#ifndef CONFIG_SMP
-	if (!(vma->vm_flags & VM_EXEC))
-		return;
-#endif
-
-	/* Need to writeback any dirty data for that page, we have the PA */
-	start = (unsigned long)(page-mem_map) << PAGE_SHIFT;
-	__sb1_writeback_inv_dcache_phys_range(start, start + PAGE_SIZE);
-	/*
-	 * If there's a context, bump the ASID (cheaper than a flush,
-	 * since we don't know VAs!)
-	 */
-	if (cpu_context(cpu, vma->vm_mm) != 0) {
-		drop_mmu_context(vma->vm_mm, cpu);
-	}
-}
-
-#ifdef CONFIG_SMP
-struct flush_icache_page_args {
-	struct vm_area_struct *vma;
-	struct page *page;
-};
-
-static void sb1_flush_icache_page_ipi(void *info)
-{
-	struct flush_icache_page_args *args = info;
-	local_sb1_flush_icache_page(args->vma, args->page);
-}
-
-/* Dirty dcache could be on another CPU, so do the IPIs */
-static void sb1_flush_icache_page(struct vm_area_struct *vma,
-	struct page *page)
-{
-	struct flush_icache_page_args args;
-
-	if (!(vma->vm_flags & VM_EXEC))
-		return;
-	args.vma = vma;
-	args.page = page;
-	on_each_cpu(sb1_flush_icache_page_ipi, (void *) &args, 1, 1);
-}
-#else
-void sb1_flush_icache_page(struct vm_area_struct *vma, struct page *page)
-	__attribute__((alias("local_sb1_flush_icache_page")));
-#endif
-
 /*
  * A signal trampoline must fit into a single cacheline.
*/ @@ -381,7 +355,7 @@ static void sb1_flush_cache_sigtramp_ipi(void *info) static void sb1_flush_cache_sigtramp(unsigned long addr) { - on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1); + sb1_on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1); } #else void sb1_flush_cache_sigtramp(unsigned long addr) @@ -499,7 +473,6 @@ static __init void probe_cache_sizes(void) void sb1_cache_init(void) { extern char except_vec2_sb1; - extern char handle_vec2_sb1; /* Special cache error handler for SB1 */ set_uncached_handler (0x100, &except_vec2_sb1, 0x80); @@ -521,14 +494,14 @@ void sb1_cache_init(void) /* These routines are for Icache coherence with the Dcache */ flush_icache_range = sb1_flush_icache_range; - flush_icache_page = sb1_flush_icache_page; flush_icache_all = __sb1_flush_icache_all; /* local only */ /* This implies an Icache flush too, so can't be nop'ed */ flush_cache_page = sb1_flush_cache_page; flush_cache_sigtramp = sb1_flush_cache_sigtramp; - flush_data_cache_page = (void *) sb1_nop; + local_flush_data_cache_page = (void *) sb1_nop; + flush_data_cache_page = sb1_flush_cache_data_page; /* Full flush */ __flush_cache_all = sb1___flush_cache_all; @@ -552,5 +525,5 @@ void sb1_cache_init(void) : : "memory"); - flush_cache_all(); + local_sb1___flush_cache_all(); }
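
The new sb1_on_each_cpu() helper above pins the caller to its current CPU with preempt_disable(), cross-calls the flush routine to the other CPUs via smp_call_function(), and then runs it locally, so all of the flush wrappers in this patch can share one broadcast idiom. The address arithmetic in __sb1_flush_icache_range() is the usual align-down/align-up idiom: start is rounded down to a cache-line boundary and end is rounded up, so the loop touches every line that overlaps [start, end). Below is a minimal standalone C sketch of just that rounding and loop; the 32-byte line size and the sample addresses are assumptions for illustration only (the real icache_line_size is probed at boot by probe_cache_sizes(), and the kernel issues cache_set_op(Index_Invalidate_I, ...) where the sketch prints).

#include <stdio.h>

int main(void)
{
	unsigned long icache_line_size = 32;		/* assumed; probed at boot in the kernel */
	unsigned long start = 0x1234, end = 0x12f1;	/* arbitrary demo range */

	/* Round start down and end up to whole cache lines. */
	start &= ~(icache_line_size - 1);
	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);

	/*
	 * One index op per line; both bounds are now line-aligned, so
	 * the "start != end" test terminates exactly at the rounded end.
	 */
	while (start != end) {
		printf("invalidate line at 0x%lx\n", start);
		start += icache_line_size;
	}
	return 0;
}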