#define R4600_HIT_CACHEOP_WAR_IMPL \
do { \
if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) \
*(volatile unsigned long *)CKSEG1; \
if (R4600_V1_HIT_CACHEOP_WAR) \
__asm__ __volatile__("nop;nop;nop;nop"); \
} while (0)
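
The dummy uncached read (for R4600 V2.x parts) and the four nops (for V1.x) are meant to be issued immediately before a Hit_* cacheop, so the macro is dropped in right in front of the line operation, as in the sigtramp flush further down:

	R4600_HIT_CACHEOP_WAR_IMPL;
	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
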
static void (* r4k_blast_dcache)(void);
-static inline void r4k_blast_dcache_setup(void)
+static void r4k_blast_dcache_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
r4k_blast_scache = blast_scache128;
}
-/*
- * This is former mm's flush_cache_all() which really should be
- * flush_cache_vunmap these days ...
- */
-static inline void local_r4k_flush_cache_all(void * args)
-{
- r4k_blast_dcache();
- r4k_blast_icache();
-}
-
static void r4k_flush_cache_all(void)
{
if (!cpu_has_dc_aliases)
return;
- on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
+ r4k_blast_dcache();
+ r4k_blast_icache();
}
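
For reference, these r4k_* handlers end up as the generic MIPS cache hooks; the init code wires them up roughly like this (a sketch only — the function-pointer names follow the arch/mips convention of that era, and the exact init function is not shown in this excerpt):

	flush_cache_all		= r4k_flush_cache_all;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_range	= r4k_flush_cache_range;
	flush_cache_page	= r4k_flush_cache_page;
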
-static inline void local_r4k___flush_cache_all(void * args)
+static void r4k___flush_cache_all(void)
{
r4k_blast_dcache();
r4k_blast_icache();
}
}
-static void r4k___flush_cache_all(void)
-{
- on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
-}
-
-static inline void local_r4k_flush_cache_range(void * args)
+static void r4k_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
{
- struct vm_area_struct *vma = args;
int exec;
if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
r4k_blast_icache();
}
-static void r4k_flush_cache_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
-}
-
-static inline void local_r4k_flush_cache_mm(void * args)
+static void r4k_flush_cache_mm(struct mm_struct *mm)
{
- struct mm_struct *mm = args;
-
if (!cpu_has_dc_aliases)
return;
r4k_blast_scache();
}
-static void r4k_flush_cache_mm(struct mm_struct *mm)
-{
- on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
-}
-
-struct flush_cache_page_args {
- struct vm_area_struct *vma;
- unsigned long page;
-};
-
-static inline void local_r4k_flush_cache_page(void *args)
+static void r4k_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long page)
{
- struct flush_cache_page_args *fcp_args = args;
- struct vm_area_struct *vma = fcp_args->vma;
- unsigned long page = fcp_args->page;
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgdp;
}
}
-static void r4k_flush_cache_page(struct vm_area_struct *vma,
- unsigned long page)
-{
- struct flush_cache_page_args args;
-
- args.vma = vma;
- args.page = page;
-
- on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
-}
-
-static inline void local_r4k_flush_data_cache_page(void * addr)
-{
- r4k_blast_dcache_page((unsigned long) addr);
-}
-
static void r4k_flush_data_cache_page(unsigned long addr)
{
- on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
+ r4k_blast_dcache_page(addr);
}
-struct flush_icache_range_args {
- unsigned long start;
- unsigned long end;
-};
-
-static inline void local_r4k_flush_icache_range(void *args)
+static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
- struct flush_icache_range_args *fir_args = args;
unsigned long dc_lsize = current_cpu_data.dcache.linesz;
unsigned long ic_lsize = current_cpu_data.icache.linesz;
- unsigned long start = fir_args->start;
- unsigned long end = fir_args->end;
unsigned long addr, aend;
if (!cpu_has_ic_fills_f_dc) {
if (end - start > icache_size)
r4k_blast_icache();
else {
addr = start & ~(ic_lsize - 1);
aend = (end - 1) & ~(ic_lsize - 1);
while (1) {
/* Hit_Invalidate_I */
protected_flush_icache_line(addr);
if (addr == aend)
break;
addr += ic_lsize;
}
}
}
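
The masking above line-aligns the range: addr is the first cache line containing start, aend the line containing the last byte (end is exclusive), so every line that overlaps [start, end) gets a Hit_Invalidate_I. A worked example with a 32-byte line size (values purely illustrative):

	addr = 0x1005 & ~(32 - 1);		/* 0x1000 */
	aend = (0x1043 - 1) & ~(32 - 1);	/* 0x1040 */
	/* the loop then hits lines 0x1000, 0x1020 and 0x1040 */
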
-static void r4k_flush_icache_range(unsigned long start, unsigned long end)
-{
- struct flush_icache_range_args args;
-
- args.start = start;
- args.end = end;
-
- on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
-}
-
/*
* Ok, this seriously sucks. We use them to flush a user page but don't
* know the virtual address, so we have to blast away the whole icache
* least know the kernel address of the page so we can flush it
* selectively.
*/
-
-struct flush_icache_page_args {
- struct vm_area_struct *vma;
- struct page *page;
-};
-
-static inline void local_r4k_flush_icache_page(void *args)
+static void r4k_flush_icache_page(struct vm_area_struct *vma,
+	struct page *page)
{
- struct flush_icache_page_args *fip_args = args;
- struct vm_area_struct *vma = fip_args->vma;
- struct page *page = fip_args->page;
+ /*
+ * If there's no context yet, or the page isn't executable, no icache
+ * flush is needed.
+ */
+ if (!(vma->vm_flags & VM_EXEC))
+ return;
/*
* Tricky ... Because we don't know the virtual address we've got the
r4k_blast_icache();
}
-static void r4k_flush_icache_page(struct vm_area_struct *vma,
- struct page *page)
-{
- struct flush_icache_page_args args;
-
- /*
- * If there's no context yet, or the page isn't executable, no I-cache
- * flush is needed.
- */
- if (!(vma->vm_flags & VM_EXEC))
- return;
-
- args.vma = vma;
- args.page = page;
-
- on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
-}
-
-
#ifdef CONFIG_DMA_NONCOHERENT
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
* very much about what happens in that case. Usually a segmentation
* fault will dump the process later on anyway ...
*/
-static void local_r4k_flush_cache_sigtramp(void * arg)
+static void r4k_flush_cache_sigtramp(unsigned long addr)
{
unsigned long ic_lsize = current_cpu_data.icache.linesz;
unsigned long dc_lsize = current_cpu_data.dcache.linesz;
- unsigned long addr = (unsigned long) arg;
R4600_HIT_CACHEOP_WAR_IMPL;
protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
".set push\n\t"
".set noat\n\t"
".set mips3\n\t"
#ifdef CONFIG_MIPS32
"la $at,1f\n\t"
#endif
#ifdef CONFIG_MIPS64
"dla $at,1f\n\t"
#endif
"cache %0,($at)\n\t"
__asm__ __volatile__ ("sync");
}
-static void r4k_flush_cache_sigtramp(unsigned long addr)
-{
- on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
-}
-
static void r4k_flush_icache_all(void)
{
if (cpu_has_vtag_icache)
cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
way_string[c->icache.ways], c->icache.linesz);
- printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
+ printk("Primary data cache %ldkB %s, linesize %d bytes.\n",
dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
}
case CPU_R4000MC:
case CPU_R4400SC:
case CPU_R4400MC:
probe_scache_kseg1 = (probe_func_t) (CKSEG1ADDR(&probe_scache));
sc_present = probe_scache_kseg1(config);
if (sc_present)
c->options |= MIPS_CPU_CACHE_CDEX_S;