This commit was manufactured by cvs2svn to create tag
[linux-2.6.git] / arch / mips / mm / c-r4k.c
index 6249ac9..b22df2d 100644 (file)
@@ -49,7 +49,7 @@ struct bcache_ops *bcops = &no_sc_ops;
 #define R4600_HIT_CACHEOP_WAR_IMPL                                     \
 do {                                                                   \
        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())            \
                *(volatile unsigned long *)CKSEG1;                      \
        if (R4600_V1_HIT_CACHEOP_WAR)                                   \
                __asm__ __volatile__("nop;nop;nop;nop");                \
 } while (0)
@@ -86,7 +86,7 @@ static inline void r4k_blast_dcache_page_indexed_setup(void)
 
 static void (* r4k_blast_dcache)(void);
 
-static inline void r4k_blast_dcache_setup(void)
+static void r4k_blast_dcache_setup(void)
 {
        unsigned long dc_lsize = cpu_dcache_line_size();
 
@@ -254,25 +254,16 @@ static inline void r4k_blast_scache_setup(void)
                r4k_blast_scache = blast_scache128;
 }
 
-/*
- * This is former mm's flush_cache_all() which really should be
- * flush_cache_vunmap these days ...
- */
-static inline void local_r4k_flush_cache_all(void * args)
-{
-       r4k_blast_dcache();
-       r4k_blast_icache();
-}
-
 static void r4k_flush_cache_all(void)
 {
        if (!cpu_has_dc_aliases)
                return;
 
-       on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
+       r4k_blast_dcache();
+       r4k_blast_icache();
 }
 
-static inline void local_r4k___flush_cache_all(void * args)
+static void r4k___flush_cache_all(void)
 {
        r4k_blast_dcache();
        r4k_blast_icache();
@@ -288,14 +279,9 @@ static inline void local_r4k___flush_cache_all(void * args)
        }
 }
 
-static void r4k___flush_cache_all(void)
-{
-       on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
-}
-
-static inline void local_r4k_flush_cache_range(void * args)
+static void r4k_flush_cache_range(struct vm_area_struct *vma,
+       unsigned long start, unsigned long end)
 {
-       struct vm_area_struct *vma = args;
        int exec;
 
        if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
@@ -308,16 +294,8 @@ static inline void local_r4k_flush_cache_range(void * args)
                r4k_blast_icache();
 }
 
-static void r4k_flush_cache_range(struct vm_area_struct *vma,
-       unsigned long start, unsigned long end)
-{
-       on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
-}
-
-static inline void local_r4k_flush_cache_mm(void * args)
+static void r4k_flush_cache_mm(struct mm_struct *mm)
 {
-       struct mm_struct *mm = args;
-
        if (!cpu_has_dc_aliases)
                return;
 
@@ -338,21 +316,9 @@ static inline void local_r4k_flush_cache_mm(void * args)
                r4k_blast_scache();
 }
 
-static void r4k_flush_cache_mm(struct mm_struct *mm)
-{
-       on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
-}
-
-struct flush_cache_page_args {
-       struct vm_area_struct *vma;
-       unsigned long page;
-};
-
-static inline void local_r4k_flush_cache_page(void *args)
+static void r4k_flush_cache_page(struct vm_area_struct *vma,
+                                       unsigned long page)
 {
-       struct flush_cache_page_args *fcp_args = args;
-       struct vm_area_struct *vma = fcp_args->vma;
-       unsigned long page = fcp_args->page;
        int exec = vma->vm_flags & VM_EXEC;
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgdp;
@@ -411,39 +377,14 @@ static inline void local_r4k_flush_cache_page(void *args)
        }
 }
 
-static void r4k_flush_cache_page(struct vm_area_struct *vma,
-       unsigned long page)
-{
-       struct flush_cache_page_args args;
-
-       args.vma = vma;
-       args.page = page;
-
-       on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
-}
-
-static inline void local_r4k_flush_data_cache_page(void * addr)
-{
-       r4k_blast_dcache_page((unsigned long) addr);
-}
-
 static void r4k_flush_data_cache_page(unsigned long addr)
 {
-       on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
+       r4k_blast_dcache_page(addr);
 }
 
-struct flush_icache_range_args {
-       unsigned long start;
-       unsigned long end;
-};
-
-static inline void local_r4k_flush_icache_range(void *args)
+static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 {
-       struct flush_icache_range_args *fir_args = args;
        unsigned long dc_lsize = current_cpu_data.dcache.linesz;
        unsigned long ic_lsize = current_cpu_data.icache.linesz;
-       unsigned long start = fir_args->start;
-       unsigned long end = fir_args->end;
        unsigned long addr, aend;
 
        if (!cpu_has_ic_fills_f_dc) {
@@ -466,28 +407,18 @@ static inline void local_r4k_flush_icache_range(void *args)
        if (end - start > icache_size)
                r4k_blast_icache();
        else {
                addr = start & ~(ic_lsize - 1);
                aend = (end - 1) & ~(ic_lsize - 1);
                while (1) {
                        /* Hit_Invalidate_I */
                        protected_flush_icache_line(addr);
                        if (addr == aend)
                                break;
                        addr += ic_lsize;
                }
        }
                }
        }
 }
 
-static void r4k_flush_icache_range(unsigned long start, unsigned long end)
-{
-       struct flush_icache_range_args args;
-
-       args.start = start;
-       args.end = end;
-
-       on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
-}
-
 /*
  * Ok, this seriously sucks.  We use them to flush a user page but don't
  * know the virtual address, so we have to blast away the whole icache
@@ -495,17 +426,14 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
  * least know the kernel address of the page so we can flush it
  * selectivly.
  */
-
-struct flush_icache_page_args {
-       struct vm_area_struct *vma;
-       struct page *page;
-};
-
-static inline void local_r4k_flush_icache_page(void *args)
+static void r4k_flush_icache_page(struct vm_area_struct *vma, struct page *page)
 {
-       struct flush_icache_page_args *fip_args = args;
-       struct vm_area_struct *vma = fip_args->vma;
-       struct page *page = fip_args->page;
+       /*
+        * If there's no context yet, or the page isn't executable, no icache
+        * flush is needed.
+        */
+       if (!(vma->vm_flags & VM_EXEC))
+               return;
 
        /*
         * Tricky ...  Because we don't know the virtual address we've got the
@@ -543,25 +471,6 @@ static inline void local_r4k_flush_icache_page(void *args)
                r4k_blast_icache();
 }
 
-static void r4k_flush_icache_page(struct vm_area_struct *vma,
-       struct page *page)
-{
-       struct flush_icache_page_args args;
-
-       /*
-        * If there's no context yet, or the page isn't executable, no I-cache
-        * flush is needed.
-        */
-       if (!(vma->vm_flags & VM_EXEC))
-               return;
-
-       args.vma = vma;
-       args.page = page;
-
-       on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
-}
-
-
 #ifdef CONFIG_DMA_NONCOHERENT
 
 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
@@ -665,11 +574,10 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
  * very much about what happens in that case.  Usually a segmentation
  * fault will dump the process later on anyway ...
  */
-static void local_r4k_flush_cache_sigtramp(void * arg)
+static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
        unsigned long ic_lsize = current_cpu_data.icache.linesz;
        unsigned long dc_lsize = current_cpu_data.dcache.linesz;
-       unsigned long addr = (unsigned long) arg;
 
        R4600_HIT_CACHEOP_WAR_IMPL;
        protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
@@ -679,10 +587,10 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
                        ".set push\n\t"
                        ".set noat\n\t"
                        ".set mips3\n\t"
 #ifdef CONFIG_MIPS32
                        "la     $at,1f\n\t"
 #endif
 #ifdef CONFIG_MIPS64
                        "dla    $at,1f\n\t"
 #endif
                        "cache  %0,($at)\n\t"
@@ -696,11 +604,6 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
                __asm__ __volatile__ ("sync");
 }
 
-static void r4k_flush_cache_sigtramp(unsigned long addr)
-{
-       on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
-}
-
 static void r4k_flush_icache_all(void)
 {
        if (cpu_has_vtag_icache)
@@ -990,7 +893,7 @@ static void __init probe_pcache(void)
               cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
               way_string[c->icache.ways], c->icache.linesz);
 
        printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
               dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
 }
 
@@ -1079,7 +982,7 @@ static void __init setup_scache(void)
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                probe_scache_kseg1 = (probe_func_t) (CKSEG1ADDR(&probe_scache));
                sc_present = probe_scache_kseg1(config);
                if (sc_present)
                        c->options |= MIPS_CPU_CACHE_CDEX_S;