diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index b22df2d..df04a31 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -7,7 +7,6 @@
  * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -16,6 +15,7 @@
 
 #include <asm/bcache.h>
 #include <asm/bootinfo.h>
+#include <asm/cache.h>
 #include <asm/cacheops.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/war.h>
+#include <asm/cacheflush.h> /* for run_uncached() */
 
-static unsigned long icache_size, dcache_size, scache_size;
+
+/*
+ * Special Variant of smp_call_function for use by cache functions:
+ *
+ *  o No return value
+ *  o collapses to normal function call on UP kernels
+ *  o collapses to normal function call on systems with a single shared
+ *    primary cache.
+ */
+static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
+                                   int retry, int wait)
+{
+       preempt_disable();
+
+#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
+       smp_call_function(func, info, retry, wait);
+#endif
+       func(info);
+       preempt_enable();
+}
+
+/*
+ * Must die.
+ */
+static unsigned long icache_size __read_mostly;
+static unsigned long dcache_size __read_mostly;
+static unsigned long scache_size __read_mostly;
 
 /*
  * Dummy cache handling routines for machines without boardcaches
  */
-static void no_sc_noop(void) {}
+static void cache_noop(void) {}
 
 static struct bcache_ops no_sc_ops = {
-       .bc_enable = (void *)no_sc_noop,
-       .bc_disable = (void *)no_sc_noop,
-       .bc_wback_inv = (void *)no_sc_noop,
-       .bc_inv = (void *)no_sc_noop
+       .bc_enable = (void *)cache_noop,
+       .bc_disable = (void *)cache_noop,
+       .bc_wback_inv = (void *)cache_noop,
+       .bc_inv = (void *)cache_noop
 };
 
 struct bcache_ops *bcops = &no_sc_ops;
 
-#define cpu_is_r4600_v1_x()    ((read_c0_prid() & 0xfffffff0) == 0x2010)
-#define cpu_is_r4600_v2_x()    ((read_c0_prid() & 0xfffffff0) == 0x2020)
+#define cpu_is_r4600_v1_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002010)
+#define cpu_is_r4600_v2_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002020)
 
 #define R4600_HIT_CACHEOP_WAR_IMPL                                     \
 do {                                                                   \
        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())            \
-               *(volatile unsigned long *)KSEG1;                       \
+               *(volatile unsigned long *)CKSEG1;                      \
        if (R4600_V1_HIT_CACHEOP_WAR)                                   \
                __asm__ __volatile__("nop;nop;nop;nop");                \
 } while (0)
@@ -62,11 +89,13 @@ static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
        blast_dcache32_page(addr);
 }
 
-static inline void r4k_blast_dcache_page_setup(void)
+static void __init r4k_blast_dcache_page_setup(void)
 {
        unsigned long  dc_lsize = cpu_dcache_line_size();
 
-       if (dc_lsize == 16)
+       if (dc_lsize == 0)
+               r4k_blast_dcache_page = (void *)cache_noop;
+       else if (dc_lsize == 16)
                r4k_blast_dcache_page = blast_dcache16_page;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
@@ -74,11 +103,13 @@ static inline void r4k_blast_dcache_page_setup(void)
 
 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
 
-static inline void r4k_blast_dcache_page_indexed_setup(void)
+static void __init r4k_blast_dcache_page_indexed_setup(void)
 {
        unsigned long dc_lsize = cpu_dcache_line_size();
 
-       if (dc_lsize == 16)
+       if (dc_lsize == 0)
+               r4k_blast_dcache_page_indexed = (void *)cache_noop;
+       else if (dc_lsize == 16)
                r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
@@ -86,11 +117,13 @@ static inline void r4k_blast_dcache_page_indexed_setup(void)
 
 static void (* r4k_blast_dcache)(void);
 
-static void r4k_blast_dcache_setup(void)
+static void __init r4k_blast_dcache_setup(void)
 {
        unsigned long dc_lsize = cpu_dcache_line_size();
 
-       if (dc_lsize == 16)
+       if (dc_lsize == 0)
+               r4k_blast_dcache = (void *)cache_noop;
+       else if (dc_lsize == 16)
                r4k_blast_dcache = blast_dcache16;
        else if (dc_lsize == 32)
                r4k_blast_dcache = blast_dcache32;
@@ -126,13 +159,13 @@ static inline void tx49_blast_icache32(void)
 
        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
-       for (ws = 0; ws < ws_end; ws += ws_inc) 
-               for (addr = start + 0x400; addr < end; addr += 0x400 * 2) 
+       for (ws = 0; ws < ws_end; ws += ws_inc)
+               for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws,Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
-       for (ws = 0; ws < ws_end; ws += ws_inc) 
-               for (addr = start; addr < end; addr += 0x400 * 2) 
+       for (ws = 0; ws < ws_end; ws += ws_inc)
+               for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws,Index_Invalidate_I);
 }
 
@@ -147,7 +180,8 @@ static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
 
 static inline void tx49_blast_icache32_page_indexed(unsigned long page)
 {
-       unsigned long start = page;
+       unsigned long indexmask = current_cpu_data.icache.waysize - 1;
+       unsigned long start = INDEX_BASE + (page & indexmask);
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
@@ -156,23 +190,25 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
 
        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
-       for (ws = 0; ws < ws_end; ws += ws_inc) 
-               for (addr = start + 0x400; addr < end; addr += 0x400 * 2) 
+       for (ws = 0; ws < ws_end; ws += ws_inc)
+               for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws,Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
-       for (ws = 0; ws < ws_end; ws += ws_inc) 
-               for (addr = start; addr < end; addr += 0x400 * 2) 
+       for (ws = 0; ws < ws_end; ws += ws_inc)
+               for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws,Index_Invalidate_I);
 }
 
 static void (* r4k_blast_icache_page)(unsigned long addr);
 
-static inline void r4k_blast_icache_page_setup(void)
+static void __init r4k_blast_icache_page_setup(void)
 {
        unsigned long ic_lsize = cpu_icache_line_size();
 
-       if (ic_lsize == 16)
+       if (ic_lsize == 0)
+               r4k_blast_icache_page = (void *)cache_noop;
+       else if (ic_lsize == 16)
                r4k_blast_icache_page = blast_icache16_page;
        else if (ic_lsize == 32)
                r4k_blast_icache_page = blast_icache32_page;
@@ -183,19 +219,21 @@ static inline void r4k_blast_icache_page_setup(void)
 
 static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
 
-static inline void r4k_blast_icache_page_indexed_setup(void)
+static void __init r4k_blast_icache_page_indexed_setup(void)
 {
        unsigned long ic_lsize = cpu_icache_line_size();
 
-       if (ic_lsize == 16)
+       if (ic_lsize == 0)
+               r4k_blast_icache_page_indexed = (void *)cache_noop;
+       else if (ic_lsize == 16)
                r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
        else if (ic_lsize == 32) {
-               if (TX49XX_ICACHE_INDEX_INV_WAR)
-                       r4k_blast_icache_page_indexed =
-                               tx49_blast_icache32_page_indexed;
-               else if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
+               if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache_page_indexed =
                                blast_icache32_r4600_v1_page_indexed;
+               else if (TX49XX_ICACHE_INDEX_INV_WAR)
+                       r4k_blast_icache_page_indexed =
+                               tx49_blast_icache32_page_indexed;
                else
                        r4k_blast_icache_page_indexed =
                                blast_icache32_page_indexed;
@@ -205,11 +243,13 @@ static inline void r4k_blast_icache_page_indexed_setup(void)
 
 static void (* r4k_blast_icache)(void);
 
-static inline void r4k_blast_icache_setup(void)
+static void __init r4k_blast_icache_setup(void)
 {
        unsigned long ic_lsize = cpu_icache_line_size();
 
-       if (ic_lsize == 16)
+       if (ic_lsize == 0)
+               r4k_blast_icache = (void *)cache_noop;
+       else if (ic_lsize == 16)
                r4k_blast_icache = blast_icache16;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
@@ -224,11 +264,13 @@ static inline void r4k_blast_icache_setup(void)
 
 static void (* r4k_blast_scache_page)(unsigned long addr);
 
-static inline void r4k_blast_scache_page_setup(void)
+static void __init r4k_blast_scache_page_setup(void)
 {
        unsigned long sc_lsize = cpu_scache_line_size();
 
-       if (sc_lsize == 16)
+       if (scache_size == 0)
+               r4k_blast_scache_page = (void *)cache_noop;
+       else if (sc_lsize == 16)
                r4k_blast_scache_page = blast_scache16_page;
        else if (sc_lsize == 32)
                r4k_blast_scache_page = blast_scache32_page;
@@ -238,13 +280,33 @@ static inline void r4k_blast_scache_page_setup(void)
                r4k_blast_scache_page = blast_scache128_page;
 }
 
+static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
+
+static void __init r4k_blast_scache_page_indexed_setup(void)
+{
+       unsigned long sc_lsize = cpu_scache_line_size();
+
+       if (scache_size == 0)
+               r4k_blast_scache_page_indexed = (void *)cache_noop;
+       else if (sc_lsize == 16)
+               r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
+       else if (sc_lsize == 32)
+               r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
+       else if (sc_lsize == 64)
+               r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
+       else if (sc_lsize == 128)
+               r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
+}
+
 static void (* r4k_blast_scache)(void);
 
-static inline void r4k_blast_scache_setup(void)
+static void __init r4k_blast_scache_setup(void)
 {
        unsigned long sc_lsize = cpu_scache_line_size();
 
-       if (sc_lsize == 16)
+       if (scache_size == 0)
+               r4k_blast_scache = (void *)cache_noop;
+       else if (sc_lsize == 16)
                r4k_blast_scache = blast_scache16;
        else if (sc_lsize == 32)
                r4k_blast_scache = blast_scache32;
@@ -254,16 +316,24 @@ static inline void r4k_blast_scache_setup(void)
                r4k_blast_scache = blast_scache128;
 }
 
+/*
+ * This is former mm's flush_cache_all() which really should be
+ * flush_cache_vunmap these days ...
+ */
+static inline void local_r4k_flush_cache_all(void * args)
+{
+       r4k_blast_dcache();
+}
+
 static void r4k_flush_cache_all(void)
 {
        if (!cpu_has_dc_aliases)
                return;
 
-       r4k_blast_dcache();
-       r4k_blast_icache();
+       r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
 }
 
-static void r4k___flush_cache_all(void)
+static inline void local_r4k___flush_cache_all(void * args)
 {
        r4k_blast_dcache();
        r4k_blast_icache();
@@ -275,53 +345,83 @@ static void r4k___flush_cache_all(void)
        case CPU_R4400MC:
        case CPU_R10000:
        case CPU_R12000:
+       case CPU_R14000:
                r4k_blast_scache();
        }
 }
 
-static void r4k_flush_cache_range(struct vm_area_struct *vma,
-       unsigned long start, unsigned long end)
+static void r4k___flush_cache_all(void)
 {
-       int exec;
+       r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+}
+
+static inline void local_r4k_flush_cache_range(void * args)
+{
+       struct vm_area_struct *vma = args;
 
        if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
                return;
 
-       exec = vma->vm_flags & VM_EXEC;
-       if (cpu_has_dc_aliases || exec)
-               r4k_blast_dcache();
-       if (exec)
-               r4k_blast_icache();
+       r4k_blast_dcache();
 }
 
-static void r4k_flush_cache_mm(struct mm_struct *mm)
+static void r4k_flush_cache_range(struct vm_area_struct *vma,
+       unsigned long start, unsigned long end)
 {
        if (!cpu_has_dc_aliases)
                return;
 
+       r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+}
+
+static inline void local_r4k_flush_cache_mm(void * args)
+{
+       struct mm_struct *mm = args;
+
        if (!cpu_context(smp_processor_id(), mm))
                return;
 
-       r4k_blast_dcache();
-       r4k_blast_icache();
-
        /*
         * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
         * only flush the primary caches but R10000 and R12000 behave sane ...
+        * R4000SC and R4400SC indexed S-cache ops also invalidate primary
+        * caches, so we can bail out early.
         */
        if (current_cpu_data.cputype == CPU_R4000SC ||
            current_cpu_data.cputype == CPU_R4000MC ||
            current_cpu_data.cputype == CPU_R4400SC ||
-           current_cpu_data.cputype == CPU_R4400MC)
+           current_cpu_data.cputype == CPU_R4400MC) {
                r4k_blast_scache();
+               return;
+       }
+
+       r4k_blast_dcache();
 }
 
-static void r4k_flush_cache_page(struct vm_area_struct *vma,
-                                       unsigned long page)
+static void r4k_flush_cache_mm(struct mm_struct *mm)
 {
+       if (!cpu_has_dc_aliases)
+               return;
+
+       r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+}
+
+struct flush_cache_page_args {
+       struct vm_area_struct *vma;
+       unsigned long addr;
+       unsigned long pfn;
+};
+
+static inline void local_r4k_flush_cache_page(void *args)
+{
+       struct flush_cache_page_args *fcp_args = args;
+       struct vm_area_struct *vma = fcp_args->vma;
+       unsigned long addr = fcp_args->addr;
+       unsigned long paddr = fcp_args->pfn << PAGE_SHIFT;
        int exec = vma->vm_flags & VM_EXEC;
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgdp;
+       pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
 
@@ -332,10 +432,11 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
        if (cpu_context(smp_processor_id(), mm) == 0)
                return;
 
-       page &= PAGE_MASK;
-       pgdp = pgd_offset(mm, page);
-       pmdp = pmd_offset(pgdp, page);
-       ptep = pte_offset(pmdp, page);
+       addr &= PAGE_MASK;
+       pgdp = pgd_offset(mm, addr);
+       pudp = pud_offset(pgdp, addr);
+       pmdp = pmd_offset(pudp, addr);
+       ptep = pte_offset(pmdp, addr);
 
        /*
         * If the page isn't marked valid, the page cannot possibly be
@@ -351,10 +452,13 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
         * in that case, which doesn't overly flush the cache too much.
         */
        if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
-               if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-                       r4k_blast_dcache_page(page);
+               if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
+                       r4k_blast_dcache_page(addr);
+                       if (exec && !cpu_icache_snoops_remote_store)
+                               r4k_blast_scache_page(addr);
+               }
                if (exec)
-                       r4k_blast_icache_page(page);
+                       r4k_blast_icache_page(addr);
 
                return;
        }
@@ -363,139 +467,102 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
         * Do indexed flush, too much work to get the (possible) TLB refills
         * to work correctly.
         */
-       page = INDEX_BASE + (page & (dcache_size - 1));
-       if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-               r4k_blast_dcache_page_indexed(page);
+       if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
+               r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
+                                             paddr : addr);
+               if (exec && !cpu_icache_snoops_remote_store) {
+                       r4k_blast_scache_page_indexed(paddr);
+               }
+       }
        if (exec) {
-               if (cpu_has_vtag_icache) {
+               if (cpu_has_vtag_icache && mm == current->active_mm) {
                        int cpu = smp_processor_id();
 
-                       if (cpu_context(cpu, vma->vm_mm) != 0)
-                               drop_mmu_context(vma->vm_mm, cpu);
+                       if (cpu_context(cpu, mm) != 0)
+                               drop_mmu_context(mm, cpu);
                } else
-                       r4k_blast_icache_page_indexed(page);
+                       r4k_blast_icache_page_indexed(addr);
        }
 }
 
+static void r4k_flush_cache_page(struct vm_area_struct *vma,
+       unsigned long addr, unsigned long pfn)
+{
+       struct flush_cache_page_args args;
+
+       args.vma = vma;
+       args.addr = addr;
+       args.pfn = pfn;
+
+       r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+}
+
+static inline void local_r4k_flush_data_cache_page(void * addr)
+{
+       r4k_blast_dcache_page((unsigned long) addr);
+}
+
 static void r4k_flush_data_cache_page(unsigned long addr)
 {
-       r4k_blast_dcache_page(addr);
+       r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
 }
 
-static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+struct flush_icache_range_args {
+       unsigned long start;
+       unsigned long end;
+};
+
+static inline void local_r4k_flush_icache_range(void *args)
 {
-       unsigned long dc_lsize = current_cpu_data.dcache.linesz;
-       unsigned long addr, aend;
+       struct flush_icache_range_args *fir_args = args;
+       unsigned long start = fir_args->start;
+       unsigned long end = fir_args->end;
 
        if (!cpu_has_ic_fills_f_dc) {
-               if (end - start > dcache_size)
+               if (end - start >= dcache_size) {
                        r4k_blast_dcache();
-               else {
-                       addr = start & ~(dc_lsize - 1);
-                       aend = (end - 1) & ~(dc_lsize - 1);
-
-                       while (1) {
-                               /* Hit_Writeback_Inv_D */
-                               protected_writeback_dcache_line(addr);
-                               if (addr == aend)
-                                       break;
-                               addr += dc_lsize;
-                       }
+               } else {
+                       R4600_HIT_CACHEOP_WAR_IMPL;
+                       protected_blast_dcache_range(start, end);
+               }
+
+               if (!cpu_icache_snoops_remote_store && scache_size) {
+                       if (end - start > scache_size)
+                               r4k_blast_scache();
+                       else
+                               protected_blast_scache_range(start, end);
                }
        }
 
        if (end - start > icache_size)
                r4k_blast_icache();
-       else {
-               addr = start & ~(dc_lsize - 1);
-               aend = (end - 1) & ~(dc_lsize - 1);
-               while (1) {
-                       /* Hit_Invalidate_I */
-                       protected_flush_icache_line(addr);
-                       if (addr == aend)
-                               break;
-                       addr += dc_lsize;
-               }
-       }
+       else
+               protected_blast_icache_range(start, end);
 }
 
-/*
- * Ok, this seriously sucks.  We use them to flush a user page but don't
- * know the virtual address, so we have to blast away the whole icache
- * which is significantly more expensive than the real thing.  Otoh we at
- * least know the kernel address of the page so we can flush it
- * selectivly.
- */
-static void r4k_flush_icache_page(struct vm_area_struct *vma, struct page *page)
+static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 {
-       /*
-        * If there's no context yet, or the page isn't executable, no icache
-        * flush is needed.
-        */
-       if (!(vma->vm_flags & VM_EXEC))
-               return;
-
-       /*
-        * Tricky ...  Because we don't know the virtual address we've got the
-        * choice of either invalidating the entire primary and secondary
-        * caches or invalidating the secondary caches also.  With the subset
-        * enforcment on R4000SC, R4400SC, R10000 and R12000 invalidating the
-        * secondary cache will result in any entries in the primary caches
-        * also getting invalidated which hopefully is a bit more economical.
-        */
-       if (cpu_has_subset_pcaches) {
-               unsigned long addr = (unsigned long) page_address(page);
-
-               r4k_blast_scache_page(addr);
-               ClearPageDcacheDirty(page);
+       struct flush_icache_range_args args;
 
-               return;
-       }
+       args.start = start;
+       args.end = end;
 
-       if (!cpu_has_ic_fills_f_dc) {
-               unsigned long addr = (unsigned long) page_address(page);
-               r4k_blast_dcache_page(addr);
-               ClearPageDcacheDirty(page);
-       }
-
-       /*
-        * We're not sure of the virtual address(es) involved here, so
-        * we have to flush the entire I-cache.
-        */
-       if (cpu_has_vtag_icache) {
-               int cpu = smp_processor_id();
-
-               if (cpu_context(cpu, vma->vm_mm) != 0)
-                       drop_mmu_context(vma->vm_mm, cpu);
-       } else
-               r4k_blast_icache();
+       r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+       instruction_hazard();
 }
 
 #ifdef CONFIG_DMA_NONCOHERENT
 
 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 {
-       unsigned long end, a;
-
        /* Catch bad driver code */
        BUG_ON(size == 0);
 
-       if (cpu_has_subset_pcaches) {
-               unsigned long sc_lsize = current_cpu_data.scache.linesz;
-
-               if (size >= scache_size) {
+       if (cpu_has_inclusive_pcaches) {
+               if (size >= scache_size)
                        r4k_blast_scache();
-                       return;
-               }
-
-               a = addr & ~(sc_lsize - 1);
-               end = (addr + size - 1) & ~(sc_lsize - 1);
-               while (1) {
-                       flush_scache_line(a);   /* Hit_Writeback_Inv_SD */
-                       if (a == end)
-                               break;
-                       a += sc_lsize;
-               }
+               else
+                       blast_scache_range(addr, addr + size);
                return;
        }
 
@@ -507,17 +574,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
        if (size >= dcache_size) {
                r4k_blast_dcache();
        } else {
-               unsigned long dc_lsize = current_cpu_data.dcache.linesz;
-
                R4600_HIT_CACHEOP_WAR_IMPL;
-               a = addr & ~(dc_lsize - 1);
-               end = (addr + size - 1) & ~(dc_lsize - 1);
-               while (1) {
-                       flush_dcache_line(a);   /* Hit_Writeback_Inv_D */
-                       if (a == end)
-                               break;
-                       a += dc_lsize;
-               }
+               blast_dcache_range(addr, addr + size);
        }
 
        bc_wback_inv(addr, size);
@@ -525,44 +583,22 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 
 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 {
-       unsigned long end, a;
-
        /* Catch bad driver code */
        BUG_ON(size == 0);
 
-       if (cpu_has_subset_pcaches) {
-               unsigned long sc_lsize = current_cpu_data.scache.linesz;
-
-               if (size >= scache_size) {
+       if (cpu_has_inclusive_pcaches) {
+               if (size >= scache_size)
                        r4k_blast_scache();
-                       return;
-               }
-
-               a = addr & ~(sc_lsize - 1);
-               end = (addr + size - 1) & ~(sc_lsize - 1);
-               while (1) {
-                       flush_scache_line(a);   /* Hit_Writeback_Inv_SD */
-                       if (a == end)
-                               break;
-                       a += sc_lsize;
-               }
+               else
+                       blast_scache_range(addr, addr + size);
                return;
        }
 
        if (size >= dcache_size) {
                r4k_blast_dcache();
        } else {
-               unsigned long dc_lsize = current_cpu_data.dcache.linesz;
-
                R4600_HIT_CACHEOP_WAR_IMPL;
-               a = addr & ~(dc_lsize - 1);
-               end = (addr + size - 1) & ~(dc_lsize - 1);
-               while (1) {
-                       flush_dcache_line(a);   /* Hit_Writeback_Inv_D */
-                       if (a == end)
-                               break;
-                       a += dc_lsize;
-               }
+               blast_dcache_range(addr, addr + size);
        }
 
        bc_inv(addr, size);
@@ -574,23 +610,29 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
  * very much about what happens in that case.  Usually a segmentation
  * fault will dump the process later on anyway ...
  */
-static void r4k_flush_cache_sigtramp(unsigned long addr)
+static void local_r4k_flush_cache_sigtramp(void * arg)
 {
-       unsigned long ic_lsize = current_cpu_data.icache.linesz;
-       unsigned long dc_lsize = current_cpu_data.dcache.linesz;
+       unsigned long ic_lsize = cpu_icache_line_size();
+       unsigned long dc_lsize = cpu_dcache_line_size();
+       unsigned long sc_lsize = cpu_scache_line_size();
+       unsigned long addr = (unsigned long) arg;
 
        R4600_HIT_CACHEOP_WAR_IMPL;
-       protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
-       protected_flush_icache_line(addr & ~(ic_lsize - 1));
+       if (dc_lsize)
+               protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
+       if (!cpu_icache_snoops_remote_store && scache_size)
+               protected_writeback_scache_line(addr & ~(sc_lsize - 1));
+       if (ic_lsize)
+               protected_flush_icache_line(addr & ~(ic_lsize - 1));
        if (MIPS4K_ICACHE_REFILL_WAR) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noat\n\t"
                        ".set mips3\n\t"
-#if CONFIG_MIPS32
+#ifdef CONFIG_32BIT
                        "la     $at,1f\n\t"
 #endif
-#if CONFIG_MIPS64
+#ifdef CONFIG_64BIT
                        "dla    $at,1f\n\t"
 #endif
                        "cache  %0,($at)\n\t"
@@ -604,6 +646,11 @@ static void r4k_flush_cache_sigtramp(unsigned long addr)
                __asm__ __volatile__ ("sync");
 }
 
+static void r4k_flush_cache_sigtramp(unsigned long addr)
+{
+       r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
+}
+
 static void r4k_flush_icache_all(void)
 {
        if (cpu_has_vtag_icache)
@@ -621,6 +668,7 @@ static inline void rm7k_erratum31(void)
 
        for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
                __asm__ __volatile__ (
+                       ".set push\n\t"
                        ".set noreorder\n\t"
                        ".set mips3\n\t"
                        "cache\t%1, 0(%0)\n\t"
@@ -635,15 +683,14 @@ static inline void rm7k_erratum31(void)
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
-                       ".set\tmips0\n\t"
-                       ".set\treorder\n\t"
+                       ".set pop\n"
                        :
                        : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
        }
 }
 
-static char *way_string[] = { NULL, "direct mapped", "2-way", "3-way", "4-way",
-       "5-way", "6-way", "7-way", "8-way"
+static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
+       "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
 };
 
 static void __init probe_pcache(void)
@@ -662,12 +709,12 @@ static void __init probe_pcache(void)
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
-               c->icache.waybit = ffs(icache_size/2) - 1;
+               c->icache.waybit = __ffs(icache_size/2);
 
                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
-               c->dcache.waybit= ffs(dcache_size/2) - 1;
+               c->dcache.waybit= __ffs(dcache_size/2);
 
                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;
@@ -699,6 +746,7 @@ static void __init probe_pcache(void)
                c->dcache.waybit = 0;
 
                c->options |= MIPS_CPU_CACHE_CDEX_P;
+               c->options |= MIPS_CPU_PREFETCH;
                break;
 
        case CPU_R4000PC:
@@ -723,6 +771,7 @@ static void __init probe_pcache(void)
 
        case CPU_R10000:
        case CPU_R12000:
+       case CPU_R14000:
                icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
                c->icache.linesz = 64;
                c->icache.ways = 2;
@@ -737,26 +786,27 @@ static void __init probe_pcache(void)
                break;
 
        case CPU_VR4133:
-               write_c0_config(config & ~CONF_EB);
+               write_c0_config(config & ~VR41_CONF_P4K);
        case CPU_VR4131:
                /* Workaround for cache instruction bug of VR4131 */
                if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
                    c->processor_id == 0x0c82U) {
-                       config &= ~0x00000030U;
-                       config |= 0x00410000U;
+                       config |= 0x00400000U;
+                       if (c->processor_id == 0x0c80U)
+                               config |= VR41_CONF_BP;
                        write_c0_config(config);
-               }
+               } else
+                       c->options |= MIPS_CPU_CACHE_CDEX_P;
+
                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
-               c->icache.waybit = ffs(icache_size/2) - 1;
+               c->icache.waybit = __ffs(icache_size/2);
 
                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
-               c->dcache.waybit = ffs(dcache_size/2) - 1;
-
-               c->options |= MIPS_CPU_CACHE_CDEX_P;
+               c->dcache.waybit = __ffs(dcache_size/2);
                break;
 
        case CPU_VR41XX:
@@ -785,12 +835,12 @@ static void __init probe_pcache(void)
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
-               c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
+               c->icache.waybit = __ffs(icache_size / c->icache.ways);
 
                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
-               c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
+               c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
 
 #if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
                c->options |= MIPS_CPU_CACHE_CDEX_P;
@@ -818,7 +868,7 @@ static void __init probe_pcache(void)
                icache_size = c->icache.sets *
                              c->icache.ways *
                              c->icache.linesz;
-               c->icache.waybit = ffs(icache_size/c->icache.ways) - 1;
+               c->icache.waybit = __ffs(icache_size/c->icache.ways);
 
                if (config & 0x8)               /* VI bit */
                        c->icache.flags |= MIPS_CACHE_VTAG;
@@ -838,7 +888,7 @@ static void __init probe_pcache(void)
                dcache_size = c->dcache.sets *
                              c->dcache.ways *
                              c->dcache.linesz;
-               c->dcache.waybit = ffs(dcache_size/c->dcache.ways) - 1;
+               c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
 
                c->options |= MIPS_CPU_PREFETCH;
                break;
@@ -861,8 +911,10 @@ static void __init probe_pcache(void)
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;
 
-       c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
-       c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
+       c->icache.sets = c->icache.linesz ?
+               icache_size / (c->icache.linesz * c->icache.ways) : 0;
+       c->dcache.sets = c->dcache.linesz ?
+               dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
 
        /*
         * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
@@ -870,9 +922,28 @@ static void __init probe_pcache(void)
         * normally they'd suffer from aliases but magic in the hardware deals
         * with that for us so we don't need to take care ourselves.
         */
-       if (c->cputype != CPU_R10000 && c->cputype != CPU_R12000)
+       switch (c->cputype) {
+       case CPU_20KC:
+       case CPU_25KF:
+               c->dcache.flags |= MIPS_CACHE_PINDEX;
+       case CPU_R10000:
+       case CPU_R12000:
+       case CPU_R14000:
+       case CPU_SB1:
+               break;
+       case CPU_24K:
+       case CPU_34K:
+       case CPU_74K:
+               if ((read_c0_config7() & (1 << 16))) {
+                       /* effectively physically indexed dcache,
+                          thus no virtual aliases. */
+                       c->dcache.flags |= MIPS_CACHE_PINDEX;
+                       break;
+               }
+       default:
                if (c->dcache.waysize > PAGE_SIZE)
-                       c->dcache.flags |= MIPS_CACHE_ALIASES;
+                       c->dcache.flags |= MIPS_CACHE_ALIASES;
+       }
 
        switch (c->cputype) {
        case CPU_20KC:
@@ -883,7 +954,11 @@ static void __init probe_pcache(void)
                c->icache.flags |= MIPS_CACHE_VTAG;
                break;
 
+       case CPU_AU1000:
        case CPU_AU1500:
+       case CPU_AU1100:
+       case CPU_AU1550:
+       case CPU_AU1200:
                c->icache.flags |= MIPS_CACHE_IC_F_DC;
                break;
        }
@@ -893,7 +968,7 @@ static void __init probe_pcache(void)
               cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
               way_string[c->icache.ways], c->icache.linesz);
 
-       printk("Primary data cache %ldkB %s, linesize %d bytes.\n",
+       printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
               dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
 }
 
@@ -961,15 +1036,14 @@ static int __init probe_scache(void)
        return 1;
 }
 
-typedef int (*probe_func_t)(unsigned long);
 extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
+extern int mips_sc_init(void);
 
 static void __init setup_scache(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
-       probe_func_t probe_scache_kseg1;
        int sc_present = 0;
 
        /*
@@ -982,14 +1056,14 @@ static void __init setup_scache(void)
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
-               probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
-               sc_present = probe_scache_kseg1(config);
+               sc_present = run_uncached(probe_scache);
                if (sc_present)
                        c->options |= MIPS_CPU_CACHE_CDEX_S;
                break;
 
        case CPU_R10000:
        case CPU_R12000:
+       case CPU_R14000:
                scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
                c->scache.linesz = 64 << ((config >> 13) & 1);
                c->scache.ways = 2;
@@ -1012,17 +1086,29 @@ static void __init setup_scache(void)
                return;
 
        default:
+               if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
+                   c->isa_level == MIPS_CPU_ISA_M32R2 ||
+                   c->isa_level == MIPS_CPU_ISA_M64R1 ||
+                   c->isa_level == MIPS_CPU_ISA_M64R2) {
+#ifdef CONFIG_MIPS_CPU_SCACHE
+                       if (mips_sc_init ()) {
+                               scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
+                               printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
+                                      scache_size >> 10,
+                                      way_string[c->scache.ways], c->scache.linesz);
+                       }
+#else
+                       if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
+                               panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
+#endif
+                       return;
+               }
                sc_present = 0;
        }
 
        if (!sc_present)
                return;
 
-       if ((c->isa_level == MIPS_CPU_ISA_M32 ||
-            c->isa_level == MIPS_CPU_ISA_M64) &&
-           !(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
-               panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
-
        /* compute a couple of other cache variables */
        c->scache.waysize = scache_size / c->scache.ways;
 
@@ -1031,10 +1117,35 @@ static void __init setup_scache(void)
        printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
               scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
 
-       c->options |= MIPS_CPU_SUBSET_CACHES;
+       c->options |= MIPS_CPU_INCLUSIVE_CACHES;
 }
 
-static inline void coherency_setup(void)
+void au1x00_fixup_config_od(void)
+{
+       /*
+        * c0_config.od (bit 19) was write only (and read as 0)
+        * on the early revisions of Alchemy SOCs.  It disables the bus
+        * transaction overlapping and needs to be set to fix various errata.
+        */
+       switch (read_c0_prid()) {
+       case 0x00030100: /* Au1000 DA */
+       case 0x00030201: /* Au1000 HA */
+       case 0x00030202: /* Au1000 HB */
+       case 0x01030200: /* Au1500 AB */
+       /*
+        * Au1100 errata actually keeps silence about this bit, so we set it
+        * just in case for those revisions that require it to be set according
+        * to arch/mips/au1000/common/cputable.c
+        */
+       case 0x02030200: /* Au1100 AB */
+       case 0x02030201: /* Au1100 BA */
+       case 0x02030202: /* Au1100 BC */
+               set_c0_config(1 << 19);
+               break;
+       }
+}
+
+static void __init coherency_setup(void)
 {
        change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
 
@@ -1054,10 +1165,19 @@ static inline void coherency_setup(void)
        case CPU_R4400MC:
                clear_c0_config(CONF_CU);
                break;
+       /*
+        * We need to catch the early Alchemy SOCs with
+        * the write-only co_config.od bit and set it back to one...
+        */
+       case CPU_AU1000: /* rev. DA, HA, HB */
+       case CPU_AU1100: /* rev. AB, BA, BC ?? */
+       case CPU_AU1500: /* rev. AB */
+               au1x00_fixup_config_od();
+               break;
        }
 }
 
-void __init ld_mmu_r4xx0(void)
+void __init r4k_cache_init(void)
 {
        extern void build_clear_page(void);
        extern void build_copy_page(void);
@@ -1065,15 +1185,11 @@ void __init ld_mmu_r4xx0(void)
        struct cpuinfo_mips *c = &current_cpu_data;
 
        /* Default cache error handler for R4000 and R5000 family */
-       memcpy((void *)(CAC_BASE   + 0x100), &except_vec2_generic, 0x80);
-       memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_generic, 0x80);
+       set_uncached_handler (0x100, &except_vec2_generic, 0x80);
 
        probe_pcache();
        setup_scache();
 
-       if (c->dcache.sets * c->dcache.ways > PAGE_SIZE)
-               c->dcache.flags |= MIPS_CACHE_ALIASES;
-
        r4k_blast_dcache_page_setup();
        r4k_blast_dcache_page_indexed_setup();
        r4k_blast_dcache_setup();
@@ -1081,6 +1197,7 @@ void __init ld_mmu_r4xx0(void)
        r4k_blast_icache_page_indexed_setup();
        r4k_blast_icache_setup();
        r4k_blast_scache_page_setup();
+       r4k_blast_scache_page_indexed_setup();
        r4k_blast_scache_setup();
 
        /*
@@ -1088,19 +1205,21 @@ void __init ld_mmu_r4xx0(void)
         * This code supports virtually indexed processors and will be
         * unnecessarily inefficient on physically indexed processors.
         */
-       shm_align_mask = max_t( unsigned long,
-                               c->dcache.sets * c->dcache.linesz - 1,
-                               PAGE_SIZE - 1);
-
+       if (c->dcache.linesz)
+               shm_align_mask = max_t( unsigned long,
+                                       c->dcache.sets * c->dcache.linesz - 1,
+                                       PAGE_SIZE - 1);
+       else
+               shm_align_mask = PAGE_SIZE-1;
        flush_cache_all         = r4k_flush_cache_all;
        __flush_cache_all       = r4k___flush_cache_all;
        flush_cache_mm          = r4k_flush_cache_mm;
        flush_cache_page        = r4k_flush_cache_page;
-       flush_icache_page       = r4k_flush_icache_page;
        flush_cache_range       = r4k_flush_cache_range;
 
        flush_cache_sigtramp    = r4k_flush_cache_sigtramp;
        flush_icache_all        = r4k_flush_icache_all;
+       local_flush_data_cache_page     = local_r4k_flush_data_cache_page;
        flush_data_cache_page   = r4k_flush_data_cache_page;
        flush_icache_range      = r4k_flush_icache_range;
 
@@ -1110,9 +1229,8 @@ void __init ld_mmu_r4xx0(void)
        _dma_cache_inv          = r4k_dma_cache_inv;
 #endif
 
-       __flush_cache_all();
-       coherency_setup();
-
        build_clear_page();
        build_copy_page();
+       local_r4k___flush_cache_all(NULL);
+       coherency_setup();
 }