diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
index 4a024fa..5007315 100644
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2003, 2004 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) MIPS Technologies, Inc.
+ *   written by Ralf Baechle <ralf@linux-mips.org>
  */
 #ifndef _ASM_HAZARDS_H
 #define _ASM_HAZARDS_H
 
-#include <linux/config.h>
 
 #ifdef __ASSEMBLY__
+#define ASMMACRO(name, code...) .macro name; code; .endm
+#else
+
+#define ASMMACRO(name, code...)                                                \
+__asm__(".macro " #name "; " #code "; .endm");                         \
+                                                                       \
+static inline void name(void)                                          \
+{                                                                      \
+       __asm__ __volatile__ (#name);                                   \
+}
+
+#endif
+
+ASMMACRO(_ssnop,
+        sll    $0, $0, 1
+       )
+
+ASMMACRO(_ehb,
+        sll    $0, $0, 3
+       )
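
To see what the ASMMACRO() above buys: in C code each invocation emits both
a gas ".macro" (so the name can be used from inside other inline assembler)
and a C-callable inline wrapper that emits it.  A sketch of what
ASMMACRO(_ehb, sll $0, $0, 3) expands to:

	/* Teach gas a macro named _ehb, usable inside later asm blocks. */
	__asm__(".macro _ehb; sll $0, $0, 3; .endm");

	/* C-callable wrapper; emits the gas macro defined above. */
	static inline void _ehb(void)
	{
		__asm__ __volatile__ ("_ehb");
	}

sll $0, $0, 1 and sll $0, $0, 3 are the architectural encodings of ssnop
and ehb, which is why these macros assemble even where gas is not targeting
MIPS32R2.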
 
 /*
- * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
- * use of the JTLB for instructions should not occur for 4 cpu cycles and use
- * for data translations should not occur for 3 cpu cycles.
+ * TLB hazards
  */
-#ifdef CONFIG_CPU_RM9000
-#define mtc0_tlbw_hazard                                               \
-       .set    push;                                                   \
-       .set    mips32;                                                 \
-       ssnop; ssnop; ssnop; ssnop;                                     \
-       .set    pop
-
-#define tlbw_eret_hazard                                               \
-       .set    push;                                                   \
-       .set    mips32;                                                 \
-       ssnop; ssnop; ssnop; ssnop;                                     \
-       .set    pop
+#if defined(CONFIG_CPU_MIPSR2)
 
-#else
+/*
+ * MIPSR2 defines ehb for hazard avoidance
+ */
 
+ASMMACRO(mtc0_tlbw_hazard,
+        _ehb
+       )
+ASMMACRO(tlbw_use_hazard,
+        _ehb
+       )
+ASMMACRO(tlb_probe_hazard,
+        _ehb
+       )
+ASMMACRO(irq_enable_hazard,
+       )
+ASMMACRO(irq_disable_hazard,
+       _ehb
+       )
+ASMMACRO(back_to_back_c0_hazard,
+        _ehb
+       )
 /*
- * The taken branch will result in a two cycle penalty for the two killed
- * instructions on R4000 / R4400.  Other processors only have a single cycle
- * hazard so this is nice trick to have an optimal code for a range of
- * processors.
+ * gcc has a tradition of miscompiling the previous construct, which used
+ * the address of a label as an argument to inline assembler.  Gas, on the
+ * other hand, has the annoying difference between la and dla, which are
+ * usable only for 32-bit and 64-bit code respectively, so neither can be
+ * used without conditional compilation.  The alternative is switching the
+ * assembler to 64-bit code, which happens to work right even for 32-bit
+ * code ...
  */
-#define mtc0_tlbw_hazard                                               \
-       b       . + 8
-#define tlbw_eret_hazard
-#endif
+#define instruction_hazard()                                           \
+do {                                                                   \
+       unsigned long tmp;                                              \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "       .set    mips64r2                                \n"     \
+       "       dla     %0, 1f                                  \n"     \
+       "       jr.hb   %0                                      \n"     \
+       "       .set    mips0                                   \n"     \
+       "1:                                                     \n"     \
+       : "=r" (tmp));                                                  \
+} while (0)
+
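
A hypothetical usage sketch for instruction_hazard() (the patch target and
helper below are assumptions for illustration, not from this file): after
storing to instruction memory and writing back the caches, the caller still
has to clear the pipeline hazard:

	/* Assumed example: patch one kernel instruction in place. */
	extern unsigned int patch_site;

	static void patch_one_insn(unsigned int new_insn)
	{
		patch_site = new_insn;
		flush_icache_range((unsigned long)&patch_site,
				   (unsigned long)(&patch_site + 1));
		instruction_hazard();	/* jr.hb on MIPSR2; a no-op where
					   hardware handles the hazard */
	}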
+#elif defined(CONFIG_CPU_R10000)
 
-#else /* __ASSEMBLY__ */
+/*
+ * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer.
+ */
+
+ASMMACRO(mtc0_tlbw_hazard,
+       )
+ASMMACRO(tlbw_use_hazard,
+       )
+ASMMACRO(tlb_probe_hazard,
+       )
+ASMMACRO(irq_enable_hazard,
+       )
+ASMMACRO(irq_disable_hazard,
+       )
+ASMMACRO(back_to_back_c0_hazard,
+       )
+#define instruction_hazard() do { } while (0)
+
+#elif defined(CONFIG_CPU_RM9000)
 
 /*
  * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
  * use of the JTLB for instructions should not occur for 4 cpu cycles and use
  * for data translations should not occur for 3 cpu cycles.
  */
-#ifdef CONFIG_CPU_RM9000
 
-#define mtc0_tlbw_hazard()                                             \
-       __asm__ __volatile__(                                           \
-               ".set\tmips32\n\t"                                      \
-               "ssnop; ssnop; ssnop; ssnop\n\t"                        \
-               ".set\tmips0")
+ASMMACRO(mtc0_tlbw_hazard,
+        _ssnop; _ssnop; _ssnop; _ssnop
+       )
+ASMMACRO(tlbw_use_hazard,
+        _ssnop; _ssnop; _ssnop; _ssnop
+       )
+ASMMACRO(tlb_probe_hazard,
+        _ssnop; _ssnop; _ssnop; _ssnop
+       )
+ASMMACRO(irq_enable_hazard,
+       )
+ASMMACRO(irq_disable_hazard,
+       )
+ASMMACRO(back_to_back_c0_hazard,
+       )
+#define instruction_hazard() do { } while (0)
 
-#define tlbw_use_hazard()                                              \
-       __asm__ __volatile__(                                           \
-               ".set\tmips32\n\t"                                      \
-               "ssnop; ssnop; ssnop; ssnop\n\t"                        \
-               ".set\tmips0")
-#else
+#elif defined(CONFIG_CPU_SB1)
 
 /*
- * Overkill warning ...
+ * Mostly like the R4000 for historical reasons
  */
-#define mtc0_tlbw_hazard()                                             \
-       __asm__ __volatile__(                                           \
-               ".set noreorder\n\t"                                    \
-               "nop; nop; nop; nop; nop; nop;\n\t"                     \
-               ".set reorder\n\t")
+ASMMACRO(mtc0_tlbw_hazard,
+       )
+ASMMACRO(tlbw_use_hazard,
+       )
+ASMMACRO(tlb_probe_hazard,
+       )
+ASMMACRO(irq_enable_hazard,
+       )
+ASMMACRO(irq_disable_hazard,
+        _ssnop; _ssnop; _ssnop
+       )
+ASMMACRO(back_to_back_c0_hazard,
+       )
+#define instruction_hazard() do { } while (0)
 
-#define tlbw_use_hazard()                                              \
-       __asm__ __volatile__(                                           \
-               ".set noreorder\n\t"                                    \
-               "nop; nop; nop; nop; nop; nop;\n\t"                     \
-               ".set reorder\n\t")
+#else
 
-#endif
+/*
+ * Finally the catchall case for all other processors including R4000, R4400,
+ * R4600, R4700, R5000, RM7000, NEC VR41xx etc.
+ *
+ * The taken branch will result in a two cycle penalty for the two killed
+ * instructions on R4000 / R4400.  Other processors only have a single cycle
+ * hazard, so this is a nice trick to get optimal code for a range of
+ * processors.
+ */
+ASMMACRO(mtc0_tlbw_hazard,
+       nop; nop
+       )
+ASMMACRO(tlbw_use_hazard,
+       nop; nop; nop
+       )
+ASMMACRO(tlb_probe_hazard,
+        nop; nop; nop
+       )
+ASMMACRO(irq_enable_hazard,
+       )
+ASMMACRO(irq_disable_hazard,
+       nop; nop; nop
+       )
+ASMMACRO(back_to_back_c0_hazard,
+        _ssnop; _ssnop; _ssnop;
+       )
+#define instruction_hazard() do { } while (0)
 
-#endif /* __ASSEMBLY__ */
+#endif
 
 #endif /* _ASM_HAZARDS_H */
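
As a usage sketch of how the TLB hazard macros pair up (simplified along
the lines of arch/mips/mm, with the coprocessor 0 accessors from
<asm/mipsregs.h>; treat the details as assumptions, not as this patch's
code):

	/* Simplified sketch: rewrite one indexed TLB entry. */
	static void update_tlb_entry(unsigned long idx, unsigned long hi,
				     unsigned long lo0, unsigned long lo1)
	{
		write_c0_index(idx);
		write_c0_entryhi(hi);
		write_c0_entrylo0(lo0);
		write_c0_entrylo1(lo1);
		mtc0_tlbw_hazard();	/* mtc0 -> tlbwi hazard    */
		tlb_write_indexed();
		tlbw_use_hazard();	/* tlbwi -> TLB use hazard */
	}

tlb_probe_hazard() plays the same role after a tlbp, before the Index
register is read back.  Which instructions each macro expands to - ehb,
ssnops, nops or nothing - is fixed once by the CPU selection above, so
callers stay identical across processors.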