#ifdef __ASSEMBLY__
.macro _ssnop
- sll $0, $2, 1
+ sll $0, $0, 1
+ .endm
+
+ .macro _ehb
+ sll $0, $0, 3
.endm
/*
*/
#ifdef CONFIG_CPU_RM9000
-#define mtc0_tlbw_hazard \
- .set push; \
- .set mips32; \
- _ssnop; _ssnop; _ssnop; _ssnop; \
+ .macro mtc0_tlbw_hazard
+ .set push
+ .set mips32
+ _ssnop; _ssnop; _ssnop; _ssnop
.set pop
+ .endm
-#define tlbw_eret_hazard \
- .set push; \
- .set mips32; \
- _ssnop; _ssnop; _ssnop; _ssnop; \
+ .macro tlbw_eret_hazard
+ .set push
+ .set mips32
+ _ssnop; _ssnop; _ssnop; _ssnop
.set pop
+ .endm
#else
* hazard so this is nice trick to have an optimal code for a range of
* processors.
*/
-#define mtc0_tlbw_hazard \
+ .macro mtc0_tlbw_hazard
b . + 8
-#define tlbw_eret_hazard
+ .endm
+
+ .macro tlbw_eret_hazard
+ .endm
#endif
/*
/*
* Use a macro for ehb unless explicit support for MIPSR2 is enabled
*/
- .macro ehb
- sll $0, $0, 3
- .endm
-#define irq_enable_hazard \
- ehb # irq_enable_hazard
+#define irq_enable_hazard \
+	_ehb
-#define irq_disable_hazard \
- ehb # irq_disable_hazard
+#define irq_disable_hazard \
+	_ehb
-#else
+#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
+
+/*
+ * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
+ */
#define irq_enable_hazard
+
#define irq_disable_hazard
+#else
+
+/*
+ * Classic MIPS needs 1 - 3 nops or ssnops
+ */
+#define irq_enable_hazard
+#define irq_disable_hazard \
+ _ssnop; _ssnop; _ssnop
+
#endif
#else /* __ASSEMBLY__ */
+__asm__(
+ " .macro _ssnop \n\t"
+ " sll $0, $2, 1 \n\t"
+ " .endm \n\t"
+ " \n\t"
+ " .macro _ehb \n\t"
+ " sll $0, $0, 3 \n\t"
+ " .endm \n\t");
+
+#ifdef CONFIG_CPU_RM9000
/*
* RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent
* use of the JTLB for instructions should not occur for 4 cpu cycles and use
* for data translations should not occur for 3 cpu cycles.
*/
-#ifdef CONFIG_CPU_RM9000
#define mtc0_tlbw_hazard() \
__asm__ __volatile__( \
* Use a macro for ehb unless explicit support for MIPSR2 is enabled
*/
__asm__(
- " .macro ehb \n\t"
- " sll $0, $0, 3 \n\t"
- " .endm \n\t"
- " \n\t"
" .macro\tirq_enable_hazard \n\t"
- " ehb \n\t"
+ " _ehb \n\t"
" .endm \n\t"
" \n\t"
" .macro\tirq_disable_hazard \n\t"
- " ehb \n\t"
+ " _ehb \n\t"
" .endm");
#define irq_enable_hazard() \
__asm__ __volatile__( \
- "ehb\t\t\t\t# irq_enable_hazard")
+ "_ehb\t\t\t\t# irq_enable_hazard")
#define irq_disable_hazard() \
__asm__ __volatile__( \
- "ehb\t\t\t\t# irq_disable_hazard")
+ "_ehb\t\t\t\t# irq_disable_hazard")
-#elif defined(CONFIG_CPU_R10000)
+#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
/*
* R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
*/
__asm__(
- " .macro _ssnop \n\t"
- " sll $0, $2, 1 \n\t"
- " .endm \n\t"
- " \n\t"
" # \n\t"
" # There is a hazard but we do not care \n\t"
" # \n\t"