#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h> /* sigh ... */
+#include <asm/cpu-features.h>
#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
-#define __LL "ll"
-#define __SC "sc"
+#define __LL "ll "
+#define __SC "sc "
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
-#define __LL "lld"
-#define __SC "scd"
+#define __LL "lld "
+#define __SC "scd "
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif
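A minimal sketch, assuming _MIPS_SZLONG == 64 and a hypothetical bitmap/nr pair, of how these constants split a bit number into a word index and an in-word mask (the example_bit_word helper is illustrative only):

/* Illustrative only: bit nr lives in word nr >> SZLONG_LOG (nr / 64) of
 * the map, at position nr & SZLONG_MASK (nr % 64) within that word. */
static inline unsigned long *example_bit_word(unsigned long *bitmap,
                                              unsigned long nr,
                                              unsigned long *mask)
{
	*mask = 1UL << (nr & SZLONG_MASK);	/* in-word bit position */
	return bitmap + (nr >> SZLONG_LOG);	/* word holding bit nr */
}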
#ifdef __KERNEL__
+#include <asm/interrupt.h>
#include <asm/sgidefs.h>
-#include <asm/system.h>
+#include <asm/war.h>
/*
* clear_bit() doesn't provide any barrier for the compiler.
* Only disable interrupts for kernel-mode code, so that user-mode code
* which dares to include kernel headers still builds (a usage sketch of
* these wrappers follows the #endif below).
*/
+
#define __bi_flags unsigned long flags
-#define __bi_cli() local_irq_disable()
-#define __bi_save_flags(x) local_save_flags(x)
#define __bi_local_irq_save(x) local_irq_save(x)
#define __bi_local_irq_restore(x) local_irq_restore(x)
#else
#define __bi_flags
-#define __bi_cli()
-#define __bi_save_flags(x)
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */
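As a usage sketch only, this is how the __bi_* wrappers above combine with the word/mask split; it mirrors the non-LL/SC fallback paths of set_bit() below, and the example_fallback_set_bit name is hypothetical:

static inline void example_fallback_set_bit(unsigned long nr,
                                            volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> SZLONG_LOG);
	unsigned long mask = 1UL << (nr & SZLONG_MASK);
	__bi_flags;

	__bi_local_irq_save(flags);	/* disables IRQs in kernel builds only */
	*a |= mask;
	__bi_local_irq_restore(flags);
}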
-#ifdef CONFIG_CPU_HAS_LLSC
-
-/*
- * These functions for MIPS ISA > 1 are interrupt and SMP proof and
- * interrupt friendly
- */
-
/*
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
- __asm__ __volatile__(
- "1:\t" __LL "\t%0, %1\t\t# set_bit\n\t"
- "or\t%0, %2\n\t"
- __SC "\t%0, %1\n\t"
- "beqz\t%0, 1b"
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ __asm__ __volatile__(
+ "1: " __LL "%0, %1 # set_bit \n"
+ " or %0, %2 \n"
+ " "__SC "%0, %1 \n"
+ " beqzl %0, 1b \n"
: "=&r" (temp), "=m" (*m)
: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__(
+ "1: " __LL "%0, %1 # set_bit \n"
+ " or %0, %2 \n"
+ " "__SC "%0, %1 \n"
+ " beqz %0, 1b \n"
+ : "=&r" (temp), "=m" (*m)
+ : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+ } else {
+ volatile unsigned long *a = addr;
+ unsigned long mask;
+ __bi_flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << (nr & SZLONG_MASK);
+ __bi_local_irq_save(flags);
+ *a |= mask;
+ __bi_local_irq_restore(flags);
+ }
}
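For comparison, a minimal sketch of the guarantee set_bit() provides, expressed with C11 atomics rather than the kernel's LL/SC or interrupt-disable paths; the example_atomic_set_bit name and the bitmap parameter (an array of atomic words) are assumptions for illustration:

#include <stdatomic.h>

static void example_atomic_set_bit(unsigned long nr,
                                   _Atomic unsigned long *bitmap)
{
	/* atomically OR the mask into the word holding bit nr */
	atomic_fetch_or(&bitmap[nr >> SZLONG_LOG],
	                1UL << (nr & SZLONG_MASK));
}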
/*
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
- __asm__ __volatile__(
- "1:\t" __LL "\t%0, %1\t\t# clear_bit\n\t"
- "and\t%0, %2\n\t"
- __SC "\t%0, %1\n\t"
- "beqz\t%0, 1b\n\t"
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ __asm__ __volatile__(
+ "1: " __LL "%0, %1 # clear_bit \n"
+ " and %0, %2 \n"
+ " " __SC "%0, %1 \n"
+ " beqzl %0, 1b \n"
+ : "=&r" (temp), "=m" (*m)
+ : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__(
+ "1: " __LL "%0, %1 # clear_bit \n"
+ " and %0, %2 \n"
+ " " __SC "%0, %1 \n"
+ " beqz %0, 1b \n"
: "=&r" (temp), "=m" (*m)
: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
+ } else {
+ volatile unsigned long *a = addr;
+ unsigned long mask;
+ __bi_flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << (nr & SZLONG_MASK);
+ __bi_local_irq_save(flags);
+ *a &= ~mask;
+ __bi_local_irq_restore(flags);
+ }
}
/*
*/
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
- __asm__ __volatile__(
- "1:\t" __LL "\t%0, %1\t\t# change_bit\n\t"
- "xor\t%0, %2\n\t"
- __SC "\t%0, %1\n\t"
- "beqz\t%0, 1b"
+ __asm__ __volatile__(
+ "1: " __LL "%0, %1 # change_bit \n"
+ " xor %0, %2 \n"
+ " "__SC "%0, %1 \n"
+ " beqzl %0, 1b \n"
: "=&r" (temp), "=m" (*m)
: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+ } else if (cpu_has_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ "1: " __LL "%0, %1 # change_bit \n"
+ " xor %0, %2 \n"
+ " "__SC "%0, %1 \n"
+ " beqz %0, 1b \n"
+ : "=&r" (temp), "=m" (*m)
+ : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+ } else {
+ volatile unsigned long *a = addr;
+ unsigned long mask;
+ __bi_flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << (nr & SZLONG_MASK);
+ __bi_local_irq_save(flags);
+ *a ^= mask;
+ __bi_local_irq_restore(flags);
+ }
}
/*
static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp, res;
-
- __asm__ __volatile__(
- ".set\tnoreorder\t\t# test_and_set_bit\n"
- "1:\t" __LL "\t%0, %1\n\t"
- "or\t%2, %0, %3\n\t"
- __SC "\t%2, %1\n\t"
- "beqz\t%2, 1b\n\t"
- " and\t%2, %0, %3\n\t"
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp, res;
+
+ __asm__ __volatile__(
+ "1: " __LL "%0, %1 # test_and_set_bit \n"
+ " or %2, %0, %3 \n"
+ " " __SC "%2, %1 \n"
+ " beqzl %2, 1b \n"
+ " and %2, %0, %3 \n"
#ifdef CONFIG_SMP
- "sync\n\t"
+ "sync \n"
+#endif
+ : "=&r" (temp), "=m" (*m), "=&r" (res)
+ : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "memory");
+
+ return res != 0;
+ } else if (cpu_has_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp, res;
+
+ __asm__ __volatile__(
+ " .set noreorder # test_and_set_bit \n"
+ "1: " __LL "%0, %1 \n"
+ " or %2, %0, %3 \n"
+ " " __SC "%2, %1 \n"
+ " beqz %2, 1b \n"
+ " and %2, %0, %3 \n"
+#ifdef CONFIG_SMP
+ "sync \n"
#endif
".set\treorder"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
: "memory");
- return res != 0;
+ return res != 0;
+ } else {
+ volatile unsigned long *a = addr;
+ unsigned long mask;
+ int retval;
+ __bi_flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << (nr & SZLONG_MASK);
+ __bi_local_irq_save(flags);
+ retval = (mask & *a) != 0;
+ *a |= mask;
+ __bi_local_irq_restore(flags);
+
+ return retval;
+ }
}
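Similarly, a minimal C11 sketch (not the kernel implementation) of what test_and_set_bit() returns: atomic_fetch_or() yields the old word, from which the previous bit value is recovered; the example_atomic_test_and_set_bit name is hypothetical:

#include <stdatomic.h>

static int example_atomic_test_and_set_bit(unsigned long nr,
                                           _Atomic unsigned long *bitmap)
{
	unsigned long mask = 1UL << (nr & SZLONG_MASK);
	unsigned long old = atomic_fetch_or(&bitmap[nr >> SZLONG_LOG], mask);

	return (old & mask) != 0;	/* old value of the bit */
}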
/*
int retval;
a += nr >> SZLONG_LOG;
- mask = 1 << (nr & SZLONG_MASK);
+ mask = 1UL << (nr & SZLONG_MASK);
retval = (mask & *a) != 0;
*a |= mask;
static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp, res;
-
- __asm__ __volatile__(
- ".set\tnoreorder\t\t# test_and_clear_bit\n"
- "1:\t" __LL "\t%0, %1\n\t"
- "or\t%2, %0, %3\n\t"
- "xor\t%2, %3\n\t"
- __SC "\t%2, %1\n\t"
- "beqz\t%2, 1b\n\t"
- " and\t%2, %0, %3\n\t"
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp, res;
+
+ __asm__ __volatile__(
+ "1: " __LL "%0, %1 # test_and_clear_bit \n"
+ " or %2, %0, %3 \n"
+ " xor %2, %3 \n"
+ __SC "%2, %1 \n"
+ " beqzl %2, 1b \n"
+ " and %2, %0, %3 \n"
#ifdef CONFIG_SMP
- "sync\n\t"
+ " sync \n"
#endif
- ".set\treorder"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
: "memory");
- return res != 0;
+ return res != 0;
+ } else if (cpu_has_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp, res;
+
+ __asm__ __volatile__(
+ " .set noreorder # test_and_clear_bit \n"
+ "1: " __LL "%0, %1 \n"
+ " or %2, %0, %3 \n"
+ " xor %2, %3 \n"
+ __SC "%2, %1 \n"
+ " beqz %2, 1b \n"
+ " and %2, %0, %3 \n"
+#ifdef CONFIG_SMP
+ " sync \n"
+#endif
+ " .set reorder \n"
+ : "=&r" (temp), "=m" (*m), "=&r" (res)
+ : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "memory");
+
+ return res != 0;
+ } else {
+ volatile unsigned long *a = addr;
+ unsigned long mask;
+ int retval;
+ __bi_flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << (nr & SZLONG_MASK);
+ __bi_local_irq_save(flags);
+ retval = (mask & *a) != 0;
+ *a &= ~mask;
+ __bi_local_irq_restore(flags);
+
+ return retval;
+ }
}
/*
static inline int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp, res;
-
- __asm__ __volatile__(
- ".set\tnoreorder\t\t# test_and_change_bit\n"
- "1:\t" __LL "\t%0, %1\n\t"
- "xor\t%2, %0, %3\n\t"
- __SC "\t%2, %1\n\t"
- "beqz\t%2, 1b\n\t"
- " and\t%2, %0, %3\n\t"
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp, res;
+
+ __asm__ __volatile__(
+ "1: " __LL " %0, %1 # test_and_change_bit \n"
+ " xor %2, %0, %3 \n"
+ " "__SC "%2, %1 \n"
+ " beqzl %2, 1b \n"
+ " and %2, %0, %3 \n"
#ifdef CONFIG_SMP
- "sync\n\t"
+ " sync \n"
#endif
- ".set\treorder"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
: "memory");
- return res != 0;
-}
-
-/*
- * __test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_change_bit(unsigned long nr,
- volatile unsigned long *addr)
-{
- volatile unsigned long *a = addr;
- unsigned long mask;
- int retval;
-
- a += (nr >> SZLONG_LOG);
- mask = 1UL << (nr & SZLONG_MASK);
- retval = ((mask & *a) != 0);
- *a ^= mask;
-
- return retval;
-}
-
-#else /* MIPS I */
-
-/*
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(unsigned long nr, volatile unsigned long * addr)
-{
- volatile unsigned long *a = addr;
- unsigned long mask;
- __bi_flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1 << (nr & SZLONG_MASK);
- __bi_local_irq_save(flags);
- *a |= mask;
- __bi_local_irq_restore(flags);
-}
-
-/*
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __set_bit(unsigned long nr, volatile unsigned long * addr)
-{
- volatile unsigned long *a = addr;
- unsigned long mask;
-
- a += nr >> SZLONG_LOG;
- mask = 1 << (nr & SZLONG_MASK);
- *a |= mask;
-}
-
-/*
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(unsigned long nr, volatile unsigned long * addr)
-{
- volatile unsigned long *a = addr;
- unsigned long mask;
- __bi_flags;
+ return res != 0;
+ } else if (cpu_has_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp, res;
+
+ __asm__ __volatile__(
+ " .set noreorder # test_and_change_bit \n"
+ "1: " __LL " %0, %1 \n"
+ " xor %2, %0, %3 \n"
+ " "__SC "\t%2, %1 \n"
+ " beqz %2, 1b \n"
+ " and %2, %0, %3 \n"
+#ifdef CONFIG_SMP
+ " sync \n"
+#endif
+ " .set reorder \n"
+ : "=&r" (temp), "=m" (*m), "=&r" (res)
+ : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "memory");
- a += nr >> SZLONG_LOG;
- mask = 1 << (nr & SZLONG_MASK);
- __bi_local_irq_save(flags);
- *a &= ~mask;
- __bi_local_irq_restore(flags);
-}
+ return res != 0;
+ } else {
+ volatile unsigned long *a = addr;
+ unsigned long mask, retval;
+ __bi_flags;
-static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr)
-{
- volatile unsigned long *a = addr;
- unsigned long mask;
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << (nr & SZLONG_MASK);
+ __bi_local_irq_save(flags);
+ retval = (mask & *a) != 0;
+ *a ^= mask;
+ __bi_local_irq_restore(flags);
- a += nr >> SZLONG_LOG;
- mask = 1 << (nr & SZLONG_MASK);
- *a &= ~mask;
+ return retval;
+ }
}
/*
- * change_bit - Toggle a bit in memory
+ * __test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(unsigned long nr, volatile unsigned long * addr)
-{
- volatile unsigned long *a = addr;
- unsigned long mask;
- __bi_flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1 << (nr & SZLONG_MASK);
- __bi_local_irq_save(flags);
- *a ^= mask;
- __bi_local_irq_restore(flags);
-}
-
-/*
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __change_bit(unsigned long nr, volatile unsigned long * addr)
-{
- unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-
- *m ^= 1UL << (nr & SZLONG_MASK);
-}
-
-/*
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(unsigned long nr,
- volatile unsigned long * addr)
-{
- volatile unsigned long *a = addr;
- unsigned long mask;
- int retval;
- __bi_flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1 << (nr & SZLONG_MASK);
- __bi_local_irq_save(flags);
- retval = (mask & *a) != 0;
- *a |= mask;
- __bi_local_irq_restore(flags);
-
- return retval;
-}
-
-/*
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two instances of this operation race, one can appear to succeed
* but actually fail.  You must protect multiple accesses with a lock;
* a sketch follows the function below.
*/
-static inline int __test_and_set_bit(unsigned long nr,
+static inline int __test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned long mask;
int retval;
- a += nr >> SZLONG_LOG;
- mask = 1 << (nr & SZLONG_MASK);
- retval = (mask & *a) != 0;
- *a |= mask;
-
- return retval;
-}
-
-/*
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(unsigned long nr,
- volatile unsigned long * addr)
-{
- volatile unsigned long *a = addr;
- unsigned long mask;
- int retval;
- __bi_flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1 << (nr & SZLONG_MASK);
- __bi_local_irq_save(flags);
- retval = (mask & *a) != 0;
- *a &= ~mask;
- __bi_local_irq_restore(flags);
-
- return retval;
-}
-
-/*
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_clear_bit(unsigned long nr,
- volatile unsigned long * addr)
-{
- volatile unsigned long *a = addr;
- unsigned long mask;
- int retval;
-
a += (nr >> SZLONG_LOG);
mask = 1UL << (nr & SZLONG_MASK);
retval = ((mask & *a) != 0);
- *a &= ~mask;
-
- return retval;
-}
-
-/*
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(unsigned long nr,
- volatile unsigned long * addr)
-{
- volatile unsigned long *a = addr;
- unsigned long mask, retval;
- __bi_flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1 << (nr & SZLONG_MASK);
- __bi_local_irq_save(flags);
- retval = (mask & *a) != 0;
- *a ^= mask;
- __bi_local_irq_restore(flags);
-
- return retval;
-}
-
-/*
- * __test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_change_bit(unsigned long nr,
- volatile unsigned long * addr)
-{
- volatile unsigned long *a = addr;
- unsigned long mask;
- int retval;
-
- a += (nr >> SZLONG_LOG);
- mask = 1 << (nr & SZLONG_MASK);
- retval = (mask & *a) != 0;
*a ^= mask;
return retval;
}
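As a usage note, the non-atomic variants rely on the caller for serialisation; a minimal sketch with a hypothetical spinlock and bitmap, assuming <linux/spinlock.h> is available:

static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;	/* hypothetical */
static unsigned long example_map[4];			/* hypothetical */

static int example_locked_toggle(unsigned long nr)
{
	int old;

	spin_lock(&example_lock);
	old = __test_and_change_bit(nr, example_map);
	spin_unlock(&example_lock);

	return old;
}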
#undef __bi_flags
-#undef __bi_cli
-#undef __bi_save_flags
+#undef __bi_local_irq_save
#undef __bi_local_irq_restore
-#endif /* MIPS I */
-
/*
* test_bit - Determine whether a bit is set
* @nr: bit number to test