Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 7d89e87..2c8b853 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
 #define _ASM_ATOMIC_H
 
 #include <asm/cpu-features.h>
+#include <asm/interrupt.h>
 #include <asm/war.h>
 
-extern spinlock_t atomic_lock;
-
 typedef struct { volatile int counter; } atomic_t;
 
 #define ATOMIC_INIT(i)    { (i) }
@@ -62,28 +61,32 @@ static __inline__ void atomic_add(int i, atomic_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     ll      %0, %1          # atomic_add            \n"
                "       addu    %0, %2                                  \n"
                "       sc      %0, %1                                  \n"
                "       beqzl   %0, 1b                                  \n"
+               "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else if (cpu_has_llsc) {
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     ll      %0, %1          # atomic_add            \n"
                "       addu    %0, %2                                  \n"
                "       sc      %0, %1                                  \n"
                "       beqz    %0, 1b                                  \n"
+               "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                v->counter += i;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 }
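
The .set mips3/.set mips0 pairs added throughout this patch let gas assemble ll/sc (and, further down, lld/scd) even when the kernel is built for an ISA level that lacks them; .set mips0 then restores whatever ISA was selected on the command line. Semantically the loop is a load-linked/store-conditional retry. A userspace sketch of the same behaviour, with GCC's __atomic builtins standing in for ll/sc (an illustration of the semantics, not the kernel implementation):

	#include <stdio.h>

	typedef struct { volatile int counter; } sketch_atomic_t;

	static void sketch_atomic_add(int i, sketch_atomic_t *v)
	{
		int old, new;

		do {
			old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);	/* ~ ll   */
			new = old + i;						/* ~ addu */
			/* ~ sc: fails, and we retry, if the location changed */
		} while (!__atomic_compare_exchange_n(&v->counter, &old, new, 0,
						      __ATOMIC_RELAXED, __ATOMIC_RELAXED));
	}

	int main(void)
	{
		sketch_atomic_t v = { 40 };

		sketch_atomic_add(2, &v);
		printf("%d\n", v.counter);	/* prints 42 */
		return 0;
	}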
 
@@ -100,28 +103,32 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     ll      %0, %1          # atomic_sub            \n"
                "       subu    %0, %2                                  \n"
                "       sc      %0, %1                                  \n"
                "       beqzl   %0, 1b                                  \n"
+               "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else if (cpu_has_llsc) {
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     ll      %0, %1          # atomic_sub            \n"
                "       subu    %0, %2                                  \n"
                "       sc      %0, %1                                  \n"
                "       beqz    %0, 1b                                  \n"
+               "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                v->counter -= i;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 }
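
The other half of these hunks retires the global atomic_lock: the MIPS CPUs that lack ll/sc are uniprocessor designs, so masking local interrupts is already enough to make the read-modify-write atomic, and the spinlock was pure overhead. A minimal sketch of the pattern, assuming the usual local_irq_save()/local_irq_restore() helpers:

	/*
	 * UP-only fallback: with a single CPU and local interrupts
	 * masked, nothing can interleave with this sequence.
	 */
	static inline int up_fallback_add_return(atomic_t *v, int i)
	{
		unsigned long flags;
		int result;

		local_irq_save(flags);		/* sole CPU, IRQs off */
		result = v->counter + i;
		v->counter = result;
		local_irq_restore(flags);

		return result;
	}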
 
@@ -136,12 +143,14 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     ll      %1, %2          # atomic_add_return     \n"
                "       addu    %0, %1, %3                              \n"
                "       sc      %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       addu    %0, %1, %3                              \n"
                "       sync                                            \n"
+               "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
@@ -149,23 +158,25 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     ll      %1, %2          # atomic_add_return     \n"
                "       addu    %0, %1, %3                              \n"
                "       sc      %0, %2                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       addu    %0, %1, %3                              \n"
                "       sync                                            \n"
+               "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                result = v->counter;
                result += i;
                v->counter = result;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 
        return result;
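
The sync instruction in both loops is what makes the *_return variants serializing (the comment near the end of this file spells that out). That full-barrier property is what reference-count teardown relies on; a hedged sketch, where struct object and free_object() are hypothetical:

	struct object {					/* hypothetical example type */
		atomic_t refcnt;
		/* ... payload ... */
	};

	static void free_object(struct object *obj);	/* hypothetical teardown */

	static void sketch_put_object(struct object *obj)
	{
		/*
		 * atomic_sub_return() is a full barrier, so every prior
		 * write to *obj is visible before teardown may begin.
		 */
		if (atomic_sub_return(1, &obj->refcnt) == 0)
			free_object(obj);
	}
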
@@ -179,12 +190,14 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     ll      %1, %2          # atomic_sub_return     \n"
                "       subu    %0, %1, %3                              \n"
                "       sc      %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       subu    %0, %1, %3                              \n"
                "       sync                                            \n"
+               "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
@@ -192,34 +205,37 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     ll      %1, %2          # atomic_sub_return     \n"
                "       subu    %0, %1, %3                              \n"
                "       sc      %0, %2                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       subu    %0, %1, %3                              \n"
                "       sync                                            \n"
+               "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                result = v->counter;
                result -= i;
                v->counter = result;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 
        return result;
 }
 
 /*
- * atomic_sub_if_positive - add integer to atomic variable
+ * atomic_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically test @v and decrement if it is greater than 0.
- * The function returns the old value of @v minus 1.
+ * Atomically test @v and subtract @i if @v is greater than or equal to @i.
+ * The function returns the old value of @v minus @i.
  */
 static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
@@ -229,13 +245,18 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     ll      %1, %2          # atomic_sub_if_positive\n"
                "       subu    %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
                "       sc      %0, %2                                  \n"
+               "       .set    noreorder                               \n"
                "       beqzl   %0, 1b                                  \n"
+               "        subu   %0, %1, %3                              \n"
+               "       .set    reorder                                 \n"
                "       sync                                            \n"
                "1:                                                     \n"
+               "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
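
The .set noreorder additions in this hunk and the next are a correctness fix rather than a cleanup: sc overwrites %0 with its success flag, so without a recomputation the function would hand that flag back instead of the new value. The subu placed in the branch delay slot redoes the subtraction (noreorder stops gas from filling the slot on its own); with the plain beqz of the non-R10000 variant below, the delay slot also executes on the fall-through path, so a successful sc returns old minus i as intended. The contract, as a plain C sketch (the atomicity comes from the ll/sc loop, which the sketch omits):

	static inline int sketch_sub_if_positive(int i, atomic_t *v)
	{
		int result = v->counter - i;

		if (result >= 0)
			v->counter = result;	/* store only when no underflow */

		return result;			/* old value minus i, even if negative */
	}
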
@@ -243,30 +264,57 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     ll      %1, %2          # atomic_sub_if_positive\n"
                "       subu    %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
                "       sc      %0, %2                                  \n"
+               "       .set    noreorder                               \n"
                "       beqz    %0, 1b                                  \n"
+               "        subu   %0, %1, %3                              \n"
+               "       .set    reorder                                 \n"
                "       sync                                            \n"
                "1:                                                     \n"
+               "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                result = v->counter;
                result -= i;
                if (result >= 0)
                        v->counter = result;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 
        return result;
 }
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)                             \
+({                                                             \
+       int c, old;                                             \
+       c = atomic_read(v);                                     \
+       while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+               c = old;                                        \
+       c != (u);                                               \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))
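
The new atomic_cmpxchg() is also what makes the lookup-side "take a reference only if the object is still alive" idiom work, which atomic_inc_not_zero() packages up. A hedged usage sketch, reusing the hypothetical struct object from above (lookup_object() is likewise hypothetical):

	/*
	 * The object may be concurrently on its way to being freed, so
	 * only take a reference if the count is still non-zero;
	 * atomic_inc_not_zero() returns 0 when that race is lost.
	 */
	static struct object *sketch_get_object(int key)
	{
		struct object *obj = lookup_object(key);	/* hypothetical */

		if (obj && !atomic_inc_not_zero(&obj->refcnt))
			obj = NULL;				/* already dying */

		return obj;
	}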
 
@@ -334,7 +382,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
  */
 #define atomic_add_negative(i,v) (atomic_add_return(i, (v)) < 0)
 
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
 
 typedef struct { volatile __s64 counter; } atomic64_t;
 
@@ -367,28 +415,32 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_add          \n"
                "       addu    %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqzl   %0, 1b                                  \n"
+               "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else if (cpu_has_llsc) {
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_add          \n"
                "       addu    %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqz    %0, 1b                                  \n"
+               "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                v->counter += i;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 }
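
One caveat in the atomic64_* add/sub routines, untouched by this patch: lld/scd operate on 64-bit values, but the arithmetic between them is still the 32-bit addu/subu, whose result on a 64-bit CPU is only defined for sign-extended 32-bit operands, so a counter that crosses 2^31 is corrupted. (atomic64_sub_if_positive below already uses dsubu, and later kernels switch these operations to daddu/dsubu as well.) A portable C illustration of the truncation:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t counter = INT64_C(0x7fffffff);		/* INT32_MAX */

		int64_t wide   = counter + 1;			/* what daddu yields */
		int64_t narrow = (int32_t)(counter + 1);	/* what addu yields: the
								   usual two's-complement
								   wrap to a negative value */

		printf("%lld vs %lld\n", (long long)wide, (long long)narrow);
		return 0;
	}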
 
@@ -405,28 +457,32 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_sub          \n"
                "       subu    %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqzl   %0, 1b                                  \n"
+               "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else if (cpu_has_llsc) {
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_sub          \n"
                "       subu    %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqz    %0, 1b                                  \n"
+               "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                v->counter -= i;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 }
 
@@ -441,12 +497,14 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_add_return   \n"
                "       addu    %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       addu    %0, %1, %3                              \n"
                "       sync                                            \n"
+               "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
@@ -454,23 +512,25 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_add_return   \n"
                "       addu    %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       addu    %0, %1, %3                              \n"
                "       sync                                            \n"
+               "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                result = v->counter;
                result += i;
                v->counter = result;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 
        return result;
@@ -484,12 +544,14 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_sub_return   \n"
                "       subu    %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       subu    %0, %1, %3                              \n"
                "       sync                                            \n"
+               "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
@@ -497,34 +559,37 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_sub_return   \n"
                "       subu    %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       subu    %0, %1, %3                              \n"
                "       sync                                            \n"
+               "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                result = v->counter;
                result -= i;
                v->counter = result;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 
        return result;
 }
 
 /*
- * atomic64_sub_if_positive - add integer to atomic variable
+ * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
  * @v: pointer of type atomic64_t
  *
- * Atomically test @v and decrement if it is greater than 0.
- * The function returns the old value of @v minus 1.
+ * Atomically test @v and subtract @i if @v is greater than or equal to @i.
+ * The function returns the old value of @v minus @i.
  */
 static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
@@ -534,13 +599,18 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
                "       dsubu   %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
                "       scd     %0, %2                                  \n"
+               "       .set    noreorder                               \n"
                "       beqzl   %0, 1b                                  \n"
+               "        dsubu  %0, %1, %3                              \n"
+               "       .set    reorder                                 \n"
                "       sync                                            \n"
                "1:                                                     \n"
+               "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
@@ -548,25 +618,30 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                unsigned long temp;
 
                __asm__ __volatile__(
+               "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
                "       dsubu   %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
                "       scd     %0, %2                                  \n"
+               "       .set    noreorder                               \n"
                "       beqz    %0, 1b                                  \n"
+               "        dsubu  %0, %1, %3                              \n"
+               "       .set    reorder                                 \n"
                "       sync                                            \n"
                "1:                                                     \n"
+               "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                result = v->counter;
                result -= i;
                if (result >= 0)
                        v->counter = result;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 
        return result;
@@ -639,7 +714,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
  */
 #define atomic64_add_negative(i,v) (atomic64_add_return(i, (v)) < 0)
 
-#endif /* CONFIG_MIPS64 */
+#endif /* CONFIG_64BIT */
 
 /*
  * atomic*_return operations are serializing but not the non-*_return
@@ -650,4 +725,5 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 #define smp_mb__before_atomic_inc()    smp_mb()
 #define smp_mb__after_atomic_inc()     smp_mb()
 
+#include <asm-generic/atomic.h>
 #endif /* _ASM_ATOMIC_H */
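
The new asm-generic/atomic.h include at the bottom supplies the atomic_long_t layer, which resolves to atomic_t or atomic64_t according to BITS_PER_LONG, so the same counter code builds on 32-bit and 64-bit MIPS kernels alike. A hedged usage sketch:

	static atomic_long_t sketch_nr_allocated = ATOMIC_LONG_INIT(0);

	static void sketch_count_alloc(void)
	{
		atomic_long_inc(&sketch_nr_allocated);	/* word-sized atomic */
	}

	static long sketch_read_count(void)
	{
		return atomic_long_read(&sketch_nr_allocated);
	}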