static __inline__ void atomic_add(int i, atomic_t *v)
{
- __asm__ __volatile__("addl %1,%0" : "=m" (*v) : "d" (i), "0" (*v));
+#ifdef CONFIG_COLDFIRE
+ __asm__ __volatile__("addl %1,%0" : "+m" (*v) : "d" (i));
+#else
+ __asm__ __volatile__("addl %1,%0" : "+m" (*v) : "di" (i));
+#endif
}
static __inline__ void atomic_sub(int i, atomic_t *v)
{
- __asm__ __volatile__("subl %1,%0" : "=m" (*v) : "d" (i), "0" (*v));
+#ifdef CONFIG_COLDFIRE
+ __asm__ __volatile__("subl %1,%0" : "+m" (*v) : "d" (i));
+#else
+ __asm__ __volatile__("subl %1,%0" : "+m" (*v) : "di" (i));
+#endif
+}
+
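+/*
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns true if the result is
+ * zero, or false for all other cases.
+ */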
+static __inline__ int atomic_sub_and_test(int i, atomic_t * v)
+{
+ char c;
+#ifdef CONFIG_COLDFIRE
+ __asm__ __volatile__("subl %2,%1; seq %0"
+ : "=d" (c), "+m" (*v)
+ : "d" (i));
+#else
+ __asm__ __volatile__("subl %2,%1; seq %0"
+ : "=d" (c), "+m" (*v)
+ : "di" (i));
+#endif
+ return c != 0;
}
static __inline__ void atomic_inc(volatile atomic_t *v)
{
- __asm__ __volatile__("addql #1,%0" : "=m" (*v): "0" (*v));
+ __asm__ __volatile__("addql #1,%0" : "+m" (*v));
+}
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
+{
+ char c;
+ __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
+ return c != 0;
}
static __inline__ void atomic_dec(volatile atomic_t *v)
{
- __asm__ __volatile__("subql #1,%0" : "=m" (*v): "0" (*v));
+ __asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
char c;
- __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "=m" (*v): "1" (*v));
+ __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
-#define atomic_clear_mask(mask, v) \
- __asm__ __volatile__("andl %1,%0" : "=m" (*v) : "id" (~(mask)),"0"(*v))
+static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
+{
+ __asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
+}
-#define atomic_set_mask(mask, v) \
- __asm__ __volatile__("orl %1,%0" : "=m" (*v) : "id" (mask),"0"(*v))
+static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
+{
+ __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
+}
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-extern __inline__ int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t * v)
{
unsigned long temp, flags;
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-extern __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t * v)
{
unsigned long temp, flags;
return temp;
}
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-#define atomic_inc_return(v) atomic_add_return(1,(v))
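+/* atomic_cmpxchg() and atomic_xchg() are built on cmpxchg()/xchg() of the counter. */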
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
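+/*
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */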
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
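+/* Increment @v unless it was zero; returns non-zero if the increment was done. */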
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic_dec_return(v) atomic_sub_return(1,(v))
+#define atomic_inc_return(v) atomic_add_return(1,(v))
+#include <asm-generic/atomic.h>
#endif /* __ARCH_M68KNOMMU_ATOMIC __ */