X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-m68k%2Fatomic.h;h=d5eed64cb833d277bcc459ab0175022c045232be;hb=7172c64a7cee4dfa95864f49c914f7ea8cf497c8;hp=d7ef37661179e0d682124410da3e82690f4e5de9;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index d7ef37661..d5eed64cb 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -1,6 +1,9 @@
 #ifndef __ARCH_M68K_ATOMIC__
 #define __ARCH_M68K_ATOMIC__
 
+
+#include <asm/system.h>	/* local_irq_XXX() */
+
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
@@ -16,38 +19,172 @@ typedef struct { int counter; } atomic_t;
 #define atomic_read(v)		((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = i)
 
-static __inline__ void atomic_add(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
-	__asm__ __volatile__("addl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
+	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
 }
 
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
-	__asm__ __volatile__("subl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
+	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
 }
 
-static __inline__ void atomic_inc(volatile atomic_t *v)
+static inline void atomic_inc(atomic_t *v)
 {
-	__asm__ __volatile__("addql #1,%0" : "=m" (*v): "0" (*v));
+	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
 }
 
-static __inline__ void atomic_dec(volatile atomic_t *v)
+static inline void atomic_dec(atomic_t *v)
 {
-	__asm__ __volatile__("subql #1,%0" : "=m" (*v): "0" (*v));
+	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
+}
+
+static inline int atomic_dec_and_test(atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
+	return c != 0;
 }
 
-static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
+static inline int atomic_inc_and_test(atomic_t *v)
 {
 	char c;
-	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "=m" (*v): "1" (*v));
+	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
 	return c != 0;
 }
 
-#define atomic_clear_mask(mask, v) \
-	__asm__ __volatile__("andl %1,%0" : "=m" (*v) : "id" (~(mask)),"0"(*v))
+#ifdef CONFIG_RMW_INSNS
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	int t, tmp;
+
+	__asm__ __volatile__(
+			"1:	movel %2,%1\n"
+			"	addl %3,%1\n"
+			"	casl %2,%1,%0\n"
+			"	jne 1b"
+			: "+m" (*v), "=&d" (t), "=&d" (tmp)
+			: "g" (i), "2" (atomic_read(v)));
+	return t;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	int t, tmp;
+
+	__asm__ __volatile__(
+			"1:	movel %2,%1\n"
+			"	subl %3,%1\n"
+			"	casl %2,%1,%0\n"
+			"	jne 1b"
+			: "+m" (*v), "=&d" (t), "=&d" (tmp)
+			: "g" (i), "2" (atomic_read(v)));
+	return t;
+}
+
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#else /* !CONFIG_RMW_INSNS */
+
+static inline int atomic_add_return(int i, atomic_t * v)
+{
+	unsigned long flags;
+	int t;
+
+	local_irq_save(flags);
+	t = atomic_read(v);
+	t += i;
+	atomic_set(v, t);
+	local_irq_restore(flags);
+
+	return t;
+}
+
+static inline int atomic_sub_return(int i, atomic_t * v)
+{
+	unsigned long flags;
+	int t;
+
+	local_irq_save(flags);
+	t = atomic_read(v);
+	t -= i;
+	atomic_set(v, t);
+	local_irq_restore(flags);
+
+	return t;
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	unsigned long flags;
+	int prev;
+
+	local_irq_save(flags);
+	prev = atomic_read(v);
+	if (prev == old)
+		atomic_set(v, new);
+	local_irq_restore(flags);
+	return prev;
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+	unsigned long flags;
+	int prev;
+
+	local_irq_save(flags);
+	prev = atomic_read(v);
+	atomic_set(v, new);
+	local_irq_restore(flags);
+	return prev;
+}
+
+#endif /* !CONFIG_RMW_INSNS */
+
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+static inline int atomic_sub_and_test(int i, atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v): "g" (i));
+	return c != 0;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i));
+	return c != 0;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
+{
+	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
+}
+
+static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
+{
+	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
+}
 
-#define atomic_set_mask(mask, v) \
-	__asm__ __volatile__("orl %1,%0" : "=m" (*v) : "id" (mask),"0"(*v))
+#define atomic_add_unless(v, a, u)				\
+({								\
+	int c, old;						\
+	c = atomic_read(v);					\
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
+		c = old;					\
+	}							\
+	c != (u);						\
+})
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
@@ -55,4 +192,5 @@ static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
+#include <asm-generic/atomic.h>
 #endif /* __ARCH_M68K_ATOMIC __ */
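The CONFIG_RMW_INSNS version of atomic_add_return() above is a compare-and-swap
retry loop around the 68020+ casl instruction: load the counter, compute the new
value, then casl it back, jumping to 1b if another CPU modified the counter in
between. A minimal portable sketch of the same loop, using GCC's
__sync_val_compare_and_swap() builtin as a stand-in for casl (the helper name
add_return_sketch is illustrative, not part of this header):

#include <stdio.h>

/* Sketch only: mirrors the casl loop in atomic_add_return() above.
 * __sync_val_compare_and_swap(p, old, new) stores new at *p iff *p == old,
 * and returns the value *p held beforehand. */
static int add_return_sketch(int *v, int i)
{
	int old, new;

	do {
		old = *v;	/* 1: movel %2,%1 */
		new = old + i;	/*    addl  %3,%1 */
		/* casl %2,%1,%0; jne 1b -- retry if *v changed since the load */
	} while (__sync_val_compare_and_swap(v, old, new) != old);

	return new;
}

int main(void)
{
	int v = 40;

	printf("%d\n", add_return_sketch(&v, 2));	/* prints 42 */
	return 0;
}

On CPUs without cas (plain 68000, early Coldfire), the !CONFIG_RMW_INSNS branch
gets the same effect by disabling interrupts around a plain read-modify-write,
which is sufficient on a uniprocessor.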
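atomic_add_unless() builds the same retry pattern on top of atomic_cmpxchg():
keep trying to swap c for c + (a) until either the swap succeeds or the counter
is observed at the forbidden value (u); the result reports whether the add
happened. A userspace sketch of that logic, assuming C11 <stdatomic.h> in place
of the kernel primitives (add_unless_sketch is an illustrative name):

#include <stdatomic.h>
#include <stdio.h>

/* Sketch only: add a to *v unless *v == u; nonzero return means the add
 * happened. On failure, atomic_compare_exchange_weak() reloads c with the
 * current value, exactly like the `c = old;` step in the macro above. */
static int add_unless_sketch(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	for (;;) {
		if (c == u)
			break;	/* forbidden value reached: give up */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			break;	/* swap succeeded */
	}
	return c != u;
}

int main(void)
{
	atomic_int v = 1;

	printf("%d\n", add_unless_sketch(&v, 1, 0));	/* 1: v is now 2 */
	atomic_store(&v, 0);
	printf("%d\n", add_unless_sketch(&v, 1, 0));	/* 0: v stays 0 */
	return 0;
}

atomic_inc_not_zero() is then just atomic_add_unless(v, 1, 0), i.e. "take a
reference unless the count has already dropped to zero".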