X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-m68k%2Fatomic.h;h=3f867738062a94bd65723501f530ce94c267a971;hb=9bf4aaab3e101692164d49b7ca357651eb691cb6;hp=d7ef37661179e0d682124410da3e82690f4e5de9;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index d7ef37661..3f8677380 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -1,6 +1,8 @@
 #ifndef __ARCH_M68K_ATOMIC__
 #define __ARCH_M68K_ATOMIC__
 
+#include <asm/system.h>	/* local_irq_XXX() */
+
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
@@ -16,38 +18,124 @@ typedef struct { int counter; } atomic_t;
 #define atomic_read(v)		((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = i)
 
-static __inline__ void atomic_add(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
+{
+	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
+}
+
+static inline int atomic_dec_and_test(atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
+	return c != 0;
+}
+
+static inline int atomic_inc_and_test(atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
+	return c != 0;
+}
+
+#ifdef CONFIG_RMW_INSNS
+static inline int atomic_add_return(int i, atomic_t *v)
 {
-	__asm__ __volatile__("addl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
+	int t, tmp;
+
+	__asm__ __volatile__(
+			"1:	movel %2,%1\n"
+			"	addl %3,%1\n"
+			"	casl %2,%1,%0\n"
+			"	jne 1b"
+			: "+m" (*v), "=&d" (t), "=&d" (tmp)
+			: "g" (i), "2" (atomic_read(v)));
+	return t;
 }
 
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
-	__asm__ __volatile__("subl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
+	int t, tmp;
+
+	__asm__ __volatile__(
+			"1:	movel %2,%1\n"
+			"	subl %3,%1\n"
+			"	casl %2,%1,%0\n"
+			"	jne 1b"
+			: "+m" (*v), "=&d" (t), "=&d" (tmp)
+			: "g" (i), "2" (atomic_read(v)));
+	return t;
+}
+#else /* !CONFIG_RMW_INSNS */
+static inline int atomic_add_return(int i, atomic_t * v)
+{
+	unsigned long flags;
+	int t;
+
+	local_irq_save(flags);
+	t = atomic_read(v);
+	t += i;
+	atomic_set(v, t);
+	local_irq_restore(flags);
+
+	return t;
 }
 
-static __inline__ void atomic_inc(volatile atomic_t *v)
+static inline int atomic_sub_return(int i, atomic_t * v)
 {
-	__asm__ __volatile__("addql #1,%0" : "=m" (*v): "0" (*v));
+	unsigned long flags;
+	int t;
+
+	local_irq_save(flags);
+	t = atomic_read(v);
+	t -= i;
+	atomic_set(v, t);
+	local_irq_restore(flags);
+
+	return t;
 }
+#endif /* !CONFIG_RMW_INSNS */
+
+#define atomic_dec_return(v)	atomic_sub_return(1, (v))
+#define atomic_inc_return(v)	atomic_add_return(1, (v))
 
-static __inline__ void atomic_dec(volatile atomic_t *v)
+static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
-	__asm__ __volatile__("subql #1,%0" : "=m" (*v): "0" (*v));
+	char c;
+	__asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v): "g" (i));
+	return c != 0;
 }
 
-static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
+static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	char c;
-	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "=m" (*v): "1" (*v));
+	__asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i));
 	return c != 0;
 }
 
-#define atomic_clear_mask(mask, v) \
-	__asm__ __volatile__("andl %1,%0" : "=m" (*v) : "id" (~(mask)),"0"(*v))
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
+{
+	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
+}
 
-#define atomic_set_mask(mask, v) \
-	__asm__ __volatile__("orl %1,%0" : "=m" (*v) : "id" (mask),"0"(*v))
+static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
+{
+	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
+}
 
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
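
Note: the CONFIG_RMW_INSNS variants of atomic_add_return()/atomic_sub_return() above spin on the 68020+ casl (compare-and-swap) instruction: read the counter, compute the new value, and retry if another CPU changed the counter in the meantime. The fallback variants get the same effect on CPUs without casl by disabling interrupts around a plain read-modify-write. A minimal portable sketch of that retry pattern, using GCC's __sync_val_compare_and_swap builtin in place of the m68k inline assembly (illustration only, not part of this header):

	/* Sketch of the read/modify/compare-and-swap retry loop used by the
	 * CONFIG_RMW_INSNS atomic_add_return() above, with a GCC builtin
	 * standing in for the casl instruction. */
	static inline int sketch_atomic_add_return(int i, volatile int *counter)
	{
		int old, new_val;

		do {
			old = *counter;		/* snapshot the current value */
			new_val = old + i;	/* value we want to install   */
			/* Install new_val only if *counter still equals old;
			 * otherwise another CPU raced with us, so retry. */
		} while (__sync_val_compare_and_swap(counter, old, new_val) != old);

		return new_val;
	}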