X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-sh%2Fatomic.h;h=e12570b9339dcb1a127222cdc04383752a88fc59;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=e95641a430443f51d1b6f208e76fbc1fb25eeca9;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h index e95641a43..e12570b93 100644 --- a/include/asm-sh/atomic.h +++ b/include/asm-sh/atomic.h @@ -14,84 +14,66 @@ typedef struct { volatile int counter; } atomic_t; #define atomic_read(v) ((v)->counter) #define atomic_set(v,i) ((v)->counter = (i)) +#include #include -/* - * To get proper branch prediction for the main line, we must branch - * forward to code at the end of this object's .text section, then - * branch back to restart the operation. - */ - -static __inline__ void atomic_add(int i, atomic_t * v) -{ - unsigned long flags; - - local_irq_save(flags); - *(long *)v += i; - local_irq_restore(flags); -} - -static __inline__ void atomic_sub(int i, atomic_t *v) -{ - unsigned long flags; - - local_irq_save(flags); - *(long *)v -= i; - local_irq_restore(flags); -} - -static __inline__ int atomic_add_return(int i, atomic_t * v) -{ - unsigned long temp, flags; +#ifdef CONFIG_CPU_SH4A +#include +#else +#include +#endif - local_irq_save(flags); - temp = *(long *)v; - temp += i; - *(long *)v = temp; - local_irq_restore(flags); - - return temp; -} - -static __inline__ int atomic_sub_return(int i, atomic_t * v) -{ - unsigned long temp, flags; - - local_irq_save(flags); - temp = *(long *)v; - temp -= i; - *(long *)v = temp; - local_irq_restore(flags); - - return temp; -} +#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) #define atomic_dec_return(v) atomic_sub_return(1,(v)) #define atomic_inc_return(v) atomic_add_return(1,(v)) +/* + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) + #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) #define atomic_inc(v) atomic_add(1,(v)) #define atomic_dec(v) atomic_sub(1,(v)) -static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v) +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { + int ret; unsigned long flags; local_irq_save(flags); - *(long *)v &= ~mask; + ret = v->counter; + if (likely(ret == old)) + v->counter = new; local_irq_restore(flags); + + return ret; } -static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v) +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +static inline int atomic_add_unless(atomic_t *v, int a, int u) { + int ret; unsigned long flags; local_irq_save(flags); - *(long *)v |= mask; + ret = v->counter; + if (ret != u) + v->counter += a; local_irq_restore(flags); + + return ret != u; } +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) /* Atomic operations are already serializing on SH */ #define smp_mb__before_atomic_dec() barrier() @@ -99,4 +81,5 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +#include #endif /* __ASM_SH_ATOMIC_H */