X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-ia64%2Fatomic.h;h=569ec7574baf24b01ce450c4f6a5cdc32d805a60;hb=a2f44b27303a5353859d77a3e96a1d3f33f56ab7;hp=f2e179d4bb76d989e1265f988b6085d31c9e492e;hpb=daddc0d38b3571bed170afa273a49a0eba090c1e;p=linux-2.6.git

diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index f2e179d4b..569ec7574 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -56,7 +56,7 @@ ia64_atomic64_add (__s64 i, atomic64_t *v)
 		CMPXCHG_BUGCHECK(v);
 		old = atomic_read(v);
 		new = old + i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
 	return new;
 }
 
@@ -84,10 +84,29 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 		CMPXCHG_BUGCHECK(v);
 		old = atomic_read(v);
 		new = old - i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
 	return new;
 }
 
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#define atomic_add_unless(v, a, u)				\
+({								\
+	int c, old;						\
+	c = atomic_read(v);					\
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
+		c = old;					\
+	}							\
+	c != (u);						\
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_add_return(i,v)						\
 ({									\
	int __ia64_aar_i = (i);						\
@@ -159,10 +178,10 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 
 #define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
 #define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
-#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) != 0)
+#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
 #define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
 #define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) != 0)
+#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
 
 #define atomic_add(i,v)			atomic_add_return((i), (v))
 #define atomic_sub(i,v)			atomic_sub_return((i), (v))
@@ -180,4 +199,5 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
+#include <asm-generic/atomic.h>
 #endif /* _ASM_IA64_ATOMIC_H */
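
Note (not part of the patch): the atomic_add_unless() macro introduced above is a plain compare-and-swap retry loop. It reads the counter, refuses to touch it if it already holds the forbidden value u, otherwise tries to install c + a, and retries whenever another CPU changed the counter in the meantime; atomic_inc_not_zero() builds the usual "take a reference only while the object is still live" pattern on top of it. The userspace sketch below is an illustrative analogue only: it uses C11 <stdatomic.h> in place of the ia64 cmpxchg-based atomic_cmpxchg(), and the helper name add_unless() and the refcount example are assumptions made for the sketch, not kernel code.

/*
 * Userspace analogue of the atomic_add_unless() loop -- NOT kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

/* Add 'a' to *v unless *v == u; return nonzero iff the add happened. */
static int add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	for (;;) {
		if (c == u)	/* counter already holds the forbidden value */
			break;
		/*
		 * Try to swap c -> c + a.  On failure, C11 writes the current
		 * value back into c and we retry, just like the "c = old"
		 * path in the kernel macro.
		 */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			break;
	}
	return c != u;
}

int main(void)
{
	atomic_int refcount = 1;

	/* atomic_inc_not_zero() pattern: only take a reference while live */
	if (add_unless(&refcount, 1, 0))
		printf("reference taken, count = %d\n", atomic_load(&refcount));
	return 0;
}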