X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-alpha%2Fatomic.h;h=fc77f74130832a099ab91bf26449f1f3ea348690;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=1b383e3cb68c942454d3e8d049e3266a772eea52;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git

diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 1b383e3cb..fc77f7413 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -1,6 +1,8 @@
 #ifndef _ALPHA_ATOMIC_H
 #define _ALPHA_ATOMIC_H
 
+#include <asm/barrier.h>
+
 /*
  * Atomic operations that C can't guarantee us. Useful for
  * resource counting etc...
@@ -100,77 +102,95 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 static __inline__ long atomic_add_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1: ldl_l %0,%1\n"
 	" addl %0,%3,%2\n"
 	" addl %0,%3,%0\n"
 	" stl_c %0,%1\n"
 	" beq %0,2f\n"
-	" mb\n"
 	".subsection 2\n"
 	"2: br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
 static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1: ldq_l %0,%1\n"
 	" addq %0,%3,%2\n"
 	" addq %0,%3,%0\n"
 	" stq_c %0,%1\n"
 	" beq %0,2f\n"
-	" mb\n"
 	".subsection 2\n"
 	"2: br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
 static __inline__ long atomic_sub_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1: ldl_l %0,%1\n"
 	" subl %0,%3,%2\n"
 	" subl %0,%3,%0\n"
 	" stl_c %0,%1\n"
 	" beq %0,2f\n"
-	" mb\n"
 	".subsection 2\n"
 	"2: br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
 static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1: ldq_l %0,%1\n"
 	" subq %0,%3,%2\n"
 	" subq %0,%3,%0\n"
 	" stq_c %0,%1\n"
 	" beq %0,2f\n"
-	" mb\n"
 	".subsection 2\n"
 	"2: br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#define atomic_add_unless(v, a, u) \
+({ \
+	int c, old; \
+	c = atomic_read(v); \
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+		c = old; \
+	c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic64_dec_return(v) atomic64_sub_return(1,(v))
 
@@ -181,6 +201,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 #define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
 
 #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
+
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
 
@@ -195,4 +217,5 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 #define smp_mb__before_atomic_inc() smp_mb()
 #define smp_mb__after_atomic_inc() smp_mb()
 
+#include <asm-generic/atomic.h>
 #endif /* _ALPHA_ATOMIC_H */
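
For reference, the atomic_add_unless() macro introduced by this patch is a compare-and-swap retry loop: it adds a to v unless v currently holds u, and evaluates to non-zero when the addition was performed; atomic_inc_not_zero() builds on it for reference counting. Below is a minimal user-space sketch of that same loop, assuming a GCC toolchain and using the __sync_val_compare_and_swap() builtin in place of the kernel's cmpxchg(); the demo_add_unless() and refcount names are made up for illustration and are not part of the patch.

#include <stdio.h>

/* User-space sketch of the atomic_add_unless() loop shown in the patch.
 * __sync_val_compare_and_swap() returns the value that was in *v before
 * the attempt, just like the kernel's cmpxchg(). */
static int demo_add_unless(int *v, int a, int u)
{
	int c = *v;
	int old;

	/* Retry while *v is not 'u' and another thread changed it under us. */
	while (c != u && (old = __sync_val_compare_and_swap(v, c, c + a)) != c)
		c = old;
	return c != u;	/* non-zero if the addition was performed */
}

int main(void)
{
	int refcount = 1;

	/* Succeeds: refcount is 1 (not 0), so it is bumped to 2. */
	printf("add_unless -> %d, refcount=%d\n",
	       demo_add_unless(&refcount, 1, 0), refcount);

	refcount = 0;
	/* Refused: refcount is already 0, so nothing is added. */
	printf("add_unless -> %d, refcount=%d\n",
	       demo_add_unless(&refcount, 1, 0), refcount);
	return 0;
}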