X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-s390%2Fatomic.h;h=399bf02894dd10cf5fc320557484eb0fcb27f7cd;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=d5a05cf4716805931c4a6ebbca4268bc65ff5d8b;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git

diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index d5a05cf47..399bf0289 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -1,11 +1,13 @@
 #ifndef __ARCH_S390_ATOMIC__
 #define __ARCH_S390_ATOMIC__
 
+#include <linux/compiler.h>
+
 /*
  * include/asm-s390/atomic.h
  *
  *  S390 version
- *    Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *               Denis Joseph Barrow,
  *               Arnd Bergmann (arndb@de.ibm.com)
@@ -45,59 +47,64 @@ typedef struct {
 #define atomic_read(v)          ((v)->counter)
 #define atomic_set(v,i)         (((v)->counter) = (i))
 
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-        __CS_LOOP(v, i, "ar");
-}
 static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
         return __CS_LOOP(v, i, "ar");
 }
-static __inline__ int atomic_add_negative(int i, atomic_t * v)
-{
-        return __CS_LOOP(v, i, "ar") < 0;
-}
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-        __CS_LOOP(v, i, "sr");
-}
+#define atomic_add(_i, _v)              atomic_add_return(_i, _v)
+#define atomic_add_negative(_i, _v)     (atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v)                  atomic_add_return(1, _v)
+#define atomic_inc_return(_v)           atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v)         (atomic_add_return(1, _v) == 0)
+
 static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
         return __CS_LOOP(v, i, "sr");
 }
-static __inline__ void atomic_inc(volatile atomic_t * v)
-{
-        __CS_LOOP(v, 1, "ar");
-}
-static __inline__ int atomic_inc_return(volatile atomic_t * v)
-{
-        return __CS_LOOP(v, 1, "ar");
-}
+#define atomic_sub(_i, _v)              atomic_sub_return(_i, _v)
+#define atomic_sub_and_test(_i, _v)     (atomic_sub_return(_i, _v) == 0)
+#define atomic_dec(_v)                  atomic_sub_return(1, _v)
+#define atomic_dec_return(_v)           atomic_sub_return(1, _v)
+#define atomic_dec_and_test(_v)         (atomic_sub_return(1, _v) == 0)
 
-static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
-{
-        return __CS_LOOP(v, 1, "ar") == 0;
-}
-static __inline__ void atomic_dec(volatile atomic_t * v)
-{
-        __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_return(volatile atomic_t * v)
-{
-        return __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
-{
-        return __CS_LOOP(v, 1, "sr") == 0;
-}
 static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
 {
         __CS_LOOP(v, ~mask, "nr");
 }
+
 static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
 {
         __CS_LOOP(v, mask, "or");
 }
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+        __asm__ __volatile__("  cs   %0,%3,0(%2)\n"
+                             : "+d" (old), "=m" (v->counter)
+                             : "a" (v), "d" (new), "m" (v->counter)
+                             : "cc", "memory" );
+        return old;
+}
+
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == u))
+                        break;
+                old = atomic_cmpxchg(v, c, c + a);
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != u;
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #undef __CS_LOOP
 
 #ifdef __s390x__
@@ -123,85 +130,72 @@ typedef struct {
 #define atomic64_read(v)          ((v)->counter)
 #define atomic64_set(v,i)         (((v)->counter) = (i))
 
-static __inline__ void atomic64_add(int i, atomic64_t * v)
-{
-        __CSG_LOOP(v, i, "agr");
-}
-static __inline__ long long atomic64_add_return(int i, atomic64_t * v)
+static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
 {
         return __CSG_LOOP(v, i, "agr");
 }
-static __inline__ long long atomic64_add_negative(int i, atomic64_t * v)
-{
-        return __CSG_LOOP(v, i, "agr") < 0;
-}
-static __inline__ void atomic64_sub(int i, atomic64_t * v)
-{
-        __CSG_LOOP(v, i, "sgr");
-}
-static __inline__ void atomic64_inc(volatile atomic64_t * v)
-{
-        __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
-{
-        return __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v)
-{
-        return __CSG_LOOP(v, 1, "agr") == 0;
-}
-static __inline__ void atomic64_dec(volatile atomic64_t * v)
-{
-        __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
-{
-        return __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v)
+#define atomic64_add(_i, _v)            atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v)   (atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v)                atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v)         atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v)       (atomic64_add_return(1, _v) == 0)
+
+static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
 {
-        return __CSG_LOOP(v, 1, "sgr") == 0;
+        return __CSG_LOOP(v, i, "sgr");
 }
+#define atomic64_sub(_i, _v)            atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v)   (atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v)                atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v)         atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v)       (atomic64_sub_return(1, _v) == 0)
+
 static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
 {
         __CSG_LOOP(v, ~mask, "ngr");
 }
+
 static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
 {
         __CSG_LOOP(v, mask, "ogr");
 }
 
-#undef __CSG_LOOP
-#endif
-
-/*
-  returns 0  if expected_oldval==value in *v ( swap was successful )
-  returns 1  if unsuccessful.
+static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+                                             long long old, long long new)
+{
+        __asm__ __volatile__("  csg  %0,%3,0(%2)\n"
+                             : "+d" (old), "=m" (v->counter)
+                             : "a" (v), "d" (new), "m" (v->counter)
+                             : "cc", "memory" );
+        return old;
+}
 
-  This is non-portable, use bitops or spinlocks instead!
-*/
-static __inline__ int
-atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
+static __inline__ int atomic64_add_unless(atomic64_t *v,
+                                          long long a, long long u)
 {
-        int retval;
-
-        __asm__ __volatile__(
-                "  lr   %0,%3\n"
-                "  cs   %0,%4,0(%2)\n"
-                "  ipm  %0\n"
-                "  srl  %0,28\n"
-                "0:"
-                : "=&d" (retval), "=m" (v->counter)
-                : "a" (v), "d" (expected_oldval) , "d" (new_val),
-                  "m" (v->counter) : "cc", "memory" );
-        return retval;
+        long long c, old;
+        c = atomic64_read(v);
+        for (;;) {
+                if (unlikely(c == u))
+                        break;
+                old = atomic64_cmpxchg(v, c, c + a);
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != u;
 }
 
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+#undef __CSG_LOOP
+#endif
+
 #define smp_mb__before_atomic_dec()     smp_mb()
 #define smp_mb__after_atomic_dec()      smp_mb()
 #define smp_mb__before_atomic_inc()     smp_mb()
 #define smp_mb__after_atomic_inc()      smp_mb()
 
+#include <asm-generic/atomic.h>
 #endif /* __KERNEL__ */
 #endif /* __ARCH_S390_ATOMIC__ */
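The hunks above replace the old open-coded s390 helpers with macros built on atomic_add_return()/atomic_sub_return() and add atomic_cmpxchg(), atomic_add_unless() and atomic_inc_not_zero() (plus their atomic64 counterparts). As a hedged illustration only, not part of the patch, the userspace C11 sketch below shows the same compare-and-swap retry pattern those new primitives implement; the names ref_add_unless() and ref_inc_not_zero() are invented for this example.

/*
 * Illustrative userspace sketch (assumes a C11 compiler with <stdatomic.h>);
 * it mirrors the retry loop of atomic_add_unless()/atomic_inc_not_zero()
 * introduced by this patch, but it is not kernel code.
 *
 * Build: cc -std=c11 -O2 demo.c -o demo
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Add 'a' to *v unless *v equals 'u'; return true if the add happened. */
static bool ref_add_unless(atomic_int *v, int a, int u)
{
        int c = atomic_load(v);

        for (;;) {
                if (c == u)     /* forbidden value reached, give up */
                        return false;
                /*
                 * Try to swap c -> c + a.  On failure c is refreshed with
                 * the current value and the loop retries, like the cs/csg
                 * based loop in the s390 code.
                 */
                if (atomic_compare_exchange_weak(v, &c, c + a))
                        return true;
        }
}

/* The refcount idiom behind atomic_inc_not_zero(). */
static bool ref_inc_not_zero(atomic_int *v)
{
        return ref_add_unless(v, 1, 0);
}

int main(void)
{
        atomic_int refs = ATOMIC_VAR_INIT(1);

        printf("ref taken: %d\n", ref_inc_not_zero(&refs)); /* 1 (1 -> 2) */
        atomic_store(&refs, 0);                             /* object dying */
        printf("ref taken: %d\n", ref_inc_not_zero(&refs)); /* 0 (refused) */
        return 0;
}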