X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-m32r%2Fatomic.h;h=f5a7d7301c72170523cc265402acfc487d16b553;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=570cc546b6e5bb6c55f1860ca0e2df66ba7c9767;hpb=a2c21200f1c81b08cb55e417b68150bba439b646;p=linux-2.6.git

diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
index 570cc546b..f5a7d7301 100644
--- a/include/asm-m32r/atomic.h
+++ b/include/asm-m32r/atomic.h
@@ -9,7 +9,7 @@
  *    Copyright (C) 2004  Hirokazu Takata
  */
 
-#include <linux/config.h>
+#include <asm/assembler.h>
 #include <asm/system.h>
 
 /*
@@ -17,16 +17,6 @@
  * resource counting etc..
  */
 
-#undef LOAD
-#undef STORE
-#ifdef CONFIG_SMP
-#define LOAD	"lock"
-#define STORE	"unlock"
-#else
-#define LOAD	"ld"
-#define STORE	"st"
-#endif
-
 /*
  * Make sure gcc doesn't try to be clever and move things around
  * on us. We need to use _exactly_ the address the user gave us,
@@ -60,7 +50,7 @@ typedef struct { volatile int counter; } atomic_t;
  *
  * Atomically adds @i to @v and return (@i + @v).
  */
-static inline int atomic_add_return(int i, atomic_t *v)
+static __inline__ int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
 	int result;
@@ -69,9 +59,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	__asm__ __volatile__ (
 		"# atomic_add_return		\n\t"
 		DCACHE_CLEAR("%0", "r4", "%1")
-		LOAD" %0, @%1;			\n\t"
+		M32R_LOCK" %0, @%1;		\n\t"
 		"add	%0, %2;			\n\t"
-		STORE" %0, @%1;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
 		: "=&r" (result)
 		: "r" (&v->counter), "r" (i)
 		: "memory"
@@ -91,7 +81,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v and return (@v - @i).
  */
-static inline int atomic_sub_return(int i, atomic_t *v)
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
 {
 	unsigned long flags;
 	int result;
@@ -100,9 +90,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	__asm__ __volatile__ (
 		"# atomic_sub_return		\n\t"
 		DCACHE_CLEAR("%0", "r4", "%1")
-		LOAD" %0, @%1;			\n\t"
+		M32R_LOCK" %0, @%1;		\n\t"
 		"sub	%0, %2;			\n\t"
-		STORE" %0, @%1;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
 		: "=&r" (result)
 		: "r" (&v->counter), "r" (i)
 		: "memory"
@@ -150,7 +140,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
  *
  * Atomically increments @v by 1 and returns the result.
  */
-static inline int atomic_inc_return(atomic_t *v)
+static __inline__ int atomic_inc_return(atomic_t *v)
 {
 	unsigned long flags;
 	int result;
@@ -159,9 +149,9 @@ static inline int atomic_inc_return(atomic_t *v)
 	__asm__ __volatile__ (
 		"# atomic_inc_return		\n\t"
 		DCACHE_CLEAR("%0", "r4", "%1")
-		LOAD" %0, @%1;			\n\t"
+		M32R_LOCK" %0, @%1;		\n\t"
 		"addi	%0, #1;			\n\t"
-		STORE" %0, @%1;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
 		: "=&r" (result)
 		: "r" (&v->counter)
 		: "memory"
@@ -180,7 +170,7 @@ static inline int atomic_inc_return(atomic_t *v)
  *
  * Atomically decrements @v by 1 and returns the result.
  */
-static inline int atomic_dec_return(atomic_t *v)
+static __inline__ int atomic_dec_return(atomic_t *v)
 {
 	unsigned long flags;
 	int result;
@@ -189,9 +179,9 @@ static inline int atomic_dec_return(atomic_t *v)
 	__asm__ __volatile__ (
 		"# atomic_dec_return		\n\t"
 		DCACHE_CLEAR("%0", "r4", "%1")
-		LOAD" %0, @%1;			\n\t"
+		M32R_LOCK" %0, @%1;		\n\t"
 		"addi	%0, #-1;		\n\t"
-		STORE" %0, @%1;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
 		: "=&r" (result)
 		: "r" (&v->counter)
 		: "memory"
@@ -251,7 +241,29 @@ static inline int atomic_dec_return(atomic_t *v)
  */
 #define atomic_add_negative(i,v)	(atomic_add_return((i), (v)) < 0)
 
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *addr)
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)				\
+({								\
+	int c, old;						\
+	c = atomic_read(v);					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+		c = old;					\
+	c != (u);						\
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
 {
 	unsigned long flags;
 	unsigned long tmp;
@@ -260,9 +272,9 @@ static inline void atomic_clear_mask(unsigned long mask, atomic_t *addr)
 	__asm__ __volatile__ (
 		"# atomic_clear_mask		\n\t"
 		DCACHE_CLEAR("%0", "r5", "%1")
-		LOAD" %0, @%1;			\n\t"
+		M32R_LOCK" %0, @%1;		\n\t"
 		"and	%0, %2;			\n\t"
-		STORE" %0, @%1;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
 		: "=&r" (tmp)
 		: "r" (addr), "r" (~mask)
 		: "memory"
@@ -273,7 +285,7 @@ static inline void atomic_clear_mask(unsigned long mask, atomic_t *addr)
 	local_irq_restore(flags);
 }
 
-static inline void atomic_set_mask(unsigned long mask, atomic_t *addr)
+static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
 {
 	unsigned long flags;
 	unsigned long tmp;
@@ -282,9 +294,9 @@ static inline void atomic_set_mask(unsigned long mask, atomic_t *addr)
 	__asm__ __volatile__ (
 		"# atomic_set_mask		\n\t"
 		DCACHE_CLEAR("%0", "r5", "%1")
-		LOAD" %0, @%1;			\n\t"
+		M32R_LOCK" %0, @%1;		\n\t"
 		"or	%0, %2;			\n\t"
-		STORE" %0, @%1;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
 		: "=&r" (tmp)
 		: "r" (addr), "r" (mask)
 		: "memory"
@@ -301,5 +313,5 @@ static inline void atomic_set_mask(unsigned long mask, atomic_t *addr)
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
+#include <asm-generic/atomic.h>
 #endif	/* _ASM_M32R_ATOMIC_H */
-
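
Note (not part of the patch): the atomic_add_unless() macro added above is the usual
compare-and-swap retry loop built on atomic_read()/atomic_cmpxchg(), and
atomic_inc_not_zero() is its refcount-style special case. A minimal user-space sketch of
the same idea, assuming a C11 toolchain with <stdatomic.h> (the kernel code itself uses
cmpxchg(), not C11 atomics); the function and variable names here are illustrative only:

	#include <stdatomic.h>
	#include <stdio.h>

	/* Add 'a' to '*v' unless '*v' equals 'u'; return non-zero if the add happened. */
	static int add_unless(atomic_int *v, int a, int u)
	{
		int c = atomic_load(v);

		while (c != u) {
			/* Try to swap c -> c + a; on failure, c is reloaded with
			 * the value currently stored in *v and we retry. */
			if (atomic_compare_exchange_weak(v, &c, c + a))
				return 1;
		}
		return 0;
	}

	int main(void)
	{
		atomic_int refcount = 1;

		/* Rough equivalent of atomic_inc_not_zero(): only take a new
		 * reference if at least one reference still exists. */
		if (add_unless(&refcount, 1, 0))
			printf("got reference, refcount = %d\n", atomic_load(&refcount));
		return 0;
	}

The kernel macro expresses the same loop: read the counter, and keep retrying
atomic_cmpxchg() until either the swap succeeds or the observed value becomes @u.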