diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index de649d3aa..c57441bb2 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -1,7 +1,6 @@
 #ifndef __ARCH_I386_ATOMIC__
 #define __ARCH_I386_ATOMIC__
 
-#include <linux/config.h>
 #include <linux/compiler.h>
 #include <asm/processor.h>
 
@@ -10,18 +9,12 @@
  * resource counting etc..
  */
 
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
 /*
  * Make sure gcc doesn't try to be clever and move things around
  * on us. We need to use _exactly_ the address the user gave us,
  * not some alias that contains the same information.
  */
-typedef struct { volatile int counter; } atomic_t;
+typedef struct { int counter; } atomic_t;
 
 #define ATOMIC_INIT(i)	{ (i) }
 
@@ -52,9 +45,9 @@ typedef struct { volatile int counter; } atomic_t;
 static __inline__ void atomic_add(int i, atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK "addl %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+		LOCK_PREFIX "addl %1,%0"
+		:"+m" (v->counter)
+		:"ir" (i));
 }
 
 /**
@@ -67,9 +60,9 @@ static __inline__ void atomic_add(int i, atomic_t *v)
 static __inline__ void atomic_sub(int i, atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK "subl %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+		LOCK_PREFIX "subl %1,%0"
+		:"+m" (v->counter)
+		:"ir" (i));
 }
 
 /**
@@ -86,9 +79,9 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK "subl %2,%0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+		LOCK_PREFIX "subl %2,%0; sete %1"
+		:"+m" (v->counter), "=qm" (c)
+		:"ir" (i) : "memory");
 	return c;
 }
 
@@ -101,9 +94,8 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 static __inline__ void atomic_inc(atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK "incl %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+		LOCK_PREFIX "incl %0"
+		:"+m" (v->counter));
 }
 
 /**
@@ -115,9 +107,8 @@ static __inline__ void atomic_inc(atomic_t *v)
 static __inline__ void atomic_dec(atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK "decl %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+		LOCK_PREFIX "decl %0"
+		:"+m" (v->counter));
 }
 
 /**
@@ -133,9 +124,9 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK "decl %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+		LOCK_PREFIX "decl %0; sete %1"
+		:"+m" (v->counter), "=qm" (c)
+		: : "memory");
 	return c != 0;
 }
 /**
@@ -152,9 +143,9 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK "incl %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+		LOCK_PREFIX "incl %0; sete %1"
+		:"+m" (v->counter), "=qm" (c)
+		: : "memory");
 	return c != 0;
 }
 /**
@@ -172,9 +163,9 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK "addl %2,%0; sets %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+		LOCK_PREFIX "addl %2,%0; sets %1"
+		:"+m" (v->counter), "=qm" (c)
+		:"ir" (i) : "memory");
	return c;
 }
 
@@ -189,23 +180,24 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
 {
 	int __i;
 #ifdef CONFIG_M386
+	unsigned long flags;
 	if(unlikely(boot_cpu_data.x86==3))
 		goto no_xadd;
 #endif
 	/* Modern 486+ processor */
 	__i = i;
 	__asm__ __volatile__(
-		LOCK "xaddl %0, %1;"
-		:"=r"(i)
-		:"m"(v->counter), "0"(i));
+		LOCK_PREFIX "xaddl %0, %1"
+		:"+r" (i), "+m" (v->counter)
+		: : "memory");
 	return i + __i;
 
 #ifdef CONFIG_M386
 no_xadd: /* Legacy 386 processor */
-	local_irq_disable();
+	local_irq_save(flags);
 	__i = atomic_read(v);
 	atomic_set(v, i + __i);
-	local_irq_enable();
+	local_irq_restore(flags);
 	return i + __i;
 #endif
 }
@@ -231,8 +223,14 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
 		c = old;					\
+	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
@@ -242,11 +240,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK "andl %0,%1" \
+__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
 : : "r" (~(mask)),"m" (*addr) : "memory")
 
 #define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK "orl %0,%1" \
+__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
 : : "r" (mask),"m" (*(addr)) : "memory")
 
 /* Atomic operations are already serializing on x86 */
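
A note on the LOCK -> LOCK_PREFIX conversion: the second hunk deletes the
per-file SMP lock macro, and every asm body now uses the shared LOCK_PREFIX
instead. In kernels of this vintage LOCK_PREFIX comes from
include/asm-i386/alternative.h, where the SMP build also records each lock
prefix in a .smp_locks section so it can be patched to a nop when the kernel
finds itself on a single CPU. A minimal sketch of the idea only; the real
definition's .smp_locks bookkeeping is deliberately omitted here:

/*
 * Sketch, not the in-tree definition: on SMP emit the lock prefix,
 * on UP emit nothing. The real macro additionally tags each prefix
 * for runtime patching via the SMP-alternatives machinery.
 */
#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif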
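A note on the constraint changes: the old pattern named v->counter twice,
once as an "=m" output and once as an "m" input, which in principle lets
gcc pick two different aliases for the same object; the new "+m" declares
a single operand that is both read and written. A stand-alone illustration
with a hypothetical helper (not kernel code; x86 inline asm only, and
deliberately non-atomic since it carries no lock prefix):

/*
 * nonatomic_inc - illustrative only. The single "+m" operand tells
 * gcc that the asm both reads and writes *p at one address.
 */
static inline void nonatomic_inc(int *p)
{
	__asm__ __volatile__("incl %0" : "+m" (*p));
}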
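Finally, the atomic_add_unless() hunk replaces the condensed while loop
with an explicit cmpxchg retry loop plus likely()/unlikely() hints; the
semantics are unchanged: add @a to @v unless @v equals @u, and return
non-zero if the add happened. The same pattern can be sketched in user
space with the GCC/Clang __atomic builtins standing in for the kernel's
atomic_cmpxchg(); all names below are illustrative, not kernel interfaces:

#include <stdio.h>

/* Add "a" to *v unless *v equals "u"; return 1 if the add happened. */
static int my_atomic_add_unless(int *v, int a, int u)
{
	int c = __atomic_load_n(v, __ATOMIC_RELAXED);

	for (;;) {
		if (c == u)
			return 0;	/* hit the forbidden value: give up */
		/*
		 * Try to swap c -> c + a. On failure the builtin
		 * refreshes c with the value it actually found,
		 * mirroring "c = old;" in the kernel macro, and we
		 * go around again.
		 */
		if (__atomic_compare_exchange_n(v, &c, c + a,
						0 /* strong */,
						__ATOMIC_SEQ_CST,
						__ATOMIC_RELAXED))
			return 1;
	}
}

int main(void)
{
	int counter = 1;

	printf("%d\n", my_atomic_add_unless(&counter, 1, 0));	/* 1: added */
	printf("%d\n", counter);				/* 2 */
	counter = 0;
	printf("%d\n", my_atomic_add_unless(&counter, 1, 0));	/* 0: refused */
	return 0;
}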