#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__
-#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/processor.h>
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
/*
* Make sure gcc doesn't try to be clever and move things around
* on us. We need to use _exactly_ the address the user gave us,
* not some alias that contains the same information.
*/
-typedef struct { volatile int counter; } atomic_t;
+typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
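/*
 * Illustrative sketch, not part of the file: a statically initialized
 * counter driven through the operations defined below.  The name
 * "example_users" is hypothetical.
 *
 *	static atomic_t example_users = ATOMIC_INIT(0);
 *
 *	atomic_inc(&example_users);
 *	...
 *	atomic_dec(&example_users);
 */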
static __inline__ void atomic_add(int i, atomic_t *v)
{
__asm__ __volatile__(
- LOCK "addl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ LOCK_PREFIX "addl %1,%0"
+ :"+m" (v->counter)
+ :"ir" (i));
}
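/*
 * Note on the constraint change: the old "=m" (v->counter) output plus
 * a separate "m" (v->counter) input named the same location twice
 * without telling gcc that the asm performs a single read-modify-write
 * on it.  "+m" states that directly, matching the comment above about
 * using exactly the address the caller gave us.
 */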
/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
__asm__ __volatile__(
- LOCK "subl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ LOCK_PREFIX "subl %1,%0"
+ :"+m" (v->counter)
+ :"ir" (i));
}
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK "subl %2,%0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
+ LOCK_PREFIX "subl %2,%0; sete %1"
+ :"+m" (v->counter), "=qm" (c)
+ :"ir" (i) : "memory");
return c;
}
static __inline__ void atomic_inc(atomic_t *v)
{
__asm__ __volatile__(
- LOCK "incl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ LOCK_PREFIX "incl %0"
+ :"+m" (v->counter));
}
/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
__asm__ __volatile__(
- LOCK "decl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ LOCK_PREFIX "decl %0"
+ :"+m" (v->counter));
}
/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK "decl %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
+ LOCK_PREFIX "decl %0; sete %1"
+ :"+m" (v->counter), "=qm" (c)
+ : : "memory");
return c != 0;
}
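/*
 * Typical use, sketched with hypothetical names: the classic "put"
 * side of a reference count, where the thread that drops the count to
 * zero is the one that frees the object.
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_obj(obj);
 */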
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK "incl %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
+ LOCK_PREFIX "incl %0; sete %1"
+ :"+m" (v->counter), "=qm" (c)
+ : : "memory");
return c != 0;
}
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK "addl %2,%0; sets %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
+ LOCK_PREFIX "addl %2,%0; sets %1"
+ :"+m" (v->counter), "=qm" (c)
+ :"ir" (i) : "memory");
return c;
}
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
int __i;
#ifdef CONFIG_M386
+ unsigned long flags;
if(unlikely(boot_cpu_data.x86==3))
goto no_xadd;
#endif
/* Modern 486+ processor */
__i = i;
__asm__ __volatile__(
- LOCK "xaddl %0, %1;"
- :"=r"(i)
- :"m"(v->counter), "0"(i));
+ LOCK_PREFIX "xaddl %0, %1"
+ :"+r" (i), "+m" (v->counter)
+ : : "memory");
return i + __i;
#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
- local_irq_disable();
+ local_irq_save(flags);
__i = atomic_read(v);
atomic_set(v, i + __i);
- local_irq_enable();
+ local_irq_restore(flags);
return i + __i;
#endif
}
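/*
 * Why the switch to save/restore: local_irq_enable() would turn
 * interrupts back on unconditionally, which is wrong if
 * atomic_add_return() is called with interrupts already disabled.
 * local_irq_restore(flags) puts back whatever IRQ state the caller
 * actually had.
 */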
#define atomic_add_unless(v, a, u) \
({ \
int c, old; \
c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = atomic_cmpxchg((v), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
c = old; \
+ } \
c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
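/*
 * Usage sketch with hypothetical names: take a reference only if the
 * object is still live, refusing to resurrect a count that has already
 * reached zero.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */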
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK "andl %0,%1" \
+__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")
#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK "orl %0,%1" \
+__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" (mask),"m" (*(addr)) : "memory")
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
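/*
 * Hence the barriers above compile to nothing but a compiler barrier:
 * in a sequence such as
 *
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&v);
 *
 * the lock-prefixed decl already provides the ordering.
 */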