* Atomic operations that C can't guarantee us.  Useful for
* resource counting etc..
*/
+#ifdef CONFIG_SMP
+#define LOCK "lock ; "
+#else
+#define LOCK ""
+#endif
+
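/*
* Illustrative expansion (not part of the patch): LOCK is a string
* literal, so the preprocessor concatenates it with the mnemonic.
* Under CONFIG_SMP,
*	__asm__ __volatile__(LOCK "addl %1,%0" ...);
* becomes
*	__asm__ __volatile__("lock ; addl %1,%0" ...);
* making the read-modify-write atomic across CPUs; on UP kernels LOCK
* is empty and the single instruction is already atomic with respect
* to interrupts on the local CPU.
*/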
/*
* Make sure gcc doesn't try to be clever and move things around
* on us. We need to use _exactly_ the address the user gave us,
* not some alias that contains the same information.
*/
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }
#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))
static __inline__ void atomic_add(int i, atomic_t *v)
{
__asm__ __volatile__(
- LOCK_PREFIX "addl %1,%0"
+ LOCK "addl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
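/*
* Usage sketch (illustrative; nr_users is hypothetical):
*	static atomic_t nr_users = ATOMIC_INIT(0);
*	atomic_add(2, &nr_users);	atomic_read(&nr_users) == 2
*/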
static __inline__ void atomic_sub(int i, atomic_t *v)
{
__asm__ __volatile__(
- LOCK_PREFIX "subl %1,%0"
+ LOCK "subl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK_PREFIX "subl %2,%0; sete %1"
+ LOCK "subl %2,%0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
return c;
}
static __inline__ void atomic_inc(atomic_t *v)
{
__asm__ __volatile__(
- LOCK_PREFIX "incl %0"
+ LOCK "incl %0"
:"=m" (v->counter)
:"m" (v->counter));
}
static __inline__ void atomic_dec(atomic_t *v)
{
__asm__ __volatile__(
- LOCK_PREFIX "decl %0"
+ LOCK "decl %0"
:"=m" (v->counter)
:"m" (v->counter));
}
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK_PREFIX "decl %0; sete %1"
+ LOCK "decl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
}
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK_PREFIX "incl %0; sete %1"
+ LOCK "incl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
}
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK_PREFIX "addl %2,%0; sets %1"
+ LOCK "addl %2,%0; sets %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
return c;
}
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
int __i;
#ifdef CONFIG_M386
unsigned long flags;
if(unlikely(boot_cpu_data.x86==3))
goto no_xadd;
#endif
/* Modern 486+ processor */
__i = i;
__asm__ __volatile__(
- LOCK_PREFIX "xaddl %0, %1;"
+ LOCK "xaddl %0, %1;"
:"=r"(i)
:"m"(v->counter), "0"(i));
return i + __i;
#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
local_irq_save(flags);
__i = atomic_read(v);
atomic_set(v, i + __i);
local_irq_restore(flags);
return i + __i;
#endif
}
#define atomic_add_unless(v, a, u) \
({ \
int c, old; \
c = atomic_read(v); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = atomic_cmpxchg((v), c, c + (a)); \
- if (likely(old == c)) \
- break; \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
c = old; \
- } \
c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
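/*
* Illustrative use (obj and refcnt are hypothetical): take a reference
* only while the count is still non-zero, e.g. during a lookup:
*	if (!atomic_inc_not_zero(&obj->refcnt))
*		return NULL;
* atomic_add_unless() returns non-zero iff the add was performed.
*/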
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
+__asm__ __volatile__(LOCK "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")
#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
+__asm__ __volatile__(LOCK "orl %0,%1" \
: : "r" (mask),"m" (*(addr)) : "memory")
/* Atomic operations are already serializing on x86 */