#ifndef __ARCH_X86_64_ATOMIC__
#define __ARCH_X86_64_ATOMIC__
-#include <asm/alternative.h>
+#include <linux/config.h>
+#include <asm/types.h>
/* atomic_t should be a 32-bit signed type */
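/*
 * For reference: the type operated on below is defined earlier in this
 * header, outside the hunks shown, essentially as
 *
 *	typedef struct { volatile int counter; } atomic_t;
 *	#define ATOMIC_INIT(i)	{ (i) }
 */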
static __inline__ void atomic_add(int i, atomic_t *v)
{
__asm__ __volatile__(
- LOCK_PREFIX "addl %1,%0"
+ LOCK "addl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
static __inline__ void atomic_sub(int i, atomic_t *v)
{
__asm__ __volatile__(
- LOCK_PREFIX "subl %1,%0"
+ LOCK "subl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
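/*
 * Usage sketch (illustrative only; the names are hypothetical): a
 * statistics counter updated concurrently from several CPUs without a
 * spinlock.
 *
 *	static atomic_t packets_seen = ATOMIC_INIT(0);
 *
 *	atomic_add(rx_count, &packets_seen);
 *	atomic_sub(rx_dropped, &packets_seen);
 */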
/* atomic_sub_and_test: subtract i from v; returns true iff the result is zero */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK_PREFIX "subl %2,%0; sete %1"
+ LOCK "subl %2,%0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
return c;
}
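/*
 * Example (hypothetical names): consume units from a shared budget and
 * detect exhaustion in a single atomic step.
 *
 *	if (atomic_sub_and_test(nr_units, &budget))
 *		wake_up_reclaimer();	(runs only when the counter hit zero)
 */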
static __inline__ void atomic_inc(atomic_t *v)
{
__asm__ __volatile__(
- LOCK_PREFIX "incl %0"
+ LOCK "incl %0"
:"=m" (v->counter)
:"m" (v->counter));
}
static __inline__ void atomic_dec(atomic_t *v)
{
__asm__ __volatile__(
- LOCK_PREFIX "decl %0"
+ LOCK "decl %0"
:"=m" (v->counter)
:"m" (v->counter));
}
/* atomic_dec_and_test: decrement v; returns true iff the new value is zero */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK_PREFIX "decl %0; sete %1"
+ LOCK "decl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
}
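/*
 * The classic refcounting pattern (struct foo is hypothetical): whoever
 * drops the last reference frees the object.
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcnt))
 *			kfree(f);
 *	}
 */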
/* atomic_inc_and_test: increment v; returns true iff the new value is zero */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK_PREFIX "incl %0; sete %1"
+ LOCK "incl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
}
/* atomic_add_negative: add i to v; returns true iff the result is negative */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK_PREFIX "addl %2,%0; sets %1"
+ LOCK "addl %2,%0; sets %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
return c;
}
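/*
 * Example (hypothetical names): apply a signed adjustment and learn, in
 * the same atomic operation, whether the balance went negative.
 *
 *	if (atomic_add_negative(delta, &balance))
 *		balance_went_negative = 1;
 */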
/* atomic_add_return: add i to v and return the new value of v */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
int __i = i;
__asm__ __volatile__(
- LOCK_PREFIX "xaddl %0, %1;"
+ LOCK "xaddl %0, %1;"
:"=r"(i)
:"m"(v->counter), "0"(i));
return i + __i;
}
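/*
 * Usage sketch (names hypothetical, not from this header): xadd-based
 * atomic_add_return() is the building block for lock-free ID allocation,
 * since every caller observes a distinct new value:
 *
 *	static atomic_t next_id = ATOMIC_INIT(0);
 *	int id = atomic_add_return(1, &next_id);
 *
 * The 64-bit variants that follow operate on atomic64_t, defined earlier
 * in this header (outside the hunks shown) as a struct wrapping a
 * volatile long counter.
 */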
static __inline__ void atomic64_add(long i, atomic64_t *v)
{
__asm__ __volatile__(
- LOCK_PREFIX "addq %1,%0"
+ LOCK "addq %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
static __inline__ void atomic64_sub(long i, atomic64_t *v)
{
__asm__ __volatile__(
- LOCK_PREFIX "subq %1,%0"
+ LOCK "subq %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
/* atomic64_sub_and_test: subtract i from v; returns true iff the result is zero */
static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK_PREFIX "subq %2,%0; sete %1"
+ LOCK "subq %2,%0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
return c;
}
static __inline__ void atomic64_inc(atomic64_t *v)
{
__asm__ __volatile__(
- LOCK_PREFIX "incq %0"
+ LOCK "incq %0"
:"=m" (v->counter)
:"m" (v->counter));
}
static __inline__ void atomic64_dec(atomic64_t *v)
{
__asm__ __volatile__(
- LOCK_PREFIX "decq %0"
+ LOCK "decq %0"
:"=m" (v->counter)
:"m" (v->counter));
}
/* atomic64_dec_and_test: decrement v; returns true iff the new value is zero */
static __inline__ int atomic64_dec_and_test(atomic64_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK_PREFIX "decq %0; sete %1"
+ LOCK "decq %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
}
/* atomic64_inc_and_test: increment v; returns true iff the new value is zero */
static __inline__ int atomic64_inc_and_test(atomic64_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK_PREFIX "incq %0; sete %1"
+ LOCK "incq %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
}
/* atomic64_add_negative: add i to v; returns true iff the result is negative */
static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
{
unsigned char c;
__asm__ __volatile__(
- LOCK_PREFIX "addq %2,%0; sets %1"
+ LOCK "addq %2,%0; sets %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
return c;
}
/* atomic64_add_return: add i to v and return the new value of v */
static __inline__ long atomic64_add_return(long i, atomic64_t *v)
{
long __i = i;
__asm__ __volatile__(
- LOCK_PREFIX "xaddq %0, %1;"
+ LOCK "xaddq %0, %1;"
:"=r"(i)
:"m"(v->counter), "0"(i));
return i + __i;
}
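/*
 * Example (hypothetical names): a 64-bit sequence counter that, unlike a
 * 32-bit atomic_t, will not realistically wrap.
 *
 *	static atomic64_t seq = ATOMIC64_INIT(0);
 *	long s = atomic64_add_return(1, &seq);
 */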
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
+__asm__ __volatile__(LOCK "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")
#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
+__asm__ __volatile__(LOCK "orl %0,%1" \
: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
#endif /* __ARCH_X86_64_ATOMIC__ */