git://git.onelab.eu / linux-2.6.git / blobdiff
Revert to Fedora kernel-2.6.17-1.2187_FC5 patched with vs2.0.2.1; there are too many...
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 007e88d..cecbf7b 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -1,7 +1,8 @@
 #ifndef __ARCH_X86_64_ATOMIC__
 #define __ARCH_X86_64_ATOMIC__
 
-#include <asm/alternative.h>
+#include <linux/config.h>
+#include <asm/types.h>
 
 /* atomic_t should be 32 bit signed type */
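For context: this include swap is the crux of the revert. Mainline 2.6.18 takes LOCK_PREFIX from <asm/alternative.h>, where the alternatives machinery can patch the lock prefix out at boot on uniprocessor machines; the restored Fedora/vs2.0.2.1 header instead defines LOCK locally from CONFIG_SMP, which is why <linux/config.h> comes back. A sketch of that pre-2.6.18 definition, reconstructed from the 2.6.17-era header rather than shown in this diff:

/* Reconstructed from the 2.6.17-era asm-x86_64/atomic.h (not part of
 * this hunk); CONFIG_SMP arrives via <linux/config.h>. */
#ifdef CONFIG_SMP
#define LOCK "lock ; "	/* bus-lock prefix makes the RMW ops SMP-safe */
#else
#define LOCK ""		/* uniprocessor build: no lock prefix needed */
#endif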
@@ -52,7 +53,7 @@ typedef struct { volatile int counter; } atomic_t;
 static __inline__ void atomic_add(int i, atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "addl %1,%0"
+		LOCK "addl %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
@@ -67,7 +68,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
 static __inline__ void atomic_sub(int i, atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "subl %1,%0"
+		LOCK "subl %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
@@ -86,7 +87,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "subl %2,%0; sete %1"
+		LOCK "subl %2,%0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"ir" (i), "m" (v->counter) : "memory");
 	return c;
@@ -101,7 +102,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 static __inline__ void atomic_inc(atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0"
+		LOCK "incl %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
@@ -115,7 +116,7 @@ static __inline__ void atomic_inc(atomic_t *v)
 static __inline__ void atomic_dec(atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0"
+		LOCK "decl %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
@@ -133,7 +134,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0; sete %1"
+		LOCK "decl %0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"m" (v->counter) : "memory");
 	return c != 0;
@@ -152,7 +153,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0; sete %1"
+		LOCK "incl %0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"m" (v->counter) : "memory");
 	return c != 0;
@@ -172,7 +173,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "addl %2,%0; sets %1"
+		LOCK "addl %2,%0; sets %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"ir" (i), "m" (v->counter) : "memory");
 	return c;
@@ -189,7 +190,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
 {
 	int __i = i;
 	__asm__ __volatile__(
-		LOCK_PREFIX "xaddl %0, %1;"
+		LOCK "xaddl %0, %1;"
 		:"=r"(i)
 		:"m"(v->counter), "0"(i));
 	return i + __i;
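The hunk above changes only the lock-prefix spelling, but atomic_add_return is the one primitive here whose asm is easy to misread: xaddl writes the sum to memory and leaves the old counter value in the source register, so return i + __i yields the new counter value. A plain-C sketch of that semantics (illustrative only, not kernel code):

/* What "xaddl %0, %1" does, sketched in plain C (illustrative only;
 * the hardware performs this read-modify-write atomically under LOCK). */
static int xadd_sketch(int *mem, int src)
{
	int old = *mem;		/* source register receives the old value */
	*mem = old + src;	/* memory receives the sum */
	return old;		/* hence old + __i is the updated counter */
}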
@@ -237,7 +238,7 @@ typedef struct { volatile long counter; } atomic64_t;
 static __inline__ void atomic64_add(long i, atomic64_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "addq %1,%0"
+		LOCK "addq %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
@@ -252,7 +253,7 @@ static __inline__ void atomic64_add(long i, atomic64_t *v)
 static __inline__ void atomic64_sub(long i, atomic64_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "subq %1,%0"
+		LOCK "subq %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
@@ -271,7 +272,7 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "subq %2,%0; sete %1"
+		LOCK "subq %2,%0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"ir" (i), "m" (v->counter) : "memory");
 	return c;
@@ -286,7 +287,7 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
 static __inline__ void atomic64_inc(atomic64_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "incq %0"
+		LOCK "incq %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
@@ -300,7 +301,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
 static __inline__ void atomic64_dec(atomic64_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "decq %0"
+		LOCK "decq %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
@@ -318,7 +319,7 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "decq %0; sete %1"
+		LOCK "decq %0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"m" (v->counter) : "memory");
 	return c != 0;
@@ -337,7 +338,7 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "incq %0; sete %1"
+		LOCK "incq %0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"m" (v->counter) : "memory");
 	return c != 0;
@@ -357,7 +358,7 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "addq %2,%0; sets %1"
+		LOCK "addq %2,%0; sets %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"ir" (i), "m" (v->counter) : "memory");
 	return c;
@@ -374,7 +375,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t *v)
 {
 	long __i = i;
 	__asm__ __volatile__(
-		LOCK_PREFIX "xaddq %0, %1;"
+		LOCK "xaddq %0, %1;"
 		:"=r"(i)
 		:"m"(v->counter), "0"(i));
 	return i + __i;
@@ -418,11 +419,11 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
+__asm__ __volatile__(LOCK "andl %0,%1" \
 : : "r" (~(mask)),"m" (*addr) : "memory")
 
 #define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
+__asm__ __volatile__(LOCK "orl %0,%1" \
 : : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
 
 /* Atomic operations are already serializing on x86 */
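End to end, the revert leaves every primitive emitting the same instruction as before; only where the lock prefix comes from changes. A hypothetical caller-side sketch under the restored header (the names here are invented for illustration):

/* Hypothetical usage, assuming the 2.6.17-era header above: on an SMP
 * build LOCK expands to "lock ; ", so atomic_inc(&refs) assembles to
 * roughly "lock ; incl refs(%rip)"; on UP builds the prefix is empty. */
static atomic_t refs = ATOMIC_INIT(0);

static void get_ref(void)
{
	atomic_inc(&refs);			/* lock ; incl */
}

static int put_ref(void)
{
	return atomic_dec_and_test(&refs);	/* lock ; decl + sete */
}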