linux 2.6.16.38 w/ vs2.0.3-rc1
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 4ddce52..de649d3 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -10,6 +10,12 @@
  * resource counting etc..
  */
 
+#ifdef CONFIG_SMP
+#define LOCK "lock ; "
+#else
+#define LOCK ""
+#endif
+
 /*
  * Make sure gcc doesn't try to be clever and move things around
  * on us. We need to use _exactly_ the address the user gave us,
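The hunk above reintroduces the 2.6.16-style LOCK macro: under CONFIG_SMP it pastes the "lock ; " prefix in front of each read-modify-write instruction so the update is atomic across CPUs; on uniprocessor builds it expands to nothing, because a single instruction cannot be interleaved with another CPU's access. A minimal standalone sketch of the string pasting (my_atomic_t and my_atomic_add are hypothetical names; only the LOCK definition comes from the patch):

#ifdef CONFIG_SMP
#define LOCK "lock ; "			/* assert the bus/cache-line lock */
#else
#define LOCK ""				/* UP: the prefix is pure overhead */
#endif

typedef struct { volatile int counter; } my_atomic_t;	/* hypothetical */

static inline void my_atomic_add(int i, my_atomic_t *v)
{
	/* Adjacent C string literals concatenate, so the asm template is
	 * "lock ; addl %1,%0" on SMP and plain "addl %1,%0" on UP. */
	__asm__ __volatile__(
		LOCK "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

The same substitution is what every LOCK_PREFIX -> LOCK change below performs; only the instruction after the prefix differs.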
@@ -46,7 +52,7 @@ typedef struct { volatile int counter; } atomic_t;
 static __inline__ void atomic_add(int i, atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "addl %1,%0"
+		LOCK "addl %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
@@ -61,7 +67,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
 static __inline__ void atomic_sub(int i, atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "subl %1,%0"
+		LOCK "subl %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
@@ -80,7 +86,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "subl %2,%0; sete %1"
+		LOCK "subl %2,%0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"ir" (i), "m" (v->counter) : "memory");
 	return c;
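Here the locked subl is followed by sete, which copies the Zero Flag that subl just computed into the byte output c, so "did the counter hit zero?" is answered by the same locked instruction rather than by a separate, racy re-read; the "=qm" constraint allows any byte-addressable register or memory slot for c. A hedged usage sketch (obj and its refcount field are hypothetical):

	/* Drop nr references at once; free only if we took the count to 0. */
	if (atomic_sub_and_test(nr, &obj->refcount))
		kfree(obj);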
@@ -95,7 +101,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 static __inline__ void atomic_inc(atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0"
+		LOCK "incl %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
@@ -109,7 +115,7 @@ static __inline__ void atomic_inc(atomic_t *v)
 static __inline__ void atomic_dec(atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0"
+		LOCK "decl %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
@@ -127,7 +133,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0; sete %1"
+		LOCK "decl %0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"m" (v->counter) : "memory");
 	return c != 0;
@@ -146,7 +152,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0; sete %1"
+		LOCK "incl %0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"m" (v->counter) : "memory");
 	return c != 0;
@@ -166,7 +172,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "addl %2,%0; sets %1"
+		LOCK "addl %2,%0; sets %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"ir" (i), "m" (v->counter) : "memory");
 	return c;
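atomic_add_negative() is the same pattern with sets in place of sete: it latches the Sign Flag of the locked addl, i.e. whether the new value went negative. A hedged usage sketch (the quota structure and its balance field are hypothetical):

	/* Charge the cost; undo the charge if the balance went negative. */
	if (atomic_add_negative(cost, &quota->balance))
		atomic_sub(cost, &quota->balance);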
@@ -183,24 +189,23 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
 {
 	int __i;
 #ifdef CONFIG_M386
-	unsigned long flags;
 	if(unlikely(boot_cpu_data.x86==3))
 		goto no_xadd;
 #endif
 	/* Modern 486+ processor */
 	__i = i;
 	__asm__ __volatile__(
-		LOCK_PREFIX "xaddl %0, %1;"
+		LOCK "xaddl %0, %1;"
 		:"=r"(i)
 		:"m"(v->counter), "0"(i));
 	return i + __i;
 
 #ifdef CONFIG_M386
 no_xadd: /* Legacy 386 processor */
-	local_irq_save(flags);
+	local_irq_disable();
 	__i = atomic_read(v);
 	atomic_set(v, i + __i);
-	local_irq_restore(flags);
+	local_irq_enable();
 	return i + __i;
 #endif
 }
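Two separate things change in this hunk. The xaddl instruction exchanges-and-adds: v->counter becomes v->counter + i while the old counter value comes back in i, so i + __i is the post-add result obtained from a single locked instruction. And the CONFIG_M386 fallback (the 386 lacks xadd) reverts from local_irq_save()/local_irq_restore() to plain local_irq_disable()/local_irq_enable(), matching the 2.6.16 base this tree tracks; note the disable/enable form assumes the caller runs with interrupts enabled, while save/restore is also safe from IRQs-off contexts. A standalone sketch of the xadd path only (my_add_return is a hypothetical name, reusing my_atomic_t from the earlier sketch):

static inline int my_add_return(int i, my_atomic_t *v)
{
	int __i = i;

	/* xaddl: v->counter += i, and the pre-add counter lands in i. */
	__asm__ __volatile__(
		LOCK "xaddl %0, %1;"
		:"=r"(i)
		:"m"(v->counter), "0"(i));
	return i + __i;		/* old value + addend = value after the add */
}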
@@ -226,14 +231,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 ({ \
 	int c, old; \
 	c = atomic_read(v); \
-	for (;;) { \
-		if (unlikely(c == (u))) \
-			break; \
-		old = atomic_cmpxchg((v), c, c + (a)); \
-		if (likely(old == c)) \
-			break; \
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
 		c = old; \
-	} \
 	c != (u); \
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
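The removed for (;;) loop and the new while form are equivalent: retry atomic_cmpxchg(v, c, c + a) until either the counter is observed equal to u (give up) or a cmpxchg succeeds; on failure cmpxchg returns the value it actually found, so c is refreshed without an extra read. A plain-function sketch of the same logic (my_add_unless is a hypothetical name; the kernel keeps this a macro so it works for any integer expressions):

static inline int my_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);
	int old;

	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;	/* lost the race: retry from the observed value */
	return c != u;		/* false only if the counter was found at u */
}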
@@ -243,11 +242,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
+__asm__ __volatile__(LOCK "andl %0,%1" \
 : : "r" (~(mask)),"m" (*addr) : "memory")
 
 #define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
+__asm__ __volatile__(LOCK "orl %0,%1" \
 : : "r" (mask),"m" (*(addr)) : "memory")
 
 /* Atomic operations are already serializing on x86 */
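Unlike the atomic_t helpers above, these two macros apply a locked andl/orl to a bare 32-bit word, which is why they take an address rather than an atomic_t. A hedged usage sketch (MY_FLAG_BUSY and my_flags are hypothetical):

#define MY_FLAG_BUSY 0x01		/* hypothetical flag bit */
static unsigned long my_flags;		/* hypothetical 32-bit flags word */

static void my_mark_busy(void)
{
	atomic_set_mask(MY_FLAG_BUSY, &my_flags);	/* lock orl */
}

static void my_mark_idle(void)
{
	atomic_clear_mask(MY_FLAG_BUSY, &my_flags);	/* lock andl */
}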