#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
+#include <linux/config.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
#define __raw_spin_lock_string \
"\n1:\t" \
- LOCK_PREFIX " ; decl %0\n\t" \
+ "lock ; decl %0\n\t" \
"js 2f\n" \
LOCK_SECTION_START("") \
"2:\t" \
"jmp 1b\n" \
LOCK_SECTION_END
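/*
 * Annotated walk-through of the string above (illustrative only, not
 * part of the patch): the locked decl takes slock from 1 to 0 on the
 * uncontended fast path. If the result is negative, another CPU holds
 * the lock; we then spin read-only on cmpb/jle, without generating
 * locked bus cycles, until slock goes positive, and only then retry
 * the atomic decrement at 1:. "rep;nop" is the PAUSE instruction,
 * which relaxes the pipeline and any hyperthreaded sibling while we
 * spin.
 */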
-#define __raw_spin_lock_string_up \
- "\n\tdecl %0"
-
#define __raw_spin_unlock_string \
"movl $1,%0" \
:"=m" (lock->slock) : : "memory"
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
- asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory");
+ __asm__ __volatile__(
+ __raw_spin_lock_string
+ :"=m" (lock->slock) : : "memory");
}
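/*
 * For reference, the consumer of __raw_spin_unlock_string (unchanged
 * by this patch): a plain store is enough on x86 because stores are
 * not reordered against earlier loads or stores, so nothing from the
 * critical section can leak past the releasing write.
 */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		__raw_spin_unlock_string
	);
}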
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
* On x86, we implement read-write locks as a 32-bit counter
* with the high bit (sign) being the "contended" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ *
+ * Changed to use the same technique as rw semaphores. See
+ * semaphore.h for details. -ben
+ *
+ * the helpers are in arch/i386/kernel/semaphore.c
*/
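/*
 * Illustrative counter states (not part of this patch), assuming the
 * stock RW_LOCK_BIAS of 0x01000000 from asm/rwlock.h:
 *
 *	RW_LOCK_BIAS		unlocked
 *	RW_LOCK_BIAS - n	n readers hold the lock
 *	0			one writer holds the lock
 *	negative		contended: a slow-path helper spins
 */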
#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
- __build_read_lock(rw);
+ __build_read_lock(rw, "__read_lock_failed");
}
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
- __build_write_lock(rw);
+ __build_write_lock(rw, "__write_lock_failed");
}
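/*
 * A sketch of what the __build_*_lock() helpers expand to (based on
 * asm-i386/rwlock.h; exact operand constraints vary across kernel
 * versions). The read-side fast path is a single locked subtract:
 *
 *	lock ; subl $1,(%eax)		# take one unit of the bias
 *	js 2f				# sign set: writer active/waiting
 *   1:					# critical section entered
 *	...
 *   2:	call __read_lock_failed		# out-of-line spin, then retry
 *	jmp 1b
 *
 * The write side is identical except that it subtracts the full
 * RW_LOCK_BIAS and branches to __write_lock_failed unless the result
 * is exactly zero.
 */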
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);		/* optimistically take a reader unit */
	if (atomic_read(count) >= 0)	/* no writer holds or wants the lock */
		return 1;
	atomic_inc(count);		/* contended: back out and fail */
	return 0;
}
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
- asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
+ asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
- asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
+ asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
: "=m" (rw->lock) : : "memory");
}
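/*
 * For reference, the write-side trylock counterpart (unchanged here;
 * a sketch of the stock implementation): a writer must acquire the
 * whole bias atomically, and backs out if any part of it was already
 * taken by readers or another writer.
 */
static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}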