#define RW_LOCK_BIAS 0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
-/* It seems that people are forgetting to
- * initialize their spinlocks properly, tsk tsk.
- * Remember to turn this off in 2.4. -ben
- */
-#if defined(CONFIG_DEBUG_SPINLOCK)
-#define SPINLOCK_DEBUG 1
-#else
-#define SPINLOCK_DEBUG 0
-#endif
-
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*/
typedef struct {
- volatile int lock;
-#if SPINLOCK_DEBUG
+ volatile int slock;
+#ifdef CONFIG_DEBUG_SPINLOCK
unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
#define SPINLOCK_MAGIC 0xdead4ead
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT /* */
* We make no fairness assumptions. They have a cost.
*/
-#define spin_is_locked(x) (*(volatile int *)(&(x)->lock) <= 0)
+#define spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
unsigned long tmp1, tmp2;
/*
- * lock->lock :  =1 : unlock
- *            : <=0 : lock
+ * lock->slock :  =1 : unlock
+ *             : <=0 : lock
* {
- *   oldval = lock->lock; <--+ need atomic operation
- *   lock->lock = 0;      <--+
+ *   oldval = lock->slock; <--+ need atomic operation
+ *   lock->slock = 0;      <--+
* }
*/
__asm__ __volatile__ (
"unlock %1, @%3; \n\t"
"mvtc %2, psw; \n\t"
: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
- : "r" (&lock->lock)
+ : "r" (&lock->slock)
: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
, "r6"
{
unsigned long tmp0, tmp1;
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
__label__ here;
here:
if (lock->magic != SPINLOCK_MAGIC) {
- printk("eip: %p\n", &&here);
+ printk("pc: %p\n", &&here);
BUG();
}
#endif
/*
- * lock->lock :  =1 : unlock
- *            : <=0 : lock
+ * lock->slock :  =1 : unlock
+ *             : <=0 : lock
*
* for ( ; ; ) {
- *   lock->lock -= 1;  <-- need atomic operation
- *   if (lock->lock == 0) break;
- *   for ( ; lock->lock <= 0 ; );
+ *   lock->slock -= 1;  <-- need atomic operation
+ *   if (lock->slock == 0) break;
+ *   for ( ; lock->slock <= 0 ; );
* }
*/
__asm__ __volatile__ (
"bra 2b; \n\t"
LOCK_SECTION_END
: "=&r" (tmp0), "=&r" (tmp1)
- : "r" (&lock->lock)
+ : "r" (&lock->slock)
: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
, "r6"
static inline void _raw_spin_unlock(spinlock_t *lock)
{
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(lock->magic != SPINLOCK_MAGIC);
BUG_ON(!spin_is_locked(lock));
#endif
mb();
- lock->lock = 1;
+ lock->slock = 1;
}
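
For reference, a rough C sketch of the protocol the m32r assembly above implements (slock == 1 means unlocked, slock <= 0 means locked). The demo_* names and the GCC __sync builtins are illustrative only and are not part of this patch:

/* Illustrative only -- not part of the patch. */
typedef struct { volatile int slock; } demo_spinlock_t;	/* 1 == unlocked */

static inline int demo_trylock(demo_spinlock_t *lock)
{
	/* atomically: oldval = lock->slock; lock->slock = 0; */
	int oldval = __sync_lock_test_and_set(&lock->slock, 0);
	return oldval > 0;			/* >0 means it was unlocked */
}

static inline void demo_lock(demo_spinlock_t *lock)
{
	for (;;) {
		/* atomically: lock->slock -= 1; */
		if (__sync_sub_and_fetch(&lock->slock, 1) == 0)
			break;			/* we now own the lock */
		while (lock->slock <= 0)
			;			/* spin until the owner stores 1 again */
	}
}

static inline void demo_unlock(demo_spinlock_t *lock)
{
	__sync_synchronize();			/* the mb() above */
	lock->slock = 1;
}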
/*
*/
typedef struct {
volatile int lock;
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
#define RWLOCK_MAGIC 0xdeaf1eed
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
#define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT /* */
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define read_can_lock(x) ((int)(x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
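
To make the intent of the two new predicates concrete, here is a small illustrative sketch of how the counter values relate to them under the RW_LOCK_BIAS scheme described below; demo_try_for_write is a made-up name and not part of this patch:

/*
 * Illustrative only.  Readers subtract 1 from the counter and a writer
 * subtracts RW_LOCK_BIAS, so for a lock word c:
 *
 *   c == RW_LOCK_BIAS      unlocked: read_can_lock() and write_can_lock()
 *   0 < c < RW_LOCK_BIAS   held by readers: only read_can_lock() is true
 *   c <= 0                 a writer holds (or waits for) it: neither is true
 */
static inline int demo_try_for_write(rwlock_t *rw)
{
	if (!write_can_lock(rw))
		return 0;		/* cheap pre-check, skips the atomic op */
	return write_trylock(rw);	/* may still fail under contention */
}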
/*
* As on x86, we implement read-write locks as a 32-bit counter
{
unsigned long tmp0, tmp1;
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
/*
{
unsigned long tmp0, tmp1, tmp2;
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
/*