X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-m32r%2Fspinlock.h;h=f5cfba81ee10c1f59b007dcf34fd9db508a9bff5;hb=refs%2Fheads%2Fvserver;hp=6fd012a5e15007e1d86ecc32020ff8ec58b2232e;hpb=a2c21200f1c81b08cb55e417b68150bba439b646;p=linux-2.6.git

diff --git a/include/asm-m32r/spinlock.h b/include/asm-m32r/spinlock.h
index 6fd012a5e..f5cfba81e 100644
--- a/include/asm-m32r/spinlock.h
+++ b/include/asm-m32r/spinlock.h
@@ -9,86 +9,48 @@
  * Copyright (C) 2004  Hirokazu Takata
  */
 
-#include <linux/config.h>	/* CONFIG_DEBUG_SPINLOCK, CONFIG_SMP */
 #include <linux/compiler.h>
 #include <asm/atomic.h>
 #include <asm/page.h>
 
-extern int printk(const char * fmt, ...)
-	__attribute__ ((format (printf, 1, 2)));
-
-#define RW_LOCK_BIAS		 0x01000000
-#define RW_LOCK_BIAS_STR	"0x01000000"
-
-/* It seems that people are forgetting to
- * initialize their spinlocks properly, tsk tsk.
- * Remember to turn this off in 2.4. -ben
- */
-#if defined(CONFIG_DEBUG_SPINLOCK)
-#define SPINLOCK_DEBUG	1
-#else
-#define SPINLOCK_DEBUG	0
-#endif
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-	volatile int lock;
-#if SPINLOCK_DEBUG
-	unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC	0xdead4ead
-
-#if SPINLOCK_DEBUG
-#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT	/* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
+ * (the type definitions are in asm/spinlock_types.h)
+ *
  * Simple spin lock operations. There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
  */
 
-#define spin_is_locked(x)	(*(volatile int *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x)	(*(volatile int *)(&(x)->slock) <= 0)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(x) \
+		do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
 /**
- * _raw_spin_trylock - Try spin lock and return a result
+ * __raw_spin_trylock - Try spin lock and return a result
  * @lock: Pointer to the lock variable
  *
- * _raw_spin_trylock() tries to get the lock and returns a result.
+ * __raw_spin_trylock() tries to get the lock and returns a result.
  * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
  */
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int oldval;
 	unsigned long tmp1, tmp2;
 
 	/*
-	 * lock->lock :  =1 : unlock
-	 *            : <=0 : lock
+	 * lock->slock :  =1 : unlock
+	 *             : <=0 : lock
 	 * {
-	 *   oldval = lock->lock; <--+ need atomic operation
-	 *   lock->lock = 0;      <--+
+	 *   oldval = lock->slock; <--+ need atomic operation
+	 *   lock->slock = 0;      <--+
 	 * }
 	 */
 	__asm__ __volatile__ (
-		"# spin_trylock			\n\t"
+		"# __raw_spin_trylock		\n\t"
 		"ldi	%1, #0;			\n\t"
 		"mvfc	%2, psw;		\n\t"
 		"clrpsw	#0x40 -> nop;		\n\t"
@@ -97,7 +59,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
 		"unlock	%1, @%3;		\n\t"
 		"mvtc	%2, psw;		\n\t"
 		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
-		: "r" (&lock->lock)
+		: "r" (&lock->slock)
 		: "memory"
 #ifdef CONFIG_CHIP_M32700_TS1
 		, "r6"
@@ -107,30 +69,22 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
 	return (oldval > 0);
 }
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	unsigned long tmp0, tmp1;
 
-#if SPINLOCK_DEBUG
-	__label__ here;
-here:
-	if (lock->magic != SPINLOCK_MAGIC) {
-		printk("eip: %p\n", &&here);
-		BUG();
-	}
-#endif
 	/*
-	 * lock->lock :  =1 : unlock
-	 *            : <=0 : lock
+	 * lock->slock :  =1 : unlock
+	 *             : <=0 : lock
 	 *
 	 * for ( ; ; ) {
-	 *   lock->lock -= 1;  <-- need atomic operation
-	 *   if (lock->lock == 0) break;
-	 *   for ( ; lock->lock <= 0 ; );
+	 *   lock->slock -= 1;  <-- need atomic operation
+	 *   if (lock->slock == 0) break;
+	 *   for ( ; lock->slock <= 0 ; );
 	 * }
 	 */
 	__asm__ __volatile__ (
-		"# spin_lock			\n\t"
+		"# __raw_spin_lock		\n\t"
 		".fillinsn			\n"
 		"1:				\n\t"
 		"mvfc	%1, psw;		\n\t"
@@ -149,7 +103,7 @@ here:
 		"bra	2b;			\n\t"
 	LOCK_SECTION_END
 		: "=&r" (tmp0), "=&r" (tmp1)
-		: "r" (&lock->lock)
+		: "r" (&lock->slock)
 		: "memory"
 #ifdef CONFIG_CHIP_M32700_TS1
 		, "r6"
@@ -157,14 +111,10 @@ here:
 	);
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-#if SPINLOCK_DEBUG
-	BUG_ON(lock->magic != SPINLOCK_MAGIC);
-	BUG_ON(!spin_is_locked(lock));
-#endif
 	mb();
-	lock->lock = 1;
+	lock->slock = 1;
 }
 
 /*
@@ -176,32 +126,7 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
- */
-typedef struct {
-	volatile int lock;
-#if SPINLOCK_DEBUG
-	unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC	0xdeaf1eed
-
-#if SPINLOCK_DEBUG
-#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT	/* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-#define rwlock_is_locked(x)	((x)->lock != RW_LOCK_BIAS)
-
-/*
+ *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
  *
@@ -210,15 +135,23 @@ typedef struct {
  * Changed to use the same technique as rw semaphores.  See
  * semaphore.h for details.  -ben
  */
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
 
-static inline void _raw_read_lock(rwlock_t *rw)
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define __raw_read_can_lock(x)	((int)(x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1;
 
-#if SPINLOCK_DEBUG
-	BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
 	/*
 	 * rw->lock :  >0 : unlock
 	 *          : <=0 : lock
@@ -266,13 +199,10 @@ static inline void _raw_read_lock(rwlock_t *rw)
 	);
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1, tmp2;
 
-#if SPINLOCK_DEBUG
-	BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
 	/*
 	 * rw->lock :  =RW_LOCK_BIAS_STR : unlock
 	 *          : !=RW_LOCK_BIAS_STR : lock
@@ -322,7 +252,7 @@ static inline void _raw_write_lock(rwlock_t *rw)
 	);
 }
 
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1;
 
@@ -344,7 +274,7 @@ static inline void _raw_read_unlock(rwlock_t *rw)
 	);
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1, tmp2;
 
@@ -368,9 +298,16 @@ static inline void _raw_write_unlock(rwlock_t *rw)
 	);
 }
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
+{
+	atomic_t *count = (atomic_t*)lock;
+	if (atomic_dec_return(count) >= 0)
+		return 1;
+	atomic_inc(count);
+	return 0;
+}
 
-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -379,4 +316,8 @@ static inline int _raw_write_trylock(rwlock_t *lock)
 	return 0;
 }
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif	/* _ASM_M32R_SPINLOCK_H */
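
---

Note on the spinlock protocol. The patch keeps the m32r LOCK/UNLOCK assembly as-is; only the identifiers and the slock field name change. The protocol the comments describe - slock holds 1 when the lock is free and 0 or less when held, trylock atomically swaps a 0 in, and the contended lock path decrements and then spins read-only until the value goes positive - can be sketched in portable C11. This is a minimal user-space sketch, assuming <stdatomic.h> stands in for the LOCK/UNLOCK instructions; the demo_* names are illustrative, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative stand-in for raw_spinlock_t: 1 = unlocked, <= 0 = locked. */
typedef struct { atomic_int slock; } demo_spinlock_t;

/* __raw_spin_trylock analogue: atomically swap 0 into slock; the lock
 * is ours iff the previous value was positive (i.e. it read 1 = free).
 * Like the LOCK/UNLOCK pair above, the 0 is stored unconditionally. */
static inline bool demo_spin_trylock(demo_spinlock_t *lock)
{
	return atomic_exchange_explicit(&lock->slock, 0,
					memory_order_acquire) > 0;
}

/* __raw_spin_lock analogue, following the commented loop: subtract 1;
 * if slock was 1 it is now 0 and we own the lock, otherwise spin
 * read-only until the value goes positive, then try again. */
static inline void demo_spin_lock(demo_spinlock_t *lock)
{
	for (;;) {
		if (atomic_fetch_sub_explicit(&lock->slock, 1,
					      memory_order_acquire) == 1)
			return;		/* 1 -> 0: lock acquired */
		while (atomic_load_explicit(&lock->slock,
					    memory_order_relaxed) <= 0)
			;		/* cpu_relax() on real hardware */
	}
}

/* __raw_spin_unlock analogue: a release store of 1, standing in for
 * the mb(); lock->slock = 1; pair in the patch. */
static inline void demo_spin_unlock(demo_spinlock_t *lock)
{
	atomic_store_explicit(&lock->slock, 1, memory_order_release);
}

int main(void)
{
	demo_spinlock_t l = { 1 };		/* starts unlocked */
	demo_spin_lock(&l);
	bool got = demo_spin_trylock(&l);	/* fails: already held */
	demo_spin_unlock(&l);
	return got ? 1 : 0;			/* exits 0 */
}

Because the unlock path stores 1 rather than incrementing, any stale decrements left behind by contending CPUs are erased at each unlock; that is what keeps the repeated slock -= 1 in the lock path harmless.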
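Note on the rwlock counter. The read-write locks keep the i386 biased-counter scheme: the counter starts at RW_LOCK_BIAS (0x01000000), each reader subtracts 1 and a writer subtracts the whole bias, so the count is positive exactly when a reader may enter and equals RW_LOCK_BIAS exactly when a writer may. The two trylock routines added by the patch translate almost line-for-line into C11 atomics; again a sketch with illustrative demo_* names, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>

#define DEMO_RW_LOCK_BIAS 0x01000000	/* same bias value as the patch */

/* Illustrative stand-in for raw_rwlock_t; initialise the counter
 * to DEMO_RW_LOCK_BIAS for the unlocked state. */
typedef struct { atomic_int lock; } demo_rwlock_t;

/* __raw_read_trylock analogue: optimistically take a reader slot;
 * if the decrement drove the count negative a writer holds the lock,
 * so give the slot back and fail. */
static inline bool demo_read_trylock(demo_rwlock_t *rw)
{
	if (atomic_fetch_sub_explicit(&rw->lock, 1,
				      memory_order_acquire) > 0)
		return true;
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
	return false;
}

/* __raw_write_trylock analogue: claim the whole bias in one subtract;
 * this succeeds only if the counter was exactly DEMO_RW_LOCK_BIAS,
 * i.e. no readers and no writer, otherwise restore it and fail. */
static inline bool demo_write_trylock(demo_rwlock_t *rw)
{
	if (atomic_fetch_sub_explicit(&rw->lock, DEMO_RW_LOCK_BIAS,
				      memory_order_acquire)
	    == DEMO_RW_LOCK_BIAS)
		return true;
	atomic_fetch_add_explicit(&rw->lock, DEMO_RW_LOCK_BIAS,
				  memory_order_relaxed);
	return false;
}

/* Unlock paths: a reader returns its single unit, a writer the bias. */
static inline void demo_read_unlock(demo_rwlock_t *rw)
{
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
}

static inline void demo_write_unlock(demo_rwlock_t *rw)
{
	atomic_fetch_add_explicit(&rw->lock, DEMO_RW_LOCK_BIAS,
				  memory_order_release);
}

int main(void)
{
	demo_rwlock_t rw = { DEMO_RW_LOCK_BIAS };	/* unlocked */
	(void)demo_read_trylock(&rw);	/* a reader gets in... */
	bool w = demo_write_trylock(&rw); /* ...so the writer must fail */
	demo_read_unlock(&rw);
	return w ? 1 : 0;		/* exits 0 */
}

The __raw_read_can_lock() and __raw_write_can_lock() macros in the patch are the unsynchronized previews of these two tests: count > 0 and count == RW_LOCK_BIAS respectively.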