X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-x86_64%2Fspinlock.h;h=88bf981e73cf45c084c66d6d60c25e066fa53340;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=3177138f9e319e9dfcefa454462b23010d9b1613;hpb=9213980e6a70d8473e0ffd4b39ab5b6caaba9ff5;p=linux-2.6.git

diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 3177138f9..88bf981e7 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -4,127 +4,89 @@
 #include <asm/atomic.h>
 #include <asm/rwlock.h>
 #include <asm/page.h>
-#include <linux/config.h>
-
-extern int printk(const char * fmt, ...)
-	__attribute__ ((format (printf, 1, 2)));
+#include <asm/processor.h>
 
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned magic;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC	0xdead4ead
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT	/* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#define spin_lock_string \
-	"\n1:\t" \
-	"lock ; decb %0\n\t" \
-	"js 2f\n" \
-	LOCK_SECTION_START("") \
-	"2:\t" \
-	"rep;nop\n\t" \
-	"cmpb $0,%0\n\t" \
-	"jle 2b\n\t" \
-	"jmp 1b\n" \
-	LOCK_SECTION_END
-
-/*
- * This works. Despite all the confusion.
- * (except on PPro SMP or if we are using OOSTORE)
- * (PPro errata 66, 92)
- */
-
-#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
-
-#define spin_unlock_string \
-	"movb $1,%0" \
-		:"=m" (lock->lock) : : "memory"
-
-
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(lock->magic != SPINLOCK_MAGIC);
-	BUG_ON(!spin_is_locked(lock));
-#endif
-	__asm__ __volatile__(
-		spin_unlock_string
-	);
+	return *(volatile signed int *)(&(lock)->slock) <= 0;
 }
 
-#else
-
-#define spin_unlock_string \
-	"xchgb %b0, %1" \
-		:"=q" (oldval), "=m" (lock->lock) \
-		:"0" (oldval) : "memory"
-
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	char oldval = 1;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(lock->magic != SPINLOCK_MAGIC);
-	BUG_ON(!spin_is_locked(lock));
-#endif
-	__asm__ __volatile__(
-		spin_unlock_string
-	);
+	asm volatile(
+		"\n1:\t"
+		LOCK_PREFIX " ; decl %0\n\t"
+		"jns 2f\n"
+		"3:\n"
+		"rep;nop\n\t"
+		"cmpl $0,%0\n\t"
+		"jle 3b\n\t"
+		"jmp 1b\n"
+		"2:\t" : "=m" (lock->slock) : : "memory");
 }
 
+/*
+ * Same as __raw_spin_lock, but reenable interrupts during spinning.
+ */
+#ifndef CONFIG_PROVE_LOCKING
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+	asm volatile(
+		"\n1:\t"
+		LOCK_PREFIX " ; decl %0\n\t"
+		"jns 5f\n"
+		"testl $0x200, %1\n\t"	/* interrupts were disabled? */
+		"jz 4f\n\t"
+		"sti\n"
+		"3:\t"
+		"rep;nop\n\t"
+		"cmpl $0, %0\n\t"
+		"jle 3b\n\t"
+		"cli\n\t"
+		"jmp 1b\n"
+		"4:\t"
+		"rep;nop\n\t"
+		"cmpl $0, %0\n\t"
+		"jg 1b\n\t"
+		"jmp 4b\n"
+		"5:\n\t"
+		: "+m" (lock->slock) : "r" ((unsigned)flags) : "memory");
+}
 #endif
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
-	char oldval;
-	__asm__ __volatile__(
-		"xchgb %b0,%1"
-		:"=q" (oldval), "=m" (lock->lock)
+	int oldval;
+
+	asm volatile(
+		"xchgl %0,%1"
+		:"=q" (oldval), "=m" (lock->slock)
 		:"0" (0) : "memory");
+
 	return oldval > 0;
 }
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	__label__ here;
-here:
-	if (lock->magic != SPINLOCK_MAGIC) {
-printk("eip: %p\n", &&here);
-		BUG();
-	}
-#endif
-	__asm__ __volatile__(
-		spin_lock_string
-		:"=m" (lock->lock) : : "memory");
+	asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory");
 }
 
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	while (__raw_spin_is_locked(lock))
+		cpu_relax();
+}
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -135,59 +97,50 @@ printk("eip: %p\n", &&here);
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
- */
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned magic;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC	0xdeaf1eed
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT	/* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
-
-/*
+ *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores.  See
- * semaphore.h for details.  -ben
  */
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline int __raw_read_can_lock(raw_rwlock_t *lock)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
-	__build_read_lock(rw, "__read_lock_failed");
+	return (int)(lock)->lock > 0;
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline int __raw_write_can_lock(raw_rwlock_t *lock)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
-	__build_write_lock(rw, "__write_lock_failed");
+	return (lock)->lock == RW_LOCK_BIAS;
+}
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
+{
+	asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t"
+		     "jns 1f\n"
+		     "call __read_lock_failed\n"
+		     "1:\n"
+		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
+}
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
+{
+	asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t"
+		     "jz 1f\n"
+		     "\tcall __write_lock_failed\n\t"
+		     "1:\n"
+		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
 }
 
-#define _raw_read_unlock(rw)	asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
+{
+	atomic_t *count = (atomic_t *)lock;
+	atomic_dec(count);
+	if (atomic_read(count) >= 0)
+		return 1;
+	atomic_inc(count);
+	return 0;
+}
 
-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -196,4 +149,19 @@ static inline int _raw_write_trylock(rwlock_t *lock)
 	return 0;
 }
 
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+	asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+	asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
+				: "=m" (rw->lock) : : "memory");
+}
+
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /* __ASM_SPINLOCK_H */
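
Note on the rwlock scheme kept in the comment above: the lock is a 32-bit counter initialised to RW_LOCK_BIAS, with the sign bit acting as the "contended" marker; each reader takes one unit and a writer takes the whole bias. As a reading aid only (not part of this header or of the patch), the following stand-alone C11 sketch mirrors the accounting behind the new __raw_read_trylock()/__raw_write_trylock(); the demo_* names are hypothetical, and the out-of-line __read_lock_failed/__write_lock_failed slow paths that the patched fast paths call into are not modelled.

/* User-space model of the biased rwlock counter; illustrative only, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000			/* same bias value the kernel header relies on */

typedef struct { atomic_int count; } demo_rwlock_t;

/* A reader takes one unit; it succeeds as long as the counter stays non-negative. */
static int demo_read_trylock(demo_rwlock_t *l)
{
	if (atomic_fetch_sub(&l->count, 1) - 1 >= 0)
		return 1;
	atomic_fetch_add(&l->count, 1);		/* undo: a writer holds or wants the lock */
	return 0;
}

/* A writer needs the full bias, i.e. no readers and no other writer present. */
static int demo_write_trylock(demo_rwlock_t *l)
{
	if (atomic_fetch_sub(&l->count, RW_LOCK_BIAS) == RW_LOCK_BIAS)
		return 1;
	atomic_fetch_add(&l->count, RW_LOCK_BIAS);	/* undo on contention */
	return 0;
}

static void demo_read_unlock(demo_rwlock_t *l)  { atomic_fetch_add(&l->count, 1); }
static void demo_write_unlock(demo_rwlock_t *l) { atomic_fetch_add(&l->count, RW_LOCK_BIAS); }

int main(void)
{
	demo_rwlock_t l = { RW_LOCK_BIAS };

	printf("reader 1: %d\n", demo_read_trylock(&l));	/* 1 - readers may share    */
	printf("reader 2: %d\n", demo_read_trylock(&l));	/* 1                        */
	printf("writer:   %d\n", demo_write_trylock(&l));	/* 0 - readers still hold   */
	demo_read_unlock(&l);
	demo_read_unlock(&l);
	printf("writer:   %d\n", demo_write_trylock(&l));	/* 1 - counter back at bias */
	demo_write_unlock(&l);
	return 0;
}

Because a writer subtracts the entire bias, any outstanding reader or writer leaves the counter below RW_LOCK_BIAS and the attempt is rolled back, which is the same atomic_sub_and_test()/atomic_add() dance the patch uses in __raw_write_trylock().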