X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-x86_64%2Fspinlock.h;h=fe484a699cc3609cca9d1c592e17624bd7bd22a8;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=5aeb57a3baad30b7111aa28cc6149cf891394bd5;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git

diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 5aeb57a3b..fe484a699 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -6,126 +6,66 @@
 #include <asm/page.h>
 #include <linux/config.h>
 
-extern int printk(const char * fmt, ...)
-	__attribute__ ((format (printf, 1, 2)));
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC	0xdead4ead
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT	/* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x) \
+		(*(volatile signed int *)(&(x)->slock) <= 0)
 
-#define spin_lock_string \
+#define __raw_spin_lock_string \
 	"\n1:\t" \
-	"lock ; decb %0\n\t" \
+	"lock ; decl %0\n\t" \
 	"js 2f\n" \
 	LOCK_SECTION_START("") \
 	"2:\t" \
 	"rep;nop\n\t" \
-	"cmpb $0,%0\n\t" \
+	"cmpl $0,%0\n\t" \
 	"jle 2b\n\t" \
 	"jmp 1b\n" \
 	LOCK_SECTION_END
 
-/*
- * This works. Despite all the confusion.
- * (except on PPro SMP or if we are using OOSTORE)
- * (PPro errata 66, 92)
- */
-
-#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
-
-#define spin_unlock_string \
-	"movb $1,%0" \
-		:"=m" (lock->lock) : : "memory"
+#define __raw_spin_unlock_string \
+	"movl $1,%0" \
+		:"=m" (lock->slock) : : "memory"
 
-
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(lock->magic != SPINLOCK_MAGIC);
-	assert_spin_locked(lock);
-#endif
 	__asm__ __volatile__(
-		spin_unlock_string
-	);
+		__raw_spin_lock_string
+		:"=m" (lock->slock) : : "memory");
 }
 
-#else
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-#define spin_unlock_string \
-	"xchgb %b0, %1" \
-		:"=q" (oldval), "=m" (lock->lock) \
-		:"0" (oldval) : "memory"
-
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
-	char oldval = 1;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(lock->magic != SPINLOCK_MAGIC);
-	assert_spin_locked(lock);
-#endif
-	__asm__ __volatile__(
-		spin_unlock_string
-	);
-}
-
-#endif
+	int oldval;
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
-{
-	char oldval;
 	__asm__ __volatile__(
-		"xchgb %b0,%1"
-		:"=q" (oldval), "=m" (lock->lock)
+		"xchgl %0,%1"
+		:"=q" (oldval), "=m" (lock->slock)
 		:"0" (0) : "memory");
+
 	return oldval > 0;
 }
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	if (lock->magic != SPINLOCK_MAGIC) {
-		printk("eip: %p\n", __builtin_return_address(0));
-		BUG();
-	}
-#endif
 	__asm__ __volatile__(
-		spin_lock_string
-		:"=m" (lock->lock) : : "memory");
+		__raw_spin_unlock_string
+	);
 }
 
+#define __raw_spin_unlock_wait(lock) \
+	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -136,33 +76,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
- */
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC	0xdeaf1eed
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT	/* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-#define read_can_lock(x) ((int)(x)->lock > 0)
-#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
-/*
+ *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
  *
@@ -170,29 +84,24 @@ typedef struct {
  *
  * Changed to use the same technique as rw semaphores.  See
  * semaphore.h for details.  -ben
+ *
+ * the helpers are in arch/i386/kernel/semaphore.c
  */
 
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
-static inline void _raw_read_lock(rwlock_t *rw)
+#define __raw_read_can_lock(x)		((int)(x)->lock > 0)
+#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
 	__build_read_lock(rw, "__read_lock_failed");
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
 	__build_write_lock(rw, "__write_lock_failed");
 }
 
-#define _raw_read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-
-static inline int _raw_read_trylock(rwlock_t *lock)
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	atomic_dec(count);
@@ -202,7 +111,7 @@ static inline int _raw_read_trylock(rwlock_t *lock)
 	return 0;
 }
 
-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -211,4 +120,15 @@ static inline int _raw_write_trylock(rwlock_t *lock)
 	return 0;
 }
 
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
+				: "=m" (rw->lock) : : "memory");
+}
+
 #endif /* __ASM_SPINLOCK_H */
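
A note for readers tracing the spinlock hunks above: the lock word starts at 1 (unlocked). __raw_spin_lock atomically decrements it and uses the sign flag as the "contended" signal; the out-of-line LOCK_SECTION loop then spins read-only (with the "rep;nop" PAUSE hint) until the word goes positive again and retries the decrement. Unlock is a plain "movl $1" because x86 does not reorder a store against earlier stores, so the critical section cannot leak past it; the xchgb-based unlock variant the patch deletes existed only for the PPro errata and CONFIG_X86_OOSTORE cases, which do not apply on x86-64. Widening decb/cmpb to decl/cmpl also keeps the counter from wrapping when very many CPUs (more than fit in a signed byte) pile onto one lock. Below is a minimal user-space model of that protocol, assuming GCC on x86-64; all demo_* names are hypothetical, and the slow path is kept inline rather than moved to a LOCK_SECTION as the kernel does.

#include <stdio.h>

typedef struct {
	volatile int slock;		/* 1 = unlocked, <= 0 = locked/contended */
} demo_spinlock_t;

#define DEMO_SPIN_LOCK_UNLOCKED	{ 1 }

static inline void demo_spin_lock(demo_spinlock_t *lock)
{
	__asm__ __volatile__(
		"\n1:\t"
		"lock ; decl %0\n\t"	/* 1 -> 0: lock is ours; else negative */
		"jns 3f\n"
		"2:\t"
		"rep;nop\n\t"		/* PAUSE: cheap, SMT-friendly spin */
		"cmpl $0,%0\n\t"
		"jle 2b\n\t"		/* still held: keep watching read-only */
		"jmp 1b\n"		/* looks free: retry the atomic decrement */
		"3:\n\t"
		:"=m" (lock->slock) : : "memory");
}

static inline int demo_spin_trylock(demo_spinlock_t *lock)
{
	int oldval;

	/* xchg is implicitly locked: swap in 0, inspect what was there */
	__asm__ __volatile__(
		"xchgl %0,%1"
		:"=q" (oldval), "=m" (lock->slock)
		:"0" (0) : "memory");

	return oldval > 0;	/* it was 1 (free), so it is now ours */
}

static inline void demo_spin_unlock(demo_spinlock_t *lock)
{
	/* a plain store suffices on x86: stores are not reordered with
	 * earlier stores, so the critical section stays before the release */
	__asm__ __volatile__(
		"movl $1,%0"
		:"=m" (lock->slock) : : "memory");
}

int main(void)
{
	demo_spinlock_t lock = DEMO_SPIN_LOCK_UNLOCKED;

	demo_spin_lock(&lock);
	printf("trylock while held: %d\n", demo_spin_trylock(&lock));	/* 0 */
	demo_spin_unlock(&lock);
	printf("trylock when free:  %d\n", demo_spin_trylock(&lock));	/* 1 */
	demo_spin_unlock(&lock);
	return 0;
}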
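The read-write lock hunks rely on the RW_LOCK_BIAS trick named in the comment: the counter starts at RW_LOCK_BIAS (0x01000000 in asm/rwlock.h), each reader subtracts 1, and a writer subtracts the whole bias, so the count is exactly zero only while a lone writer holds the lock and negative once a writer is contending. That is also why __raw_read_can_lock tests "> 0" and __raw_write_can_lock tests "== RW_LOCK_BIAS". Here is a rough model of the trylock/unlock paths, using GCC __atomic builtins in place of the kernel's atomic_t helpers; demo_* names are hypothetical, and the fetched result is checked directly where the kernel re-reads the counter, which is equivalent for this purpose.

#include <assert.h>

#define DEMO_RW_LOCK_BIAS	0x01000000	/* value of RW_LOCK_BIAS */

typedef struct {
	volatile int lock;	/* starts at DEMO_RW_LOCK_BIAS */
} demo_rwlock_t;

static inline int demo_read_trylock(demo_rwlock_t *rw)
{
	/* a reader takes one unit; the count stays positive while
	 * only readers are inside */
	if (__atomic_sub_fetch(&rw->lock, 1, __ATOMIC_ACQUIRE) >= 0)
		return 1;
	/* went negative: a writer holds the bias; undo our unit */
	__atomic_add_fetch(&rw->lock, 1, __ATOMIC_RELEASE);
	return 0;
}

static inline int demo_write_trylock(demo_rwlock_t *rw)
{
	/* a writer claims the whole bias; the result is zero only if
	 * neither readers nor another writer were present */
	if (__atomic_sub_fetch(&rw->lock, DEMO_RW_LOCK_BIAS,
			       __ATOMIC_ACQUIRE) == 0)
		return 1;
	__atomic_add_fetch(&rw->lock, DEMO_RW_LOCK_BIAS, __ATOMIC_RELEASE);
	return 0;
}

static inline void demo_read_unlock(demo_rwlock_t *rw)
{
	__atomic_add_fetch(&rw->lock, 1, __ATOMIC_RELEASE);	/* "lock ; incl" */
}

static inline void demo_write_unlock(demo_rwlock_t *rw)
{
	/* "lock ; addl $RW_LOCK_BIAS": give the whole bias back */
	__atomic_add_fetch(&rw->lock, DEMO_RW_LOCK_BIAS, __ATOMIC_RELEASE);
}

int main(void)
{
	demo_rwlock_t rw = { DEMO_RW_LOCK_BIAS };

	assert(demo_read_trylock(&rw));		/* first reader gets in */
	assert(demo_read_trylock(&rw));		/* readers share */
	assert(!demo_write_trylock(&rw));	/* writer excluded by readers */
	demo_read_unlock(&rw);
	demo_read_unlock(&rw);
	assert(demo_write_trylock(&rw));	/* lone writer takes the bias */
	assert(!demo_read_trylock(&rw));	/* count negative: readers barred */
	demo_write_unlock(&rw);
	return 0;
}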