diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index d6dbfd469..f9ff31f40 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -15,10 +15,13 @@ asmlinkage int printk(const char * fmt, ...)
  */
 
 typedef struct {
-	volatile unsigned int lock;
+	volatile unsigned int slock;
 #ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned magic;
 #endif
+#ifdef CONFIG_PREEMPT
+	unsigned int break_lock;
+#endif
 } spinlock_t;
 
 #define SPINLOCK_MAGIC	0xdead4ead
@@ -40,26 +43,24 @@ typedef struct {
  * We make no fairness assumptions. They have a cost.
  */
 
-#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
+#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) <= 0)
 #define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
 
 #define spin_lock_string \
 	"\n1:\t" \
 	"lock ; decb %0\n\t" \
-	"js 2f\n" \
-	LOCK_SECTION_START("") \
+	"jns 3f\n" \
 	"2:\t" \
 	"rep;nop\n\t" \
 	"cmpb $0,%0\n\t" \
 	"jle 2b\n\t" \
 	"jmp 1b\n" \
-	LOCK_SECTION_END
+	"3:\n\t"
 
 #define spin_lock_string_flags \
 	"\n1:\t" \
 	"lock ; decb %0\n\t" \
-	"js 2f\n\t" \
-	LOCK_SECTION_START("") \
+	"jns 4f\n\t" \
 	"2:\t" \
 	"testl $0x200, %1\n\t" \
 	"jz 3f\n\t" \
@@ -70,19 +71,19 @@ typedef struct {
 	"jle 3b\n\t" \
 	"cli\n\t" \
 	"jmp 1b\n" \
-	LOCK_SECTION_END
+	"4:\n\t"
 
 /*
  * This works. Despite all the confusion.
  * (except on PPro SMP or if we are using OOSTORE)
  * (PPro errata 66, 92)
  */
- 
+
 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
 
 #define spin_unlock_string \
 	"movb $1,%0" \
-		:"=m" (lock->lock) : : "memory"
+		:"=m" (lock->slock) : : "memory"
 
 
 static inline void _raw_spin_unlock(spinlock_t *lock)
@@ -100,7 +101,7 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
 
 #define spin_unlock_string \
 	"xchgb %b0, %1" \
-		:"=q" (oldval), "=m" (lock->lock) \
+		:"=q" (oldval), "=m" (lock->slock) \
 		:"0" (oldval) : "memory"
 
 static inline void _raw_spin_unlock(spinlock_t *lock)
@@ -122,7 +123,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
 	char oldval;
 	__asm__ __volatile__(
 		"xchgb %b0,%1"
-		:"=q" (oldval), "=m" (lock->lock)
+		:"=q" (oldval), "=m" (lock->slock)
 		:"0" (0) : "memory");
 	return oldval > 0;
 }
@@ -130,31 +131,27 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
 static inline void _raw_spin_lock(spinlock_t *lock)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
-	__label__ here;
-here:
 	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
-		printk("eip: %p\n", &&here);
+		printk("eip: %p\n", __builtin_return_address(0));
 		BUG();
 	}
 #endif
 	__asm__ __volatile__(
 		spin_lock_string
-		:"=m" (lock->lock) : : "memory");
+		:"=m" (lock->slock) : : "memory");
 }
 
 static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
-	__label__ here;
-here:
 	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
-		printk("eip: %p\n", &&here);
+		printk("eip: %p\n", __builtin_return_address(0));
 		BUG();
 	}
 #endif
 	__asm__ __volatile__(
 		spin_lock_string_flags
-		:"=m" (lock->lock) : "r" (flags) : "memory");
+		:"=m" (lock->slock) : "r" (flags) : "memory");
 }
 
 /*
@@ -172,6 +169,9 @@ typedef struct {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned magic;
 #endif
+#ifdef CONFIG_PREEMPT
+	unsigned int break_lock;
+#endif
 } rwlock_t;
 
 #define RWLOCK_MAGIC	0xdeaf1eed
@@ -186,7 +186,17 @@ typedef struct {
 
 #define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
 
-#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define read_can_lock(x) ((int)(x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
 
 /*
  * On x86, we implement read-write locks as a 32-bit counter
@@ -218,6 +228,16 @@ static inline void _raw_write_lock(rwlock_t *rw)
 #define _raw_read_unlock(rw)	asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
 #define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
 
+static inline int _raw_read_trylock(rwlock_t *lock)
+{
+	atomic_t *count = (atomic_t *)lock;
+	atomic_dec(count);
+	if (atomic_read(count) >= 0)
+		return 1;
+	atomic_inc(count);
+	return 0;
+}
+
 static inline int _raw_write_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
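
The rwlock half of this patch leans on the scheme the file's own comment describes: on x86 a read-write lock is a single 32-bit counter that starts at RW_LOCK_BIAS, readers subtract 1, and a writer subtracts the whole bias, which is why the new write_can_lock() is simply a compare against RW_LOCK_BIAS. Below is a minimal user-space sketch of that counter discipline in C11 atomics. It is not the kernel's code: the rw_demo_* names are invented for illustration, the write-side trylock is inferred from write_can_lock() (the final hunk above is truncated before _raw_write_trylock()'s body), and C11's default sequentially-consistent ordering is stronger than what the lock-prefixed instructions strictly need.

#include <stdatomic.h>
#include <stdbool.h>

#define RW_DEMO_BIAS 0x01000000	/* same idea as the kernel's RW_LOCK_BIAS */

typedef struct {
	atomic_int count;	/* holds RW_DEMO_BIAS when the lock is free */
} rw_demo_t;

#define RW_DEMO_UNLOCKED { ATOMIC_VAR_INIT(RW_DEMO_BIAS) }

/* Mirrors _raw_read_trylock() above: take a reader slot, back out on failure. */
static bool rw_demo_read_trylock(rw_demo_t *rw)
{
	if (atomic_fetch_sub(&rw->count, 1) - 1 >= 0)
		return true;			/* counter stayed non-negative: no writer */
	atomic_fetch_add(&rw->count, 1);	/* a writer holds the lock; undo */
	return false;
}

/* Inferred from write_can_lock(): a writer needs the full bias to itself. */
static bool rw_demo_write_trylock(rw_demo_t *rw)
{
	if (atomic_fetch_sub(&rw->count, RW_DEMO_BIAS) - RW_DEMO_BIAS == 0)
		return true;			/* counter was exactly the bias: lock was free */
	atomic_fetch_add(&rw->count, RW_DEMO_BIAS);	/* readers or a writer present; undo */
	return false;
}

static void rw_demo_read_unlock(rw_demo_t *rw)
{
	atomic_fetch_add(&rw->count, 1);	/* like the "lock ; incl" in _raw_read_unlock() */
}

static void rw_demo_write_unlock(rw_demo_t *rw)
{
	atomic_fetch_add(&rw->count, RW_DEMO_BIAS);	/* like _raw_write_unlock() */
}

The single-word layout is what keeps _raw_read_unlock() above a lone "lock ; incl": an unlock never needs to know how many other readers exist, and a contended trylock simply undoes its speculative subtraction rather than spinning.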