#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <linux/config.h>
#include <linux/compiler.h>

asmlinkage int printk(const char * fmt, ...)
	__attribute__ ((format (printf, 1, 2)));
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 */

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned magic;		/* holds SPINLOCK_MAGIC while the lock is valid */
#endif
} spinlock_t;
#define SPINLOCK_MAGIC	0xdead4ead
#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT	/* */
#endif
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
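/*
 * Informal summary of the lock-word convention used throughout this
 * file: byte 0 of ->lock is 1 while the lock is free; "lock ; decb"
 * takes it to 0 for the owner, and contending CPUs decrement it
 * further, driving it negative. That is why spin_is_locked() below
 * tests a signed char against <= 0.
 */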
/*
 * Simple spin lock operations. There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions: fairness guarantees would have a cost.
 */
#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
#define spin_lock_string \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"js 2f\n" \
	LOCK_SECTION_START("") \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpb $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	LOCK_SECTION_END
/*
 * This works, despite all the confusion: a plain byte store is enough
 * to release the lock (except on PPro SMP or if we are using OOSTORE;
 * see PPro errata 66 and 92).
 */
#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
#define spin_unlock_string \
	"movb $1,%0" \
		:"=m" (lock->lock) : : "memory"
static inline void _raw_spin_unlock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(lock->magic != SPINLOCK_MAGIC);
	BUG_ON(!spin_is_locked(lock));
#endif
	__asm__ __volatile__(
		spin_unlock_string
	);
}

#else
#define spin_unlock_string \
	"xchgb %b0, %1" \
		:"=q" (oldval), "=m" (lock->lock) \
		:"0" (oldval) : "memory"
static inline void _raw_spin_unlock(spinlock_t *lock)
{
	char oldval = 1;
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(lock->magic != SPINLOCK_MAGIC);
	BUG_ON(!spin_is_locked(lock));
#endif
	__asm__ __volatile__(
		spin_unlock_string
	);
}

#endif
static inline int _raw_spin_trylock(spinlock_t *lock)
{
	char oldval;
	__asm__ __volatile__(
		"xchgb %b0,%1"
		:"=q" (oldval), "=m" (lock->lock)
		:"0" (0) : "memory");
	/* the old byte was 1 only if the lock was free and is now ours */
	return oldval > 0;
}
static inline void _raw_spin_lock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	__label__ here;
here:
	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
		printk("eip: %p\n", &&here);
		BUG();
	}
#endif
	__asm__ __volatile__(
		spin_lock_string
		:"=m" (lock->lock) : : "memory");
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned magic;		/* holds RWLOCK_MAGIC while the lock is valid */
#endif
} rwlock_t;
#define RWLOCK_MAGIC	0xdeaf1eed
#ifdef CONFIG_DEBUG_SPINLOCK
#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT	/* */
#endif
#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores. See
 * semaphore.h for details. -ben
 */
/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
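/*
 * Counter states for the scheme described above, assuming the usual
 * RW_LOCK_BIAS of 0x01000000 from <asm/rwlock.h>:
 *
 *	0x01000000	unlocked
 *	0x00ffffff	one reader
 *	0x00fffffe	two readers, and so on
 *	0x00000000	one writer (the whole bias subtracted)
 *	sign bit set	contended; the failed path spins in
 *			__read_lock_failed/__write_lock_failed
 */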
static inline void _raw_read_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	__build_read_lock(rw, "__read_lock_failed");
}
static inline void _raw_write_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	__build_write_lock(rw, "__write_lock_failed");
}
#define _raw_read_unlock(rw)	asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
static inline int _raw_write_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;	/* counter hit 0: no readers or writers held it */
	atomic_add(RW_LOCK_BIAS, count);	/* contended: undo and fail */
	return 0;
}
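/*
 * Reader/writer usage sketch (kept out of the build with #if 0; the
 * demo_* names are hypothetical). The counter values in the comments
 * assume RW_LOCK_BIAS == 0x01000000.
 */
#if 0
static rwlock_t demo_rwlock = RW_LOCK_UNLOCKED;	/* lock == RW_LOCK_BIAS */

static void demo_reader(void)
{
	_raw_read_lock(&demo_rwlock);	/* BIAS -> BIAS - 1 */
	/* ... read shared state; other readers may hold it too ... */
	_raw_read_unlock(&demo_rwlock);	/* BIAS - 1 -> BIAS */
}

static void demo_writer(void)
{
	_raw_write_lock(&demo_rwlock);	/* BIAS -> 0: excludes everyone */
	/* ... exclusive access ... */
	_raw_write_unlock(&demo_rwlock);	/* 0 -> BIAS */
}
#endif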
#endif /* __ASM_SPINLOCK_H */