#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/config.h>

extern int printk(const char * fmt, ...)
	__attribute__ ((format (printf, 1, 2)));

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned magic;		/* SPINLOCK_MAGIC while the lock is live */
#endif
} spinlock_t;

#define SPINLOCK_MAGIC	0xdead4ead

#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT	/* */
#endif

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
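
/*
 * Minimal usage sketch (illustrative only): kernel code normally goes
 * through the generic spin_lock()/spin_unlock() wrappers in
 * <linux/spinlock.h>, which expand to the _raw_* operations below.
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	spin_lock(&my_lock);
 *	... critical section, at most one CPU in here ...
 *	spin_unlock(&my_lock);
 *
 * A dynamically allocated lock must be set up with spin_lock_init()
 * before first use.
 */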

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */

#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
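
/*
 * Sketch of the two variants mentioned above (both are provided by
 * <linux/spinlock.h>, not by this file, and my_lock is the example
 * lock from the earlier sketch): use the irq-disabling form whenever
 * the lock can also be taken from interrupt context.
 *
 *	unsigned long flags;
 *
 *	spin_lock(&my_lock);			// IRQs left as they are
 *	spin_unlock(&my_lock);
 *
 *	spin_lock_irqsave(&my_lock, flags);	// IRQs off on this CPU
 *	spin_unlock_irqrestore(&my_lock, flags);
 */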

#define spin_lock_string \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"js 2f\n" \
	LOCK_SECTION_START("") \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpb $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	LOCK_SECTION_END
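
/*
 * What the asm above does, as a C sketch (not the real implementation;
 * atomic_dec_byte() is a hypothetical atomic decrement-and-return on
 * the low byte of the word):
 *
 *	lock byte == 1	-> unlocked
 *	lock byte <= 0	-> locked, possibly with spinners
 *
 *	while (atomic_dec_byte(&lock->lock) < 0) {	// "lock ; decb" + "js"
 *		while (*(volatile signed char *)&lock->lock <= 0)
 *			cpu_relax();			// the "rep;nop" spin
 *	}						// then retry the decrement
 *
 * The contended spin loop sits between LOCK_SECTION_START/END so the
 * uncontended fast path stays short and straight-line.
 */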

/*
 * Releasing the lock with a plain byte store works, despite all the
 * confusion - except on PPro SMP or if we are using OOSTORE.
 * (PPro errata 66, 92)
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

#define spin_unlock_string \
	"movb $1,%0" \
		:"=m" (lock->lock) : : "memory"

static inline void _raw_spin_unlock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(lock->magic != SPINLOCK_MAGIC);
	BUG_ON(!spin_is_locked(lock));
#endif
	__asm__ __volatile__(
		spin_unlock_string
	);
}

#else

#define spin_unlock_string \
	"xchgb %b0, %1" \
		:"=q" (oldval), "=m" (lock->lock) \
		:"0" (oldval) : "memory"

static inline void _raw_spin_unlock(spinlock_t *lock)
{
	char oldval = 1;
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(lock->magic != SPINLOCK_MAGIC);
	BUG_ON(!spin_is_locked(lock));
#endif
	__asm__ __volatile__(
		spin_unlock_string
	);
}

#endif
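
/*
 * Why two unlock variants (a sketch of the reasoning behind the
 * errata note above): on x86, stores become visible in program order,
 * so the plain "movb $1" cannot overtake the stores made inside the
 * critical section.  With OOSTORE (weaker store ordering) or the PPro
 * errata that guarantee is broken, so the release must use "xchgb",
 * which is implicitly locked and serializes the earlier stores.
 */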

static inline int _raw_spin_trylock(spinlock_t *lock)
{
	char oldval;
	__asm__ __volatile__(
		"xchgb %b0,%1"
		:"=q" (oldval), "=m" (lock->lock)
		:"0" (0) : "memory");
	return oldval > 0;
}

static inline void _raw_spin_lock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	__label__ here;
here:
	if (lock->magic != SPINLOCK_MAGIC) {
		/* &&here (gcc labels-as-values) gives an approximate EIP */
		printk("eip: %p\n", &&here);
		BUG();
	}
#endif
	__asm__ __volatile__(
		spin_lock_string
		:"=m" (lock->lock) : : "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks (see the usage sketch below).
 */
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned magic;		/* RWLOCK_MAGIC while the lock is live */
#endif
} rwlock_t;

#define RWLOCK_MAGIC	0xdeaf1eed

#ifdef CONFIG_DEBUG_SPINLOCK
#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT	/* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
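
/*
 * Usage sketch for the irq-mixing rule from the NOTE above (the
 * read_lock()/write_lock_irq() wrappers come from <linux/spinlock.h>;
 * my_rwlock is a hypothetical rwlock_t):
 *
 *	// reader, may run in interrupt context:
 *	read_lock(&my_rwlock);
 *	...
 *	read_unlock(&my_rwlock);
 *
 *	// writer, process context, must keep interrupt readers out:
 *	write_lock_irq(&my_rwlock);
 *	...
 *	write_unlock_irq(&my_rwlock);
 */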

/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */
/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
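
/*
 * Worked example of the bias arithmetic, assuming the usual
 * RW_LOCK_BIAS of 0x01000000 from <asm/rwlock.h>:
 *
 *	unlocked:	lock == 0x01000000
 *	two readers:	lock == 0x00fffffe	(each did lock ; subl $1)
 *	writer holds:	lock == 0x00000000	(subtracted the whole bias)
 *
 * A reader succeeds if its decrement leaves the counter non-negative;
 * a writer succeeds only if subtracting RW_LOCK_BIAS yields exactly
 * zero, i.e. there were no readers and no writer.
 */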

static inline void _raw_read_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	__build_read_lock(rw, "__read_lock_failed");
}

static inline void _raw_write_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	__build_write_lock(rw, "__write_lock_failed");
}

#define _raw_read_unlock(rw)	asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")

static inline int _raw_write_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
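
/*
 * The trylock mirrors the bias arithmetic above: atomic_sub_and_test()
 * returns true only when the subtraction leaves the counter at zero,
 * i.e. the lock was completely uncontended; otherwise the bias is
 * added back and the caller gets 0 without ever spinning.  A
 * hypothetical caller:
 *
 *	if (_raw_write_trylock(&my_rwlock)) {
 *		... exclusive access ...
 *		_raw_write_unlock(&my_rwlock);
 *	}
 */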

#endif /* __ASM_SPINLOCK_H */