#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic locking declarations
 */
#include <linux/config.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>

#include <asm/processor.h>	/* for cpu_relax() */
#include <asm/system.h>
/*
 * Must define these before including other files; inline functions need them
 */
#define LOCK_SECTION_NAME \
	".text.lock." __stringify(KBUILD_BASENAME)

#define LOCK_SECTION_START(extra) \
	".subsection 1\n\t" \
	extra \
	".ifndef " LOCK_SECTION_NAME "\n\t" \
	LOCK_SECTION_NAME ":\n\t" \
	".endif\n\t"

#define LOCK_SECTION_END \
	".previous\n\t"

#define __lockfunc fastcall __attribute__((section(".spinlock.text")))
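/*
 * Illustrative sketch, not part of this header: an architecture's contended
 * slow path can be emitted into the out-of-line lock section so the fast
 * path stays compact in .text.  The instructions below are made-up
 * pseudo-asm; only the LOCK_SECTION_START()/LOCK_SECTION_END bracketing is
 * the point:
 *
 *	__asm__ __volatile__(
 *		"1:	<atomically try to take the lock>\n\t"
 *		"	<branch to 2f if it was already held>\n"
 *		LOCK_SECTION_START("")
 *		"2:	<spin until it looks free, then retry at 1b>\n"
 *		LOCK_SECTION_END
 *		: "+m" (lock->lock) : : "memory");
 *
 * Similarly, __lockfunc places the generic out-of-line lock functions
 * declared below in the .spinlock.text section, which is what
 * in_lock_functions() reports on.
 */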
/*
 * If CONFIG_SMP is set, pull in the _raw_* definitions
 */
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);

void __lockfunc _spin_lock(spinlock_t *lock)	__acquires(spinlock_t);
void __lockfunc _read_lock(rwlock_t *lock)	__acquires(rwlock_t);
void __lockfunc _write_lock(rwlock_t *lock)	__acquires(rwlock_t);

void __lockfunc _spin_unlock(spinlock_t *lock)	__releases(spinlock_t);
void __lockfunc _read_unlock(rwlock_t *lock)	__releases(rwlock_t);
void __lockfunc _write_unlock(rwlock_t *lock)	__releases(rwlock_t);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)	__acquires(spinlock_t);
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)	__acquires(rwlock_t);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)	__acquires(rwlock_t);

void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(spinlock_t);
void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(spinlock_t);
void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(rwlock_t);
void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(rwlock_t);
void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(rwlock_t);
void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(rwlock_t);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)	__releases(spinlock_t);
void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(spinlock_t);
void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(spinlock_t);
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)	__releases(rwlock_t);
void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(rwlock_t);
void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(rwlock_t);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)	__releases(rwlock_t);
void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(rwlock_t);
void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(rwlock_t);

int __lockfunc _spin_trylock_bh(spinlock_t *lock);
int in_lock_functions(unsigned long addr);
#else

#define in_lock_functions(ADDR) 0

#if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK)
# define _atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
# define ATOMIC_DEC_AND_LOCK
#endif

#ifdef CONFIG_DEBUG_SPINLOCK

#define SPINLOCK_MAGIC	0x1D244B3C
typedef struct {
	unsigned long magic;
	volatile unsigned long lock;
	volatile unsigned int babble;
	const char *module;
	char *owner;
	int oline;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0}
#define spin_lock_init(x) \
	do { \
		(x)->magic = SPINLOCK_MAGIC; \
		(x)->lock = 0; (x)->babble = 5; \
		(x)->module = __FILE__; \
		(x)->owner = NULL; (x)->oline = 0; \
	} while (0)
#define CHECK_LOCK(x) \
	do { \
		if ((x)->magic != SPINLOCK_MAGIC) { \
			printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \
					__FILE__, __LINE__, (x)); \
		} \
	} while (0)
#define _raw_spin_lock(x) \
	do { \
		CHECK_LOCK(x); \
		if ((x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \
					__FILE__,__LINE__, (x)->module, \
					(x), (x)->owner, (x)->oline); \
		} \
		(x)->lock = 1; \
		(x)->owner = __FILE__; \
		(x)->oline = __LINE__; \
	} while (0)
/* without debugging, spin_is_locked on UP always says
 * FALSE. --> printk if already locked. */
#define spin_is_locked(x) \
	({ \
		CHECK_LOCK(x); \
		if ((x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \
					__FILE__,__LINE__, (x)->module, \
					(x), (x)->owner, (x)->oline); \
		} \
		0; \
	})
/* without debugging, spin_trylock on UP always says
 * TRUE. --> printk if already locked. */
#define _raw_spin_trylock(x) \
	({ \
		CHECK_LOCK(x); \
		if ((x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \
					__FILE__,__LINE__, (x)->module, \
					(x), (x)->owner, (x)->oline); \
		} \
		(x)->lock = 1; \
		(x)->owner = __FILE__; \
		(x)->oline = __LINE__; \
		1; \
	})
#define spin_unlock_wait(x) \
	do { \
		CHECK_LOCK(x); \
		if ((x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \
					__FILE__,__LINE__, (x)->module, (x), \
					(x)->owner, (x)->oline); \
		} \
	} while (0)
#define _raw_spin_unlock(x) \
	do { \
		CHECK_LOCK(x); \
		if (!(x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_unlock(%s:%p) not locked\n", \
					__FILE__,__LINE__, (x)->module, (x)); \
		} \
		(x)->lock = 0; \
	} while (0)
#else

/*
 * gcc versions before ~2.95 have a nasty bug with empty initializers.
 */
#if (__GNUC__ > 2)
  typedef struct { } spinlock_t;
  #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
#else
  typedef struct { int gcc_is_buggy; } spinlock_t;
  #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#endif
/*
 * If CONFIG_SMP is unset, declare the _raw_* definitions as nops
 */
#define spin_lock_init(lock)	do { (void)(lock); } while(0)
#define _raw_spin_lock(lock)	do { (void)(lock); } while(0)
#define spin_is_locked(lock)	((void)(lock), 0)
#define _raw_spin_trylock(lock)	(((void)(lock), 1))
#define spin_unlock_wait(lock)	do { (void)(lock); } while(0)
#define _raw_spin_unlock(lock)	do { (void)(lock); } while(0)
#endif /* CONFIG_DEBUG_SPINLOCK */
/* RW spinlocks: No debug version */

#if (__GNUC__ > 2)
  typedef struct { } rwlock_t;
  #define RW_LOCK_UNLOCKED (rwlock_t) { }
#else
  typedef struct { int gcc_is_buggy; } rwlock_t;
  #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif
#define rwlock_init(lock)	do { (void)(lock); } while(0)
#define _raw_read_lock(lock)	do { (void)(lock); } while(0)
#define _raw_read_unlock(lock)	do { (void)(lock); } while(0)
#define _raw_write_lock(lock)	do { (void)(lock); } while(0)
#define _raw_write_unlock(lock)	do { (void)(lock); } while(0)
#define _raw_write_trylock(lock) ({ (void)(lock); (1); })

#define _spin_trylock(lock)	({preempt_disable(); _raw_spin_trylock(lock) ? \
				1 : ({preempt_enable(); 0;});})

#define _write_trylock(lock)	({preempt_disable(); _raw_write_trylock(lock) ? \
				1 : ({preempt_enable(); 0;});})

#define _spin_trylock_bh(lock)	({preempt_disable(); local_bh_disable(); \
				_raw_spin_trylock(lock) ? \
				1 : ({preempt_enable(); local_bh_enable(); 0;});})
#define _spin_lock(lock) \
	do { preempt_disable(); _raw_spin_lock(lock); __acquire(lock); } while (0)

#define _write_lock(lock) \
	do { preempt_disable(); _raw_write_lock(lock); __acquire(lock); } while (0)

#define _read_lock(lock) \
	do { preempt_disable(); _raw_read_lock(lock); __acquire(lock); } while (0)

#define _spin_unlock(lock) \
	do { _raw_spin_unlock(lock); preempt_enable(); __release(lock); } while (0)

#define _write_unlock(lock) \
	do { _raw_write_unlock(lock); preempt_enable(); __release(lock); } while (0)

#define _read_unlock(lock) \
	do { _raw_read_unlock(lock); preempt_enable(); __release(lock); } while (0)
#define _spin_lock_irqsave(lock, flags) \
do { \
	local_irq_save(flags); \
	preempt_disable(); _raw_spin_lock(lock); __acquire(lock); \
} while (0)

#define _spin_lock_irq(lock) \
do { \
	local_irq_disable(); \
	preempt_disable(); _raw_spin_lock(lock); __acquire(lock); \
} while (0)

#define _spin_lock_bh(lock) \
do { \
	local_bh_disable(); \
	preempt_disable(); _raw_spin_lock(lock); __acquire(lock); \
} while (0)

#define _read_lock_irqsave(lock, flags) \
do { \
	local_irq_save(flags); \
	preempt_disable(); _raw_read_lock(lock); __acquire(lock); \
} while (0)

#define _read_lock_irq(lock) \
do { \
	local_irq_disable(); \
	preempt_disable(); _raw_read_lock(lock); __acquire(lock); \
} while (0)

#define _read_lock_bh(lock) \
do { \
	local_bh_disable(); \
	preempt_disable(); _raw_read_lock(lock); __acquire(lock); \
} while (0)

#define _write_lock_irqsave(lock, flags) \
do { \
	local_irq_save(flags); \
	preempt_disable(); _raw_write_lock(lock); __acquire(lock); \
} while (0)

#define _write_lock_irq(lock) \
do { \
	local_irq_disable(); \
	preempt_disable(); _raw_write_lock(lock); __acquire(lock); \
} while (0)

#define _write_lock_bh(lock) \
do { \
	local_bh_disable(); \
	preempt_disable(); _raw_write_lock(lock); __acquire(lock); \
} while (0)
#define _spin_unlock_irqrestore(lock, flags) \
do { \
	_raw_spin_unlock(lock); \
	local_irq_restore(flags); preempt_enable(); __release(lock); \
} while (0)

#define _spin_unlock_irq(lock) \
do { \
	_raw_spin_unlock(lock); \
	local_irq_enable(); preempt_enable(); __release(lock); \
} while (0)

#define _spin_unlock_bh(lock) \
do { \
	_raw_spin_unlock(lock); preempt_enable(); local_bh_enable(); __release(lock); \
} while (0)

#define _write_unlock_bh(lock) \
do { \
	_raw_write_unlock(lock); preempt_enable(); local_bh_enable(); __release(lock); \
} while (0)

#define _read_unlock_irqrestore(lock, flags) \
do { \
	_raw_read_unlock(lock); \
	local_irq_restore(flags); preempt_enable(); __release(lock); \
} while (0)

#define _write_unlock_irqrestore(lock, flags) \
do { \
	_raw_write_unlock(lock); \
	local_irq_restore(flags); preempt_enable(); __release(lock); \
} while (0)

#define _read_unlock_irq(lock) \
do { \
	_raw_read_unlock(lock); \
	local_irq_enable(); preempt_enable(); __release(lock); \
} while (0)

#define _read_unlock_bh(lock) \
do { \
	_raw_read_unlock(lock); preempt_enable(); local_bh_enable(); __release(lock); \
} while (0)

#define _write_unlock_irq(lock) \
do { \
	_raw_write_unlock(lock); \
	local_irq_enable(); preempt_enable(); __release(lock); \
} while (0)
#endif /* !SMP */

/*
 * Define the various spin_lock and rw_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
 * methods are defined as nops in the cases where they are not required.
 */
#define spin_trylock(lock)	__cond_lock(_spin_trylock(lock))
#define write_trylock(lock)	__cond_lock(_write_trylock(lock))

/* Where's read_trylock? */
#define spin_lock(lock)		_spin_lock(lock)
#define write_lock(lock)	_write_lock(lock)
#define read_lock(lock)		_read_lock(lock)
#define spin_unlock(lock)	_spin_unlock(lock)
#define write_unlock(lock)	_write_unlock(lock)
#define read_unlock(lock)	_read_unlock(lock)
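/*
 * Hedged usage sketch (my_lock and my_shared_count are invented names, not
 * part of this header): a lock is initialized statically with
 * SPIN_LOCK_UNLOCKED or dynamically with spin_lock_init(), and the basic
 * forms above simply bracket the critical section:
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *	static int my_shared_count;
 *
 *	spin_lock(&my_lock);
 *	my_shared_count++;
 *	spin_unlock(&my_lock);
 *
 * This form is only safe when every user of the lock runs in process
 * context; if interrupt handlers or bottom halves take the same lock, use
 * the _irq/_irqsave or _bh variants defined below.
 */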
#ifdef CONFIG_SMP
#define spin_lock_irqsave(lock, flags)	flags = _spin_lock_irqsave(lock)
#define read_lock_irqsave(lock, flags)	flags = _read_lock_irqsave(lock)
#define write_lock_irqsave(lock, flags)	flags = _write_lock_irqsave(lock)
#else
#define spin_lock_irqsave(lock, flags)	_spin_lock_irqsave(lock, flags)
#define read_lock_irqsave(lock, flags)	_read_lock_irqsave(lock, flags)
#define write_lock_irqsave(lock, flags)	_write_lock_irqsave(lock, flags)
#endif
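/*
 * Sketch of the irqsave form, with hypothetical names (my_lock, my_count):
 * flags must be an unsigned long local in the caller and is passed by name,
 * not by address, because the SMP variant above assigns to it while the UP
 * variant expands local_irq_save(flags) directly:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	my_count++;
 *	spin_unlock_irqrestore(&my_lock, flags);
 *
 * This is the safe choice when the lock is also taken from interrupt
 * context, or when the caller may already be running with IRQs disabled.
 */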
#define spin_lock_irq(lock)	_spin_lock_irq(lock)
#define spin_lock_bh(lock)	_spin_lock_bh(lock)

#define read_lock_irq(lock)	_read_lock_irq(lock)
#define read_lock_bh(lock)	_read_lock_bh(lock)

#define write_lock_irq(lock)	_write_lock_irq(lock)
#define write_lock_bh(lock)	_write_lock_bh(lock)

#define spin_unlock_irqrestore(lock, flags)	_spin_unlock_irqrestore(lock, flags)
#define spin_unlock_irq(lock)	_spin_unlock_irq(lock)
#define spin_unlock_bh(lock)	_spin_unlock_bh(lock)

#define read_unlock_irqrestore(lock, flags)	_read_unlock_irqrestore(lock, flags)
#define read_unlock_irq(lock)	_read_unlock_irq(lock)
#define read_unlock_bh(lock)	_read_unlock_bh(lock)

#define write_unlock_irqrestore(lock, flags)	_write_unlock_irqrestore(lock, flags)
#define write_unlock_irq(lock)	_write_unlock_irq(lock)
#define write_unlock_bh(lock)	_write_unlock_bh(lock)
#define spin_trylock_bh(lock)	__cond_lock(_spin_trylock_bh(lock))

#define spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	_spin_trylock(lock) ? \
	1 : ({local_irq_enable(); 0; }); \
})

#define spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	_spin_trylock(lock) ? \
	1 : ({local_irq_restore(flags); 0;}); \
})
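/*
 * Hedged usage sketch for the trylock family (my_lock and my_do_work() are
 * made-up names): on success a trylock returns nonzero with the lock held,
 * on failure it returns zero without the lock, so the unlock must be
 * conditional:
 *
 *	if (spin_trylock(&my_lock)) {
 *		my_do_work();
 *		spin_unlock(&my_lock);
 *	} else {
 *		... lock is busy: defer, retry later, or fall back ...
 *	}
 *
 * As the macros above show, spin_trylock_irq() and spin_trylock_irqsave()
 * undo their own interrupt disabling on the failure path, so the caller
 * only deals with IRQ state after a successful acquisition.
 */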
#ifdef CONFIG_LOCKMETER
extern void _metered_spin_lock (spinlock_t *lock);
extern void _metered_spin_unlock (spinlock_t *lock);
extern int _metered_spin_trylock(spinlock_t *lock);
extern void _metered_read_lock (rwlock_t *lock);
extern void _metered_read_unlock (rwlock_t *lock);
extern void _metered_write_lock (rwlock_t *lock);
extern void _metered_write_unlock (rwlock_t *lock);
extern int _metered_write_trylock(rwlock_t *lock);
#endif
499 /* "lock on reference count zero" */
500 #ifndef ATOMIC_DEC_AND_LOCK
501 #include <asm/atomic.h>
502 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
505 #define atomic_dec_and_lock(atomic,lock) __cond_lock(_atomic_dec_and_lock(atomic,lock))
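/*
 * Usage sketch for atomic_dec_and_lock(); obj, refcount and obj_list_lock
 * are hypothetical.  It decrements the atomic_t and returns true, with the
 * spinlock held, only if the count dropped to zero: the classic "release
 * the last reference and unlink under the list lock" pattern:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->list);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 *
 * When the count stays above zero the lock is never taken, so that path
 * needs no unlock.
 */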
/*
 *  bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop. If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
	 */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (test_and_set_bit(bitnum, addr)) {
		while (test_bit(bitnum, addr))
			cpu_relax();
	}
#endif
	__acquire(bitlock);
}
/*
 * Return true if it was acquired
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	if (test_and_set_bit(bitnum, addr)) {
		preempt_enable();
		return 0;
	}
#endif
	__acquire(bitlock);
	return 1;
}
/*
 *  bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	BUG_ON(!test_bit(bitnum, addr));
	smp_mb__before_clear_bit();
	clear_bit(bitnum, addr);
#endif
	preempt_enable();
	__release(bitlock);
}
/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT
	return preempt_count();
#else
	return 1;
#endif
}
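/*
 * Hedged usage sketch for the bit spinlocks (MY_LOCK_BIT and my_state are
 * invented names): any bit of any unsigned long can act as the lock, which
 * is useful for structures that already carry a flags word and cannot
 * afford a full spinlock_t, at the price of slower, unfair spinning:
 *
 *	static unsigned long my_state;
 *	#define MY_LOCK_BIT 0
 *
 *	bit_spin_lock(MY_LOCK_BIT, &my_state);
 *	... manipulate whatever MY_LOCK_BIT protects ...
 *	bit_spin_unlock(MY_LOCK_BIT, &my_state);
 *
 * As the comment above notes, prefer a real spinlock_t unless the space
 * saving genuinely matters.
 */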
#endif /* __LINUX_SPINLOCK_H */