#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic locking declarations
 */

#include <linux/config.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>

#include <asm/processor.h>	/* for cpu_relax() */
#include <asm/system.h>

/*
 * Must define these before including other files; inline functions need them.
 */
#define LOCK_SECTION_NAME \
	".text.lock." __stringify(KBUILD_BASENAME)

#define LOCK_SECTION_START(extra) \
	".subsection 1\n\t" \
	extra \
	".ifndef " LOCK_SECTION_NAME "\n\t" \
	LOCK_SECTION_NAME ":\n\t" \
	".endif\n\t"

#define LOCK_SECTION_END \
	".previous\n\t"

#define __lockfunc fastcall __attribute__((section(".spinlock.text")))

/*
 * If CONFIG_SMP is set, pull in the _raw_* definitions
 */
#ifdef CONFIG_SMP
#include <asm/spinlock.h>

int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);

void __lockfunc _spin_lock(spinlock_t *lock)	__acquires(spinlock_t);
void __lockfunc _read_lock(rwlock_t *lock)	__acquires(rwlock_t);
void __lockfunc _write_lock(rwlock_t *lock)	__acquires(rwlock_t);

void __lockfunc _spin_unlock(spinlock_t *lock)	__releases(spinlock_t);
void __lockfunc _read_unlock(rwlock_t *lock)	__releases(rwlock_t);
void __lockfunc _write_unlock(rwlock_t *lock)	__releases(rwlock_t);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)	__acquires(spinlock_t);
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)	__acquires(rwlock_t);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)	__acquires(rwlock_t);

void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(spinlock_t);
void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(spinlock_t);
void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(rwlock_t);
void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(rwlock_t);
void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(rwlock_t);
void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(rwlock_t);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)	__releases(spinlock_t);
void __lockfunc _spin_unlock_irq(spinlock_t *lock)				__releases(spinlock_t);
void __lockfunc _spin_unlock_bh(spinlock_t *lock)				__releases(spinlock_t);
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)	__releases(rwlock_t);
void __lockfunc _read_unlock_irq(rwlock_t *lock)				__releases(rwlock_t);
void __lockfunc _read_unlock_bh(rwlock_t *lock)					__releases(rwlock_t);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)	__releases(rwlock_t);
void __lockfunc _write_unlock_irq(rwlock_t *lock)				__releases(rwlock_t);
void __lockfunc _write_unlock_bh(rwlock_t *lock)				__releases(rwlock_t);

int __lockfunc _spin_trylock_bh(spinlock_t *lock);
int in_lock_functions(unsigned long addr);

#else

#define in_lock_functions(ADDR)		0

#if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK)
# define _atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
# define ATOMIC_DEC_AND_LOCK
#endif

#ifdef CONFIG_DEBUG_SPINLOCK

#define SPINLOCK_MAGIC	0x1D244B3C

typedef struct {
	unsigned long magic;
	volatile unsigned long lock;
	volatile unsigned int babble;
	const char *module;
	char *owner;
	int oline;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__, NULL, 0 }

#define spin_lock_init(x) \
	do { \
		(x)->magic = SPINLOCK_MAGIC; \
		(x)->lock = 0; (x)->babble = 5; \
		(x)->module = __FILE__; \
		(x)->owner = NULL; (x)->oline = 0; \
	} while (0)

#define CHECK_LOCK(x) \
	do { \
		if ((x)->magic != SPINLOCK_MAGIC) { \
			printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \
					__FILE__, __LINE__, (x)); \
		} \
	} while (0)

#define _raw_spin_lock(x) \
	do { \
		CHECK_LOCK(x); \
		if ((x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \
					__FILE__, __LINE__, (x)->module, \
					(x), (x)->owner, (x)->oline); \
		} \
		(x)->lock = 1; \
		(x)->owner = __FILE__; \
		(x)->oline = __LINE__; \
	} while (0)

/* without debugging, spin_is_locked on UP always says
 * FALSE. --> printk if already locked. */
#define spin_is_locked(x) \
	({ \
		CHECK_LOCK(x); \
		if ((x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \
					__FILE__, __LINE__, (x)->module, \
					(x), (x)->owner, (x)->oline); \
		} \
		0; \
	})

/* without debugging, spin_trylock on UP always says
 * TRUE. --> printk if already locked. */
#define _raw_spin_trylock(x) \
	({ \
		CHECK_LOCK(x); \
		if ((x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \
					__FILE__, __LINE__, (x)->module, \
					(x), (x)->owner, (x)->oline); \
		} \
		(x)->lock = 1; \
		(x)->owner = __FILE__; \
		(x)->oline = __LINE__; \
		1; \
	})

#define spin_unlock_wait(x) \
	do { \
		CHECK_LOCK(x); \
		if ((x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \
					__FILE__, __LINE__, (x)->module, (x), \
					(x)->owner, (x)->oline); \
		} \
	} while (0)

#define _raw_spin_unlock(x) \
	do { \
		CHECK_LOCK(x); \
		if (!(x)->lock && (x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_unlock(%s:%p) not locked\n", \
					__FILE__, __LINE__, (x)->module, (x)); \
		} \
		(x)->lock = 0; \
	} while (0)

#else

/*
 * gcc versions before ~2.95 have a nasty bug with empty initializers.
 */
#if (__GNUC__ > 2)
typedef struct { } spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
#else
typedef struct { int gcc_is_buggy; } spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#endif

/*
 * If CONFIG_SMP is unset, declare the _raw_* definitions as nops
 */
#define spin_lock_init(lock)	do { (void)(lock); } while (0)
#define _raw_spin_lock(lock)	do { (void)(lock); } while (0)
#define spin_is_locked(lock)	((void)(lock), 0)
#define _raw_spin_trylock(lock)	(((void)(lock), 1))
#define spin_unlock_wait(lock)	do { (void)(lock); } while (0)
#define _raw_spin_unlock(lock)	do { (void)(lock); } while (0)

#endif /* CONFIG_DEBUG_SPINLOCK */

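/*
 * Illustrative sketch (not part of the original header): with
 * CONFIG_DEBUG_SPINLOCK on a UP build, the debug macros above report
 * suspicious usage via printk() rather than silently succeeding.  The
 * lock name below is hypothetical; the calls would live in some
 * function body.
 *
 *	static spinlock_t dbg_lock = SPIN_LOCK_UNLOCKED;
 *
 *	spin_lock(&dbg_lock);
 *	spin_lock(&dbg_lock);	<-- printk: "... already locked by ..."
 *	spin_unlock(&dbg_lock);
 *	spin_unlock(&dbg_lock);	<-- printk: "... not locked"
 */
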
/* RW spinlocks: No debug version */

#if (__GNUC__ > 2)
typedef struct { } rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { }
#else
typedef struct { int gcc_is_buggy; } rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif

#define rwlock_init(lock)	do { (void)(lock); } while (0)
#define _raw_read_lock(lock)	do { (void)(lock); } while (0)
#define _raw_read_unlock(lock)	do { (void)(lock); } while (0)
#define _raw_write_lock(lock)	do { (void)(lock); } while (0)
#define _raw_write_unlock(lock)	do { (void)(lock); } while (0)
#define _raw_write_trylock(lock) ({ (void)(lock); (1); })

#define _spin_trylock(lock)	({preempt_disable(); _raw_spin_trylock(lock) ? \
				1 : ({preempt_enable(); 0;});})

#define _write_trylock(lock)	({preempt_disable(); _raw_write_trylock(lock) ? \
				1 : ({preempt_enable(); 0;});})

#define _spin_trylock_bh(lock)	({preempt_disable(); local_bh_disable(); \
				_raw_spin_trylock(lock) ? \
				1 : ({preempt_enable(); local_bh_enable(); 0;});})

#define _spin_lock(lock) \
	do { preempt_disable(); _raw_spin_lock(lock); __acquire(lock); } while (0)

#define _write_lock(lock) \
	do { preempt_disable(); _raw_write_lock(lock); __acquire(lock); } while (0)

#define _read_lock(lock) \
	do { preempt_disable(); _raw_read_lock(lock); __acquire(lock); } while (0)

#define _spin_unlock(lock) \
	do { _raw_spin_unlock(lock); preempt_enable(); __release(lock); } while (0)

#define _write_unlock(lock) \
	do { _raw_write_unlock(lock); preempt_enable(); __release(lock); } while (0)

#define _read_unlock(lock) \
	do { _raw_read_unlock(lock); preempt_enable(); __release(lock); } while (0)

#define _spin_lock_irqsave(lock, flags) \
	do { local_irq_save(flags); preempt_disable(); \
	     _raw_spin_lock(lock); __acquire(lock); } while (0)

#define _spin_lock_irq(lock) \
	do { local_irq_disable(); preempt_disable(); \
	     _raw_spin_lock(lock); __acquire(lock); } while (0)

#define _spin_lock_bh(lock) \
	do { local_bh_disable(); preempt_disable(); \
	     _raw_spin_lock(lock); __acquire(lock); } while (0)

#define _read_lock_irqsave(lock, flags) \
	do { local_irq_save(flags); preempt_disable(); \
	     _raw_read_lock(lock); __acquire(lock); } while (0)

#define _read_lock_irq(lock) \
	do { local_irq_disable(); preempt_disable(); \
	     _raw_read_lock(lock); __acquire(lock); } while (0)

#define _read_lock_bh(lock) \
	do { local_bh_disable(); preempt_disable(); \
	     _raw_read_lock(lock); __acquire(lock); } while (0)

#define _write_lock_irqsave(lock, flags) \
	do { local_irq_save(flags); preempt_disable(); \
	     _raw_write_lock(lock); __acquire(lock); } while (0)

#define _write_lock_irq(lock) \
	do { local_irq_disable(); preempt_disable(); \
	     _raw_write_lock(lock); __acquire(lock); } while (0)

#define _write_lock_bh(lock) \
	do { local_bh_disable(); preempt_disable(); \
	     _raw_write_lock(lock); __acquire(lock); } while (0)

#define _spin_unlock_irqrestore(lock, flags) \
	do { _raw_spin_unlock(lock); local_irq_restore(flags); \
	     preempt_enable(); __release(lock); } while (0)

#define _spin_unlock_irq(lock) \
	do { _raw_spin_unlock(lock); local_irq_enable(); \
	     preempt_enable(); __release(lock); } while (0)

#define _spin_unlock_bh(lock) \
	do { _raw_spin_unlock(lock); preempt_enable(); \
	     local_bh_enable(); __release(lock); } while (0)

#define _write_unlock_bh(lock) \
	do { _raw_write_unlock(lock); preempt_enable(); \
	     local_bh_enable(); __release(lock); } while (0)

#define _read_unlock_irqrestore(lock, flags) \
	do { _raw_read_unlock(lock); local_irq_restore(flags); \
	     preempt_enable(); __release(lock); } while (0)

#define _write_unlock_irqrestore(lock, flags) \
	do { _raw_write_unlock(lock); local_irq_restore(flags); \
	     preempt_enable(); __release(lock); } while (0)

#define _read_unlock_irq(lock) \
	do { _raw_read_unlock(lock); local_irq_enable(); \
	     preempt_enable(); __release(lock); } while (0)

#define _read_unlock_bh(lock) \
	do { _raw_read_unlock(lock); preempt_enable(); \
	     local_bh_enable(); __release(lock); } while (0)

#define _write_unlock_irq(lock) \
	do { _raw_write_unlock(lock); local_irq_enable(); \
	     preempt_enable(); __release(lock); } while (0)

#endif /* !SMP */

/*
 * Define the various spin_lock and rw_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set.  The various
 * methods are defined as nops in the case they are not required.
 */
#define spin_trylock(lock)	__cond_lock(_spin_trylock(lock))
#define write_trylock(lock)	__cond_lock(_write_trylock(lock))

/* Where's read_trylock? */

#define spin_lock(lock)		_spin_lock(lock)
#define write_lock(lock)	_write_lock(lock)
#define read_lock(lock)		_read_lock(lock)
#define spin_unlock(lock)	_spin_unlock(lock)
#define write_unlock(lock)	_write_unlock(lock)
#define read_unlock(lock)	_read_unlock(lock)

#ifdef CONFIG_SMP
#define spin_lock_irqsave(lock, flags)	flags = _spin_lock_irqsave(lock)
#define read_lock_irqsave(lock, flags)	flags = _read_lock_irqsave(lock)
#define write_lock_irqsave(lock, flags)	flags = _write_lock_irqsave(lock)
#else
#define spin_lock_irqsave(lock, flags)	_spin_lock_irqsave(lock, flags)
#define read_lock_irqsave(lock, flags)	_read_lock_irqsave(lock, flags)
#define write_lock_irqsave(lock, flags)	_write_lock_irqsave(lock, flags)
#endif

#define spin_lock_irq(lock)		_spin_lock_irq(lock)
#define spin_lock_bh(lock)		_spin_lock_bh(lock)

#define read_lock_irq(lock)		_read_lock_irq(lock)
#define read_lock_bh(lock)		_read_lock_bh(lock)

#define write_lock_irq(lock)		_write_lock_irq(lock)
#define write_lock_bh(lock)		_write_lock_bh(lock)

#define spin_unlock_irqrestore(lock, flags)	_spin_unlock_irqrestore(lock, flags)
#define spin_unlock_irq(lock)			_spin_unlock_irq(lock)
#define spin_unlock_bh(lock)			_spin_unlock_bh(lock)

#define read_unlock_irqrestore(lock, flags)	_read_unlock_irqrestore(lock, flags)
#define read_unlock_irq(lock)			_read_unlock_irq(lock)
#define read_unlock_bh(lock)			_read_unlock_bh(lock)

#define write_unlock_irqrestore(lock, flags)	_write_unlock_irqrestore(lock, flags)
#define write_unlock_irq(lock)			_write_unlock_irq(lock)
#define write_unlock_bh(lock)			_write_unlock_bh(lock)

#define spin_trylock_bh(lock)			__cond_lock(_spin_trylock_bh(lock))

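/*
 * Usage sketch (illustrative only, not part of the original header):
 * a typical pattern for data shared with an interrupt handler.  The
 * names my_lock, my_count, my_update() and my_try_update() are
 * hypothetical.  Note that 'flags' is a plain unsigned long local;
 * spin_lock_irqsave() is a macro and assigns to it directly.
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *	static unsigned long my_count;
 *
 *	static void my_update(void)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&my_lock, flags);
 *		my_count++;
 *		spin_unlock_irqrestore(&my_lock, flags);
 *	}
 *
 *	static int my_try_update(void)
 *	{
 *		if (!spin_trylock(&my_lock))
 *			return 0;
 *		my_count++;
 *		spin_unlock(&my_lock);
 *		return 1;
 *	}
 */
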
#ifdef CONFIG_LOCKMETER
extern void _metered_spin_lock   (spinlock_t *lock);
extern void _metered_spin_unlock (spinlock_t *lock);
extern int  _metered_spin_trylock(spinlock_t *lock);
extern void _metered_read_lock    (rwlock_t *lock);
extern void _metered_read_unlock  (rwlock_t *lock);
extern void _metered_write_lock   (rwlock_t *lock);
extern void _metered_write_unlock (rwlock_t *lock);
extern int  _metered_write_trylock(rwlock_t *lock);
#endif

482 /* "lock on reference count zero" */
483 #ifndef ATOMIC_DEC_AND_LOCK
484 #include <asm/atomic.h>
485 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
488 #define atomic_dec_and_lock(atomic,lock) __cond_lock(_atomic_dec_and_lock(atomic,lock))
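/*
 * Usage sketch (illustrative only): atomic_dec_and_lock() drops a
 * reference count and takes the lock only when the count reaches zero,
 * which is the usual "unlink and free the object under the list lock"
 * pattern.  The structure and names below are hypothetical; real code
 * would also include <linux/list.h> and <linux/slab.h>.
 *
 *	struct my_obj {
 *		atomic_t refcount;
 *		struct list_head list;
 *	};
 *	static spinlock_t my_obj_list_lock = SPIN_LOCK_UNLOCKED;
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_lock(&obj->refcount, &my_obj_list_lock)) {
 *			list_del(&obj->list);
 *			spin_unlock(&my_obj_list_lock);
 *			kfree(obj);
 *		}
 *	}
 */
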
/*
 *  bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop. If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
	 */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (test_and_set_bit(bitnum, addr)) {
		while (test_bit(bitnum, addr))
			cpu_relax();
	}
#endif
	__acquire(bitlock);
}

/*
 * Return true if it was acquired
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	if (test_and_set_bit(bitnum, addr)) {
		preempt_enable();
		return 0;
	}
#endif
	__acquire(bitlock);
	return 1;
}

/*
 *  bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	BUG_ON(!test_bit(bitnum, addr));
	smp_mb__before_clear_bit();
	clear_bit(bitnum, addr);
#endif
	preempt_enable();
	__release(bitlock);
}

/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT
	return preempt_count();
#else
	return 1;
#endif
}

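/*
 * Usage sketch (illustrative only): bit spinlocks protect a structure
 * with a single lock bit inside an existing flags word, trading lock
 * speed for space.  MY_LOCK_BIT and my_flags are hypothetical names;
 * the calls would live in some function body.
 *
 *	#define MY_LOCK_BIT	0
 *	static unsigned long my_flags;
 *
 *	bit_spin_lock(MY_LOCK_BIT, &my_flags);
 *	// ... critical section ...
 *	bit_spin_unlock(MY_LOCK_BIT, &my_flags);
 */
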
#endif /* __LINUX_SPINLOCK_H */