#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic locking declarations
 */

#include <linux/config.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>

#include <asm/processor.h>      /* for cpu_relax() */
#include <asm/system.h>

/*
 * Must define these before including other files; inline functions need them
 */
#define LOCK_SECTION_NAME                       \
        ".text.lock." __stringify(KBUILD_BASENAME)

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc fastcall __attribute__((section(".spinlock.text")))

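/*
 * Illustrative sketch (not from this file; __lock_contended is a
 * made-up helper): arch code brackets a lock's assembly slow path
 * with these so it gets emitted out of line:
 *
 *      "js 2f\n"
 *      "1:\n"
 *      LOCK_SECTION_START("")
 *      "2:\tcall __lock_contended\n\t"
 *      "jmp 1b\n"
 *      LOCK_SECTION_END
 */
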
/*
 * If CONFIG_SMP is set, pull in the _raw_* definitions
 */
#ifdef CONFIG_SMP
#include <asm/spinlock.h>

int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);

void __lockfunc _spin_lock(spinlock_t *lock)    __acquires(spinlock_t);
void __lockfunc _read_lock(rwlock_t *lock)      __acquires(rwlock_t);
void __lockfunc _write_lock(rwlock_t *lock)     __acquires(rwlock_t);

void __lockfunc _spin_unlock(spinlock_t *lock)  __releases(spinlock_t);
void __lockfunc _read_unlock(rwlock_t *lock)    __releases(rwlock_t);
void __lockfunc _write_unlock(rwlock_t *lock)   __releases(rwlock_t);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)   __acquires(spinlock_t);
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)     __acquires(rwlock_t);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)    __acquires(rwlock_t);

void __lockfunc _spin_lock_irq(spinlock_t *lock)        __acquires(spinlock_t);
void __lockfunc _spin_lock_bh(spinlock_t *lock)         __acquires(spinlock_t);
void __lockfunc _read_lock_irq(rwlock_t *lock)          __acquires(rwlock_t);
void __lockfunc _read_lock_bh(rwlock_t *lock)           __acquires(rwlock_t);
void __lockfunc _write_lock_irq(rwlock_t *lock)         __acquires(rwlock_t);
void __lockfunc _write_lock_bh(rwlock_t *lock)          __acquires(rwlock_t);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)  __releases(spinlock_t);
void __lockfunc _spin_unlock_irq(spinlock_t *lock)                              __releases(spinlock_t);
void __lockfunc _spin_unlock_bh(spinlock_t *lock)                               __releases(spinlock_t);
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)    __releases(rwlock_t);
void __lockfunc _read_unlock_irq(rwlock_t *lock)                                __releases(rwlock_t);
void __lockfunc _read_unlock_bh(rwlock_t *lock)                                 __releases(rwlock_t);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)   __releases(rwlock_t);
void __lockfunc _write_unlock_irq(rwlock_t *lock)                               __releases(rwlock_t);
void __lockfunc _write_unlock_bh(rwlock_t *lock)                                __releases(rwlock_t);

int __lockfunc _spin_trylock_bh(spinlock_t *lock);
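/* Returns non-zero if addr lies in the lock-functions text section (.spinlock.text). */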
int in_lock_functions(unsigned long addr);

#else

#define in_lock_functions(ADDR) 0

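/*
 * On UP without CONFIG_PREEMPT or CONFIG_DEBUG_SPINLOCK, the spinlock
 * half is a no-op, so atomic_dec_and_lock() reduces to atomic_dec_and_test().
 */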
#if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK)
# define _atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
# define ATOMIC_DEC_AND_LOCK
#endif

#ifdef CONFIG_DEBUG_SPINLOCK

#define SPINLOCK_MAGIC  0x1D244B3C
typedef struct {
        unsigned long magic;
        volatile unsigned long lock;
        volatile unsigned int babble;
        const char *module;
        char *owner;
        int oline;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__, NULL, 0 }

#define spin_lock_init(x) \
        do { \
                (x)->magic = SPINLOCK_MAGIC; \
                (x)->lock = 0; \
                (x)->babble = 5; \
                (x)->module = __FILE__; \
                (x)->owner = NULL; \
                (x)->oline = 0; \
        } while (0)

#define CHECK_LOCK(x) \
        do { \
                if ((x)->magic != SPINLOCK_MAGIC) { \
                        printk(KERN_ERR "%s:%d: uninitialized spinlock %p.\n", \
                                        __FILE__, __LINE__, (x)); \
                } \
        } while (0)

#define _raw_spin_lock(x)               \
        do { \
                CHECK_LOCK(x); \
                if ((x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \
                                        __FILE__, __LINE__, (x)->module, \
                                        (x), (x)->owner, (x)->oline); \
                } \
                (x)->lock = 1; \
                (x)->owner = __FILE__; \
                (x)->oline = __LINE__; \
        } while (0)

/* Without debugging, spin_is_locked() on UP always returns false,
 * so printk if the lock is already held. */
#define spin_is_locked(x) \
        ({ \
                CHECK_LOCK(x); \
                if ((x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \
                                        __FILE__, __LINE__, (x)->module, \
                                        (x), (x)->owner, (x)->oline); \
                } \
                0; \
        })

/* Without debugging, spin_trylock() on UP always succeeds,
 * so printk if the lock is already held. */
#define _raw_spin_trylock(x) \
        ({ \
                CHECK_LOCK(x); \
                if ((x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \
                                        __FILE__, __LINE__, (x)->module, \
                                        (x), (x)->owner, (x)->oline); \
                } \
                (x)->lock = 1; \
                (x)->owner = __FILE__; \
                (x)->oline = __LINE__; \
                1; \
        })

#define spin_unlock_wait(x)     \
        do { \
                CHECK_LOCK(x); \
                if ((x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \
                                        __FILE__, __LINE__, (x)->module, (x), \
                                        (x)->owner, (x)->oline); \
                } \
        } while (0)

#define _raw_spin_unlock(x) \
        do { \
                CHECK_LOCK(x); \
                if (!(x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        printk("%s:%d: spin_unlock(%s:%p) not locked\n", \
                                        __FILE__, __LINE__, (x)->module, (x)); \
                } \
                (x)->lock = 0; \
        } while (0)
#else
/*
 * gcc versions before ~2.95 have a nasty bug with empty initializers.
 */
#if (__GNUC__ > 2)
  typedef struct { } spinlock_t;
  #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
#else
  typedef struct { int gcc_is_buggy; } spinlock_t;
  #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#endif

/*
 * If CONFIG_SMP is unset, the _raw_* primitives are no-ops
 */
#define spin_lock_init(lock)    do { (void)(lock); } while (0)
#define _raw_spin_lock(lock)    do { (void)(lock); } while (0)
#define spin_is_locked(lock)    ((void)(lock), 0)
#define _raw_spin_trylock(lock) ((void)(lock), 1)
#define spin_unlock_wait(lock)  do { (void)(lock); } while (0)
#define _raw_spin_unlock(lock)  do { (void)(lock); } while (0)
#endif /* CONFIG_DEBUG_SPINLOCK */

/* RW spinlocks: No debug version */

#if (__GNUC__ > 2)
  typedef struct { } rwlock_t;
  #define RW_LOCK_UNLOCKED (rwlock_t) { }
#else
  typedef struct { int gcc_is_buggy; } rwlock_t;
  #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif

#define rwlock_init(lock)       do { (void)(lock); } while (0)
#define _raw_read_lock(lock)    do { (void)(lock); } while (0)
#define _raw_read_unlock(lock)  do { (void)(lock); } while (0)
#define _raw_write_lock(lock)   do { (void)(lock); } while (0)
#define _raw_write_unlock(lock) do { (void)(lock); } while (0)
#define _raw_write_trylock(lock) ({ (void)(lock); 1; })

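/*
 * The _-prefixed wrappers below add preemption (and, in the variants
 * further down, IRQ/BH) handling around the _raw_ ops; the trylock
 * variants undo it again on failure.
 */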
#define _spin_trylock(lock)     ({preempt_disable(); _raw_spin_trylock(lock) ? \
                                1 : ({preempt_enable(); 0;});})

#define _write_trylock(lock)    ({preempt_disable(); _raw_write_trylock(lock) ? \
                                1 : ({preempt_enable(); 0;});})

#define _spin_trylock_bh(lock)  ({preempt_disable(); local_bh_disable(); \
                                _raw_spin_trylock(lock) ? \
                                1 : ({preempt_enable(); local_bh_enable(); 0;});})

#define _spin_lock(lock)        \
do { \
        preempt_disable(); \
        _raw_spin_lock(lock); \
        __acquire(lock); \
} while (0)

#define _write_lock(lock) \
do { \
        preempt_disable(); \
        _raw_write_lock(lock); \
        __acquire(lock); \
} while (0)

#define _read_lock(lock)        \
do { \
        preempt_disable(); \
        _raw_read_lock(lock); \
        __acquire(lock); \
} while (0)

#define _spin_unlock(lock) \
do { \
        _raw_spin_unlock(lock); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _write_unlock(lock) \
do { \
        _raw_write_unlock(lock); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _read_unlock(lock) \
do { \
        _raw_read_unlock(lock); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _spin_lock_irqsave(lock, flags) \
do { \
        local_irq_save(flags); \
        preempt_disable(); \
        _raw_spin_lock(lock); \
        __acquire(lock); \
} while (0)

#define _spin_lock_irq(lock) \
do { \
        local_irq_disable(); \
        preempt_disable(); \
        _raw_spin_lock(lock); \
        __acquire(lock); \
} while (0)

#define _spin_lock_bh(lock) \
do { \
        local_bh_disable(); \
        preempt_disable(); \
        _raw_spin_lock(lock); \
        __acquire(lock); \
} while (0)

#define _read_lock_irqsave(lock, flags) \
do { \
        local_irq_save(flags); \
        preempt_disable(); \
        _raw_read_lock(lock); \
        __acquire(lock); \
} while (0)

#define _read_lock_irq(lock) \
do { \
        local_irq_disable(); \
        preempt_disable(); \
        _raw_read_lock(lock); \
        __acquire(lock); \
} while (0)

#define _read_lock_bh(lock) \
do { \
        local_bh_disable(); \
        preempt_disable(); \
        _raw_read_lock(lock); \
        __acquire(lock); \
} while (0)

#define _write_lock_irqsave(lock, flags) \
do { \
        local_irq_save(flags); \
        preempt_disable(); \
        _raw_write_lock(lock); \
        __acquire(lock); \
} while (0)

#define _write_lock_irq(lock) \
do { \
        local_irq_disable(); \
        preempt_disable(); \
        _raw_write_lock(lock); \
        __acquire(lock); \
} while (0)

#define _write_lock_bh(lock) \
do { \
        local_bh_disable(); \
        preempt_disable(); \
        _raw_write_lock(lock); \
        __acquire(lock); \
} while (0)

#define _spin_unlock_irqrestore(lock, flags) \
do { \
        _raw_spin_unlock(lock); \
        local_irq_restore(flags); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _spin_unlock_irq(lock) \
do { \
        _raw_spin_unlock(lock); \
        local_irq_enable(); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _spin_unlock_bh(lock) \
do { \
        _raw_spin_unlock(lock); \
        preempt_enable(); \
        local_bh_enable(); \
        __release(lock); \
} while (0)

#define _write_unlock_bh(lock) \
do { \
        _raw_write_unlock(lock); \
        preempt_enable(); \
        local_bh_enable(); \
        __release(lock); \
} while (0)

#define _read_unlock_irqrestore(lock, flags) \
do { \
        _raw_read_unlock(lock); \
        local_irq_restore(flags); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _write_unlock_irqrestore(lock, flags) \
do { \
        _raw_write_unlock(lock); \
        local_irq_restore(flags); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _read_unlock_irq(lock)  \
do { \
        _raw_read_unlock(lock); \
        local_irq_enable(); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _read_unlock_bh(lock)   \
do { \
        _raw_read_unlock(lock); \
        local_bh_enable(); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _write_unlock_irq(lock) \
do { \
        _raw_write_unlock(lock); \
        local_irq_enable(); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#endif /* !SMP */

/*
 * Define the various spin_lock and rw_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set; the various
 * methods are defined as no-ops when they are not required.
 */
#define spin_trylock(lock)      __cond_lock(_spin_trylock(lock))
#define write_trylock(lock)     __cond_lock(_write_trylock(lock))

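/*
 * Illustrative usage (not part of this header; my_lock is a made-up
 * example lock):
 *
 *      if (spin_trylock(&my_lock)) {
 *              ... critical section ...
 *              spin_unlock(&my_lock);
 *      } else {
 *              ... lock was busy, fall back ...
 *      }
 */
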
/* Where's read_trylock? */

#define spin_lock(lock)         _spin_lock(lock)
#define write_lock(lock)        _write_lock(lock)
#define read_lock(lock)         _read_lock(lock)
#define spin_unlock(lock)       _spin_unlock(lock)
#define write_unlock(lock)      _write_unlock(lock)
#define read_unlock(lock)       _read_unlock(lock)

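/*
 * Illustrative usage (not part of this header; my_lock is a made-up
 * example lock):
 *
 *      static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *      spin_lock(&my_lock);
 *      ... critical section ...
 *      spin_unlock(&my_lock);
 */
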
#ifdef CONFIG_SMP
#define spin_lock_irqsave(lock, flags)  flags = _spin_lock_irqsave(lock)
#define read_lock_irqsave(lock, flags)  flags = _read_lock_irqsave(lock)
#define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock)
#else
#define spin_lock_irqsave(lock, flags)  _spin_lock_irqsave(lock, flags)
#define read_lock_irqsave(lock, flags)  _read_lock_irqsave(lock, flags)
#define write_lock_irqsave(lock, flags) _write_lock_irqsave(lock, flags)
#endif
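
/*
 * Illustrative usage (not part of this header; my_lock is a made-up
 * example lock). flags must be an unsigned long in the caller:
 *
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&my_lock, flags);
 *      ... critical section, local interrupts off ...
 *      spin_unlock_irqrestore(&my_lock, flags);
 */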

#define spin_lock_irq(lock)             _spin_lock_irq(lock)
#define spin_lock_bh(lock)              _spin_lock_bh(lock)

#define read_lock_irq(lock)             _read_lock_irq(lock)
#define read_lock_bh(lock)              _read_lock_bh(lock)

#define write_lock_irq(lock)            _write_lock_irq(lock)
#define write_lock_bh(lock)             _write_lock_bh(lock)

#define spin_unlock_irqrestore(lock, flags)     _spin_unlock_irqrestore(lock, flags)
#define spin_unlock_irq(lock)           _spin_unlock_irq(lock)
#define spin_unlock_bh(lock)            _spin_unlock_bh(lock)

#define read_unlock_irqrestore(lock, flags)     _read_unlock_irqrestore(lock, flags)
#define read_unlock_irq(lock)                   _read_unlock_irq(lock)
#define read_unlock_bh(lock)                    _read_unlock_bh(lock)

#define write_unlock_irqrestore(lock, flags)    _write_unlock_irqrestore(lock, flags)
#define write_unlock_irq(lock)                  _write_unlock_irq(lock)
#define write_unlock_bh(lock)                   _write_unlock_bh(lock)

#define spin_trylock_bh(lock)                   __cond_lock(_spin_trylock_bh(lock))

#ifdef CONFIG_LOCKMETER
extern void _metered_spin_lock   (spinlock_t *lock);
extern void _metered_spin_unlock (spinlock_t *lock);
extern int  _metered_spin_trylock(spinlock_t *lock);
extern void _metered_read_lock    (rwlock_t *lock);
extern void _metered_read_unlock  (rwlock_t *lock);
extern void _metered_write_lock   (rwlock_t *lock);
extern void _metered_write_unlock (rwlock_t *lock);
extern int  _metered_write_trylock(rwlock_t *lock);
#endif

/* "lock on reference count zero" */
#ifndef ATOMIC_DEC_AND_LOCK
#include <asm/atomic.h>
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#endif

#define atomic_dec_and_lock(atomic,lock) __cond_lock(_atomic_dec_and_lock(atomic,lock))

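/*
 * Illustrative usage (not part of this header; obj, obj_list_lock and
 * the list/kfree teardown are made-up): drop a reference, and only if
 * it hit zero take the lock to tear the object down.
 *
 *      if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *              list_del(&obj->list);
 *              spin_unlock(&obj_list_lock);
 *              kfree(obj);
 *      }
 */
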
/*
 *  bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
        /*
         * Assuming the lock is uncontended, this never enters
         * the body of the outer loop. If it is contended, then
         * within the inner loop a non-atomic test busywaits with
         * less bus contention until it looks like a good time to
         * attempt to acquire the lock bit.
         */
        preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        while (test_and_set_bit(bitnum, addr)) {
                while (test_bit(bitnum, addr))
                        cpu_relax();
        }
#endif
        __acquire(bitlock);
}

/*
 * Return true if the lock was acquired
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
        preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        if (test_and_set_bit(bitnum, addr)) {
                preempt_enable();
                return 0;
        }
#endif
        __acquire(bitlock);
        return 1;
}

/*
 *  bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        BUG_ON(!test_bit(bitnum, addr));
        smp_mb__before_clear_bit();
        clear_bit(bitnum, addr);
#endif
        preempt_enable();
        __release(bitlock);
}
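
/*
 * Illustrative usage (not part of this header; MY_LOCK_BIT and obj are
 * made-up): use one bit of a flags word as a lock.
 *
 *      bit_spin_lock(MY_LOCK_BIT, &obj->flags);
 *      ... critical section ...
 *      bit_spin_unlock(MY_LOCK_BIT, &obj->flags);
 */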

/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT
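        /*
         * UP + PREEMPT: holders run with preemption disabled, so a
         * non-zero preempt_count() is the closest available answer.
         */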
        return preempt_count();
#else
        return 1;
#endif
}

#endif /* __LINUX_SPINLOCK_H */