#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic locking declarations
 */

#include <linux/config.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>

#include <asm/processor.h>      /* for cpu_relax() */
#include <asm/system.h>

/*
 * These must be defined before including other files; the inline
 * functions need them.
 */
#define LOCK_SECTION_NAME                       \
        ".text.lock." __stringify(KBUILD_BASENAME)

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc fastcall __attribute__((section(".spinlock.text")))

/*
 * If CONFIG_SMP is set, pull in the _raw_* definitions
 */
#ifdef CONFIG_SMP
#include <asm/spinlock.h>

int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);

void __lockfunc _spin_lock(spinlock_t *lock)    __acquires(spinlock_t);
void __lockfunc _read_lock(rwlock_t *lock)      __acquires(rwlock_t);
void __lockfunc _write_lock(rwlock_t *lock)     __acquires(rwlock_t);

void __lockfunc _spin_unlock(spinlock_t *lock)  __releases(spinlock_t);
void __lockfunc _read_unlock(rwlock_t *lock)    __releases(rwlock_t);
void __lockfunc _write_unlock(rwlock_t *lock)   __releases(rwlock_t);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)   __acquires(spinlock_t);
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)     __acquires(rwlock_t);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)    __acquires(rwlock_t);

void __lockfunc _spin_lock_irq(spinlock_t *lock)        __acquires(spinlock_t);
void __lockfunc _spin_lock_bh(spinlock_t *lock)         __acquires(spinlock_t);
void __lockfunc _read_lock_irq(rwlock_t *lock)          __acquires(rwlock_t);
void __lockfunc _read_lock_bh(rwlock_t *lock)           __acquires(rwlock_t);
void __lockfunc _write_lock_irq(rwlock_t *lock)         __acquires(rwlock_t);
void __lockfunc _write_lock_bh(rwlock_t *lock)          __acquires(rwlock_t);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)  __releases(spinlock_t);
void __lockfunc _spin_unlock_irq(spinlock_t *lock)                              __releases(spinlock_t);
void __lockfunc _spin_unlock_bh(spinlock_t *lock)                               __releases(spinlock_t);
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)    __releases(rwlock_t);
void __lockfunc _read_unlock_irq(rwlock_t *lock)                                __releases(rwlock_t);
void __lockfunc _read_unlock_bh(rwlock_t *lock)                                 __releases(rwlock_t);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)   __releases(rwlock_t);
void __lockfunc _write_unlock_irq(rwlock_t *lock)                               __releases(rwlock_t);
void __lockfunc _write_unlock_bh(rwlock_t *lock)                                __releases(rwlock_t);

int __lockfunc _spin_trylock_bh(spinlock_t *lock);
int in_lock_functions(unsigned long addr);

#else

#define in_lock_functions(ADDR) 0

#if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK)
# define _atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
# define ATOMIC_DEC_AND_LOCK
#endif

#ifdef CONFIG_DEBUG_SPINLOCK

#define SPINLOCK_MAGIC  0x1D244B3C
typedef struct {
        unsigned long magic;
        volatile unsigned long lock;
        volatile unsigned int babble;
        const char *module;
        char *owner;
        int oline;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__, NULL, 0 }

#define spin_lock_init(x) \
        do { \
                (x)->magic = SPINLOCK_MAGIC; \
                (x)->lock = 0; \
                (x)->babble = 5; \
                (x)->module = __FILE__; \
                (x)->owner = NULL; \
                (x)->oline = 0; \
        } while (0)

#define CHECK_LOCK(x) \
        do { \
                if ((x)->magic != SPINLOCK_MAGIC) { \
                        dump_stack(); \
                        panic("%s:%d: operation on uninitialized spinlock %p.\n", \
                                        __FILE__, __LINE__, (x)); \
                } \
        } while (0)

#define _raw_spin_lock(x)               \
        do { \
                CHECK_LOCK(x); \
                if ((x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        dump_stack(); \
                        panic("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \
                                        __FILE__, __LINE__, (x)->module, \
                                        (x), (x)->owner, (x)->oline); \
                } \
                (x)->lock = 1; \
                (x)->owner = __FILE__; \
                (x)->oline = __LINE__; \
        } while (0)

/*
 * Without debugging, spin_is_locked() on UP always returns false,
 * so printk() a warning if the lock is already held.
 */
#define spin_is_locked(x) \
        ({ \
                CHECK_LOCK(x); \
                if ((x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \
                                        __FILE__, __LINE__, (x)->module, \
                                        (x), (x)->owner, (x)->oline); \
                } \
                0; \
        })

/*
 * Without debugging, spin_trylock() on UP always returns true,
 * so printk() a warning if the lock is already held.
 */
#define _raw_spin_trylock(x) \
        ({ \
                CHECK_LOCK(x); \
                if ((x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \
                                        __FILE__, __LINE__, (x)->module, \
                                        (x), (x)->owner, (x)->oline); \
                } \
                (x)->lock = 1; \
                (x)->owner = __FILE__; \
                (x)->oline = __LINE__; \
                1; \
        })

#define spin_unlock_wait(x)     \
        do { \
                CHECK_LOCK(x); \
                if ((x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \
                                        __FILE__, __LINE__, (x)->module, (x), \
                                        (x)->owner, (x)->oline); \
                } \
        } while (0)

#define _raw_spin_unlock(x) \
        do { \
                CHECK_LOCK(x); \
                if (!(x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        dump_stack(); \
                        panic("%s:%d: spin_unlock(%s:%p) not locked\n", \
                                        __FILE__, __LINE__, (x)->module, (x)); \
                } \
                (x)->lock = 0; \
        } while (0)
#else
/*
 * gcc versions before ~2.95 have a nasty bug with empty initializers.
 */
#if (__GNUC__ > 2)
  typedef struct { } spinlock_t;
  #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
#else
  typedef struct { int gcc_is_buggy; } spinlock_t;
  #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#endif

/*
 * If CONFIG_SMP is unset, declare the _raw_* definitions as nops
 */
#define spin_lock_init(lock)    do { (void)(lock); } while (0)
#define _raw_spin_lock(lock)    do { (void)(lock); } while (0)
#define spin_is_locked(lock)    ((void)(lock), 0)
#define _raw_spin_trylock(lock) (((void)(lock), 1))
#define spin_unlock_wait(lock)  do { (void)(lock); } while (0)
#define _raw_spin_unlock(lock)  do { (void)(lock); } while (0)
#endif /* CONFIG_DEBUG_SPINLOCK */

/* RW spinlocks: No debug version */

#if (__GNUC__ > 2)
  typedef struct { } rwlock_t;
  #define RW_LOCK_UNLOCKED (rwlock_t) { }
#else
  typedef struct { int gcc_is_buggy; } rwlock_t;
  #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif

#define rwlock_init(lock)       do { (void)(lock); } while (0)
#define _raw_read_lock(lock)    do { (void)(lock); } while (0)
#define _raw_read_unlock(lock)  do { (void)(lock); } while (0)
#define _raw_write_lock(lock)   do { (void)(lock); } while (0)
#define _raw_write_unlock(lock) do { (void)(lock); } while (0)
#define _raw_write_trylock(lock) ({ (void)(lock); (1); })

#define _spin_trylock(lock)     ({ preempt_disable(); _raw_spin_trylock(lock) ? \
                                1 : ({ preempt_enable(); 0; }); })

#define _write_trylock(lock)    ({ preempt_disable(); _raw_write_trylock(lock) ? \
                                1 : ({ preempt_enable(); 0; }); })

#define _spin_trylock_bh(lock)  ({ preempt_disable(); local_bh_disable(); \
                                _raw_spin_trylock(lock) ? \
                                1 : ({ preempt_enable(); local_bh_enable(); 0; }); })

#define _spin_lock(lock)        \
do { \
        preempt_disable(); \
        _raw_spin_lock(lock); \
        __acquire(lock); \
} while (0)

#define _write_lock(lock) \
do { \
        preempt_disable(); \
        _raw_write_lock(lock); \
        __acquire(lock); \
} while (0)

#define _read_lock(lock)        \
do { \
        preempt_disable(); \
        _raw_read_lock(lock); \
        __acquire(lock); \
} while (0)

#define _spin_unlock(lock) \
do { \
        _raw_spin_unlock(lock); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _write_unlock(lock) \
do { \
        _raw_write_unlock(lock); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _read_unlock(lock) \
do { \
        _raw_read_unlock(lock); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _spin_lock_irqsave(lock, flags) \
do { \
        local_irq_save(flags); \
        preempt_disable(); \
        _raw_spin_lock(lock); \
        __acquire(lock); \
} while (0)

#define _spin_lock_irq(lock) \
do { \
        local_irq_disable(); \
        preempt_disable(); \
        _raw_spin_lock(lock); \
        __acquire(lock); \
} while (0)

#define _spin_lock_bh(lock) \
do { \
        local_bh_disable(); \
        preempt_disable(); \
        _raw_spin_lock(lock); \
        __acquire(lock); \
} while (0)

#define _read_lock_irqsave(lock, flags) \
do { \
        local_irq_save(flags); \
        preempt_disable(); \
        _raw_read_lock(lock); \
        __acquire(lock); \
} while (0)

#define _read_lock_irq(lock) \
do { \
        local_irq_disable(); \
        preempt_disable(); \
        _raw_read_lock(lock); \
        __acquire(lock); \
} while (0)

#define _read_lock_bh(lock) \
do { \
        local_bh_disable(); \
        preempt_disable(); \
        _raw_read_lock(lock); \
        __acquire(lock); \
} while (0)

#define _write_lock_irqsave(lock, flags) \
do { \
        local_irq_save(flags); \
        preempt_disable(); \
        _raw_write_lock(lock); \
        __acquire(lock); \
} while (0)

#define _write_lock_irq(lock) \
do { \
        local_irq_disable(); \
        preempt_disable(); \
        _raw_write_lock(lock); \
        __acquire(lock); \
} while (0)

#define _write_lock_bh(lock) \
do { \
        local_bh_disable(); \
        preempt_disable(); \
        _raw_write_lock(lock); \
        __acquire(lock); \
} while (0)

#define _spin_unlock_irqrestore(lock, flags) \
do { \
        _raw_spin_unlock(lock); \
        local_irq_restore(flags); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _spin_unlock_irq(lock) \
do { \
        _raw_spin_unlock(lock); \
        local_irq_enable(); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _spin_unlock_bh(lock) \
do { \
        _raw_spin_unlock(lock); \
        preempt_enable(); \
        local_bh_enable(); \
        __release(lock); \
} while (0)

#define _write_unlock_bh(lock) \
do { \
        _raw_write_unlock(lock); \
        preempt_enable(); \
        local_bh_enable(); \
        __release(lock); \
} while (0)

#define _read_unlock_irqrestore(lock, flags) \
do { \
        _raw_read_unlock(lock); \
        local_irq_restore(flags); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _write_unlock_irqrestore(lock, flags) \
do { \
        _raw_write_unlock(lock); \
        local_irq_restore(flags); \
        preempt_enable(); \
        __release(lock); \
} while (0)

#define _read_unlock_irq(lock)  \
do { \
        _raw_read_unlock(lock); \
        local_irq_enable();     \
        preempt_enable();       \
        __release(lock); \
} while (0)

#define _read_unlock_bh(lock)   \
do { \
        _raw_read_unlock(lock); \
        local_bh_enable();      \
        preempt_enable();       \
        __release(lock); \
} while (0)

#define _write_unlock_irq(lock) \
do { \
        _raw_write_unlock(lock);        \
        local_irq_enable();     \
        preempt_enable();       \
        __release(lock); \
} while (0)

#endif /* !SMP */

/*
 * Define the various spin_lock and rw_lock methods.  Note that we
 * define these regardless of whether CONFIG_SMP or CONFIG_PREEMPT is
 * set; the various methods are defined as no-ops when they are not
 * required.
 */
#define spin_trylock(lock)      __cond_lock(_spin_trylock(lock))
#define write_trylock(lock)     __cond_lock(_write_trylock(lock))

/* Where's read_trylock? */

#define spin_lock(lock)         _spin_lock(lock)
#define write_lock(lock)        _write_lock(lock)
#define read_lock(lock)         _read_lock(lock)
#define spin_unlock(lock)       _spin_unlock(lock)
#define write_unlock(lock)      _write_unlock(lock)
#define read_unlock(lock)       _read_unlock(lock)

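/*
 * Minimal usage sketch (illustrative only; "my_lock" and
 * "shared_count" are made-up names).  A lock may be initialized
 * statically with SPIN_LOCK_UNLOCKED or at runtime with
 * spin_lock_init():
 *
 *      static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *      static int shared_count;
 *
 *      static void bump_count(void)
 *      {
 *              spin_lock(&my_lock);
 *              shared_count++;         (critical section)
 *              spin_unlock(&my_lock);
 *      }
 */
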
#ifdef CONFIG_SMP
#define spin_lock_irqsave(lock, flags)  flags = _spin_lock_irqsave(lock)
#define read_lock_irqsave(lock, flags)  flags = _read_lock_irqsave(lock)
#define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock)
#else
#define spin_lock_irqsave(lock, flags)  _spin_lock_irqsave(lock, flags)
#define read_lock_irqsave(lock, flags)  _read_lock_irqsave(lock, flags)
#define write_lock_irqsave(lock, flags) _write_lock_irqsave(lock, flags)
#endif
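
/*
 * Sketch of the _irqsave form (hypothetical names): use it when the
 * lock may also be taken from interrupt context.  Note that "flags"
 * is a plain unsigned long in the caller's scope, passed by name to
 * the macro, not by pointer:
 *
 *      static spinlock_t dev_lock = SPIN_LOCK_UNLOCKED;
 *
 *      static void touch_device_state(void)
 *      {
 *              unsigned long flags;
 *
 *              spin_lock_irqsave(&dev_lock, flags);
 *              ... update state shared with the interrupt handler ...
 *              spin_unlock_irqrestore(&dev_lock, flags);
 *      }
 */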

#define spin_lock_irq(lock)             _spin_lock_irq(lock)
#define spin_lock_bh(lock)              _spin_lock_bh(lock)

#define read_lock_irq(lock)             _read_lock_irq(lock)
#define read_lock_bh(lock)              _read_lock_bh(lock)

#define write_lock_irq(lock)            _write_lock_irq(lock)
#define write_lock_bh(lock)             _write_lock_bh(lock)

#define spin_unlock_irqrestore(lock, flags)     _spin_unlock_irqrestore(lock, flags)
#define spin_unlock_irq(lock)           _spin_unlock_irq(lock)
#define spin_unlock_bh(lock)            _spin_unlock_bh(lock)

#define read_unlock_irqrestore(lock, flags)     _read_unlock_irqrestore(lock, flags)
#define read_unlock_irq(lock)                   _read_unlock_irq(lock)
#define read_unlock_bh(lock)                    _read_unlock_bh(lock)

#define write_unlock_irqrestore(lock, flags)    _write_unlock_irqrestore(lock, flags)
#define write_unlock_irq(lock)                  _write_unlock_irq(lock)
#define write_unlock_bh(lock)                   _write_unlock_bh(lock)

#define spin_trylock_bh(lock)                   __cond_lock(_spin_trylock_bh(lock))

#define spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        _spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0; }); \
})

#define spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        _spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})
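
/*
 * Illustrative trylock pattern (made-up names): the trylock variants
 * return nonzero on success, so the caller must handle the failure
 * path itself instead of spinning:
 *
 *      if (spin_trylock(&my_lock)) {
 *              do_work_locked();
 *              spin_unlock(&my_lock);
 *      } else {
 *              defer_work();
 *      }
 */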

#ifdef CONFIG_LOCKMETER
extern void _metered_spin_lock   (spinlock_t *lock);
extern void _metered_spin_unlock (spinlock_t *lock);
extern int  _metered_spin_trylock(spinlock_t *lock);
extern void _metered_read_lock    (rwlock_t *lock);
extern void _metered_read_unlock  (rwlock_t *lock);
extern void _metered_write_lock   (rwlock_t *lock);
extern void _metered_write_unlock (rwlock_t *lock);
extern int  _metered_write_trylock(rwlock_t *lock);
#endif

/* "lock on reference count zero" */
#ifndef ATOMIC_DEC_AND_LOCK
#include <asm/atomic.h>
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#endif

#define atomic_dec_and_lock(atomic, lock) __cond_lock(_atomic_dec_and_lock(atomic, lock))
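
/*
 * Sketch of the classic refcount-drop pattern this helper exists for
 * (hypothetical "obj" structure and "obj_list_lock"): the lock is
 * taken only when the count actually reaches zero, and in that case
 * it is returned held by the caller:
 *
 *      if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
 *              list_del(&obj->list);
 *              spin_unlock(&obj_list_lock);
 *              kfree(obj);
 *      }
 */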

/*
 * bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
        /*
         * Assuming the lock is uncontended, this never enters
         * the body of the outer loop.  If it is contended, then
         * within the inner loop a non-atomic test is used to
         * busywait with less bus contention until it looks like
         * a good time to attempt to acquire the lock bit.
         */
        preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        while (test_and_set_bit(bitnum, addr)) {
                while (test_bit(bitnum, addr))
                        cpu_relax();
        }
#endif
        __acquire(bitlock);
}

/*
 * Return true if it was acquired
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
        preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        if (test_and_set_bit(bitnum, addr)) {
                preempt_enable();
                return 0;
        }
#endif
        __acquire(bitlock);
        return 1;
}

/*
 * bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        BUG_ON(!test_bit(bitnum, addr));
        smp_mb__before_clear_bit();
        clear_bit(bitnum, addr);
#endif
        preempt_enable();
        __release(bitlock);
}
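
/*
 * Usage sketch (assumed names): a bit spinlock packs the lock into a
 * single bit of an existing word, e.g. bit 0 of a flags field, at the
 * cost of slower lock/unlock than a real spinlock_t:
 *
 *      #define MY_LOCK_BIT 0
 *
 *      bit_spin_lock(MY_LOCK_BIT, &entry->flags);
 *      ... entry is now exclusively ours ...
 *      bit_spin_unlock(MY_LOCK_BIT, &entry->flags);
 */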

/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        return test_bit(bitnum, addr);
#elif defined(CONFIG_PREEMPT)
        return preempt_count();
#else
        return 1;
#endif
}

#endif /* __LINUX_SPINLOCK_H */