#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic locking declarations
 */

#include <linux/config.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>

#include <asm/processor.h>      /* for cpu_relax() */
#include <asm/system.h>

/*
 * Must define these before including other files; inline functions need them.
 */
#define LOCK_SECTION_NAME                       \
        ".text.lock." __stringify(KBUILD_BASENAME)

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n\t"

#define LOCK_SECTION_END                        \
        ".previous\n\t"
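
/*
 * Illustrative sketch (not part of this header): an architecture's
 * contended spin_lock path can be emitted out of line with these
 * macros, roughly the way the i386 <asm/spinlock.h> lock string does:
 *
 *	"\n1:\t"
 *	"lock ; decb %0\n\t"
 *	"js 2f\n"
 *	LOCK_SECTION_START("")
 *	"2:\t"
 *	"rep;nop\n\t"
 *	"cmpb $0,%0\n\t"
 *	"jle 2b\n\t"
 *	"jmp 1b\n"
 *	LOCK_SECTION_END
 *
 * The spin loop lands in ".text.lock.<basename>" instead of polluting
 * the I-cache of the uncontended fast path.
 */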

/*
 * If CONFIG_SMP is set, pull in the _raw_* definitions
 */
#ifdef CONFIG_SMP
#include <asm/spinlock.h>

#else

#if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK)
# define atomic_dec_and_lock(atomic, lock) atomic_dec_and_test(atomic)
# define ATOMIC_DEC_AND_LOCK
#endif

#ifdef CONFIG_DEBUG_SPINLOCK

#define SPINLOCK_MAGIC  0x1D244B3C
typedef struct {
        unsigned long magic;
        volatile unsigned long lock;
        volatile unsigned int babble;
        const char *module;
        char *owner;
        int oline;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__, NULL, 0 }

#define spin_lock_init(x) \
        do { \
                (x)->magic = SPINLOCK_MAGIC; \
                (x)->lock = 0; \
                (x)->babble = 5; \
                (x)->module = __FILE__; \
                (x)->owner = NULL; \
                (x)->oline = 0; \
        } while (0)
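
/*
 * Usage sketch ("my_lock" is a hypothetical name): a spinlock is
 * initialized either statically
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 * or at run time, before first use:
 *
 *	spin_lock_init(&my_lock);
 */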

#define CHECK_LOCK(x) \
        do { \
                if ((x)->magic != SPINLOCK_MAGIC) { \
                        printk(KERN_ERR "%s:%d: uninitialized spinlock %p.\n", \
                                        __FILE__, __LINE__, (x)); \
                } \
        } while (0)

#define _raw_spin_lock(x)               \
        do { \
                CHECK_LOCK(x); \
                if ((x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \
                                        __FILE__, __LINE__, (x)->module, \
                                        (x), (x)->owner, (x)->oline); \
                } \
                (x)->lock = 1; \
                (x)->owner = __FILE__; \
                (x)->oline = __LINE__; \
        } while (0)

/* Without debugging, spin_is_locked() on UP always returns false,
 * so the debug version printks if the lock is already held. */
#define spin_is_locked(x) \
        ({ \
                CHECK_LOCK(x); \
                if ((x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \
                                        __FILE__, __LINE__, (x)->module, \
                                        (x), (x)->owner, (x)->oline); \
                } \
                0; \
        })

/* Without debugging, spin_trylock() on UP always succeeds,
 * so the debug version printks if the lock is already held. */
#define _raw_spin_trylock(x) \
        ({ \
                CHECK_LOCK(x); \
                if ((x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \
                                        __FILE__, __LINE__, (x)->module, \
                                        (x), (x)->owner, (x)->oline); \
                } \
                (x)->lock = 1; \
                (x)->owner = __FILE__; \
                (x)->oline = __LINE__; \
                1; \
        })

#define spin_unlock_wait(x)     \
        do { \
                CHECK_LOCK(x); \
                if ((x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \
                                        __FILE__, __LINE__, (x)->module, (x), \
                                        (x)->owner, (x)->oline); \
                } \
        } while (0)

#define _raw_spin_unlock(x) \
        do { \
                CHECK_LOCK(x); \
                if (!(x)->lock && (x)->babble) { \
                        (x)->babble--; \
                        printk("%s:%d: spin_unlock(%s:%p) not locked\n", \
                                        __FILE__, __LINE__, (x)->module, (x)); \
                } \
                (x)->lock = 0; \
        } while (0)
#else
/*
 * gcc versions before ~2.95 have a nasty bug with empty initializers.
 */
#if (__GNUC__ > 2)
  typedef struct { } spinlock_t;
  #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
#else
  typedef struct { int gcc_is_buggy; } spinlock_t;
  #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#endif

/*
 * If CONFIG_SMP is unset, declare the _raw_* definitions as nops
 */
#define spin_lock_init(lock)    do { (void)(lock); } while (0)
#define _raw_spin_lock(lock)    do { (void)(lock); } while (0)
#define spin_is_locked(lock)    ((void)(lock), 0)
#define _raw_spin_trylock(lock) ((void)(lock), 1)
#define spin_unlock_wait(lock)  do { (void)(lock); } while (0)
#define _raw_spin_unlock(lock)  do { (void)(lock); } while (0)
#endif /* CONFIG_DEBUG_SPINLOCK */

/* RW spinlocks: No debug version */

#if (__GNUC__ > 2)
  typedef struct { } rwlock_t;
  #define RW_LOCK_UNLOCKED (rwlock_t) { }
#else
  typedef struct { int gcc_is_buggy; } rwlock_t;
  #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif

#define rwlock_init(lock)       do { (void)(lock); } while (0)
#define _raw_read_lock(lock)    do { (void)(lock); } while (0)
#define _raw_read_unlock(lock)  do { (void)(lock); } while (0)
#define _raw_write_lock(lock)   do { (void)(lock); } while (0)
#define _raw_write_unlock(lock) do { (void)(lock); } while (0)
#define _raw_write_trylock(lock) ({ (void)(lock); (1); })

#endif /* !SMP */

/*
 * Define the various spin_lock and rw_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set; the various
 * methods are defined as nops in the cases where they are not required.
 */
#define spin_trylock(lock)      ({preempt_disable(); _raw_spin_trylock(lock) ? \
                                1 : ({preempt_enable(); 0;});})

#define write_trylock(lock)     ({preempt_disable(); _raw_write_trylock(lock) ? \
                                1 : ({preempt_enable(); 0;});})
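
/*
 * Usage sketch (hypothetical caller): a successful trylock returns 1
 * with preemption disabled; on failure preemption is re-enabled before
 * 0 is returned, so only the success path may unlock:
 *
 *	if (spin_trylock(&my_lock)) {
 *		... critical section ...
 *		spin_unlock(&my_lock);
 *	} else {
 *		... lock busy, take the fallback path ...
 *	}
 */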

/* Where's read_trylock? */

#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
void __preempt_spin_lock(spinlock_t *lock);
void __preempt_write_lock(rwlock_t *lock);

#define spin_lock(lock) \
do { \
        preempt_disable(); \
        if (unlikely(!_raw_spin_trylock(lock))) \
                __preempt_spin_lock(lock); \
} while (0)

#define write_lock(lock) \
do { \
        preempt_disable(); \
        if (unlikely(!_raw_write_trylock(lock))) \
                __preempt_write_lock(lock); \
} while (0)
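
/*
 * Note: under SMP+PREEMPT the fast path is a trylock; only on
 * contention do we fall into the out-of-line __preempt_spin_lock() /
 * __preempt_write_lock() helpers, which may briefly re-enable
 * preemption while spinning so that a long-held lock does not add
 * scheduling latency on this CPU.
 */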

#else
#define spin_lock(lock) \
do { \
        preempt_disable(); \
        _raw_spin_lock(lock); \
} while (0)

#define write_lock(lock) \
do { \
        preempt_disable(); \
        _raw_write_lock(lock); \
} while (0)
#endif

#define read_lock(lock) \
do { \
        preempt_disable(); \
        _raw_read_lock(lock); \
} while (0)

#define spin_unlock(lock) \
do { \
        _raw_spin_unlock(lock); \
        preempt_enable(); \
} while (0)

#define write_unlock(lock) \
do { \
        _raw_write_unlock(lock); \
        preempt_enable(); \
} while (0)

#define read_unlock(lock) \
do { \
        _raw_read_unlock(lock); \
        preempt_enable(); \
} while (0)
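
/*
 * Usage sketch (hypothetical locks): the plain variants pair up, and
 * readers may share a rwlock while writers exclude everyone:
 *
 *	spin_lock(&my_lock);
 *	... exclusive critical section ...
 *	spin_unlock(&my_lock);
 *
 *	read_lock(&my_rwlock);
 *	... readers may run here concurrently ...
 *	read_unlock(&my_rwlock);
 */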

#define spin_lock_irqsave(lock, flags) \
do { \
        local_irq_save(flags); \
        preempt_disable(); \
        _raw_spin_lock(lock); \
} while (0)

#define spin_lock_irq(lock) \
do { \
        local_irq_disable(); \
        preempt_disable(); \
        _raw_spin_lock(lock); \
} while (0)

#define spin_lock_bh(lock) \
do { \
        local_bh_disable(); \
        preempt_disable(); \
        _raw_spin_lock(lock); \
} while (0)

#define read_lock_irqsave(lock, flags) \
do { \
        local_irq_save(flags); \
        preempt_disable(); \
        _raw_read_lock(lock); \
} while (0)

#define read_lock_irq(lock) \
do { \
        local_irq_disable(); \
        preempt_disable(); \
        _raw_read_lock(lock); \
} while (0)

#define read_lock_bh(lock) \
do { \
        local_bh_disable(); \
        preempt_disable(); \
        _raw_read_lock(lock); \
} while (0)

#define write_lock_irqsave(lock, flags) \
do { \
        local_irq_save(flags); \
        preempt_disable(); \
        _raw_write_lock(lock); \
} while (0)

#define write_lock_irq(lock) \
do { \
        local_irq_disable(); \
        preempt_disable(); \
        _raw_write_lock(lock); \
} while (0)

#define write_lock_bh(lock) \
do { \
        local_bh_disable(); \
        preempt_disable(); \
        _raw_write_lock(lock); \
} while (0)

#define spin_unlock_irqrestore(lock, flags) \
do { \
        _raw_spin_unlock(lock); \
        local_irq_restore(flags); \
        preempt_enable(); \
} while (0)

#define _raw_spin_unlock_irqrestore(lock, flags) \
do { \
        _raw_spin_unlock(lock); \
        local_irq_restore(flags); \
} while (0)
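
/*
 * Usage sketch (hypothetical caller): "flags" must be a local unsigned
 * long, and save/restore must pair on the same variable, since it
 * holds this CPU's saved interrupt state:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, local interrupts off ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */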

#define spin_unlock_irq(lock) \
do { \
        _raw_spin_unlock(lock); \
        local_irq_enable(); \
        preempt_enable(); \
} while (0)

#define spin_unlock_bh(lock) \
do { \
        _raw_spin_unlock(lock); \
        preempt_enable(); \
        local_bh_enable(); \
} while (0)

#define read_unlock_irqrestore(lock, flags) \
do { \
        _raw_read_unlock(lock); \
        local_irq_restore(flags); \
        preempt_enable(); \
} while (0)

#define read_unlock_irq(lock) \
do { \
        _raw_read_unlock(lock); \
        local_irq_enable(); \
        preempt_enable(); \
} while (0)

#define read_unlock_bh(lock) \
do { \
        _raw_read_unlock(lock); \
        preempt_enable(); \
        local_bh_enable(); \
} while (0)

#define write_unlock_irqrestore(lock, flags) \
do { \
        _raw_write_unlock(lock); \
        local_irq_restore(flags); \
        preempt_enable(); \
} while (0)

#define write_unlock_irq(lock) \
do { \
        _raw_write_unlock(lock); \
        local_irq_enable(); \
        preempt_enable(); \
} while (0)

#define write_unlock_bh(lock) \
do { \
        _raw_write_unlock(lock); \
        preempt_enable(); \
        local_bh_enable(); \
} while (0)

#define spin_trylock_bh(lock)   ({ local_bh_disable(); preempt_disable(); \
                                _raw_spin_trylock(lock) ? 1 : \
                                ({preempt_enable(); local_bh_enable(); 0;});})

/* "lock on reference count zero" */
#ifndef ATOMIC_DEC_AND_LOCK
#include <asm/atomic.h>
extern int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#endif
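
/*
 * Usage sketch (hypothetical refcounted object and list lock):
 * atomic_dec_and_lock() returns nonzero, with the lock held, only when
 * the count drops to zero, so teardown is serialized against lookups:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->list);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */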

/*
 *  bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
        /*
         * Assuming the lock is uncontended, this never enters
         * the body of the outer loop. If it is contended, the
         * inner loop busy-waits on a non-atomic test, generating
         * less bus traffic, until it looks like a good time to
         * attempt to acquire the lock bit.
         */
        preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        while (test_and_set_bit(bitnum, addr)) {
                while (test_bit(bitnum, addr))
                        cpu_relax();
        }
#endif
}

/*
 * Return true if it was acquired
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        int ret;

        preempt_disable();
        ret = !test_and_set_bit(bitnum, addr);
        if (!ret)
                preempt_enable();
        return ret;
#else
        preempt_disable();
        return 1;
#endif
}

/*
 *  bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        BUG_ON(!test_bit(bitnum, addr));
        smp_mb__before_clear_bit();
        clear_bit(bitnum, addr);
#endif
        preempt_enable();
}
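
/*
 * Usage sketch (hypothetical state word, bit 0 used as the lock), in
 * the style of the buffer-head flag locks:
 *
 *	unsigned long state = 0;
 *
 *	bit_spin_lock(0, &state);
 *	... section guarded by bit 0 of state ...
 *	bit_spin_unlock(0, &state);
 */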

/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT
        return preempt_count();
#else
        return 1;
#endif
}

#endif /* __LINUX_SPINLOCK_H */