ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2
[linux-2.6.git] / include/linux/smp_lock.h
#ifndef __LINUX_SMPLOCK_H
#define __LINUX_SMPLOCK_H

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)

extern spinlock_t kernel_flag;

#define kernel_locked()         (current->lock_depth >= 0)
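
/*
 * current->lock_depth is -1 while the BKL is not held and counts
 * nesting levels from 0 upwards once it is taken; only the -1 <-> 0
 * transitions below touch the underlying spinlock.
 */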

#define get_kernel_lock()       spin_lock(&kernel_flag)
#define put_kernel_lock()       spin_unlock(&kernel_flag)

/*
 * Release global kernel lock.
 */
static inline void release_kernel_lock(struct task_struct *task)
{
        if (unlikely(task->lock_depth >= 0))
                put_kernel_lock();
}

/*
 * Re-acquire the kernel lock
 */
static inline void reacquire_kernel_lock(struct task_struct *task)
{
        if (unlikely(task->lock_depth >= 0))
                get_kernel_lock();
}
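
/*
 * The two helpers above exist for the scheduler: schedule() is expected
 * to drop the BKL for a task that is switching away and take it back
 * when the task runs again, leaving task->lock_depth untouched so the
 * nesting count survives the context switch (see kernel/sched.c).
 */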

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to worry about
 * other CPUs.
 */
static inline void lock_kernel(void)
{
        int depth = current->lock_depth+1;
        if (likely(!depth))
                get_kernel_lock();
        current->lock_depth = depth;
}

/*
 * Drop one nesting level; release the spinlock only when the depth
 * falls back to -1.
 */
static inline void unlock_kernel(void)
{
        BUG_ON(current->lock_depth < 0);
        if (likely(--current->lock_depth < 0))
                put_kernel_lock();
}
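
/*
 * Illustrative trace (hypothetical caller, not part of this header):
 *
 *      lock_kernel();          depth -1 -> 0, spinlock acquired
 *        lock_kernel();        depth  0 -> 1, no locking
 *        unlock_kernel();      depth  1 -> 0, still held
 *      unlock_kernel();        depth  0 -> -1, spinlock released
 */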

#else

#define lock_kernel()                           do { } while(0)
#define unlock_kernel()                         do { } while(0)
#define release_kernel_lock(task)               do { } while(0)
#define reacquire_kernel_lock(task)             do { } while(0)
#define kernel_locked()                         1

#endif /* CONFIG_SMP || CONFIG_PREEMPT */
#endif /* __LINUX_SMPLOCK_H */
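
A minimal userspace sketch of the same depth-counting technique, using
pthreads. Everything in it is illustrative, not kernel code: bkl_model.c,
the __thread lock_depth standing in for task_struct->lock_depth, and the
pthread mutex standing in for kernel_flag are all assumptions, and the
scheduler release/reacquire hooks are omitted.

/* bkl_model.c */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kernel_flag = PTHREAD_MUTEX_INITIALIZER;
/* Per-thread nesting depth, -1 == not held; __thread stands in for
 * the kernel's per-task current->lock_depth. */
static __thread int lock_depth = -1;

static void lock_kernel(void)
{
        int depth = lock_depth + 1;
        if (!depth)                     /* -1 -> 0: first acquisition */
                pthread_mutex_lock(&kernel_flag);
        lock_depth = depth;
}

static void unlock_kernel(void)
{
        assert(lock_depth >= 0);        /* models BUG_ON() */
        if (--lock_depth < 0)           /* 0 -> -1: last release */
                pthread_mutex_unlock(&kernel_flag);
}

static void inner(void)
{
        lock_kernel();                  /* nests: no second mutex_lock */
        printf("inner: depth=%d\n", lock_depth);
        unlock_kernel();
}

int main(void)
{
        lock_kernel();
        inner();
        unlock_kernel();                /* mutex released here */
        printf("done: depth=%d\n", lock_depth);
        return 0;
}

Build and run with "cc bkl_model.c -pthread". Nesting lock_kernel() does
not deadlock because only the -1 <-> 0 transition touches the mutex,
which is exactly the trick the header plays with the spinlock.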