/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/smp_lock.h>
#include <linux/module.h>

/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
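
/*
 * Illustrative sketch (not part of the original file): lock_kernel()
 * may be called recursively by the same task, and only the outermost
 * lock/unlock pair actually touches kernel_flag. The legacy_* function
 * names below are made up for the example.
 */
#if 0
static void legacy_inner(void)
{
	lock_kernel();		/* lock_depth 0 -> 1: no spinlock operation */
	/* ... touch BKL-protected legacy state ... */
	unlock_kernel();	/* lock_depth 1 -> 0: kernel_flag still held */
}

static void legacy_outer(void)
{
	lock_kernel();		/* lock_depth -1 -> 0: acquires kernel_flag */
	legacy_inner();
	unlock_kernel();	/* lock_depth 0 -> -1: releases kernel_flag */
}
#endif
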
/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc get_kernel_lock(void)
{
	while (!_raw_spin_trylock(&kernel_flag)) {
		if (test_thread_flag(TIF_NEED_RESCHED))
			return -EAGAIN;
		cpu_relax();
	}
	preempt_disable();
	return 0;
}

void __lockfunc put_kernel_lock(void)
{
	_raw_spin_unlock(&kernel_flag);
	preempt_enable_no_resched();
}
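
/*
 * Hedged sketch of the caller side (the real code lives in
 * kernel/sched.c and <linux/smp_lock.h>, not in this file): around a
 * context switch, the scheduler drops and reacquires the BKL on behalf
 * of a task that holds it, roughly like this. This is a fragment, not
 * compilable as-is.
 */
#if 0
	/* before switching away from prev: */
	if (prev->lock_depth >= 0)
		put_kernel_lock();	/* drop kernel_flag, keep lock_depth */

	/* ... pick next task and context-switch ... */

	/* after switching back to a BKL-holding task: */
	if (current->lock_depth >= 0 && get_kernel_lock() < 0)
		goto need_resched;	/* -EAGAIN: go around the scheduler loop */
#endif
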
/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (ie UP preemption), this all goes away because the
 * _raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			preempt_enable();
			while (spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!_raw_spin_trylock(&kernel_flag));
	}
}
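
/*
 * Note on the loop above: spinning on spin_is_locked() with preemption
 * enabled (rather than sitting in _raw_spin_lock()) keeps the CPU
 * preemptible while the BKL is contended, so a waiting task does not
 * add scheduling latency; the trylock is only retried once the lock
 * looks free.
 */
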
#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
	_raw_spin_lock(&kernel_flag);
}
#endif

static inline void __unlock_kernel(void)
{
	_raw_spin_unlock(&kernel_flag);
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPUs.
 */
void __lockfunc lock_kernel(void)
{
	int depth = current->lock_depth + 1;
	if (likely(!depth))
		__lock_kernel();
	current->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
	BUG_ON(current->lock_depth < 0);
	if (likely(--current->lock_depth < 0))
		__unlock_kernel();
}
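
/*
 * A task's lock_depth starts out at -1 ("not held"; it is set up that
 * way at fork time), so the BUG_ON() above catches an unlock_kernel()
 * without a matching lock_kernel().
 */
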
EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);