X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Fkernel_lock.c;h=e0fdfddb406ec3975673f150f46bd3302bc08599;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=48dc05a13963da616818462a5cbd40aa61a976d2;hpb=87fc8d1bb10cd459024a742c6a10961fefcef18f;p=linux-2.6.git

diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 48dc05a13..e0fdfddb4 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -7,18 +7,98 @@
  */
 #include <linux/smp_lock.h>
 #include <linux/module.h>
+#include <linux/kallsyms.h>
+
+#ifdef CONFIG_PREEMPT_BKL
+/*
+ * The 'big kernel semaphore'
+ *
+ * This mutex is taken and released recursively by lock_kernel()
+ * and unlock_kernel(). It is transparently dropped and reacquired
+ * over schedule(). It is used to protect legacy code that hasn't
+ * been migrated to a proper locking design yet.
+ *
+ * Note: code locked by this semaphore will only be serialized against
+ * other code using the same locking facility. The code guarantees that
+ * the task remains on the same CPU.
+ *
+ * Don't use in new code.
+ */
+static DECLARE_MUTEX(kernel_sem);
+
+/*
+ * Re-acquire the kernel semaphore.
+ *
+ * This function is called with preemption off.
+ *
+ * We are executing in schedule() so the code must be extremely careful
+ * about recursion, both due to the down() and due to the enabling of
+ * preemption. schedule() will re-check the preemption flag after
+ * reacquiring the semaphore.
+ */
+int __lockfunc __reacquire_kernel_lock(void)
+{
+	struct task_struct *task = current;
+	int saved_lock_depth = task->lock_depth;
+
+	BUG_ON(saved_lock_depth < 0);
+
+	task->lock_depth = -1;
+	preempt_enable_no_resched();
+
+	down(&kernel_sem);
+
+	preempt_disable();
+	task->lock_depth = saved_lock_depth;
+
+	return 0;
+}
+
+void __lockfunc __release_kernel_lock(void)
+{
+	up(&kernel_sem);
+}
+
+/*
+ * Getting the big kernel semaphore.
+ */
+void __lockfunc lock_kernel(void)
+{
+	struct task_struct *task = current;
+	int depth = task->lock_depth + 1;
+
+	if (likely(!depth))
+		/*
+		 * No recursion worries - we set up lock_depth _after_
+		 */
+		down(&kernel_sem);
+
+	task->lock_depth = depth;
+}
+
+void __lockfunc unlock_kernel(void)
+{
+	struct task_struct *task = current;
+
+	BUG_ON(task->lock_depth < 0);
+
+	if (likely(--task->lock_depth < 0))
+		up(&kernel_sem);
+}
+
+#else
 
 /*
  * The 'big kernel lock'
  *
  * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel().  It is transparently dropped and reaquired
+ * and unlock_kernel().  It is transparently dropped and reacquired
  * over schedule(). It is used to protect legacy code that hasn't
  * been migrated to a proper locking design yet.
  *
  * Don't use in new code.
  */
-static spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
 
 
 /*
@@ -34,7 +114,7 @@ static spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
  * (This works on UP too - _raw_spin_trylock will never
  * return false in that case)
  */
-int __lockfunc get_kernel_lock(void)
+int __lockfunc __reacquire_kernel_lock(void)
 {
 	while (!_raw_spin_trylock(&kernel_flag)) {
 		if (test_thread_flag(TIF_NEED_RESCHED))
@@ -45,7 +125,7 @@ int __lockfunc get_kernel_lock(void)
 	return 0;
 }
 
-void __lockfunc put_kernel_lock(void)
+void __lockfunc __release_kernel_lock(void)
 {
 	_raw_spin_unlock(&kernel_flag);
 	preempt_enable_no_resched();
@@ -97,6 +177,10 @@ static inline void __lock_kernel(void)
 
 static inline void __unlock_kernel(void)
 {
+	/*
+	 * the BKL is not covered by lockdep, so we open-code the
+	 * unlocking sequence (and thus avoid the dep-chain ops):
+	 */
 	_raw_spin_unlock(&kernel_flag);
 	preempt_enable();
 }
@@ -122,5 +206,8 @@ void __lockfunc unlock_kernel(void)
 	__unlock_kernel();
 }
 
+#endif
+
 EXPORT_SYMBOL(lock_kernel);
 EXPORT_SYMBOL(unlock_kernel);
+
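For context, and not part of the patch above: legacy code wraps whole code paths in lock_kernel()/unlock_kernel(), the calls may nest, and current->lock_depth tracks the nesting so that only the outermost unlock_kernel() releases the underlying spinlock (or, with CONFIG_PREEMPT_BKL, the kernel_sem semaphore). A minimal sketch of such a caller follows; the function and variable names are hypothetical, only lock_kernel()/unlock_kernel() and the old ioctl prototype are taken from the kernel of this era.

/*
 * Hypothetical legacy ioctl handler, for illustration only:
 * the whole operation is serialized against every other BKL user.
 */
#include <linux/fs.h>
#include <linux/smp_lock.h>

static int legacy_dev_ioctl(struct inode *inode, struct file *file,
			    unsigned int cmd, unsigned long arg)
{
	int ret = 0;

	lock_kernel();		/* may nest; current->lock_depth counts the depth */

	/* ... legacy, BKL-serialized work would go here ... */

	unlock_kernel();	/* lock really dropped only when depth falls below 0 */

	return ret;
}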