X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Fkernel_lock.c;h=c739bfcad659538430eada06a7cfe003ad2aea79;hb=6c6294c4656749c1eeed12df7ae48e2bf5a394b3;hp=48dc05a13963da616818462a5cbd40aa61a976d2;hpb=87fc8d1bb10cd459024a742c6a10961fefcef18f;p=linux-2.6.git

diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 48dc05a13..c739bfcad 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -7,6 +7,141 @@
  */
 #include <linux/smp_lock.h>
 #include <linux/module.h>
+#include <linux/kallsyms.h>
+
+#if defined(CONFIG_PREEMPT) && defined(__smp_processor_id) && \
+        defined(CONFIG_DEBUG_PREEMPT)
+
+/*
+ * Debugging check.
+ */
+unsigned int smp_processor_id(void)
+{
+        unsigned long preempt_count = preempt_count();
+        int this_cpu = __smp_processor_id();
+        cpumask_t this_mask;
+
+        if (likely(preempt_count))
+                goto out;
+
+        if (irqs_disabled())
+                goto out;
+
+        /*
+         * Kernel threads bound to a single CPU can safely use
+         * smp_processor_id():
+         */
+        this_mask = cpumask_of_cpu(this_cpu);
+
+        if (cpus_equal(current->cpus_allowed, this_mask))
+                goto out;
+
+        /*
+         * It is valid to assume CPU-locality during early bootup:
+         */
+        if (system_state != SYSTEM_RUNNING)
+                goto out;
+
+        /*
+         * Avoid recursion:
+         */
+        preempt_disable();
+
+        if (!printk_ratelimit())
+                goto out_enable;
+
+        printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
+        print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+        dump_stack();
+
+out_enable:
+        preempt_enable_no_resched();
+out:
+        return this_cpu;
+}
+
+EXPORT_SYMBOL(smp_processor_id);
+
+#endif /* PREEMPT && __smp_processor_id && DEBUG_PREEMPT */
+
+#ifdef CONFIG_PREEMPT_BKL
+/*
+ * The 'big kernel semaphore'
+ *
+ * This mutex is taken and released recursively by lock_kernel()
+ * and unlock_kernel(). It is transparently dropped and reacquired
+ * over schedule(). It is used to protect legacy code that hasn't
+ * been migrated to a proper locking design yet.
+ *
+ * Note: code locked by this semaphore will only be serialized against
+ * other code using the same locking facility. The code guarantees that
+ * the task remains on the same CPU.
+ *
+ * Don't use in new code.
+ */
+DECLARE_MUTEX(kernel_sem);
+
+/*
+ * Re-acquire the kernel semaphore.
+ *
+ * This function is called with preemption off.
+ *
+ * We are executing in schedule() so the code must be extremely careful
+ * about recursion, both due to the down() and due to the enabling of
+ * preemption. schedule() will re-check the preemption flag after
+ * reacquiring the semaphore.
+ */
+int __lockfunc __reacquire_kernel_lock(void)
+{
+        struct task_struct *task = current;
+        int saved_lock_depth = task->lock_depth;
+
+        BUG_ON(saved_lock_depth < 0);
+
+        task->lock_depth = -1;
+        preempt_enable_no_resched();
+
+        down(&kernel_sem);
+
+        preempt_disable();
+        task->lock_depth = saved_lock_depth;
+
+        return 0;
+}
+
+void __lockfunc __release_kernel_lock(void)
+{
+        up(&kernel_sem);
+}
+
+/*
+ * Getting the big kernel semaphore.
+ */
+void __lockfunc lock_kernel(void)
+{
+        struct task_struct *task = current;
+        int depth = task->lock_depth + 1;
+
+        if (likely(!depth))
+                /*
+                 * No recursion worries - we set up lock_depth _after_
+                 */
+                down(&kernel_sem);
+
+        task->lock_depth = depth;
+}
+
+void __lockfunc unlock_kernel(void)
+{
+        struct task_struct *task = current;
+
+        BUG_ON(task->lock_depth < 0);
+
+        if (likely(--task->lock_depth < 0))
+                up(&kernel_sem);
+}
+
+#else
 
 /*
  * The 'big kernel lock'
@@ -18,7 +153,7 @@
  *
  * Don't use in new code.
  */
-static spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
 
 
 /*
@@ -34,7 +169,7 @@ static spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
  * (This works on UP too - _raw_spin_trylock will never
  * return false in that case)
  */
-int __lockfunc get_kernel_lock(void)
+int __lockfunc __reacquire_kernel_lock(void)
 {
         while (!_raw_spin_trylock(&kernel_flag)) {
                 if (test_thread_flag(TIF_NEED_RESCHED))
@@ -45,7 +180,7 @@ int __lockfunc get_kernel_lock(void)
         return 0;
 }
 
-void __lockfunc put_kernel_lock(void)
+void __lockfunc __release_kernel_lock(void)
 {
         _raw_spin_unlock(&kernel_flag);
         preempt_enable_no_resched();
@@ -122,5 +257,8 @@ void __lockfunc unlock_kernel(void)
         __unlock_kernel();
 }
 
+#endif
+
 EXPORT_SYMBOL(lock_kernel);
 EXPORT_SYMBOL(unlock_kernel);
+
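
The smp_processor_id() debug check added above fires when a preemptible task reads its CPU number, because the task may migrate to another CPU before the value is used. A minimal sketch of the usual remedy follows; it relies only on the stock get_cpu()/put_cpu() and per-CPU helpers of this kernel generation, and the demo_hits counter and demo_count_hit() function are hypothetical, not part of the patch.

#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-CPU counter, purely for illustration. */
static DEFINE_PER_CPU(unsigned long, demo_hits);

static void demo_count_hit(void)
{
        /*
         * get_cpu() disables preemption and returns the CPU number, so
         * preempt_count() is non-zero and the first test in the debug
         * smp_processor_id() above passes without printing a warning.
         */
        int cpu = get_cpu();

        per_cpu(demo_hits, cpu)++;      /* cannot migrate until put_cpu() */

        put_cpu();                      /* re-enables preemption */
}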
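
The lock_depth bookkeeping in lock_kernel()/unlock_kernel() is what makes the BKL recursive: only the transition from -1 to 0 actually takes the semaphore (or, in the #else branch, the spinlock), and only the transition back below 0 releases it. A sketch under those assumptions, with hypothetical legacy_bkl_* functions:

#include <linux/smp_lock.h>

/* Hypothetical legacy code path, shown only to trace task->lock_depth. */
static void legacy_bkl_inner(void)
{
        lock_kernel();          /* depth 0 -> 1: already held, no second down() */
        /* ... touch BKL-protected legacy state ... */
        unlock_kernel();        /* depth 1 -> 0: still held by the outer caller */
}

static void legacy_bkl_outer(void)
{
        lock_kernel();          /* depth -1 -> 0: semaphore/spinlock acquired */
        legacy_bkl_inner();     /* nesting is safe */
        unlock_kernel();        /* depth 0 -> -1: lock actually released */
}

Under CONFIG_PREEMPT_BKL the same depth value lets __release_kernel_lock()/__reacquire_kernel_lock() drop and retake kernel_sem transparently around schedule(), which is why the comment warns that BKL users are only serialized against other BKL users.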