vserver 1.9.5.x5
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 48dc05a..c739bfc 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -7,6 +7,141 @@
  */
 #include <linux/smp_lock.h>
 #include <linux/module.h>
+#include <linux/kallsyms.h>
+
+#if defined(CONFIG_PREEMPT) && defined(__smp_processor_id) && \
+               defined(CONFIG_DEBUG_PREEMPT)
+
+/*
+ * Debugging check.
+ */
+unsigned int smp_processor_id(void)
+{
+       unsigned long preempt_count = preempt_count();
+       int this_cpu = __smp_processor_id();
+       cpumask_t this_mask;
+
+       if (likely(preempt_count))
+               goto out;
+
+       if (irqs_disabled())
+               goto out;
+
+       /*
+        * Kernel threads bound to a single CPU can safely use
+        * smp_processor_id():
+        */
+       this_mask = cpumask_of_cpu(this_cpu);
+
+       if (cpus_equal(current->cpus_allowed, this_mask))
+               goto out;
+
+       /*
+        * It is valid to assume CPU-locality during early bootup:
+        */
+       if (system_state != SYSTEM_RUNNING)
+               goto out;
+
+       /*
+        * Avoid recursion:
+        */
+       preempt_disable();
+
+       if (!printk_ratelimit())
+               goto out_enable;
+
+       printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
+       print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+       dump_stack();
+
+out_enable:
+       preempt_enable_no_resched();
+out:
+       return this_cpu;
+}
+
+EXPORT_SYMBOL(smp_processor_id);
+
+#endif /* PREEMPT && __smp_processor_id && DEBUG_PREEMPT */
+
+#ifdef CONFIG_PREEMPT_BKL
+/*
+ * The 'big kernel semaphore'
+ *
+ * This mutex is taken and released recursively by lock_kernel()
+ * and unlock_kernel().  It is transparently dropped and reacquired
+ * over schedule().  It is used to protect legacy code that hasn't
+ * been migrated to a proper locking design yet.
+ *
+ * Note: code locked by this semaphore will only be serialized against
+ * other code using the same locking facility. Unlike the spinlock-based
+ * BKL, it does not keep the task on the same CPU while it is held.
+ *
+ * Don't use in new code.
+ */
+DECLARE_MUTEX(kernel_sem);
+
+/*
+ * Re-acquire the kernel semaphore.
+ *
+ * This function is called with preemption off.
+ *
+ * We are executing in schedule() so the code must be extremely careful
+ * about recursion, both due to the down() and due to the enabling of
+ * preemption. schedule() will re-check the preemption flag after
+ * reacquiring the semaphore.
+ */
+int __lockfunc __reacquire_kernel_lock(void)
+{
+       struct task_struct *task = current;
+       int saved_lock_depth = task->lock_depth;
+
+       BUG_ON(saved_lock_depth < 0);
+
+       task->lock_depth = -1;
+       preempt_enable_no_resched();
+
+       down(&kernel_sem);
+
+       preempt_disable();
+       task->lock_depth = saved_lock_depth;
+
+       return 0;
+}
+
+void __lockfunc __release_kernel_lock(void)
+{
+       up(&kernel_sem);
+}
+
+/*
+ * Getting the big kernel semaphore.
+ */
+void __lockfunc lock_kernel(void)
+{
+       struct task_struct *task = current;
+       int depth = task->lock_depth + 1;
+
+       if (likely(!depth))
+               /*
+                * No recursion worries - lock_depth is set up _after_ the down()
+                */
+               down(&kernel_sem);
+
+       task->lock_depth = depth;
+}
+
+void __lockfunc unlock_kernel(void)
+{
+       struct task_struct *task = current;
+
+       BUG_ON(task->lock_depth < 0);
+
+       if (likely(--task->lock_depth < 0))
+               up(&kernel_sem);
+}
+
+#else
 
 /*
  * The 'big kernel lock'
  *
  * Don't use in new code.
  */
-static spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
 
 
 /*
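As a hedged illustration of what the smp_processor_id() debug check added above is meant to catch (not part of the patch; the two helper functions below are made up for the example): calling smp_processor_id() with preemption enabled returns a CPU number that may already be stale by the time it is used, whereas the get_cpu()/put_cpu() pair keeps preemption disabled for as long as the number is needed.

#include <linux/kernel.h>
#include <linux/smp.h>

/* Illustrative only -- neither function is part of the patch. */
static void bad_percpu_access(void)
{
	/*
	 * Preemptible context: the task can migrate right after this call,
	 * so "cpu" may already be wrong.  With CONFIG_DEBUG_PREEMPT the
	 * check above prints the "BUG: using smp_processor_id() in
	 * preemptible" warning here.
	 */
	int cpu = smp_processor_id();

	printk(KERN_DEBUG "running on CPU %d (maybe)\n", cpu);
}

static void good_percpu_access(void)
{
	/* get_cpu() disables preemption, so the CPU number stays valid
	 * until the matching put_cpu(). */
	int cpu = get_cpu();

	printk(KERN_DEBUG "running on CPU %d\n", cpu);
	put_cpu();
}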
@@ -34,7 +169,7 @@ static spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
  * (This works on UP too - _raw_spin_trylock will never
  * return false in that case)
  */
-int __lockfunc get_kernel_lock(void)
+int __lockfunc __reacquire_kernel_lock(void)
 {
        while (!_raw_spin_trylock(&kernel_flag)) {
                if (test_thread_flag(TIF_NEED_RESCHED))
@@ -45,7 +180,7 @@ int __lockfunc get_kernel_lock(void)
        return 0;
 }
 
-void __lockfunc put_kernel_lock(void)
+void __lockfunc __release_kernel_lock(void)
 {
        _raw_spin_unlock(&kernel_flag);
        preempt_enable_no_resched();
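For context, here is a hedged sketch of how the scheduler side might use the two hooks that the hunks above rename; the real kernel goes through release_kernel_lock()/reacquire_kernel_lock() wrappers, and the function below is a simplified stand-in rather than actual sched.c code.

#include <linux/sched.h>
#include <linux/smp_lock.h>

/* Simplified sketch, assuming the lock_depth convention used above
 * (-1 means the BKL is not held); not taken from sched.c. */
static void example_bkl_across_schedule(struct task_struct *tsk)
{
	/* Before the context switch: drop the BKL if "tsk" holds it.
	 * tsk->lock_depth keeps the nesting count, so nothing is lost. */
	if (unlikely(tsk->lock_depth >= 0))
		__release_kernel_lock();

	/* ... the scheduler picks another task and switches to it ... */

	/* When "tsk" is scheduled back in: take the BKL again before
	 * returning to code that still believes it holds it.  (The
	 * spinlock variant can return nonzero when a reschedule is
	 * pending; that detail is ignored in this sketch.) */
	if (unlikely(tsk->lock_depth >= 0))
		__reacquire_kernel_lock();
}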
@@ -122,5 +257,8 @@ void __lockfunc unlock_kernel(void)
                __unlock_kernel();
 }
 
+#endif /* CONFIG_PREEMPT_BKL */
+
 EXPORT_SYMBOL(lock_kernel);
 EXPORT_SYMBOL(unlock_kernel);
+
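Finally, a hedged illustration (again not part of the patch) of the recursive depth counting shared by both BKL variants: lock_kernel() only takes the underlying lock when lock_depth goes from -1 to 0, and unlock_kernel() only releases it when the depth drops below 0 again, so nested critical sections in legacy code simply nest the counter.

#include <linux/smp_lock.h>

/* Illustrative only: walks through the lock_depth bookkeeping. */
static void nested_bkl_example(void)
{
	lock_kernel();		/* lock_depth: -1 -> 0, BKL actually taken    */
	lock_kernel();		/* lock_depth:  0 -> 1, recursion, no locking  */

	/* ... legacy code that assumes the BKL is held ... */

	unlock_kernel();	/* lock_depth:  1 -> 0, BKL still held         */
	unlock_kernel();	/* lock_depth:  0 -> -1, BKL really released   */
}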