diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 9a20995..b63ce70 100644
@@ -5,62 +5,50 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
-
-extern spinlock_t kernel_flag;
+#ifdef CONFIG_LOCK_KERNEL
 
 #define kernel_locked()                (current->lock_depth >= 0)
 
-#define get_kernel_lock()      spin_lock(&kernel_flag)
-#define put_kernel_lock()      spin_unlock(&kernel_flag)
+extern int __lockfunc __reacquire_kernel_lock(void);
+extern void __lockfunc __release_kernel_lock(void);
 
 /*
- * Release global kernel lock.
+ * Release/re-acquire global kernel lock for the scheduler
  */
-static inline void release_kernel_lock(struct task_struct *task)
-{
-       if (unlikely(task->lock_depth >= 0))
-               put_kernel_lock();
-}
+#define release_kernel_lock(tsk) do {          \
+       if (unlikely((tsk)->lock_depth >= 0))   \
+               __release_kernel_lock();        \
+} while (0)
 
 /*
- * Re-acquire the kernel lock
+ * Non-SMP kernels will never block on the kernel lock,
+ * so we are better off returning a constant zero from
+ * reacquire_kernel_lock() so that the compiler can see
+ * it at compile-time.
  */
-static inline void reacquire_kernel_lock(struct task_struct *task)
-{
-       if (unlikely(task->lock_depth >= 0))
-               get_kernel_lock();
-}
+#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
+# define return_value_on_smp return
+#else
+# define return_value_on_smp
+#endif
 
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-static inline void lock_kernel(void)
+static inline int reacquire_kernel_lock(struct task_struct *task)
 {
-       int depth = current->lock_depth+1;
-       if (likely(!depth))
-               get_kernel_lock();
-       current->lock_depth = depth;
+       if (unlikely(task->lock_depth >= 0))
+               return_value_on_smp __reacquire_kernel_lock();
+       return 0;
 }
 
-static inline void unlock_kernel(void)
-{
-       BUG_ON(current->lock_depth < 0);
-       if (likely(--current->lock_depth < 0))
-               put_kernel_lock();
-}
+extern void __lockfunc lock_kernel(void)       __acquires(kernel_lock);
+extern void __lockfunc unlock_kernel(void)     __releases(kernel_lock);
 
 #else
 
 #define lock_kernel()                          do { } while(0)
 #define unlock_kernel()                        do { } while(0)
 #define release_kernel_lock(task)              do { } while(0)
-#define reacquire_kernel_lock(task)            do { } while(0)
+#define reacquire_kernel_lock(task)            0
 #define kernel_locked()                        1
 
-#endif /* CONFIG_SMP || CONFIG_PREEMPT */
+#endif /* CONFIG_LOCK_KERNEL */
 #endif /* __LINUX_SMPLOCK_H */
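
The removed inlines above show the essence of the big kernel lock (BKL): current->lock_depth starts at -1 and counts nested acquisitions per task, so only the outermost lock_kernel()/unlock_kernel() pair ever touches the underlying spinlock; the patch moves that logic behind the out-of-line __lockfunc helpers under the new CONFIG_LOCK_KERNEL guard. A minimal standalone sketch of the depth-counting pattern, reconstructed from the removed code (the sketch_ names are hypothetical, and kernel_flag stands in for the old global that this patch takes out of the header):

        #include <linux/spinlock.h>
        #include <linux/sched.h>

        static spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;     /* stand-in for the old global */

        static inline void sketch_lock_kernel(void)
        {
                int depth = current->lock_depth + 1;

                /*
                 * lock_depth is -1 while the task holds no BKL, so depth == 0
                 * means this is the outermost acquisition and the real
                 * spinlock must be taken; nested calls only bump the counter.
                 */
                if (likely(!depth))
                        spin_lock(&kernel_flag);
                current->lock_depth = depth;
        }

        static inline void sketch_unlock_kernel(void)
        {
                BUG_ON(current->lock_depth < 0);

                /* Drop the spinlock only when the outermost level unlocks. */
                if (likely(--current->lock_depth < 0))
                        spin_unlock(&kernel_flag);
        }

The reason reacquire_kernel_lock() now returns an int is the return_value_on_smp macro: on SMP without CONFIG_PREEMPT_BKL it expands to "return", turning the statement into "return __reacquire_kernel_lock();" so that a nonzero result from the out-of-line helper reaches the caller; in every other configuration it expands to nothing, the helper's result is discarded, and the inline falls through to the constant "return 0" described in the comment. A scheduler call site consistent with this interface (an illustration of the era's kernel/sched.c usage, not a quote from it) retries the scheduling decision when the lock could not be reacquired cleanly:

        if (unlikely(reacquire_kernel_lock(current) < 0))
                goto need_resched_nonpreemptible;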