#include <linux/sched.h>
#include <linux/spinlock.h>
-#ifdef CONFIG_LOCK_KERNEL
+#define BKL_DEBUG /* For detecting sleep_on() abuse */
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) || defined(BKL_DEBUG)
+
+extern spinlock_t kernel_flag;
#define kernel_locked() (current->lock_depth >= 0)
-extern int __lockfunc get_kernel_lock(void);
-extern void __lockfunc put_kernel_lock(void);
+#define get_kernel_lock() spin_lock(&kernel_flag)
+#define put_kernel_lock() spin_unlock(&kernel_flag)
/*
- * Release/re-acquire global kernel lock for the scheduler
+ * Release global kernel lock.
*/
-#define release_kernel_lock(tsk) do { \
- if (unlikely((tsk)->lock_depth >= 0)) \
- put_kernel_lock(); \
-} while (0)
+static inline void release_kernel_lock(struct task_struct *task)
+{
+ if (unlikely(task->lock_depth >= 0))
+ put_kernel_lock();
+}
/*
- * Non-SMP kernels will never block on the kernel lock,
- * so we are better off returning a constant zero from
- * reacquire_kernel_lock() so that the compiler can see
- * it at compile-time.
+ * Re-acquire the kernel lock
*/
-#ifdef CONFIG_SMP
-#define return_value_on_smp return
-#else
-#define return_value_on_smp
-#endif
-
-static inline int reacquire_kernel_lock(struct task_struct *task)
+static inline void reacquire_kernel_lock(struct task_struct *task)
{
if (unlikely(task->lock_depth >= 0))
- return_value_on_smp get_kernel_lock();
- return 0;
+ get_kernel_lock();
}
-extern void __lockfunc lock_kernel(void) __acquires(kernel_lock);
-extern void __lockfunc unlock_kernel(void) __releases(kernel_lock);
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPUs.
+ */
+static inline void lock_kernel(void)
+{
+ int depth = current->lock_depth+1;
+ if (likely(!depth))
+ get_kernel_lock();
+ current->lock_depth = depth;
+}
+
+static inline void unlock_kernel(void)
+{
+ BUG_ON(current->lock_depth < 0);
+ if (likely(--current->lock_depth < 0))
+ put_kernel_lock();
+}
#else
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define release_kernel_lock(task) do { } while(0)
-#define reacquire_kernel_lock(task) 0
+#define reacquire_kernel_lock(task) do { } while(0)
#define kernel_locked() 1
-#endif /* CONFIG_LOCK_KERNEL */
+#endif /* CONFIG_SMP || CONFIG_PREEMPT || BKL_DEBUG */
#endif /* __LINUX_SMPLOCK_H */