X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=kernel%2Fmutex.c;fp=kernel%2Fmutex.c;h=5449b210d9edb1aaf4be340730a47e07d1756edf;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=8c71cf72a497de63ec5672f3f5f8f728fbb9f9df;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/kernel/mutex.c b/kernel/mutex.c
index 8c71cf72a..5449b210d 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -17,7 +17,6 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
-#include <linux/debug_locks.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -39,14 +38,13 @@
  *
  * It is not allowed to initialize an already locked mutex.
  */
-void
-__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
+void fastcall __mutex_init(struct mutex *lock, const char *name)
 {
 	atomic_set(&lock->count, 1);
 	spin_lock_init(&lock->wait_lock);
 	INIT_LIST_HEAD(&lock->wait_list);
 
-	debug_mutex_init(lock, name, key);
+	debug_mutex_init(lock, name);
 }
 
 EXPORT_SYMBOL(__mutex_init);
@@ -58,7 +56,7 @@ EXPORT_SYMBOL(__mutex_init);
  * branch is predicted by the CPU as default-untaken.
  */
 static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count);
+__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
 
 /***
  * mutex_lock - acquire the mutex
@@ -81,7 +79,7 @@ __mutex_lock_slowpath(atomic_t *lock_count);
  *
  * This function is similar to (but not equivalent to) down().
  */
-void inline fastcall __sched mutex_lock(struct mutex *lock)
+void fastcall __sched mutex_lock(struct mutex *lock)
 {
 	might_sleep();
 	/*
@@ -94,7 +92,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 
 static void fastcall noinline __sched
-__mutex_unlock_slowpath(atomic_t *lock_count);
+__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);
 
 /***
  * mutex_unlock - release the mutex
@@ -122,18 +120,17 @@ EXPORT_SYMBOL(mutex_unlock);
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
+__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned int old_val;
-	unsigned long flags;
 
-	spin_lock_mutex(&lock->wait_lock, flags);
+	debug_mutex_init_waiter(&waiter);
 
-	debug_mutex_lock_common(lock, &waiter);
-	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	debug_mutex_add_waiter(lock, &waiter, task->thread_info);
+	spin_lock_mutex(&lock->wait_lock);
+
+	debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
@@ -160,8 +157,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 		if (unlikely(state == TASK_INTERRUPTIBLE &&
 			     signal_pending(task))) {
 			mutex_remove_waiter(lock, &waiter, task->thread_info);
-			mutex_release(&lock->dep_map, 1, _RET_IP_);
-			spin_unlock_mutex(&lock->wait_lock, flags);
+			spin_unlock_mutex(&lock->wait_lock);
 
 			debug_mutex_free_waiter(&waiter);
 			return -EINTR;
@@ -169,57 +165,48 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 		__set_task_state(task, state);
 
 		/* didnt get the lock, go to sleep: */
-		spin_unlock_mutex(&lock->wait_lock, flags);
+		spin_unlock_mutex(&lock->wait_lock);
 		schedule();
-		spin_lock_mutex(&lock->wait_lock, flags);
+		spin_lock_mutex(&lock->wait_lock);
 	}
 
 	/* got the lock - rejoice! */
 	mutex_remove_waiter(lock, &waiter, task->thread_info);
-	debug_mutex_set_owner(lock, task->thread_info);
+	debug_mutex_set_owner(lock, task->thread_info __IP__);
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
 
-	spin_unlock_mutex(&lock->wait_lock, flags);
+	spin_unlock_mutex(&lock->wait_lock);
 
 	debug_mutex_free_waiter(&waiter);
 
+	DEBUG_WARN_ON(list_empty(&lock->held_list));
+	DEBUG_WARN_ON(lock->owner != task->thread_info);
+
 	return 0;
 }
 
 static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count)
+__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
-}
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void __sched
-mutex_lock_nested(struct mutex *lock, unsigned int subclass)
-{
-	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
 }
 
-EXPORT_SYMBOL_GPL(mutex_lock_nested);
-#endif
-
 /*
  * Release the lock, slowpath:
  */
-static fastcall inline void
-__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
+static fastcall noinline void
+__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
-	unsigned long flags;
 
-	spin_lock_mutex(&lock->wait_lock, flags);
-	mutex_release(&lock->dep_map, nested, _RET_IP_);
-	debug_mutex_unlock(lock);
+	DEBUG_WARN_ON(lock->owner != current_thread_info());
+
+	spin_lock_mutex(&lock->wait_lock);
 
 	/*
 	 * some architectures leave the lock unlocked in the fastpath failure
@@ -229,6 +216,8 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
 
+	debug_mutex_unlock(lock);
+
 	if (!list_empty(&lock->wait_list)) {
 		/* get the first entry from the wait-list: */
 		struct mutex_waiter *waiter =
@@ -242,16 +231,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
 
 	debug_mutex_clear_owner(lock);
 
-	spin_unlock_mutex(&lock->wait_lock, flags);
-}
-
-/*
- * Release the lock, slowpath:
- */
-static fastcall noinline void
-__mutex_unlock_slowpath(atomic_t *lock_count)
-{
-	__mutex_unlock_common_slowpath(lock_count, 1);
+	spin_unlock_mutex(&lock->wait_lock);
 }
 
 /*
@@ -259,7 +239,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
 
 /***
  * mutex_lock_interruptible - acquire the mutex, interruptable
@@ -282,11 +262,11 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
 static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
 }
 
 /*
@@ -296,21 +276,18 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
-	unsigned long flags;
 	int prev;
 
-	spin_lock_mutex(&lock->wait_lock, flags);
+	spin_lock_mutex(&lock->wait_lock);
 
 	prev = atomic_xchg(&lock->count, -1);
-	if (likely(prev == 1)) {
-		debug_mutex_set_owner(lock, current_thread_info());
-		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-	}
+	if (likely(prev == 1))
+		debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
 	/* Set it back to 0 if there are no waiters: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
 
-	spin_unlock_mutex(&lock->wait_lock, flags);
+	spin_unlock_mutex(&lock->wait_lock);
 
 	return prev == 1;
 }
@@ -329,7 +306,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  * This function must not be used in interrupt context. The
  * mutex must be released by the same task that acquired it.
  */
-int fastcall __sched mutex_trylock(struct mutex *lock)
+int fastcall mutex_trylock(struct mutex *lock)
 {
 	return __mutex_fastpath_trylock(&lock->count,
 					__mutex_trylock_slowpath);
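
Editorial note (not part of the patch): this change backs the tree off the lockdep-era mutex API (struct lock_class_key, mutex_lock_nested(), the mutex_acquire()/mutex_release() and _RET_IP_ annotations) to the older debug API, where __mutex_init() takes only the lock and a name and the slowpaths pass the caller IP through the __IP_DECL__/__IP__ macros. Caller-side usage is identical under both APIs; a minimal, illustrative sketch of declaring and using a mutex in this tree follows (my_dev_mutex and my_dev_op are hypothetical names, not from the patch):

	#include <linux/mutex.h>

	/* statically initialized mutex; a runtime-allocated one would use mutex_init() */
	static DEFINE_MUTEX(my_dev_mutex);

	static int my_dev_op(void)
	{
		/* may sleep; the uncontended fastpath is a single atomic 1->0 transition */
		mutex_lock(&my_dev_mutex);

		/* ... critical section ... */

		mutex_unlock(&my_dev_mutex);
		return 0;
	}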