X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Frwsem-spinlock.c;h=c4cfd6c0342ff1e4fbc04b9fff3b93f0d00118bd;hb=16c70f8c1b54b61c3b951b6fb220df250fe09b32;hp=96255f47b1f853f20ae96072ed913f1e8248da49;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git

diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 96255f47b..c4cfd6c03 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,27 +17,22 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
-#if RWSEM_DEBUG
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
-	if (sem->debug)
-		printk("[%d] %s({%d,%d})\n",
-		       current->pid, str, sem->activity,
-		       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#endif
-
 /*
  * initialise the semaphore
  */
-void fastcall init_rwsem(struct rw_semaphore *sem)
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
 {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key, 0);
+#endif
 	sem->activity = 0;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
 }
 
 /*
@@ -56,8 +51,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 	struct task_struct *tsk;
 	int woken;
 
-	rwsemtrace(sem, "Entering __rwsem_do_wake");
-
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
 	if (!wakewrite) {
@@ -76,7 +69,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 		list_del(&waiter->list);
 		tsk = waiter->task;
 		/* Don't touch waiter after ->task has been NULLed */
-		mb();
+		smp_mb();
 		waiter->task = NULL;
 		wake_up_process(tsk);
 		put_task_struct(tsk);
@@ -91,7 +84,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 
 		list_del(&waiter->list);
 		tsk = waiter->task;
-		mb();
+		smp_mb();
 		waiter->task = NULL;
 		wake_up_process(tsk);
 		put_task_struct(tsk);
@@ -104,7 +97,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 	sem->activity += woken;
 
  out:
-	rwsemtrace(sem, "Leaving __rwsem_do_wake");
 	return sem;
 }
 
@@ -123,7 +115,7 @@ __rwsem_wake_one_writer(struct rw_semaphore *sem)
 	list_del(&waiter->list);
 
 	tsk = waiter->task;
-	mb();
+	smp_mb();
 	waiter->task = NULL;
 	wake_up_process(tsk);
 	put_task_struct(tsk);
@@ -138,14 +130,12 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_read");
-
-	spin_lock(&sem->wait_lock);
+	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
-		spin_unlock(&sem->wait_lock);
+		spin_unlock_irq(&sem->wait_lock);
 		goto out;
 	}
 
@@ -160,7 +150,7 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -171,9 +161,8 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	}
 
 	tsk->state = TASK_RUNNING;
-
  out:
-	rwsemtrace(sem, "Leaving __down_read");
+	;
 }
 
 /*
@@ -181,10 +170,11 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
  */
 int fastcall __down_read_trylock(struct rw_semaphore *sem)
 {
+	unsigned long flags;
 	int ret = 0;
-	rwsemtrace(sem, "Entering __down_read_trylock");
-	spin_lock(&sem->wait_lock);
+
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -192,9 +182,8 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 		ret = 1;
 	}
 
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving __down_read_trylock");
 	return ret;
 }
 
@@ -202,19 +191,17 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
  * get a write lock on the semaphore
  * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __sched __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_write");
-
-	spin_lock(&sem->wait_lock);
+	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
-		spin_unlock(&sem->wait_lock);
+		spin_unlock_irq(&sem->wait_lock);
 		goto out;
 	}
 
@@ -229,7 +216,7 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -240,9 +227,13 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	}
 
 	tsk->state = TASK_RUNNING;
-
  out:
-	rwsemtrace(sem, "Leaving __down_write");
+	;
+}
+
+void fastcall __sched __down_write(struct rw_semaphore *sem)
+{
+	__down_write_nested(sem, 0);
 }
 
 /*
@@ -250,10 +241,10 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
  */
 int fastcall __down_write_trylock(struct rw_semaphore *sem)
 {
+	unsigned long flags;
 	int ret = 0;
-	rwsemtrace(sem, "Entering __down_write_trylock");
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -261,9 +252,8 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 		ret = 1;
 	}
 
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving __down_write_trylock");
 	return ret;
 }
 
@@ -272,16 +262,14 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 */
 void fastcall __up_read(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem, "Entering __up_read");
+	unsigned long flags;
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
-	spin_unlock(&sem->wait_lock);
-
-	rwsemtrace(sem, "Leaving __up_read");
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
@@ -289,17 +277,15 @@ void fastcall __up_read(struct rw_semaphore *sem)
 */
 void fastcall __up_write(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem, "Entering __up_write");
+	unsigned long flags;
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 0;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 1);
 
-	spin_unlock(&sem->wait_lock);
-
-	rwsemtrace(sem, "Leaving __up_write");
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
@@ -308,27 +294,23 @@ void fastcall __up_write(struct rw_semaphore *sem)
 */
 void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem, "Entering __downgrade_write");
+	unsigned long flags;
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 1;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 0);
 
-	spin_unlock(&sem->wait_lock);
-
-	rwsemtrace(sem, "Leaving __downgrade_write");
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
-EXPORT_SYMBOL(init_rwsem);
+EXPORT_SYMBOL(__init_rwsem);
 EXPORT_SYMBOL(__down_read);
 EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write_nested);
 EXPORT_SYMBOL(__down_write);
 EXPORT_SYMBOL(__down_write_trylock);
 EXPORT_SYMBOL(__up_read);
 EXPORT_SYMBOL(__up_write);
 EXPORT_SYMBOL(__downgrade_write);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif