diff --git a/lib/rwsem.c b/lib/rwsem.c
index 27dcd95f7..cdb4e3d05 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -8,44 +8,52 @@
 #include <linux/init.h>
 #include <linux/module.h>
 
+/*
+ * Initialize an rwsem:
+ */
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key, 0);
+#endif
+	sem->count = RWSEM_UNLOCKED_VALUE;
+	spin_lock_init(&sem->wait_lock);
+	INIT_LIST_HEAD(&sem->wait_list);
+}
+
+EXPORT_SYMBOL(__init_rwsem);
+
 struct rwsem_waiter {
-	struct list_head list;
-	struct task_struct *task;
-	unsigned int flags;
+	struct list_head list;
+	struct task_struct *task;
+	unsigned int flags;
 #define RWSEM_WAITING_FOR_READ	0x00000001
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
-#if RWSEM_DEBUG
-#undef rwsemtrace
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
-	printk("sem=%p\n",sem);
-	printk("(sem)=%08lx\n",sem->count);
-	if (sem->debug)
-		printk("[%d] %s({%08lx})\n",current->pid,str,sem->count);
-}
-#endif
-
 /*
- * handle the lock being released whilst there are processes blocked on it that can now run
+ * handle the lock release when processes blocked on it can now run
  * - if we come here from up_xxxx(), then:
- *   - the 'active part' of the count (&0x0000ffff) had reached zero (but may have changed)
- *   - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so)
+ *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
+ *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
  * - there must be someone on the queue
  * - the spinlock must be held by the caller
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only woken if downgrading is false
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
+static inline struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 {
 	struct rwsem_waiter *waiter;
 	struct task_struct *tsk;
 	struct list_head *next;
 	signed long oldcount, woken, loop;
 
-	rwsemtrace(sem,"Entering __rwsem_do_wake");
-
 	if (downgrading)
 		goto dont_wake_writers;
 
@@ -53,22 +61,27 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
 	 * if we can transition the active part of the count from 0 -> 1
 	 */
  try_again:
-	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
+	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
+						- RWSEM_ACTIVE_BIAS;
 	if (oldcount & RWSEM_ACTIVE_MASK)
 		goto undo;
 
-	waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
-	/* try to grant a single write lock if there's a writer at the front of the queue
-	 * - note we leave the 'active part' of the count incremented by 1 and the waiting part
-	 *   incremented by 0x00010000
+	/* try to grant a single write lock if there's a writer at the front
+	 * of the queue - note we leave the 'active part' of the count
+	 * incremented by 1 and the waiting part incremented by 0x00010000
 	 */
 	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
 		goto readers_only;
 
+	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
+	 * It is allocated on the waiter's stack and may become invalid at
+	 * any time after that point (due to a wakeup from another source).
+	 */
 	list_del(&waiter->list);
 	tsk = waiter->task;
-	mb();
+	smp_mb();
 	waiter->task = NULL;
 	wake_up_process(tsk);
 	put_task_struct(tsk);
@@ -76,7 +89,7 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
 
 	/* don't want to wake any writers */
  dont_wake_writers:
-	waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
 		goto out;
 
@@ -90,26 +103,28 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
 	do {
 		woken++;
-		if (waiter->list.next==&sem->wait_list)
+		if (waiter->list.next == &sem->wait_list)
 			break;
-		waiter = list_entry(waiter->list.next,struct rwsem_waiter,list);
+		waiter = list_entry(waiter->list.next,
+					struct rwsem_waiter, list);
 	} while (waiter->flags & RWSEM_WAITING_FOR_READ);
 
 	loop = woken;
 	woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
 	if (!downgrading)
-		woken -= RWSEM_ACTIVE_BIAS;	/* we'd already done one increment
-						 * earlier */
-	rwsem_atomic_add(woken,sem);
+		/* we'd already done one increment earlier */
+		woken -= RWSEM_ACTIVE_BIAS;
+
+	rwsem_atomic_add(woken, sem);
 
 	next = sem->wait_list.next;
-	for (; loop>0; loop--) {
-		waiter = list_entry(next,struct rwsem_waiter,list);
+	for (; loop > 0; loop--) {
+		waiter = list_entry(next, struct rwsem_waiter, list);
 		next = waiter->list.next;
 		tsk = waiter->task;
-		mb();
+		smp_mb();
 		waiter->task = NULL;
 		wake_up_process(tsk);
 		put_task_struct(tsk);
@@ -119,12 +134,11 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
 	next->prev = &sem->wait_list;
 
  out:
-	rwsemtrace(sem,"Leaving __rwsem_do_wake");
 	return sem;
 
 	/* undo the change to count, but check for a transition 1->0 */
  undo:
-	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem)!=0)
+	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
 		goto out;
 	goto try_again;
 }
 
@@ -132,32 +146,30 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
 /*
  * wait for a lock to be granted
  */
-static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore *sem,
-						 struct rwsem_waiter *waiter,
-						 signed long adjustment)
+static struct rw_semaphore *
+rwsem_down_failed_common(struct rw_semaphore *sem,
+			struct rwsem_waiter *waiter, signed long adjustment)
 {
 	struct task_struct *tsk = current;
 	signed long count;
 
-	set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
-	spin_lock(&sem->wait_lock);
+	spin_lock_irq(&sem->wait_lock);
 	waiter->task = tsk;
 	get_task_struct(tsk);
 
-	list_add_tail(&waiter->list,&sem->wait_list);
+	list_add_tail(&waiter->list, &sem->wait_list);
 
-	/* note that we're now waiting on the lock, but no longer actively read-locking */
-	count = rwsem_atomic_update(adjustment,sem);
+	/* we're now waiting on the lock, but no longer actively read-locking */
+	count = rwsem_atomic_update(adjustment, sem);
 
-	/* if there are no longer active locks, wake the front queued process(es) up
-	 * - it might even be this process, since the waker takes a more active part
-	 */
+	/* if there are no active locks, wake the front queued process(es) up */
 	if (!(count & RWSEM_ACTIVE_MASK))
 		sem = __rwsem_do_wake(sem, 0);
 
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -175,81 +187,71 @@ static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore
 /*
  * wait for the read lock to be granted
  */
-struct rw_semaphore fastcall __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall __sched *
+rwsem_down_read_failed(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 
-	rwsemtrace(sem,"Entering rwsem_down_read_failed");
-
 	waiter.flags = RWSEM_WAITING_FOR_READ;
-	rwsem_down_failed_common(sem,&waiter,RWSEM_WAITING_BIAS-RWSEM_ACTIVE_BIAS);
-
-	rwsemtrace(sem,"Leaving rwsem_down_read_failed");
+	rwsem_down_failed_common(sem, &waiter,
+				RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
 	return sem;
 }
 
 /*
  * wait for the write lock to be granted
  */
-struct rw_semaphore fastcall __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall __sched *
+rwsem_down_write_failed(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 
-	rwsemtrace(sem,"Entering rwsem_down_write_failed");
-
 	waiter.flags = RWSEM_WAITING_FOR_WRITE;
-	rwsem_down_failed_common(sem,&waiter,-RWSEM_ACTIVE_BIAS);
+	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
 
-	rwsemtrace(sem,"Leaving rwsem_down_write_failed");
 	return sem;
 }
 
 /*
  * handle waking up a waiter on the semaphore
- * - up_read/up_write has decremented the active part of the count if we come here
+ * - up_read/up_write has decremented the active part of count if we come here
 */
 struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem,"Entering rwsem_wake");
+	unsigned long flags;
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 0);
 
-	spin_unlock(&sem->wait_lock);
-
-	rwsemtrace(sem,"Leaving rwsem_wake");
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return sem;
 }
 
 /*
  * downgrade a write lock into a read lock
- * - caller incremented waiting part of count, and discovered it to be still negative
+ * - caller incremented waiting part of count and discovered it still negative
  * - just wake up any readers at the front of the queue
  */
 struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem,"Entering rwsem_downgrade_wake");
+	unsigned long flags;
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 1);
 
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem,"Leaving rwsem_downgrade_wake");
 	return sem;
 }
 
-EXPORT_SYMBOL_NOVERS(rwsem_down_read_failed);
-EXPORT_SYMBOL_NOVERS(rwsem_down_write_failed);
-EXPORT_SYMBOL_NOVERS(rwsem_wake);
-EXPORT_SYMBOL_NOVERS(rwsem_downgrade_wake);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif
+EXPORT_SYMBOL(rwsem_down_read_failed);
+EXPORT_SYMBOL(rwsem_down_write_failed);
+EXPORT_SYMBOL(rwsem_wake);
+EXPORT_SYMBOL(rwsem_downgrade_wake);
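
Note: the functions patched above are the contention slow paths behind the generic
rwsem API declared in include/linux/rwsem.h. The following minimal sketch shows how a
caller reaches each of them; it is illustrative only, and the identifiers example_sem,
example_init and example_usage are hypothetical, not part of this patch:

    #include <linux/rwsem.h>

    static struct rw_semaphore example_sem;

    static void example_init(void)
    {
            /* init_rwsem() is the usual route into the __init_rwsem()
             * added by this patch; it supplies the lockdep class key. */
            init_rwsem(&example_sem);
    }

    static void example_usage(void)
    {
            down_read(&example_sem);   /* contended: sleeps in rwsem_down_read_failed() */
            /* ... read-side critical section ... */
            up_read(&example_sem);     /* release may wake waiters via rwsem_wake() */

            down_write(&example_sem);  /* contended: sleeps in rwsem_down_write_failed() */
            /* ... write-side critical section ... */
            downgrade_write(&example_sem);  /* wakes queued readers via rwsem_downgrade_wake() */
            up_read(&example_sem);     /* drop the remaining read hold */
    }

Statically declared semaphores (DECLARE_RWSEM) get their lockdep annotation from the
static initializer instead, so only dynamically initialized ones need init_rwsem().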