X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Frwsem-spinlock.c;h=a71152d550f015b4847ffd9fb6fa525924420192;hb=5e3b93f248c98873cc843e83092bb8da92ac85a2;hp=96255f47b1f853f20ae96072ed913f1e8248da49;hpb=a91482bdcc2e0f6035702e46f1b99043a0893346;p=linux-2.6.git

diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 96255f47b..a71152d55 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -1,5 +1,5 @@
-/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
- * generic spinlock implementation
+/* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic spinlock
+ * implementation
  *
  * Copyright (c) 2001 David Howells (dhowells@redhat.com).
  * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
@@ -10,9 +10,9 @@
 #include <linux/module.h>
 
 struct rwsem_waiter {
-	struct list_head list;
-	struct task_struct *task;
-	unsigned int flags;
+	struct list_head	list;
+	struct task_struct	*task;
+	unsigned int		flags;
 #define RWSEM_WAITING_FOR_READ	0x00000001
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
@@ -22,8 +22,7 @@
 void rwsemtrace(struct rw_semaphore *sem, const char *str)
 {
 	if (sem->debug)
 		printk("[%d] %s({%d,%d})\n",
-		       current->pid, str, sem->activity,
-		       list_empty(&sem->wait_list) ? 0 : 1);
+		       current->pid,str,sem->activity,list_empty(&sem->wait_list)?0:1);
 }
 #endif
@@ -41,7 +40,7 @@ void fastcall init_rwsem(struct rw_semaphore *sem)
 }
 
 /*
- * handle the lock release when processes blocked on it that can now run
+ * handle the lock being released whilst there are processes blocked on it that can now run
  * - if we come here, then:
  *   - the 'active count' _reached_ zero
  *   - the 'waiting count' is non-zero
@@ -49,16 +48,15 @@
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only woken if wakewrite is non-zero
  */
-static inline struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
+static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 {
 	struct rwsem_waiter *waiter;
 	struct task_struct *tsk;
 	int woken;
 
-	rwsemtrace(sem, "Entering __rwsem_do_wake");
+	rwsemtrace(sem,"Entering __rwsem_do_wake");
 
-	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
+	waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
 
 	if (!wakewrite) {
 		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
@@ -66,16 +64,14 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 			goto dont_wake_writers;
 	}
 
-	/* if we are allowed to wake writers try to grant a single write lock
-	 * if there's a writer at the front of the queue
-	 * - we leave the 'waiting count' incremented to signify potential
-	 *   contention
+	/* if we are allowed to wake writers try to grant a single write lock if there's a
+	 * writer at the front of the queue
+	 * - we leave the 'waiting count' incremented to signify potential contention
 	 */
 	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
 		sem->activity = -1;
 		list_del(&waiter->list);
 		tsk = waiter->task;
-		/* Don't touch waiter after ->task has been NULLed */
 		mb();
 		waiter->task = NULL;
 		wake_up_process(tsk);
@@ -83,10 +79,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 		goto out;
 	}
 
-	/* grant an infinite number of read locks to the front of the queue */
+	/* grant an infinite number of read locks to the readers at the front of the queue */
 dont_wake_writers:
 	woken = 0;
-	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
+	while (waiter->flags&RWSEM_WAITING_FOR_READ) {
 		struct list_head *next = waiter->list.next;
 
 		list_del(&waiter->list);
@@ -98,28 +94,27 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 		woken++;
 		if (list_empty(&sem->wait_list))
 			break;
-		waiter = list_entry(next, struct rwsem_waiter, list);
+		waiter = list_entry(next,struct rwsem_waiter,list);
 	}
 
 	sem->activity += woken;
 
 out:
-	rwsemtrace(sem, "Leaving __rwsem_do_wake");
+	rwsemtrace(sem,"Leaving __rwsem_do_wake");
 	return sem;
 }
 
 /*
  * wake a single writer
  */
-static inline struct rw_semaphore *
-__rwsem_wake_one_writer(struct rw_semaphore *sem)
+static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter *waiter;
 	struct task_struct *tsk;
 
 	sem->activity = -1;
 
-	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
+	waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
 	list_del(&waiter->list);
 
 	tsk = waiter->task;
@@ -133,16 +128,16 @@ __rwsem_wake_one_writer(struct rw_semaphore *sem)
 /*
  * get a read lock on the semaphore
  */
-void fastcall __sched __down_read(struct rw_semaphore *sem)
+void fastcall __down_read(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_read");
+	rwsemtrace(sem,"Entering __down_read");
 
 	spin_lock(&sem->wait_lock);
 
-	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+	if (sem->activity>=0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
 		spin_unlock(&sem->wait_lock);
@@ -150,14 +145,14 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	}
 
 	tsk = current;
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	set_task_state(tsk,TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
 	waiter.task = tsk;
 	waiter.flags = RWSEM_WAITING_FOR_READ;
 	get_task_struct(tsk);
 
-	list_add_tail(&waiter.list, &sem->wait_list);
+	list_add_tail(&waiter.list,&sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
 	spin_unlock(&sem->wait_lock);
@@ -173,7 +168,7 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	tsk->state = TASK_RUNNING;
 
 out:
-	rwsemtrace(sem, "Leaving __down_read");
+	rwsemtrace(sem,"Leaving __down_read");
 }
 
 /*
@@ -182,11 +177,11 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 int fastcall __down_read_trylock(struct rw_semaphore *sem)
 {
 	int ret = 0;
-	rwsemtrace(sem, "Entering __down_read_trylock");
+	rwsemtrace(sem,"Entering __down_read_trylock");
 
 	spin_lock(&sem->wait_lock);
 
-	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+	if (sem->activity>=0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
 		ret = 1;
@@ -194,24 +189,24 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 
 	spin_unlock(&sem->wait_lock);
 
-	rwsemtrace(sem, "Leaving __down_read_trylock");
+	rwsemtrace(sem,"Leaving __down_read_trylock");
 	return ret;
 }
 
 /*
  * get a write lock on the semaphore
- * - we increment the waiting count anyway to indicate an exclusive lock
+ * - note that we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __sched __down_write(struct rw_semaphore *sem)
+void fastcall __down_write(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_write");
+	rwsemtrace(sem,"Entering __down_write");
 
 	spin_lock(&sem->wait_lock);
 
-	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
+	if (sem->activity==0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
 		spin_unlock(&sem->wait_lock);
@@ -219,14 +214,14 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	}
 
 	tsk = current;
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	set_task_state(tsk,TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
 	waiter.task = tsk;
 	waiter.flags = RWSEM_WAITING_FOR_WRITE;
 	get_task_struct(tsk);
 
-	list_add_tail(&waiter.list, &sem->wait_list);
+	list_add_tail(&waiter.list,&sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
 	spin_unlock(&sem->wait_lock);
@@ -242,7 +237,7 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	tsk->state = TASK_RUNNING;
 
 out:
-	rwsemtrace(sem, "Leaving __down_write");
+	rwsemtrace(sem,"Leaving __down_write");
 }
 
 /*
@@ -251,11 +246,11 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 int fastcall __down_write_trylock(struct rw_semaphore *sem)
 {
 	int ret = 0;
-	rwsemtrace(sem, "Entering __down_write_trylock");
+	rwsemtrace(sem,"Entering __down_write_trylock");
 
 	spin_lock(&sem->wait_lock);
 
-	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
+	if (sem->activity==0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
 		ret = 1;
@@ -263,7 +258,7 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 
 	spin_unlock(&sem->wait_lock);
 
-	rwsemtrace(sem, "Leaving __down_write_trylock");
+	rwsemtrace(sem,"Leaving __down_write_trylock");
 	return ret;
 }
 
@@ -272,16 +267,16 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
  */
 void fastcall __up_read(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem, "Entering __up_read");
+	rwsemtrace(sem,"Entering __up_read");
 
 	spin_lock(&sem->wait_lock);
 
-	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
+	if (--sem->activity==0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
 	spin_unlock(&sem->wait_lock);
 
-	rwsemtrace(sem, "Leaving __up_read");
+	rwsemtrace(sem,"Leaving __up_read");
 }
 
 /*
@@ -289,7 +284,7 @@ void fastcall __up_read(struct rw_semaphore *sem)
  */
 void fastcall __up_write(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem, "Entering __up_write");
+	rwsemtrace(sem,"Entering __up_write");
 
 	spin_lock(&sem->wait_lock);
 
@@ -299,7 +294,7 @@ void fastcall __up_write(struct rw_semaphore *sem)
 
 	spin_unlock(&sem->wait_lock);
 
-	rwsemtrace(sem, "Leaving __up_write");
+	rwsemtrace(sem,"Leaving __up_write");
 }
 
 /*
@@ -308,17 +303,17 @@ void fastcall __up_write(struct rw_semaphore *sem)
  */
 void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem, "Entering __downgrade_write");
+	rwsemtrace(sem,"Entering __downgrade_write");
 
 	spin_lock(&sem->wait_lock);
 
 	sem->activity = 1;
 	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_do_wake(sem, 0);
+		sem = __rwsem_do_wake(sem,0);
 
 	spin_unlock(&sem->wait_lock);
 
-	rwsemtrace(sem, "Leaving __downgrade_write");
+	rwsemtrace(sem,"Leaving __downgrade_write");
 }
 
 EXPORT_SYMBOL(init_rwsem);
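
For context, the slow paths patched above sit behind the kernel's rw-semaphore API: down_read()/up_read(), down_write()/up_write() and downgrade_write() from <linux/rwsem.h> fall through to the __down_read/__down_write/__downgrade_write functions in this file when the generic spinlock implementation is selected and the lock is contended. The sketch below shows typical usage; it is illustrative only, and the names hypothetical_sem and hypothetical_table are invented for the example.

#include <linux/rwsem.h>
#include <linux/kernel.h>

static DECLARE_RWSEM(hypothetical_sem);
static int hypothetical_table[16];

int read_entry(int i)
{
	int v;

	down_read(&hypothetical_sem);	/* sem->activity > 0: N readers in */
	v = hypothetical_table[i];
	up_read(&hypothetical_sem);	/* last reader out may wake one queued writer */
	return v;
}

void update_entry(int i, int val)
{
	down_write(&hypothetical_sem);	/* sem->activity == -1: exclusive */
	hypothetical_table[i] = val;
	/* keep reading without letting another writer in: activity becomes 1
	 * and only readers at the front of the queue are woken (wakewrite=0) */
	downgrade_write(&hypothetical_sem);
	printk(KERN_DEBUG "entry %d is now %d\n", i, hypothetical_table[i]);
	up_read(&hypothetical_sem);
}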
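
A note on the mb() whose explanatory comment ("Don't touch waiter after ->task has been NULLed") this patch drops: waking is a handoff between the waker in __rwsem_do_wake()/__rwsem_wake_one_writer() and a sleeper in __down_read()/__down_write(). The sketch below pairs the two sides, taking the sleeper loop from the unchanged part of this file; the comments are editorial, not from the source.

	/* sleeper side: having queued its on-stack rwsem_waiter, it polls
	 * waiter.task, sleeping between checks */
	for (;;) {
		if (!waiter.task)	/* the lock has been granted by the waker */
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	tsk->state = TASK_RUNNING;

	/* waker side: once waiter->task is NULLed the sleeper may return and
	 * its stack frame (and with it *waiter) can vanish, so the task
	 * pointer is read first, the mb() keeps that read ahead of the store,
	 * and *waiter is never touched afterwards */
	tsk = waiter->task;
	mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);	/* drops the reference the sleeper took */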