diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index a71152d55..21f0db2c9 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -1,5 +1,5 @@
-/* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic spinlock
- * implementation
+/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
+ * generic spinlock implementation
  *
  * Copyright (c) 2001 David Howells (dhowells@redhat.com).
  * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
@@ -10,9 +10,9 @@
 #include <linux/module.h>
 
 struct rwsem_waiter {
-        struct list_head        list;
-        struct task_struct      *task;
-        unsigned int            flags;
+        struct list_head list;
+        struct task_struct *task;
+        unsigned int flags;
 #define RWSEM_WAITING_FOR_READ  0x00000001
 #define RWSEM_WAITING_FOR_WRITE 0x00000002
 };
@@ -22,7 +22,8 @@
 void rwsemtrace(struct rw_semaphore *sem, const char *str)
 {
         if (sem->debug)
                 printk("[%d] %s({%d,%d})\n",
-                       current->pid,str,sem->activity,list_empty(&sem->wait_list)?0:1);
+                       current->pid, str, sem->activity,
+                       list_empty(&sem->wait_list) ? 0 : 1);
 }
 #endif
@@ -40,7 +41,7 @@ void fastcall init_rwsem(struct rw_semaphore *sem)
 }
 
 /*
- * handle the lock being released whilst there are processes blocked on it that can now run
+ * handle the lock release when processes blocked on it that can now run
  * - if we come here, then:
  *   - the 'active count' _reached_ zero
  *   - the 'waiting count' is non-zero
@@ -48,15 +49,16 @@ void fastcall init_rwsem(struct rw_semaphore *sem)
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only woken if wakewrite is non-zero
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
+static inline struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 {
         struct rwsem_waiter *waiter;
         struct task_struct *tsk;
         int woken;
 
-        rwsemtrace(sem,"Entering __rwsem_do_wake");
+        rwsemtrace(sem, "Entering __rwsem_do_wake");
 
-        waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
         if (!wakewrite) {
                 if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
@@ -64,14 +66,16 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
                         goto dont_wake_writers;
         }
 
-        /* if we are allowed to wake writers try to grant a single write lock if there's a
-         * writer at the front of the queue
-         * - we leave the 'waiting count' incremented to signify potential contention
+        /* if we are allowed to wake writers try to grant a single write lock
+         * if there's a writer at the front of the queue
+         * - we leave the 'waiting count' incremented to signify potential
+         *   contention
          */
         if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
                 sem->activity = -1;
                 list_del(&waiter->list);
                 tsk = waiter->task;
+                /* Don't touch waiter after ->task has been NULLed */
                 mb();
                 waiter->task = NULL;
                 wake_up_process(tsk);
@@ -79,10 +83,10 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
                 goto out;
         }
 
-        /* grant an infinite number of read locks to the readers at the front of the queue */
+        /* grant an infinite number of read locks to the front of the queue */
  dont_wake_writers:
         woken = 0;
-        while (waiter->flags&RWSEM_WAITING_FOR_READ) {
+        while (waiter->flags & RWSEM_WAITING_FOR_READ) {
                 struct list_head *next = waiter->list.next;
 
                 list_del(&waiter->list);
@@ -94,27 +98,28 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
                 woken++;
                 if (list_empty(&sem->wait_list))
                         break;
-                waiter = list_entry(next,struct rwsem_waiter,list);
+                waiter = list_entry(next, struct rwsem_waiter, list);
         }
 
         sem->activity += woken;
 
  out:
-        rwsemtrace(sem,"Leaving __rwsem_do_wake");
+        rwsemtrace(sem, "Leaving __rwsem_do_wake");
         return sem;
 }
 
 /*
  * wake a single writer
  */
-static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
+static inline struct rw_semaphore *
+__rwsem_wake_one_writer(struct rw_semaphore *sem)
 {
         struct rwsem_waiter *waiter;
         struct task_struct *tsk;
 
         sem->activity = -1;
 
-        waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
         list_del(&waiter->list);
 
         tsk = waiter->task;
@@ -128,34 +133,34 @@ static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *
 /*
  * get a read lock on the semaphore
  */
-void fastcall __down_read(struct rw_semaphore *sem)
+void fastcall __sched __down_read(struct rw_semaphore *sem)
 {
         struct rwsem_waiter waiter;
         struct task_struct *tsk;
 
-        rwsemtrace(sem,"Entering __down_read");
+        rwsemtrace(sem, "Entering __down_read");
 
-        spin_lock(&sem->wait_lock);
+        spin_lock_irq(&sem->wait_lock);
 
-        if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                 /* granted */
                 sem->activity++;
-                spin_unlock(&sem->wait_lock);
+                spin_unlock_irq(&sem->wait_lock);
                 goto out;
         }
 
         tsk = current;
-        set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
         /* set up my own style of waitqueue */
         waiter.task = tsk;
         waiter.flags = RWSEM_WAITING_FOR_READ;
         get_task_struct(tsk);
 
-        list_add_tail(&waiter.list,&sem->wait_list);
+        list_add_tail(&waiter.list, &sem->wait_list);
 
         /* we don't need to touch the semaphore struct anymore */
-        spin_unlock(&sem->wait_lock);
+        spin_unlock_irq(&sem->wait_lock);
 
         /* wait to be given the lock */
         for (;;) {
@@ -168,7 +173,7 @@ void fastcall __down_read(struct rw_semaphore *sem)
         tsk->state = TASK_RUNNING;
 
  out:
-        rwsemtrace(sem,"Leaving __down_read");
+        rwsemtrace(sem, "Leaving __down_read");
 }
 
 /*
@@ -176,55 +181,57 @@
  */
 int fastcall __down_read_trylock(struct rw_semaphore *sem)
 {
+        unsigned long flags;
         int ret = 0;
 
-        rwsemtrace(sem,"Entering __down_read_trylock");
-        spin_lock(&sem->wait_lock);
+        rwsemtrace(sem, "Entering __down_read_trylock");
 
-        if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+        spin_lock_irqsave(&sem->wait_lock, flags);
+
+        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                 /* granted */
                 sem->activity++;
                 ret = 1;
         }
 
-        spin_unlock(&sem->wait_lock);
+        spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-        rwsemtrace(sem,"Leaving __down_read_trylock");
+        rwsemtrace(sem, "Leaving __down_read_trylock");
         return ret;
 }
 
 /*
  * get a write lock on the semaphore
- * - note that we increment the waiting count anyway to indicate an exclusive lock
+ * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write(struct rw_semaphore *sem)
 {
         struct rwsem_waiter waiter;
         struct task_struct *tsk;
 
-        rwsemtrace(sem,"Entering __down_write");
+        rwsemtrace(sem, "Entering __down_write");
 
-        spin_lock(&sem->wait_lock);
+        spin_lock_irq(&sem->wait_lock);
 
-        if (sem->activity==0 && list_empty(&sem->wait_list)) {
+        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                 /* granted */
                 sem->activity = -1;
-                spin_unlock(&sem->wait_lock);
+                spin_unlock_irq(&sem->wait_lock);
                 goto out;
         }
 
         tsk = current;
-        set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
         /* set up my own style of waitqueue */
         waiter.task = tsk;
         waiter.flags = RWSEM_WAITING_FOR_WRITE;
         get_task_struct(tsk);
 
-        list_add_tail(&waiter.list,&sem->wait_list);
+        list_add_tail(&waiter.list, &sem->wait_list);
 
         /* we don't need to touch the semaphore struct anymore */
-        spin_unlock(&sem->wait_lock);
+        spin_unlock_irq(&sem->wait_lock);
 
         /* wait to be given the lock */
         for (;;) {
@@ -237,7 +244,7 @@ void fastcall __down_write(struct rw_semaphore *sem)
         tsk->state = TASK_RUNNING;
 
  out:
-        rwsemtrace(sem,"Leaving __down_write");
+        rwsemtrace(sem, "Leaving __down_write");
 }
 
 /*
@@ -245,20 +252,22 @@ void fastcall __down_write(struct rw_semaphore *sem)
  */
 int fastcall __down_write_trylock(struct rw_semaphore *sem)
 {
+        unsigned long flags;
         int ret = 0;
 
-        rwsemtrace(sem,"Entering __down_write_trylock");
-        spin_lock(&sem->wait_lock);
+        rwsemtrace(sem, "Entering __down_write_trylock");
+
+        spin_lock_irqsave(&sem->wait_lock, flags);
 
-        if (sem->activity==0 && list_empty(&sem->wait_list)) {
+        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                 /* granted */
                 sem->activity = -1;
                 ret = 1;
         }
 
-        spin_unlock(&sem->wait_lock);
+        spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-        rwsemtrace(sem,"Leaving __down_write_trylock");
+        rwsemtrace(sem, "Leaving __down_write_trylock");
         return ret;
 }
 
@@ -267,16 +276,18 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
  */
 void fastcall __up_read(struct rw_semaphore *sem)
 {
-        rwsemtrace(sem,"Entering __up_read");
+        unsigned long flags;
+
+        rwsemtrace(sem, "Entering __up_read");
 
-        spin_lock(&sem->wait_lock);
+        spin_lock_irqsave(&sem->wait_lock, flags);
 
-        if (--sem->activity==0 && !list_empty(&sem->wait_list))
+        if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                 sem = __rwsem_wake_one_writer(sem);
 
-        spin_unlock(&sem->wait_lock);
+        spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-        rwsemtrace(sem,"Leaving __up_read");
+        rwsemtrace(sem, "Leaving __up_read");
 }
 
 /*
@@ -284,17 +295,19 @@ void fastcall __up_read(struct rw_semaphore *sem)
  */
 void fastcall __up_write(struct rw_semaphore *sem)
 {
-        rwsemtrace(sem,"Entering __up_write");
+        unsigned long flags;
 
-        spin_lock(&sem->wait_lock);
+        rwsemtrace(sem, "Entering __up_write");
+
+        spin_lock_irqsave(&sem->wait_lock, flags);
 
         sem->activity = 0;
         if (!list_empty(&sem->wait_list))
                 sem = __rwsem_do_wake(sem, 1);
 
-        spin_unlock(&sem->wait_lock);
+        spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-        rwsemtrace(sem,"Leaving __up_write");
+        rwsemtrace(sem, "Leaving __up_write");
 }
 
 /*
@@ -303,17 +316,19 @@ void fastcall __up_write(struct rw_semaphore *sem)
  */
 void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
-        rwsemtrace(sem,"Entering __downgrade_write");
+        unsigned long flags;
+
+        rwsemtrace(sem, "Entering __downgrade_write");
 
-        spin_lock(&sem->wait_lock);
+        spin_lock_irqsave(&sem->wait_lock, flags);
 
         sem->activity = 1;
         if (!list_empty(&sem->wait_list))
-                sem = __rwsem_do_wake(sem,0);
+                sem = __rwsem_do_wake(sem, 0);
 
-        spin_unlock(&sem->wait_lock);
+        spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-        rwsemtrace(sem,"Leaving __downgrade_write");
+        rwsemtrace(sem, "Leaving __downgrade_write");
 }
 
 EXPORT_SYMBOL(init_rwsem);
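
For reference, the invariant the patch preserves: sem->activity is 0 while the semaphore is free, N (> 0) while N readers hold it, and -1 while a single writer holds it; sem->wait_lock guards that counter and the FIFO wait list, which is why every path above now takes it with interrupts disabled. The stand-alone sketch below models only the counter semantics using POSIX threads; it is a minimal illustration, not kernel code — the names (rwsem_model, model_down_read, ...) are invented, and it deliberately omits the kernel's FIFO queueing, direct wake_up_process() handoff, and IRQ handling.

/*
 * User-space model of rwsem-spinlock.c's `activity' counter:
 * 0 = free, N > 0 = N readers hold the lock, -1 = one writer holds it.
 * A pthread mutex/condvar pair stands in for sem->wait_lock and the
 * wait list; all names here are illustrative, not kernel APIs.
 *
 * Build: cc -o rwsem_model rwsem_model.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

struct rwsem_model {
        int activity;                   /* 0 free, >0 readers, -1 writer */
        pthread_mutex_t lock;           /* plays the role of sem->wait_lock */
        pthread_cond_t wait;            /* plays the role of sem->wait_list */
};

static void model_down_read(struct rwsem_model *sem)
{
        pthread_mutex_lock(&sem->lock);
        while (sem->activity < 0)       /* a writer holds the lock: sleep */
                pthread_cond_wait(&sem->wait, &sem->lock);
        sem->activity++;                /* granted: one more reader */
        pthread_mutex_unlock(&sem->lock);
}

static void model_up_read(struct rwsem_model *sem)
{
        pthread_mutex_lock(&sem->lock);
        if (--sem->activity == 0)       /* last reader out: writers may go */
                pthread_cond_broadcast(&sem->wait);
        pthread_mutex_unlock(&sem->lock);
}

static void model_down_write(struct rwsem_model *sem)
{
        pthread_mutex_lock(&sem->lock);
        while (sem->activity != 0)      /* writers need the lock idle */
                pthread_cond_wait(&sem->wait, &sem->lock);
        sem->activity = -1;             /* granted exclusively */
        pthread_mutex_unlock(&sem->lock);
}

static void model_up_write(struct rwsem_model *sem)
{
        pthread_mutex_lock(&sem->lock);
        sem->activity = 0;
        pthread_cond_broadcast(&sem->wait);     /* wake readers and writers */
        pthread_mutex_unlock(&sem->lock);
}

static void model_downgrade_write(struct rwsem_model *sem)
{
        pthread_mutex_lock(&sem->lock);
        sem->activity = 1;              /* writer becomes the sole reader */
        pthread_cond_broadcast(&sem->wait);     /* other readers may join */
        pthread_mutex_unlock(&sem->lock);
}

int main(void)
{
        struct rwsem_model sem = {
                .activity = 0,
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .wait = PTHREAD_COND_INITIALIZER,
        };

        model_down_read(&sem);
        model_down_read(&sem);
        printf("two readers:     activity = %d\n", sem.activity);  /* 2 */
        model_up_read(&sem);
        model_up_read(&sem);

        model_down_write(&sem);
        printf("one writer:      activity = %d\n", sem.activity);  /* -1 */
        model_downgrade_write(&sem);
        printf("after downgrade: activity = %d\n", sem.activity);  /* 1 */
        model_up_read(&sem);

        return 0;
}

Unlike the kernel code, which hands the lock directly to sleepers in FIFO order (so a late arrival can never steal it from a queued writer), this condvar model can starve writers; it is only meant to make the meaning of the activity counter concrete.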