/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>
13 struct list_head list;
14 struct task_struct *task;
16 #define RWSEM_WAITING_FOR_READ 0x00000001
17 #define RWSEM_WAITING_FOR_WRITE 0x00000002
21 void rwsemtrace(struct rw_semaphore *sem, const char *str)
24 printk("[%d] %s({%d,%d})\n",
25 current->pid,str,sem->activity,list_empty(&sem->wait_list)?0:1);
30 * initialise the semaphore
32 void fastcall init_rwsem(struct rw_semaphore *sem)
35 spin_lock_init(&sem->wait_lock);
36 INIT_LIST_HEAD(&sem->wait_list);
43 * handle the lock being released whilst there are processes blocked on it that can now run
44 * - if we come here, then:
45 * - the 'active count' _reached_ zero
46 * - the 'waiting count' is non-zero
47 * - the spinlock must be held by the caller
48 * - woken process blocks are discarded from the list after having task zeroed
49 * - writers are only woken if wakewrite is non-zero
51 static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
53 struct rwsem_waiter *waiter;
54 struct task_struct *tsk;
57 rwsemtrace(sem,"Entering __rwsem_do_wake");
59 waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
62 if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
64 goto dont_wake_writers;
67 /* if we are allowed to wake writers try to grant a single write lock if there's a
68 * writer at the front of the queue
69 * - we leave the 'waiting count' incremented to signify potential contention
71 if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
73 list_del(&waiter->list);
82 /* grant an infinite number of read locks to the readers at the front of the queue */
85 while (waiter->flags&RWSEM_WAITING_FOR_READ) {
86 struct list_head *next = waiter->list.next;
88 list_del(&waiter->list);
95 if (list_empty(&sem->wait_list))
97 waiter = list_entry(next,struct rwsem_waiter,list);
100 sem->activity += woken;
103 rwsemtrace(sem,"Leaving __rwsem_do_wake");
108 * wake a single writer
110 static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
112 struct rwsem_waiter *waiter;
113 struct task_struct *tsk;
117 waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
118 list_del(&waiter->list);
123 wake_up_process(tsk);
124 put_task_struct(tsk);
129 * get a read lock on the semaphore
131 void fastcall __down_read(struct rw_semaphore *sem)
133 struct rwsem_waiter waiter;
134 struct task_struct *tsk;
136 rwsemtrace(sem,"Entering __down_read");
138 spin_lock(&sem->wait_lock);
140 if (sem->activity>=0 && list_empty(&sem->wait_list)) {
143 spin_unlock(&sem->wait_lock);
148 set_task_state(tsk,TASK_UNINTERRUPTIBLE);
150 /* set up my own style of waitqueue */
152 waiter.flags = RWSEM_WAITING_FOR_READ;
153 get_task_struct(tsk);
155 list_add_tail(&waiter.list,&sem->wait_list);
157 /* we don't need to touch the semaphore struct anymore */
158 spin_unlock(&sem->wait_lock);
160 /* wait to be given the lock */
165 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
168 tsk->state = TASK_RUNNING;
171 rwsemtrace(sem,"Leaving __down_read");
175 * trylock for reading -- returns 1 if successful, 0 if contention
177 int fastcall __down_read_trylock(struct rw_semaphore *sem)
180 rwsemtrace(sem,"Entering __down_read_trylock");
182 spin_lock(&sem->wait_lock);
184 if (sem->activity>=0 && list_empty(&sem->wait_list)) {
190 spin_unlock(&sem->wait_lock);
192 rwsemtrace(sem,"Leaving __down_read_trylock");
197 * get a write lock on the semaphore
198 * - note that we increment the waiting count anyway to indicate an exclusive lock
200 void fastcall __down_write(struct rw_semaphore *sem)
202 struct rwsem_waiter waiter;
203 struct task_struct *tsk;
205 rwsemtrace(sem,"Entering __down_write");
207 spin_lock(&sem->wait_lock);
209 if (sem->activity==0 && list_empty(&sem->wait_list)) {
212 spin_unlock(&sem->wait_lock);
217 set_task_state(tsk,TASK_UNINTERRUPTIBLE);
219 /* set up my own style of waitqueue */
221 waiter.flags = RWSEM_WAITING_FOR_WRITE;
222 get_task_struct(tsk);
224 list_add_tail(&waiter.list,&sem->wait_list);
226 /* we don't need to touch the semaphore struct anymore */
227 spin_unlock(&sem->wait_lock);
229 /* wait to be given the lock */
234 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
237 tsk->state = TASK_RUNNING;
240 rwsemtrace(sem,"Leaving __down_write");
244 * trylock for writing -- returns 1 if successful, 0 if contention
246 int fastcall __down_write_trylock(struct rw_semaphore *sem)
249 rwsemtrace(sem,"Entering __down_write_trylock");
251 spin_lock(&sem->wait_lock);
253 if (sem->activity==0 && list_empty(&sem->wait_list)) {
259 spin_unlock(&sem->wait_lock);
261 rwsemtrace(sem,"Leaving __down_write_trylock");
266 * release a read lock on the semaphore
268 void fastcall __up_read(struct rw_semaphore *sem)
270 rwsemtrace(sem,"Entering __up_read");
272 spin_lock(&sem->wait_lock);
274 if (--sem->activity==0 && !list_empty(&sem->wait_list))
275 sem = __rwsem_wake_one_writer(sem);
277 spin_unlock(&sem->wait_lock);
279 rwsemtrace(sem,"Leaving __up_read");
283 * release a write lock on the semaphore
285 void fastcall __up_write(struct rw_semaphore *sem)
287 rwsemtrace(sem,"Entering __up_write");
289 spin_lock(&sem->wait_lock);
292 if (!list_empty(&sem->wait_list))
293 sem = __rwsem_do_wake(sem, 1);
295 spin_unlock(&sem->wait_lock);
297 rwsemtrace(sem,"Leaving __up_write");
301 * downgrade a write lock into a read lock
302 * - just wake up any readers at the front of the queue
304 void fastcall __downgrade_write(struct rw_semaphore *sem)
306 rwsemtrace(sem,"Entering __downgrade_write");
308 spin_lock(&sem->wait_lock);
311 if (!list_empty(&sem->wait_list))
312 sem = __rwsem_do_wake(sem,0);
314 spin_unlock(&sem->wait_lock);
316 rwsemtrace(sem,"Leaving __downgrade_write");
/* export the public entry points for use by modules */
EXPORT_SYMBOL(init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);
EXPORT_SYMBOL(rwsemtrace);