lib/rwsem-spinlock.c (linux-2.6.6)
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic spinlock
 *                                   implementation
 *
 * Copyright (c) 2001   David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>

struct rwsem_waiter {
        struct list_head        list;
        struct task_struct      *task;
        unsigned int            flags;
#define RWSEM_WAITING_FOR_READ  0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
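
/*
 * This file provides the contention-handling slow paths for the generic,
 * spinlock-based rw_semaphore, used when an architecture has no native
 * atomic implementation.  sem->activity encodes the lock state: 0 means
 * the semaphore is free, a positive value is the number of readers
 * currently holding it, and -1 means a single writer holds it.  Blocked
 * tasks queue FIFO on sem->wait_list using rwsem_waiter records placed
 * on their own stacks, and are granted the lock by having waiter->task
 * cleared before being woken.
 */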

#if RWSEM_DEBUG
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
        if (sem->debug)
                printk("[%d] %s({%d,%d})\n",
                       current->pid,str,sem->activity,list_empty(&sem->wait_list)?0:1);
}
#endif

/*
 * initialise the semaphore
 */
void fastcall init_rwsem(struct rw_semaphore *sem)
{
        sem->activity = 0;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
        sem->debug = 0;
#endif
}
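
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * callers normally reach this code through the wrappers declared in
 * <linux/rwsem.h>: init_rwsem(), down_read()/up_read() for shared access
 * and down_write()/up_write() for exclusive access.
 */
#if 0	/* usage sketch only */
static struct rw_semaphore example_sem;
static int example_data;

static void example_setup(void)
{
        init_rwsem(&example_sem);
}

static int example_reader(void)
{
        int val;

        down_read(&example_sem);        /* shared: many readers may hold it */
        val = example_data;
        up_read(&example_sem);
        return val;
}

static void example_writer(int val)
{
        down_write(&example_sem);       /* exclusive: excludes readers and writers */
        example_data = val;
        up_write(&example_sem);
}
#endif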

/*
 * handle the lock being released whilst there are processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        int woken;

        rwsemtrace(sem,"Entering __rwsem_do_wake");

        waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);

        if (!wakewrite) {
                if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
                        goto out;
                goto dont_wake_writers;
        }

        /* if we are allowed to wake writers try to grant a single write lock if there's a
         * writer at the front of the queue
         * - we leave the 'waiting count' incremented to signify potential contention
         */
        if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
                sem->activity = -1;
                list_del(&waiter->list);
                tsk = waiter->task;
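                /* the rwsem_waiter lives on the sleeping task's stack and may
                 * vanish as soon as the wait loop in __down_write() sees
                 * waiter->task == NULL, so finish every access to it before
                 * the store below; the task reference taken at queueing time
                 * keeps tsk valid for wake_up_process()
                 */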
                mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
                goto out;
        }

        /* grant an infinite number of read locks to the readers at the front of the queue */
 dont_wake_writers:
        woken = 0;
        while (waiter->flags&RWSEM_WAITING_FOR_READ) {
                struct list_head *next = waiter->list.next;

                list_del(&waiter->list);
                tsk = waiter->task;
                mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
                woken++;
                if (list_empty(&sem->wait_list))
                        break;
                waiter = list_entry(next,struct rwsem_waiter,list);
        }

        sem->activity += woken;

 out:
        rwsemtrace(sem,"Leaving __rwsem_do_wake");
        return sem;
}

/*
 * wake a single writer
 */
static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;

        sem->activity = -1;

        waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
        list_del(&waiter->list);

        tsk = waiter->task;
        mb();
        waiter->task = NULL;
        wake_up_process(tsk);
        put_task_struct(tsk);
        return sem;
}

/*
 * get a read lock on the semaphore
 */
void fastcall __down_read(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;

        rwsemtrace(sem,"Entering __down_read");

        spin_lock(&sem->wait_lock);

        if (sem->activity>=0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                spin_unlock(&sem->wait_lock);
                goto out;
        }

        tsk = current;
        set_task_state(tsk,TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.flags = RWSEM_WAITING_FOR_READ;
        get_task_struct(tsk);

        list_add_tail(&waiter.list,&sem->wait_list);

        /* we don't need to touch the semaphore struct anymore */
        spin_unlock(&sem->wait_lock);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter.task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;

 out:
        rwsemtrace(sem,"Leaving __down_read");
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int fastcall __down_read_trylock(struct rw_semaphore *sem)
{
        int ret = 0;
        rwsemtrace(sem,"Entering __down_read_trylock");

        spin_lock(&sem->wait_lock);

        if (sem->activity>=0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                ret = 1;
        }

        spin_unlock(&sem->wait_lock);

        rwsemtrace(sem,"Leaving __down_read_trylock");
        return ret;
}

/*
 * get a write lock on the semaphore
 * - note that we increment the waiting count anyway to indicate an exclusive lock
 */
void fastcall __down_write(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;

        rwsemtrace(sem,"Entering __down_write");

        spin_lock(&sem->wait_lock);

        if (sem->activity==0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity = -1;
                spin_unlock(&sem->wait_lock);
                goto out;
        }

        tsk = current;
        set_task_state(tsk,TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.flags = RWSEM_WAITING_FOR_WRITE;
        get_task_struct(tsk);

        list_add_tail(&waiter.list,&sem->wait_list);

        /* we don't need to touch the semaphore struct anymore */
        spin_unlock(&sem->wait_lock);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter.task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;

 out:
        rwsemtrace(sem,"Leaving __down_write");
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int fastcall __down_write_trylock(struct rw_semaphore *sem)
{
        int ret = 0;
        rwsemtrace(sem,"Entering __down_write_trylock");

        spin_lock(&sem->wait_lock);

        if (sem->activity==0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity = -1;
                ret = 1;
        }

        spin_unlock(&sem->wait_lock);

        rwsemtrace(sem,"Leaving __down_write_trylock");
        return ret;
}
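
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * the trylock paths above back down_read_trylock()/down_write_trylock() from
 * <linux/rwsem.h>, which return 1 on success and 0 under contention without
 * ever sleeping.
 */
#if 0	/* usage sketch only */
static int example_try_update(struct rw_semaphore *sem, int *data, int val)
{
        if (!down_write_trylock(sem))
                return 0;               /* contended: caller must retry later */
        *data = val;
        up_write(sem);
        return 1;
}
#endif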

/*
 * release a read lock on the semaphore
 */
void fastcall __up_read(struct rw_semaphore *sem)
{
        rwsemtrace(sem,"Entering __up_read");

        spin_lock(&sem->wait_lock);

        if (--sem->activity==0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);

        spin_unlock(&sem->wait_lock);

        rwsemtrace(sem,"Leaving __up_read");
}

/*
 * release a write lock on the semaphore
 */
void fastcall __up_write(struct rw_semaphore *sem)
{
        rwsemtrace(sem,"Entering __up_write");

        spin_lock(&sem->wait_lock);

        sem->activity = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);

        spin_unlock(&sem->wait_lock);

        rwsemtrace(sem,"Leaving __up_write");
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void fastcall __downgrade_write(struct rw_semaphore *sem)
{
        rwsemtrace(sem,"Entering __downgrade_write");

        spin_lock(&sem->wait_lock);

        sem->activity = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem,0);

        spin_unlock(&sem->wait_lock);

        rwsemtrace(sem,"Leaving __downgrade_write");
}
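
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * the classic use of downgrade_write() is to build a data structure under
 * the write lock and then keep reading it while letting queued readers in.
 */
#if 0	/* usage sketch only */
static void example_publish(struct rw_semaphore *sem)
{
        down_write(sem);
        /* ... populate the protected data exclusively ... */
        downgrade_write(sem);           /* wakes waiting readers, we keep a read hold */
        /* ... keep using the data alongside other readers ... */
        up_read(sem);
}
#endif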

EXPORT_SYMBOL(init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif