/*
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);
void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
int fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	int found = 0;
	unsigned long flags;
	struct list_head *list = &q->task_list;

	spin_lock_irqsave(&q->lock, flags);
	do {
		struct list_head *next = list->next;
		if (list == &wait->task_list) {
			found = 1;
			break;
		}
		if (next->prev != list)
			break;	/* list corruption - stop walking */
		list = next;
	} while (list != &q->task_list);
	if (found)
		__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
	return found;
}
EXPORT_SYMBOL(remove_wait_queue);
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 *
 * A usage sketch of this API follows finish_wait() below.
 */
void fastcall
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
void fastcall
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area).
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);
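
/*
 * A minimal usage sketch of the prepare_to_wait()/finish_wait() API above,
 * referred to from the note before prepare_to_wait(). It is not part of
 * this file; "my_queue" and "my_condition" are hypothetical caller-side
 * names standing in for a wait_queue_head_t and a wakeup condition:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_queue, &wait, TASK_INTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_queue, &wait);
 *
 * The waker sets "my_condition" and then calls wake_up(&my_queue); the
 * set_current_state() barrier discussed above is what makes the unlocked
 * condition test safe against a concurrent wakeup.
 */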
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wait, struct wait_bit_queue, wait);

	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			test_bit(key->bit_nr, key->flags))
		return 0;
	else
		return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);
/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the action functions passed to __wait_on_bit() and
 * __wait_on_bit_lock() may return error codes. A nonzero return code
 * halts waiting and is propagated back to the caller.
 */
int __sched fastcall
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags))
			ret = (*action)(q->key.flags);
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);
int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
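
/*
 * A minimal sketch of an "action" callback for the bit-wait API above. It
 * is not part of this file; "my_wait_action", "my_flags" and "MY_BIT" are
 * hypothetical names. The action chooses how to sleep once the bit has been
 * observed set, and a nonzero return value aborts the wait:
 *
 *	static int my_wait_action(void *word)
 *	{
 *		if (signal_pending(current))
 *			return -ERESTARTSYS;
 *		schedule();
 *		return 0;
 *	}
 *
 *	ret = out_of_line_wait_on_bit(&my_flags, MY_BIT, my_wait_action,
 *					TASK_INTERRUPTIBLE);
 */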
int __sched fastcall
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	int ret = 0;
	do {
		prepare_to_wait_exclusive(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags)) {
			if ((ret = (*action)(q->key.flags)))
				break;
		}
	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit_lock);
int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);
/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_clear_bit(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void fastcall wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
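
/*
 * A minimal sketch of the waker side described above. It is not part of
 * this file; "my_flags" and "MY_BIT" are hypothetical. The barrier between
 * clearing the bit and waking is what makes the waitqueue_active() test in
 * __wake_up_bit() safe:
 *
 *	clear_bit(MY_BIT, &my_flags);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&my_flags, MY_BIT);
 *
 * A sleeper pairs this with out_of_line_wait_on_bit() (or the wait_on_bit()
 * wrappers in <linux/wait.h>) on the same word and bit.
 */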
fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
	const struct zone *zone = page_zone(virt_to_page(word));
	unsigned long val = (unsigned long)word << shift | bit;

	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);