/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@redhat.com>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

/*
 * The per-CPU workqueue (if single thread, we always use cpu 0's).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        long remove_sequence;   /* Least-recently added (next to run) */
        long insert_sequence;   /* Next to add */

        struct list_head worklist;
        wait_queue_head_t more_work;
        wait_queue_head_t work_done;

        struct workqueue_struct *wq;
        struct task_struct *thread;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct cpu_wq[NR_CPUS];
        const char *name;
        struct list_head list;  /* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static spinlock_t workqueue_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(workqueues);

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return list_empty(&wq->list);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        work->wq_data = cwq;
        list_add_tail(&work->entry, &cwq->worklist);
        cwq->insert_sequence++;
        wake_up(&cwq->more_work);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/*
 * Queue work on a workqueue. Return non-zero if it was successfully
 * added.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(0, &work->pending)) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = 0;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(wq->cpu_wq + cpu, work);
                ret = 1;
        }
        put_cpu();
        return ret;
}

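/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * owns a workqueue typically declares its work item with DECLARE_WORK() or
 * INIT_WORK() and queues it from whatever context noticed there is work to
 * do.  The names my_wq, my_work and my_handler below are hypothetical.
 *
 *      static void my_handler(void *data)
 *      {
 *              ... runs in process context, may sleep ...
 *      }
 *      static DECLARE_WORK(my_work, my_handler, NULL);
 *
 *      queue_work(my_wq, &my_work);    // returns 0 if my_work is already pending
 */
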
static void delayed_work_timer_fn(unsigned long __data)
{
        struct work_struct *work = (struct work_struct *)__data;
        struct workqueue_struct *wq = work->wq_data;
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = 0;

        __queue_work(wq->cpu_wq + cpu, work);
}

int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct work_struct *work, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &work->timer;

        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                work->wq_data = wq;
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)work;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}

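/*
 * Usage sketch (illustrative only, not part of this file): deferring work by
 * a number of jiffies.  The names my_wq and my_work are hypothetical;
 * cancel_delayed_work() is the usual way to retract a not-yet-expired timer.
 *
 *      queue_delayed_work(my_wq, &my_work, HZ);        // run ~1 second from now
 *      ...
 *      if (!cancel_delayed_work(&my_work))
 *              flush_workqueue(my_wq);         // it may already be running
 */
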
static inline void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        unsigned long flags;

        /*
         * Keep taking off work from the queue until
         * done.
         */
        spin_lock_irqsave(&cwq->lock, flags);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                void (*f) (void *) = work->func;
                void *data = work->data;

                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);

                BUG_ON(work->wq_data != cwq);
                clear_bit(0, &work->pending);
                f(data);

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->remove_sequence++;
                wake_up(&cwq->work_done);
        }
        cwq->run_depth--;
        spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DECLARE_WAITQUEUE(wait, current);
        struct k_sigaction sa;
        sigset_t blocked;

        current->flags |= PF_NOFREEZE;

        set_user_nice(current, -10);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);
                remove_wait_queue(&cwq->more_work, &wait);

                if (!list_empty(&cwq->worklist))
                        run_workqueue(cwq);
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number
 * and will sleep until the remove_sequence is greater than or equal to that.
 * This means that we sleep until all works which were queued on entry have
 * been handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        struct cpu_workqueue_struct *cwq;
        int cpu;

        might_sleep();
        lock_cpu_hotplug();
        for_each_online_cpu(cpu) {
                DEFINE_WAIT(wait);
                long sequence_needed;

                if (is_single_threaded(wq))
                        cwq = wq->cpu_wq + 0; /* Always use cpu 0's area. */
                else
                        cwq = wq->cpu_wq + cpu;

                if (cwq->thread == current) {
                        /*
                         * Probably keventd trying to flush its own queue.
                         * So simply run it by hand rather than deadlocking.
                         */
                        run_workqueue(cwq);
                        continue;
                }
                spin_lock_irq(&cwq->lock);
                sequence_needed = cwq->insert_sequence;

                while (sequence_needed - cwq->remove_sequence > 0) {
                        prepare_to_wait(&cwq->work_done, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        spin_unlock_irq(&cwq->lock);
                        schedule();
                        spin_lock_irq(&cwq->lock);
                }
                finish_wait(&cwq->work_done, &wait);
                spin_unlock_irq(&cwq->lock);
        }
        unlock_cpu_hotplug();
}

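/*
 * Typical shutdown pattern (illustrative sketch, not part of this file):
 * stop queueing new work, make sure any delayed work's timer has fired or
 * been cancelled, then flush before freeing resources the handlers touch.
 * The names my_wq and my_work are hypothetical.
 *
 *      cancel_delayed_work(&my_work);  // retract a pending timer, if any
 *      flush_workqueue(my_wq);         // wait for already-queued work
 *      destroy_workqueue(my_wq);       // flushes again, then stops the threads
 */
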
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                   int cpu)
{
        struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu;
        struct task_struct *p;

        spin_lock_init(&cwq->lock);
        cwq->wq = wq;
        cwq->thread = NULL;
        cwq->insert_sequence = 0;
        cwq->remove_sequence = 0;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);
        init_waitqueue_head(&cwq->work_done);

        if (is_single_threaded(wq))
                p = kthread_create(worker_thread, cwq, "%s", wq->name);
        else
                p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
        if (IS_ERR(p))
                return NULL;
        cwq->thread = p;
        return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread)
{
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
        struct task_struct *p;

        BUG_ON(strlen(name) > 10);

        wq = kmalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;
        memset(wq, 0, sizeof(*wq));

        wq->name = name;
        /* We don't need the distraction of CPUs appearing and vanishing. */
        lock_cpu_hotplug();
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
                p = create_workqueue_thread(wq, 0);
                if (!p)
                        destroy = 1;
                else
                        wake_up_process(p);
        } else {
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
                for_each_online_cpu(cpu) {
                        p = create_workqueue_thread(wq, cpu);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
                        } else
                                destroy = 1;
                }
        }
        unlock_cpu_hotplug();

        /*
         * Was there any error during startup? If yes then clean up:
         */
        if (destroy) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}

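/*
 * Usage sketch (illustrative only, not part of this file): __create_workqueue()
 * is normally reached through the create_workqueue() and
 * create_singlethread_workqueue() wrappers in <linux/workqueue.h>.  Note the
 * BUG_ON() above: the name must stay short, as it becomes the thread name.
 * The variable my_wq below is hypothetical.
 *
 *      struct workqueue_struct *my_wq;
 *
 *      my_wq = create_singlethread_workqueue("mydrv");
 *      if (!my_wq)
 *              return -ENOMEM;
 */
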
static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq;
        unsigned long flags;
        struct task_struct *p;

        cwq = wq->cpu_wq + cpu;
        spin_lock_irqsave(&cwq->lock, flags);
        p = cwq->thread;
        cwq->thread = NULL;
        spin_unlock_irqrestore(&cwq->lock, flags);
        if (p)
                kthread_stop(p);
}

void destroy_workqueue(struct workqueue_struct *wq)
{
        int cpu;

        flush_workqueue(wq);

        /* We don't need the distraction of CPUs appearing and vanishing. */
        lock_cpu_hotplug();
        if (is_single_threaded(wq))
                cleanup_workqueue_thread(wq, 0);
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
                spin_lock(&workqueue_lock);
                list_del(&wq->list);
                spin_unlock(&workqueue_lock);
        }
        unlock_cpu_hotplug();
        kfree(wq);
}

static struct workqueue_struct *keventd_wq;

int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}

int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
        return queue_delayed_work(keventd_wq, work, delay);
}

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}

int keventd_up(void)
{
        return keventd_wq != NULL;
}

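/*
 * Usage sketch (illustrative only, not part of this file): most callers do
 * not create their own workqueue and simply borrow keventd via the helpers
 * above.  The work item my_work below is hypothetical.
 *
 *      static DECLARE_WORK(my_work, my_handler, NULL);
 *
 *      schedule_work(&my_work);                        // run soon, or:
 *      schedule_delayed_work(&my_work, 5 * HZ);        // run ~5 seconds from now
 *
 *      flush_scheduled_work();         // e.g. on module unload
 */
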
int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = keventd_wq->cpu_wq + cpu;
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
        struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu;
        LIST_HEAD(list);
        struct work_struct *work;

        spin_lock_irq(&cwq->lock);
        list_splice_init(&cwq->worklist, &list);

        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
                work = list_entry(list.next, struct work_struct, entry);
                list_del(&work->entry);
                __queue_work(wq->cpu_wq + smp_processor_id(), work);
        }
        spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        unsigned int hotcpu = (unsigned long)hcpu;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_UP_PREPARE:
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
                        if (create_workqueue_thread(wq, hotcpu) < 0) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
                }
                break;

        case CPU_ONLINE:
                /* Kick off worker threads. */
                list_for_each_entry(wq, &workqueues, list)
                        wake_up_process(wq->cpu_wq[hotcpu].thread);
                break;

        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
                        /* Unbind so it can run. */
                        kthread_bind(wq->cpu_wq[hotcpu].thread,
                                     smp_processor_id());
                        cleanup_workqueue_thread(wq, hotcpu);
                }
                break;

        case CPU_DEAD:
                list_for_each_entry(wq, &workqueues, list)
                        cleanup_workqueue_thread(wq, hotcpu);
                list_for_each_entry(wq, &workqueues, list)
                        take_over_work(wq, hotcpu);
                break;
        }

        return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}

EXPORT_SYMBOL_GPL(__create_workqueue);
EXPORT_SYMBOL_GPL(queue_work);
EXPORT_SYMBOL_GPL(queue_delayed_work);
EXPORT_SYMBOL_GPL(flush_workqueue);
EXPORT_SYMBOL_GPL(destroy_workqueue);

EXPORT_SYMBOL(schedule_work);
EXPORT_SYMBOL(schedule_delayed_work);
EXPORT_SYMBOL(flush_scheduled_work);