wait_queue_head_t work_done;
struct workqueue_struct *wq;
- struct task_struct *thread;
+ task_t *thread;
int run_depth; /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
/* All the per-cpu workqueues on the system, so that CPU hotplug can
   add and remove worker threads from each one as CPUs come and go. */
-static DEFINE_MUTEX(workqueue_mutex);
+static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static int singlethread_cpu;
spin_unlock_irqrestore(&cwq->lock, flags);
}
-/**
- * queue_work - queue work on a workqueue
- * @wq: workqueue to use
- * @work: work to queue
- *
- * Returns non-zero if it was successfully added.
+/*
+ * Queue work on a workqueue. Return non-zero if it was successfully
+ * added.
*
 * We queue the work to the CPU on which it was submitted, but there is no
* guarantee that it will be processed by that CPU.
put_cpu();
return ret;
}
-EXPORT_SYMBOL_GPL(queue_work);
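/*
 * Illustrative usage sketch (not part of the patch; all identifiers
 * below are made up): with the three-argument work API of this era, a
 * driver queues work onto its own queue like this.
 */
#include <linux/workqueue.h>

static void my_handler(void *data)
{
	/* runs later in process context, normally on the queueing CPU */
}

static DECLARE_WORK(my_work, my_handler, NULL);

static int my_submit(struct workqueue_struct *my_wq)
{
	/* non-zero return: the work was idle and is now queued */
	return queue_work(my_wq, &my_work);
}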
static void delayed_work_timer_fn(unsigned long __data)
{
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
-/**
- * queue_delayed_work - queue work on a workqueue after delay
- * @wq: workqueue to use
- * @work: work to queue
- * @delay: number of jiffies to wait before queueing
- *
- * Returns non-zero if it was successfully added.
- */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
struct work_struct *work, unsigned long delay)
{
}
return ret;
}
-EXPORT_SYMBOL_GPL(queue_delayed_work);
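/*
 * Sketch (illustrative, reusing my_wq/my_work from the sketch above):
 * the same submission deferred by one second; delay is in jiffies, so
 * HZ jiffies is one second.
 */
static int my_submit_later(struct workqueue_struct *my_wq,
			   struct work_struct *my_work)
{
	return queue_delayed_work(my_wq, my_work, HZ);
}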
-
-/**
- * queue_delayed_work_on - queue work on specific CPU after delay
- * @cpu: CPU number to execute work on
- * @wq: workqueue to use
- * @work: work to queue
- * @delay: number of jiffies to wait before queueing
- *
- * Returns non-zero if it was successfully added.
- */
-int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
- struct work_struct *work, unsigned long delay)
-{
- int ret = 0;
- struct timer_list *timer = &work->timer;
-
- if (!test_and_set_bit(0, &work->pending)) {
- BUG_ON(timer_pending(timer));
- BUG_ON(!list_empty(&work->entry));
-
- /* This stores wq for the moment, for the timer_fn */
- work->wq_data = wq;
- timer->expires = jiffies + delay;
- timer->data = (unsigned long)work;
- timer->function = delayed_work_timer_fn;
- add_timer_on(timer, cpu);
- ret = 1;
- }
- return ret;
-}
-EXPORT_SYMBOL_GPL(queue_delayed_work_on);
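/*
 * Sketch: the test_and_set_bit() on work->pending above is what makes
 * these helpers idempotent; until the handler runs and the bit is
 * cleared, repeat submissions return 0 and leave the armed timer alone.
 */
static int my_kick(struct work_struct *w)
{
	/* safe to call repeatedly; 0 means a timer was already armed */
	return schedule_delayed_work(w, HZ);
}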
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
}
}
-/**
+/*
* flush_workqueue - ensure that any scheduled work has run to completion.
- * @wq: workqueue to flush
*
* Forces execution of the workqueue and blocks until its completion.
* This is typically used in driver shutdown handlers.
} else {
int cpu;
- mutex_lock(&workqueue_mutex);
+ lock_cpu_hotplug();
for_each_online_cpu(cpu)
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
- mutex_unlock(&workqueue_mutex);
+ unlock_cpu_hotplug();
}
}
-EXPORT_SYMBOL_GPL(flush_workqueue);
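/*
 * Sketch: the canonical shutdown-path use named in the comment above;
 * nothing the handlers touch may be freed before the flush returns.
 * Note it only drains work already on the queue, not delayed work
 * whose timer has yet to fire.
 */
static void my_quiesce(struct workqueue_struct *my_wq)
{
	/* caller must first stop anything that queues new work */
	flush_workqueue(my_wq);	/* blocks until queued handlers finish */
}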
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
int cpu)
}
wq->name = name;
- mutex_lock(&workqueue_mutex);
+ /* We don't need the distraction of CPUs appearing and vanishing. */
+ lock_cpu_hotplug();
if (singlethread) {
INIT_LIST_HEAD(&wq->list);
p = create_workqueue_thread(wq, singlethread_cpu);
else
wake_up_process(p);
} else {
+ spin_lock(&workqueue_lock);
list_add(&wq->list, &workqueues);
+ spin_unlock(&workqueue_lock);
for_each_online_cpu(cpu) {
p = create_workqueue_thread(wq, cpu);
if (p) {
destroy = 1;
}
}
- mutex_unlock(&workqueue_mutex);
+ unlock_cpu_hotplug();
/*
* Was there any error during startup? If yes then clean up:
}
return wq;
}
-EXPORT_SYMBOL_GPL(__create_workqueue);
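/*
 * Sketch: __create_workqueue() is normally reached through the
 * wrappers in <linux/workqueue.h>; a module would do roughly:
 */
static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	my_wq = create_singlethread_workqueue("my_wq");
	return my_wq ? 0 : -ENOMEM;
}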
static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
kthread_stop(p);
}
-/**
- * destroy_workqueue - safely terminate a workqueue
- * @wq: target workqueue
- *
- * Safely destroy a workqueue. All work currently pending will be done first.
- */
void destroy_workqueue(struct workqueue_struct *wq)
{
int cpu;
flush_workqueue(wq);
/* We don't need the distraction of CPUs appearing and vanishing. */
- mutex_lock(&workqueue_mutex);
+ lock_cpu_hotplug();
if (is_single_threaded(wq))
cleanup_workqueue_thread(wq, singlethread_cpu);
else {
for_each_online_cpu(cpu)
cleanup_workqueue_thread(wq, cpu);
+ spin_lock(&workqueue_lock);
list_del(&wq->list);
+ spin_unlock(&workqueue_lock);
}
- mutex_unlock(&workqueue_mutex);
+ unlock_cpu_hotplug();
free_percpu(wq->cpu_wq);
kfree(wq);
}
-EXPORT_SYMBOL_GPL(destroy_workqueue);
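/*
 * Sketch: the matching teardown for the init sketch above;
 * destroy_workqueue() flushes remaining work itself before reaping
 * the worker threads.
 */
static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);
}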
static struct workqueue_struct *keventd_wq;
-/**
- * schedule_work - put work task in global workqueue
- * @work: job to be done
- *
- * This puts a job in the kernel-global workqueue.
- */
int fastcall schedule_work(struct work_struct *work)
{
return queue_work(keventd_wq, work);
}
-EXPORT_SYMBOL(schedule_work);
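/*
 * Sketch: schedule_work() is the zero-setup path onto the shared
 * keventd queue, suitable for handlers that are quick and cannot
 * deadlock against other keventd users.
 */
static DECLARE_WORK(my_event, my_handler, NULL);

static void my_notify(void)
{
	schedule_work(&my_event);
}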
-/**
- * schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
- *
- * After waiting for a given time this puts a job in the kernel-global
- * workqueue.
- */
int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
return queue_delayed_work(keventd_wq, work, delay);
}
-EXPORT_SYMBOL(schedule_delayed_work);
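/*
 * Sketch: the same shared-queue submission, deferred half a second.
 */
static void my_notify_later(void)
{
	schedule_delayed_work(&my_event, HZ / 2);
}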
-/**
- * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
- * @cpu: cpu to use
- * @work: job to be done
- * @delay: number of jiffies to wait
- *
- * After waiting for a given time this puts a job in the kernel-global
- * workqueue on the specified CPU.
- */
int schedule_delayed_work_on(int cpu,
struct work_struct *work, unsigned long delay)
{
- return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+ int ret = 0;
+ struct timer_list *timer = &work->timer;
+
+ if (!test_and_set_bit(0, &work->pending)) {
+ BUG_ON(timer_pending(timer));
+ BUG_ON(!list_empty(&work->entry));
+ /* This stores keventd_wq for the moment, for the timer_fn */
+ work->wq_data = keventd_wq;
+ timer->expires = jiffies + delay;
+ timer->data = (unsigned long)work;
+ timer->function = delayed_work_timer_fn;
+ add_timer_on(timer, cpu);
+ ret = 1;
+ }
+ return ret;
}
-EXPORT_SYMBOL(schedule_delayed_work_on);
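/*
 * Sketch: the usual consumer of this helper is a self-rearming
 * per-CPU tick (identifiers illustrative):
 */
static struct work_struct my_tick_work;

static void my_tick(void *unused)
{
	/* keventd threads are CPU-bound, so smp_processor_id() is stable */
	schedule_delayed_work_on(smp_processor_id(), &my_tick_work, HZ);
}

/* init: INIT_WORK(&my_tick_work, my_tick, NULL); then kick it once */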
-/**
- * schedule_on_each_cpu - call a function on each online CPU from keventd
- * @func: the function to call
- * @info: a pointer to pass to func()
- *
- * Returns zero on success.
- * Returns -ve errno on failure.
- *
- * Appears to be racy against CPU hotplug.
- *
- * schedule_on_each_cpu() is very slow.
- */
-int schedule_on_each_cpu(void (*func)(void *info), void *info)
+int schedule_on_each_cpu(void (*func) (void *info), void *info)
{
int cpu;
- struct work_struct *works;
+ struct work_struct *work;
- works = alloc_percpu(struct work_struct);
- if (!works)
- return -ENOMEM;
+ work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL);
- mutex_lock(&workqueue_mutex);
+ if (!work)
+ return -ENOMEM;
for_each_online_cpu(cpu) {
- INIT_WORK(per_cpu_ptr(works, cpu), func, info);
+ INIT_WORK(work + cpu, func, info);
__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
- per_cpu_ptr(works, cpu));
+ work + cpu);
}
- mutex_unlock(&workqueue_mutex);
flush_workqueue(keventd_wq);
- free_percpu(works);
+ kfree(work);
return 0;
}
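/*
 * Sketch: broadcasting a function to every online CPU and blocking
 * until each of them has run it.
 */
static void my_per_cpu_sync(void *info)
{
	/* executed in keventd context once on each online CPU */
}

static int my_broadcast(void)
{
	return schedule_on_each_cpu(my_per_cpu_sync, NULL);
}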
{
flush_workqueue(keventd_wq);
}
-EXPORT_SYMBOL(flush_scheduled_work);
/**
* cancel_rearming_delayed_workqueue - reliably kill off a delayed
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
- struct list_head list;
+ LIST_HEAD(list);
struct work_struct *work;
spin_lock_irq(&cwq->lock);
- list_replace_init(&cwq->worklist, &list);
+ list_splice_init(&cwq->worklist, &list);
while (!list_empty(&list)) {
printk("Taking work for %s\n", wq->name);
}
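/*
 * Sketch: the LIST_HEAD()/list_splice_init() pattern used above
 * detaches an entire list under the lock so it can be walked
 * afterwards without holding it:
 */
#include <linux/list.h>
#include <linux/spinlock.h>

static void my_steal(struct list_head *src, spinlock_t *lock)
{
	LIST_HEAD(tmp);			/* initialized stack list head */

	spin_lock_irq(lock);
	list_splice_init(src, &tmp);	/* src is left empty */
	spin_unlock_irq(lock);
	/* walk tmp lock-free here */
}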
/* We're holding the cpucontrol mutex here */
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+static int workqueue_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
switch (action) {
case CPU_UP_PREPARE:
- mutex_lock(&workqueue_mutex);
/* Create a new workqueue thread for it. */
list_for_each_entry(wq, &workqueues, list) {
if (!create_workqueue_thread(wq, hotcpu)) {
kthread_bind(cwq->thread, hotcpu);
wake_up_process(cwq->thread);
}
- mutex_unlock(&workqueue_mutex);
break;
case CPU_UP_CANCELED:
list_for_each_entry(wq, &workqueues, list) {
- if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
- continue;
/* Unbind so it can run. */
kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
any_online_cpu(cpu_online_map));
cleanup_workqueue_thread(wq, hotcpu);
}
- mutex_unlock(&workqueue_mutex);
- break;
-
- case CPU_DOWN_PREPARE:
- mutex_lock(&workqueue_mutex);
- break;
-
- case CPU_DOWN_FAILED:
- mutex_unlock(&workqueue_mutex);
break;
case CPU_DEAD:
cleanup_workqueue_thread(wq, hotcpu);
list_for_each_entry(wq, &workqueues, list)
take_over_work(wq, hotcpu);
- mutex_unlock(&workqueue_mutex);
break;
}
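/*
 * Sketch: a callback like this is registered from init_workqueues()
 * with the standard hotplug helper, roughly:
 *
 *	hotcpu_notifier(workqueue_cpu_callback, 0);
 */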
BUG_ON(!keventd_wq);
}
+EXPORT_SYMBOL_GPL(__create_workqueue);
+EXPORT_SYMBOL_GPL(queue_work);
+EXPORT_SYMBOL_GPL(queue_delayed_work);
+EXPORT_SYMBOL_GPL(flush_workqueue);
+EXPORT_SYMBOL_GPL(destroy_workqueue);
+
+EXPORT_SYMBOL(schedule_work);
+EXPORT_SYMBOL(schedule_delayed_work);
+EXPORT_SYMBOL(schedule_delayed_work_on);
+EXPORT_SYMBOL(flush_scheduled_work);