diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b052e2c..a3da07c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
+#include <linux/hardirq.h>
+#include <linux/mempolicy.h>
+#include <linux/freezer.h>
+#include <linux/kallsyms.h>
+#include <linux/debug_locks.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
  * possible cpu).
  *
  * The sequence counters are for flush_scheduled_work().  It wants to wait
- * until until all currently-scheduled works are completed, but it doesn't
+ * until all currently-scheduled works are completed, but it doesn't
  * want to be livelocked by new, incoming ones.  So it waits until
  * remove_sequence is >= the insert_sequence which pertained when
  * flush_scheduled_work() was called.
@@ -50,9 +55,11 @@ struct cpu_workqueue_struct {
        wait_queue_head_t work_done;
 
        struct workqueue_struct *wq;
-       task_t *thread;
+       struct task_struct *thread;
 
        int run_depth;          /* Detect run_workqueue() recursion depth */
+
+       int freezeable;         /* Freeze the thread during suspend */
 } ____cacheline_aligned;
 
 /*
@@ -67,7 +74,7 @@ struct workqueue_struct {
 
 /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
    threads to each one as cpus come/go. */
-static DEFINE_SPINLOCK(workqueue_lock);
+static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu;
@@ -78,6 +85,99 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
        return list_empty(&wq->list);
 }
 
+/*
+ * Set the workqueue on which a work item is to be run
+ * - Must *only* be called if the pending flag is set
+ */
+static inline void set_wq_data(struct work_struct *work, void *wq)
+{
+       unsigned long new;
+
+       BUG_ON(!work_pending(work));
+
+       new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+       new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
+       atomic_long_set(&work->data, new);
+}
+
+static inline void *get_wq_data(struct work_struct *work)
+{
+       return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
+}
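
The set_wq_data()/get_wq_data() pair above folds the owning cpu_workqueue pointer and the status bits into the single atomic_long work->data word. Below is a minimal, compilable userspace sketch of that pointer-plus-flags trick; the mask values are assumptions modelled on the 2.6.20-era <linux/workqueue.h>, not quoted from this patch.

#include <assert.h>
#include <stdio.h>

#define PENDING_BIT   0                 /* stands in for WORK_STRUCT_PENDING  */
#define FLAG_MASK     3UL               /* low bits reserved for status flags */
#define WQ_DATA_MASK  (~FLAG_MASK)      /* remaining bits hold the pointer    */

int main(void)
{
        static struct { long dummy; } fake_cwq;  /* stands in for a cpu_workqueue_struct */
        unsigned long data;

        /* The trick only works because the structure is aligned, so its
         * low bits are guaranteed to be zero and free for flag use. */
        assert(((unsigned long)&fake_cwq & FLAG_MASK) == 0);

        /* set_wq_data(): store the pointer while setting the pending bit. */
        data = (unsigned long)&fake_cwq | (1UL << PENDING_BIT);

        /* get_wq_data(): mask the flag bits back off to recover the pointer. */
        printf("pending=%lu wq=%p\n",
               data & (1UL << PENDING_BIT), (void *)(data & WQ_DATA_MASK));
        return 0;
}
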
+
+static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
+{
+       int ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cwq->lock, flags);
+       /*
+        * We need to re-validate the work info after we've gotten
+        * the cpu_workqueue lock. We can run the work now iff:
+        *
+        *  - the wq_data still matches the cpu_workqueue_struct
+        *  - AND the work is still marked pending
+        *  - AND the work is still on a list (which will be this
+        *    workqueue_struct list)
+        *
+        * All these conditions are important, because we
+        * need to protect against the work being run right
+        * now on another CPU (all but the last one might be
+        * true if it's currently running and has not been
+        * released yet, for example).
+        */
+       if (get_wq_data(work) == cwq
+           && work_pending(work)
+           && !list_empty(&work->entry)) {
+               work_func_t f = work->func;
+               list_del_init(&work->entry);
+               spin_unlock_irqrestore(&cwq->lock, flags);
+
+               if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
+                       work_release(work);
+               f(work);
+
+               spin_lock_irqsave(&cwq->lock, flags);
+               cwq->remove_sequence++;
+               wake_up(&cwq->work_done);
+               ret = 1;
+       }
+       spin_unlock_irqrestore(&cwq->lock, flags);
+       return ret;
+}
+
+/**
+ * run_scheduled_work - run scheduled work synchronously
+ * @work: work to run
+ *
+ * This checks if the work was pending, and runs it
+ * synchronously if so. It returns a boolean to indicate
+ * whether it had any scheduled work to run or not.
+ *
+ * NOTE! This _only_ works for normal work_structs. You
+ * CANNOT use this for delayed work, because the wq data
+ * for delayed work will not point properly to the per-
+ * CPU workqueue struct, but will change!
+ */
+int fastcall run_scheduled_work(struct work_struct *work)
+{
+       for (;;) {
+               struct cpu_workqueue_struct *cwq;
+
+               if (!work_pending(work))
+                       return 0;
+               if (list_empty(&work->entry))
+                       return 0;
+               /* NOTE! This depends intimately on __queue_work! */
+               cwq = get_wq_data(work);
+               if (!cwq)
+                       return 0;
+               if (__run_work(cwq, work))
+                       return 1;
+       }
+}
+EXPORT_SYMBOL(run_scheduled_work);
+
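
A hedged caller-side sketch of run_scheduled_work(): force a plain (non-delayed) work item that was queued with schedule_work() to run synchronously, falling back to a flush if it was not pending here. my_handler/my_work/my_sync_point are illustrative names, not part of this patch.

static void my_handler(struct work_struct *work)
{
        /* do the deferred job */
}

static DECLARE_WORK(my_work, my_handler);

static void my_sync_point(void)
{
        schedule_work(&my_work);
        /* Either we ran it right here, or keventd already took it;
         * in the latter case wait for it to finish. */
        if (!run_scheduled_work(&my_work))
                flush_scheduled_work();
}
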
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
@@ -85,16 +185,19 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
        unsigned long flags;
 
        spin_lock_irqsave(&cwq->lock, flags);
-       work->wq_data = cwq;
+       set_wq_data(work, cwq);
        list_add_tail(&work->entry, &cwq->worklist);
        cwq->insert_sequence++;
        wake_up(&cwq->more_work);
        spin_unlock_irqrestore(&cwq->lock, flags);
 }
 
-/*
- * Queue work on a workqueue. Return non-zero if it was successfully
- * added.
+/**
+ * queue_work - queue work on a workqueue
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
  *
  * We queue the work to the CPU it was submitted, but there is no
  * guarantee that it will be processed by that CPU.
@@ -103,7 +206,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
        int ret = 0, cpu = get_cpu();
 
-       if (!test_and_set_bit(0, &work->pending)) {
+       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
@@ -113,39 +216,85 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
        put_cpu();
        return ret;
 }
+EXPORT_SYMBOL_GPL(queue_work);
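
With the data pointer gone from struct work_struct, handlers now recover their context with container_of() on the work pointer they are handed. A sketch of the resulting idiom (mydev, my_irq_worker and mydev_setup are made-up names):

struct mydev {
        struct work_struct irq_work;
        int irq_count;
};

static void my_irq_worker(struct work_struct *work)
{
        struct mydev *dev = container_of(work, struct mydev, irq_work);

        dev->irq_count++;
}

static void mydev_setup(struct mydev *dev, struct workqueue_struct *wq)
{
        INIT_WORK(&dev->irq_work, my_irq_worker);       /* two-argument form */
        queue_work(wq, &dev->irq_work);
}
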
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
-       struct work_struct *work = (struct work_struct *)__data;
-       struct workqueue_struct *wq = work->wq_data;
+       struct delayed_work *dwork = (struct delayed_work *)__data;
+       struct workqueue_struct *wq = get_wq_data(&dwork->work);
        int cpu = smp_processor_id();
 
        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;
 
-       __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+       __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
 }
 
+/**
+ * queue_delayed_work - queue work on a workqueue after delay
+ * @wq: workqueue to use
+ * @dwork: delayable work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Returns 0 if @dwork was already on a queue, non-zero otherwise.
+ */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
-                       struct work_struct *work, unsigned long delay)
+                       struct delayed_work *dwork, unsigned long delay)
 {
        int ret = 0;
-       struct timer_list *timer = &work->timer;
+       struct timer_list *timer = &dwork->timer;
+       struct work_struct *work = &dwork->work;
 
-       if (!test_and_set_bit(0, &work->pending)) {
+       if (delay == 0)
+               return queue_work(wq, work);
+
+       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
 
                /* This stores wq for the moment, for the timer_fn */
-               work->wq_data = wq;
+               set_wq_data(work, wq);
                timer->expires = jiffies + delay;
-               timer->data = (unsigned long)work;
+               timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
 }
+EXPORT_SYMBOL_GPL(queue_delayed_work);
+
+/**
+ * queue_delayed_work_on - queue work on specific CPU after delay
+ * @cpu: CPU number to execute work on
+ * @wq: workqueue to use
+ * @dwork: work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Returns 0 if @dwork was already on a queue, non-zero otherwise.
+ */
+int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+                       struct delayed_work *dwork, unsigned long delay)
+{
+       int ret = 0;
+       struct timer_list *timer = &dwork->timer;
+       struct work_struct *work = &dwork->work;
+
+       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
+               BUG_ON(timer_pending(timer));
+               BUG_ON(!list_empty(&work->entry));
+
+               /* This stores wq for the moment, for the timer_fn */
+               set_wq_data(work, wq);
+               timer->expires = jiffies + delay;
+               timer->data = (unsigned long)dwork;
+               timer->function = delayed_work_timer_fn;
+               add_timer_on(timer, cpu);
+               ret = 1;
+       }
+       return ret;
+}
+EXPORT_SYMBOL_GPL(queue_delayed_work_on);
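
Delayed work now has its own type, struct delayed_work, which embeds the work_struct and the timer. A hedged sketch of a self-rearming poller built on the two queueing helpers above (my_adapter, my_poll and my_start are illustrative):

struct my_adapter {
        struct delayed_work poll_work;
        struct workqueue_struct *wq;
};

static void my_poll(struct work_struct *work)
{
        struct my_adapter *ad =
                container_of(work, struct my_adapter, poll_work.work);

        /* ... talk to the hardware ... */
        queue_delayed_work(ad->wq, &ad->poll_work, HZ); /* re-arm in 1s */
}

static void my_start(struct my_adapter *ad)
{
        INIT_DELAYED_WORK(&ad->poll_work, my_poll);
        queue_delayed_work(ad->wq, &ad->poll_work, HZ);
}
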
 
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
@@ -166,15 +315,26 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
-               void (*f) (void *) = work->func;
-               void *data = work->data;
+               work_func_t f = work->func;
 
                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);
 
-               BUG_ON(work->wq_data != cwq);
-               clear_bit(0, &work->pending);
-               f(data);
+               BUG_ON(get_wq_data(work) != cwq);
+               if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
+                       work_release(work);
+               f(work);
+
+               if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
+                       printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
+                                       "%s/0x%08x/%d\n",
+                                       current->comm, preempt_count(),
+                                       current->pid);
+                       printk(KERN_ERR "    last function: ");
+                       print_symbol("%s\n", (unsigned long)f);
+                       debug_show_held_locks(current);
+                       dump_stack();
+               }
 
                spin_lock_irqsave(&cwq->lock, flags);
                cwq->remove_sequence++;
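
The new check above flags handlers that return while atomic or holding locks. A deliberately broken example of what it catches, with an approximate rendering of the resulting message (names and numbers are illustrative):

static DEFINE_SPINLOCK(my_lock);

static void leaky_handler(struct work_struct *work)
{
        spin_lock(&my_lock);
        /* BUG: returns without spin_unlock(&my_lock) */
}

/* Roughly what the printk/print_symbol pair above would emit:
 *   BUG: workqueue leaked lock or atomic: events/0/0x00000001/1234
 *       last function: leaky_handler+0x0/0x10
 */
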
@@ -191,7 +351,8 @@ static int worker_thread(void *__cwq)
        struct k_sigaction sa;
        sigset_t blocked;
 
-       current->flags |= PF_NOFREEZE;
+       if (!cwq->freezeable)
+               current->flags |= PF_NOFREEZE;
 
        set_user_nice(current, -5);
 
@@ -200,6 +361,12 @@ static int worker_thread(void *__cwq)
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);
 
+       /*
+        * We inherited MPOL_INTERLEAVE from the booting kernel.
+        * Set MPOL_DEFAULT to ensure node local allocations.
+        */
+       numa_default_policy();
+
        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
@@ -208,6 +375,9 @@ static int worker_thread(void *__cwq)
 
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
+               if (cwq->freezeable)
+                       try_to_freeze();
+
                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
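
A hedged sketch of the general kthread freezer pattern the hunk above follows: a thread that has not set PF_NOFREEZE must call try_to_freeze() at a safe point in its loop so suspend can park it (my_thread_fn is a made-up name).

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sched.h>

static int my_thread_fn(void *unused)
{
        while (!kthread_should_stop()) {
                try_to_freeze();                        /* park here during suspend */
                schedule_timeout_interruptible(HZ);     /* wait for more work */
        }
        return 0;
}
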
@@ -250,8 +420,9 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
        }
 }
 
-/*
+/**
  * flush_workqueue - ensure that any scheduled work has run to completion.
+ * @wq: workqueue to flush
  *
  * Forces execution of the workqueue and blocks until its completion.
  * This is typically used in driver shutdown handlers.
@@ -274,15 +445,16 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
        } else {
                int cpu;
 
-               lock_cpu_hotplug();
+               mutex_lock(&workqueue_mutex);
                for_each_online_cpu(cpu)
                        flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
-               unlock_cpu_hotplug();
+               mutex_unlock(&workqueue_mutex);
        }
 }
+EXPORT_SYMBOL_GPL(flush_workqueue);
 
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
-                                                  int cpu)
+                                                  int cpu, int freezeable)
 {
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;
@@ -292,6 +464,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
        cwq->thread = NULL;
        cwq->insert_sequence = 0;
        cwq->remove_sequence = 0;
+       cwq->freezeable = freezeable;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);
        init_waitqueue_head(&cwq->work_done);
@@ -307,7 +480,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 }
 
 struct workqueue_struct *__create_workqueue(const char *name,
-                                           int singlethread)
+                                           int singlethread, int freezeable)
 {
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
@@ -324,21 +497,18 @@ struct workqueue_struct *__create_workqueue(const char *name,
        }
 
        wq->name = name;
-       /* We don't need the distraction of CPUs appearing and vanishing. */
-       lock_cpu_hotplug();
+       mutex_lock(&workqueue_mutex);
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
-               p = create_workqueue_thread(wq, singlethread_cpu);
+               p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
                if (!p)
                        destroy = 1;
                else
                        wake_up_process(p);
        } else {
-               spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
-               spin_unlock(&workqueue_lock);
                for_each_online_cpu(cpu) {
-                       p = create_workqueue_thread(wq, cpu);
+                       p = create_workqueue_thread(wq, cpu, freezeable);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
@@ -346,7 +516,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
                                destroy = 1;
                }
        }
-       unlock_cpu_hotplug();
+       mutex_unlock(&workqueue_mutex);
 
        /*
         * Was there any error during startup? If yes then clean up:
@@ -357,6 +527,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
        }
        return wq;
 }
+EXPORT_SYMBOL_GPL(__create_workqueue);
 
 static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
 {
@@ -373,6 +544,12 @@ static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
                kthread_stop(p);
 }
 
+/**
+ * destroy_workqueue - safely terminate a workqueue
+ * @wq: target workqueue
+ *
+ * Safely destroy a workqueue. All work currently pending will be done first.
+ */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
        int cpu;
@@ -380,69 +557,95 @@ void destroy_workqueue(struct workqueue_struct *wq)
        flush_workqueue(wq);
 
        /* We don't need the distraction of CPUs appearing and vanishing. */
-       lock_cpu_hotplug();
+       mutex_lock(&workqueue_mutex);
        if (is_single_threaded(wq))
                cleanup_workqueue_thread(wq, singlethread_cpu);
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
-               spin_lock(&workqueue_lock);
                list_del(&wq->list);
-               spin_unlock(&workqueue_lock);
        }
-       unlock_cpu_hotplug();
+       mutex_unlock(&workqueue_mutex);
        free_percpu(wq->cpu_wq);
        kfree(wq);
 }
+EXPORT_SYMBOL_GPL(destroy_workqueue);
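
Typical driver-side lifecycle around the create/flush/destroy APIs above (mydrv_* names are illustrative; create_singlethread_workqueue() is the usual wrapper around __create_workqueue()):

static struct workqueue_struct *mydrv_wq;

static int mydrv_init(void)
{
        mydrv_wq = create_singlethread_workqueue("mydrv");
        return mydrv_wq ? 0 : -ENOMEM;
}

static void mydrv_exit(void)
{
        /* destroy_workqueue() flushes pending work first, then stops
         * the worker threads and frees the workqueue itself. */
        destroy_workqueue(mydrv_wq);
}
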
 
 static struct workqueue_struct *keventd_wq;
 
+/**
+ * schedule_work - put work task in global workqueue
+ * @work: job to be done
+ *
+ * This puts a job in the kernel-global workqueue.
+ */
 int fastcall schedule_work(struct work_struct *work)
 {
        return queue_work(keventd_wq, work);
 }
+EXPORT_SYMBOL(schedule_work);
 
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+/**
+ * schedule_delayed_work - put work task in global workqueue after delay
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ */
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 {
-       return queue_delayed_work(keventd_wq, work, delay);
+       return queue_delayed_work(keventd_wq, dwork, delay);
 }
+EXPORT_SYMBOL(schedule_delayed_work);
 
+/**
+ * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
+ * @cpu: cpu to use
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue on the specified CPU.
+ */
 int schedule_delayed_work_on(int cpu,
-                       struct work_struct *work, unsigned long delay)
+                       struct delayed_work *dwork, unsigned long delay)
 {
-       int ret = 0;
-       struct timer_list *timer = &work->timer;
-
-       if (!test_and_set_bit(0, &work->pending)) {
-               BUG_ON(timer_pending(timer));
-               BUG_ON(!list_empty(&work->entry));
-               /* This stores keventd_wq for the moment, for the timer_fn */
-               work->wq_data = keventd_wq;
-               timer->expires = jiffies + delay;
-               timer->data = (unsigned long)work;
-               timer->function = delayed_work_timer_fn;
-               add_timer_on(timer, cpu);
-               ret = 1;
-       }
-       return ret;
+       return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
+EXPORT_SYMBOL(schedule_delayed_work_on);
 
-int schedule_on_each_cpu(void (*func) (void *info), void *info)
+/**
+ * schedule_on_each_cpu - call a function on each online CPU from keventd
+ * @func: the function to call
+ *
+ * Returns zero on success.
+ * Returns -ve errno on failure.
+ *
+ * Appears to be racy against CPU hotplug.
+ *
+ * schedule_on_each_cpu() is very slow.
+ */
+int schedule_on_each_cpu(work_func_t func)
 {
        int cpu;
-       struct work_struct *work;
+       struct work_struct *works;
 
-       work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL);
-
-       if (!work)
+       works = alloc_percpu(struct work_struct);
+       if (!works)
                return -ENOMEM;
+
+       mutex_lock(&workqueue_mutex);
        for_each_online_cpu(cpu) {
-               INIT_WORK(work + cpu, func, info);
-               __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
-                               work + cpu);
+               struct work_struct *work = per_cpu_ptr(works, cpu);
+
+               INIT_WORK(work, func);
+               set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
+               __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
+       mutex_unlock(&workqueue_mutex);
        flush_workqueue(keventd_wq);
-       kfree(work);
+       free_percpu(works);
        return 0;
 }
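
Caller-side sketch for schedule_on_each_cpu(): run one function on every online CPU through keventd and wait for all of them to finish (refresh_cpu_state is a made-up callback name).

static void refresh_cpu_state(struct work_struct *unused)
{
        pr_debug("refreshing on cpu %d\n", smp_processor_id());
}

static int refresh_all_cpus(void)
{
        return schedule_on_each_cpu(refresh_cpu_state); /* 0 or -ENOMEM */
}
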
 
@@ -450,17 +653,18 @@ void flush_scheduled_work(void)
 {
        flush_workqueue(keventd_wq);
 }
+EXPORT_SYMBOL(flush_scheduled_work);
 
 /**
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
  *                     work whose handler rearms the delayed work.
  * @wq:   the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-                                      struct work_struct *work)
+                                      struct delayed_work *dwork)
 {
-       while (!cancel_delayed_work(work))
+       while (!cancel_delayed_work(dwork))
                flush_workqueue(wq);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
@@ -468,14 +672,40 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 /**
  * cancel_rearming_delayed_work - reliably kill off a delayed keventd
  *                     work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
-       cancel_rearming_delayed_workqueue(keventd_wq, work);
+       cancel_rearming_delayed_workqueue(keventd_wq, dwork);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
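
Companion to the self-rearming poll sketch earlier: shutting such a handler down reliably with the helpers documented above (names remain illustrative).

static void my_stop(struct my_adapter *ad)
{
        /* Keeps cancelling and flushing until the re-arming race is closed. */
        cancel_rearming_delayed_workqueue(ad->wq, &ad->poll_work);
}
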
 
+/**
+ * execute_in_process_context - reliably execute the routine with user context
+ * @fn:                the function to execute
+ * @ew:                guaranteed storage for the execute work structure (must
+ *             be available when the work executes)
+ *
+ * Executes the function immediately if process context is available,
+ * otherwise schedules the function for delayed execution.
+ *
+ * Returns:    0 - function was executed
+ *             1 - function was scheduled for execution
+ */
+int execute_in_process_context(work_func_t fn, struct execute_work *ew)
+{
+       if (!in_interrupt()) {
+               fn(&ew->work);
+               return 0;
+       }
+
+       INIT_WORK(&ew->work, fn);
+       schedule_work(&ew->work);
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(execute_in_process_context);
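
Illustrative use of execute_in_process_context(): the final put of an object may come from either process or interrupt context, and the execute_work storage lives in the object itself (my_obj/my_obj_free/my_obj_release are made-up names).

struct my_obj {
        struct execute_work ew;
        /* ... */
};

static void my_obj_free(struct work_struct *work)
{
        struct my_obj *obj = container_of(work, struct my_obj, ew.work);

        kfree(obj);
}

static void my_obj_release(struct my_obj *obj)
{
        /* Frees immediately in process context, defers to keventd otherwise. */
        execute_in_process_context(my_obj_free, &obj->ew);
}
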
+
 int keventd_up(void)
 {
        return keventd_wq != NULL;
@@ -497,16 +727,15 @@ int current_is_keventd(void)
 
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* Take the work from this (downed) CPU. */
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-       LIST_HEAD(list);
+       struct list_head list;
        struct work_struct *work;
 
        spin_lock_irq(&cwq->lock);
-       list_splice_init(&cwq->worklist, &list);
+       list_replace_init(&cwq->worklist, &list);
 
        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
@@ -527,9 +756,10 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
        switch (action) {
        case CPU_UP_PREPARE:
+               mutex_lock(&workqueue_mutex);
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
-                       if (!create_workqueue_thread(wq, hotcpu)) {
+                       if (!create_workqueue_thread(wq, hotcpu, 0)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
@@ -545,15 +775,27 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                        kthread_bind(cwq->thread, hotcpu);
                        wake_up_process(cwq->thread);
                }
+               mutex_unlock(&workqueue_mutex);
                break;
 
        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
+                       if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
+                               continue;
                        /* Unbind so it can run. */
                        kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
                                     any_online_cpu(cpu_online_map));
                        cleanup_workqueue_thread(wq, hotcpu);
                }
+               mutex_unlock(&workqueue_mutex);
+               break;
+
+       case CPU_DOWN_PREPARE:
+               mutex_lock(&workqueue_mutex);
+               break;
+
+       case CPU_DOWN_FAILED:
+               mutex_unlock(&workqueue_mutex);
                break;
 
        case CPU_DEAD:
@@ -561,12 +803,12 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                        cleanup_workqueue_thread(wq, hotcpu);
                list_for_each_entry(wq, &workqueues, list)
                        take_over_work(wq, hotcpu);
+               mutex_unlock(&workqueue_mutex);
                break;
        }
 
        return NOTIFY_OK;
 }
-#endif
 
 void init_workqueues(void)
 {
@@ -576,13 +818,3 @@ void init_workqueues(void)
        BUG_ON(!keventd_wq);
 }
 
-EXPORT_SYMBOL_GPL(__create_workqueue);
-EXPORT_SYMBOL_GPL(queue_work);
-EXPORT_SYMBOL_GPL(queue_delayed_work);
-EXPORT_SYMBOL_GPL(flush_workqueue);
-EXPORT_SYMBOL_GPL(destroy_workqueue);
-
-EXPORT_SYMBOL(schedule_work);
-EXPORT_SYMBOL(schedule_delayed_work);
-EXPORT_SYMBOL(schedule_delayed_work_on);
-EXPORT_SYMBOL(flush_scheduled_work);