datapath: Remove compat workqueue.
authorPravin B Shelar <pshelar@nicira.com>
Mon, 9 Sep 2013 20:53:40 +0000 (13:53 -0700)
committerPravin B Shelar <pshelar@nicira.com>
Sun, 8 Sep 2013 02:06:04 +0000 (19:06 -0700)
OVS has its own workq implementation for a couple of reasons. First
was to avoid system freeze due to ovs-flow rehash softlockup.
We have moved rehash out of the workq, so this problem does not exist.
Second was related to bugs in the kernel workq implementation in pre-2.6.32
kernels. But we have dropped support for older kernels.
So there is no reason to keep ovs-workq around. The following patch
removes it.

Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
datapath/datapath.c
datapath/dp_notify.c
datapath/linux/Modules.mk
datapath/linux/compat/include/linux/workqueue.h
datapath/linux/compat/vxlan.c
datapath/linux/compat/workqueue.c [deleted file]

index 0780589..4defcdb 100644 (file)
@@ -2385,13 +2385,9 @@ static int __init dp_init(void)
        pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
                VERSION);
 
-       err = ovs_workqueues_init();
-       if (err)
-               goto error;
-
        err = ovs_flow_init();
        if (err)
-               goto error_wq;
+               goto error;
 
        err = ovs_vport_init();
        if (err)
@@ -2419,8 +2415,6 @@ error_vport_exit:
        ovs_vport_exit();
 error_flow_exit:
        ovs_flow_exit();
-error_wq:
-       ovs_workqueues_exit();
 error:
        return err;
 }
@@ -2433,7 +2427,6 @@ static void dp_cleanup(void)
        rcu_barrier();
        ovs_vport_exit();
        ovs_flow_exit();
-       ovs_workqueues_exit();
 }
 
 module_init(dp_init);
index 847f611..b5178fc 100644 (file)
@@ -90,7 +90,7 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
 
        if (event == NETDEV_UNREGISTER) {
                ovs_net = net_generic(dev_net(dev), ovs_net_id);
-               queue_work(&ovs_net->dp_notify_work);
+               queue_work(system_wq, &ovs_net->dp_notify_work);
        }
 
        return NOTIFY_DONE;
index e3c42cd..057e1d5 100644 (file)
@@ -12,7 +12,6 @@ openvswitch_sources += \
        linux/compat/reciprocal_div.c \
        linux/compat/skbuff-openvswitch.c \
        linux/compat/vxlan.c    \
-       linux/compat/workqueue.c \
        linux/compat/utils.c
 openvswitch_headers += \
        linux/compat/gso.h \
index b2de545..461fefd 100644 (file)
@@ -1,74 +1,10 @@
 #ifndef __LINUX_WORKQUEUE_WRAPPER_H
 #define __LINUX_WORKQUEUE_WRAPPER_H 1
 
-#include <linux/timer.h>
+#include_next <linux/workqueue.h>
 
-int __init ovs_workqueues_init(void);
-void ovs_workqueues_exit(void);
-
-/* Older kernels have an implementation of work queues with some very bad
- * characteristics when trying to cancel work (potential deadlocks, use after
- * free, etc.  Therefore we implement simple ovs specific work queue using
- * single worker thread. work-queue API are kept similar for compatibility.
- * It seems it is useful even on newer kernel. As it can avoid system wide
- * freeze in event of softlockup due to workq blocked on genl_lock.
- */
-
-struct work_struct;
-
-typedef void (*work_func_t)(struct work_struct *work);
-
-#define work_data_bits(work) ((unsigned long *)(&(work)->data))
-
-struct work_struct {
-#define WORK_STRUCT_PENDING 0           /* T if work item pending execution */
-       atomic_long_t data;
-       struct list_head entry;
-       work_func_t func;
-#ifdef CONFIG_LOCKDEP
-       struct lockdep_map lockdep_map;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+#define queue_work(wq, dw) schedule_work(dw);
 #endif
-};
-
-#define WORK_DATA_INIT()        ATOMIC_LONG_INIT(0)
-
-#define work_clear_pending(work)                               \
-       clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
-
-struct delayed_work {
-       struct work_struct work;
-       struct timer_list timer;
-};
-
-#define __WORK_INITIALIZER(n, f) {                             \
-       .data = WORK_DATA_INIT(),                               \
-       .entry  = { &(n).entry, &(n).entry },                   \
-       .func = (f),                                            \
-}
-
-#define __DELAYED_WORK_INITIALIZER(n, f) {                     \
-       .work = __WORK_INITIALIZER((n).work, (f)),              \
-       .timer = TIMER_INITIALIZER(NULL, 0, 0),                 \
-}
-
-#define DECLARE_DELAYED_WORK(n, f)                             \
-       struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
-
-#define schedule_delayed_work rpl_schedule_delayed_work
-int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
-
-#define cancel_delayed_work_sync rpl_cancel_delayed_work_sync
-int cancel_delayed_work_sync(struct delayed_work *dwork);
-
-#define INIT_WORK(_work, _func)                                        \
-       do {                                                    \
-               (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
-               INIT_LIST_HEAD(&(_work)->entry);                \
-               (_work)->func = (_func);                        \
-       } while (0)
-
-extern void flush_scheduled_work(void);
-extern void queue_work(struct work_struct *work);
-extern bool cancel_work_sync(struct work_struct *work);
 
 #endif
index db14f2f..4f7671b 100644 (file)
@@ -361,7 +361,7 @@ void vxlan_sock_release(struct vxlan_sock *vs)
        hlist_del_rcu(&vs->hlist);
        spin_unlock(&vn->sock_lock);
 
-       queue_work(&vs->del_work);
+       queue_work(system_wq, &vs->del_work);
 }
 
 static int vxlan_init_net(struct net *net)
diff --git a/datapath/linux/compat/workqueue.c b/datapath/linux/compat/workqueue.c
deleted file mode 100644 (file)
index cdb3615..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Derived from the kernel/workqueue.c
- *
- * This is the generic async execution mechanism.  Work items as are
- * executed in process context.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/signal.h>
-#include <linux/completion.h>
-#include <linux/workqueue.h>
-#include <linux/slab.h>
-#include <linux/cpu.h>
-#include <linux/notifier.h>
-#include <linux/kthread.h>
-#include <linux/hardirq.h>
-#include <linux/mempolicy.h>
-#include <linux/kallsyms.h>
-#include <linux/debug_locks.h>
-#include <linux/lockdep.h>
-#include <linux/idr.h>
-
-static spinlock_t wq_lock;
-static struct list_head workq;
-static wait_queue_head_t more_work;
-static struct task_struct *workq_thread;
-static struct work_struct *current_work;
-
-static void add_work_to_ovs_wq(struct work_struct *work)
-{
-       list_add_tail(&work->entry, &workq);
-       wake_up(&more_work);
-}
-static void __queue_work(struct work_struct *work)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&wq_lock, flags);
-       add_work_to_ovs_wq(work);
-       spin_unlock_irqrestore(&wq_lock, flags);
-}
-
-void queue_work(struct work_struct *work)
-{
-       if (test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
-               return;
-       __queue_work(work);
-}
-
-static void _delayed_work_timer_fn(unsigned long __data)
-{
-       struct delayed_work *dwork = (struct delayed_work *)__data;
-       __queue_work(&dwork->work);
-}
-
-static void __queue_delayed_work(struct delayed_work *dwork,
-               unsigned long delay)
-{
-       struct timer_list *timer = &dwork->timer;
-       struct work_struct *work = &dwork->work;
-
-       BUG_ON(timer_pending(timer));
-       BUG_ON(!list_empty(&work->entry));
-
-       timer->expires = jiffies + delay;
-       timer->data = (unsigned long)dwork;
-       timer->function = _delayed_work_timer_fn;
-
-       add_timer(timer);
-}
-
-int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
-{
-       if (test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(&dwork->work)))
-               return 0;
-
-       if (delay == 0)
-               __queue_work(&dwork->work);
-       else
-               __queue_delayed_work(dwork, delay);
-
-       return 1;
-}
-
-struct wq_barrier {
-       struct work_struct      work;
-       struct completion       done;
-};
-
-static void wq_barrier_func(struct work_struct *work)
-{
-       struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
-       complete(&barr->done);
-}
-
-static void workqueue_barrier(struct work_struct *work)
-{
-       bool need_barrier;
-       struct wq_barrier barr;
-
-       spin_lock_irq(&wq_lock);
-       if (current_work != work)
-               need_barrier = false;
-       else {
-               INIT_WORK(&barr.work, wq_barrier_func);
-               init_completion(&barr.done);
-               add_work_to_ovs_wq(&barr.work);
-               need_barrier = true;
-       }
-       spin_unlock_irq(&wq_lock);
-
-       if (need_barrier)
-               wait_for_completion(&barr.done);
-}
-
-static int try_to_grab_pending(struct work_struct *work)
-{
-       int ret;
-
-       BUG_ON(in_interrupt());
-
-       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
-               return 0;
-
-       spin_lock_irq(&wq_lock);
-       if (!list_empty(&work->entry)) {
-               list_del_init(&work->entry);
-               ret = 0;
-       } else
-               /* Already executed, retry. */
-               ret = -1;
-       spin_unlock_irq(&wq_lock);
-
-       return ret;
-}
-
-static int __cancel_work_timer(struct work_struct *work,
-                              struct timer_list *timer)
-{
-       int ret;
-
-       for (;;) {
-               ret = (timer && likely(del_timer(timer)));
-               if (ret) /* Was active timer, return true. */
-                       break;
-
-               /* Inactive timer case */
-               ret = try_to_grab_pending(work);
-               if (!ret)
-                       break;
-       }
-       workqueue_barrier(work);
-       work_clear_pending(work);
-       return ret;
-}
-
-int cancel_delayed_work_sync(struct delayed_work *dwork)
-{
-       return __cancel_work_timer(&dwork->work, &dwork->timer);
-}
-
-bool cancel_work_sync(struct work_struct *work)
-{
-       return __cancel_work_timer(work, NULL);
-}
-
-static void run_workqueue(void)
-{
-       spin_lock_irq(&wq_lock);
-       while (!list_empty(&workq)) {
-               struct work_struct *work = list_entry(workq.next,
-                               struct work_struct, entry);
-
-               work_func_t f = work->func;
-               list_del_init(workq.next);
-               current_work = work;
-               spin_unlock_irq(&wq_lock);
-
-               work_clear_pending(work);
-               f(work);
-
-               BUG_ON(in_interrupt());
-               spin_lock_irq(&wq_lock);
-               current_work = NULL;
-       }
-       spin_unlock_irq(&wq_lock);
-}
-
-static int worker_thread(void *dummy)
-{
-       for (;;) {
-               wait_event_interruptible(more_work,
-                               (kthread_should_stop() || !list_empty(&workq)));
-
-               if (kthread_should_stop())
-                       break;
-
-               run_workqueue();
-       }
-
-       return 0;
-}
-
-int __init ovs_workqueues_init(void)
-{
-       spin_lock_init(&wq_lock);
-       INIT_LIST_HEAD(&workq);
-       init_waitqueue_head(&more_work);
-
-       workq_thread = kthread_create(worker_thread, NULL, "ovs_workq");
-       if (IS_ERR(workq_thread))
-               return PTR_ERR(workq_thread);
-
-       wake_up_process(workq_thread);
-       return 0;
-}
-
-void  ovs_workqueues_exit(void)
-{
-       BUG_ON(!list_empty(&workq));
-       kthread_stop(workq_thread);
-}