datapath/linux/compat/workqueue.c
/*
 * Derived from the kernel/workqueue.c
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.
 *
 */
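
/*
 * Usage sketch (illustrative; my_work/my_work_fn are made-up names,
 * assuming the usual workqueue.h helpers are available):
 *
 *         static void my_work_fn(struct work_struct *work);
 *         static DECLARE_DELAYED_WORK(my_work, my_work_fn);
 *
 *         schedule_delayed_work(&my_work, HZ);    => runs in about a second
 *         cancel_delayed_work_sync(&my_work);     => waits out any in-flight run
 */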

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

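/*
 * Minimal single-threaded replacement for the kernel workqueue: one
 * global list of work items, protected by wq_lock and drained by a
 * single kthread.  current_work records the item being executed so
 * that workqueue_barrier() can tell whether it must wait.
 */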
static spinlock_t wq_lock;
static struct list_head workq;
static wait_queue_head_t more_work;
static struct task_struct *workq_thread;
static struct work_struct *current_work;

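/*
 * Append a work item to the global list and kick the worker thread.
 * Callers must already hold the WORK_STRUCT_PENDING bit for @work.
 */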
static void queue_work(struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&wq_lock, flags);
        list_add_tail(&work->entry, &workq);
        wake_up(&more_work);
        spin_unlock_irqrestore(&wq_lock, flags);
}

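/*
 * Delayed path: the timer callback below fires once the delay expires
 * and moves the work onto the run list; __queue_delayed_work() arms
 * that timer.
 */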
static void _delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        queue_work(&dwork->work);
}

static void __queue_delayed_work(struct delayed_work *dwork,
                unsigned long delay)
{
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        BUG_ON(timer_pending(timer));
        BUG_ON(!list_empty(&work->entry));

        timer->expires = jiffies + delay;
        timer->data = (unsigned long)dwork;
        timer->function = _delayed_work_timer_fn;

        add_timer(timer);
}

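/*
 * Returns 0 if @dwork was already pending, 1 otherwise.  A zero delay
 * queues the work immediately, bypassing the timer.
 */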
int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
        if (test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(&dwork->work)))
                return 0;

        if (delay == 0)
                queue_work(&dwork->work);
        else
                __queue_delayed_work(dwork, delay);

        return 1;
}

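/*
 * Flush support: if @work is the item currently being executed, queue
 * a dummy barrier at the head of the list and sleep until the worker
 * completes it.  With a single worker thread, completion of the
 * barrier guarantees that @work itself has finished running.
 */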
struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

static void workqueue_barrier(struct work_struct *work)
{
        bool need_barrier;
        struct wq_barrier barr;

        spin_lock_irq(&wq_lock);
        if (current_work != work) {
                need_barrier = false;
        } else {
                INIT_WORK(&barr.work, wq_barrier_func);
                init_completion(&barr.done);
                list_add(&barr.work.entry, &workq);
                wake_up(&more_work);
                need_barrier = true;
        }
        spin_unlock_irq(&wq_lock);

        if (need_barrier)
                wait_for_completion(&barr.done);
}

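/*
 * Try to take ownership of @work.  Returns 0 on success (the work was
 * idle, or was still queued and has been unlinked), -1 if the worker
 * has already dequeued it and the caller should retry.
 */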
static int try_to_grab_pending(struct work_struct *work)
{
        int ret;

        BUG_ON(in_interrupt());

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
                return 0;

        spin_lock_irq(&wq_lock);
        if (!list_empty(&work->entry)) {
                list_del_init(&work->entry);
                ret = 0;
        } else {
                /* Already dequeued for execution; caller must retry. */
                ret = -1;
        }
        spin_unlock_irq(&wq_lock);

        return ret;
}

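/*
 * Common cancellation path: delete a still-pending timer or steal the
 * work's pending bit, then use a barrier to wait out any in-flight
 * execution before clearing the pending state.
 */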
static int __cancel_work_timer(struct work_struct *work,
                               struct timer_list *timer)
{
        int ret;

        for (;;) {
                ret = (timer && likely(del_timer(timer)));
                if (ret) /* Timer was active; return true. */
                        break;

                /* Timer was idle; try to grab the work itself. */
                ret = try_to_grab_pending(work);
                if (!ret)
                        break;
        }
        workqueue_barrier(work);
        work_clear_pending(work);
        return ret;
}

int cancel_delayed_work_sync(struct delayed_work *dwork)
{
        return __cancel_work_timer(&dwork->work, &dwork->timer);
}

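/*
 * Drain the global list, invoking each work item with wq_lock dropped.
 * The pending bit is cleared before the callback runs, so a work item
 * may legally re-queue itself from within its own function.
 */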
static void run_workqueue(void)
{
        spin_lock_irq(&wq_lock);
        while (!list_empty(&workq)) {
                struct work_struct *work = list_entry(workq.next,
                                struct work_struct, entry);

                work_func_t f = work->func;
                list_del_init(workq.next);
                current_work = work;
                spin_unlock_irq(&wq_lock);

                work_clear_pending(work);
                f(work);

                BUG_ON(in_interrupt());
                spin_lock_irq(&wq_lock);
                current_work = NULL;
        }
        spin_unlock_irq(&wq_lock);
}

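/* Worker main loop: sleep until woken, then drain the queue. */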
static int worker_thread(void *dummy)
{
        for (;;) {
                wait_event_interruptible(more_work,
                                (kthread_should_stop() || !list_empty(&workq)));

                if (kthread_should_stop())
                        break;

                run_workqueue();
        }

        return 0;
}

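/* Set up the queue state and start the single worker thread ("ovs_workq"). */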
int __init ovs_workqueues_init(void)
{
        spin_lock_init(&wq_lock);
        INIT_LIST_HEAD(&workq);
        init_waitqueue_head(&more_work);

        workq_thread = kthread_create(worker_thread, NULL, "ovs_workq");
        if (IS_ERR(workq_thread))
                return PTR_ERR(workq_thread);

        wake_up_process(workq_thread);
        return 0;
}

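/*
 * Shut down the worker; all work items must have been cancelled or
 * completed by now.
 */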
void ovs_workqueues_exit(void)
{
        BUG_ON(!list_empty(&workq));
        kthread_stop(workq_thread);
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) */