datapath: Add workqueue API to ovs compat workqueue.
[sliver-openvswitch.git] / datapath / linux / compat / include / linux / workqueue.h
1 #ifndef __LINUX_WORKQUEUE_WRAPPER_H
2 #define __LINUX_WORKQUEUE_WRAPPER_H 1
3
4 #include <linux/timer.h>
5
6 int __init ovs_workqueues_init(void);
7 void ovs_workqueues_exit(void);
8
/* Older kernels have an implementation of work queues with some very bad
 * characteristics when trying to cancel work (potential deadlocks, use
 * after free, etc.).  Therefore we implement a simple OVS-specific work
 * queue using a single worker thread.  The work-queue API is kept similar
 * for compatibility.  It is useful even on newer kernels, as it can avoid
 * a system-wide freeze in the event of a softlockup caused by a work
 * queue blocked on genl_lock.
 */
16
struct work_struct;

/* Signature of a work callback: invoked with the work item that fired. */
typedef void (*work_func_t)(struct work_struct *work);

/* View the atomic data word of a work item as a plain bit array, suitable
 * for the bitops helpers (test_bit()/set_bit()/clear_bit()). */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
23 struct work_struct {
24 #define WORK_STRUCT_PENDING 0           /* T if work item pending execution */
25         atomic_long_t data;
26         struct list_head entry;
27         work_func_t func;
28 #ifdef CONFIG_LOCKDEP
29         struct lockdep_map lockdep_map;
30 #endif
31 };
32
33 #define WORK_DATA_INIT()        ATOMIC_LONG_INIT(0)
34
35 #define work_clear_pending(work)                                \
36         clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
37
38 struct delayed_work {
39         struct work_struct work;
40         struct timer_list timer;
41 };
42
/* Static initializer for a work_struct: no flags set, list entry linked
 * to itself (empty), callback set to f. */
#define __WORK_INITIALIZER(n, f) {                              \
        .data = WORK_DATA_INIT(),                               \
        .entry  = { &(n).entry, &(n).entry },                   \
        .func = (f),                                            \
}

/* Static initializer for a delayed_work.  The timer function and expiry
 * are left zero here; they are supplied when the work is scheduled. */
#define __DELAYED_WORK_INITIALIZER(n, f) {                      \
        .work = __WORK_INITIALIZER((n).work, (f)),              \
        .timer = TIMER_INITIALIZER(NULL, 0, 0),                 \
}

/* Define and statically initialize a delayed_work variable named n. */
#define DECLARE_DELAYED_WORK(n, f)                              \
        struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
56
57 #define schedule_delayed_work rpl_schedule_delayed_work
58 int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
59
60 #define cancel_delayed_work_sync rpl_cancel_delayed_work_sync
61 int cancel_delayed_work_sync(struct delayed_work *dwork);
62
63 #define INIT_WORK(_work, _func)                                 \
64         do {                                                    \
65                 (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
66                 INIT_LIST_HEAD(&(_work)->entry);                \
67                 (_work)->func = (_func);                        \
68         } while (0)
69
70 extern void flush_scheduled_work(void);
71 extern void queue_work(struct work_struct *work);
72 extern bool cancel_work_sync(struct work_struct *work);
73
74 #endif