/*
 * Oh dear. Task queues were removed from Linux 2.6 and replaced by work
 * queues. Unfortunately the semantics is not the same. With task queues we
 * can defer work until a particular event occurs -- this is not
 * straightforwardly done with work queues (queued work is performed asap, or
 * after some fixed timeout). Conversely, work queues are a (slightly) neater
 * way of deferring work to a process context than using task queues in 2.4.
 *
 * This is a bit of a needless reimplementation -- should have just pulled
 * the code from 2.4, but I tried leveraging work queues to simplify things.
 * They didn't help. :-(
 */
#include <linux/version.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <asm/system.h>
25 struct list_head list;
26 unsigned long pending;
/*
 * Run-time initializer for a tq_struct: empty list linkage, not pending,
 * callback fn/arg installed. The do { } while (0) wrapper makes the
 * multi-statement macro safe to use as a single statement (e.g. in an
 * unbraced if/else body).
 */
#define INIT_TQUEUE(_name, _fn, _arg)               \
    do {                                            \
        INIT_LIST_HEAD(&(_name)->list);             \
        (_name)->pending = 0;                       \
        (_name)->fn = (_fn); (_name)->arg = (_arg); \
    } while ( 0 )
/*
 * Declare and statically initialize a tq_struct. Designated initializers
 * keep this correct even if the field order of struct tq_struct changes,
 * and make the fn/arg/list/pending assignments self-documenting.
 */
#define DECLARE_TQUEUE(_name, _fn, _arg)            \
    struct tq_struct _name = {                      \
        .fn      = (_fn),                           \
        .arg     = (_arg),                          \
        .list    = LIST_HEAD_INIT((_name).list),    \
        .pending = 0                                \
    }
38 struct list_head list;
/*
 * Declare and statically initialize an empty, unlocked task queue.
 * Designated initializers keep this robust against field reordering.
 */
#define DECLARE_TASK_QUEUE(_name)                   \
    task_queue _name = {                            \
        .list = LIST_HEAD_INIT((_name).list),       \
        .lock = SPIN_LOCK_UNLOCKED                  \
    }
44 static inline int queue_task(struct tq_struct *tqe, task_queue *tql)
47 if ( test_and_set_bit(0, &tqe->pending) )
49 spin_lock_irqsave(&tql->lock, flags);
50 list_add_tail(&tqe->list, &tql->list);
51 spin_unlock_irqrestore(&tql->lock, flags);
55 static inline void run_task_queue(task_queue *tql)
57 struct list_head head, *ent;
58 struct tq_struct *tqe;
63 spin_lock_irqsave(&tql->lock, flags);
64 list_add(&head, &tql->list);
65 list_del_init(&tql->list);
66 spin_unlock_irqrestore(&tql->lock, flags);
68 while ( !list_empty(&head) )
72 tqe = list_entry(ent, struct tq_struct, list);
81 #endif /* __QUEUES_H__ */