/*
 * net/sched/sch_generic.c      Generic packet scheduler routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */
13
14 #include <asm/uaccess.h>
15 #include <asm/system.h>
16 #include <asm/bitops.h>
17 #include <linux/config.h>
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/string.h>
23 #include <linux/mm.h>
24 #include <linux/socket.h>
25 #include <linux/sockios.h>
26 #include <linux/in.h>
27 #include <linux/errno.h>
28 #include <linux/interrupt.h>
29 #include <linux/netdevice.h>
30 #include <linux/skbuff.h>
31 #include <linux/rtnetlink.h>
32 #include <linux/init.h>
33 #include <linux/rcupdate.h>
34 #include <linux/list.h>
35 #include <net/sock.h>
36 #include <net/pkt_sched.h>
37
/* Main transmission queue. */

/* Main qdisc structure lock.

   Modifications to data participating in scheduling must
   additionally be protected by the dev->queue_lock spinlock.

   The idea is the following:
   - enqueue and dequeue are serialized via the top-level device
     spinlock dev->queue_lock.
   - tree walking is protected by read_lock_bh(qdisc_tree_lock),
     and this lock is used only in process context.
   - updates to the tree are made under the rtnl semaphore or
     from softirq context (the __qdisc_destroy rcu callback),
     hence this lock needs local bh disabling.

   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
 */
rwlock_t qdisc_tree_lock = RW_LOCK_UNLOCKED;

void qdisc_lock_tree(struct net_device *dev)
{
        write_lock_bh(&qdisc_tree_lock);
        spin_lock_bh(&dev->queue_lock);
}

void qdisc_unlock_tree(struct net_device *dev)
{
        spin_unlock_bh(&dev->queue_lock);
        write_unlock_bh(&qdisc_tree_lock);
}

/*
   dev->queue_lock serializes queue accesses for this device
   AND the dev->qdisc pointer itself.

   dev->xmit_lock serializes accesses to the device driver.

   dev->queue_lock and dev->xmit_lock are mutually exclusive:
   if one is held, the other must be free.
 */
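
/* The hand-off this discipline implies, distilled from qdisc_restart()
 * below (illustrative sketch only, not part of the original file):
 *
 *      spin_lock(&dev->queue_lock);
 *      skb = q->dequeue(q);                    // queue work: queue_lock held
 *      if (spin_trylock(&dev->xmit_lock)) {
 *              spin_unlock(&dev->queue_lock);  // drop it before driver entry
 *              dev->hard_start_xmit(skb, dev); // driver work: xmit_lock held
 *              spin_unlock(&dev->xmit_lock);
 *              spin_lock(&dev->queue_lock);    // reacquire for queue state
 *      }
 */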


/* Kick the device.
   Note that this procedure can be called by a watchdog timer, so we
   do not check the device's busy state up front; the stopped-queue
   check happens after dequeue, below.

   Returns:  0  - queue is empty.
            >0  - queue is not empty, but throttled.
            <0  - queue is not empty; one packet was sent or dropped,
                  so the caller should call us again.

   NOTE: Called under dev->queue_lock with locally disabled BH.
*/

int qdisc_restart(struct net_device *dev)
{
        struct Qdisc *q = dev->qdisc;
        struct sk_buff *skb;

        /* Dequeue packet */
        if ((skb = q->dequeue(q)) != NULL) {
                if (spin_trylock(&dev->xmit_lock)) {
                        /* Remember that the driver is grabbed by us. */
                        dev->xmit_lock_owner = smp_processor_id();

                        /* And release queue */
                        spin_unlock(&dev->queue_lock);

                        if (!netif_queue_stopped(dev)) {
                                if (netdev_nit)
                                        dev_queue_xmit_nit(skb, dev);

                                if (dev->hard_start_xmit(skb, dev) == 0) {
                                        dev->xmit_lock_owner = -1;
                                        spin_unlock(&dev->xmit_lock);

                                        spin_lock(&dev->queue_lock);
                                        return -1;
                                }
                        }

                        /* Release the driver */
                        dev->xmit_lock_owner = -1;
                        spin_unlock(&dev->xmit_lock);
                        spin_lock(&dev->queue_lock);
                        q = dev->qdisc;
                } else {
                        /* So, someone grabbed the driver. */

                        /* It may be a transient configuration error
                           when hard_start_xmit() recurses.  We detect
                           it by checking the xmit owner and drop the
                           packet when a dead loop is detected.
                         */
                        if (dev->xmit_lock_owner == smp_processor_id()) {
                                kfree_skb(skb);
                                if (net_ratelimit())
                                        printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
                                return -1;
                        }
                        __get_cpu_var(netdev_rx_stat).cpu_collision++;
                }

                /* Device kicked us out :(
                   This is possible in the following cases:

                   0. driver is locked
                   1. fastroute is enabled
                   2. device cannot determine busy state
                      before start of transmission (f.e. dialout)
                   3. device is buggy (ppp)
                 */

                q->ops->requeue(skb, q);
                netif_schedule(dev);
                return 1;
        }
        return q->q.qlen;
}
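
/* Callers loop on the return value.  A minimal caller-side sketch
 * (qdisc_run() in <net/pkt_sched.h> does essentially this, under
 * dev->queue_lock as required above):
 *
 *      while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
 *              ;       // keep going while packets are being sent
 */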

static void dev_watchdog(unsigned long arg)
{
        struct net_device *dev = (struct net_device *)arg;

        spin_lock(&dev->xmit_lock);
        if (dev->qdisc != &noop_qdisc) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
                        if (netif_queue_stopped(dev) &&
                            (jiffies - dev->trans_start) > dev->watchdog_timeo) {
                                printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
                                dev->tx_timeout(dev);
                        }
                        if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
                                dev_hold(dev);
                }
        }
        spin_unlock(&dev->xmit_lock);

        dev_put(dev);
}

static void dev_watchdog_init(struct net_device *dev)
{
        init_timer(&dev->watchdog_timer);
        dev->watchdog_timer.data = (unsigned long)dev;
        dev->watchdog_timer.function = dev_watchdog;
}

void __netdev_watchdog_up(struct net_device *dev)
{
        if (dev->tx_timeout) {
                if (dev->watchdog_timeo <= 0)
                        dev->watchdog_timeo = 5*HZ;
                if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
                        dev_hold(dev);
        }
}

static void dev_watchdog_up(struct net_device *dev)
{
        spin_lock_bh(&dev->xmit_lock);
        __netdev_watchdog_up(dev);
        spin_unlock_bh(&dev->xmit_lock);
}

static void dev_watchdog_down(struct net_device *dev)
{
        spin_lock_bh(&dev->xmit_lock);
        if (del_timer(&dev->watchdog_timer))
                __dev_put(dev);
        spin_unlock_bh(&dev->xmit_lock);
}
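
/* How a driver hooks into this watchdog: a hypothetical sketch
 * (mydrv_tx_timeout and the 2*HZ value are invented for illustration):
 *
 *      static void mydrv_tx_timeout(struct net_device *dev)
 *      {
 *              // reset the hardware here, then restart the queue
 *              netif_wake_queue(dev);
 *      }
 *
 *      dev->tx_timeout = mydrv_tx_timeout;
 *      dev->watchdog_timeo = 2*HZ;     // <= 0 gets the 5*HZ default above
 *
 * Once the device is activated, dev_watchdog() refires every
 * watchdog_timeo jiffies and calls tx_timeout() if the queue has been
 * stopped with no transmission for longer than that.
 */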

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int
noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
{
        kfree_skb(skb);
        return NET_XMIT_CN;
}

static struct sk_buff *
noop_dequeue(struct Qdisc * qdisc)
{
        return NULL;
}

static int
noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
        if (net_ratelimit())
                printk(KERN_DEBUG "%s deferred output. It is buggy.\n", skb->dev->name);
        kfree_skb(skb);
        return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops = {
        .next           =       NULL,
        .cl_ops         =       NULL,
        .id             =       "noop",
        .priv_size      =       0,
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .requeue        =       noop_requeue,
        .owner          =       THIS_MODULE,
};

struct Qdisc noop_qdisc = {
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .flags          =       TCQ_F_BUILTIN,
        .ops            =       &noop_qdisc_ops,
};

struct Qdisc_ops noqueue_qdisc_ops = {
        .next           =       NULL,
        .cl_ops         =       NULL,
        .id             =       "noqueue",
        .priv_size      =       0,
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .requeue        =       noop_requeue,
        .owner          =       THIS_MODULE,
};

struct Qdisc noqueue_qdisc = {
        .enqueue        =       NULL,
        .dequeue        =       noop_dequeue,
        .flags          =       TCQ_F_BUILTIN,
        .ops            =       &noqueue_qdisc_ops,
};


static const u8 prio2band[TC_PRIO_MAX+1] =
        { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */
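
/* Worked example of the mapping (the TC_PRIO_* values are those
 * defined in <linux/pkt_sched.h>): the low four bits of skb->priority
 * index prio2band, and lower-numbered bands are dequeued first.
 *
 *      prio2band[TC_PRIO_CONTROL]     = prio2band[7] = 0   // served first
 *      prio2band[TC_PRIO_INTERACTIVE] = prio2band[6] = 0
 *      prio2band[TC_PRIO_BESTEFFORT]  = prio2band[0] = 1   // default
 *      prio2band[TC_PRIO_BULK]        = prio2band[2] = 2   // served last
 */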

static int
pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
{
        struct sk_buff_head *list = qdisc_priv(qdisc);

        list += prio2band[skb->priority&TC_PRIO_MAX];

        if (list->qlen < qdisc->dev->tx_queue_len) {
                __skb_queue_tail(list, skb);
                qdisc->q.qlen++;
                qdisc->stats.bytes += skb->len;
                qdisc->stats.packets++;
                return 0;
        }
        qdisc->stats.drops++;
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

static struct sk_buff *
pfifo_fast_dequeue(struct Qdisc* qdisc)
{
        int prio;
        struct sk_buff_head *list = qdisc_priv(qdisc);
        struct sk_buff *skb;

        for (prio = 0; prio < 3; prio++, list++) {
                skb = __skb_dequeue(list);
                if (skb) {
                        qdisc->q.qlen--;
                        return skb;
                }
        }
        return NULL;
}

static int
pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
        struct sk_buff_head *list = qdisc_priv(qdisc);

        list += prio2band[skb->priority&TC_PRIO_MAX];

        __skb_queue_head(list, skb);
        qdisc->q.qlen++;
        return 0;
}

static void
pfifo_fast_reset(struct Qdisc* qdisc)
{
        int prio;
        struct sk_buff_head *list = qdisc_priv(qdisc);

        for (prio = 0; prio < 3; prio++)
                skb_queue_purge(list+prio);
        qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
        unsigned char *b = skb->tail;
        struct tc_prio_qopt opt;

        opt.bands = 3;
        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
        RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
        return skb->len;

rtattr_failure:
        skb_trim(skb, b - skb->data);
        return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
        int i;
        struct sk_buff_head *list = qdisc_priv(qdisc);

        for (i = 0; i < 3; i++)
                skb_queue_head_init(list+i);

        return 0;
}

static struct Qdisc_ops pfifo_fast_ops = {
        .next           =       NULL,
        .cl_ops         =       NULL,
        .id             =       "pfifo_fast",
        .priv_size      =       3 * sizeof(struct sk_buff_head),
        .enqueue        =       pfifo_fast_enqueue,
        .dequeue        =       pfifo_fast_dequeue,
        .requeue        =       pfifo_fast_requeue,
        .init           =       pfifo_fast_init,
        .reset          =       pfifo_fast_reset,
        .dump           =       pfifo_fast_dump,
        .owner          =       THIS_MODULE,
};
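
/* pfifo_fast_ops is used directly by qdisc_create_dflt() below rather
 * than being looked up by name.  A loadable qdisc would instead
 * register its ops with register_qdisc() from net/sched/sch_api.c;
 * a hypothetical sketch (myqdisc_ops is an invented name):
 *
 *      static struct Qdisc_ops myqdisc_ops = { ... };
 *
 *      static int __init myqdisc_module_init(void)
 *      {
 *              return register_qdisc(&myqdisc_ops);
 *      }
 */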

struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
{
        void *p;
        struct Qdisc *sch;
        int size;

        /* ensure that the Qdisc and the private data are 32-byte aligned */
        size = ((sizeof(*sch) + QDISC_ALIGN_CONST) & ~QDISC_ALIGN_CONST);
        size += ops->priv_size + QDISC_ALIGN_CONST;

        p = kmalloc(size, GFP_KERNEL);
        if (!p)
                return NULL;
        memset(p, 0, size);

        sch = (struct Qdisc *)(((unsigned long)p + QDISC_ALIGN_CONST)
                               & ~QDISC_ALIGN_CONST);
        sch->padded = (char *)sch - (char *)p;

        INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);
        sch->ops = ops;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev = dev;
        dev_hold(dev);
        sch->stats_lock = &dev->queue_lock;
        atomic_set(&sch->refcnt, 1);
        /* enqueue is accessed locklessly - make sure it's visible
         * before we set a netdevice's qdisc pointer to sch */
        smp_wmb();
        if (!ops->init || ops->init(sch, NULL) == 0)
                return sch;

        kfree(p);
        return NULL;
}
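
/* The alignment arithmetic above, worked through.  Assuming
 * QDISC_ALIGN_CONST is 31 (32-byte alignment) and kmalloc() returned
 * p = 0x1008 (a hypothetical value):
 *
 *      sch = (0x1008 + 31) & ~31 = 0x1020      // 32-byte aligned
 *      sch->padded = 0x1020 - 0x1008 = 0x18    // slack to the left
 *
 * __qdisc_destroy() later undoes this with
 * kfree((char *)qdisc - qdisc->padded), recovering the original p.
 * The extra QDISC_ALIGN_CONST bytes added to `size' guarantee the
 * rounded-up pointer still leaves room for the private data.
 */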

/* Under dev->queue_lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
        struct Qdisc_ops *ops = qdisc->ops;

        if (ops->reset)
                ops->reset(qdisc);
}

/* This is the rcu callback function to clean up a qdisc when there
 * are no further references to it */

static void __qdisc_destroy(struct rcu_head *head)
{
        struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
        struct Qdisc_ops *ops = qdisc->ops;

#ifdef CONFIG_NET_ESTIMATOR
        qdisc_kill_estimator(&qdisc->stats);
#endif
        write_lock(&qdisc_tree_lock);
        if (ops->reset)
                ops->reset(qdisc);
        if (ops->destroy)
                ops->destroy(qdisc);
        write_unlock(&qdisc_tree_lock);
        module_put(ops->owner);

        dev_put(qdisc->dev);
        if (!(qdisc->flags&TCQ_F_BUILTIN))
                kfree((char *) qdisc - qdisc->padded);
}

/* Under dev->queue_lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
        if (!atomic_dec_and_test(&qdisc->refcnt))
                return;
        list_del(&qdisc->list);
        call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}
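
/* Why call_rcu(): the transmit path dereferences dev->qdisc without
 * taking qdisc_tree_lock.  A sketch of that reader pattern, modelled
 * on the 2.6-era dev_queue_xmit() (shown for illustration only):
 *
 *      local_bh_disable();     // serves as the RCU read-side section
 *      q = dev->qdisc;         // may be a qdisc pending destruction
 *      q->enqueue(skb, q);     // still safe: freeing is RCU-deferred
 *      local_bh_enable();
 *
 * __qdisc_destroy() therefore runs only after every such reader has
 * left its critical section.
 */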

void dev_activate(struct net_device *dev)
{
        /* If no queueing discipline is attached to the device, create
           a default one: pfifo_fast for devices that need queueing,
           and noqueue_qdisc for virtual interfaces.
         */

        if (dev->qdisc_sleeping == &noop_qdisc) {
                struct Qdisc *qdisc;
                if (dev->tx_queue_len) {
                        qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
                        if (qdisc == NULL) {
                                printk(KERN_INFO "%s: activation failed\n", dev->name);
                                return;
                        }
                        write_lock_bh(&qdisc_tree_lock);
                        list_add_tail(&qdisc->list, &dev->qdisc_list);
                        write_unlock_bh(&qdisc_tree_lock);
                } else {
                        qdisc = &noqueue_qdisc;
                }
                write_lock_bh(&qdisc_tree_lock);
                dev->qdisc_sleeping = qdisc;
                write_unlock_bh(&qdisc_tree_lock);
        }

        spin_lock_bh(&dev->queue_lock);
        if ((dev->qdisc = dev->qdisc_sleeping) != &noqueue_qdisc) {
                dev->trans_start = jiffies;
                dev_watchdog_up(dev);
        }
        spin_unlock_bh(&dev->queue_lock);
}

void dev_deactivate(struct net_device *dev)
{
        struct Qdisc *qdisc;

        spin_lock_bh(&dev->queue_lock);
        qdisc = dev->qdisc;
        dev->qdisc = &noop_qdisc;

        qdisc_reset(qdisc);

        spin_unlock_bh(&dev->queue_lock);

        dev_watchdog_down(dev);

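        /* Wait for any pending qdisc_run() to drain: __LINK_STATE_SCHED
         * is set while the device sits on a CPU's softnet output queue. */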
        while (test_bit(__LINK_STATE_SCHED, &dev->state))
                yield();

        spin_unlock_wait(&dev->xmit_lock);
}

void dev_init_scheduler(struct net_device *dev)
{
        qdisc_lock_tree(dev);
        dev->qdisc = &noop_qdisc;
        dev->qdisc_sleeping = &noop_qdisc;
        INIT_LIST_HEAD(&dev->qdisc_list);
        qdisc_unlock_tree(dev);

        dev_watchdog_init(dev);
}

void dev_shutdown(struct net_device *dev)
{
        struct Qdisc *qdisc;

        qdisc_lock_tree(dev);
        qdisc = dev->qdisc_sleeping;
        dev->qdisc = &noop_qdisc;
        dev->qdisc_sleeping = &noop_qdisc;
        qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
        if ((qdisc = dev->qdisc_ingress) != NULL) {
                dev->qdisc_ingress = NULL;
                qdisc_destroy(qdisc);
        }
#endif
        BUG_TRAP(!timer_pending(&dev->watchdog_timer));
        qdisc_unlock_tree(dev);
}

EXPORT_SYMBOL(__netdev_watchdog_up);
EXPORT_SYMBOL(noop_qdisc);
EXPORT_SYMBOL(noop_qdisc_ops);
EXPORT_SYMBOL(qdisc_create_dflt);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_restart);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);