/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>

/*	Simple Token Bucket Filter.
	=======================================

	A data flow obeys TBF with rate R and depth B if, for any
	time interval t_i...t_f, the number of transmitted bits
	does not exceed B + R*(t_f - t_i).

	Packetized version of this definition:
	the sequence of packets of sizes s_i served at moments t_i
	obeys TBF if, for any i <= k:

	s_i + ... + s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially, and let N(t) grow continuously with time as:

	N(t + delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.
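
	As a rough user-space sketch of this update rule (the function and
	variable names below are illustrative, not part of this file):

	    #include <math.h>

	    static double bucket;             // N(t); initialise to B/R

	    // Returns 1 if a packet of S bits may be sent at time `now'.
	    int tbf_conforms(double S, double B, double R,
	                     double now, double *t_last)
	    {
	            bucket = fmin(B / R, bucket + (now - *t_last));
	            *t_last = now;
	            if (S / R > bucket)
	                    return 0;         // wait until N(t) >= S/R
	            bucket -= S / R;          // N(t) jumps down by S/R
	            return 1;
	    }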

	In practice, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P > R and B > M. If P is infinite, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max((L-B)/R, (L-M)/P)

	where L is the maximal backlog (the "limit" parameter) in bytes.
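
	For instance (illustrative numbers, not defaults): with L = 20 Kbyte,
	B = 10 Kbyte, R = 1 Mbyte/sec, M = 1.5 Kbyte and P = 10 Mbyte/sec,

	lat = max((20-10)K/1M, (20-1.5)K/10M) = max(10ms, ~1.9ms) = 10ms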

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by an EOI for some previous packet,
	TBF can stop its activity for up to 1/HZ.

	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10 Mbit ethernet and HZ=100 the minimal allowed B is ~10 Kbytes.

	Note that the peak-rate TBF is much more demanding: with MTU 1500
	and HZ=100, P_crit = MTU*HZ = 150 Kbytes/sec. So, if you need greater
	peak rates, use an architecture with HZ=1000 (e.g. Alpha) :-)

	With classful TBF, "limit" is kept only for backwards compatibility.
	It is passed to the default bfifo qdisc; if the inner qdisc is
	changed, the limit is no longer effective.
*/

struct tbf_sched_data
{
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		buffer;		/* Token bucket depth/rate (B/R): must be >= MTU/R */
	u32		mtu;		/* Peak-rate bucket depth (M/P) */
	u32		max_size;	/* Largest packet that can conform: bytes */
	struct qdisc_rate_table	*R_tab;
	struct qdisc_rate_table	*P_tab;

/* Variables */
	long		tokens;		/* Current number of B tokens */
	long		ptokens;	/* Current number of P tokens */
	psched_time_t	t_c;		/* Time check-point */
	struct timer_list wd_timer;	/* Watchdog timer */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
};

#define L2T(q,L)   ((q)->R_tab->data[(L)>>(q)->R_tab->rate.cell_log])
#define L2T_P(q,L) ((q)->P_tab->data[(L)>>(q)->P_tab->rate.cell_log])
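
/* L2T ("length to time") converts a packet length into the time needed to
 * transmit it at rate R (or peak rate P).  Each qdisc_rate_table, supplied
 * by userspace tc, is a 256-entry lookup table: entry n holds the
 * transmission time of a packet of size n << cell_log, so the conversion
 * is one shift plus one array read.  E.g. with cell_log = 3, a 1000-byte
 * packet uses entry 125.  (The worked figures here are illustrative.)
 */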

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	int ret;

	if (skb->len > q->max_size) {
		/* Oversized packets can never gather enough tokens; drop
		   (or hand them to the policer's reshape_fail hook). */
		sch->stats.drops++;
#ifdef CONFIG_NET_CLS_POLICE
		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
#endif
			kfree_skb(skb);

		return NET_XMIT_DROP;
	}

	if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
		sch->stats.drops++;
		return ret;
	}

	sch->q.qlen++;
	sch->stats.bytes += skb->len;
	sch->stats.packets++;
	return 0;
}

static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0)
		sch->q.qlen++;

	return ret;
}

static unsigned int tbf_drop(struct Qdisc* sch)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	unsigned int len;

	if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->stats.drops++;
	}
	return len;
}

static void tbf_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc*)arg;

	/* Throttling period is over: clear the flag and kick the device
	   so that tbf_dequeue() runs again. */
	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(sch->dev);
}

static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);

	if (skb) {
		psched_time_t now;
		long toks;
		long ptoks = 0;
		unsigned int len = skb->len;

		PSCHED_GET_TIME(now);

		/* Credit earned since the last check-point, capped at the
		   bucket depth. */
		toks = PSCHED_TDIFF_SAFE(now, q->t_c, q->buffer, 0);

		if (q->P_tab) {
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= L2T_P(q, len);
		}
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= L2T(q, len);

		/* OR-ing the two counts is negative iff at least one of
		   them is, i.e. both buckets must have enough tokens. */
		if ((toks|ptoks) >= 0) {
			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}

		{
			long delay = PSCHED_US2JIFFIE(max_t(long, -toks, -ptoks));

			if (delay == 0)
				delay = 1;

			mod_timer(&q->wd_timer, jiffies+delay);
		}

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle:
		   we MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC).
		 */

		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
			/* When requeue fails skb is dropped */
			sch->q.qlen--;
			sch->stats.drops++;
		}

		sch->flags |= TCQ_F_THROTTLED;
		sch->stats.overlimits++;
	}
	return NULL;
}
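
/* Worked example with illustrative numbers: at R = 125 Kbyte/sec, a
 * 1250-byte packet costs L2T(q, len) = 10 ms worth of tokens.  If 8 ms
 * have elapsed since q->t_c and q->tokens holds 4 ms of credit, then
 * toks = 8 + 4 - 10 = 2 ms >= 0 and the packet is released; had only
 * 1 ms elapsed, toks = -5 ms and the watchdog would be armed ~5 ms
 * (at least one jiffy) in the future.
 */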

static void tbf_reset(struct Qdisc* sch)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	PSCHED_GET_TIME(q->t_c);
	q->tokens = q->buffer;		/* refill both buckets */
	q->ptokens = q->mtu;
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer(&q->wd_timer);
}

static struct Qdisc *tbf_create_dflt_qdisc(struct net_device *dev, u32 limit)
{
	struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops);
	struct rtattr *rta;
	int ret;

	if (q) {
		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
		if (rta) {
			rta->rta_type = RTM_NEWQDISC;
			rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
			((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

			ret = q->ops->change(q, rta);
			kfree(rta);

			if (ret == 0)
				return q;
		}
		qdisc_destroy(q);
	}

	return NULL;
}

static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
{
	int err = -EINVAL;
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	struct rtattr *tb[TCA_TBF_PTAB];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	if (rtattr_parse(tb, TCA_TBF_PTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
	    tb[TCA_TBF_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_TBF_PARMS-1]) < sizeof(*qopt))
		goto done;

	qopt = RTA_DATA(tb[TCA_TBF_PARMS-1]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB-1]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB-1]);
		if (ptab == NULL)
			goto done;
	}

	/* Find the largest packet size whose transmission time still fits
	   into the configured buffer (and, with a peak rate, into mtu). */
	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer) break;
	max_size = (n << qopt->rate.cell_log)-1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu) break;
		size = (n << qopt->peakrate.cell_log)-1;
		if (size < max_size) max_size = size;
	}
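
	/* max_size is thus the largest packet (rounded down to a cell
	 * boundary) that can ever conform: anything bigger could never
	 * gather enough tokens and is rejected in tbf_enqueue().  E.g. a
	 * buffer worth 10 ms of tokens at 1 Mbyte/sec yields a max_size
	 * just under 10 Kbytes (illustrative numbers).
	 */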
	if (max_size < 0)
		goto done;

	if (q->qdisc == &noop_qdisc) {
		if ((child = tbf_create_dflt_qdisc(sch->dev, qopt->limit)) == NULL)
			goto done;
	}

	sch_tree_lock(sch);
	if (child) q->qdisc = child;
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	rtab = xchg(&q->R_tab, rtab);
	ptab = xchg(&q->P_tab, ptab);
	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}

static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

	if (opt == NULL)
		return -EINVAL;

	PSCHED_GET_TIME(q->t_c);
	init_timer(&q->wd_timer);
	q->wd_timer.function = tbf_watchdog;
	q->wd_timer.data = (unsigned long)sch;

	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

	del_timer(&q->wd_timer);

	if (q->P_tab)
		qdisc_put_rtab(q->P_tab);
	if (q->R_tab)
		qdisc_put_rtab(q->R_tab);

	qdisc_destroy(q->qdisc);
	q->qdisc = &noop_qdisc;
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_tbf_qopt opt;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	opt.limit = q->limit;
	opt.rate = q->R_tab->rate;
	if (q->P_tab)
		opt.peakrate = q->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = q->mtu;
	opt.buffer = q->buffer;
	RTA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_reset(*old);
	sch->q.qlen = 0;
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;	/* the single class always exists */
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int tbf_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;	/* TBF attaches no classifiers */
}

static struct Qdisc_class_ops tbf_class_ops =
{
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.change		=	tbf_change_class,
	.delete		=	tbf_delete,
	.walk		=	tbf_walk,
	.tcf_chain	=	tbf_find_tcf,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.requeue	=	tbf_requeue,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}

module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");