/* net/sched/sch_csz.c	Clark-Shenker-Zhang scheduler.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
/* Clark-Shenker-Zhang algorithm.
   =======================================

   David D. Clark, Scott Shenker and Lixia Zhang,
   "Supporting Real-Time Applications in an Integrated Services Packet
   Network: Architecture and Mechanism".

   CBQ presents a flexible universal algorithm for packet scheduling,
   but it has pretty poor delay characteristics.
   Round-robin scheduling and link-sharing goals
   apparently contradict minimization of network delay and jitter.
   Moreover, correct handling of predictive flows seems to be
   impossible in CBQ.

   CSZ presents a more precise but less flexible and less efficient
   approach. As I understand it, the main idea is to create
   a WFQ flow for each guaranteed service and to allocate
   the rest of the bandwidth to the dummy flow-0. Flow-0 comprises
   the predictive services and the best effort traffic;
   it is handled by a priority scheduler with the highest
   priority band allocated for the predictive services, and the rest
   for the best effort packets.

   Note that in CSZ flows are NOT limited to their bandwidth. It
   is supposed that a flow passed admission control at the edge
   of the QoS network and does not need further shaping. Any
   attempt to improve the flow or to shape it to a token bucket
   at intermediate hops will introduce undesired delays and raise
   jitter.

   At the moment CSZ is the only scheduler that provides
   true guaranteed service. Other schemes (including CBQ)
   do not provide guaranteed delay and randomize jitter.
   There is a proof (by Sally Floyd) that delay
   can be estimated by an IntServ-compliant formula.
   This result is true formally, but it is wrong in principle.
   It takes into account only round-robin delays,
   ignoring delays introduced by link sharing, i.e. overlimiting.
   Note that temporary overlimits are inevitable because
   real links are not ideal, and the real algorithm must take this
   into account.

   $B$ is the link bandwidth (bits/sec).

   $I$ is the set of all flows, including flow $0$.
   Every flow $a \in I$ has an associated bandwidth slice $r_a < 1$, and
   $\sum_{a \in I} r_a = 1$.

   Let $m_a$ be the number of backlogged bits in flow $a$.
   The flow is {\em active} if $m_a > 0$.
   This number is a discontinuous function of time;
   when a packet $i$ arrives:
   \[
   m_a(t_i+0) - m_a(t_i-0) = L^i,
   \]
   where $L^i$ is the length of the arrived packet.
   The flow queue is drained continuously until $m_a == 0$:
   \[
   {d m_a \over dt} = - { B r_a \over \sum_{b \in A} r_b} ,
   \]
   where $A$ is the set of currently active flows.
   I.e. flow rates are their allocated rates proportionally
   scaled to take all the available link bandwidth. Apparently,
   it is not the only possible policy. E.g. CBQ classes
   without borrowing would be modelled by:
   \[
   {d m_a \over dt} = - B r_a .
   \]
   More complicated hierarchical bandwidth allocation
   policies are possible, but unfortunately the basic
   flow equations have a simple solution only for proportional
   scaling.

   We calculate the time until the last bit of the packet is sent:
   \[
   E_a^i(t) = { m_a(t_i) - \delta_a(t) \over r_a },
   \]
   where $\delta_a(t)$ is the number of bits drained since $t_i$.
   We have to evaluate $E_a^i$ for all queued packets,
   then find the packet with minimal $E_a^i$ and send it.

   This sounds good, but a direct implementation of the algorithm
   is absolutely infeasible. Luckily, if flow rates
   are scaled proportionally, the equations have a simple solution.

   The differential equation for $E_a^i$ is
   \[
   {d E_a^i (t) \over dt } = - { d \delta_a(t) \over dt} { 1 \over r_a} =
   - { B \over \sum_{b \in A} r_b}
   \]
   with the initial condition
   \[
   E_a^i (t_i) = { m_a(t_i) \over r_a } .
   \]

   Let's introduce an auxiliary function $R(t)$.

   Consider the following model: we rotate over the active flows,
   sending $r_a B$ bits from each flow, so that we send
   $B \sum_{a \in A} r_a$ bits per round, which takes
   $\sum_{a \in A} r_a$ seconds.

   Hence, $R(t)$ (the round number) is a monotonically increasing,
   piecewise-linear function of time; while $A$ does not change,
   \[
   { d R(t) \over dt } = { 1 \over \sum_{a \in A} r_a } ,
   \]
   and it is continuous when $A$ changes.

   The central observation is that the quantity
   $F_a^i = R(t) + E_a^i(t)/B$ does not depend on time at all!
   $R(t)$ does not depend on the flow, so that $F_a^i$ can be
   calculated once, on packet arrival, and we need not
   recalculate the $E$ numbers or resort queues.
   The number $F_a^i$ is called the finish number of the packet.
   It is just the value of $R(t)$ when the last bit of the packet
   is sent out.

   The maximal finish number in a flow is called the finish number
   of the flow, and the minimal one the "start number of the flow".
   Apparently, a flow is active if and only if $F_a \geq R$.

   When a packet of length $L_i$ bits arrives to flow $a$ at time $t_i$,
   we calculate $F_a^i$ as follows.

   If the flow was inactive ($F_a < R$):
   $F_a^i = R(t) + {L_i \over B r_a}$,
   otherwise
   $F_a^i = F_a + {L_i \over B r_a}$.

   These equations complete the algorithm specification.

   It looks pretty hairy, but there is a simple
   procedure for solving these equations.
   See the procedure csz_update(), which is a generalization of
   the algorithm from S. Keshav's thesis, Chapter 3,
   "Efficient Implementation of Fair Queueing".
   NOTES.

   * We implement only the simplest variant of CSZ,
   where flow-0 is an explicit 4-band priority FIFO.
   This is bad, but we would need a "peek" operation in addition
   to "dequeue" to implement complete CSZ.
   I do not want to do that, unless it is absolutely
   necessary.

   * Primitive support for token bucket filtering
   presents itself too. It directly contradicts CSZ, but
   even though the Internet is on the globe ... :-)
   "the edges of the network" really exist.

   BUGS.

   * The fixed point arithmetic is overcomplicated, suboptimal and even
   wrong. Check it later. */
/* This number is arbitrary */

#define CSZ_GUARANTEED		16
#define CSZ_FLOWS		(CSZ_GUARANTEED+4)
struct csz_head
{
	struct csz_head		*snext;
	struct csz_head		*sprev;
	struct csz_head		*fnext;
	struct csz_head		*fprev;
};
struct csz_flow
{
	struct csz_head		*snext;
	struct csz_head		*sprev;
	struct csz_head		*fnext;
	struct csz_head		*fprev;

	struct tc_ratespec	rate;
	struct tc_ratespec	slice;
	u32			*L_tab;	/* Lookup table for L/(B*r_a) values */
	unsigned long		limit;	/* Maximal length of queue */
	unsigned char		rate_log; /* Fixed point position for rate */
	struct tc_ratespec	peakrate;
	__u32			buffer;	/* Depth of token bucket (normalized) */

	unsigned long		tokens;	/* Tokens number: usecs */
	unsigned long		depth;	/* Bucket depth, same units as tokens */
	psched_time_t		t_tbf;	/* Token bucket time check-point */
	unsigned long		R_tbf;	/* R-number when throttling started */
	int			throttled;
	unsigned		peeked;	/* L2R of the packet peeked on flow-0 */
	unsigned long		start;	/* Finish number of the first skb */
	unsigned long		finish;	/* Finish number of the flow */

	struct sk_buff_head	q;	/* FIFO queue */
};
#define L2R(f,L)	((f)->L_tab[(L)>>(f)->slice.cell_log])
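
/* L_tab is indexed by the packet length in units of 1<<cell_log bytes
   and holds precomputed L/(B*r_a) increments in the scheduler's fixed
   point. The table itself arrives from userspace (TCA_CSZ_RTAB,
   1024 bytes = 256 u32 entries). The sketch below only illustrates how
   such a table could be generated; the function name and the exact
   scaling are assumptions of this sketch, not the actual tc rtab code.
   It is kept under #if 0: it is userspace-style illustration, not
   kernel code. */
#if 0
static void demo_fill_L_tab(u32 *tab, u32 rate_Bps, int cell_log, int R_log)
{
	int i;

	for (i = 0; i < 256; i++) {
		/* A packet of length L lands in cell L>>cell_log; use the
		   upper bound of the cell so the estimate errs on the
		   conservative side. */
		u32 len = (i + 1) << cell_log;

		/* Finish-number increment L/(B*r_a): seconds, scaled
		   into 1<<R_log fixed point. */
		tab[i] = (u32)(((unsigned long long)len << R_log) / rate_Bps);
	}
}
#endif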
struct csz_sched_data
{
	unsigned char	rate_log;	/* Fixed point position for rate;
					 * we really do not need it */
	unsigned char	R_log;		/* Fixed point position for round number */
	unsigned char	delta_log;	/* 1<<delta_log is maximal timeout in usecs;
					 * 21 <-> 2.1sec is MAXIMAL value */

	struct tcf_proto *filter_list;
	u8	prio2band[TC_PRIO_MAX+1];

	struct timer_list wd_timer;
	long	wd_expires;

	psched_time_t	t_c;		/* Time check-point */
	unsigned long	R_c;		/* R-number check-point */
	unsigned long	rate;		/* Current sum of rates of active flows */
	struct csz_head	s;		/* Flows sorted by "start" */
	struct csz_head	f;		/* Flows sorted by "finish" */

	struct sk_buff_head	other[4];	/* Predictive (0) and best-effort
						   (1,2,3) queues */
	struct csz_flow	flow[CSZ_GUARANTEED];	/* Array of flows */
};
/* These routines (csz_insert_finish and csz_insert_start) are
   the most time-consuming part of the whole algorithm.

   We insert into a sorted list, so that the time
   is linear in the number of active flows in the worst case.
   Note that we do not have a very large number of guaranteed flows,
   so that logarithmic algorithms (heaps etc.) are useless:
   they are slower than a linear scan when the length of the list
   is <= 32.

   A heap would make sense if we used WFQ for the best effort
   flows, but SFQ is a better choice in that case.
 */
/* Insert flow "this" into the list "b" before the first
   flow with a greater finish number.
 */

#if 0
/* Scan forward from the head of the list. */
static inline void csz_insert_finish(struct csz_head *b,
				     struct csz_flow *this)
{
	struct csz_head *f = b->fnext;
	unsigned long finish = this->finish;

	while (f != b) {
		if ((long)(((struct csz_flow*)f)->finish - finish) > 0)
			break;
		f = f->fnext;
	}
	this->fnext = f;
	this->fprev = f->fprev;
	this->fnext->fprev = this->fprev->fnext = (struct csz_head*)this;
}
#else
/* Scan backward from the tail of the list. */
static inline void csz_insert_finish(struct csz_head *b,
				     struct csz_flow *this)
{
	struct csz_head *f = b->fprev;
	unsigned long finish = this->finish;

	while (f != b) {
		if ((long)(((struct csz_flow*)f)->finish - finish) <= 0)
			break;
		f = f->fprev;
	}
	this->fnext = f->fnext;
	this->fprev = f;
	this->fnext->fprev = this->fprev->fnext = (struct csz_head*)this;
}
#endif
/* Insert flow "this" into the list "b" before the first
   flow with a greater start number.
 */

static inline void csz_insert_start(struct csz_head *b,
				    struct csz_flow *this)
{
	struct csz_head *f = b->snext;
	unsigned long start = this->start;

	while (f != b) {
		if ((long)(((struct csz_flow*)f)->start - start) > 0)
			break;
		f = f->snext;
	}
	this->snext = f;
	this->sprev = f->sprev;
	this->snext->sprev = this->sprev->snext = (struct csz_head*)this;
}
/* Calculate and return the current round number.
   This is another time-consuming part, but
   it is impossible to avoid it.

   It costs O(N), which makes the whole algorithm useful only
   for playing with the closest approximation of the ideal fluid model.

   There exist less academic but more practical modifications,
   which might have even better characteristics (WF2Q+, HPFQ, HFSC).
 */
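
/* For illustration only (not part of the original file): the same
   piecewise-linear bookkeeping written in floating point, without the
   fixed-point scaling and list surgery performed by csz_update()
   below. All names here are invented for the sketch, which is why it
   is kept under #if 0. Given the checkpoint (t_c, R_c), the current
   sum of active rates, and the active flows sorted by ascending finish
   number, R(t) is recovered segment by segment: R grows with slope
   1/rate_sum until it crosses the smallest finish number, at which
   point that flow goes inactive and its rate leaves the sum. */
#if 0
struct demo_flow {
	double	finish;		/* finish number of the flow */
	double	rate;		/* its slice r_a */
};

static double demo_round_number(double t, double *t_c, double *R_c,
				double *rate_sum,
				struct demo_flow *f, int nflows)
{
	int i;

	for (i = 0; i < nflows && *rate_sum > 0; i++) {
		/* Time at which R(t) reaches this flow's finish number:
		   R_c + (t_cross - t_c)/rate_sum == finish */
		double t_cross = *t_c + (f[i].finish - *R_c) * *rate_sum;

		if (t < t_cross)
			break;
		/* The flow went inactive at t_cross: move the checkpoint
		   there and drop its rate from the active sum. */
		*t_c = t_cross;
		*R_c = f[i].finish;
		*rate_sum -= f[i].rate;
	}
	if (*rate_sum <= 0)
		return *R_c;	/* no active flows; the real code resets R */
	return *R_c + (t - *t_c) / *rate_sum;
}
#endif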
static unsigned long csz_update(struct Qdisc *sch)
{
	struct csz_sched_data	*q = (struct csz_sched_data*)sch->data;
	struct csz_flow		*a;
	unsigned long		F;
	unsigned long		tmp;
	psched_time_t		now;
	unsigned long		delay;

	PSCHED_GET_TIME(now);
	delay = PSCHED_TDIFF_SAFE(now, q->t_c, 0, goto do_reset);

	if (delay>>q->delta_log) {
do_reset:
		/* Delta is too large.
		   It is possible if MTU/BW > 1<<q->delta_log
		   (i.e. a configuration error) or because of a hardware
		   fault. We have no choice but to start from scratch.
		 */
		qdisc_reset(sch);
		return 0;
	}

	q->t_c = now;

	for (;;) {
		a = (struct csz_flow*)q->f.fnext;

		/* No more active flows. Reset R and exit. */
		if (a == (struct csz_flow*)&q->f) {
			if (q->rate)
				printk("csz_update: rate!=0 on inactive csz\n");
			q->rate = 0;
			q->R_c = 0;
			return 0;
		}

		F = a->finish;

		if (q->rate == 0) {
			printk("csz_update: rate=0 on active csz\n");
			q->R_c = 0;
			return 0;
		}

		/*
		 *	tmp = (t - q->t_c)/q->rate;
		 */
		tmp = ((delay<<(31-q->delta_log))/q->rate)>>(31-q->delta_log+q->R_log);
		tmp += q->R_c;

		/* OK, this flow (and all flows with greater
		   finish numbers) is still active. */
		if ((long)(F - tmp) > 0)
			break;

		/* It is not active any more. Drop it from the finish list. */
		a->fprev->fnext = a->fnext;
		a->fnext->fprev = a->fprev;

		/*
		 *	q->t_c += (F - q->R_c)*q->rate
		 */
		tmp = ((F-q->R_c)*q->rate)<<q->R_log;
		q->R_c = F;
		q->rate -= a->slice.rate;

		if ((long)(delay - tmp) >= 0) {
			delay -= tmp;
			continue;
		}
		delay = 0;
	}

	q->R_c = tmp;
	return tmp;
}
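
/* Classification is not really implemented yet: every packet is mapped
   to flow id CSZ_GUARANTEED, i.e. band 0 of the flow-0 priority FIFO
   (see csz_enqueue); q->filter_list and q->prio2band are not consulted
   here. */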
static unsigned csz_classify(struct sk_buff *skb, struct csz_sched_data *q)
{
	return CSZ_GUARANTEED;
}
static int
csz_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	unsigned flow_id = csz_classify(skb, q);
	unsigned long R;
	int prio = 0;
	struct csz_flow *this;

	if (flow_id >= CSZ_GUARANTEED) {
		prio = flow_id - CSZ_GUARANTEED;
		flow_id = 0;
	}

	this = &q->flow[flow_id];
	if (this->q.qlen >= this->limit || this->L_tab == NULL) {
		sch->stats.drops++;
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	R = csz_update(sch);

	if ((long)(this->finish - R) >= 0) {
		/* It is still active; extend its finish number */
		this->finish += L2R(this,skb->len);
	} else {
		/* It is inactive; activate it */
		this->finish = R + L2R(this,skb->len);
		q->rate += this->slice.rate;
		csz_insert_finish(&q->f, this);
	}

	/* If this flow was empty, remember the start number
	   and insert it into the start queue */
	if (this->q.qlen == 0) {
		this->start = this->finish;
		csz_insert_start(&q->s, this);
	}
	if (flow_id)
		skb_queue_tail(&this->q, skb);
	else {
		skb_queue_tail(&q->other[prio], skb);
		this->q.qlen++;	/* flow-0 counts packets parked in other[] */
	}
	sch->q.qlen++;
	sch->stats.bytes += skb->len;
	sch->stats.packets++;
	return NET_XMIT_SUCCESS;
}
static __inline__ struct sk_buff *
skb_dequeue_best(struct csz_sched_data * q)
{
	int i;
	struct sk_buff *skb;

	for (i=0; i<4; i++) {
		skb = skb_dequeue(&q->other[i]);
		if (skb)
			return skb;
	}
	return NULL;
}

static __inline__ struct sk_buff *
skb_peek_best(struct csz_sched_data * q)
{
	int i;
	struct sk_buff *skb;

	for (i=0; i<4; i++) {
		skb = skb_peek(&q->other[i]);
		if (skb)
			return skb;
	}
	return NULL;
}
static void csz_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc*)arg;

	qdisc_wakeup(sch->dev);
}
static __inline__ void
csz_move_queue(struct csz_sched_data *q, struct csz_flow *this, long delta)
{
	this->fprev->fnext = this->fnext;
	this->fnext->fprev = this->fprev;

	this->start += delta;
	this->finish += delta;

	csz_insert_finish(&q->f, this);
}
static __inline__ int csz_enough_tokens(struct csz_sched_data *q,
					struct csz_flow *this,
					struct sk_buff *skb)
{
	long toks;
	long shift = 0;
	psched_time_t now;

	PSCHED_GET_TIME(now);

	toks = PSCHED_TDIFF(now, this->t_tbf) + this->tokens - L2R(this,skb->len);

	if (this->throttled) {
		/* Remember the aposteriori delay */
		unsigned long R = csz_update(q);
		shift = R - this->R_tbf;
	}

	if (toks >= 0) {
		/* Now we have enough tokens to proceed */
		this->tokens = toks <= this->depth ? toks : this->depth;
		this->t_tbf = now;

		if (!this->throttled)
			return 1;

		/* The flow was throttled. Update its start & finish numbers
		   with the delay calculated aposteriori.
		 */
		this->throttled = 0;
		if (shift > 0)
			csz_move_queue(q, this, shift);
		return 1;
	}

	if (!this->throttled) {
		/* The flow has just been throttled; remember the
		   current round number to calculate the aposteriori delay
		 */
		this->throttled = 1;
		this->R_tbf = csz_update(q);
	}

	/* Move the whole queue to the time when it will be allowed to send.
	   We should translate time to a round number, but it is impossible,
	   so we make the most conservative estimate, i.e. we suppose
	   that only this flow is active and, hence, R = t.
	   Really, toks <= R <= toks/r_a.

	   This apriori shift in R will be adjusted later to reflect
	   the real delay. We cannot avoid it because:
	   - a throttled flow continues to be active from the viewpoint
	     of CSZ, so that it would acquire the highest priority
	     if start numbers were not adjusted;
	   - eventually, the finish number would become less than the
	     round number and the flow would be declared inactive.
	 */

	toks = -toks;

	/* Remember that we should start the watchdog */
	if (toks < q->wd_expires)
		q->wd_expires = toks;

	shift += toks;
	if (shift > 0)
		csz_move_queue(q, this, shift);

	csz_insert_start(&q->s, this);
	return 0;
}
static struct sk_buff *
csz_dequeue(struct Qdisc* sch)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	struct sk_buff *skb;
	struct csz_flow *this;

	q->wd_expires = 0;

	this = (struct csz_flow*)q->s.snext;

	while (this != (struct csz_flow*)&q->s) {

		/* First of all: unlink the flow from the start list */
		this->sprev->snext = this->snext;
		this->snext->sprev = this->sprev;

		if (this != &q->flow[0]) {	/* Guaranteed flow */
			skb = __skb_dequeue(&this->q);
			if (skb) {
				if (!csz_enough_tokens(q, this, skb)) {
					/* Throttled; csz_enough_tokens
					   re-inserted the flow. Put the
					   packet back and try the next
					   flow. */
					__skb_queue_head(&this->q, skb);
					this = (struct csz_flow*)q->s.snext;
					continue;
				}
				if (this->q.qlen) {
					struct sk_buff *nskb = skb_peek(&this->q);

					this->start += L2R(this,nskb->len);
					csz_insert_start(&q->s, this);
				}
				sch->q.qlen--;
				return skb;
			}
		} else {	/* Predictive or best effort flow */
			skb = skb_dequeue_best(q);
			if (skb) {
				unsigned peeked = this->peeked;

				this->peeked = 0;

				if (--this->q.qlen) {
					struct sk_buff *nskb;
					unsigned dequeued = L2R(this,skb->len);

					/* We did not get the same thing that
					   was peeked earlier; adjust the
					   start number.
					 */
					if (peeked != dequeued && peeked)
						this->start += dequeued - peeked;

					nskb = skb_peek_best(q);
					peeked = L2R(this,nskb->len);
					this->start += peeked;
					this->peeked = peeked;
					csz_insert_start(&q->s, this);
				}
				sch->q.qlen--;
				return skb;
			}
		}

		this = (struct csz_flow*)q->s.snext;
	}

	/* We are about to return no skb.
	   Schedule the watchdog timer, if the stall occurred because of
	   shaping.
	 */
	if (q->wd_expires) {
		unsigned long delay = PSCHED_US2JIFFIE(q->wd_expires);

		if (delay == 0)
			delay = 1;
		mod_timer(&q->wd_timer, jiffies + delay);
		sch->stats.overlimits++;
	}
	return NULL;
}
static void
csz_reset(struct Qdisc* sch)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	int i;

	for (i=0; i<4; i++)
		skb_queue_purge(&q->other[i]);

	for (i=0; i<CSZ_GUARANTEED; i++) {
		struct csz_flow *this = q->flow + i;
		skb_queue_purge(&this->q);
		this->snext = this->sprev =
		this->fnext = this->fprev = (struct csz_head*)this;
		this->start = this->finish = 0;
		PSCHED_GET_TIME(this->t_tbf);
		this->tokens = this->depth;
		this->throttled = 0;
	}
	q->s.snext = q->s.sprev = &q->s;
	q->f.fnext = q->f.fprev = &q->f;
	q->R_c = 0;
	q->rate = 0;

	del_timer(&q->wd_timer);
	sch->q.qlen = 0;
}
static void
csz_destroy(struct Qdisc* sch)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	struct tcf_proto *tp;

	del_timer(&q->wd_timer);

	while ((tp = q->filter_list) != NULL) {
		q->filter_list = tp->next;
		tcf_destroy(tp);
	}
}
static int csz_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	struct rtattr *tb[TCA_CSZ_PTAB];
	struct tc_csz_qopt *qopt;
	int i;

	if (opt == NULL ||
	    rtattr_parse(tb, TCA_CSZ_PTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)))
		return -EINVAL;
	if (tb[TCA_CSZ_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_CSZ_PARMS-1]) < sizeof(*qopt))
		return -EINVAL;
	qopt = RTA_DATA(tb[TCA_CSZ_PARMS-1]);

	q->R_log = qopt->R_log;
	q->delta_log = qopt->delta_log;
	for (i=0; i<=TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= CSZ_FLOWS)
			return -EINVAL;
		q->prio2band[i] = qopt->priomap[i];
	}

	for (i=0; i<4; i++)
		skb_queue_head_init(&q->other[i]);

	for (i=0; i<CSZ_GUARANTEED; i++) {
		struct csz_flow *this = q->flow + i;
		skb_queue_head_init(&this->q);
		this->snext = this->sprev =
		this->fnext = this->fprev = (struct csz_head*)this;
		this->start = this->finish = 0;
	}
	q->s.snext = q->s.sprev = &q->s;
	q->f.fnext = q->f.fprev = &q->f;

	init_timer(&q->wd_timer);
	q->wd_timer.data = (unsigned long)sch;
	q->wd_timer.function = csz_watchdog;
	return 0;
}
static int csz_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_csz_qopt opt;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	opt.flows = CSZ_FLOWS;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
	RTA_PUT(skb, TCA_CSZ_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
static int csz_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		     struct Qdisc **old)
{
	return -EINVAL;
}

static struct Qdisc * csz_leaf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static unsigned long csz_get(struct Qdisc *sch, u32 classid)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	unsigned long band = TC_H_MIN(classid) - 1;

	if (band >= CSZ_FLOWS)
		return 0;

	if (band < CSZ_GUARANTEED && q->flow[band].L_tab == NULL)
		return 0;

	return band+1;
}

static unsigned long csz_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return csz_get(sch, classid);
}

static void csz_put(struct Qdisc *sch, unsigned long cl)
{
	return;
}
static int csz_change(struct Qdisc *sch, u32 handle, u32 parent, struct rtattr **tca, unsigned long *arg)
{
	unsigned long cl = *arg;
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_CSZ_PTAB];
	struct tc_csz_copt *copt;
	struct csz_flow *a;

	if (opt == NULL ||
	    rtattr_parse(tb, TCA_CSZ_PTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)))
		return -EINVAL;
	if (tb[TCA_CSZ_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_CSZ_PARMS-1]) < sizeof(*copt))
		return -EINVAL;
	copt = RTA_DATA(tb[TCA_CSZ_PARMS-1]);

	if (tb[TCA_CSZ_RTAB-1] &&
	    RTA_PAYLOAD(tb[TCA_CSZ_RTAB-1]) < 1024)
		return -EINVAL;

	if (!cl)
		return -EINVAL;
	cl--;

	if (cl >= CSZ_GUARANTEED || q->flow[cl].L_tab == NULL)
		return -ENOENT;

	a = &q->flow[cl];

	spin_lock_bh(&sch->dev->queue_lock);
	a->rate_log = copt->rate_log;
	a->slice = copt->slice;
	a->limit = copt->limit;
	a->rate = copt->rate;
	a->buffer = copt->buffer;
	if (tb[TCA_CSZ_RTAB-1])
		memcpy(a->L_tab, RTA_DATA(tb[TCA_CSZ_RTAB-1]), 1024);
	spin_unlock_bh(&sch->dev->queue_lock);

	return 0;
}
static int csz_delete(struct Qdisc *sch, unsigned long cl)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	struct csz_flow *a;

	cl--;

	if (cl >= CSZ_GUARANTEED || q->flow[cl].L_tab == NULL)
		return -ENOENT;

	a = &q->flow[cl];

	spin_lock_bh(&sch->dev->queue_lock);
	a->fprev->fnext = a->fnext;
	a->fnext->fprev = a->fprev;
	a->sprev->snext = a->snext;
	a->snext->sprev = a->sprev;
	a->start = a->finish = 0;
	kfree(xchg(&q->flow[cl].L_tab, NULL));
	spin_unlock_bh(&sch->dev->queue_lock);

	return 0;
}
static int csz_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_csz_copt opt;

	tcm->tcm_handle = sch->handle|cl;

	cl--;

	if (cl < CSZ_GUARANTEED) {
		struct csz_flow *f = &q->flow[cl];

		if (f->L_tab == NULL)
			return -ENOENT;

		rta = (struct rtattr*)b;
		RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

		opt.limit = f->limit;
		opt.rate = f->rate;
		opt.slice = f->slice;
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
		opt.buffer = f->buffer;
		RTA_PUT(skb, TCA_CSZ_PARMS, sizeof(opt), &opt);
		rta->rta_len = skb->tail - b;
	}

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
static void csz_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
	int prio;

	if (arg->stop)
		return;

	for (prio = 0; prio < CSZ_FLOWS; prio++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (prio < CSZ_GUARANTEED && q->flow[prio].L_tab == NULL) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, prio+1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
static struct tcf_proto ** csz_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct csz_sched_data *q = (struct csz_sched_data *)sch->data;

	if (cl)
		return NULL;

	return &q->filter_list;
}
static struct Qdisc_class_ops csz_class_ops = {
	.graft		=	csz_graft,
	.leaf		=	csz_leaf,
	.get		=	csz_get,
	.put		=	csz_put,
	.change		=	csz_change,
	.delete		=	csz_delete,
	.walk		=	csz_walk,
	.tcf_chain	=	csz_find_tcf,
	.bind_tcf	=	csz_bind,
	.unbind_tcf	=	csz_put,
	.dump		=	csz_dump_class,
};
static struct Qdisc_ops csz_qdisc_ops = {
	.id		=	"csz",
	.cl_ops		=	&csz_class_ops,
	.priv_size	=	sizeof(struct csz_sched_data),
	.enqueue	=	csz_enqueue,
	.dequeue	=	csz_dequeue,
	.init		=	csz_init,
	.reset		=	csz_reset,
	.destroy	=	csz_destroy,
	.dump		=	csz_dump,
	.owner		=	THIS_MODULE,
};
static int __init csz_module_init(void)
{
	return register_qdisc(&csz_qdisc_ops);
}

static void __exit csz_module_exit(void)
{
	unregister_qdisc(&csz_qdisc_ops);
}

module_init(csz_module_init)
module_exit(csz_module_exit)
MODULE_LICENSE("GPL");