/* vim: ts=8 sw=8
 * net/sched/sch_htb.c  Hierarchical token bucket, feed tree version
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *              Stef Coene <stef.coene@docum.org>
 *                      HTB support at LARTC mailing list
 *              Ondrej Kraus, <krauso@barr.cz>
 *                      found missing INIT_QDISC(htb)
 *              Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *                      helped a lot to locate nasty class stall bug
 *              Andi Kleen, Jamal Hadi, Bert Hubert
 *                      code review and helpful comments on shaping
 *              Tomasz Wrona, <tw@eter.tym.pl>
 *                      created test case so that I was able to fix nasty bug
 *              Wilfried Weissmann
 *                      spotted bug in dequeue code and helped with fix
 *              Jiri Fojtasek
 *                      fixed requeue routine
 *              and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rbtree.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf is ALWAYS at level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/

#define HTB_HSIZE 16    /* classid hash size */
#define HTB_EWMAC 2     /* rate average over HTB_EWMAC*HTB_HSIZE sec */
#undef HTB_DEBUG        /* compile debugging support (activated by tc tool) */
#define HTB_RATECM 1    /* whether to use rate computer */
#define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
#define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
#define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
#define HTB_VER 0x30011 /* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* debugging support; S is subsystem, these are defined:
  0 - netlink messages
  1 - enqueue
  2 - drop & requeue
  3 - dequeue main
  4 - dequeue one prio DRR part
  5 - dequeue class accounting
  6 - class overlimit status computation
  7 - hint tree
  8 - event queue
 10 - rate estimator
 11 - classifier
 12 - fast dequeue cache

 L is level; 0 = none, 1 = basic info, 2 = detailed, 3 = full
 q->debug uint32 contains 16 2-bit fields, one per subsystem, starting
 from the LSB
 */
#ifdef HTB_DEBUG
#define HTB_DBG_COND(S,L) (((q->debug>>(2*S))&3) >= L)
#define HTB_DBG(S,L,FMT,ARG...) if (HTB_DBG_COND(S,L)) \
        printk(KERN_DEBUG FMT,##ARG)
#define HTB_CHCL(cl) BUG_TRAP((cl)->magic == HTB_CMAGIC)
#define HTB_PASSQ q,
#define HTB_ARGQ struct htb_sched *q,
#define static
#undef __inline__
#define __inline__
#undef inline
#define inline
#define HTB_CMAGIC 0xFEFAFEF1
#define htb_safe_rb_erase(N,R) do { BUG_TRAP((N)->rb_color != -1); \
                if ((N)->rb_color == -1) break; \
                rb_erase(N,R); \
                (N)->rb_color = -1; } while (0)
#else
#define HTB_DBG_COND(S,L) (0)
#define HTB_DBG(S,L,FMT,ARG...)
#define HTB_PASSQ
#define HTB_ARGQ
#define HTB_CHCL(cl)
#define htb_safe_rb_erase(N,R) rb_erase(N,R)
#endif


/* used internally to keep status of single class */
enum htb_cmode {
    HTB_CANT_SEND,              /* class can't send and can't borrow */
    HTB_MAY_BORROW,             /* class can't send but may borrow */
    HTB_CAN_SEND                /* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class
{
#ifdef HTB_DEBUG
        unsigned magic;
#endif
    /* general class parameters */
    u32 classid;
    struct gnet_stats_basic bstats;
    struct gnet_stats_queue qstats;
    struct gnet_stats_rate_est rate_est;
    struct tc_htb_xstats xstats;/* our special stats */
    int refcnt;                 /* usage count of this class */

#ifdef HTB_RATECM
    /* rate measurement counters */
    unsigned long rate_bytes,sum_bytes;
    unsigned long rate_packets,sum_packets;
#endif

    /* topology */
    int level;                  /* our level (see above) */
    struct htb_class *parent;   /* parent class */
    struct list_head hlist;     /* classid hash list item */
    struct list_head sibling;   /* sibling list item */
    struct list_head children;  /* children list */

    union {
            struct htb_class_leaf {
                    struct Qdisc *q;
                    int prio;
                    int aprio;
                    int quantum;
                    int deficit[TC_HTB_MAXDEPTH];
                    struct list_head drop_list;
            } leaf;
            struct htb_class_inner {
                    struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
                    struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
            /* When a class changes from state 1->2 and disconnects from
               its parent's feed then we lose the ptr value and start from
               the first child again. Here we store the classid of the
               last valid ptr (used when ptr is NULL). */
            u32 last_ptr_id[TC_HTB_NUMPRIO];
            } inner;
    } un;
    struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
    struct rb_node pq_node;              /* node for event queue */
    unsigned long pq_key;       /* the same type as jiffies global */

    int prio_activity;          /* for which prios are we active */
    enum htb_cmode cmode;       /* current mode of the class */

    /* class attached filters */
    struct tcf_proto *filter_list;
    int filter_cnt;

    int warned;         /* only one warning about non work conserving .. */

    /* token bucket parameters */
    struct qdisc_rate_table *rate;      /* rate table of the class itself */
    struct qdisc_rate_table *ceil;      /* ceiling rate (limits borrows too) */
    long buffer,cbuffer;                /* token bucket depth/rate */
    long mbuffer;                       /* max wait time */
    long tokens,ctokens;                /* current number of tokens */
    psched_time_t t_c;                  /* checkpoint time */
};

/* TODO: maybe compute rate when size is too large .. or drop ? */
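/* L2T ("length to time") converts a packet size to its transmit time
   by indexing the precomputed rate table; sizes past the last table
   cell are counted as giants and clamped to slot 255. */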
static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
        int size)
{
    int slot = size >> rate->rate.cell_log;
    if (slot > 255) {
        cl->xstats.giants++;
        slot = 255;
    }
    return rate->data[slot];
}

struct htb_sched
{
    struct list_head root;                      /* root classes list */
    struct list_head hash[HTB_HSIZE];           /* hashed by classid */
    struct list_head drops[TC_HTB_NUMPRIO];     /* active leaves (for drops) */

    /* self list - roots of self generating tree */
    struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
    int row_mask[TC_HTB_MAXDEPTH];
    struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
    u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

    /* self wait list - roots of wait PQs per row */
    struct rb_root wait_pq[TC_HTB_MAXDEPTH];

    /* time of nearest event per level (row) */
    unsigned long near_ev_cache[TC_HTB_MAXDEPTH];

    /* cached value of jiffies in dequeue */
    unsigned long jiffies;

    /* whether we hit non-work conserving class during this dequeue; we use */
    int nwc_hit;        /* this to disable mindelay complaint in dequeue */

    int defcls;         /* class where unclassified flows go to */
    u32 debug;          /* subsystem debug levels */

    /* filters for qdisc itself */
    struct tcf_proto *filter_list;
    int filter_cnt;

    int rate2quantum;           /* quant = rate / rate2quantum */
    psched_time_t now;          /* cached dequeue time */
    struct timer_list timer;    /* send delay timer */
#ifdef HTB_RATECM
    struct timer_list rttim;    /* rate computer timer */
    int recmp_bucket;           /* which hash bucket to recompute next */
#endif

    /* non shaped skbs; let them go directly thru */
    struct sk_buff_head direct_queue;
    int direct_qlen;  /* max qlen of above */

    long direct_pkts;
};

/* compute hash of size HTB_HSIZE for given handle */
static __inline__ int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
 #error "Declare new hash for your HTB_HSIZE"
#endif
    h ^= h>>8;  /* stolen from cbq_hash */
    h ^= h>>4;
    return h & 0xf;
}

/* find class in global hash table using given handle */
static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct list_head *p;
        if (TC_H_MAJ(handle) != sch->handle)
                return NULL;

        list_for_each (p,q->hash+htb_hash(handle)) {
                struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                if (cl->classid == handle)
                        return cl;
        }
        return NULL;
}

/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases a leaf class is
 * returned. We allow direct class selection by classid in priority. Then
 * we examine filters in qdisc and in inner nodes (if higher filter points
 * to the inner node). If we end up with classid MAJOR:0 we enqueue the
 * skb into special internal fifo (direct). These packets then go directly
 * thru. If we still have no valid leaf we try to use MAJOR:default leaf.
 * If that is still unsuccessful then we finish and return the direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1
static inline u32 htb_classid(struct htb_class *cl)
{
        return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl;
        struct tcf_result res;
        struct tcf_proto *tcf;
        int result;

        /* allow to select class by setting skb->priority to valid classid;
           note that nfmark can be used too by attaching filter fw with no
           rules in it */
        if (skb->priority == sch->handle)
                return HTB_DIRECT;  /* X:0 (direct flow) selected */
        if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0)
                return cl;

        *qerr = NET_XMIT_DROP;
        tcf = q->filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS;
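                        /* deliberate fall-through: QUEUED/STOLEN also
                           return NULL, the packet is consumed either way */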
                case TC_ACT_SHOT:
                        return NULL;
                }
#elif defined(CONFIG_NET_CLS_POLICE)
                if (result == TC_POLICE_SHOT)
                        return HTB_DIRECT;
#endif
                if ((cl = (void*)res.class) == NULL) {
                        if (res.classid == sch->handle)
                                return HTB_DIRECT;  /* X:0 (direct flow) */
                        if ((cl = htb_find(res.classid,sch)) == NULL)
                                break; /* filter selected invalid classid */
                }
                if (!cl->level)
                        return cl; /* we hit leaf; return it */

                /* we have got inner class; apply inner filter chain */
                tcf = cl->filter_list;
        }
        /* classification failed; try to use default class */
        cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle),q->defcls),sch);
        if (!cl || cl->level)
                return HTB_DIRECT; /* bad default .. this is safe bet */
        return cl;
}

#ifdef HTB_DEBUG
static void htb_next_rb_node(struct rb_node **n);
#define HTB_DUMTREE(root,memb) if(root) { \
        struct rb_node *n = (root)->rb_node; \
        while (n->rb_left) n = n->rb_left; \
        while (n) { \
                struct htb_class *cl = rb_entry(n, struct htb_class, memb); \
                printk(" %x",cl->classid); htb_next_rb_node (&n); \
        } }

static void htb_debug_dump (struct htb_sched *q)
{
        int i,p;
        printk(KERN_DEBUG "htb*g j=%lu lj=%lu\n",jiffies,q->jiffies);
        /* rows */
        for (i=TC_HTB_MAXDEPTH-1;i>=0;i--) {
                printk(KERN_DEBUG "htb*r%d m=%x",i,q->row_mask[i]);
                for (p=0;p<TC_HTB_NUMPRIO;p++) {
                        if (!q->row[i][p].rb_node) continue;
                        printk(" p%d:",p);
                        HTB_DUMTREE(q->row[i]+p,node[p]);
                }
                printk("\n");
        }
        /* classes */
        for (i = 0; i < HTB_HSIZE; i++) {
                struct list_head *l;
                list_for_each (l,q->hash+i) {
                        struct htb_class *cl = list_entry(l,struct htb_class,hlist);
                        long diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
                        printk(KERN_DEBUG "htb*c%x m=%d t=%ld c=%ld pq=%lu df=%ld ql=%d "
                                        "pa=%x f:",
                                cl->classid,cl->cmode,cl->tokens,cl->ctokens,
                                cl->pq_node.rb_color==-1?0:cl->pq_key,diff,
                                cl->level?0:cl->un.leaf.q->q.qlen,cl->prio_activity);
                        if (cl->level)
                        for (p=0;p<TC_HTB_NUMPRIO;p++) {
                                if (!cl->un.inner.feed[p].rb_node) continue;
                                printk(" p%d a=%x:",p,cl->un.inner.ptr[p]?rb_entry(cl->un.inner.ptr[p], struct htb_class,node[p])->classid:0);
                                HTB_DUMTREE(cl->un.inner.feed+p,node[p]);
                        }
                        printk("\n");
                }
        }
}
#endif
/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such a list for the given prio.
 */
static void htb_add_to_id_tree (HTB_ARGQ struct rb_root *root,
                struct htb_class *cl,int prio)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;
        HTB_DBG(7,3,"htb_add_id_tree cl=%X prio=%d\n",cl->classid,prio);
#ifdef HTB_DEBUG
        if (cl->node[prio].rb_color != -1) { BUG_TRAP(0); return; }
        HTB_CHCL(cl);
        if (*p) {
                struct htb_class *x = rb_entry(*p,struct htb_class,node[prio]);
                HTB_CHCL(x);
        }
#endif
        while (*p) {
                struct htb_class *c; parent = *p;
                c = rb_entry(parent, struct htb_class, node[prio]);
                HTB_CHCL(c);
                if (cl->classid > c->classid)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->node[prio], parent, p);
        rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to the priority event queue to indicate that the
 * class will change its mode in cl->pq_key microseconds. Make sure that
 * the class is not already in the queue.
 */
static void htb_add_to_wait_tree (struct htb_sched *q,
                struct htb_class *cl,long delay,int debug_hint)
{
        struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
        HTB_DBG(7,3,"htb_add_wt cl=%X key=%lu\n",cl->classid,cl->pq_key);
#ifdef HTB_DEBUG
        if (cl->pq_node.rb_color != -1) { BUG_TRAP(0); return; }
        HTB_CHCL(cl);
        if ((delay <= 0 || delay > cl->mbuffer) && net_ratelimit())
                printk(KERN_ERR "HTB: suspicious delay in wait_tree d=%ld cl=%X h=%d\n",delay,cl->classid,debug_hint);
#endif
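        /* convert the delay to jiffies and make sure the key lies
           strictly in the future so the event cannot fire this jiffy */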
        cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
        if (cl->pq_key == q->jiffies)
                cl->pq_key++;

        /* update the nearest event cache */
        if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
                q->near_ev_cache[cl->level] = cl->pq_key;

        while (*p) {
                struct htb_class *c; parent = *p;
                c = rb_entry(parent, struct htb_class, pq_node);
                if (time_after_eq(cl->pq_key, c->pq_key))
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->pq_node, parent, p);
        rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static void htb_next_rb_node(struct rb_node **n)
{
        *n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
                struct htb_class *cl,int mask)
{
        HTB_DBG(7,2,"htb_addrow cl=%X mask=%X rmask=%X\n",
                        cl->classid,mask,q->row_mask[cl->level]);
        HTB_CHCL(cl);
        q->row_mask[cl->level] |= mask;
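        /* iterate over the set bits of mask: ffz(~mask) yields the
           index of the lowest set bit, which is then cleared */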
        while (mask) {
                int prio = ffz(~mask);
                mask &= ~(1 << prio);
                htb_add_to_id_tree(HTB_PASSQ q->row[cl->level]+prio,cl,prio);
        }
}

/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
                struct htb_class *cl,int mask)
{
        int m = 0;
        HTB_CHCL(cl);
        while (mask) {
                int prio = ffz(~mask);
                mask &= ~(1 << prio);
                if (q->ptr[cl->level][prio] == cl->node+prio)
                        htb_next_rb_node(q->ptr[cl->level]+prio);
                htb_safe_rb_erase(cl->node + prio,q->row[cl->level]+prio);
                if (!q->row[cl->level][prio].rb_node)
                        m |= 1 << prio;
        }
        HTB_DBG(7,2,"htb_delrow cl=%X mask=%X rmask=%X maskdel=%X\n",
                        cl->classid,mask,q->row_mask[cl->level],m);
        q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m,mask = cl->prio_activity;
        HTB_DBG(7,2,"htb_act_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
        HTB_CHCL(cl);

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                HTB_CHCL(p);
                m = mask; while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.feed[prio].rb_node)
                                /* parent already has its feed in use, so
                                   reset the bit in mask as parent is
                                   already ok */
                                mask &= ~(1 << prio);

                        htb_add_to_id_tree(HTB_PASSQ p->un.inner.feed+prio,cl,prio);
                }
                HTB_DBG(7,3,"htb_act_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
                                p->classid,p->prio_activity,mask,p->cmode);
                p->prio_activity |= mask;
                cl = p; p = cl->parent;
                HTB_CHCL(cl);
        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_add_class_to_row(q,cl,mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent the old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m,mask = cl->prio_activity;
        HTB_DBG(7,2,"htb_deact_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
        HTB_CHCL(cl);

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask; mask = 0;
                while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.ptr[prio] == cl->node+prio) {
                                /* we are removing child which is pointed to
                                   from parent feed - forget the pointer but
                                   remember the classid */
                                p->un.inner.last_ptr_id[prio] = cl->classid;
                                p->un.inner.ptr[prio] = NULL;
                        }

                        htb_safe_rb_erase(cl->node + prio,p->un.inner.feed + prio);

                        if (!p->un.inner.feed[prio].rb_node)
                                mask |= 1 << prio;
                }
                HTB_DBG(7,3,"htb_deact_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
                                p->classid,p->prio_activity,mask,p->cmode);
                p->prio_activity &= ~mask;
                cl = p; p = cl->parent;
                HTB_CHCL(cl);
        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_remove_class_from_row(q,cl,mask);
}

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to the time difference
 * from now to the time when cl will change its state.
 * Also it is worth noting that the class mode doesn't change simply
 * at cl->{c,}tokens == 0 but rather there can be hysteresis in the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
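/* Summary of the decision below:
     ctokens below the low bound    -> HTB_CANT_SEND  (over ceil)
     tokens at/above the high bound -> HTB_CAN_SEND   (under rate)
     otherwise                      -> HTB_MAY_BORROW (over rate, under ceil) */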
static __inline__ enum htb_cmode
htb_class_mode(struct htb_class *cl,long *diff)
{
    long toks;

    if ((toks = (cl->ctokens + *diff)) < (
#if HTB_HYSTERESIS
            cl->cmode != HTB_CANT_SEND ? -cl->cbuffer :
#endif
            0)) {
            *diff = -toks;
            return HTB_CANT_SEND;
    }
    if ((toks = (cl->tokens + *diff)) >= (
#if HTB_HYSTERESIS
            cl->cmode == HTB_CAN_SEND ? -cl->buffer :
#endif
            0))
            return HTB_CAN_SEND;

    *diff = -toks;
    return HTB_MAY_BORROW;
}

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
        enum htb_cmode new_mode = htb_class_mode(cl,diff);

        HTB_CHCL(cl);
        HTB_DBG(7,1,"htb_chging_clmode %d->%d cl=%X\n",cl->cmode,new_mode,cl->classid);

        if (new_mode == cl->cmode)
                return;

        if (cl->prio_activity) { /* not necessary: speed optimization */
                if (cl->cmode != HTB_CANT_SEND)
                        htb_deactivate_prios(q,cl);
                cl->cmode = new_mode;
                if (new_mode != HTB_CANT_SEND)
                        htb_activate_prios(q,cl);
        } else
                cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns the (new) priority of the leaf and activates the feed
 * chain for that prio. It can be safely called on an already active leaf.
 * It also adds the leaf into the droplist.
 */
static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
{
        BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
        HTB_CHCL(cl);
        if (!cl->prio_activity) {
                cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
                htb_activate_prios(q,cl);
                list_add_tail(&cl->un.leaf.drop_list,q->drops+cl->un.leaf.aprio);
        }
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf. It also removes the class from the drop list.
 */
static __inline__ void
htb_deactivate(struct htb_sched *q,struct htb_class *cl)
{
        BUG_TRAP(cl->prio_activity);
        HTB_CHCL(cl);
        htb_deactivate_prios(q,cl);
        cl->prio_activity = 0;
        list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
    int ret;
    struct htb_sched *q = qdisc_priv(sch);
    struct htb_class *cl = htb_classify(skb,sch,&ret);

    if (cl == HTB_DIRECT) {
        /* enqueue to helper queue */
        if (q->direct_queue.qlen < q->direct_qlen) {
            __skb_queue_tail(&q->direct_queue, skb);
            q->direct_pkts++;
        } else {
            /* direct queue is full: drop the skb here instead of
               leaking it and miscounting sch->q.qlen below */
            kfree_skb(skb);
            sch->qstats.drops++;
            return NET_XMIT_DROP;
        }
#ifdef CONFIG_NET_CLS_ACT
    } else if (!cl) {
        if (ret == NET_XMIT_DROP)
                sch->qstats.drops++;
        kfree_skb (skb);
        return ret;
#endif
    } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
        sch->qstats.drops++;
        cl->qstats.drops++;
        return NET_XMIT_DROP;
    } else {
        cl->bstats.packets++; cl->bstats.bytes += skb->len;
        htb_activate (q,cl);
    }

    sch->q.qlen++;
    sch->bstats.packets++; sch->bstats.bytes += skb->len;
    HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
    return NET_XMIT_SUCCESS;
}

/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
    struct htb_sched *q = qdisc_priv(sch);
    int ret = NET_XMIT_SUCCESS;
    struct htb_class *cl = htb_classify(skb,sch, &ret);
    struct sk_buff *tskb;

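    /* direct/unclassified skbs go back to the head of the direct queue;
       if it cannot take the skb, drop the oldest (tail) entry instead
       and signal congestion */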
    if (cl == HTB_DIRECT || !cl) {
        /* enqueue to helper queue */
        if (q->direct_queue.qlen < q->direct_qlen && cl) {
            __skb_queue_head(&q->direct_queue, skb);
        } else {
            __skb_queue_head(&q->direct_queue, skb);
            tskb = __skb_dequeue_tail(&q->direct_queue);
            kfree_skb (tskb);
            sch->qstats.drops++;
            return NET_XMIT_CN;
        }
    } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
        sch->qstats.drops++;
        cl->qstats.drops++;
        return NET_XMIT_DROP;
    } else
            htb_activate (q,cl);

    sch->q.qlen++;
    sch->qstats.requeues++;
    HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
    return NET_XMIT_SUCCESS;
}

static void htb_timer(unsigned long arg)
{
    struct Qdisc *sch = (struct Qdisc*)arg;
    sch->flags &= ~TCQ_F_THROTTLED;
    wmb();
    netif_schedule(sch->dev);
}

#ifdef HTB_RATECM
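/* EWMA update: fold the per-interval sum D into the running rate R
   with weight 1/HTB_EWMAC, then clear D for the next interval */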
#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
static void htb_rate_timer(unsigned long arg)
{
        struct Qdisc *sch = (struct Qdisc*)arg;
        struct htb_sched *q = qdisc_priv(sch);
        struct list_head *p;

        /* lock queue so that we can muck with it */
        HTB_QLOCK(sch);
        HTB_DBG(10,1,"htb_rttmr j=%ld\n",jiffies);

        q->rttim.expires = jiffies + HZ;
        add_timer(&q->rttim);

        /* scan and recompute one bucket at time */
        if (++q->recmp_bucket >= HTB_HSIZE)
                q->recmp_bucket = 0;
        list_for_each (p,q->hash+q->recmp_bucket) {
                struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                HTB_DBG(10,2,"htb_rttmr_cl cl=%X sbyte=%lu spkt=%lu\n",
                                cl->classid,cl->sum_bytes,cl->sum_packets);
                RT_GEN (cl->sum_bytes,cl->rate_bytes);
                RT_GEN (cl->sum_packets,cl->rate_packets);
        }
        HTB_QUNLOCK(sch);
}
#endif

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such a case we remove the class from the event queue first.
 */
static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
                int level,int bytes)
{
        long toks,diff;
        enum htb_cmode old_mode;
        HTB_DBG(5,1,"htb_chrg_cl cl=%X lev=%d len=%d\n",cl->classid,level,bytes);

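        /* token-bucket accounting helper: T is the token field (tokens or
           ctokens), B the bucket depth (buffer or cbuffer) and R the rate
           table (rate or ceil); tokens grow by the elapsed time "diff",
           are clamped to the bucket depth and then charged the transmit
           time of "bytes", bounded below by -mbuffer */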
#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
        if (toks > cl->B) toks = cl->B; \
        toks -= L2T(cl, cl->R, bytes); \
        if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
        cl->T = toks

        while (cl) {
                HTB_CHCL(cl);
                diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
#ifdef HTB_DEBUG
                if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
                        if (net_ratelimit())
                                printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
                                       cl->classid, diff,
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
                                       q->now.tv_sec * 1000000ULL + q->now.tv_usec,
                                       cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
#else
                                       (unsigned long long) q->now,
                                       (unsigned long long) cl->t_c,
#endif
                                       q->jiffies);
                        diff = 1000;
                }
#endif
                if (cl->level >= level) {
                        if (cl->level == level) cl->xstats.lends++;
                        HTB_ACCNT (tokens,buffer,rate);
                } else {
                        cl->xstats.borrows++;
                        cl->tokens += diff; /* we moved t_c; update tokens */
                }
                HTB_ACCNT (ctokens,cbuffer,ceil);
                cl->t_c = q->now;
                HTB_DBG(5,2,"htb_chrg_clp cl=%X diff=%ld tok=%ld ctok=%ld\n",cl->classid,diff,cl->tokens,cl->ctokens);

                old_mode = cl->cmode; diff = 0;
                htb_change_class_mode(q,cl,&diff);
                if (old_mode != cl->cmode) {
                        if (old_mode != HTB_CAN_SEND)
                                htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
                        if (cl->cmode != HTB_CAN_SEND)
                                htb_add_to_wait_tree (q,cl,diff,1);
                }

#ifdef HTB_RATECM
                /* update rate counters */
                cl->sum_bytes += bytes; cl->sum_packets++;
#endif

                /* update byte stats except for leaves which are already updated */
                if (cl->level) {
                        cl->bstats.bytes += bytes;
                        cl->bstats.packets++;
                }
                cl = cl->parent;
        }
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns jiffies to
 * next pending event (0 for no event in pq).
 * Note: applied are events which have cl->pq_key <= q->jiffies.
 */
static long htb_do_events(struct htb_sched *q,int level)
{
        int i;
        HTB_DBG(8,1,"htb_do_events l=%d root=%p rmask=%X\n",
                        level,q->wait_pq[level].rb_node,q->row_mask[level]);
        for (i = 0; i < 500; i++) {
                struct htb_class *cl;
                long diff;
                struct rb_node *p = q->wait_pq[level].rb_node;
                if (!p) return 0;
                while (p->rb_left) p = p->rb_left;

                cl = rb_entry(p, struct htb_class, pq_node);
                if (time_after(cl->pq_key, q->jiffies)) {
                        HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - q->jiffies);
                        return cl->pq_key - q->jiffies;
                }
                htb_safe_rb_erase(p,q->wait_pq+level);
                diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
#ifdef HTB_DEBUG
                if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
                        if (net_ratelimit())
                                printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
                                       cl->classid, diff,
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
                                       q->now.tv_sec * 1000000ULL + q->now.tv_usec,
                                       cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
#else
                                       (unsigned long long) q->now,
                                       (unsigned long long) cl->t_c,
#endif
                                       q->jiffies);
                        diff = 1000;
                }
#endif
                htb_change_class_mode(q,cl,&diff);
                if (cl->cmode != HTB_CAN_SEND)
                        htb_add_to_wait_tree (q,cl,diff,2);
        }
        if (net_ratelimit())
                printk(KERN_WARNING "htb: too many events !\n");
        return HZ/10;
}

/* Returns class->node+prio from the id-tree where the class's id is >= id.
   NULL if no such one exists. */
static struct rb_node *
htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
{
        struct rb_node *r = NULL;
        while (n) {
                struct htb_class *cl = rb_entry(n,struct htb_class,node[prio]);
                if (id == cl->classid) return n;

                if (id > cl->classid) {
                        n = n->rb_right;
                } else {
                        r = n;
                        n = n->rb_left;
                }
        }
        return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf where the current feed pointer points.
 */
static struct htb_class *
htb_lookup_leaf(HTB_ARGQ struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
{
        int i;
        struct {
                struct rb_node *root;
                struct rb_node **pptr;
                u32 *pid;
        } stk[TC_HTB_MAXDEPTH],*sp = stk;

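        /* walk the feed trees iteratively: stk keeps one frame per tree
           level, each holding the subtree root plus the per-level DRR
           pointer and classid hint to resume from */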
        BUG_TRAP(tree->rb_node);
        sp->root = tree->rb_node;
        sp->pptr = pptr;
        sp->pid = pid;

        for (i = 0; i < 65535; i++) {
                HTB_DBG(4,2,"htb_lleaf ptr=%p pid=%X\n",*sp->pptr,*sp->pid);

                if (!*sp->pptr && *sp->pid) {
                        /* ptr was invalidated but id is valid - try to recover
                           the original or next ptr */
                        *sp->pptr = htb_id_find_next_upper(prio,sp->root,*sp->pid);
                }
                *sp->pid = 0; /* ptr is valid now so remove this hint as it
                                 can become out of date quickly */
                if (!*sp->pptr) { /* we are at right end; rewind & go up */
                        *sp->pptr = sp->root;
                        while ((*sp->pptr)->rb_left)
                                *sp->pptr = (*sp->pptr)->rb_left;
                        if (sp > stk) {
                                sp--;
                                BUG_TRAP(*sp->pptr); if(!*sp->pptr) return NULL;
                                htb_next_rb_node (sp->pptr);
                        }
                } else {
                        struct htb_class *cl;
                        cl = rb_entry(*sp->pptr,struct htb_class,node[prio]);
                        HTB_CHCL(cl);
                        if (!cl->level)
                                return cl;
                        (++sp)->root = cl->un.inner.feed[prio].rb_node;
                        sp->pptr = cl->un.inner.ptr+prio;
                        sp->pid = cl->un.inner.last_ptr_id+prio;
                }
        }
        BUG_TRAP(0);
        return NULL;
}

/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *
htb_dequeue_tree(struct htb_sched *q,int prio,int level)
{
        struct sk_buff *skb = NULL;
        struct htb_class *cl,*start;
        /* look initial class up in the row */
        start = cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,
                        q->ptr[level]+prio,q->last_ptr_id[level]+prio);

        do {
next:
                BUG_TRAP(cl);
                if (!cl) return NULL;
                HTB_DBG(4,1,"htb_deq_tr prio=%d lev=%d cl=%X defic=%d\n",
                                prio,level,cl->classid,cl->un.leaf.deficit[level]);

                /* class can be empty - it is unlikely but can be true if leaf
                   qdisc drops packets in enqueue routine or if someone used
                   graft operation on the leaf since last dequeue;
                   simply deactivate and skip such class */
                if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
                        struct htb_class *next;
                        htb_deactivate(q,cl);

                        /* row/level might become empty */
                        if ((q->row_mask[level] & (1 << prio)) == 0)
                                return NULL;

                        next = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,
                                        prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);

                        if (cl == start) /* fix start if we just deleted it */
                                start = next;
                        cl = next;
                        goto next;
                }

                if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL))
                        break;
                if (!cl->warned) {
                        printk(KERN_WARNING "htb: class %X isn't work conserving ?!\n",cl->classid);
                        cl->warned = 1;
                }
                q->nwc_hit++;
                htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
                cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,q->ptr[level]+prio,
                                q->last_ptr_id[level]+prio);

        } while (cl != start);

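        /* DRR accounting: charge skb->len against the class deficit; when
           it goes negative, grant a new quantum and advance the round
           robin pointer to the next class */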
        if (likely(skb != NULL)) {
                if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
                        HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
                                level?cl->parent->un.inner.ptr[prio]:q->ptr[0][prio],cl->un.leaf.quantum);
                        cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
                        htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
                }
                /* this used to be after charge_class but this constellation
                   gives us slightly better performance */
                if (!cl->un.leaf.q->q.qlen)
                        htb_deactivate (q,cl);
                htb_charge_class (q,cl,level,skb->len);
        }
        return skb;
}

static void htb_delay_by(struct Qdisc *sch,long delay)
{
        struct htb_sched *q = qdisc_priv(sch);
        if (delay <= 0) delay = 1;
        if (unlikely(delay > 5*HZ)) {
                if (net_ratelimit())
                        printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
                delay = 5*HZ;
        }
        /* why don't we use jiffies here? because expires can be in the past */
        mod_timer(&q->timer, q->jiffies + delay);
        sch->flags |= TCQ_F_THROTTLED;
        sch->qstats.overlimits++;
        HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb = NULL;
        struct htb_sched *q = qdisc_priv(sch);
        int level;
        long min_delay;
#ifdef HTB_DEBUG
        int evs_used = 0;
#endif

        q->jiffies = jiffies;
        HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
                        sch->q.qlen);

        /* try to dequeue direct packets as high prio (!) to minimize cpu work */
        if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
                sch->flags &= ~TCQ_F_THROTTLED;
                sch->q.qlen--;
                return skb;
        }

        if (!sch->q.qlen) goto fin;
        PSCHED_GET_TIME(q->now);

        min_delay = LONG_MAX;
        q->nwc_hit = 0;
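        /* scan levels from the leaves upward; within a level serve the
           priorities present in the row mask, lowest prio number (i.e.
           highest priority) first */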
        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
                int m;
                long delay;
                if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
                        delay = htb_do_events(q,level);
                        q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
#ifdef HTB_DEBUG
                        evs_used++;
#endif
                } else
                        delay = q->near_ev_cache[level] - q->jiffies;

                if (delay && min_delay > delay)
                        min_delay = delay;
                m = ~q->row_mask[level];
                while (m != (int)(-1)) {
                        int prio = ffz (m);
                        m |= 1 << prio;
                        skb = htb_dequeue_tree(q,prio,level);
                        if (likely(skb != NULL)) {
                                sch->q.qlen--;
                                sch->flags &= ~TCQ_F_THROTTLED;
                                goto fin;
                        }
                }
        }
#ifdef HTB_DEBUG
        if (!q->nwc_hit && min_delay >= 10*HZ && net_ratelimit()) {
                if (min_delay == LONG_MAX) {
                        printk(KERN_ERR "HTB: dequeue bug (%d,%lu,%lu), report it please !\n",
                                        evs_used,q->jiffies,jiffies);
                        htb_debug_dump(q);
                } else
                        printk(KERN_WARNING "HTB: mindelay=%ld, some class has "
                                        "too small rate\n",min_delay);
        }
#endif
        htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
fin:
        HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,q->jiffies,skb);
        return skb;
}

/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc* sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        int prio;

        for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
                struct list_head *p;
                list_for_each (p,q->drops+prio) {
                        struct htb_class *cl = list_entry(p, struct htb_class,
                                                          un.leaf.drop_list);
                        unsigned int len;
                        if (cl->un.leaf.q->ops->drop &&
                                (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
                                sch->q.qlen--;
                                if (!cl->un.leaf.q->q.qlen)
                                        htb_deactivate (q,cl);
                                return len;
                        }
                }
        }
        return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc* sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        int i;
        HTB_DBG(0,1,"htb_reset sch=%p, handle=%X\n",sch,sch->handle);

        for (i = 0; i < HTB_HSIZE; i++) {
                struct list_head *p;
                list_for_each (p,q->hash+i) {
                        struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                        if (cl->level)
                                memset(&cl->un.inner,0,sizeof(cl->un.inner));
                        else {
                                if (cl->un.leaf.q)
                                        qdisc_reset(cl->un.leaf.q);
                                INIT_LIST_HEAD(&cl->un.leaf.drop_list);
                        }
                        cl->prio_activity = 0;
                        cl->cmode = HTB_CAN_SEND;
#ifdef HTB_DEBUG
                        cl->pq_node.rb_color = -1;
                        memset(cl->node,255,sizeof(cl->node));
#endif

                }
        }
        sch->flags &= ~TCQ_F_THROTTLED;
        del_timer(&q->timer);
        __skb_queue_purge(&q->direct_queue);
        sch->q.qlen = 0;
        memset(q->row,0,sizeof(q->row));
        memset(q->row_mask,0,sizeof(q->row_mask));
        memset(q->wait_pq,0,sizeof(q->wait_pq));
        memset(q->ptr,0,sizeof(q->ptr));
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops+i);
}

static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct rtattr *tb[TCA_HTB_INIT];
        struct tc_htb_glob *gopt;
        int i;
#ifdef HTB_DEBUG
        printk(KERN_INFO "HTB init, kernel part version %d.%d\n",
                          HTB_VER >> 16,HTB_VER & 0xffff);
#endif
        if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
                        tb[TCA_HTB_INIT-1] == NULL ||
                        RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt)) {
                printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
                return -EINVAL;
        }
        gopt = RTA_DATA(tb[TCA_HTB_INIT-1]);
        if (gopt->version != HTB_VER >> 16) {
                printk(KERN_ERR "HTB: need tc/htb version %d (minor is %d), you have %d\n",
                                HTB_VER >> 16,HTB_VER & 0xffff,gopt->version);
                return -EINVAL;
        }
        q->debug = gopt->debug;
        HTB_DBG(0,1,"htb_init sch=%p handle=%X r2q=%d\n",sch,sch->handle,gopt->rate2quantum);

        INIT_LIST_HEAD(&q->root);
        for (i = 0; i < HTB_HSIZE; i++)
                INIT_LIST_HEAD(q->hash+i);
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops+i);

        init_timer(&q->timer);
        skb_queue_head_init(&q->direct_queue);

        q->direct_qlen = sch->dev->tx_queue_len;
        if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
                q->direct_qlen = 2;
        q->timer.function = htb_timer;
        q->timer.data = (unsigned long)sch;

#ifdef HTB_RATECM
        init_timer(&q->rttim);
        q->rttim.function = htb_rate_timer;
        q->rttim.data = (unsigned long)sch;
        q->rttim.expires = jiffies + HZ;
        add_timer(&q->rttim);
#endif
        if ((q->rate2quantum = gopt->rate2quantum) < 1)
                q->rate2quantum = 1;
        q->defcls = gopt->defcls;

        return 0;
}

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct htb_sched *q = qdisc_priv(sch);
        unsigned char    *b = skb->tail;
        struct rtattr *rta;
        struct tc_htb_glob gopt;
        HTB_DBG(0,1,"htb_dump sch=%p, handle=%X\n",sch,sch->handle);
        HTB_QLOCK(sch);
        gopt.direct_pkts = q->direct_pkts;

#ifdef HTB_DEBUG
        if (HTB_DBG_COND(0,2))
                htb_debug_dump(q);
#endif
        gopt.version = HTB_VER;
        gopt.rate2quantum = q->rate2quantum;
        gopt.defcls = q->defcls;
        gopt.debug = q->debug;
        rta = (struct rtattr*)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
        RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
        rta->rta_len = skb->tail - b;
        HTB_QUNLOCK(sch);
        return skb->len;
rtattr_failure:
        HTB_QUNLOCK(sch);
        skb_trim(skb, skb->tail - skb->data);
        return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
        struct sk_buff *skb, struct tcmsg *tcm)
{
#ifdef HTB_DEBUG
        struct htb_sched *q = qdisc_priv(sch);
#endif
        struct htb_class *cl = (struct htb_class*)arg;
        unsigned char    *b = skb->tail;
        struct rtattr *rta;
        struct tc_htb_opt opt;

        HTB_DBG(0,1,"htb_dump_class handle=%X clid=%X\n",sch->handle,cl->classid);

        HTB_QLOCK(sch);
        tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
        tcm->tcm_handle = cl->classid;
        if (!cl->level && cl->un.leaf.q)
                tcm->tcm_info = cl->un.leaf.q->handle;

        rta = (struct rtattr*)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

        memset (&opt,0,sizeof(opt));

        opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
        opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
        opt.quantum = cl->un.leaf.quantum; opt.prio = cl->un.leaf.prio;
        opt.level = cl->level;
        RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
        rta->rta_len = skb->tail - b;
        HTB_QUNLOCK(sch);
        return skb->len;
rtattr_failure:
        HTB_QUNLOCK(sch);
        skb_trim(skb, b - skb->data);
        return -1;
}
1353
1354 static int
1355 htb_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1356         struct gnet_dump *d)
1357 {
1358         struct htb_class *cl = (struct htb_class*)arg;
1359
1360 #ifdef HTB_RATECM
1361         cl->rate_est.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE);
1362         cl->rate_est.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE);
1363 #endif
1364
1365         if (!cl->level && cl->un.leaf.q)
1366                 cl->qstats.qlen = cl->un.leaf.q->q.qlen;
1367         cl->xstats.tokens = cl->tokens;
1368         cl->xstats.ctokens = cl->ctokens;
1369
1370         if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
1371             gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1372             gnet_stats_copy_queue(d, &cl->qstats) < 0)
1373                 return -1;
1374
1375         return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1376 }
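/* Worked example (illustrative, from the arithmetic above): with
 * HTB_EWMAC == 2 and HTB_HSIZE == 16 the divisor is 32, so a class whose
 * averaged counter rate_bytes has settled at 3,200,000 is reported as
 * rate_est.bps = 3200000 / 32 = 100000 bytes/sec.
 */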
1377
1378 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1379         struct Qdisc **old)
1380 {
1381         struct htb_class *cl = (struct htb_class*)arg;
1382
1383         if (cl && !cl->level) {
1384                 if (new == NULL && (new = qdisc_create_dflt(sch->dev, 
1385                                         &pfifo_qdisc_ops)) == NULL)
1386                                         return -ENOBUFS;
1387                 sch_tree_lock(sch);
1388                 if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
1389                         if (cl->prio_activity)
1390                                 htb_deactivate (qdisc_priv(sch),cl);
1391
1392                         /* TODO: is this correct? Why doesn't CBQ do it? */
1393                         sch->q.qlen -= (*old)->q.qlen;  
1394                         qdisc_reset(*old);
1395                 }
1396                 sch_tree_unlock(sch);
1397                 return 0;
1398         }
1399         return -ENOENT;
1400 }
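/* Illustration (assumption, not in the original source): grafting replaces
 * the elementary qdisc of a leaf class, e.g. swapping the default pfifo on
 * leaf 1:10 for SFQ:
 *
 *     tc qdisc add dev eth0 parent 1:10 handle 10: sfq perturb 10
 *
 * Grafting onto an inner class fails with -ENOENT, per the level check.
 */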
1401
1402 static struct Qdisc * htb_leaf(struct Qdisc *sch, unsigned long arg)
1403 {
1404         struct htb_class *cl = (struct htb_class*)arg;
1405         return (cl && !cl->level) ? cl->un.leaf.q : NULL;
1406 }
1407
1408 static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1409 {
1410 #ifdef HTB_DEBUG
1411         struct htb_sched *q = qdisc_priv(sch);
1412 #endif
1413         struct htb_class *cl = htb_find(classid,sch);
1414         HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
1415         if (cl) 
1416                 cl->refcnt++;
1417         return (unsigned long)cl;
1418 }
1419
1420 static void htb_destroy_filters(struct tcf_proto **fl)
1421 {
1422         struct tcf_proto *tp;
1423
1424         while ((tp = *fl) != NULL) {
1425                 *fl = tp->next;
1426                 tcf_destroy(tp);
1427         }
1428 }
1429
1430 static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
1431 {
1432         struct htb_sched *q = qdisc_priv(sch);
1433         HTB_DBG(0,1,"htb_destrycls clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
1434         if (!cl->level) {
1435                 BUG_TRAP(cl->un.leaf.q);
1436                 sch->q.qlen -= cl->un.leaf.q->q.qlen;
1437                 qdisc_destroy(cl->un.leaf.q);
1438         }
1439         qdisc_put_rtab(cl->rate);
1440         qdisc_put_rtab(cl->ceil);
1441         
1442         htb_destroy_filters (&cl->filter_list);
1443         
1444         while (!list_empty(&cl->children)) 
1445                 htb_destroy_class (sch,list_entry(cl->children.next,
1446                                         struct htb_class,sibling));
1447
1448         /* note: this delete may happen twice (see htb_delete) */
1449         list_del(&cl->hlist);
1450         list_del(&cl->sibling);
1451         
1452         if (cl->prio_activity)
1453                 htb_deactivate (q,cl);
1454         
1455         if (cl->cmode != HTB_CAN_SEND)
1456                 htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
1457         
1458         kfree(cl);
1459 }
1460
1461 /* always called under BH & queue lock */
1462 static void htb_destroy(struct Qdisc* sch)
1463 {
1464         struct htb_sched *q = qdisc_priv(sch);
1465         HTB_DBG(0,1,"htb_destroy q=%p\n",q);
1466
1467         del_timer_sync (&q->timer);
1468 #ifdef HTB_RATECM
1469         del_timer_sync (&q->rttim);
1470 #endif
1471         /* This line used to be after the htb_destroy_class call below,
1472            and surprisingly it worked in 2.4. But it must precede it
1473            because filters need their target class alive to be able to call
1474            unbind_filter on it (without an Oops). */
1475         htb_destroy_filters(&q->filter_list);
1476         
1477         while (!list_empty(&q->root)) 
1478                 htb_destroy_class (sch,list_entry(q->root.next,
1479                                         struct htb_class,sibling));
1480
1481         __skb_queue_purge(&q->direct_queue);
1482 }
1483
1484 static int htb_delete(struct Qdisc *sch, unsigned long arg)
1485 {
1486         struct htb_sched *q = qdisc_priv(sch);
1487         struct htb_class *cl = (struct htb_class*)arg;
1488         HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
1489
1490         // TODO: why don't we allow deleting a subtree? references? does
1491         // the tc subsystem guarantee that in htb_destroy it holds no class
1492         // refs, so that we can remove children safely there?
1493         if (!list_empty(&cl->children) || cl->filter_cnt)
1494                 return -EBUSY;
1495         
1496         sch_tree_lock(sch);
1497         
1498         /* delete from hash and active; remainder in destroy_class */
1499         list_del_init(&cl->hlist);
1500         if (cl->prio_activity)
1501                 htb_deactivate (q,cl);
1502
1503         if (--cl->refcnt == 0)
1504                 htb_destroy_class(sch,cl);
1505
1506         sch_tree_unlock(sch);
1507         return 0;
1508 }
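/* Illustration (assumption, not in the original source): deletion is
 * requested from user space as
 *
 *     tc class del dev eth0 classid 1:10
 *
 * and, per the check above, fails with -EBUSY while the class still has
 * children or bound filters.
 */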
1509
1510 static void htb_put(struct Qdisc *sch, unsigned long arg)
1511 {
1512 #ifdef HTB_DEBUG
1513         struct htb_sched *q = qdisc_priv(sch);
1514 #endif
1515         struct htb_class *cl = (struct htb_class*)arg;
1516         HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
1517
1518         if (--cl->refcnt == 0)
1519                 htb_destroy_class(sch,cl);
1520 }
1521
1522 static int htb_change_class(struct Qdisc *sch, u32 classid, 
1523                 u32 parentid, struct rtattr **tca, unsigned long *arg)
1524 {
1525         int err = -EINVAL;
1526         struct htb_sched *q = qdisc_priv(sch);
1527         struct htb_class *cl = (struct htb_class*)*arg,*parent;
1528         struct rtattr *opt = tca[TCA_OPTIONS-1];
1529         struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1530         struct rtattr *tb[TCA_HTB_RTAB];
1531         struct tc_htb_opt *hopt;
1532
1533         /* extract all subattrs from opt attr */
1534         if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
1535                         tb[TCA_HTB_PARMS-1] == NULL ||
1536                         RTA_PAYLOAD(tb[TCA_HTB_PARMS-1]) < sizeof(*hopt))
1537                 goto failure;
1538         
1539         parent = parentid == TC_H_ROOT ? NULL : htb_find (parentid,sch);
1540
1541         hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
1542         HTB_DBG(0,1,"htb_chg cl=%p(%X), clid=%X, parid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,classid,parentid,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
1543         rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
1544         ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
1545         if (!rtab || !ctab) goto failure;
1546
1547         if (!cl) { /* new class */
1548                 struct Qdisc *new_q;
1549                 /* check for valid classid */
1550                 if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
1551                         goto failure;
1552
1553                 /* check maximal depth */
1554                 if (parent && parent->parent && parent->parent->level < 2) {
1555                         printk(KERN_ERR "htb: tree is too deep\n");
1556                         goto failure;
1557                 }
1558                 err = -ENOBUFS;
1559                 if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
1560                         goto failure;
1561                 
1562                 memset(cl, 0, sizeof(*cl));
1563                 cl->refcnt = 1;
1564                 INIT_LIST_HEAD(&cl->sibling);
1565                 INIT_LIST_HEAD(&cl->hlist);
1566                 INIT_LIST_HEAD(&cl->children);
1567                 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1568 #ifdef HTB_DEBUG
1569                 cl->magic = HTB_CMAGIC;
1570 #endif
1571
1572                 /* create the leaf qdisc early because it uses kmalloc(GFP_KERNEL),
1573                    which can't be called inside sch_tree_lock
1574                    -- thanks to Karlis Peisenieks */
1575                 new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
1576                 sch_tree_lock(sch);
1577                 if (parent && !parent->level) {
1578                         /* turn parent into inner node */
1579                         sch->q.qlen -= parent->un.leaf.q->q.qlen;
1580                         qdisc_destroy (parent->un.leaf.q);
1581                         if (parent->prio_activity) 
1582                                 htb_deactivate (q,parent);
1583
1584                         /* remove from evt list because of level change */
1585                         if (parent->cmode != HTB_CAN_SEND) {
1586                                 htb_safe_rb_erase(&parent->pq_node,q->wait_pq /*+0*/);
1587                                 parent->cmode = HTB_CAN_SEND;
1588                         }
1589                         parent->level = (parent->parent ? parent->parent->level
1590                                         : TC_HTB_MAXDEPTH) - 1;
1591                         memset (&parent->un.inner,0,sizeof(parent->un.inner));
1592                 }
1593                 /* the leaf (this new class) needs an elementary qdisc */
1594                 cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1595
1596                 cl->classid = classid; cl->parent = parent;
1597
1598                 /* set class to be in HTB_CAN_SEND state */
1599                 cl->tokens = hopt->buffer;
1600                 cl->ctokens = hopt->cbuffer;
1601                 cl->mbuffer = 60000000; /* 1min */
1602                 PSCHED_GET_TIME(cl->t_c);
1603                 cl->cmode = HTB_CAN_SEND;
1604
1605                 /* attach to the hash list and parent's family */
1606                 list_add_tail(&cl->hlist, q->hash+htb_hash(classid));
1607                 list_add_tail(&cl->sibling, parent ? &parent->children : &q->root);
1608 #ifdef HTB_DEBUG
1609                 { 
1610                         int i;
1611                         for (i = 0; i < TC_HTB_NUMPRIO; i++) cl->node[i].rb_color = -1;
1612                         cl->pq_node.rb_color = -1;
1613                 }
1614 #endif
1615         } else sch_tree_lock(sch);
1616
1617         /* there used to be a nasty bug here: we have to check that the
1618            node is really a leaf before changing cl->un.leaf! */
1619         if (!cl->level) {
1620                 cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
1621                 if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
1622                         printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
1623                         cl->un.leaf.quantum = 1000;
1624                 }
1625                 if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
1626                         printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
1627                         cl->un.leaf.quantum = 200000;
1628                 }
1629                 if (hopt->quantum)
1630                         cl->un.leaf.quantum = hopt->quantum;
1631                 if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
1632                         cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
1633         }
1634
1635         cl->buffer = hopt->buffer;
1636         cl->cbuffer = hopt->cbuffer;
1637         if (cl->rate) { qdisc_put_rtab(cl->rate); } cl->rate = rtab;
1638         if (cl->ceil) { qdisc_put_rtab(cl->ceil); } cl->ceil = ctab;
1639         sch_tree_unlock(sch);
1640
1641         *arg = (unsigned long)cl;
1642         return 0;
1643
1644 failure:
1645         if (rtab) qdisc_put_rtab(rtab);
1646         if (ctab) qdisc_put_rtab(ctab);
1647         return err;
1648 }
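/* Illustration (assumption, not in the original source): this handler backs
 * both "tc class add" and "tc class change", e.g.
 *
 *     tc class add dev eth0 parent 1: classid 1:10 htb \
 *             rate 8mbit ceil 16mbit burst 10k
 *
 * With no explicit quantum, the code above derives rate/r2q in bytes: 8mbit
 * is 1,000,000 bytes/sec, so the default r2q of 10 yields quantum = 100,000,
 * inside the warned-about [1000, 200000] range.
 */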
1649
1650 static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
1651 {
1652         struct htb_sched *q = qdisc_priv(sch);
1653         struct htb_class *cl = (struct htb_class *)arg;
1654         struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
1655         HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
1656         return fl;
1657 }
1658
1659 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1660         u32 classid)
1661 {
1662         struct htb_sched *q = qdisc_priv(sch);
1663         struct htb_class *cl = htb_find (classid,sch);
1664         HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
1665         /*if (cl && !cl->level) return 0;
1666           The line above used to be there to prevent attaching filters to
1667           leaves. But at least the tc_index filter uses this just to get the
1668           class for other reasons, so we have to allow for it.
1669           ----
1670           19.6.2002 As Werner explained, it is ok - binding a filter is just
1671           another way to "lock" the class - unlike "get", this lock can
1672           be broken by the class during destroy, IIUC.
1673          */
1674         if (cl) 
1675                 cl->filter_cnt++; 
1676         else 
1677                 q->filter_cnt++;
1678         return (unsigned long)cl;
1679 }
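/* Illustration (assumption, not in the original source): a filter binds to
 * a class through its flowid, e.g. steering port-80 traffic into 1:10:
 *
 *     tc filter add dev eth0 parent 1: protocol ip u32 \
 *             match ip dport 80 0xffff flowid 1:10
 *
 * Each binding bumps filter_cnt, which keeps htb_delete returning -EBUSY
 * until the filter is removed.
 */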
1680
1681 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1682 {
1683         struct htb_sched *q = qdisc_priv(sch);
1684         struct htb_class *cl = (struct htb_class *)arg;
1685         HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
1686         if (cl) 
1687                 cl->filter_cnt--; 
1688         else 
1689                 q->filter_cnt--;
1690 }
1691
1692 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1693 {
1694         struct htb_sched *q = qdisc_priv(sch);
1695         int i;
1696
1697         if (arg->stop)
1698                 return;
1699
1700         for (i = 0; i < HTB_HSIZE; i++) {
1701                 struct list_head *p;
1702                 list_for_each (p,q->hash+i) {
1703                         struct htb_class *cl = list_entry(p,struct htb_class,hlist);
1704                         if (arg->count < arg->skip) {
1705                                 arg->count++;
1706                                 continue;
1707                         }
1708                         if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1709                                 arg->stop = 1;
1710                                 return;
1711                         }
1712                         arg->count++;
1713                 }
1714         }
1715 }
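/* Note (illustrative): htb_walk is the iterator behind class listings such
 * as "tc -s class show dev eth0"; arg->skip and arg->count let an
 * interrupted dump resume when one netlink skb cannot hold every class.
 */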
1716
1717 static struct Qdisc_class_ops htb_class_ops = {
1718         .graft          =       htb_graft,
1719         .leaf           =       htb_leaf,
1720         .get            =       htb_get,
1721         .put            =       htb_put,
1722         .change         =       htb_change_class,
1723         .delete         =       htb_delete,
1724         .walk           =       htb_walk,
1725         .tcf_chain      =       htb_find_tcf,
1726         .bind_tcf       =       htb_bind_filter,
1727         .unbind_tcf     =       htb_unbind_filter,
1728         .dump           =       htb_dump_class,
1729         .dump_stats     =       htb_dump_class_stats,
1730 };
1731
1732 static struct Qdisc_ops htb_qdisc_ops = {
1733         .next           =       NULL,
1734         .cl_ops         =       &htb_class_ops,
1735         .id             =       "htb",
1736         .priv_size      =       sizeof(struct htb_sched),
1737         .enqueue        =       htb_enqueue,
1738         .dequeue        =       htb_dequeue,
1739         .requeue        =       htb_requeue,
1740         .drop           =       htb_drop,
1741         .init           =       htb_init,
1742         .reset          =       htb_reset,
1743         .destroy        =       htb_destroy,
1744         .change         =       NULL /* htb_change */,
1745         .dump           =       htb_dump,
1746         .owner          =       THIS_MODULE,
1747 };
1748
1749 static int __init htb_module_init(void)
1750 {
1751         return register_qdisc(&htb_qdisc_ops);
1752 }
1753 static void __exit htb_module_exit(void)
1754 {
1755         unregister_qdisc(&htb_qdisc_ops);
1756 }
1757 module_init(htb_module_init)
1758 module_exit(htb_module_exit)
1759 MODULE_LICENSE("GPL");
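/* Note (illustrative, relies on behavior outside this file): the qdisc
 * registers under the id "htb"; with modular builds, "tc qdisc add ... htb"
 * normally auto-loads sch_htb via the scheduler's request_module path.
 */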