65797b3306cccb7c6c066b87f904ac3750df21ec
[linux-2.6.git] / net / sched / sch_htb.c
1 /* vim: ts=8 sw=8
2  * net/sched/sch_htb.c  Hierarchical token bucket, feed tree version
3  *
4  *              This program is free software; you can redistribute it and/or
5  *              modify it under the terms of the GNU General Public License
6  *              as published by the Free Software Foundation; either version
7  *              2 of the License, or (at your option) any later version.
8  *
9  * Authors:     Martin Devera, <devik@cdi.cz>
10  *
11  * Credits (in time order) for older HTB versions:
12  *              Stef Coene <stef.coene@docum.org>
13  *                      HTB support at LARTC mailing list
14  *              Ondrej Kraus, <krauso@barr.cz> 
15  *                      found missing INIT_QDISC(htb)
16  *              Vladimir Smelhaus, Aamer Akhter, Bert Hubert
17  *                      helped a lot to locate nasty class stall bug
18  *              Andi Kleen, Jamal Hadi, Bert Hubert
19  *                      code review and helpful comments on shaping
20  *              Tomasz Wrona, <tw@eter.tym.pl>
21  *                      created test case so that I was able to fix nasty bug
22  *              Wilfried Weissmann
23  *                      spotted bug in dequeue code and helped with fix
24  *              Jiri Fojtasek
25  *                      fixed requeue routine
26  *              and many others. thanks.
27  *
28  * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
29  */
30 #include <linux/config.h>
31 #include <linux/module.h>
32 #include <asm/uaccess.h>
33 #include <asm/system.h>
34 #include <asm/bitops.h>
35 #include <linux/types.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/string.h>
39 #include <linux/mm.h>
40 #include <linux/socket.h>
41 #include <linux/sockios.h>
42 #include <linux/in.h>
43 #include <linux/errno.h>
44 #include <linux/interrupt.h>
45 #include <linux/if_ether.h>
46 #include <linux/inet.h>
47 #include <linux/netdevice.h>
48 #include <linux/etherdevice.h>
49 #include <linux/notifier.h>
50 #include <net/ip.h>
51 #include <net/route.h>
52 #include <linux/skbuff.h>
53 #include <linux/list.h>
54 #include <linux/compiler.h>
55 #include <net/sock.h>
56 #include <net/pkt_sched.h>
57 #include <linux/rbtree.h>
58
59 /* HTB algorithm.
60     Author: devik@cdi.cz
61     ========================================================================
62     HTB is like TBF with multiple classes. It is also similar to CBQ because
63     it allows to assign priority to each class in hierarchy. 
64     In fact it is another implementation of Floyd's formal sharing.
65
66     Levels:
67     Each class is assigned level. Leaf has ALWAYS level 0 and root 
68     classes have level TC_HTB_MAXDEPTH-1. Interior nodes has level
69     one less than their parent.
70 */
71
72 #define HTB_HSIZE 16    /* classid hash size */
73 #define HTB_EWMAC 2     /* rate average over HTB_EWMAC*HTB_HSIZE sec */
74 #define HTB_DEBUG 1     /* compile debugging support (activated by tc tool) */
75 #define HTB_RATECM 1    /* whether to use rate computer */
76 #define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
77 #define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
78 #define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
79 #define HTB_VER 0x30010 /* major must be matched with number suplied by TC as version */
80
81 #if HTB_VER >> 16 != TC_HTB_PROTOVER
82 #error "Mismatched sch_htb.c and pkt_sch.h"
83 #endif
84
85 /* debugging support; S is subsystem, these are defined:
86   0 - netlink messages
87   1 - enqueue
88   2 - drop & requeue
89   3 - dequeue main
90   4 - dequeue one prio DRR part
91   5 - dequeue class accounting
92   6 - class overlimit status computation
93   7 - hint tree
94   8 - event queue
95  10 - rate estimator
96  11 - classifier 
97  12 - fast dequeue cache
98
99  L is level; 0 = none, 1 = basic info, 2 = detailed, 3 = full
100  q->debug uint32 contains 16 2-bit fields one for subsystem starting
101  from LSB
102  */
103 #ifdef HTB_DEBUG
104 #define HTB_DBG_COND(S,L) (((q->debug>>(2*S))&3) >= L)
105 #define HTB_DBG(S,L,FMT,ARG...) if (HTB_DBG_COND(S,L)) \
106         printk(KERN_DEBUG FMT,##ARG)
107 #define HTB_CHCL(cl) BUG_TRAP((cl)->magic == HTB_CMAGIC)
108 #define HTB_PASSQ q,
109 #define HTB_ARGQ struct htb_sched *q,
110 #define static
111 #undef __inline__
112 #define __inline__
113 #undef inline
114 #define inline
115 #define HTB_CMAGIC 0xFEFAFEF1
116 #define htb_safe_rb_erase(N,R) do { BUG_TRAP((N)->rb_color != -1); \
117                 if ((N)->rb_color == -1) break; \
118                 rb_erase(N,R); \
119                 (N)->rb_color = -1; } while (0)
120 #else
121 #define HTB_DBG_COND(S,L) (0)
122 #define HTB_DBG(S,L,FMT,ARG...)
123 #define HTB_PASSQ
124 #define HTB_ARGQ
125 #define HTB_CHCL(cl)
126 #define htb_safe_rb_erase(N,R) rb_erase(N,R)
127 #endif
128
129
/* used internally to keep status of single class */
enum htb_cmode {
    HTB_CANT_SEND,		/* class can't send and can't borrow */
    HTB_MAY_BORROW,		/* class can't send but may borrow */
    HTB_CAN_SEND		/* class can send */
};
136
/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class
{
#ifdef HTB_DEBUG
	unsigned magic;		/* HTB_CMAGIC; integrity check used by HTB_CHCL() */
#endif
    /* general class parameters */
    u32 classid;
    struct tc_stats	stats;	/* generic stats */
    spinlock_t		*stats_lock;
    struct tc_htb_xstats xstats;/* our special stats */
    int refcnt;			/* usage count of this class */

#ifdef HTB_RATECM
    /* rate measurement counters; sum_* accumulate between runs of the
       rate timer, rate_* hold the EWMA result (see htb_rate_timer) */
    unsigned long rate_bytes,sum_bytes;
    unsigned long rate_packets,sum_packets;
#endif

    /* topology */
    int level;			/* our level (see above); 0 == leaf */
    struct htb_class *parent;	/* parent class */
    struct list_head hlist;	/* classid hash list item */
    struct list_head sibling;	/* sibling list item */
    struct list_head children;	/* children list */

    /* leaf and inner classes never need both sets of fields at once */
    union {
	    struct htb_class_leaf {
		    struct Qdisc *q;	/* attached leaf qdisc */
		    int prio;		/* configured priority */
		    int aprio;		/* prio recorded when class went active */
		    int quantum;	/* DRR quantum (bytes per round) */
		    int deficit[TC_HTB_MAXDEPTH];	/* DRR deficit, per borrow level */
		    struct list_head drop_list;	/* membership in q->drops[aprio] */
	    } leaf;
	    struct htb_class_inner {
		    struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
		    struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
	    } inner;
    } un;
    struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
    struct rb_node pq_node;		 /* node for event queue */
    unsigned long pq_key;	/* the same type as jiffies global */
    
    int prio_activity;		/* bitmask: for which prios are we active */
    enum htb_cmode cmode;	/* current mode of the class */

    /* class attached filters */
    struct tcf_proto *filter_list;
    int filter_cnt;

    int warned;		/* only one warning about non work conserving .. */

    /* token bucket parameters */
    struct qdisc_rate_table *rate;	/* rate table of the class itself */
    struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
    long buffer,cbuffer;		/* token bucket depth/rate */
    long mbuffer;			/* max wait time */
    long tokens,ctokens;		/* current number of tokens */
    psched_time_t t_c;			/* checkpoint time */
};
198
199 /* TODO: maybe compute rate when size is too large .. or drop ? */
200 static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
201         int size)
202
203     int slot = size >> rate->rate.cell_log;
204     if (slot > 255) {
205         cl->xstats.giants++;
206         slot = 255;
207     }
208     return rate->data[slot];
209 }
210
/* per-qdisc scheduler state */
struct htb_sched
{
    struct list_head root;			/* root classes list */
    struct list_head hash[HTB_HSIZE];		/* hashed by classid */
    struct list_head drops[TC_HTB_NUMPRIO];	/* active leaves (for drops) */
    
    /* self list - roots of self generating tree */
    struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
    int row_mask[TC_HTB_MAXDEPTH];	/* bit p set => row[level][p] non-empty */
    struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO]; /* DRR round-robin cursors */

    /* self wait list - roots of wait PQs per row */
    struct rb_root wait_pq[TC_HTB_MAXDEPTH];

    /* time of nearest event per level (row) */
    unsigned long near_ev_cache[TC_HTB_MAXDEPTH];

    /* cached value of jiffies in dequeue */
    unsigned long jiffies;

    /* whether we hit non-work conserving class during this dequeue; we use */
    int nwc_hit;	/* this to disable mindelay complaint in dequeue */

    int defcls;		/* class where unclassified flows go to */
    u32 debug;		/* subsystem debug levels (16 2-bit fields, see above) */

    /* filters for qdisc itself */
    struct tcf_proto *filter_list;
    int filter_cnt;

    int rate2quantum;		/* quant = rate / rate2quantum */
    psched_time_t now;		/* cached dequeue time */
    struct timer_list timer;	/* send delay timer */
#ifdef HTB_RATECM
    struct timer_list rttim;	/* rate computer timer */
    int recmp_bucket;		/* which hash bucket to recompute next */
#endif
    
    /* non shaped skbs; let them go directly thru */
    struct sk_buff_head direct_queue;
    int direct_qlen;  /* max qlen of above */

    long direct_pkts;	/* counter of packets sent via the direct queue */
};
255
256 /* compute hash of size HTB_HSIZE for given handle */
257 static __inline__ int htb_hash(u32 h) 
258 {
259 #if HTB_HSIZE != 16
260  #error "Declare new hash for your HTB_HSIZE"
261 #endif
262     h ^= h>>8;  /* stolen from cbq_hash */
263     h ^= h>>4;
264     return h & 0xf;
265 }
266
/* find class in global hash table using given handle;
   returns NULL when the handle's major number doesn't match this qdisc
   or no class with that classid exists in the hash bucket */
static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = (struct htb_sched *)sch->data;
	struct list_head *p;
	if (TC_H_MAJ(handle) != sch->handle) 
		return NULL;
	
	/* walk the single bucket selected by htb_hash */
	list_for_each (p,q->hash+htb_hash(handle)) {
		struct htb_class *cl = list_entry(p,struct htb_class,hlist);
		if (cl->classid == handle)
			return cl;
	}
	return NULL;
}
282
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
 * then finish and return direct queue.
 */
295 #define HTB_DIRECT (struct htb_class*)-1
296 static inline u32 htb_classid(struct htb_class *cl)
297 {
298         return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
299 }
300
/* classify skb into a leaf class; see block comment above for contract.
   On CONFIG_NET_CLS_ACT kernels *qres is set to NET_XMIT_DROP when an
   action shoots the packet. */
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
	struct htb_sched *q = (struct htb_sched *)sch->data;
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	   note that nfmark can be used too by attaching filter fw with no
	   rules in it */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;  /* X:0 (direct flow) selected */
	if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0) 
		return cl;

	/* follow the filter chain; each inner-class hit restarts the loop
	   with that class's own filter list */
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		/* map action verdicts: SHOT/QUEUED/STOLEN consume the skb */
		int terminal = 0;
		switch (result) {
		case TC_ACT_SHOT: /* Stop and kfree */
			*qres = NET_XMIT_DROP;
			terminal = 1;
			break;
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN: 
			terminal = 1;
			break;
		case TC_ACT_RECLASSIFY:  /* Things look good */
		case TC_ACT_OK:
		case TC_ACT_UNSPEC:
		default:
		break;
		}

		if (terminal) {
			kfree_skb(skb);
			return NULL;
		}
#else
#ifdef CONFIG_NET_CLS_POLICE
		if (result == TC_POLICE_SHOT)
			return NULL;
#endif
#endif
		if ((cl = (void*)res.class) == NULL) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;  /* X:0 (direct flow) */
			if ((cl = htb_find(res.classid,sch)) == NULL)
				break; /* filter selected invalid classid */
		}
		if (!cl->level)
			return cl; /* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle),q->defcls),sch);
	if (!cl || cl->level)
		return HTB_DIRECT; /* bad default .. this is safe bet */
	return cl;
}
365
#ifdef HTB_DEBUG
static void htb_next_rb_node(struct rb_node **n);
/* dump all classids of one rb-tree in ascending order; 'memb' names the
   rb_node member inside struct htb_class used by this tree */
#define HTB_DUMTREE(root,memb) if(root) { \
	struct rb_node *n = (root)->rb_node; \
	while (n->rb_left) n = n->rb_left; \
	while (n) { \
		struct htb_class *cl = rb_entry(n, struct htb_class, memb); \
		printk(" %x",cl->classid); htb_next_rb_node (&n); \
	} }

/* print full scheduler state (rows, then per-class token/feed info)
   to the kernel log; debug builds only */
static void htb_debug_dump (struct htb_sched *q)
{
	int i,p;
	printk(KERN_DEBUG "htb*g j=%lu lj=%lu\n",jiffies,q->jiffies);
	/* rows */
	for (i=TC_HTB_MAXDEPTH-1;i>=0;i--) {
		printk(KERN_DEBUG "htb*r%d m=%x",i,q->row_mask[i]);
		for (p=0;p<TC_HTB_NUMPRIO;p++) {
			if (!q->row[i][p].rb_node) continue;
			printk(" p%d:",p);
			HTB_DUMTREE(q->row[i]+p,node[p]);
		}
		printk("\n");
	}
	/* classes */
	for (i = 0; i < HTB_HSIZE; i++) {
		struct list_head *l;
		list_for_each (l,q->hash+i) {
			struct htb_class *cl = list_entry(l,struct htb_class,hlist);
			long diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
			printk(KERN_DEBUG "htb*c%x m=%d t=%ld c=%ld pq=%lu df=%ld ql=%d "
					"pa=%x f:",
				cl->classid,cl->cmode,cl->tokens,cl->ctokens,
				cl->pq_node.rb_color==-1?0:cl->pq_key,diff,
				cl->level?0:cl->un.leaf.q->q.qlen,cl->prio_activity);
			/* inner classes: show feed trees and current DRR ptr */
			if (cl->level)
			for (p=0;p<TC_HTB_NUMPRIO;p++) {
				if (!cl->un.inner.feed[p].rb_node) continue;
				printk(" p%d a=%x:",p,cl->un.inner.ptr[p]?rb_entry(cl->un.inner.ptr[p], struct htb_class,node[p])->classid:0);
				HTB_DUMTREE(cl->un.inner.feed+p,node[p]);
			}
			printk("\n");
		}
	}
}
#endif
/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree (HTB_ARGQ struct rb_root *root,
		struct htb_class *cl,int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;
	HTB_DBG(7,3,"htb_add_id_tree cl=%X prio=%d\n",cl->classid,prio);
#ifdef HTB_DEBUG
	/* rb_color == -1 marks "not linked" (see htb_safe_rb_erase) */
	if (cl->node[prio].rb_color != -1) { BUG_TRAP(0); return; }
	HTB_CHCL(cl);
	if (*p) {
		struct htb_class *x = rb_entry(*p,struct htb_class,node[prio]);
		HTB_CHCL(x);
	}
#endif
	/* standard rb-tree insert, ordered by classid */
	while (*p) {
		struct htb_class *c; parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);
		HTB_CHCL(c);
		if (cl->classid > c->classid)
			p = &parent->rb_right;
		else 
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}
443
/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key microseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree (struct htb_sched *q,
		struct htb_class *cl,long delay,int debug_hint)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
	HTB_DBG(7,3,"htb_add_wt cl=%X key=%lu\n",cl->classid,cl->pq_key);
#ifdef HTB_DEBUG
	if (cl->pq_node.rb_color != -1) { BUG_TRAP(0); return; }
	HTB_CHCL(cl);
	if ((delay <= 0 || delay > cl->mbuffer) && net_ratelimit())
		printk(KERN_ERR "HTB: suspicious delay in wait_tree d=%ld cl=%X h=%d\n",delay,cl->classid,debug_hint);
#endif
	/* never schedule for "now": bump by one jiffy so the event is
	   strictly in the future */
	cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
	if (cl->pq_key == q->jiffies)
		cl->pq_key++;

	/* update the nearest event cache */
	if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
		q->near_ev_cache[cl->level] = cl->pq_key;
	
	/* rb-tree insert ordered by pq_key (jiffies-wrap safe compare) */
	while (*p) {
		struct htb_class *c; parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (time_after_eq(cl->pq_key, c->pq_key))
			p = &parent->rb_right;
		else 
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}
481
/**
 * htb_next_rb_node - advance *n to its in-order successor
 *
 * When we are past the last key, *n becomes NULL.
 * Average complexity is 2 steps per call.
 */
static void htb_next_rb_node(struct rb_node **n)
{
	struct rb_node *succ = rb_next(*n);
	*n = succ;
}
492
/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q, 
		struct htb_class *cl,int mask)
{
	HTB_DBG(7,2,"htb_addrow cl=%X mask=%X rmask=%X\n",
			cl->classid,mask,q->row_mask[cl->level]);
	HTB_CHCL(cl);
	q->row_mask[cl->level] |= mask;
	/* iterate over set bits of mask; ffz(~mask) is the lowest set bit */
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(HTB_PASSQ q->row[cl->level]+prio,cl,prio);
	}
}
512
/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
		struct htb_class *cl,int mask)
{
	int m = 0;	/* collects prios whose row tree became empty */
	HTB_CHCL(cl);
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		/* don't leave the DRR cursor pointing at the node we erase */
		if (q->ptr[cl->level][prio] == cl->node+prio)
			htb_next_rb_node(q->ptr[cl->level]+prio);
		htb_safe_rb_erase(cl->node + prio,q->row[cl->level]+prio);
		if (!q->row[cl->level][prio].rb_node) 
			m |= 1 << prio;
	}
	HTB_DBG(7,2,"htb_delrow cl=%X mask=%X rmask=%X maskdel=%X\n",
			cl->classid,mask,q->row_mask[cl->level],m);
	q->row_mask[cl->level] &= ~m;
}
537
/**
 * htb_activate_prios - creates active classe's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating on. cl->cmode must be new 
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m,mask = cl->prio_activity;
	HTB_DBG(7,2,"htb_act_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
	HTB_CHCL(cl);

	/* walk up while this subtree can only borrow; stop at the first
	   ancestor that can send on its own (it goes into a row instead) */
	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		HTB_CHCL(p);
		m = mask; while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);
			
			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use so that
				   reset bit in mask as parent is already ok */
				mask &= ~(1 << prio);
			
			/* note: cl is added to the feed tree regardless;
			   only the upward propagation bit is cleared above */
			htb_add_to_id_tree(HTB_PASSQ p->un.inner.feed+prio,cl,prio);
		}
		HTB_DBG(7,3,"htb_act_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
				p->classid,p->prio_activity,mask,p->cmode);
		p->prio_activity |= mask;
		cl = p; p = cl->parent;
		HTB_CHCL(cl);
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q,cl,mask);
}
574
/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does 
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m,mask = cl->prio_activity;
	HTB_DBG(7,2,"htb_deact_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
	HTB_CHCL(cl);

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		/* rebuild mask: only prios whose parent feed became empty
		   propagate the deactivation further up */
		m = mask; mask = 0; 
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);
			
			/* advance parent's DRR cursor off the node we erase */
			if (p->un.inner.ptr[prio] == cl->node+prio)
				htb_next_rb_node(p->un.inner.ptr + prio);
			
			htb_safe_rb_erase(cl->node + prio,p->un.inner.feed + prio);
			
			if (!p->un.inner.feed[prio].rb_node) 
				mask |= 1 << prio;
		}
		HTB_DBG(7,3,"htb_deact_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
				p->classid,p->prio_activity,mask,p->cmode);
		p->prio_activity &= ~mask;
		cl = p; p = cl->parent;
		HTB_CHCL(cl);
	}
	if (cl->cmode == HTB_CAN_SEND && mask) 
		htb_remove_class_from_row(q,cl,mask);
}
612
/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state. 
 * Also it is worth to note that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of 
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static __inline__ enum htb_cmode 
htb_class_mode(struct htb_class *cl,long *diff)
{
    long toks;

    /* ceil bucket exhausted (with hysteresis) -> can't even borrow;
       *diff is set to tokens still missing (time until change) */
    if ((toks = (cl->ctokens + *diff)) < (
#if HTB_HYSTERESIS
	    cl->cmode != HTB_CANT_SEND ? -cl->cbuffer :
#endif
	    0)) {
	    *diff = -toks;
	    return HTB_CANT_SEND;
    }
    /* own rate bucket still has tokens (with hysteresis) -> can send */
    if ((toks = (cl->tokens + *diff)) >= (
#if HTB_HYSTERESIS
	    cl->cmode == HTB_CAN_SEND ? -cl->buffer :
#endif
	    0))
	    return HTB_CAN_SEND;

    /* own rate exhausted but ceil allows borrowing */
    *diff = -toks;
    return HTB_MAY_BORROW;
}
647
648 /**
649  * htb_change_class_mode - changes classe's mode
650  *
651  * This should be the only way how to change classe's mode under normal
652  * cirsumstances. Routine will update feed lists linkage, change mode
653  * and add class to the wait event queue if appropriate. New mode should
654  * be different from old one and cl->pq_key has to be valid if changing
655  * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
656  */
657 static void 
658 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
659
660         enum htb_cmode new_mode = htb_class_mode(cl,diff);
661         
662         HTB_CHCL(cl);
663         HTB_DBG(7,1,"htb_chging_clmode %d->%d cl=%X\n",cl->cmode,new_mode,cl->classid);
664
665         if (new_mode == cl->cmode)
666                 return; 
667         
668         if (cl->prio_activity) { /* not necessary: speed optimization */
669                 if (cl->cmode != HTB_CANT_SEND) 
670                         htb_deactivate_prios(q,cl);
671                 cl->cmode = new_mode;
672                 if (new_mode != HTB_CANT_SEND) 
673                         htb_activate_prios(q,cl);
674         } else 
675                 cl->cmode = new_mode;
676 }
677
/**
 * htb_activate - inserts leaf cl into appropriate active feeds 
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
{
	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
	HTB_CHCL(cl);
	/* already-active leaves (prio_activity != 0) are left alone */
	if (!cl->prio_activity) {
		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
		htb_activate_prios(q,cl);
		list_add_tail(&cl->un.leaf.drop_list,q->drops+cl->un.leaf.aprio);
	}
}
695
/**
 * htb_deactivate - remove leaf cl from active feeds 
 *
 * Make sure that leaf is active. In the other words it can't be called
 * with non-active leaf. It also removes class from the drop list.
 */
static __inline__ void 
htb_deactivate(struct htb_sched *q,struct htb_class *cl)
{
	BUG_TRAP(cl->prio_activity);
	HTB_CHCL(cl);
	htb_deactivate_prios(q,cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}
711
712 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
713 {
714     int ret = NET_XMIT_SUCCESS;
715     struct htb_sched *q = (struct htb_sched *)sch->data;
716     struct htb_class *cl = htb_classify(skb,sch,&ret);
717
718
719 #ifdef CONFIG_NET_CLS_ACT
720     if (cl == HTB_DIRECT ) {
721         if (q->direct_queue.qlen < q->direct_qlen ) {
722             __skb_queue_tail(&q->direct_queue, skb);
723             q->direct_pkts++;
724         }
725     } else if (!cl) {
726             if (NET_XMIT_DROP == ret) {
727                     sch->stats.drops++;
728             }
729             return ret;
730     }
731 #else
732     if (cl == HTB_DIRECT || !cl) {
733         /* enqueue to helper queue */
734         if (q->direct_queue.qlen < q->direct_qlen && cl) {
735             __skb_queue_tail(&q->direct_queue, skb);
736             q->direct_pkts++;
737         } else {
738             kfree_skb (skb);
739             sch->stats.drops++;
740             return NET_XMIT_DROP;
741         }
742     }
743 #endif
744     else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
745         sch->stats.drops++;
746         cl->stats.drops++;
747         return NET_XMIT_DROP;
748     } else {
749         cl->stats.packets++; cl->stats.bytes += skb->len;
750         htb_activate (q,cl);
751     }
752
753     sch->q.qlen++;
754     sch->stats.packets++; sch->stats.bytes += skb->len;
755     HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
756     return NET_XMIT_SUCCESS;
757 }
758
/* TODO: requeuing packet charges it to policers again !! */
/* requeue entry point: put skb back at the head of the direct queue or
   the leaf qdisc; when the direct queue is full the oldest (tail) packet
   is dropped to make room */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    int ret =  NET_XMIT_SUCCESS;
    struct htb_class *cl = htb_classify(skb,sch, &ret);
    struct sk_buff *tskb;

    if (cl == HTB_DIRECT || !cl) {
	/* enqueue to helper queue */
	if (q->direct_queue.qlen < q->direct_qlen && cl) {
	    __skb_queue_head(&q->direct_queue, skb);
	} else {
	    /* queue full: still requeue skb at head, drop the tail one */
	    __skb_queue_head(&q->direct_queue, skb);
	    tskb = __skb_dequeue_tail(&q->direct_queue);
	    kfree_skb (tskb);
	    sch->stats.drops++;
	    return NET_XMIT_CN; 
	}
    } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
	sch->stats.drops++;
	cl->stats.drops++;
	return NET_XMIT_DROP;
    } else 
	    htb_activate (q,cl);

    sch->q.qlen++;
    HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
    return NET_XMIT_SUCCESS;
}
789
/* delay-timer callback: clear the throttled flag and kick the device so
   dequeue is retried; wmb() orders the flag clear before rescheduling */
static void htb_timer(unsigned long arg)
{
    struct Qdisc *sch = (struct Qdisc*)arg;
    sch->flags &= ~TCQ_F_THROTTLED;
    wmb();
    netif_schedule(sch->dev);
}
797
#ifdef HTB_RATECM
/* EWMA step: fold accumulated delta D into rate R and reset D */
#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
/* once-a-second timer: recompute the byte/packet rate EWMA for the
   classes of one hash bucket per tick (round-robin over buckets) */
static void htb_rate_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc*)arg;
	struct htb_sched *q = (struct htb_sched *)sch->data;
	struct list_head *p;

	/* lock queue so that we can muck with it */
	HTB_QLOCK(sch);
	HTB_DBG(10,1,"htb_rttmr j=%ld\n",jiffies);

	/* re-arm ourselves for the next second */
	q->rttim.expires = jiffies + HZ;
	add_timer(&q->rttim);

	/* scan and recompute one bucket at time */
	if (++q->recmp_bucket >= HTB_HSIZE) 
		q->recmp_bucket = 0;
	list_for_each (p,q->hash+q->recmp_bucket) {
		struct htb_class *cl = list_entry(p,struct htb_class,hlist);
		HTB_DBG(10,2,"htb_rttmr_cl cl=%X sbyte=%lu spkt=%lu\n",
				cl->classid,cl->sum_bytes,cl->sum_packets);
		RT_GEN (cl->sum_bytes,cl->rate_bytes);
		RT_GEN (cl->sum_packets,cl->rate_packets);
	}
	HTB_QUNLOCK(sch);
}
#endif
826
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use more precise clock that event queue here.
 * In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
                int level,int bytes)
{       
        long toks,diff;
        enum htb_cmode old_mode;
        HTB_DBG(5,1,"htb_chrg_cl cl=%X lev=%d len=%d\n",cl->classid,level,bytes);

/* Token bucket update: refill bucket T by elapsed time "diff", clamp at
   burst size B, charge the packet at rate R, and floor the deficit at
   1-mbuffer so it cannot grow without bound. */
#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
        if (toks > cl->B) toks = cl->B; \
        toks -= L2T(cl, cl->R, bytes); \
        if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
        cl->T = toks

        /* walk from the dequeued leaf up to the root */
        while (cl) {
                HTB_CHCL(cl);
                /* time elapsed since this class was last updated */
                diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
#ifdef HTB_DEBUG
                if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
                        if (net_ratelimit())
                                printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
                                       cl->classid, diff,
                                       (unsigned long long) q->now,
                                       (unsigned long long) cl->t_c,
                                       q->jiffies);
                        diff = 1000;
                }
#endif
                if (cl->level >= level) {
                        /* at or above the lending level: charge rate bucket */
                        if (cl->level == level) cl->xstats.lends++;
                        HTB_ACCNT (tokens,buffer,rate);
                } else {
                        /* below the lending level: this class borrowed, so
                           its rate bucket is only refilled, not charged */
                        cl->xstats.borrows++;
                        cl->tokens += diff; /* we moved t_c; update tokens */
                }
                /* ceil bucket is charged at every level */
                HTB_ACCNT (ctokens,cbuffer,ceil);
                cl->t_c = q->now;
                HTB_DBG(5,2,"htb_chrg_clp cl=%X diff=%ld tok=%ld ctok=%ld\n",cl->classid,diff,cl->tokens,cl->ctokens);

                /* token change may flip the class mode; keep the wait
                   (event) queue in sync with the new mode */
                old_mode = cl->cmode; diff = 0;
                htb_change_class_mode(q,cl,&diff);
                if (old_mode != cl->cmode) {
                        if (old_mode != HTB_CAN_SEND)
                                htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
                        if (cl->cmode != HTB_CAN_SEND)
                                htb_add_to_wait_tree (q,cl,diff,1);
                }
                
#ifdef HTB_RATECM
                /* update rate counters */
                cl->sum_bytes += bytes; cl->sum_packets++;
#endif

                /* update byte stats except for leaves which are already updated */
                if (cl->level) {
                        cl->stats.bytes += bytes;
                        cl->stats.packets++;
                }
                cl = cl->parent;
        }
}
898
/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns jiffies to
 * next pending event (0 for no event in pq).
 * Note: Applied are events whose have cl->pq_key <= jiffies.
 */
static long htb_do_events(struct htb_sched *q,int level)
{
        int i;
        HTB_DBG(8,1,"htb_do_events l=%d root=%p rmask=%X\n",
                        level,q->wait_pq[level].rb_node,q->row_mask[level]);
        /* bounded loop: apply at most 500 events per call */
        for (i = 0; i < 500; i++) {
                struct htb_class *cl;
                long diff;
                struct rb_node *p = q->wait_pq[level].rb_node;
                if (!p) return 0;       /* event queue empty */
                /* leftmost node = event with the smallest pq_key */
                while (p->rb_left) p = p->rb_left;

                cl = rb_entry(p, struct htb_class, pq_node);
                if (time_after(cl->pq_key, q->jiffies)) {
                        /* nothing due yet; report delay until next event */
                        HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - q->jiffies);
                        return cl->pq_key - q->jiffies;
                }
                htb_safe_rb_erase(p,q->wait_pq+level);
                diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
#ifdef HTB_DEBUG
                if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
                        if (net_ratelimit())
                                printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
                                       cl->classid, diff,
                                       (unsigned long long) q->now,
                                       (unsigned long long) cl->t_c,
                                       q->jiffies);
                        diff = 1000;
                }
#endif
                htb_change_class_mode(q,cl,&diff);
                /* still not allowed to send: re-add with a new deadline */
                if (cl->cmode != HTB_CAN_SEND)
                        htb_add_to_wait_tree (q,cl,diff,2);
        }
        /* too much pending work; back off for HZ/10 to avoid hogging CPU */
        if (net_ratelimit())
                printk(KERN_WARNING "htb: too many events !\n");
        return HZ/10;
}
944
/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find leaf where current feed pointers points to.
 */
static struct htb_class *
htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr)
{
        int i;
        /* explicit descent stack: one frame per tree level, holding the
           subtree root and the per-level round-robin walk pointer */
        struct {
                struct rb_node *root;
                struct rb_node **pptr;
        } stk[TC_HTB_MAXDEPTH],*sp = stk;
        
        BUG_TRAP(tree->rb_node);
        sp->root = tree->rb_node;
        sp->pptr = pptr;

        /* bounded loop guards against walking a corrupted tree forever */
        for (i = 0; i < 65535; i++) {
                if (!*sp->pptr) { /* we are at right end; rewind & go up */
                        *sp->pptr = sp->root;
                        while ((*sp->pptr)->rb_left) 
                                *sp->pptr = (*sp->pptr)->rb_left;
                        if (sp > stk) {
                                sp--;
                                BUG_TRAP(*sp->pptr); if(!*sp->pptr) return NULL;
                                htb_next_rb_node (sp->pptr);
                        }
                } else {
                        struct htb_class *cl;
                        cl = rb_entry(*sp->pptr,struct htb_class,node[prio]);
                        HTB_CHCL(cl);
                        if (!cl->level) 
                                return cl;      /* found a leaf */
                        /* inner class: descend into its feed tree */
                        (++sp)->root = cl->un.inner.feed[prio].rb_node;
                        sp->pptr = cl->un.inner.ptr+prio;
                }
        }
        BUG_TRAP(0);    /* tree deeper/larger than believed possible */
        return NULL;
}
986
/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *
htb_dequeue_tree(struct htb_sched *q,int prio,int level)
{
        struct sk_buff *skb = NULL;
        struct htb_class *cl,*start;
        /* look initial class up in the row */
        start = cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
        
        do {
next:
                BUG_TRAP(cl); 
                if (!cl) return NULL;
                HTB_DBG(4,1,"htb_deq_tr prio=%d lev=%d cl=%X defic=%d\n",
                                prio,level,cl->classid,cl->un.leaf.deficit[level]);

                /* class can be empty - it is unlikely but can be true if leaf
                   qdisc drops packets in enqueue routine or if someone used
                   graft operation on the leaf since last dequeue; 
                   simply deactivate and skip such class */
                if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
                        struct htb_class *next;
                        htb_deactivate(q,cl);

                        /* row/level might become empty */
                        if ((q->row_mask[level] & (1 << prio)) == 0)
                                return NULL; 
                        
                        next = htb_lookup_leaf (q->row[level]+prio,
                                        prio,q->ptr[level]+prio);
                        if (cl == start) /* fix start if we just deleted it */
                                start = next;
                        cl = next;
                        goto next;
                }
        
                if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL)) 
                        break;
                /* non-empty leaf refused to hand over a packet: it is not
                   work-conserving; warn once per class and move on */
                if (!cl->warned) {
                        printk(KERN_WARNING "htb: class %X isn't work conserving ?!\n",cl->classid);
                        cl->warned = 1;
                }
                q->nwc_hit++;
                htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
                cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
        } while (cl != start);  /* stop after one full round over the row */

        if (likely(skb != NULL)) {
                /* DRR accounting: spend deficit; once exhausted, replenish
                   by quantum and advance the round-robin pointer */
                if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
                        HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
                                level?cl->parent->un.inner.ptr[prio]:q->ptr[0][prio],cl->un.leaf.quantum);
                        cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
                        htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
                }
                /* this used to be after charge_class but this constellation
                   gives us slightly better performance */
                if (!cl->un.leaf.q->q.qlen)
                        htb_deactivate (q,cl);
                htb_charge_class (q,cl,level,skb->len);
        }
        return skb;
}
1050
1051 static void htb_delay_by(struct Qdisc *sch,long delay)
1052 {
1053         struct htb_sched *q = (struct htb_sched *)sch->data;
1054         if (delay <= 0) delay = 1;
1055         if (unlikely(delay > 5*HZ)) {
1056                 if (net_ratelimit())
1057                         printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
1058                 delay = 5*HZ;
1059         }
1060         /* why don't use jiffies here ? because expires can be in past */
1061         mod_timer(&q->timer, q->jiffies + delay);
1062         sch->flags |= TCQ_F_THROTTLED;
1063         sch->stats.overlimits++;
1064         HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
1065 }
1066
/* Main dequeue entry point: serve the direct queue first, then scan
 * levels bottom-up and priorities by row mask; if nothing is eligible,
 * throttle until the nearest pending event. Returns a packet or NULL. */
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb = NULL;
        struct htb_sched *q = (struct htb_sched *)sch->data;
        int level;
        long min_delay;
#ifdef HTB_DEBUG
        int evs_used = 0;
#endif

        /* cache jiffies so the whole pass uses one consistent timestamp */
        q->jiffies = jiffies;
        HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
                        sch->q.qlen);

        /* try to dequeue direct packets as high prio (!) to minimize cpu work */
        if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
                sch->flags &= ~TCQ_F_THROTTLED;
                sch->q.qlen--;
                return skb;
        }

        if (!sch->q.qlen) goto fin;     /* nothing queued anywhere */
        PSCHED_GET_TIME(q->now);

        min_delay = LONG_MAX;
        q->nwc_hit = 0;
        /* lower levels first: prefer classes that need no borrowing */
        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
                int m;
                long delay;
                if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
                        delay = htb_do_events(q,level);
                        q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
#ifdef HTB_DEBUG
                        evs_used++;
#endif
                } else
                        delay = q->near_ev_cache[level] - q->jiffies;   
                
                if (delay && min_delay > delay) 
                        min_delay = delay;
                /* visit every priority whose bit is set in this row */
                m = ~q->row_mask[level];
                while (m != (int)(-1)) {
                        int prio = ffz (m);
                        m |= 1 << prio;
                        skb = htb_dequeue_tree(q,prio,level);
                        if (likely(skb != NULL)) {
                                sch->q.qlen--;
                                sch->flags &= ~TCQ_F_THROTTLED;
                                goto fin;
                        }
                }
        }
#ifdef HTB_DEBUG
        if (!q->nwc_hit && min_delay >= 10*HZ && net_ratelimit()) {
                if (min_delay == LONG_MAX) {
                        printk(KERN_ERR "HTB: dequeue bug (%d,%lu,%lu), report it please !\n",
                                        evs_used,q->jiffies,jiffies);
                        htb_debug_dump(q);
                } else 
                        printk(KERN_WARNING "HTB: mindelay=%ld, some class has "
                                        "too small rate\n",min_delay);
        }
#endif
        /* nothing eligible now; sleep until the nearest event (capped) */
        htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
fin:
        HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,q->jiffies,skb);
        return skb;
}
1136
1137 /* try to drop from each class (by prio) until one succeed */
1138 static unsigned int htb_drop(struct Qdisc* sch)
1139 {
1140         struct htb_sched *q = (struct htb_sched *)sch->data;
1141         int prio;
1142
1143         for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
1144                 struct list_head *p;
1145                 list_for_each (p,q->drops+prio) {
1146                         struct htb_class *cl = list_entry(p, struct htb_class,
1147                                                           un.leaf.drop_list);
1148                         unsigned int len;
1149                         if (cl->un.leaf.q->ops->drop && 
1150                                 (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
1151                                 sch->q.qlen--;
1152                                 if (!cl->un.leaf.q->q.qlen)
1153                                         htb_deactivate (q,cl);
1154                                 return len;
1155                         }
1156                 }
1157         }
1158         return 0;
1159 }
1160
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc* sch)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        int i;
        HTB_DBG(0,1,"htb_reset sch=%p, handle=%X\n",sch,sch->handle);

        /* put every class in every hash bucket back to idle CAN_SEND */
        for (i = 0; i < HTB_HSIZE; i++) {
                struct list_head *p;
                list_for_each (p,q->hash+i) {
                        struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                        if (cl->level)
                                memset(&cl->un.inner,0,sizeof(cl->un.inner));
                        else {
                                if (cl->un.leaf.q) 
                                        qdisc_reset(cl->un.leaf.q);
                                INIT_LIST_HEAD(&cl->un.leaf.drop_list);
                        }
                        cl->prio_activity = 0;
                        cl->cmode = HTB_CAN_SEND;
#ifdef HTB_DEBUG
                        /* poison rb nodes so stale tree links get caught */
                        cl->pq_node.rb_color = -1;
                        memset(cl->node,255,sizeof(cl->node));
#endif

                }
        }
        sch->flags &= ~TCQ_F_THROTTLED;
        del_timer(&q->timer);
        __skb_queue_purge(&q->direct_queue);
        sch->q.qlen = 0;
        /* wipe the self-feed rows, event queues and DRR walk pointers */
        memset(q->row,0,sizeof(q->row));
        memset(q->row_mask,0,sizeof(q->row_mask));
        memset(q->wait_pq,0,sizeof(q->wait_pq));
        memset(q->ptr,0,sizeof(q->ptr));
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops+i);
}
1200
/* Qdisc init: parse the TCA_HTB_INIT global options, check the tc tool's
 * HTB version against the kernel's, then set up hash buckets, drop
 * lists, the watchdog timer and the direct queue.
 * Returns 0 on success, -EINVAL on malformed or incompatible options. */
static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
        struct htb_sched *q = (struct htb_sched*)sch->data;
        struct rtattr *tb[TCA_HTB_INIT];
        struct tc_htb_glob *gopt;
        int i;
#ifdef HTB_DEBUG
        printk(KERN_INFO "HTB init, kernel part version %d.%d\n",
                          HTB_VER >> 16,HTB_VER & 0xffff);
#endif
        if (!opt || rtattr_parse(tb, TCA_HTB_INIT, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
                        tb[TCA_HTB_INIT-1] == NULL ||
                        RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt)) {
                printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
                return -EINVAL;
        }
        gopt = RTA_DATA(tb[TCA_HTB_INIT-1]);
        /* userspace tool's major version must match the kernel part */
        if (gopt->version != HTB_VER >> 16) {
                printk(KERN_ERR "HTB: need tc/htb version %d (minor is %d), you have %d\n",
                                HTB_VER >> 16,HTB_VER & 0xffff,gopt->version);
                return -EINVAL;
        }
        memset(q,0,sizeof(*q));
        q->debug = gopt->debug;
        HTB_DBG(0,1,"htb_init sch=%p handle=%X r2q=%d\n",sch,sch->handle,gopt->rate2quantum);

        INIT_LIST_HEAD(&q->root);
        for (i = 0; i < HTB_HSIZE; i++)
                INIT_LIST_HEAD(q->hash+i);
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops+i);

        init_timer(&q->timer);
        skb_queue_head_init(&q->direct_queue);

        q->direct_qlen = sch->dev->tx_queue_len;
        if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
                q->direct_qlen = 2;
        q->timer.function = htb_timer;
        q->timer.data = (unsigned long)sch;

#ifdef HTB_RATECM
        /* start the 1/sec rate estimator timer */
        init_timer(&q->rttim);
        q->rttim.function = htb_rate_timer;
        q->rttim.data = (unsigned long)sch;
        q->rttim.expires = jiffies + HZ;
        add_timer(&q->rttim);
#endif
        if ((q->rate2quantum = gopt->rate2quantum) < 1)
                q->rate2quantum = 1;
        q->defcls = gopt->defcls;

        return 0;
}
1255
1256 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1257 {
1258         struct htb_sched *q = (struct htb_sched*)sch->data;
1259         unsigned char    *b = skb->tail;
1260         struct rtattr *rta;
1261         struct tc_htb_glob gopt;
1262         HTB_DBG(0,1,"htb_dump sch=%p, handle=%X\n",sch,sch->handle);
1263         /* stats */
1264         HTB_QLOCK(sch);
1265         gopt.direct_pkts = q->direct_pkts;
1266
1267 #ifdef HTB_DEBUG
1268         if (HTB_DBG_COND(0,2))
1269                 htb_debug_dump(q);
1270 #endif
1271         gopt.version = HTB_VER;
1272         gopt.rate2quantum = q->rate2quantum;
1273         gopt.defcls = q->defcls;
1274         gopt.debug = q->debug;
1275         rta = (struct rtattr*)b;
1276         RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1277         RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
1278         rta->rta_len = skb->tail - b;
1279         sch->stats.qlen = sch->q.qlen;
1280         RTA_PUT(skb, TCA_STATS, sizeof(sch->stats), &sch->stats);
1281         HTB_QUNLOCK(sch);
1282         return skb->len;
1283 rtattr_failure:
1284         HTB_QUNLOCK(sch);
1285         skb_trim(skb, skb->tail - skb->data);
1286         return -1;
1287 }
1288
/* Dump one class: parent/handle info, HTB parameters (rate, ceil,
 * buffers, quantum, prio, level) and statistics.
 * Returns skb->len on success, -1 on attribute overflow (the skb is
 * rolled back to where this dump started). */
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
        struct sk_buff *skb, struct tcmsg *tcm)
{
#ifdef HTB_DEBUG
        struct htb_sched *q = (struct htb_sched*)sch->data;
#endif
        struct htb_class *cl = (struct htb_class*)arg;
        unsigned char    *b = skb->tail;
        struct rtattr *rta;
        struct tc_htb_opt opt;

        HTB_DBG(0,1,"htb_dump_class handle=%X clid=%X\n",sch->handle,cl->classid);

        HTB_QLOCK(sch);
        tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
        tcm->tcm_handle = cl->classid;
        if (!cl->level && cl->un.leaf.q) {
                /* leaf: report the attached qdisc and its backlog */
                tcm->tcm_info = cl->un.leaf.q->handle;
                cl->stats.qlen = cl->un.leaf.q->q.qlen;
        }

        rta = (struct rtattr*)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

        memset (&opt,0,sizeof(opt));

        opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
        opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
        opt.quantum = cl->un.leaf.quantum; opt.prio = cl->un.leaf.prio;
        opt.level = cl->level; 
        RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
        rta->rta_len = skb->tail - b;   /* fix up nested attribute length */

#ifdef HTB_RATECM
        /* scale EWMA accumulators down to bytes/s and packets/s */
        cl->stats.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE);
        cl->stats.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE);
#endif

        cl->xstats.tokens = cl->tokens;
        cl->xstats.ctokens = cl->ctokens;
        RTA_PUT(skb, TCA_STATS, sizeof(cl->stats), &cl->stats);
        RTA_PUT(skb, TCA_XSTATS, sizeof(cl->xstats), &cl->xstats);
        HTB_QUNLOCK(sch);
        return skb->len;
rtattr_failure:
        /* roll the skb back to where this dump started */
        HTB_QUNLOCK(sch);
        skb_trim(skb, b - skb->data);
        return -1;
}
1338
/* Graft qdisc "new" onto leaf class "arg", returning the previous qdisc
 * in *old. A NULL "new" is replaced with a default pfifo. Only leaf
 * classes may carry a qdisc: returns -ENOENT for inner classes/NULL,
 * -ENOBUFS if the default qdisc cannot be allocated, 0 on success. */
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
        struct Qdisc **old)
{
        struct htb_class *cl = (struct htb_class*)arg;

        if (cl && !cl->level) {
                if (new == NULL && (new = qdisc_create_dflt(sch->dev, 
                                        &pfifo_qdisc_ops)) == NULL)
                                        return -ENOBUFS;
                sch_tree_lock(sch);
                /* atomically swap in the new leaf qdisc */
                if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
                        if (cl->prio_activity)
                                htb_deactivate ((struct htb_sched*)sch->data,cl);

                        /* TODO: is it correct ? Why CBQ doesn't do it ? */
                        sch->q.qlen -= (*old)->q.qlen;  
                        qdisc_reset(*old);
                }
                sch_tree_unlock(sch);
                return 0;
        }
        return -ENOENT;
}
1362
1363 static struct Qdisc * htb_leaf(struct Qdisc *sch, unsigned long arg)
1364 {
1365         struct htb_class *cl = (struct htb_class*)arg;
1366         return (cl && !cl->level) ? cl->un.leaf.q : NULL;
1367 }
1368
1369 static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1370 {
1371 #ifdef HTB_DEBUG
1372         struct htb_sched *q = (struct htb_sched *)sch->data;
1373 #endif
1374         struct htb_class *cl = htb_find(classid,sch);
1375         HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
1376         if (cl) 
1377                 cl->refcnt++;
1378         return (unsigned long)cl;
1379 }
1380
1381 static void htb_destroy_filters(struct tcf_proto **fl)
1382 {
1383         struct tcf_proto *tp;
1384
1385         while ((tp = *fl) != NULL) {
1386                 *fl = tp->next;
1387                 tcf_destroy(tp);
1388         }
1389 }
1390
/* Free class "cl" and, recursively, all of its children. Removes the
 * class from the hash, sibling list, active lists and the event queue
 * before kfree. Caller holds the queue lock. */
static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        HTB_DBG(0,1,"htb_destrycls clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
        if (!cl->level) {
                /* leaf: tear down the attached qdisc and its backlog */
                BUG_TRAP(cl->un.leaf.q);
                sch->q.qlen -= cl->un.leaf.q->q.qlen;
                qdisc_destroy(cl->un.leaf.q);
        }
        qdisc_put_rtab(cl->rate);
        qdisc_put_rtab(cl->ceil);
        
#ifdef CONFIG_NET_ESTIMATOR
        qdisc_kill_estimator(&cl->stats);
#endif
        htb_destroy_filters (&cl->filter_list);
        
        /* recurse into children before unlinking ourselves */
        while (!list_empty(&cl->children)) 
                htb_destroy_class (sch,list_entry(cl->children.next,
                                        struct htb_class,sibling));

        /* note: this delete may happen twice (see htb_delete) */
        list_del(&cl->hlist);
        list_del(&cl->sibling);
        
        if (cl->prio_activity)
                htb_deactivate (q,cl);
        
        if (cl->cmode != HTB_CAN_SEND)
                htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
        
        kfree(cl);
}
1424
/* always called under BH & queue lock */
/* Tear down the whole qdisc: stop timers, kill filters, destroy every
 * class tree and purge the direct queue. */
static void htb_destroy(struct Qdisc* sch)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        HTB_DBG(0,1,"htb_destroy q=%p\n",q);

        del_timer_sync (&q->timer);
#ifdef HTB_RATECM
        del_timer_sync (&q->rttim);
#endif
        /* This line used to be after htb_destroy_class call below
           and surprisingly it worked in 2.4. But it must precede it 
           because filter need its target class alive to be able to call
           unbind_filter on it (without Oops). */
        htb_destroy_filters(&q->filter_list);
        
        while (!list_empty(&q->root)) 
                htb_destroy_class (sch,list_entry(q->root.next,
                                        struct htb_class,sibling));

        __skb_queue_purge(&q->direct_queue);
}
1447
/* Delete class "arg". Refuses with -EBUSY while the class still has
 * children or is bound by filters; the class memory itself is freed
 * only when the last reference is dropped. */
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        struct htb_class *cl = (struct htb_class*)arg;
        HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);

        // TODO: why don't allow to delete subtree ? references ? does
        // tc subsys quarantee us that in htb_destroy it holds no class
        // refs so that we can remove children safely there ?
        if (!list_empty(&cl->children) || cl->filter_cnt)
                return -EBUSY;
        
        sch_tree_lock(sch);
        
        /* delete from hash and active; remainder in destroy_class */
        list_del_init(&cl->hlist);
        if (cl->prio_activity)
                htb_deactivate (q,cl);

        if (--cl->refcnt == 0)
                htb_destroy_class(sch,cl);

        sch_tree_unlock(sch);
        return 0;
}
1473
1474 static void htb_put(struct Qdisc *sch, unsigned long arg)
1475 {
1476 #ifdef HTB_DEBUG
1477         struct htb_sched *q = (struct htb_sched *)sch->data;
1478 #endif
1479         struct htb_class *cl = (struct htb_class*)arg;
1480         HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
1481
1482         if (--cl->refcnt == 0)
1483                 htb_destroy_class(sch,cl);
1484 }
1485
1486 static int htb_change_class(struct Qdisc *sch, u32 classid, 
1487                 u32 parentid, struct rtattr **tca, unsigned long *arg)
1488 {
1489         int err = -EINVAL;
1490         struct htb_sched *q = (struct htb_sched *)sch->data;
1491         struct htb_class *cl = (struct htb_class*)*arg,*parent;
1492         struct rtattr *opt = tca[TCA_OPTIONS-1];
1493         struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1494         struct rtattr *tb[TCA_HTB_RTAB];
1495         struct tc_htb_opt *hopt;
1496
1497         /* extract all subattrs from opt attr */
1498         if (!opt || rtattr_parse(tb, TCA_HTB_RTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
1499                         tb[TCA_HTB_PARMS-1] == NULL ||
1500                         RTA_PAYLOAD(tb[TCA_HTB_PARMS-1]) < sizeof(*hopt))
1501                 goto failure;
1502         
1503         parent = parentid == TC_H_ROOT ? NULL : htb_find (parentid,sch);
1504
1505         hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
1506         HTB_DBG(0,1,"htb_chg cl=%p(%X), clid=%X, parid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,classid,parentid,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
1507         rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
1508         ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
1509         if (!rtab || !ctab) goto failure;
1510
1511         if (!cl) { /* new class */
1512                 struct Qdisc *new_q;
1513                 /* check for valid classid */
1514                 if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
1515                         goto failure;
1516
1517                 /* check maximal depth */
1518                 if (parent && parent->parent && parent->parent->level < 2) {
1519                         printk(KERN_ERR "htb: tree is too deep\n");
1520                         goto failure;
1521                 }
1522                 err = -ENOBUFS;
1523                 if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
1524                         goto failure;
1525                 
1526                 memset(cl, 0, sizeof(*cl));
1527                 cl->refcnt = 1;
1528                 INIT_LIST_HEAD(&cl->sibling);
1529                 INIT_LIST_HEAD(&cl->hlist);
1530                 INIT_LIST_HEAD(&cl->children);
1531                 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1532 #ifdef HTB_DEBUG
1533                 cl->magic = HTB_CMAGIC;
1534 #endif
1535
1536                 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1537                    so that can't be used inside of sch_tree_lock
1538                    -- thanks to Karlis Peisenieks */
1539                 new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
1540                 sch_tree_lock(sch);
1541                 if (parent && !parent->level) {
1542                         /* turn parent into inner node */
1543                         sch->q.qlen -= parent->un.leaf.q->q.qlen;
1544                         qdisc_destroy (parent->un.leaf.q);
1545                         if (parent->prio_activity) 
1546                                 htb_deactivate (q,parent);
1547
1548                         /* remove from evt list because of level change */
1549                         if (parent->cmode != HTB_CAN_SEND) {
1550                                 htb_safe_rb_erase(&parent->pq_node,q->wait_pq /*+0*/);
1551                                 parent->cmode = HTB_CAN_SEND;
1552                         }
1553                         parent->level = (parent->parent ? parent->parent->level
1554                                         : TC_HTB_MAXDEPTH) - 1;
1555                         memset (&parent->un.inner,0,sizeof(parent->un.inner));
1556                 }
1557                 /* leaf (we) needs elementary qdisc */
1558                 cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1559
1560                 cl->classid = classid; cl->parent = parent;
1561
1562                 /* set class to be in HTB_CAN_SEND state */
1563                 cl->tokens = hopt->buffer;
1564                 cl->ctokens = hopt->cbuffer;
1565                 cl->mbuffer = 60000000; /* 1min */
1566                 PSCHED_GET_TIME(cl->t_c);
1567                 cl->cmode = HTB_CAN_SEND;
1568
1569                 /* attach to the hash list and parent's family */
1570                 list_add_tail(&cl->hlist, q->hash+htb_hash(classid));
1571                 list_add_tail(&cl->sibling, parent ? &parent->children : &q->root);
1572 #ifdef HTB_DEBUG
1573                 { 
1574                         int i;
1575                         for (i = 0; i < TC_HTB_NUMPRIO; i++) cl->node[i].rb_color = -1;
1576                         cl->pq_node.rb_color = -1;
1577                 }
1578 #endif
1579         } else sch_tree_lock(sch);
1580
1581         /* it used to be a nasty bug here, we have to check that node
1582            is really leaf before changing cl->un.leaf ! */
1583         if (!cl->level) {
1584                 cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
1585                 if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
1586                         printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
1587                         cl->un.leaf.quantum = 1000;
1588                 }
1589                 if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
1590                         printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
1591                         cl->un.leaf.quantum = 200000;
1592                 }
1593                 if (hopt->quantum)
1594                         cl->un.leaf.quantum = hopt->quantum;
1595                 if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
1596                         cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
1597         }
1598
1599         cl->buffer = hopt->buffer;
1600         cl->cbuffer = hopt->cbuffer;
1601         if (cl->rate) qdisc_put_rtab(cl->rate); cl->rate = rtab;
1602         if (cl->ceil) qdisc_put_rtab(cl->ceil); cl->ceil = ctab;
1603         sch_tree_unlock(sch);
1604
1605         *arg = (unsigned long)cl;
1606         return 0;
1607
1608 failure:
1609         if (rtab) qdisc_put_rtab(rtab);
1610         if (ctab) qdisc_put_rtab(ctab);
1611         return err;
1612 }
1613
1614 static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
1615 {
1616         struct htb_sched *q = (struct htb_sched *)sch->data;
1617         struct htb_class *cl = (struct htb_class *)arg;
1618         struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
1619         HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
1620         return fl;
1621 }
1622
1623 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1624         u32 classid)
1625 {
1626         struct htb_sched *q = (struct htb_sched *)sch->data;
1627         struct htb_class *cl = htb_find (classid,sch);
1628         HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
1629         /*if (cl && !cl->level) return 0;
1630           The line above used to be there to prevent attaching filters to 
1631           leaves. But at least tc_index filter uses this just to get class 
1632           for other reasons so that we have to allow for it.
1633           ----
1634           19.6.2002 As Werner explained it is ok - bind filter is just
1635           another way to "lock" the class - unlike "get" this lock can
1636           be broken by class during destroy IIUC.
1637          */
1638         if (cl) 
1639                 cl->filter_cnt++; 
1640         else 
1641                 q->filter_cnt++;
1642         return (unsigned long)cl;
1643 }
1644
1645 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1646 {
1647         struct htb_sched *q = (struct htb_sched *)sch->data;
1648         struct htb_class *cl = (struct htb_class *)arg;
1649         HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
1650         if (cl) 
1651                 cl->filter_cnt--; 
1652         else 
1653                 q->filter_cnt--;
1654 }
1655
1656 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1657 {
1658         struct htb_sched *q = (struct htb_sched *)sch->data;
1659         int i;
1660
1661         if (arg->stop)
1662                 return;
1663
1664         for (i = 0; i < HTB_HSIZE; i++) {
1665                 struct list_head *p;
1666                 list_for_each (p,q->hash+i) {
1667                         struct htb_class *cl = list_entry(p,struct htb_class,hlist);
1668                         if (arg->count < arg->skip) {
1669                                 arg->count++;
1670                                 continue;
1671                         }
1672                         if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1673                                 arg->stop = 1;
1674                                 return;
1675                         }
1676                         arg->count++;
1677                 }
1678         }
1679 }
1680
/* Class-level operations exported to the qdisc core: these implement
 * "tc class add/change/del/show ..." and filter (de)registration for HTB. */
static struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.get		=	htb_get,
	.put		=	htb_put,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_chain	=	htb_find_tcf,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
};
1694
/* Top-level qdisc operations registered with the packet scheduler core.
 * .change is intentionally NULL: qdisc-level parameters cannot be changed
 * after creation in this version. */
static struct Qdisc_ops htb_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.requeue	=	htb_requeue,
	.drop		=	htb_drop,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.change		=	NULL /* htb_change */,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};
1711
1712 static int __init htb_module_init(void)
1713 {
1714     return register_qdisc(&htb_qdisc_ops);
1715 }
1716 static void __exit htb_module_exit(void) 
1717 {
1718     unregister_qdisc(&htb_qdisc_ops);
1719 }
/* Hook the init/exit handlers into the module loader. */
module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");