#define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
#define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
#define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
-#define HTB_VER 0x30010 /* major must be matched with number suplied by TC as version */
+#define HTB_VER 0x30011 /* major must be matched with number supplied by TC as version */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
/* general class parameters */
u32 classid;
struct tc_stats stats; /* generic stats */
+ spinlock_t *stats_lock;
struct tc_htb_xstats xstats;/* our special stats */
int refcnt; /* usage count of this class */
struct htb_class_inner {
struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
+ /* When a class changes from state 1->2 and disconnects from
+ its parent's feed, the ptr value is lost and iteration would
+ restart from the first child. Here we store the classid of
+ the last valid ptr (used when ptr is NULL). */
+ u32 last_ptr_id[TC_HTB_NUMPRIO];
} inner;
} un;
struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
int row_mask[TC_HTB_MAXDEPTH];
struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
+ u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
/* self wait list - roots of wait PQs per row */
struct rb_root wait_pq[TC_HTB_MAXDEPTH];
/* find class in global hash table using given handle */
static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct list_head *p;
if (TC_H_MAJ(handle) != sch->handle)
return NULL;
return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}
-static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch)
+static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl;
struct tcf_result res;
struct tcf_proto *tcf;
tcf = q->filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ int terminal = 0;
+ switch (result) {
+ case TC_ACT_SHOT: /* Stop and kfree */
+ *qres = NET_XMIT_DROP;
+ terminal = 1;
+ break;
+ case TC_ACT_QUEUED:
+ case TC_ACT_STOLEN:
+ terminal = 1;
+ break;
+ case TC_ACT_RECLASSIFY: /* Things look good */
+ case TC_ACT_OK:
+ case TC_ACT_UNSPEC:
+ default:
+ break;
+ }
+
+ if (terminal) {
+ kfree_skb(skb);
+ return NULL;
+ }
+#else
#ifdef CONFIG_NET_CLS_POLICE
if (result == TC_POLICE_SHOT)
return NULL;
+#endif
#endif
if ((cl = (void*)res.class) == NULL) {
if (res.classid == sch->handle)
struct list_head *l;
list_for_each (l,q->hash+i) {
struct htb_class *cl = list_entry(l,struct htb_class,hlist);
- long diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
+ long diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
printk(KERN_DEBUG "htb*c%x m=%d t=%ld c=%ld pq=%lu df=%ld ql=%d "
"pa=%x f:",
cl->classid,cl->cmode,cl->tokens,cl->ctokens,
int prio = ffz(~m);
m &= ~(1 << prio);
- if (p->un.inner.ptr[prio] == cl->node+prio)
- htb_next_rb_node(p->un.inner.ptr + prio);
+ if (p->un.inner.ptr[prio] == cl->node+prio) {
+ /* we are removing a child that the parent's feed
+ pointer points to - forget the pointer but remember
+ its classid */
+ p->un.inner.last_ptr_id[prio] = cl->classid;
+ p->un.inner.ptr[prio] = NULL;
+ }
htb_safe_rb_erase(cl->node + prio,p->un.inner.feed + prio);
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
- struct htb_class *cl = htb_classify(skb,sch);
+ int ret = NET_XMIT_SUCCESS;
+ struct htb_sched *q = qdisc_priv(sch);
+ struct htb_class *cl = htb_classify(skb,sch,&ret);
+
+#ifdef CONFIG_NET_CLS_ACT
+ if (cl == HTB_DIRECT ) {
+ if (q->direct_queue.qlen < q->direct_qlen ) {
+ __skb_queue_tail(&q->direct_queue, skb);
+ q->direct_pkts++;
+ }
+ } else if (!cl) {
+ if (NET_XMIT_DROP == ret) {
+ sch->stats.drops++;
+ }
+ return ret;
+ }
+#else
if (cl == HTB_DIRECT || !cl) {
/* enqueue to helper queue */
if (q->direct_queue.qlen < q->direct_qlen && cl) {
sch->stats.drops++;
return NET_XMIT_DROP;
}
- } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
+ }
+#endif
+ else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
sch->stats.drops++;
cl->stats.drops++;
return NET_XMIT_DROP;
/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
- struct htb_class *cl = htb_classify(skb,sch);
+ struct htb_sched *q = qdisc_priv(sch);
+ int ret = NET_XMIT_SUCCESS;
+ struct htb_class *cl = htb_classify(skb,sch, &ret);
struct sk_buff *tskb;
if (cl == HTB_DIRECT || !cl) {
static void htb_rate_timer(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc*)arg;
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct list_head *p;
/* lock queue so that we can muck with it */
while (cl) {
HTB_CHCL(cl);
- diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
+ diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
#ifdef HTB_DEBUG
if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
if (net_ratelimit())
printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
cl->classid, diff,
+#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+ q->now.tv_sec * 1000000ULL + q->now.tv_usec,
+ cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
+#else
(unsigned long long) q->now,
(unsigned long long) cl->t_c,
+#endif
q->jiffies);
diff = 1000;
}
return cl->pq_key - q->jiffies;
}
htb_safe_rb_erase(p,q->wait_pq+level);
- diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
+ diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
#ifdef HTB_DEBUG
if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
if (net_ratelimit())
printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
cl->classid, diff,
+#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+ q->now.tv_sec * 1000000ULL + q->now.tv_usec,
+ cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
+#else
(unsigned long long) q->now,
(unsigned long long) cl->t_c,
+#endif
q->jiffies);
diff = 1000;
}
return HZ/10;
}
+/* Returns class->node+prio from the per-prio id-tree for the class whose
+ classid is >= id (exact match preferred), or NULL if no such class
+ exists. Used to recover a lost DRR feed pointer: when a ptr is
+ invalidated we remember the last classid and resume from the class
+ with the nearest classid not below it (see htb_lookup_leaf). */
+static struct rb_node *
+htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
+{
+ /* r tracks the smallest classid seen so far that is still > id */
+ struct rb_node *r = NULL;
+ while (n) {
+ struct htb_class *cl = rb_entry(n,struct htb_class,node[prio]);
+ if (id == cl->classid) return n;
+
+ if (id > cl->classid) {
+ /* everything in the left subtree is smaller still - go right */
+ n = n->rb_right;
+ } else {
+ /* candidate found; keep searching left for a closer match */
+ r = n;
+ n = n->rb_left;
+ }
+ }
+ return r;
+}
+
/**
* htb_lookup_leaf - returns next leaf class in DRR order
*
* Find leaf where current feed pointers points to.
*/
static struct htb_class *
-htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr)
+htb_lookup_leaf(HTB_ARGQ struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
{
int i;
struct {
struct rb_node *root;
struct rb_node **pptr;
+ u32 *pid;
} stk[TC_HTB_MAXDEPTH],*sp = stk;
BUG_TRAP(tree->rb_node);
sp->root = tree->rb_node;
sp->pptr = pptr;
+ sp->pid = pid;
for (i = 0; i < 65535; i++) {
+ HTB_DBG(4,2,"htb_lleaf ptr=%p pid=%X\n",*sp->pptr,*sp->pid);
+
+ if (!*sp->pptr && *sp->pid) {
+ /* ptr was invalidated but id is valid - try to recover
+ the original or next ptr */
+ *sp->pptr = htb_id_find_next_upper(prio,sp->root,*sp->pid);
+ }
+ *sp->pid = 0; /* ptr is valid now; remove this hint as it
+ can become out of date quickly */
if (!*sp->pptr) { /* we are at right end; rewind & go up */
*sp->pptr = sp->root;
while ((*sp->pptr)->rb_left)
return cl;
(++sp)->root = cl->un.inner.feed[prio].rb_node;
sp->pptr = cl->un.inner.ptr+prio;
+ sp->pid = cl->un.inner.last_ptr_id+prio;
}
}
BUG_TRAP(0);
struct sk_buff *skb = NULL;
struct htb_class *cl,*start;
/* look initial class up in the row */
- start = cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
+ start = cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,
+ q->ptr[level]+prio,q->last_ptr_id[level]+prio);
do {
next:
if ((q->row_mask[level] & (1 << prio)) == 0)
return NULL;
- next = htb_lookup_leaf (q->row[level]+prio,
- prio,q->ptr[level]+prio);
+ next = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,
+ prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);
+
if (cl == start) /* fix start if we just deleted it */
start = next;
cl = next;
}
q->nwc_hit++;
htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
- cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
+ cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,q->ptr[level]+prio,
+ q->last_ptr_id[level]+prio);
+
} while (cl != start);
if (likely(skb != NULL)) {
static void htb_delay_by(struct Qdisc *sch,long delay)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
- if (netif_queue_stopped(sch->dev)) return;
+ struct htb_sched *q = qdisc_priv(sch);
if (delay <= 0) delay = 1;
if (unlikely(delay > 5*HZ)) {
if (net_ratelimit())
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb = NULL;
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
int level;
long min_delay;
#ifdef HTB_DEBUG
/* try to drop from each class (by prio) until one succeed */
static unsigned int htb_drop(struct Qdisc* sch)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
int prio;
for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
/* always caled under BH & queue lock */
static void htb_reset(struct Qdisc* sch)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
int i;
HTB_DBG(0,1,"htb_reset sch=%p, handle=%X\n",sch,sch->handle);
static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct htb_sched *q = (struct htb_sched*)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct rtattr *tb[TCA_HTB_INIT];
struct tc_htb_glob *gopt;
int i;
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct htb_sched *q = (struct htb_sched*)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
unsigned char *b = skb->tail;
struct rtattr *rta;
struct tc_htb_glob gopt;
struct sk_buff *skb, struct tcmsg *tcm)
{
#ifdef HTB_DEBUG
- struct htb_sched *q = (struct htb_sched*)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
#endif
struct htb_class *cl = (struct htb_class*)arg;
unsigned char *b = skb->tail;
sch_tree_lock(sch);
if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
if (cl->prio_activity)
- htb_deactivate ((struct htb_sched*)sch->data,cl);
+ htb_deactivate (qdisc_priv(sch),cl);
/* TODO: is it correct ? Why CBQ doesn't do it ? */
sch->q.qlen -= (*old)->q.qlen;
static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
#ifdef HTB_DEBUG
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
#endif
struct htb_class *cl = htb_find(classid,sch);
HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
HTB_DBG(0,1,"htb_destrycls clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
if (!cl->level) {
BUG_TRAP(cl->un.leaf.q);
/* always caled under BH & queue lock */
static void htb_destroy(struct Qdisc* sch)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
HTB_DBG(0,1,"htb_destroy q=%p\n",q);
del_timer_sync (&q->timer);
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class*)arg;
HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
static void htb_put(struct Qdisc *sch, unsigned long arg)
{
#ifdef HTB_DEBUG
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
#endif
struct htb_class *cl = (struct htb_class*)arg;
HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
u32 parentid, struct rtattr **tca, unsigned long *arg)
{
int err = -EINVAL;
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class*)*arg,*parent;
struct rtattr *opt = tca[TCA_OPTIONS-1];
struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = htb_find (classid,sch);
HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
/*if (cl && !cl->level) return 0;
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
if (cl)
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
int i;
if (arg->stop)