#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/list.h>
+#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
u32 classid; /* class id */
unsigned int refcnt; /* usage count */
- struct tc_stats stats; /* generic statistics */
+ struct gnet_stats_basic bstats;
+ struct gnet_stats_queue qstats;
+ struct gnet_stats_rate_est rate_est;
+ spinlock_t *stats_lock;
unsigned int level; /* class level in hierarchy */
struct tcf_proto *filter_list; /* filter list */
unsigned int filter_cnt; /* filter count */
struct list_head children; /* child classes */
struct Qdisc *qdisc; /* leaf qdisc */
- struct list_head actlist; /* active children list */
- struct list_head alist; /* active children list member */
- struct list_head ellist; /* eligible list member */
+ struct rb_node el_node; /* qdisc's eligible tree member */
+ struct rb_root vt_tree; /* active children sorted by cl_vt */
+ struct rb_node vt_node; /* parent's vt_tree member */
+ struct rb_root cf_tree; /* active children sorted by cl_f */
+ struct rb_node cf_node; /* parent's cf_tree member */
struct list_head hlist; /* hash list member */
struct list_head dlist; /* drop list member */
adjustment */
u64 cl_vtoff; /* inter-period cumulative vt offset */
u64 cl_cvtmax; /* max child's vt in the last period */
+ u64 cl_cvtoff; /* cumulative cvtmax of all periods */
+ u64 cl_pcvtoff; /* parent's cvtoff at initialization
+ time */
struct internal_sc cl_rsc; /* internal real-time service curve */
struct internal_sc cl_fsc; /* internal fair service curve */
u16 defcls; /* default class id */
struct hfsc_class root; /* root class */
struct list_head clhash[HFSC_HSIZE]; /* class hash */
- struct list_head eligible; /* eligible list */
+ struct rb_root eligible; /* eligible tree */
struct list_head droplist; /* active leaf class list (for
dropping) */
struct sk_buff_head requeue; /* requeued packet */
/*
* macros
*/
-#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
+#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
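+/* override PSCHED_GET_TIME with a full 64-bit microsecond timestamp */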
#include <linux/time.h>
#undef PSCHED_GET_TIME
#define PSCHED_GET_TIME(stamp) \
do { \
struct timeval tv; \
do_gettimeofday(&tv); \
- (stamp) = 1000000ULL * tv.tv_sec + tv.tv_usec; \
+ (stamp) = 1ULL * USEC_PER_SEC * tv.tv_sec + tv.tv_usec; \
} while (0)
#endif
/*
- * eligible list holds backlogged classes being sorted by their eligible times.
- * there is one eligible list per hfsc instance.
+ * eligible tree holds backlogged classes sorted by their eligible times.
+ * there is one eligible tree per hfsc instance.
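+ * the leftmost node is the class with the minimum eligible time.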
*/
static void
-ellist_insert(struct hfsc_class *cl)
+eltree_insert(struct hfsc_class *cl)
{
- struct list_head *head = &cl->sched->eligible;
- struct hfsc_class *p;
-
- /* check the last entry first */
- if (list_empty(head) ||
- ((p = list_entry(head->prev, struct hfsc_class, ellist)) &&
- p->cl_e <= cl->cl_e)) {
- list_add_tail(&cl->ellist, head);
- return;
- }
+ struct rb_node **p = &cl->sched->eligible.rb_node;
+ struct rb_node *parent = NULL;
+ struct hfsc_class *cl1;
- list_for_each_entry(p, head, ellist) {
- if (cl->cl_e < p->cl_e) {
- /* insert cl before p */
- list_add_tail(&cl->ellist, &p->ellist);
- return;
- }
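+ /* equal eligible times go right, so rb_first() visits older entries first */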
+ while (*p != NULL) {
+ parent = *p;
+ cl1 = rb_entry(parent, struct hfsc_class, el_node);
+ if (cl->cl_e >= cl1->cl_e)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
}
- ASSERT(0); /* should not reach here */
+ rb_link_node(&cl->el_node, parent, p);
+ rb_insert_color(&cl->el_node, &cl->sched->eligible);
}
static inline void
-ellist_remove(struct hfsc_class *cl)
+eltree_remove(struct hfsc_class *cl)
{
- list_del(&cl->ellist);
+ rb_erase(&cl->el_node, &cl->sched->eligible);
}
-static void
-ellist_update(struct hfsc_class *cl)
+static inline void
+eltree_update(struct hfsc_class *cl)
{
- struct list_head *head = &cl->sched->eligible;
- struct hfsc_class *p, *last;
-
- /*
- * the eligible time of a class increases monotonically.
- * if the next entry has a larger eligible time, nothing to do.
- */
- if (cl->ellist.next == head ||
- ((p = list_entry(cl->ellist.next, struct hfsc_class, ellist)) &&
- cl->cl_e <= p->cl_e))
- return;
-
- /* check the last entry */
- last = list_entry(head->prev, struct hfsc_class, ellist);
- if (last->cl_e <= cl->cl_e) {
- list_move_tail(&cl->ellist, head);
- return;
- }
-
- /*
- * the new position must be between the next entry
- * and the last entry
- */
- list_for_each_entry_continue(p, head, ellist) {
- if (cl->cl_e < p->cl_e) {
- list_move_tail(&cl->ellist, &p->ellist);
- return;
- }
- }
- ASSERT(0); /* should not reach here */
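+ /* rbtree nodes cannot be repositioned in place; remove and reinsert */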
+ eltree_remove(cl);
+ eltree_insert(cl);
}
/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
-ellist_get_mindl(struct list_head *head, u64 cur_time)
+eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
struct hfsc_class *p, *cl = NULL;
+ struct rb_node *n;
- list_for_each_entry(p, head, ellist) {
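+ /* in-order walk visits classes in increasing order of eligible time */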
+ for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
+ p = rb_entry(n, struct hfsc_class, el_node);
if (p->cl_e > cur_time)
break;
if (cl == NULL || p->cl_d < cl->cl_d)
/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
-ellist_get_minel(struct list_head *head)
+eltree_get_minel(struct hfsc_sched *q)
{
- if (list_empty(head))
+ struct rb_node *n;
+
+ n = rb_first(&q->eligible);
+ if (n == NULL)
return NULL;
- return list_entry(head->next, struct hfsc_class, ellist);
+ return rb_entry(n, struct hfsc_class, el_node);
}
/*
- * active children list holds backlogged child classes being sorted
- * by their virtual time. each intermediate class has one active
- * children list.
+ * vttree holds backlogged child classes sorted by their virtual time.
+ * each intermediate class has one vttree.
*/
static void
-actlist_insert(struct hfsc_class *cl)
+vttree_insert(struct hfsc_class *cl)
{
- struct list_head *head = &cl->cl_parent->actlist;
- struct hfsc_class *p;
-
- /* check the last entry first */
- if (list_empty(head) ||
- ((p = list_entry(head->prev, struct hfsc_class, alist)) &&
- p->cl_vt <= cl->cl_vt)) {
- list_add_tail(&cl->alist, head);
- return;
- }
+ struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct hfsc_class *cl1;
- list_for_each_entry(p, head, alist) {
- if (cl->cl_vt < p->cl_vt) {
- /* insert cl before p */
- list_add_tail(&cl->alist, &p->alist);
- return;
- }
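+ /* equal virtual times go right, matching the old tail-insert order */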
+ while (*p != NULL) {
+ parent = *p;
+ cl1 = rb_entry(parent, struct hfsc_class, vt_node);
+ if (cl->cl_vt >= cl1->cl_vt)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
}
- ASSERT(0); /* should not reach here */
+ rb_link_node(&cl->vt_node, parent, p);
+ rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}
static inline void
-actlist_remove(struct hfsc_class *cl)
+vttree_remove(struct hfsc_class *cl)
{
- list_del(&cl->alist);
+ rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}
-static void
-actlist_update(struct hfsc_class *cl)
+static inline void
+vttree_update(struct hfsc_class *cl)
{
- struct list_head *head = &cl->cl_parent->actlist;
- struct hfsc_class *p, *last;
-
- /*
- * the virtual time of a class increases monotonically.
- * if the next entry has a larger virtual time, nothing to do.
- */
- if (cl->alist.next == head ||
- ((p = list_entry(cl->alist.next, struct hfsc_class, alist)) &&
- cl->cl_vt <= p->cl_vt))
- return;
-
- /* check the last entry */
- last = list_entry(head->prev, struct hfsc_class, alist);
- if (last->cl_vt <= cl->cl_vt) {
- list_move_tail(&cl->alist, head);
- return;
- }
-
- /*
- * the new position must be between the next entry
- * and the last entry
- */
- list_for_each_entry_continue(p, head, alist) {
- if (cl->cl_vt < p->cl_vt) {
- list_move_tail(&cl->alist, &p->alist);
- return;
- }
- }
- ASSERT(0); /* should not reach here */
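+ /* cl_vt increases monotonically; reposition by remove and reinsert */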
+ vttree_remove(cl);
+ vttree_insert(cl);
}
static inline struct hfsc_class *
-actlist_firstfit(struct hfsc_class *cl, u64 cur_time)
+vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
struct hfsc_class *p;
+ struct rb_node *n;
- list_for_each_entry(p, &cl->actlist, alist) {
- if (p->cl_f <= cur_time) {
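+ /* in-order walk visits children in increasing order of cl_vt */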
+ for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
+ p = rb_entry(n, struct hfsc_class, vt_node);
+ if (p->cl_f <= cur_time)
return p;
- }
}
return NULL;
}
* get the leaf class with the minimum vt in the hierarchy
*/
static struct hfsc_class *
-actlist_get_minvt(struct hfsc_class *cl, u64 cur_time)
+vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
/* if root-class's cfmin is bigger than cur_time nothing to do */
if (cl->cl_cfmin > cur_time)
return NULL;
while (cl->level > 0) {
- cl = actlist_firstfit(cl, cur_time);
+ cl = vttree_firstfit(cl, cur_time);
if (cl == NULL)
return NULL;
/*
return cl;
}
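+/*
+ * cftree holds backlogged child classes sorted by their fit time (cl_f).
+ * each intermediate class has one cftree. the parent's minimum fit time
+ * (cl_cfmin) is taken from the leftmost node, so update_cfmin() no longer
+ * needs to scan all active children.
+ */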
+static void
+cftree_insert(struct hfsc_class *cl)
+{
+ struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct hfsc_class *cl1;
+
+ while (*p != NULL) {
+ parent = *p;
+ cl1 = rb_entry(parent, struct hfsc_class, cf_node);
+ if (cl->cl_f >= cl1->cl_f)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&cl->cf_node, parent, p);
+ rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
+}
+
+static inline void
+cftree_remove(struct hfsc_class *cl)
+{
+ rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
+}
+
+static inline void
+cftree_update(struct hfsc_class *cl)
+{
+ cftree_remove(cl);
+ cftree_insert(cl);
+}
+
/*
* service curve support functions
*
* ism: (psched_us/byte) << ISM_SHIFT
* dx: psched_us
*
- * Time source resolution
- * PSCHED_JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
- * PSCHED_CPU: resolution is between 0.5us and 1us.
- * PSCHED_GETTIMEOFDAY: resolution is exactly 1us.
+ * Clock source resolution (CONFIG_NET_SCH_CLK_*)
+ * JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
+ * CPU: resolution is between 0.5us and 1us.
+ * GETTIMEOFDAY: resolution is exactly 1us.
*
* sm and ism are scaled in order to keep effective digits.
* SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
u64 dx;
dx = ((u64)d * PSCHED_JIFFIE2US(HZ));
- dx += 1000000 - 1;
- do_div(dx, 1000000);
+ dx += USEC_PER_SEC - 1;
+ do_div(dx, USEC_PER_SEC);
return dx;
}
{
u64 d;
- d = dx * 1000000;
+ d = dx * USEC_PER_SEC;
do_div(d, PSCHED_JIFFIE2US(HZ));
return (u32)d;
}
cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
- ellist_insert(cl);
+ eltree_insert(cl);
}
static void
cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
- ellist_update(cl);
+ eltree_update(cl);
}
static inline void
cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}
-static void
+static inline void
update_cfmin(struct hfsc_class *cl)
{
+ struct rb_node *n = rb_first(&cl->cf_tree);
struct hfsc_class *p;
- u64 cfmin;
- if (list_empty(&cl->actlist)) {
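+ /* the leftmost node of cf_tree, if any, holds the minimum fit time */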
+ if (n == NULL) {
cl->cl_cfmin = 0;
return;
}
- cfmin = HT_INFINITY;
- list_for_each_entry(p, &cl->actlist, alist) {
- if (p->cl_f == 0) {
- cl->cl_cfmin = 0;
- return;
- }
- if (p->cl_f < cfmin)
- cfmin = p->cl_f;
- }
- cl->cl_cfmin = cfmin;
+ p = rb_entry(n, struct hfsc_class, cf_node);
+ cl->cl_cfmin = p->cl_f;
}
static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
- struct hfsc_class *max_cl, *p;
+ struct hfsc_class *max_cl;
+ struct rb_node *n;
u64 vt, f, cur_time;
int go_active;
go_active = 0;
if (go_active) {
- if (!list_empty(&cl->cl_parent->actlist)) {
- max_cl = list_entry(cl->cl_parent->actlist.prev,
- struct hfsc_class, alist);
+ n = rb_last(&cl->cl_parent->vt_tree);
+ if (n != NULL) {
+ max_cl = rb_entry(n, struct hfsc_class, vt_node);
/*
* set vt to the average of the min and max
* classes. if the parent's period didn't
} else {
/*
* first child for a new parent backlog period.
- * add parent's cvtmax to vtoff of children
- * to make a new vt (vtoff + vt) larger than
- * the vt in the last period for all children.
+ * add parent's cvtmax to cvtoff to make a new
+ * vt (vtoff + vt) larger than the vt in the
+ * last period for all children.
*/
vt = cl->cl_parent->cl_cvtmax;
- list_for_each_entry(p, &cl->cl_parent->children,
- siblings)
- p->cl_vtoff += vt;
- cl->cl_vt = 0;
+ cl->cl_parent->cl_cvtoff += vt;
cl->cl_parent->cl_cvtmax = 0;
cl->cl_parent->cl_cvtmin = 0;
+ cl->cl_vt = 0;
}
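+ /*
+ * cl_pcvtoff is the parent's cvtoff recorded at creation time,
+ * so only the offset accumulated since then applies to cl.
+ */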
+ cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
+ cl->cl_pcvtoff;
+
/* update the virtual curve */
vt = cl->cl_vt + cl->cl_vtoff;
rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
cl->cl_parentperiod++;
cl->cl_f = 0;
- actlist_insert(cl);
+ vttree_insert(cl);
+ cftree_insert(cl);
if (cl->cl_flags & HFSC_USC) {
/* class has upper limit curve */
f = max(cl->cl_myf, cl->cl_cfmin);
if (f != cl->cl_f) {
cl->cl_f = f;
+ cftree_update(cl);
update_cfmin(cl->cl_parent);
}
}
if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
cl->cl_parent->cl_cvtmax = cl->cl_vt;
- /* remove this class from the vt list */
- actlist_remove(cl);
+ /* remove this class from the vt tree */
+ vttree_remove(cl);
+ cftree_remove(cl);
update_cfmin(cl->cl_parent);
continue;
cl->cl_vt = cl->cl_parent->cl_cvtmin;
}
- /* update the vt list */
- actlist_update(cl);
+ /* update the vt tree */
+ vttree_update(cl);
if (cl->cl_flags & HFSC_USC) {
cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
f = max(cl->cl_myf, cl->cl_cfmin);
if (f != cl->cl_f) {
cl->cl_f = f;
+ cftree_update(cl);
update_cfmin(cl->cl_parent);
}
}
set_passive(struct hfsc_class *cl)
{
if (cl->cl_flags & HFSC_RSC)
- ellist_remove(cl);
+ eltree_remove(cl);
list_del(&cl->dlist);
/*
- * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
- * needs to be called explicitly to remove a class from actlist
+ * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
+ * needs to be called explicitly to remove a class from vttree.
*/
}
static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
struct rtattr **tca, unsigned long *arg)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl = (struct hfsc_class *)*arg;
struct hfsc_class *parent = NULL;
struct rtattr *opt = tca[TCA_OPTIONS-1];
struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
u64 cur_time;
- if (opt == NULL ||
- rtattr_parse(tb, TCA_HFSC_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)))
+ if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt))
return -EINVAL;
if (tb[TCA_HFSC_RSC-1]) {
sch_tree_unlock(sch);
#ifdef CONFIG_NET_ESTIMATOR
- if (tca[TCA_RATE-1]) {
- qdisc_kill_estimator(&cl->stats);
- qdisc_new_estimator(&cl->stats, tca[TCA_RATE-1]);
- }
+ if (tca[TCA_RATE-1])
+ gen_replace_estimator(&cl->bstats, &cl->rate_est,
+ cl->stats_lock, tca[TCA_RATE-1]);
#endif
return 0;
}
cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
if (cl->qdisc == NULL)
cl->qdisc = &noop_qdisc;
- cl->stats.lock = &sch->dev->queue_lock;
+ cl->stats_lock = &sch->dev->queue_lock;
INIT_LIST_HEAD(&cl->children);
- INIT_LIST_HEAD(&cl->actlist);
+ cl->vt_tree = RB_ROOT;
+ cl->cf_tree = RB_ROOT;
sch_tree_lock(sch);
list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]);
if (parent->level == 0)
hfsc_purge_queue(sch, parent);
hfsc_adjust_levels(parent);
+ cl->cl_pcvtoff = parent->cl_cvtoff;
sch_tree_unlock(sch);
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
- qdisc_new_estimator(&cl->stats, tca[TCA_RATE-1]);
+ gen_new_estimator(&cl->bstats, &cl->rate_est,
+ cl->stats_lock, tca[TCA_RATE-1]);
#endif
*arg = (unsigned long)cl;
return 0;
static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
hfsc_destroy_filters(&cl->filter_list);
qdisc_destroy(cl->qdisc);
#ifdef CONFIG_NET_ESTIMATOR
- qdisc_kill_estimator(&cl->stats);
+ gen_kill_estimator(&cl->bstats, &cl->rate_est);
#endif
if (cl != &q->root)
kfree(cl);
static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl = (struct hfsc_class *)arg;
if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
}
static struct hfsc_class *
-hfsc_classify(struct sk_buff *skb, struct Qdisc *sch)
+hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
struct tcf_result res;
struct tcf_proto *tcf;
if (cl->level == 0)
return cl;
+ *qerr = NET_XMIT_BYPASS;
tcf = q->root.filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_QUEUED:
+ case TC_ACT_STOLEN:
+ *qerr = NET_XMIT_SUCCESS;
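+ /* fall through */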
+ case TC_ACT_SHOT:
+ return NULL;
+ }
+#elif defined(CONFIG_NET_CLS_POLICE)
if (result == TC_POLICE_SHOT)
return NULL;
#endif
static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl = (struct hfsc_class *)arg;
if (cl == NULL)
return -1;
}
-static inline int
-hfsc_dump_stats(struct sk_buff *skb, struct hfsc_class *cl)
-{
- cl->stats.qlen = cl->qdisc->q.qlen;
- if (qdisc_copy_stats(skb, &cl->stats) < 0)
- goto rtattr_failure;
-
- return skb->len;
-
- rtattr_failure:
- return -1;
-}
-
-static inline int
-hfsc_dump_xstats(struct sk_buff *skb, struct hfsc_class *cl)
-{
- struct tc_hfsc_stats xstats;
-
- xstats.level = cl->level;
- xstats.period = cl->cl_vtperiod;
- xstats.work = cl->cl_total;
- xstats.rtwork = cl->cl_cumul;
- RTA_PUT(skb, TCA_XSTATS, sizeof(xstats), &xstats);
-
- return skb->len;
-
- rtattr_failure:
- return -1;
-}
-
static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
struct tcmsg *tcm)
if (hfsc_dump_curves(skb, cl) < 0)
goto rtattr_failure;
rta->rta_len = skb->tail - b;
-
- if ((hfsc_dump_stats(skb, cl) < 0) ||
- (hfsc_dump_xstats(skb, cl) < 0))
- goto rtattr_failure;
-
return skb->len;
rtattr_failure:
return -1;
}
+static int
+hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+ struct gnet_dump *d)
+{
+ struct hfsc_class *cl = (struct hfsc_class *)arg;
+ struct tc_hfsc_stats xstats;
+
+ cl->qstats.qlen = cl->qdisc->q.qlen;
+ xstats.level = cl->level;
+ xstats.period = cl->cl_vtperiod;
+ xstats.work = cl->cl_total;
+ xstats.rtwork = cl->cl_cumul;
+
+ if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+#ifdef CONFIG_NET_ESTIMATOR
+ gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+#endif
+ gnet_stats_copy_queue(d, &cl->qstats) < 0)
+ return -1;
+
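+ /* export the hfsc-specific counters as application-specific stats */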
+ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+}
+
static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
unsigned int i;
static void
hfsc_schedule_watchdog(struct Qdisc *sch, u64 cur_time)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
u64 next_time = 0;
long delay;
- if ((cl = ellist_get_minel(&q->eligible)) != NULL)
+ if ((cl = eltree_get_minel(q)) != NULL)
next_time = cl->cl_e;
if (q->root.cl_cfmin != 0) {
if (next_time == 0 || next_time > q->root.cl_cfmin)
static int
hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct tc_hfsc_qopt *qopt;
unsigned int i;
return -EINVAL;
qopt = RTA_DATA(opt);
- memset(q, 0, sizeof(struct hfsc_sched));
- sch->stats.lock = &sch->dev->queue_lock;
+ sch->stats_lock = &sch->dev->queue_lock;
q->defcls = qopt->defcls;
for (i = 0; i < HFSC_HSIZE; i++)
INIT_LIST_HEAD(&q->clhash[i]);
- INIT_LIST_HEAD(&q->eligible);
+ q->eligible = RB_ROOT;
INIT_LIST_HEAD(&q->droplist);
skb_queue_head_init(&q->requeue);
q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
if (q->root.qdisc == NULL)
q->root.qdisc = &noop_qdisc;
- q->root.stats.lock = &sch->dev->queue_lock;
+ q->root.stats_lock = &sch->dev->queue_lock;
INIT_LIST_HEAD(&q->root.children);
- INIT_LIST_HEAD(&q->root.actlist);
+ q->root.vt_tree = RB_ROOT;
+ q->root.cf_tree = RB_ROOT;
list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);
static int
hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct tc_hfsc_qopt *qopt;
if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
cl->cl_vtoff = 0;
cl->cl_cvtmin = 0;
cl->cl_cvtmax = 0;
+ cl->cl_cvtoff = 0;
+ cl->cl_pcvtoff = 0;
cl->cl_vtperiod = 0;
cl->cl_parentperiod = 0;
cl->cl_f = 0;
cl->cl_myfadj = 0;
cl->cl_cfmin = 0;
cl->cl_nactive = 0;
- INIT_LIST_HEAD(&cl->actlist);
+
+ cl->vt_tree = RB_ROOT;
+ cl->cf_tree = RB_ROOT;
qdisc_reset(cl->qdisc);
if (cl->cl_flags & HFSC_RSC)
static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
unsigned int i;
hfsc_reset_class(cl);
}
__skb_queue_purge(&q->requeue);
- INIT_LIST_HEAD(&q->eligible);
+ q->eligible = RB_ROOT;
INIT_LIST_HEAD(&q->droplist);
del_timer(&q->wd_timer);
sch->flags &= ~TCQ_F_THROTTLED;
static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl, *next;
unsigned int i;
static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
unsigned char *b = skb->tail;
struct tc_hfsc_qopt qopt;
qopt.defcls = q->defcls;
RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
-
- sch->stats.qlen = sch->q.qlen;
- if (qdisc_copy_stats(skb, &sch->stats) < 0)
- goto rtattr_failure;
-
return skb->len;
rtattr_failure:
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct hfsc_class *cl = hfsc_classify(skb, sch);
- unsigned int len = skb->len;
+ struct hfsc_class *cl;
+ unsigned int len;
int err;
+ cl = hfsc_classify(skb, sch, &err);
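+ /* a NULL class with err == NET_XMIT_SUCCESS means an action consumed the packet */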
if (cl == NULL) {
+ if (err == NET_XMIT_BYPASS)
+ sch->qstats.drops++;
kfree_skb(skb);
- sch->stats.drops++;
- return NET_XMIT_DROP;
+ return err;
}
+ len = skb->len;
err = cl->qdisc->enqueue(skb, cl->qdisc);
if (unlikely(err != NET_XMIT_SUCCESS)) {
- cl->stats.drops++;
- sch->stats.drops++;
+ cl->qstats.drops++;
+ sch->qstats.drops++;
return err;
}
if (cl->qdisc->q.qlen == 1)
set_active(cl, len);
- cl->stats.packets++;
- cl->stats.bytes += len;
- sch->stats.packets++;
- sch->stats.bytes += len;
+ cl->bstats.packets++;
+ cl->bstats.bytes += len;
+ sch->bstats.packets++;
+ sch->bstats.bytes += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
struct sk_buff *skb;
u64 cur_time;
* find the class with the minimum deadline among
* the eligible classes.
*/
- if ((cl = ellist_get_mindl(&q->eligible, cur_time)) != NULL) {
+ if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
realtime = 1;
} else {
/*
* use link-sharing criteria
* get the class with the minimum vt in the hierarchy
*/
- cl = actlist_get_minvt(&q->root, cur_time);
+ cl = vttree_get_minvt(&q->root, cur_time);
if (cl == NULL) {
- sch->stats.overlimits++;
- if (!netif_queue_stopped(sch->dev))
- hfsc_schedule_watchdog(sch, cur_time);
+ sch->qstats.overlimits++;
+ hfsc_schedule_watchdog(sch, cur_time);
return NULL;
}
}
static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
__skb_queue_head(&q->requeue, skb);
sch->q.qlen++;
+ sch->qstats.requeues++;
return NET_XMIT_SUCCESS;
}
static unsigned int
hfsc_drop(struct Qdisc *sch)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
unsigned int len;
} else {
list_move_tail(&cl->dlist, &q->droplist);
}
- cl->stats.drops++;
- sch->stats.drops++;
+ cl->qstats.drops++;
+ sch->qstats.drops++;
sch->q.qlen--;
return len;
}
.unbind_tcf = hfsc_unbind_tcf,
.tcf_chain = hfsc_tcf_chain,
.dump = hfsc_dump_class,
+ .dump_stats = hfsc_dump_class_stats,
.walk = hfsc_walk
};