u32 classid; /* class id */
unsigned int refcnt; /* usage count */
- struct tc_stats stats; /* generic statistics */
+ struct gnet_stats_basic bstats;
+ struct gnet_stats_queue qstats;
+ struct gnet_stats_rate_est rate_est;
spinlock_t *stats_lock;
unsigned int level; /* class level in hierarchy */
struct tcf_proto *filter_list; /* filter list */
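
For reference, the single struct tc_stats is replaced by three containers
from include/linux/gen_stats.h; a rough sketch of their layout at the time
(not a verbatim copy of the header):

	struct gnet_stats_basic {
		__u64	bytes;		/* number of seen bytes */
		__u32	packets;	/* number of seen packets */
	};

	struct gnet_stats_rate_est {
		__u32	bps;		/* current byte rate, bytes/s */
		__u32	pps;		/* current packet rate, pkts/s */
	};

	struct gnet_stats_queue {
		__u32	qlen;		/* queue length */
		__u32	backlog;	/* backlog size of queue */
		__u32	drops;		/* dropped packets */
		__u32	requeues;	/* requeued packets */
		__u32	overlimits;	/* overlimit events */
	};
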
struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
u64 cur_time;
- if (opt == NULL ||
- rtattr_parse(tb, TCA_HFSC_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)))
+ if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt))
return -EINVAL;
if (tb[TCA_HFSC_RSC-1]) {
sch_tree_unlock(sch);
#ifdef CONFIG_NET_ESTIMATOR
- if (tca[TCA_RATE-1]) {
- qdisc_kill_estimator(&cl->stats);
- qdisc_new_estimator(&cl->stats, cl->stats_lock,
- tca[TCA_RATE-1]);
- }
+ if (tca[TCA_RATE-1])
+ gen_replace_estimator(&cl->bstats, &cl->rate_est,
+ cl->stats_lock, tca[TCA_RATE-1]);
#endif
return 0;
}
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
- qdisc_new_estimator(&cl->stats, cl->stats_lock,
- tca[TCA_RATE-1]);
+ gen_new_estimator(&cl->bstats, &cl->rate_est,
+ cl->stats_lock, tca[TCA_RATE-1]);
#endif
*arg = (unsigned long)cl;
return 0;
hfsc_destroy_filters(&cl->filter_list);
qdisc_destroy(cl->qdisc);
#ifdef CONFIG_NET_ESTIMATOR
- qdisc_kill_estimator(&cl->stats);
+ gen_kill_estimator(&cl->bstats, &cl->rate_est);
#endif
if (cl != &q->root)
kfree(cl);
}
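
The estimator calls above come from the generic estimator API declared in
include/net/gen_stats.h; the prototypes are approximately:

	int  gen_new_estimator(struct gnet_stats_basic *bstats,
			       struct gnet_stats_rate_est *rate_est,
			       spinlock_t *stats_lock, struct rtattr *opt);
	int  gen_replace_estimator(struct gnet_stats_basic *bstats,
				   struct gnet_stats_rate_est *rate_est,
				   spinlock_t *stats_lock, struct rtattr *opt);
	void gen_kill_estimator(struct gnet_stats_basic *bstats,
				struct gnet_stats_rate_est *rate_est);

gen_replace_estimator() is equivalent to a kill followed by a new, which is
why the explicit kill/new pair in the change path collapses to one call.
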
static struct hfsc_class *
-hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
+hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
if (cl->level == 0)
return cl;
+ *qerr = NET_XMIT_DROP;
tcf = q->root.filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
- int terminal = 0;
switch (result) {
- case TC_ACT_SHOT:
- *qres = NET_XMIT_DROP;
- terminal = 1;
- break;
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
- terminal = 1;
- break;
- case TC_ACT_RECLASSIFY:
- case TC_ACT_OK:
- case TC_ACT_UNSPEC:
- default:
- break;
- }
-
- if (terminal) {
- kfree_skb(skb);
+ *qerr = NET_XMIT_SUCCESS;
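+			/* fall through: both cases return NULL */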
+ case TC_ACT_SHOT:
return NULL;
}
-#else
-#ifdef CONFIG_NET_CLS_POLICE
+#elif defined(CONFIG_NET_CLS_POLICE)
if (result == TC_POLICE_SHOT)
return NULL;
-#endif
#endif
if ((cl = (struct hfsc_class *)res.class) == NULL) {
if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
			break;	/* filter selected invalid classid */
}
-static inline int
-hfsc_dump_stats(struct sk_buff *skb, struct hfsc_class *cl)
-{
- cl->stats.qlen = cl->qdisc->q.qlen;
- if (qdisc_copy_stats(skb, &cl->stats, cl->stats_lock) < 0)
- goto rtattr_failure;
-
- return skb->len;
-
- rtattr_failure:
- return -1;
-}
-
-static inline int
-hfsc_dump_xstats(struct sk_buff *skb, struct hfsc_class *cl)
-{
- struct tc_hfsc_stats xstats;
-
- xstats.level = cl->level;
- xstats.period = cl->cl_vtperiod;
- xstats.work = cl->cl_total;
- xstats.rtwork = cl->cl_cumul;
- RTA_PUT(skb, TCA_XSTATS, sizeof(xstats), &xstats);
-
- return skb->len;
-
- rtattr_failure:
- return -1;
-}
-
static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
struct tcmsg *tcm)
if (hfsc_dump_curves(skb, cl) < 0)
goto rtattr_failure;
rta->rta_len = skb->tail - b;
-
- if ((hfsc_dump_stats(skb, cl) < 0) ||
- (hfsc_dump_xstats(skb, cl) < 0))
- goto rtattr_failure;
-
return skb->len;
rtattr_failure:
return -1;
}
+static int
+hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+ struct gnet_dump *d)
+{
+ struct hfsc_class *cl = (struct hfsc_class *)arg;
+ struct tc_hfsc_stats xstats;
+
+ cl->qstats.qlen = cl->qdisc->q.qlen;
+ xstats.level = cl->level;
+ xstats.period = cl->cl_vtperiod;
+ xstats.work = cl->cl_total;
+ xstats.rtwork = cl->cl_cumul;
+
+ if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+#ifdef CONFIG_NET_ESTIMATOR
+ gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+#endif
+ gnet_stats_copy_queue(d, &cl->qstats) < 0)
+ return -1;
+
+ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+}
+
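
The gnet_dump copy helpers used in hfsc_dump_class_stats() are declared in
include/net/gen_stats.h, roughly:

	int gnet_stats_copy_basic(struct gnet_dump *d,
				  struct gnet_stats_basic *b);
	int gnet_stats_copy_rate_est(struct gnet_dump *d,
				     struct gnet_stats_rate_est *r);
	int gnet_stats_copy_queue(struct gnet_dump *d,
				  struct gnet_stats_queue *q);
	int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);

The core opens the dump (gnet_stats_start_copy_compat()) and closes it
(gnet_stats_finish_copy()) around the ->dump_stats() callback, so the class
handler only has to copy its own counters.
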
static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
qopt.defcls = q->defcls;
RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
-
- sch->stats.qlen = sch->q.qlen;
- if (qdisc_copy_stats(skb, &sch->stats, sch->stats_lock) < 0)
- goto rtattr_failure;
-
return skb->len;
rtattr_failure:
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- int ret = NET_XMIT_SUCCESS;
- struct hfsc_class *cl = hfsc_classify(skb, sch, &ret);
- unsigned int len = skb->len;
+ struct hfsc_class *cl;
+ unsigned int len;
int err;
-
-#ifdef CONFIG_NET_CLS_ACT
- if (cl == NULL) {
- if (NET_XMIT_DROP == ret) {
- sch->stats.drops++;
- }
- return ret;
- }
-#else
+ cl = hfsc_classify(skb, sch, &err);
if (cl == NULL) {
+ if (err == NET_XMIT_DROP)
+ sch->qstats.drops++;
kfree_skb(skb);
- sch->stats.drops++;
- return NET_XMIT_DROP;
+ return err;
}
-#endif
+ len = skb->len;
err = cl->qdisc->enqueue(skb, cl->qdisc);
if (unlikely(err != NET_XMIT_SUCCESS)) {
- cl->stats.drops++;
- sch->stats.drops++;
+ cl->qstats.drops++;
+ sch->qstats.drops++;
return err;
}
if (cl->qdisc->q.qlen == 1)
set_active(cl, len);
- cl->stats.packets++;
- cl->stats.bytes += len;
- sch->stats.packets++;
- sch->stats.bytes += len;
+ cl->bstats.packets++;
+ cl->bstats.bytes += len;
+ sch->bstats.packets++;
+ sch->bstats.bytes += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
*/
cl = vttree_get_minvt(&q->root, cur_time);
if (cl == NULL) {
- sch->stats.overlimits++;
+ sch->qstats.overlimits++;
hfsc_schedule_watchdog(sch, cur_time);
return NULL;
}
__skb_queue_head(&q->requeue, skb);
sch->q.qlen++;
+ sch->qstats.requeues++;
return NET_XMIT_SUCCESS;
}
} else {
list_move_tail(&cl->dlist, &q->droplist);
}
- cl->stats.drops++;
- sch->stats.drops++;
+ cl->qstats.drops++;
+ sch->qstats.drops++;
sch->q.qlen--;
return len;
}
.unbind_tcf = hfsc_unbind_tcf,
.tcf_chain = hfsc_tcf_chain,
.dump = hfsc_dump_class,
+ .dump_stats = hfsc_dump_class_stats,
.walk = hfsc_walk
};
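
Wiring up .dump_stats assumes the corresponding member in struct
Qdisc_class_ops, which at this point looks roughly like:

	int (*dump_stats)(struct Qdisc *sch, unsigned long arg,
			  struct gnet_dump *d);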