*/
#include <linux/kernel.h>
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
u32 classid; /* class id */
unsigned int refcnt; /* usage count */
- struct tc_stats stats; /* generic statistics */
+ struct gnet_stats_basic bstats;
+ struct gnet_stats_queue qstats;
+ struct gnet_stats_rate_est rate_est;
spinlock_t *stats_lock;
unsigned int level; /* class level in hierarchy */
struct tcf_proto *filter_list; /* filter list */
do { \
struct timeval tv; \
do_gettimeofday(&tv); \
- (stamp) = 1000000ULL * tv.tv_sec + tv.tv_usec; \
+ (stamp) = 1ULL * USEC_PER_SEC * tv.tv_sec + tv.tv_usec; \
} while (0)
#endif
u64 dx;
dx = ((u64)d * PSCHED_JIFFIE2US(HZ));
- dx += 1000000 - 1;
- do_div(dx, 1000000);
+ dx += USEC_PER_SEC - 1;
+ do_div(dx, USEC_PER_SEC);
return dx;
}
{
u64 d;
- d = dx * 1000000;
+ d = dx * USEC_PER_SEC;
do_div(d, PSCHED_JIFFIE2US(HZ));
return (u32)d;
}
if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
if (net_ratelimit())
printk("qdisc_peek_len: failed to requeue\n");
+ qdisc_tree_decrease_qlen(sch, 1);
return 0;
}
return len;
unsigned int len = cl->qdisc->q.qlen;
qdisc_reset(cl->qdisc);
- if (len > 0) {
- update_vf(cl, 0, 0);
- set_passive(cl);
- sch->q.qlen -= len;
- }
+ qdisc_tree_decrease_qlen(cl->qdisc, len);
}
static void
do {
level = 0;
list_for_each_entry(p, &cl->children, siblings) {
- if (p->level > level)
- level = p->level;
+ if (p->level >= level)
+ level = p->level + 1;
}
- cl->level = level + 1;
+ cl->level = level;
} while ((cl = cl->cl_parent) != NULL);
}
struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
u64 cur_time;
- if (opt == NULL ||
- rtattr_parse(tb, TCA_HFSC_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)))
+ if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt))
return -EINVAL;
if (tb[TCA_HFSC_RSC-1]) {
sch_tree_unlock(sch);
#ifdef CONFIG_NET_ESTIMATOR
- if (tca[TCA_RATE-1]) {
- qdisc_kill_estimator(&cl->stats);
- qdisc_new_estimator(&cl->stats, cl->stats_lock,
- tca[TCA_RATE-1]);
- }
+ if (tca[TCA_RATE-1])
+ gen_replace_estimator(&cl->bstats, &cl->rate_est,
+ cl->stats_lock, tca[TCA_RATE-1]);
#endif
return 0;
}
if (rsc == NULL && fsc == NULL)
return -EINVAL;
- cl = kmalloc(sizeof(struct hfsc_class), GFP_KERNEL);
+ cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
if (cl == NULL)
return -ENOBUFS;
- memset(cl, 0, sizeof(struct hfsc_class));
if (rsc != NULL)
hfsc_change_rsc(cl, rsc, 0);
cl->classid = classid;
cl->sched = q;
cl->cl_parent = parent;
- cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+ cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
if (cl->qdisc == NULL)
cl->qdisc = &noop_qdisc;
cl->stats_lock = &sch->dev->queue_lock;
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
- qdisc_new_estimator(&cl->stats, cl->stats_lock,
- tca[TCA_RATE-1]);
+ gen_new_estimator(&cl->bstats, &cl->rate_est,
+ cl->stats_lock, tca[TCA_RATE-1]);
#endif
*arg = (unsigned long)cl;
return 0;
hfsc_destroy_filters(&cl->filter_list);
qdisc_destroy(cl->qdisc);
#ifdef CONFIG_NET_ESTIMATOR
- qdisc_kill_estimator(&cl->stats);
+ gen_kill_estimator(&cl->bstats, &cl->rate_est);
#endif
if (cl != &q->root)
kfree(cl);
}
static struct hfsc_class *
-hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
+hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
if (cl->level == 0)
return cl;
+ *qerr = NET_XMIT_BYPASS;
tcf = q->root.filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
- int terminal = 0;
switch (result) {
- case TC_ACT_SHOT:
- *qres = NET_XMIT_DROP;
- terminal = 1;
- break;
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
- terminal = 1;
- break;
- case TC_ACT_RECLASSIFY:
- case TC_ACT_OK:
- case TC_ACT_UNSPEC:
- default:
- break;
- }
-
- if (terminal) {
- kfree_skb(skb);
+ *qerr = NET_XMIT_SUCCESS;
+ case TC_ACT_SHOT:
return NULL;
}
-#else
-#ifdef CONFIG_NET_CLS_POLICE
+#elif defined(CONFIG_NET_CLS_POLICE)
if (result == TC_POLICE_SHOT)
return NULL;
-#endif
#endif
if ((cl = (struct hfsc_class *)res.class) == NULL) {
if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
if (cl->level > 0)
return -EINVAL;
if (new == NULL) {
- new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+ new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+ cl->classid);
if (new == NULL)
new = &noop_qdisc;
}
return NULL;
}
+/*
+ * hfsc_qlen_notify - class_ops callback run when packets are dropped
+ * from a class's child qdisc (wired up as .qlen_notify below).
+ * If the child qdisc has been drained empty, finish the class's
+ * virtual-time accounting and deactivate it via set_passive() so it
+ * stops being considered for scheduling.
+ * NOTE(review): exact update_vf(cl, 0, 0)/set_passive() semantics are
+ * defined elsewhere in this file — confirm against their definitions.
+ */
+static void
+hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
+{
+ struct hfsc_class *cl = (struct hfsc_class *)arg;
+
+ if (cl->qdisc->q.qlen == 0) {
+ update_vf(cl, 0, 0);
+ set_passive(cl);
+ }
+}
+
static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
return -1;
}
-static inline int
-hfsc_dump_stats(struct sk_buff *skb, struct hfsc_class *cl)
-{
- cl->stats.qlen = cl->qdisc->q.qlen;
- if (qdisc_copy_stats(skb, &cl->stats, cl->stats_lock) < 0)
- goto rtattr_failure;
-
- return skb->len;
-
- rtattr_failure:
- return -1;
-}
-
-static inline int
-hfsc_dump_xstats(struct sk_buff *skb, struct hfsc_class *cl)
-{
- struct tc_hfsc_stats xstats;
-
- xstats.level = cl->level;
- xstats.period = cl->cl_vtperiod;
- xstats.work = cl->cl_total;
- xstats.rtwork = cl->cl_cumul;
- RTA_PUT(skb, TCA_XSTATS, sizeof(xstats), &xstats);
-
- return skb->len;
-
- rtattr_failure:
- return -1;
-}
-
static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
struct tcmsg *tcm)
if (hfsc_dump_curves(skb, cl) < 0)
goto rtattr_failure;
rta->rta_len = skb->tail - b;
-
- if ((hfsc_dump_stats(skb, cl) < 0) ||
- (hfsc_dump_xstats(skb, cl) < 0))
- goto rtattr_failure;
-
return skb->len;
rtattr_failure:
return -1;
}
+/*
+ * hfsc_dump_class_stats - class_ops callback (.dump_stats below) that
+ * fills a gnet_dump with this class's statistics.
+ * Refreshes qstats.qlen from the child qdisc, copies the generic basic
+ * and queue counters (plus the rate estimator when CONFIG_NET_ESTIMATOR
+ * is set), then appends the HFSC-specific xstats: class level, virtual
+ * time period (cl_vtperiod), total work (cl_total) and cumulative
+ * real-time work (cl_cumul).
+ * Returns -1 if any gnet_stats_copy_* step fails, otherwise the result
+ * of gnet_stats_copy_app().
+ */
+static int
+hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+ struct gnet_dump *d)
+{
+ struct hfsc_class *cl = (struct hfsc_class *)arg;
+ struct tc_hfsc_stats xstats;
+
+ cl->qstats.qlen = cl->qdisc->q.qlen;
+ xstats.level = cl->level;
+ xstats.period = cl->cl_vtperiod;
+ xstats.work = cl->cl_total;
+ xstats.rtwork = cl->cl_cumul;
+
+ if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+#ifdef CONFIG_NET_ESTIMATOR
+ gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+#endif
+ gnet_stats_copy_queue(d, &cl->qstats) < 0)
+ return -1;
+
+ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+}
+
+
+
+
+
+
static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
q->root.refcnt = 1;
q->root.classid = sch->handle;
q->root.sched = q;
- q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+ q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+ sch->handle);
if (q->root.qdisc == NULL)
q->root.qdisc = &noop_qdisc;
q->root.stats_lock = &sch->dev->queue_lock;
qopt.defcls = q->defcls;
RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
-
- sch->stats.qlen = sch->q.qlen;
- if (qdisc_copy_stats(skb, &sch->stats, sch->stats_lock) < 0)
- goto rtattr_failure;
-
return skb->len;
rtattr_failure:
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- int ret = NET_XMIT_SUCCESS;
- struct hfsc_class *cl = hfsc_classify(skb, sch, &ret);
- unsigned int len = skb->len;
+ struct hfsc_class *cl;
+ unsigned int len;
int err;
-
-#ifdef CONFIG_NET_CLS_ACT
- if (cl == NULL) {
- if (NET_XMIT_DROP == ret) {
- sch->stats.drops++;
- }
- return ret;
- }
-#else
+ cl = hfsc_classify(skb, sch, &err);
if (cl == NULL) {
+ if (err == NET_XMIT_BYPASS)
+ sch->qstats.drops++;
kfree_skb(skb);
- sch->stats.drops++;
- return NET_XMIT_DROP;
+ return err;
}
-#endif
+ len = skb->len;
err = cl->qdisc->enqueue(skb, cl->qdisc);
if (unlikely(err != NET_XMIT_SUCCESS)) {
- cl->stats.drops++;
- sch->stats.drops++;
+ cl->qstats.drops++;
+ sch->qstats.drops++;
return err;
}
if (cl->qdisc->q.qlen == 1)
set_active(cl, len);
- cl->stats.packets++;
- cl->stats.bytes += len;
- sch->stats.packets++;
- sch->stats.bytes += len;
+ cl->bstats.packets++;
+ cl->bstats.bytes += len;
+ sch->bstats.packets++;
+ sch->bstats.bytes += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
*/
cl = vttree_get_minvt(&q->root, cur_time);
if (cl == NULL) {
- sch->stats.overlimits++;
+ sch->qstats.overlimits++;
hfsc_schedule_watchdog(sch, cur_time);
return NULL;
}
__skb_queue_head(&q->requeue, skb);
sch->q.qlen++;
+ sch->qstats.requeues++;
return NET_XMIT_SUCCESS;
}
} else {
list_move_tail(&cl->dlist, &q->droplist);
}
- cl->stats.drops++;
- sch->stats.drops++;
+ cl->qstats.drops++;
+ sch->qstats.drops++;
sch->q.qlen--;
return len;
}
.delete = hfsc_delete_class,
.graft = hfsc_graft_class,
.leaf = hfsc_class_leaf,
+ .qlen_notify = hfsc_qlen_notify,
.get = hfsc_get_class,
.put = hfsc_put_class,
.bind_tcf = hfsc_bind_tcf,
.unbind_tcf = hfsc_unbind_tcf,
.tcf_chain = hfsc_tcf_chain,
.dump = hfsc_dump_class,
+ .dump_stats = hfsc_dump_class_stats,
.walk = hfsc_walk
};