#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
-#include <asm/bitops.h>
+#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
long avgidle;
long deficit; /* Saved deficit for WRR */
unsigned long penalized;
- struct tc_stats stats;
+ struct gnet_stats_basic bstats;
+ struct gnet_stats_queue qstats;
+ struct gnet_stats_rate_est rate_est;
+ spinlock_t *stats_lock;
struct tc_cbq_xstats xstats;
struct tcf_proto *filter_list;
*/
static struct cbq_class *
-cbq_classify(struct sk_buff *skb, struct Qdisc *sch)
+cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *head = &q->link;
struct cbq_class **defmap;
struct cbq_class *cl = NULL;
*/
if (TC_H_MAJ(prio^sch->handle) == 0 &&
(cl = cbq_class_lookup(q, prio)) != NULL)
- return cl;
+ return cl;
+ /* Default verdict when we return NULL without an explicit action:
+  * cbq_enqueue() accounts a BYPASS as a real drop. */
+ *qerr = NET_XMIT_BYPASS;
for (;;) {
int result = 0;
-
defmap = head->defaults;
/*
goto fallback;
}
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_QUEUED:
+ case TC_ACT_STOLEN:
+ *qerr = NET_XMIT_SUCCESS;
+ /* fall through: queued/stolen packets also return NULL, but
+  * *qerr marks them as consumed rather than dropped */
+ case TC_ACT_SHOT:
+ return NULL;
+ }
+#elif defined(CONFIG_NET_CLS_POLICE)
switch (result) {
case TC_POLICE_RECLASSIFY:
return cbq_reclassify(skb, cl);
static __inline__ void cbq_activate_class(struct cbq_class *cl)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
int prio = cl->cpriority;
struct cbq_class *cl_tail;
static void cbq_deactivate_class(struct cbq_class *this)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)this->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(this->qdisc);
int prio = this->cpriority;
struct cbq_class *cl;
struct cbq_class *cl_prev = q->active[prio];
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
- struct cbq_class *cl = cbq_classify(skb, sch);
+ struct cbq_sched_data *q = qdisc_priv(sch);
int len = skb->len;
- int ret = NET_XMIT_POLICED;
+ /* ret receives the classifier verdict via the new qerr argument */
+ int ret;
+ struct cbq_class *cl = cbq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_POLICE
q->rx_class = cl;
#endif
- if (cl) {
-#ifdef CONFIG_NET_CLS_POLICE
- cl->q->__parent = sch;
-#endif
- if ((ret = cl->q->enqueue(skb, cl->q)) == 0) {
- sch->q.qlen++;
- sch->stats.packets++;
- sch->stats.bytes+=len;
- cbq_mark_toplevel(q, cl);
- if (!cl->next_alive)
- cbq_activate_class(cl);
- return 0;
- }
+ /* No class: free the skb; only a BYPASS verdict counts as a drop
+  * (TC_ACT_QUEUED/STOLEN packets were consumed by an action). */
+ if (cl == NULL) {
+ if (ret == NET_XMIT_BYPASS)
+ sch->qstats.drops++;
+ kfree_skb(skb);
+ return ret;
}
- sch->stats.drops++;
- if (cl == NULL)
- kfree_skb(skb);
- else {
+#ifdef CONFIG_NET_CLS_POLICE
+ cl->q->__parent = sch;
+#endif
+ if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
+ sch->q.qlen++;
+ sch->bstats.packets++;
+ sch->bstats.bytes+=len;
cbq_mark_toplevel(q, cl);
- cl->stats.drops++;
+ if (!cl->next_alive)
+ cbq_activate_class(cl);
+ return ret;
}
+
+ sch->qstats.drops++;
+ cbq_mark_toplevel(q, cl);
+ cl->qstats.drops++;
return ret;
}
static int
cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
int ret;
if ((cl = q->tx_class) == NULL) {
kfree_skb(skb);
- sch->stats.drops++;
+ sch->qstats.drops++;
return NET_XMIT_CN;
}
q->tx_class = NULL;
#endif
if ((ret = cl->q->ops->requeue(skb, cl->q)) == 0) {
sch->q.qlen++;
+ /* successful requeues are now accounted in gnet_stats_queue */
+ sch->qstats.requeues++;
if (!cl->next_alive)
cbq_activate_class(cl);
return 0;
}
- sch->stats.drops++;
- cl->stats.drops++;
+ sch->qstats.drops++;
+ cl->qstats.drops++;
return ret;
}
static void cbq_ovl_classic(struct cbq_class *cl)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
if (!cl->delayed) {
static void cbq_ovl_rclassic(struct cbq_class *cl)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
struct cbq_class *this = cl;
do {
static void cbq_ovl_delay(struct cbq_class *cl)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
if (!cl->delayed) {
static void cbq_ovl_lowprio(struct cbq_class *cl)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
cl->penalized = jiffies + cl->penalty;
static void cbq_undelay(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc*)arg;
- struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
long delay = 0;
unsigned pmask;
{
int len = skb->len;
struct Qdisc *sch = child->__parent;
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = q->rx_class;
q->rx_class = NULL;
if (cl->q->enqueue(skb, cl->q) == 0) {
sch->q.qlen++;
- sch->stats.packets++;
- sch->stats.bytes+=len;
+ sch->bstats.packets++;
+ sch->bstats.bytes+=len;
if (!cl->next_alive)
cbq_activate_class(cl);
return 0;
}
- sch->stats.drops++;
+ sch->qstats.drops++;
return 0;
}
- sch->stats.drops++;
+ sch->qstats.drops++;
return -1;
}
#endif
long avgidle = cl->avgidle;
long idle;
- cl->stats.packets++;
- cl->stats.bytes += len;
+ cl->bstats.packets++;
+ cl->bstats.bytes += len;
/*
(now - last) is total time between packet right edges.
static __inline__ struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
struct cbq_class *this_cl = cl;
if (cl->tparent == NULL)
no another solution exists.
*/
if ((cl = cl->borrow) == NULL) {
- this_cl->stats.overlimits++;
+ this_cl->qstats.overlimits++;
this_cl->overlimit(this_cl);
return NULL;
}
static __inline__ struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl_tail, *cl_prev, *cl;
struct sk_buff *skb;
int deficit;
static __inline__ struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
unsigned activemask;
cbq_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
psched_time_t now;
psched_tdiff_t incr;
Sigh... start watchdog timer in the last case. */
if (sch->q.qlen) {
- sch->stats.overlimits++;
- if (q->wd_expires && !netif_queue_stopped(sch->dev)) {
+ sch->qstats.overlimits++;
+ if (q->wd_expires) {
long delay = PSCHED_US2JIFFIE(q->wd_expires);
if (delay <= 0)
delay = 1;
static void cbq_sync_defmap(struct cbq_class *cl)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
struct cbq_class *split = cl->split;
unsigned h;
int i;
static void cbq_unlink_class(struct cbq_class *this)
{
struct cbq_class *cl, **clp;
- struct cbq_sched_data *q = (struct cbq_sched_data*)this->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(this->qdisc);
for (clp = &q->classes[cbq_hash(this->classid)]; (cl = *clp) != NULL; clp = &cl->next) {
if (cl == this) {
static void cbq_link_class(struct cbq_class *this)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)this->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(this->qdisc);
unsigned h = cbq_hash(this->classid);
struct cbq_class *parent = this->tparent;
static unsigned int cbq_drop(struct Qdisc* sch)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl, *cl_head;
int prio;
unsigned int len;
static void
cbq_reset(struct Qdisc* sch)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
int prio;
unsigned h;
static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
if (wrr->allot)
cl->allot = wrr->allot;
static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct rtattr *tb[TCA_CBQ_MAX];
struct tc_ratespec *r;
- if (rtattr_parse(tb, TCA_CBQ_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)) < 0 ||
+ if (rtattr_parse_nested(tb, TCA_CBQ_MAX, opt) < 0 ||
tb[TCA_CBQ_RTAB-1] == NULL || tb[TCA_CBQ_RATE-1] == NULL ||
RTA_PAYLOAD(tb[TCA_CBQ_RATE-1]) < sizeof(struct tc_ratespec))
return -EINVAL;
q->link.ewma_log = TC_CBQ_DEF_EWMA;
q->link.avpkt = q->link.allot/2;
q->link.minidle = -0x7FFFFFFF;
- q->link.stats.lock = &sch->dev->queue_lock;
+ q->link.stats_lock = &sch->dev->queue_lock;
init_timer(&q->wd_timer);
q->wd_timer.data = (unsigned long)sch;
opt.strategy = cl->ovl_strategy;
opt.priority2 = cl->priority2+1;
+ opt.pad = 0;
opt.penalty = (cl->penalty*1000)/HZ;
RTA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
return skb->len;
if (cl->police) {
opt.police = cl->police;
+ opt.__res1 = 0;
+ opt.__res2 = 0;
RTA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt);
}
return skb->len;
return 0;
}
-int cbq_copy_xstats(struct sk_buff *skb, struct tc_cbq_xstats *st)
-{
- RTA_PUT(skb, TCA_XSTATS, sizeof(*st), st);
- return 0;
-
-rtattr_failure:
- return -1;
-}
-
-
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
unsigned char *b = skb->tail;
struct rtattr *rta;
if (cbq_dump_attr(skb, &q->link) < 0)
goto rtattr_failure;
rta->rta_len = skb->tail - b;
- spin_lock_bh(&sch->dev->queue_lock);
- q->link.xstats.avgidle = q->link.avgidle;
- if (cbq_copy_xstats(skb, &q->link.xstats)) {
- spin_unlock_bh(&sch->dev->queue_lock);
- goto rtattr_failure;
- }
- spin_unlock_bh(&sch->dev->queue_lock);
return skb->len;
rtattr_failure:
return -1;
}
+/*
+ * cbq_dump_stats - qdisc-level stats hook: snapshot the current avgidle
+ * into the root (link) class' xstats and emit them as application data
+ * through the gnet_dump interface.
+ */
+static int
+cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct cbq_sched_data *q = qdisc_priv(sch);
+
+ q->link.xstats.avgidle = q->link.avgidle;
+ return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
+}
+
static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
struct sk_buff *skb, struct tcmsg *tcm)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
struct cbq_class *cl = (struct cbq_class*)arg;
unsigned char *b = skb->tail;
struct rtattr *rta;
if (cbq_dump_attr(skb, cl) < 0)
goto rtattr_failure;
rta->rta_len = skb->tail - b;
- cl->stats.qlen = cl->q->q.qlen;
- if (qdisc_copy_stats(skb, &cl->stats))
- goto rtattr_failure;
- spin_lock_bh(&sch->dev->queue_lock);
+ return skb->len;
+
+rtattr_failure:
+ skb_trim(skb, b - skb->data);
+ return -1;
+}
+
+/*
+ * cbq_dump_class_stats - per-class stats hook: snapshot qlen, avgidle
+ * and undertime, then emit basic, (optional) rate-estimator, queue and
+ * application (xstats) statistics via the gnet_dump helpers.
+ */
+static int
+cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+ struct gnet_dump *d)
+{
+ struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_class *cl = (struct cbq_class*)arg;
+
+ cl->qstats.qlen = cl->q->q.qlen;
cl->xstats.avgidle = cl->avgidle;
cl->xstats.undertime = 0;
+
if (!PSCHED_IS_PASTPERFECT(cl->undertime))
cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now);
- q->link.xstats.avgidle = q->link.avgidle;
- if (cbq_copy_xstats(skb, &cl->xstats)) {
- spin_unlock_bh(&sch->dev->queue_lock);
- goto rtattr_failure;
- }
- spin_unlock_bh(&sch->dev->queue_lock);
- return skb->len;
+ if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+#ifdef CONFIG_NET_ESTIMATOR
+ gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+#endif
+ gnet_stats_copy_queue(d, &cl->qstats) < 0)
+ return -1;
-rtattr_failure:
- skb_trim(skb, b - skb->data);
- return -1;
+ return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = cbq_class_lookup(q, classid);
if (cl) {
}
}
-static void cbq_destroy_class(struct cbq_class *cl)
+static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
+ struct cbq_sched_data *q = qdisc_priv(sch);
+
+ /* callers (cbq_destroy/cbq_delete/cbq_put) must have unbound all
+  * filters from this class already */
+ BUG_TRAP(!cl->filters);
+
cbq_destroy_filters(cl);
qdisc_destroy(cl->q);
qdisc_put_rtab(cl->R_tab);
#ifdef CONFIG_NET_ESTIMATOR
- qdisc_kill_estimator(&cl->stats);
+ gen_kill_estimator(&cl->bstats, &cl->rate_est);
#endif
+ /* the link (root) class is embedded in cbq_sched_data — never kfree it */
+ if (cl != &q->link)
+ kfree(cl);
}
static void
cbq_destroy(struct Qdisc* sch)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
unsigned h;
#ifdef CONFIG_NET_CLS_POLICE
q->rx_class = NULL;
#endif
- for (h = 0; h < 16; h++) {
+ /*
+ * Filters must be destroyed first because we don't destroy the
+ * classes from root to leafs which means that filters can still
+ * be bound to classes which have been destroyed already. --TGR '04
+ */
+ for (h = 0; h < 16; h++)
for (cl = q->classes[h]; cl; cl = cl->next)
cbq_destroy_filters(cl);
- }
+ /* &q->link is visited here too: cbq_destroy_class() releases its
+  * rtab and estimator but skips the kfree for the embedded class,
+  * so the explicit qdisc_put_rtab(q->link.R_tab) below goes away */
for (h = 0; h < 16; h++) {
struct cbq_class *next;
for (cl = q->classes[h]; cl; cl = next) {
next = cl->next;
- if (cl != &q->link)
- cbq_destroy_class(cl);
+ cbq_destroy_class(sch, cl);
}
}
-
- qdisc_put_rtab(q->link.R_tab);
}
static void cbq_put(struct Qdisc *sch, unsigned long arg)
if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_POLICE
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
spin_lock_bh(&sch->dev->queue_lock);
if (q->rx_class == cl)
spin_unlock_bh(&sch->dev->queue_lock);
#endif
- cbq_destroy_class(cl);
+ cbq_destroy_class(sch, cl);
}
}
unsigned long *arg)
{
int err;
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class*)*arg;
struct rtattr *opt = tca[TCA_OPTIONS-1];
struct rtattr *tb[TCA_CBQ_MAX];
struct cbq_class *parent;
struct qdisc_rate_table *rtab = NULL;
- if (opt==NULL ||
- rtattr_parse(tb, TCA_CBQ_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)))
+ if (opt==NULL || rtattr_parse_nested(tb, TCA_CBQ_MAX, opt))
return -EINVAL;
if (tb[TCA_CBQ_OVL_STRATEGY-1] &&
sch_tree_unlock(sch);
#ifdef CONFIG_NET_ESTIMATOR
- if (tca[TCA_RATE-1]) {
- qdisc_kill_estimator(&cl->stats);
- qdisc_new_estimator(&cl->stats, tca[TCA_RATE-1]);
- }
+ if (tca[TCA_RATE-1])
+ gen_replace_estimator(&cl->bstats, &cl->rate_est,
+ cl->stats_lock, tca[TCA_RATE-1]);
#endif
return 0;
}
cl->allot = parent->allot;
cl->quantum = cl->allot;
cl->weight = cl->R_tab->rate.rate;
- cl->stats.lock = &sch->dev->queue_lock;
+ cl->stats_lock = &sch->dev->queue_lock;
sch_tree_lock(sch);
cbq_link_class(cl);
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
- qdisc_new_estimator(&cl->stats, tca[TCA_RATE-1]);
+ gen_new_estimator(&cl->bstats, &cl->rate_est,
+ cl->stats_lock, tca[TCA_RATE-1]);
#endif
*arg = (unsigned long)cl;
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class*)arg;
if (cl->filters || cl->children || cl == &q->link)
sch_tree_unlock(sch);
if (--cl->refcnt == 0)
- cbq_destroy_class(cl);
+ cbq_destroy_class(sch, cl);
return 0;
}
static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
if (cl == NULL)
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *p = (struct cbq_class*)parent;
struct cbq_class *cl = cbq_class_lookup(q, classid);
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
unsigned h;
if (arg->stop)
.bind_tcf = cbq_bind_filter,
.unbind_tcf = cbq_unbind_filter,
.dump = cbq_dump_class,
+ .dump_stats = cbq_dump_class_stats,
};
static struct Qdisc_ops cbq_qdisc_ops = {
.destroy = cbq_destroy,
.change = NULL,
.dump = cbq_dump,
+ .dump_stats = cbq_dump_stats,
.owner = THIS_MODULE,
};