#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
-#include <asm/bitops.h>
+#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
q->qave >>= 1;
}
} else {
- q->qave += sch->stats.backlog - (q->qave >> q->Wlog);
+ q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
/* NOTE:
q->qave is fixed point number with point at Wlog.
The formulae above is equvalent to floating point
version:
- qave = qave*(1-W) + sch->stats.backlog*W;
+ qave = qave*(1-W) + sch->qstats.backlog*W;
--ANK (980924)
*/
}
if (q->qave < q->qth_min) {
q->qcount = -1;
enqueue:
- if (sch->stats.backlog + skb->len <= q->limit) {
+ if (sch->qstats.backlog + skb->len <= q->limit) {
__skb_queue_tail(&sch->q, skb);
- sch->stats.backlog += skb->len;
- sch->stats.bytes += skb->len;
- sch->stats.packets++;
+ sch->qstats.backlog += skb->len;
+ sch->bstats.bytes += skb->len;
+ sch->bstats.packets++;
return NET_XMIT_SUCCESS;
} else {
q->st.pdrop++;
}
kfree_skb(skb);
- sch->stats.drops++;
+ sch->qstats.drops++;
return NET_XMIT_DROP;
}
if (q->qave >= q->qth_max) {
q->qcount = -1;
- sch->stats.overlimits++;
+ sch->qstats.overlimits++;
mark:
if (!(q->flags&TC_RED_ECN) || !red_ecn_mark(skb)) {
q->st.early++;
goto enqueue;
q->qcount = 0;
q->qR = net_random()&q->Rmask;
- sch->stats.overlimits++;
+ sch->qstats.overlimits++;
goto mark;
}
q->qR = net_random()&q->Rmask;
drop:
kfree_skb(skb);
- sch->stats.drops++;
+ sch->qstats.drops++;
return NET_XMIT_CN;
}
PSCHED_SET_PASTPERFECT(q->qidlestart);
__skb_queue_head(&sch->q, skb);
- sch->stats.backlog += skb->len;
+ sch->qstats.backlog += skb->len;
+ sch->qstats.requeues++;
return 0;
}
skb = __skb_dequeue(&sch->q);
if (skb) {
- sch->stats.backlog -= skb->len;
+ sch->qstats.backlog -= skb->len;
return skb;
}
PSCHED_GET_TIME(q->qidlestart);
skb = __skb_dequeue_tail(&sch->q);
if (skb) {
unsigned int len = skb->len;
- sch->stats.backlog -= len;
- sch->stats.drops++;
+ sch->qstats.backlog -= len;
+ sch->qstats.drops++;
q->st.other++;
kfree_skb(skb);
return len;
struct red_sched_data *q = qdisc_priv(sch);
__skb_queue_purge(&sch->q);
- sch->stats.backlog = 0;
+ sch->qstats.backlog = 0;
PSCHED_SET_PASTPERFECT(q->qidlestart);
q->qave = 0;
q->qcount = -1;
struct tc_red_qopt *ctl;
if (opt == NULL ||
- rtattr_parse(tb, TCA_RED_STAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
+ rtattr_parse_nested(tb, TCA_RED_STAB, opt) ||
tb[TCA_RED_PARMS-1] == 0 || tb[TCA_RED_STAB-1] == 0 ||
RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < 256)
return red_change(sch, opt);
}
-
-int red_copy_xstats(struct sk_buff *skb, struct tc_red_xstats *st)
-{
- RTA_PUT(skb, TCA_XSTATS, sizeof(*st), st);
- return 0;
-
-rtattr_failure:
- return 1;
-}
-
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct red_sched_data *q = qdisc_priv(sch);
RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
rta->rta_len = skb->tail - b;
- if (red_copy_xstats(skb, &q->st))
- goto rtattr_failure;
-
return skb->len;
rtattr_failure:
return -1;
}
+/* Dump RED-specific statistics (struct tc_red_xstats in q->st) through the
+ * generic netlink stats framework.  Replaces the removed red_copy_xstats()
+ * helper, which emitted TCA_XSTATS by hand from red_dump().
+ */
+static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+	struct red_sched_data *q = qdisc_priv(sch);
+
+	return gnet_stats_copy_app(d, &q->st, sizeof(q->st));
+}
+
static struct Qdisc_ops red_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	NULL,
	.reset		=	red_reset,
	.change		=	red_change,
	.dump		=	red_dump,
+	/* Hook xstats export into the generic stats dump path. */
+	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};