This commit was manufactured by cvs2svn to create tag
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 4ea63bc..e98f79b 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -18,7 +18,7 @@
 #include <linux/module.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>
-#include <linux/bitops.h>
+#include <asm/bitops.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -228,13 +228,13 @@ red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                                q->qave >>= 1;
                }
        } else {
-               q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
+               q->qave += sch->stats.backlog - (q->qave >> q->Wlog);
                /* NOTE:
                   q->qave is fixed point number with point at Wlog.
                   The formulae above is equvalent to floating point
                   version:
 
-                  qave = qave*(1-W) + sch->qstats.backlog*W;
+                  qave = qave*(1-W) + sch->stats.backlog*W;
                                                           --ANK (980924)
                 */
        }
@@ -242,22 +242,22 @@ red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
        if (q->qave < q->qth_min) {
                q->qcount = -1;
 enqueue:
-               if (sch->qstats.backlog + skb->len <= q->limit) {
+               if (sch->stats.backlog + skb->len <= q->limit) {
                        __skb_queue_tail(&sch->q, skb);
-                       sch->qstats.backlog += skb->len;
-                       sch->bstats.bytes += skb->len;
-                       sch->bstats.packets++;
+                       sch->stats.backlog += skb->len;
+                       sch->stats.bytes += skb->len;
+                       sch->stats.packets++;
                        return NET_XMIT_SUCCESS;
                } else {
                        q->st.pdrop++;
                }
                kfree_skb(skb);
-               sch->qstats.drops++;
+               sch->stats.drops++;
                return NET_XMIT_DROP;
        }
        if (q->qave >= q->qth_max) {
                q->qcount = -1;
-               sch->qstats.overlimits++;
+               sch->stats.overlimits++;
 mark:
                if  (!(q->flags&TC_RED_ECN) || !red_ecn_mark(skb)) {
                        q->st.early++;
@@ -288,7 +288,7 @@ mark:
                        goto enqueue;
                q->qcount = 0;
                q->qR = net_random()&q->Rmask;
-               sch->qstats.overlimits++;
+               sch->stats.overlimits++;
                goto mark;
        }
        q->qR = net_random()&q->Rmask;
@@ -296,7 +296,7 @@ mark:
 
 drop:
        kfree_skb(skb);
-       sch->qstats.drops++;
+       sch->stats.drops++;
        return NET_XMIT_CN;
 }
 
@@ -308,8 +308,7 @@ red_requeue(struct sk_buff *skb, struct Qdisc* sch)
        PSCHED_SET_PASTPERFECT(q->qidlestart);
 
        __skb_queue_head(&sch->q, skb);
-       sch->qstats.backlog += skb->len;
-       sch->qstats.requeues++;
+       sch->stats.backlog += skb->len;
        return 0;
 }
 
@@ -321,7 +320,7 @@ red_dequeue(struct Qdisc* sch)
 
        skb = __skb_dequeue(&sch->q);
        if (skb) {
-               sch->qstats.backlog -= skb->len;
+               sch->stats.backlog -= skb->len;
                return skb;
        }
        PSCHED_GET_TIME(q->qidlestart);
@@ -336,8 +335,8 @@ static unsigned int red_drop(struct Qdisc* sch)
        skb = __skb_dequeue_tail(&sch->q);
        if (skb) {
                unsigned int len = skb->len;
-               sch->qstats.backlog -= len;
-               sch->qstats.drops++;
+               sch->stats.backlog -= len;
+               sch->stats.drops++;
                q->st.other++;
                kfree_skb(skb);
                return len;
@@ -351,7 +350,7 @@ static void red_reset(struct Qdisc* sch)
        struct red_sched_data *q = qdisc_priv(sch);
 
        __skb_queue_purge(&sch->q);
-       sch->qstats.backlog = 0;
+       sch->stats.backlog = 0;
        PSCHED_SET_PASTPERFECT(q->qidlestart);
        q->qave = 0;
        q->qcount = -1;
@@ -396,6 +395,16 @@ static int red_init(struct Qdisc* sch, struct rtattr *opt)
        return red_change(sch, opt);
 }
 
+
+int red_copy_xstats(struct sk_buff *skb, struct tc_red_xstats *st)
+{
+        RTA_PUT(skb, TCA_XSTATS, sizeof(*st), st);
+        return 0;
+
+rtattr_failure:
+        return 1;
+}
+
 static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
        struct red_sched_data *q = qdisc_priv(sch);
@@ -415,6 +424,9 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
        RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
        rta->rta_len = skb->tail - b;
 
+       if (red_copy_xstats(skb, &q->st))
+               goto rtattr_failure;
+
        return skb->len;
 
 rtattr_failure:
@@ -422,13 +434,6 @@ rtattr_failure:
        return -1;
 }
 
-static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
-{
-       struct red_sched_data *q = qdisc_priv(sch);
-
-       return gnet_stats_copy_app(d, &q->st, sizeof(q->st));
-}
-
 static struct Qdisc_ops red_qdisc_ops = {
        .next           =       NULL,
        .cl_ops         =       NULL,
@@ -442,7 +447,6 @@ static struct Qdisc_ops red_qdisc_ops = {
        .reset          =       red_reset,
        .change         =       red_change,
        .dump           =       red_dump,
-       .dump_stats     =       red_dump_stats,
        .owner          =       THIS_MODULE,
 };
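
As a side note on the ANK comment in red_enqueue() above: the fixed-point update
q->qave += backlog - (q->qave >> Wlog), with q->qave carrying its binary point at
Wlog bits, is the integer form of qave = qave*(1-W) + backlog*W for W = 2^-Wlog.
The standalone userspace sketch below (illustrative names only, not part of this
patch or of the kernel) runs both forms side by side; the two averages agree up to
the truncation introduced by the integer shift.

/*
 * Sketch: compare the fixed-point EWMA used by sch_red with the
 * floating-point version from the comment.  Wlog = 9 (W = 1/512) is
 * just an example value.
 */
#include <stdio.h>

int main(void)
{
	const unsigned Wlog = 9;             /* W = 2^-Wlog = 1/512        */
	unsigned long qave = 0;              /* fixed point, point at Wlog */
	double avg = 0.0;                    /* floating-point reference   */
	const double W = 1.0 / (1 << Wlog);
	const unsigned backlog[] = { 1500, 3000, 4500, 3000, 0, 1500 };
	unsigned i;

	for (i = 0; i < sizeof(backlog) / sizeof(backlog[0]); i++) {
		/* integer form, as in red_enqueue() */
		qave += backlog[i] - (qave >> Wlog);
		/* floating-point form from the ANK comment */
		avg = avg * (1.0 - W) + backlog[i] * W;
		printf("step %u: fixed point = %.3f, float = %.3f\n",
		       i, (double)qave / (1 << Wlog), avg);
	}
	return 0;
}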