This commit was manufactured by cvs2svn to create tag

diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 1f9bf9d..575ca50 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -34,7 +34,6 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kmod.h>
-#include <linux/list.h>
 
 #include <net/sock.h>
 #include <net/pkt_sched.h>
@@ -196,7 +195,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
        struct Qdisc *q;
 
-       list_for_each_entry(q, &dev->qdisc_list, list) {
+       for (q = dev->qdisc_list; q; q = q->next) {
                if (q->handle == handle)
                        return q;
        }
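
Note on this hunk: the "-" side walks dev->qdisc_list with list_for_each_entry(), i.e. a struct list_head embedded in each Qdisc, while the "+" side restores the older representation, a singly linked chain through Qdisc->next with dev->qdisc_list as the head pointer. The later qdisc_create() and dump hunks change in the same direction for the same reason. Below is a standalone userspace sketch of that older shape; names such as fake_qdisc and lookup() are illustrative only, this is a model, not kernel code.

    #include <stdio.h>
    #include <stddef.h>

    struct fake_qdisc {
        unsigned int handle;
        struct fake_qdisc *next;        /* plays the role of Qdisc->next */
    };

    /* Same shape as qdisc_lookup() on the "+" side of the hunk above. */
    static struct fake_qdisc *lookup(struct fake_qdisc *head, unsigned int handle)
    {
        struct fake_qdisc *q;

        for (q = head; q; q = q->next)
            if (q->handle == handle)
                return q;
        return NULL;
    }

    int main(void)
    {
        struct fake_qdisc a = { 0x10000, NULL };
        struct fake_qdisc b = { 0x20000, &a };  /* head: b -> a */

        printf("found %#x\n", lookup(&b, 0x10000)->handle);
        return 0;
    }
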
@@ -307,7 +306,8 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
        if (dev->flags & IFF_UP)
                dev_deactivate(dev);
 
-       qdisc_lock_tree(dev);
+       write_lock(&qdisc_tree_lock);
+       spin_lock_bh(&dev->queue_lock);
        if (qdisc && qdisc->flags&TCQ_F_INGRES) {
                oqdisc = dev->qdisc_ingress;
                /* Prune old scheduler */
@@ -334,7 +334,8 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
                dev->qdisc = &noop_qdisc;
        }
 
-       qdisc_unlock_tree(dev);
+       spin_unlock_bh(&dev->queue_lock);
+       write_unlock(&qdisc_tree_lock);
 
        if (dev->flags & IFF_UP)
                dev_activate(dev);
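
The two dev_graft_qdisc() hunks above replace qdisc_lock_tree()/qdisc_unlock_tree() with an explicit lock pair, so the substitution itself shows what the helpers bundle: take the global qdisc_tree_lock for writing, then the per-device queue_lock with bottom halves disabled, and release in reverse order. A userspace sketch of that pairing follows, using pthreads; the fake_* names are mine, and spin_lock_bh()'s BH disabling has no userspace analogue, so it is only noted in comments. Whether the in-tree helper also uses a _bh variant on the tree lock cannot be inferred from this hunk alone.

    #include <pthread.h>
    #include <stdio.h>

    /* Global lock protecting the qdisc tree, like qdisc_tree_lock. */
    static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;

    struct fake_device {
        pthread_mutex_t queue_lock;     /* stands in for dev->queue_lock */
    };

    static void fake_qdisc_lock_tree(struct fake_device *dev)
    {
        pthread_rwlock_wrlock(&tree_lock);      /* write_lock(&qdisc_tree_lock)   */
        pthread_mutex_lock(&dev->queue_lock);   /* spin_lock_bh(&dev->queue_lock) */
    }

    static void fake_qdisc_unlock_tree(struct fake_device *dev)
    {
        pthread_mutex_unlock(&dev->queue_lock); /* spin_unlock_bh(&dev->queue_lock) */
        pthread_rwlock_unlock(&tree_lock);      /* write_unlock(&qdisc_tree_lock)   */
    }

    int main(void)
    {
        struct fake_device dev = { .queue_lock = PTHREAD_MUTEX_INITIALIZER };

        fake_qdisc_lock_tree(&dev);
        /* ... swap dev->qdisc here, as dev_graft_qdisc() does ... */
        fake_qdisc_unlock_tree(&dev);
        puts("tree lock taken first, released last");
        return 0;
    }
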
@@ -422,7 +423,6 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
 
        memset(sch, 0, size);
 
-       INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);
 
        if (handle == TC_H_INGRESS)
@@ -432,7 +432,6 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev = dev;
-       dev_hold(dev);
        atomic_set(&sch->refcnt, 1);
        sch->stats_lock = &dev->queue_lock;
        if (handle == 0) {
@@ -455,10 +454,10 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
         * before we set a netdevice's qdisc pointer to sch */
        smp_wmb();
        if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
-               qdisc_lock_tree(dev);
-               list_add_tail(&sch->list, &dev->qdisc_list);
-               qdisc_unlock_tree(dev);
-
+               write_lock(&qdisc_tree_lock);
+               sch->next = dev->qdisc_list;
+               dev->qdisc_list = sch;
+               write_unlock(&qdisc_tree_lock);
 #ifdef CONFIG_NET_ESTIMATOR
                if (tca[TCA_RATE-1])
                        qdisc_new_estimator(&sch->stats, sch->stats_lock,
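
Same data-structure change inside qdisc_create(): list_add_tail() appended the new qdisc to dev->qdisc_list, while the "+" side pushes it onto the head of the ->next chain under qdisc_tree_lock. One observable difference is enumeration order: with a head push the dump loops later in this patch see the newest qdisc first rather than last. A minimal model of the head push, again with hypothetical names:

    #include <stdio.h>
    #include <stddef.h>

    struct fake_qdisc {
        unsigned int handle;
        struct fake_qdisc *next;
    };

    /* Head push, as on the "+" side: the newest entry becomes the head. */
    static void push(struct fake_qdisc **head, struct fake_qdisc *sch)
    {
        sch->next = *head;      /* sch->next = dev->qdisc_list */
        *head = sch;            /* dev->qdisc_list = sch       */
    }

    int main(void)
    {
        struct fake_qdisc *list = NULL;
        struct fake_qdisc a = { 0x10000, NULL }, b = { 0x20000, NULL };
        struct fake_qdisc *q;

        push(&list, &a);
        push(&list, &b);        /* b now precedes a */
        for (q = list; q; q = q->next)
            printf("%#x\n", q->handle);
        return 0;
    }
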
@@ -753,7 +752,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
        nlh->nlmsg_flags = flags;
        tcm = NLMSG_DATA(nlh);
        tcm->tcm_family = AF_UNSPEC;
-       tcm->tcm_ifindex = q->dev->ifindex;
+       tcm->tcm_ifindex = q->dev ? q->dev->ifindex : 0;
        tcm->tcm_parent = clid;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = atomic_read(&q->refcnt);
@@ -814,21 +813,18 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
                        continue;
                if (idx > s_idx)
                        s_q_idx = 0;
-               read_lock_bh(&qdisc_tree_lock);
-               q_idx = 0;
-               list_for_each_entry(q, &dev->qdisc_list, list) {
-                       if (q_idx < s_q_idx) {
-                               q_idx++;
+               read_lock(&qdisc_tree_lock);
+               for (q = dev->qdisc_list, q_idx = 0; q;
+                    q = q->next, q_idx++) {
+                       if (q_idx < s_q_idx)
                                continue;
-                       }
                        if (tc_fill_qdisc(skb, q, 0, NETLINK_CB(cb->skb).pid,
                                          cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) {
-                               read_unlock_bh(&qdisc_tree_lock);
+                               read_unlock(&qdisc_tree_lock);
                                goto done;
                        }
-                       q_idx++;
                }
-               read_unlock_bh(&qdisc_tree_lock);
+               read_unlock(&qdisc_tree_lock);
        }
 
 done:
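
tc_dump_qdisc() keeps the usual rtnetlink dump pattern through this restructuring: cb->args[] carries a cursor (s_idx for the device, s_q_idx for the qdisc within that device), entries already sent are skipped, and the walk stops as soon as tc_fill_qdisc() reports the skb is full so a later callback can resume where it left off. The hunk also drops the _bh variants on qdisc_tree_lock, presumably to match the older locking scheme on the "+" side; that is independent of the cursor logic. A simplified, self-contained model of the resume cursor follows; the names and the integer "budget" standing in for skb space are illustrative.

    #include <stdio.h>

    /* Model of a netlink dump callback: emit items starting at *cursor,
     * stop when the buffer is full, and record where to resume. */
    static int dump(const int *items, int n, int *cursor, int budget)
    {
        int idx;

        for (idx = 0; idx < n; idx++) {
            if (idx < *cursor)          /* already sent in an earlier pass */
                continue;
            if (budget-- == 0) {        /* "skb full": stop, keep the cursor */
                *cursor = idx;
                return 1;               /* more to come */
            }
            printf("item %d\n", items[idx]);
        }
        *cursor = idx;
        return 0;                       /* done */
    }

    int main(void)
    {
        int items[] = { 10, 20, 30, 40, 50 };
        int cursor = 0;

        while (dump(items, 5, &cursor, 2))      /* two items per "skb" */
            printf("-- resume at %d --\n", cursor);
        return 0;
    }
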
@@ -973,7 +969,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
        nlh->nlmsg_flags = flags;
        tcm = NLMSG_DATA(nlh);
        tcm->tcm_family = AF_UNSPEC;
-       tcm->tcm_ifindex = q->dev->ifindex;
+       tcm->tcm_ifindex = q->dev ? q->dev->ifindex : 0;
        tcm->tcm_parent = q->handle;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = 0;
@@ -1037,16 +1033,13 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
                return 0;
 
        s_t = cb->args[0];
-       t = 0;
-
-       read_lock_bh(&qdisc_tree_lock);
-       list_for_each_entry(q, &dev->qdisc_list, list) {
-               if (t < s_t || !q->ops->cl_ops ||
-                   (tcm->tcm_parent &&
-                    TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
-                       t++;
+
+       read_lock(&qdisc_tree_lock);
+       for (q=dev->qdisc_list, t=0; q; q = q->next, t++) {
+               if (t < s_t) continue;
+               if (!q->ops->cl_ops) continue;
+               if (tcm->tcm_parent && TC_H_MAJ(tcm->tcm_parent) != q->handle)
                        continue;
-               }
                if (t > s_t)
                        memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
                arg.w.fn = qdisc_class_dump;
@@ -1059,9 +1052,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
                cb->args[1] = arg.w.count;
                if (arg.w.stop)
                        break;
-               t++;
        }
-       read_unlock_bh(&qdisc_tree_lock);
+       read_unlock(&qdisc_tree_lock);
 
        cb->args[0] = t;
 
@@ -1096,7 +1088,7 @@ static struct file_operations psched_fops = {
 };     
 #endif
 
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
 int psched_tod_diff(int delta_sec, int bound)
 {
        int delta;
@@ -1111,34 +1103,42 @@ int psched_tod_diff(int delta_sec, int bound)
 EXPORT_SYMBOL(psched_tod_diff);
 #endif
 
-#ifdef CONFIG_NET_SCH_CLK_CPU
+psched_time_t psched_time_base;
+
+#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
 psched_tdiff_t psched_clock_per_hz;
 int psched_clock_scale;
 EXPORT_SYMBOL(psched_clock_per_hz);
 EXPORT_SYMBOL(psched_clock_scale);
+#endif
 
-psched_time_t psched_time_base;
-cycles_t psched_time_mark;
+#ifdef PSCHED_WATCHER
+PSCHED_WATCHER psched_time_mark;
 EXPORT_SYMBOL(psched_time_mark);
 EXPORT_SYMBOL(psched_time_base);
 
-/*
- * Periodically adjust psched_time_base to avoid overflow
- * with 32-bit get_cycles(). Safe up to 4GHz CPU.
- */
 static void psched_tick(unsigned long);
+
 static struct timer_list psched_timer = TIMER_INITIALIZER(psched_tick, 0, 0);
 
 static void psched_tick(unsigned long dummy)
 {
-       if (sizeof(cycles_t) == sizeof(u32)) {
-               psched_time_t dummy_stamp;
-               PSCHED_GET_TIME(dummy_stamp);
-               psched_timer.expires = jiffies + 1*HZ;
-               add_timer(&psched_timer);
-       }
+#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
+       psched_time_t dummy_stamp;
+       PSCHED_GET_TIME(dummy_stamp);
+       /* It is OK up to 4GHz cpu */
+       psched_timer.expires = jiffies + 1*HZ;
+#else
+       unsigned long now = jiffies;
+       psched_time_base += ((u64)(now-psched_time_mark))<<PSCHED_JSCALE;
+       psched_time_mark = now;
+       psched_timer.expires = now + 60*60*HZ;
+#endif
+       add_timer(&psched_timer);
 }
+#endif
 
+#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
 int __init psched_calibrate_clock(void)
 {
        psched_time_t stamp, stamp1;
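
In the restored psched_tick(), the PSCHED_CPU branch only reads the clock once a second (the comment notes this is fine up to a 4GHz counter), while the other branch folds the jiffies elapsed since psched_time_mark into the 64-bit psched_time_base, scaled by PSCHED_JSCALE, and re-arms the timer an hour out. Rebasing well within one wrap period keeps now - psched_time_mark small, so the unsigned 32-bit subtraction stays correct even when jiffies wraps. Below is a standalone model of that accumulate-and-rebase step; the JSCALE value and all names are placeholders.

    #include <stdint.h>
    #include <stdio.h>

    #define JSCALE 10                           /* placeholder for PSCHED_JSCALE */

    static uint64_t time_base;                  /* models psched_time_base */
    static uint32_t time_mark = 0xfffffff0u;    /* jiffies, close to wrapping */

    /* One watcher tick: fold the elapsed jiffies into the 64-bit base. */
    static void watcher_tick(uint32_t now)
    {
        time_base += (uint64_t)(now - time_mark) << JSCALE;
        time_mark = now;
    }

    int main(void)
    {
        watcher_tick(0x00000010u);      /* 0x20 jiffies later, after the wrap */
        printf("base=%llu mark=%#x\n",
               (unsigned long long)time_base, (unsigned)time_mark);
        /* prints base=32768 (0x20 << 10) even though now < the old mark */
        return 0;
    }
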
@@ -1147,7 +1147,9 @@ int __init psched_calibrate_clock(void)
        long rdelay;
        unsigned long stop;
 
+#ifdef PSCHED_WATCHER
        psched_tick(0);
+#endif
        stop = jiffies + HZ/10;
        PSCHED_GET_TIME(stamp);
        do_gettimeofday(&tv);
@@ -1177,12 +1179,15 @@ static int __init pktsched_init(void)
 {
        struct rtnetlink_link *link_p;
 
-#ifdef CONFIG_NET_SCH_CLK_CPU
+#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
        if (psched_calibrate_clock() < 0)
                return -1;
-#elif defined(CONFIG_NET_SCH_CLK_JIFFIES)
+#elif PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES
        psched_tick_per_us = HZ<<PSCHED_JSCALE;
        psched_us_per_tick = 1000000;
+#ifdef PSCHED_WATCHER
+       psched_tick(0);
+#endif
 #endif
 
        link_p = rtnetlink_links[PF_UNSPEC];