treewide: Use pr_fmt and pr_<level>
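
Defining pr_fmt() before the first include makes every pr_<level>()
call in this file automatically prefix its message with KBUILD_MODNAME,
so the hardcoded "openvswitch: " prefix can be dropped from each format
string as the bare printk(KERN_<LEVEL> ...) calls are converted to
pr_err()/pr_warn().

A minimal illustration of the expansion (assuming, for this sketch,
that KBUILD_MODNAME expands to "openvswitch" for this module):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	pr_err("vport lock not held at %s (%d)\n", __FILE__, __LINE__);
	/* expands to:
	 * printk(KERN_ERR "openvswitch" ": " "vport lock not held at %s (%d)\n", ...);
	 * and logs: openvswitch: vport lock not held at ...
	 */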
diff --git a/datapath/vport.c b/datapath/vport.c
index 5437a97..91b650e 100644
--- a/datapath/vport.c
+++ b/datapath/vport.c
@@ -6,6 +6,8 @@
  * kernel, by Linus Torvalds and others.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/dcache.h>
 #include <linux/etherdevice.h>
 #include <linux/if.h>
@@ -16,6 +18,7 @@
 #include <linux/percpu.h>
 #include <linux/rtnetlink.h>
 #include <linux/compat.h>
+#include <linux/version.h>
 
 #include "vport.h"
 #include "vport-internal_dev.h"
@@ -27,6 +30,9 @@ static struct vport_ops *base_vport_ops_list[] = {
        &internal_vport_ops,
        &patch_vport_ops,
        &gre_vport_ops,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+       &capwap_vport_ops,
+#endif
 };
 
 static const struct vport_ops **vport_ops_list;
@@ -35,17 +41,6 @@ static int n_vport_types;
 static struct hlist_head *dev_table;
 #define VPORT_HASH_BUCKETS 1024
 
-/* We limit the number of times that we pass through vport_send() to
- * avoid blowing out the stack in the event that we have a loop. There is
- * a separate counter for each CPU for both interrupt and non-interrupt
- * context in order to keep the limit deterministic for a given packet. */
-struct percpu_loop_counter {
-       int count[2];
-};
-
-static struct percpu_loop_counter *vport_loop_counter;
-#define VPORT_MAX_LOOPS 5
-
 /* Both RTNL lock and vport_mutex need to be held when updating dev_table.
  *
  * If you use vport_locate and then perform some operations, you need to hold
@@ -83,13 +78,14 @@ void vport_unlock(void)
        mutex_unlock(&vport_mutex);
 }
 
-#define ASSERT_VPORT() do { \
-       if (unlikely(!mutex_is_locked(&vport_mutex))) { \
-               printk(KERN_ERR "openvswitch: vport lock not held at %s (%d)\n", \
-                       __FILE__, __LINE__); \
-               dump_stack(); \
-       } \
-} while(0)
+#define ASSERT_VPORT()                                         \
+do {                                                           \
+       if (unlikely(!mutex_is_locked(&vport_mutex))) {         \
+               pr_err("vport lock not held at %s (%d)\n",      \
+                      __FILE__, __LINE__);                     \
+               dump_stack();                                   \
+       }                                                       \
+} while (0)
 
 /**
  *     vport_init - initialize vport subsystem
@@ -116,12 +112,6 @@ int vport_init(void)
                goto error_dev_table;
        }
 
-       vport_loop_counter = alloc_percpu(struct percpu_loop_counter);
-       if (!vport_loop_counter) {
-               err = -ENOMEM;
-               goto error_ops_list;
-       }
-
        for (i = 0; i < ARRAY_SIZE(base_vport_ops_list); i++) {
                struct vport_ops *new_ops = base_vport_ops_list[i];
 
@@ -140,8 +130,6 @@ int vport_init(void)
 
        return 0;
 
-error_ops_list:
-       kfree(vport_ops_list);
 error_dev_table:
        kfree(dev_table);
 error:
@@ -185,7 +173,6 @@ void vport_exit(void)
                        vport_ops_list[i]->exit();
        }
 
-       free_percpu(vport_loop_counter);
        kfree(vport_ops_list);
        kfree(dev_table);
 }
@@ -632,7 +619,7 @@ struct vport *vport_locate(const char *name)
        struct hlist_node *node;
 
        if (unlikely(!mutex_is_locked(&vport_mutex) && !rtnl_is_locked())) {
-               printk(KERN_ERR "openvswitch: neither RTNL nor vport lock held in vport_locate\n");
+               pr_err("neither RTNL nor vport lock held in vport_locate\n");
                dump_stack();
        }
 
@@ -807,9 +794,6 @@ int vport_attach(struct vport *vport, struct dp_port *dp_port)
 {
        ASSERT_RTNL();
 
-       if (dp_port->vport)
-               return -EBUSY;
-
        if (vport_get_dp_port(vport))
                return -EBUSY;
 
@@ -821,7 +805,6 @@ int vport_attach(struct vport *vport, struct dp_port *dp_port)
                        return err;
        }
 
-       dp_port->vport = vport;
        rcu_assign_pointer(vport->dp_port, dp_port);
 
        return 0;
@@ -845,7 +828,6 @@ int vport_detach(struct vport *vport)
        if (!dp_port)
                return -EINVAL;
 
-       dp_port->vport = NULL;
        rcu_assign_pointer(vport->dp_port, NULL);
 
        if (vport->ops->detach)
@@ -1077,12 +1059,20 @@ int vport_get_stats(struct vport *vport, struct odp_vport_stats *stats)
 
                for_each_possible_cpu(i) {
                        const struct vport_percpu_stats *percpu_stats;
+                       struct vport_percpu_stats local_stats;
+                       unsigned seqcount;
 
                        percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
-                       stats->rx_bytes         += percpu_stats->rx_bytes;
-                       stats->rx_packets       += percpu_stats->rx_packets;
-                       stats->tx_bytes         += percpu_stats->tx_bytes;
-                       stats->tx_packets       += percpu_stats->tx_packets;
+
+                       do {
+                               seqcount = read_seqcount_begin(&percpu_stats->seqlock);
+                               local_stats = *percpu_stats;
+                       } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
+
+                       stats->rx_bytes         += local_stats.rx_bytes;
+                       stats->rx_packets       += local_stats.rx_packets;
+                       stats->tx_bytes         += local_stats.tx_bytes;
+                       stats->tx_packets       += local_stats.tx_packets;
                }
 
                err = 0;
@@ -1217,10 +1207,12 @@ void vport_receive(struct vport *vport, struct sk_buff *skb)
                struct vport_percpu_stats *stats;
 
                local_bh_disable();
-
                stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+               write_seqcount_begin(&stats->seqlock);
                stats->rx_packets++;
                stats->rx_bytes += skb->len;
+               write_seqcount_end(&stats->seqlock);
 
                local_bh_enable();
        }
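
The two stats hunks above replace unsynchronized per-CPU counter
updates and reads with a seqcount-protected pattern: the writer
brackets its increments with write_seqcount_begin()/write_seqcount_end(),
and the reader snapshots the whole per-CPU struct, retrying if
read_seqcount_retry() reports that a writer ran concurrently. This
keeps the 64-bit counters consistent on 32-bit machines, where a
single load of a u64 can tear, without taking a lock on the packet
path. A sketch of what the companion struct change presumably looks
like (the seqlock field is inferred from this diff; the actual
definition lives in vport.h):

	struct vport_percpu_stats {
		u64 rx_bytes;
		u64 rx_packets;
		u64 tx_bytes;
		u64 tx_packets;
		seqcount_t seqlock;	/* guards the counters above */
	};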
@@ -1252,25 +1244,15 @@ static inline unsigned packet_length(const struct sk_buff *skb)
  */
 int vport_send(struct vport *vport, struct sk_buff *skb)
 {
-       int *loop_count;
        int mtu;
        int sent;
 
-       loop_count = &per_cpu_ptr(vport_loop_counter, get_cpu())->count[!!in_interrupt()];
-       (*loop_count)++;
-
-       if (unlikely(*loop_count > VPORT_MAX_LOOPS)) {
-               if (net_ratelimit())
-                       printk(KERN_WARNING "%s: dropping packet that has looped more than %d times\n",
-                              dp_name(vport_get_dp_port(vport)->dp), VPORT_MAX_LOOPS);
-               goto error;
-       }
-
        mtu = vport_get_mtu(vport);
        if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
                if (net_ratelimit())
-                       printk(KERN_WARNING "%s: dropped over-mtu packet: %d > %d\n",
-                              dp_name(vport_get_dp_port(vport)->dp), packet_length(skb), mtu);
+                       pr_warn("%s: dropped over-mtu packet: %d > %d\n",
+                               dp_name(vport_get_dp_port(vport)->dp),
+                               packet_length(skb), mtu);
                goto error;
        }
 
@@ -1280,25 +1262,22 @@ int vport_send(struct vport *vport, struct sk_buff *skb)
                struct vport_percpu_stats *stats;
 
                local_bh_disable();
-
                stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+               write_seqcount_begin(&stats->seqlock);
                stats->tx_packets++;
                stats->tx_bytes += sent;
+               write_seqcount_end(&stats->seqlock);
 
                local_bh_enable();
        }
 
-       goto out;
+       return sent;
 
 error:
-       sent = 0;
        kfree_skb(skb);
        vport_record_error(vport, VPORT_E_TX_DROPPED);
-out:
-       (*loop_count)--;
-       put_cpu();
-
-       return sent;
+       return 0;
 }
 
 /**
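
With the per-CPU loop counter gone, vport_send() now simply returns
the byte count reported by the device on success and 0 after a drop;
the error path consumes the skb itself. A hedged caller sketch of the
resulting contract:

	int sent = vport_send(vport, skb);	/* consumes skb on error */
	if (!sent) {
		/* Packet was freed and VPORT_E_TX_DROPPED recorded;
		 * the caller must not reference skb again. */
	}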