datapath: Move segmentation compatibility code into a compatibility function
author: Simon Horman <horms@verge.net.au>
Tue, 24 Sep 2013 07:44:06 +0000 (16:44 +0900)
committer: Jesse Gross <jesse@nicira.com>
Thu, 26 Sep 2013 01:19:00 +0000 (18:19 -0700)
Move segmentation compatibility code out of netdev_send and into
rpl_dev_queue_xmit(), a compatibility function used in place
of dev_queue_xmit() as necessary.

As suggested by Jesse Gross.

Some minor though verbose implementation notes:

* rpl_dev_queue_xmit() endeavours to return a valid error code or
  zero on success, as dev_queue_xmit() does. The exception is that when
  dev_queue_xmit() is called in a loop only the status of the last call is
  taken into account, thus ignoring any errors returned by previous calls.
  This is derived from the previous calls to dev_queue_xmit() in a loop
  where netdev_send() ignores the return value of dev_queue_xmit()
  entirely.

* netdev_send() continues to ignore the return value of dev_queue_xmit().
  So the discussion of the return value of rpl_dev_queue_xmit()
  above has no bearing on run-time behaviour.

* The return value of netdev_send() may differ from the previous
  implementation in the case where segmentation is performed before
  calling the real dev_queue_xmit(). This is because previously in
  this case netdev_send() would return the combined length of the
  skbs resulting from segmentation. Whereas the current code
  always returns the length of the original skb.

Signed-off-by: Simon Horman <horms@verge.net.au>
[jesse: adjust error path in netdev_send() to match upstream]
Signed-off-by: Jesse Gross <jesse@nicira.com>
datapath/linux/compat/gso.c
datapath/linux/compat/include/linux/netdevice.h
datapath/vport-netdev.c

index 30332a2..32f906c 100644 (file)
 
 #include "gso.h"
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) && \
+       !defined(HAVE_VLAN_BUG_WORKAROUND)
+#include <linux/module.h>
+
+static int vlan_tso __read_mostly;
+module_param(vlan_tso, int, 0644);
+MODULE_PARM_DESC(vlan_tso, "Enable TSO for VLAN packets");
+#else
+#define vlan_tso true
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
+static bool dev_supports_vlan_tx(struct net_device *dev)
+{
+#if defined(HAVE_VLAN_BUG_WORKAROUND)
+       return dev->features & NETIF_F_HW_VLAN_TX;
+#else
+       /* Assume that the driver is buggy. */
+       return false;
+#endif
+}
+
+int rpl_dev_queue_xmit(struct sk_buff *skb)
+{
+#undef dev_queue_xmit
+       int err = -ENOMEM;
+
+       if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
+               int features;
+
+               features = netif_skb_features(skb);
+
+               if (!vlan_tso)
+                       features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
+                                     NETIF_F_UFO | NETIF_F_FSO);
+
+               skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
+               if (unlikely(!skb))
+                       return err;
+               vlan_set_tci(skb, 0);
+
+               if (netif_needs_gso(skb, features)) {
+                       struct sk_buff *nskb;
+
+                       nskb = skb_gso_segment(skb, features);
+                       if (!nskb) {
+                               if (unlikely(skb_cloned(skb) &&
+                                   pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+                                       goto drop;
+
+                               skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
+                               goto xmit;
+                       }
+
+                       if (IS_ERR(nskb)) {
+                               err = PTR_ERR(nskb);
+                               goto drop;
+                       }
+                       consume_skb(skb);
+                       skb = nskb;
+
+                       do {
+                               nskb = skb->next;
+                               skb->next = NULL;
+                               err = dev_queue_xmit(skb);
+                               skb = nskb;
+                       } while (skb);
+
+                       return err;
+               }
+       }
+xmit:
+       return dev_queue_xmit(skb);
+
+drop:
+       kfree_skb(skb);
+       return err;
+}
+#endif /* kernel version < 2.6.37 */
+
 static __be16 __skb_network_protocol(struct sk_buff *skb)
 {
        __be16 type = skb->protocol;
index 4e2b7f5..2b2c855 100644 (file)
@@ -120,4 +120,9 @@ static inline void netdev_upper_dev_unlink(struct net_device *dev,
 }
 #endif
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
+#define dev_queue_xmit rpl_dev_queue_xmit
+int dev_queue_xmit(struct sk_buff *skb);
+#endif
+
 #endif
index 31680fd..2a83f73 100644 (file)
 #include "vport-internal_dev.h"
 #include "vport-netdev.h"
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) && \
-       !defined(HAVE_VLAN_BUG_WORKAROUND)
-#include <linux/module.h>
-
-static int vlan_tso __read_mostly;
-module_param(vlan_tso, int, 0644);
-MODULE_PARM_DESC(vlan_tso, "Enable TSO for VLAN packets");
-#else
-#define vlan_tso true
-#endif
-
 static void netdev_port_receive(struct vport *vport, struct sk_buff *skb);
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
@@ -259,19 +248,6 @@ static unsigned int packet_length(const struct sk_buff *skb)
        return length;
 }
 
-static bool dev_supports_vlan_tx(struct net_device *dev)
-{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
-       /* Software fallback means every device supports vlan_tci on TX. */
-       return true;
-#elif defined(HAVE_VLAN_BUG_WORKAROUND)
-       return dev->features & NETIF_F_HW_VLAN_TX;
-#else
-       /* Assume that the driver is buggy. */
-       return false;
-#endif
-}
-
 static int netdev_send(struct vport *vport, struct sk_buff *skb)
 {
        struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
@@ -286,53 +262,6 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
        }
 
        skb->dev = netdev_vport->dev;
-
-       if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
-               int features;
-
-               features = netif_skb_features(skb);
-
-               if (!vlan_tso)
-                       features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
-                                     NETIF_F_UFO | NETIF_F_FSO);
-
-               skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
-               if (unlikely(!skb))
-                       return 0;
-               vlan_set_tci(skb, 0);
-
-               if (netif_needs_gso(skb, features)) {
-                       struct sk_buff *nskb;
-
-                       nskb = skb_gso_segment(skb, features);
-                       if (!nskb) {
-                               if (unlikely(skb_cloned(skb) &&
-                                   pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
-                                       goto drop;
-
-                               skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
-                               goto xmit;
-                       }
-
-                       if (IS_ERR(nskb))
-                               goto drop;
-                       consume_skb(skb);
-                       skb = nskb;
-
-                       len = 0;
-                       do {
-                               nskb = skb->next;
-                               skb->next = NULL;
-                               len += skb->len;
-                               dev_queue_xmit(skb);
-                               skb = nskb;
-                       } while (skb);
-
-                       return len;
-               }
-       }
-
-xmit:
        len = skb->len;
        dev_queue_xmit(skb);