2 * Copyright (c) 2007-2013 Nicira, Inc.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 #include <linux/version.h>
20 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)
22 #include <linux/module.h>
24 #include <linux/if_tunnel.h>
25 #include <linux/icmp.h>
28 #include <linux/kernel.h>
29 #include <linux/kmod.h>
30 #include <linux/netdevice.h>
31 #include <linux/skbuff.h>
32 #include <linux/spinlock.h>
36 #include <net/protocol.h>
37 #include <net/route.h>
/* On kernels older than 2.6.37 that do not define HAVE_VLAN_BUG_WORKAROUND,
 * expose a module parameter so an administrator can opt in to TSO for
 * VLAN-tagged packets (default 0: disabled, i.e. drivers are assumed buggy;
 * see dev_supports_vlan_tx() below). */
42 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) && \
43 !defined(HAVE_VLAN_BUG_WORKAROUND)
44 #include <linux/module.h>
46 static int vlan_tso __read_mostly;
47 module_param(vlan_tso, int, 0644);
48 MODULE_PARM_DESC(vlan_tso, "Enable TSO for VLAN packets");
53 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
/* Return true when @dev can be trusted to insert VLAN tags in hardware.
 * With HAVE_VLAN_BUG_WORKAROUND defined this simply checks the device's
 * NETIF_F_HW_VLAN_TX feature bit; otherwise the driver is assumed buggy.
 * NOTE(review): extraction gap -- the #else branch body and the closing
 * #endif/brace are not visible in this chunk. */
54 static bool dev_supports_vlan_tx(struct net_device *dev)
56 #if defined(HAVE_VLAN_BUG_WORKAROUND)
57 return dev->features & NETIF_F_HW_VLAN_TX;
59 /* Assume that the driver is buggy. */
/* Replacement for dev_queue_xmit() on kernels < 2.6.37: if the skb carries
 * an out-of-band VLAN tag and the device is not trusted to handle hardware
 * VLAN transmit (see dev_supports_vlan_tx()), insert the tag into the
 * packet in software and strip the segmentation-offload feature bits
 * before handing the skb to the stack.
 * NOTE(review): extraction gaps -- local declarations (features, nskb,
 * err) and several error/cleanup paths are missing from this chunk. */
64 int rpl_dev_queue_xmit(struct sk_buff *skb)
69 if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
/* Compute the device's feature set for this skb, then mask out all
 * segmentation offloads so a buggy driver never sees a VLAN GSO skb. */
72 features = netif_skb_features(skb);
75 features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
76 NETIF_F_UFO | NETIF_F_FSO);
/* Push the VLAN tag into the packet payload itself (software tagging). */
78 skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
/* With offloads masked, segment in software when the skb still needs GSO. */
83 if (netif_needs_gso(skb, features)) {
86 nskb = skb_gso_segment(skb, features);
/* Non-GSO path: a cloned skb must get a private head before the
 * shared_info gso_type is modified below. */
88 if (unlikely(skb_cloned(skb) &&
89 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
92 skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
/* Transmit (per-segment in the GSO loop above; presumably iterated --
 * the loop structure is not visible here). */
106 err = dev_queue_xmit(skb);
/* Fast path: no software tagging needed; transmit the skb as-is. */
114 return dev_queue_xmit(skb);
/* Return the network-layer ethertype of @skb, skipping over any stacked
 * 802.1Q / 802.1ad VLAN headers: starting at the Ethernet header, each
 * VLAN header found advances vlan_depth by VLAN_HLEN and takes its
 * encapsulated protocol as the new candidate type.
 * NOTE(review): extraction gaps -- the vlan_hdr declaration, the failure
 * return for pskb_may_pull(), the final return, and the closing brace are
 * not visible in this chunk. */
122 static __be16 __skb_network_protocol(struct sk_buff *skb)
124 __be16 type = skb->protocol;
125 int vlan_depth = ETH_HLEN;
127 while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
/* Make sure the next VLAN header is in the linear data area before
 * dereferencing it. */
130 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
133 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
134 type = vh->h_vlan_encapsulated_proto;
135 vlan_depth += VLAN_HLEN;
/* GSO-segment a tunnelled skb on older kernels: temporarily strip the
 * outer (tunnel) headers so the kernel's GSO code segments only the inner
 * L3 packet, then restore the outer headers, the saved skb->cb, and the
 * original protocol on the result.  The saved iph/cb copies exist because
 * from kernel 3.9 onward skb->cb is clobbered by skb GSO (see comment in
 * body).
 * NOTE(review): extraction gaps -- the third parameter (tx_path, used at
 * the __skb_gso_segment() call), error-handling paths, the per-segment
 * loop structure, and the final return are not visible in this chunk. */
141 static struct sk_buff *tnl_skb_gso_segment(struct sk_buff *skb,
142 netdev_features_t features,
/* Snapshot of the outer IP header; copied back over each segment below. */
145 struct iphdr *iph = ip_hdr(skb);
146 int pkt_hlen = skb_inner_network_offset(skb); /* inner l2 + tunnel hdr. */
147 int mac_offset = skb_inner_mac_offset(skb);
148 struct sk_buff *skb1 = skb;
149 struct sk_buff *segs;
150 __be16 proto = skb->protocol;
151 char cb[sizeof(skb->cb)];
153 /* setup whole inner packet to get protocol. */
154 __skb_pull(skb, mac_offset);
155 skb->protocol = __skb_network_protocol(skb);
157 /* setup l3 packet to gso, to get around segmentation bug on older kernel.*/
158 __skb_pull(skb, (pkt_hlen - mac_offset));
159 skb_reset_mac_header(skb);
160 skb_reset_network_header(skb);
161 skb_reset_transport_header(skb);
163 /* From 3.9 kernel skb->cb is used by skb gso. Therefore
164 * make copy of it to restore it back. */
165 memcpy(cb, skb->cb, sizeof(cb));
167 segs = __skb_gso_segment(skb, 0, tx_path);
168 if (!segs || IS_ERR(segs))
/* Restore the outer headers on the segment: push the tunnel + inner-L2
 * header space back and rebuild header offsets (outer transport header
 * sits right after the outer IPv4 header). */
173 __skb_push(skb, pkt_hlen);
174 skb_reset_mac_header(skb);
175 skb_reset_network_header(skb);
176 skb_set_transport_header(skb, sizeof(struct iphdr));
/* Copy the saved outer headers and control block back, then let the
 * caller-supplied callback fix up the segment (e.g. checksums). */
179 memcpy(ip_hdr(skb), iph, pkt_hlen);
180 memcpy(skb->cb, cb, sizeof(cb));
181 if (OVS_GSO_CB(skb)->fix_segment)
182 OVS_GSO_CB(skb)->fix_segment(skb);
184 skb->protocol = proto;
192 int rpl_ip_local_out(struct sk_buff *skb)
194 int ret = NETDEV_TX_OK;
197 if (skb_is_gso(skb)) {
202 skb = tnl_skb_gso_segment(skb, 0, false);
203 if (!skb || IS_ERR(skb))
205 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
208 err = skb_checksum_help(skb);
214 struct sk_buff *next_skb = skb->next;
222 iph->id = htons(id++);
224 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
227 err = ip_local_out(skb);
228 if (unlikely(net_xmit_eval(err)))