datapath: Add support for Linux 3.12
[sliver-openvswitch.git] / datapath / linux / compat / gso.c
1 /*
2  * Copyright (c) 2007-2013 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18
19 #include <linux/version.h>
20 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)
21
22 #include <linux/module.h>
23 #include <linux/if.h>
24 #include <linux/if_tunnel.h>
25 #include <linux/icmp.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/kernel.h>
29 #include <linux/kmod.h>
30 #include <linux/netdevice.h>
31 #include <linux/skbuff.h>
32 #include <linux/spinlock.h>
33
34 #include <net/gre.h>
35 #include <net/icmp.h>
36 #include <net/protocol.h>
37 #include <net/route.h>
38 #include <net/xfrm.h>
39
40 #include "gso.h"
41
42 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) && \
43         !defined(HAVE_VLAN_BUG_WORKAROUND)
44 #include <linux/module.h>
45
46 static int vlan_tso __read_mostly;
47 module_param(vlan_tso, int, 0644);
48 MODULE_PARM_DESC(vlan_tso, "Enable TSO for VLAN packets");
49 #else
50 #define vlan_tso true
51 #endif
52
53 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
/* Returns true if @dev's driver can be trusted to transmit an skb that
 * carries an out-of-band (accelerated) VLAN tag.
 *
 * Only compiled for kernels < 2.6.37.  Unless the build defines
 * HAVE_VLAN_BUG_WORKAROUND we conservatively treat every driver as
 * buggy and fall back to inserting the tag in software (see
 * rpl_dev_queue_xmit()). */
static bool dev_supports_vlan_tx(struct net_device *dev)
{
#if defined(HAVE_VLAN_BUG_WORKAROUND)
	/* Trust the driver's advertised hardware VLAN-tx capability. */
	return dev->features & NETIF_F_HW_VLAN_TX;
#else
	/* Assume that the driver is buggy. */
	return false;
#endif
}
63
/* Compat replacement for dev_queue_xmit() on kernels < 2.6.37.
 *
 * If @skb carries an out-of-band VLAN tag and the device's driver
 * cannot be trusted with it (see dev_supports_vlan_tx()), push the tag
 * into the packet data here and, when the now-tagged packet still
 * needs GSO, segment it in software before handing each segment to the
 * real dev_queue_xmit().
 *
 * Consumes @skb in all cases.  Returns a dev_queue_xmit()-style code,
 * or -ENOMEM / a skb_gso_segment() error on failure. */
int rpl_dev_queue_xmit(struct sk_buff *skb)
{
/* A compat header redirects dev_queue_xmit to this function; undo that
 * here so the calls below reach the kernel's own implementation. */
#undef dev_queue_xmit
	int err = -ENOMEM;

	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
		int features;

		features = netif_skb_features(skb);

		/* With the vlan_tso module parameter off, mask out all
		 * segmentation-offload flags so software-tagged packets
		 * are segmented by software below rather than handed to
		 * possibly-buggy hardware TSO. */
		if (!vlan_tso)
			features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				      NETIF_F_UFO | NETIF_F_FSO);

		/* Insert the tag into the packet data.  On failure
		 * __vlan_put_tag() frees the skb (presumably — per the
		 * kernel helper's contract; TODO confirm for the compat
		 * definition), so we must not free it again here. */
		skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
		if (unlikely(!skb))
			return err;
		/* The tag is now in-band; clear the out-of-band copy. */
		vlan_set_tci(skb, 0);

		if (netif_needs_gso(skb, features)) {
			struct sk_buff *nskb;

			nskb = skb_gso_segment(skb, features);
			if (!nskb) {
				/* NULL (not an ERR_PTR) means the skb can
				 * go out unsegmented.  Unclone it before
				 * editing shared gso state below. */
				if (unlikely(skb_cloned(skb) &&
				    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
					goto drop;

				skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
				goto xmit;
			}

			if (IS_ERR(nskb)) {
				err = PTR_ERR(nskb);
				goto drop;
			}
			/* Original skb replaced by the segment list. */
			consume_skb(skb);
			skb = nskb;

			/* Transmit each segment, detaching it from the
			 * list first.  NOTE(review): err is overwritten
			 * each iteration, so only the status of the LAST
			 * segment is reported to the caller. */
			do {
				nskb = skb->next;
				skb->next = NULL;
				err = dev_queue_xmit(skb);
				skb = nskb;
			} while (skb);

			return err;
		}
	}
xmit:
	return dev_queue_xmit(skb);

drop:
	kfree_skb(skb);
	return err;
}
120 #endif /* kernel version < 2.6.37 */
121
122 static __be16 __skb_network_protocol(struct sk_buff *skb)
123 {
124         __be16 type = skb->protocol;
125         int vlan_depth = ETH_HLEN;
126
127         while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
128                 struct vlan_hdr *vh;
129
130                 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
131                         return 0;
132
133                 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
134                 type = vh->h_vlan_encapsulated_proto;
135                 vlan_depth += VLAN_HLEN;
136         }
137
138         return type;
139 }
140
/* Software-segments a GSO tunnel packet on kernels that cannot segment
 * encapsulated skbs themselves (< 3.12).
 *
 * Strips the outer headers (outer IP + tunnel header + inner L2) down
 * to the inner L3 packet, lets the stack's GSO code segment that, then
 * re-installs a copy of the original outer headers on every segment
 * and gives the tunnel implementation a chance to fix them up via
 * OVS_GSO_CB(skb)->fix_segment (e.g. per-segment lengths/checksums —
 * set by the caller; semantics depend on the tunnel type).
 *
 * @features is currently unused (0 is passed to __skb_gso_segment).
 * Consumes @skb; returns the segment list, NULL, or an ERR_PTR. */
static struct sk_buff *tnl_skb_gso_segment(struct sk_buff *skb,
					   netdev_features_t features,
					   bool tx_path)
{
	/* NOTE(review): iph is captured before __skb_network_protocol()
	 * below, which may pskb_may_pull() and so could reallocate the
	 * skb head, leaving iph dangling — confirm the outer headers are
	 * always already linear when this is called. */
	struct iphdr *iph = ip_hdr(skb);
	int pkt_hlen = skb_inner_network_offset(skb); /* inner l2 + tunnel hdr. */
	int mac_offset = skb_inner_mac_offset(skb);
	struct sk_buff *skb1 = skb;
	struct sk_buff *segs;
	__be16 proto = skb->protocol;
	char cb[sizeof(skb->cb)];

	/* setup whole inner packet to get protocol. */
	__skb_pull(skb, mac_offset);
	skb->protocol = __skb_network_protocol(skb);

	/* setup l3 packet to gso, to get around segmentation bug on older kernel.*/
	__skb_pull(skb, (pkt_hlen - mac_offset));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	/* From 3.9 kernel skb->cb is used by skb gso. Therefore
	 * make copy of it to restore it back. */
	memcpy(cb, skb->cb, sizeof(cb));

	segs = __skb_gso_segment(skb, 0, tx_path);
	if (!segs || IS_ERR(segs))
		goto free;

	/* Rebuild the outer headers on each segment. */
	skb = segs;
	while (skb) {
		__skb_push(skb, pkt_hlen);
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb_set_transport_header(skb, sizeof(struct iphdr));
		skb->mac_len = 0;

		/* Copy the saved outer headers (outer IP onward) in front
		 * of the segment, then restore the saved cb. */
		memcpy(ip_hdr(skb), iph, pkt_hlen);
		memcpy(skb->cb, cb, sizeof(cb));
		if (OVS_GSO_CB(skb)->fix_segment)
			OVS_GSO_CB(skb)->fix_segment(skb);

		skb->protocol = proto;
		skb = skb->next;
	}
free:
	/* The original skb is no longer needed whether segmentation
	 * succeeded or failed. */
	consume_skb(skb1);
	return segs;
}
191
/* Compat replacement for ip_local_out() for kernels < 3.12: performs
 * software GSO of tunnel packets (via tnl_skb_gso_segment()) or
 * checksum completion before handing each resulting packet to the real
 * ip_local_out().
 *
 * Consumes @skb.  Returns NETDEV_TX_OK on success, otherwise the last
 * error reported by ip_local_out() (see net_xmit_eval()). */
int rpl_ip_local_out(struct sk_buff *skb)
{
	int ret = NETDEV_TX_OK;
	/* IP id of the original packet; -1 means "leave ids alone". */
	int id = -1;

	if (skb_is_gso(skb)) {
		struct iphdr *iph;

		/* Remember the outer IP id so segments get consecutive
		 * ids below. */
		iph = ip_hdr(skb);
		id = ntohs(iph->id);
		skb = tnl_skb_gso_segment(skb, 0, false);
		if (!skb || IS_ERR(skb))
			/* NOTE(review): segmentation errors are dropped
			 * silently — caller sees NETDEV_TX_OK (0). */
			return 0;
	}  else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* No hardware beyond this point: complete the checksum
		 * in software. */
		int err;

		err = skb_checksum_help(skb);
		if (unlikely(err))
			return 0;
	}

	/* Send each packet (a single skb, or the GSO segment list). */
	while (skb) {
		struct sk_buff *next_skb = skb->next;
		struct iphdr *iph;
		int err;

		skb->next = NULL;

		/* Assign consecutive IP ids starting from the original
		 * packet's id, mirroring what hardware GSO would do. */
		iph = ip_hdr(skb);
		if (id >= 0)
			iph->id = htons(id++);

		/* ip_local_out() expects a clean IP control block. */
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

/* A compat header redirects ip_local_out to this function; undo that
 * so the call below reaches the kernel's own implementation. */
#undef ip_local_out
		err = ip_local_out(skb);
		/* net_xmit_eval() ignores congestion-notification codes;
		 * only real errors update ret (last error wins). */
		if (unlikely(net_xmit_eval(err)))
			ret = err;

		skb = next_skb;
	}
	return ret;
}
235 #endif /* 3.12 */