/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <net/route.h>
#include <net/xfrm.h>

#include "checksum.h"
#include "compat.h"
#include "datapath.h"
#include "tunnel.h"
#include "vlan.h"
#include "vport.h"

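/*
 * struct rtable's embedded dst_entry moved from rt->u.dst to rt->dst in
 * 2.6.36; rt_dst() hides the difference for the code below.
 */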
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

/**
 *      ovs_tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 * @tun_key: tunnel metadata from the encapsulating IP packet (its ToS is
 *           used to copy the ECN bits)
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb,
                 struct ovs_key_ipv4_tunnel *tun_key)
{
        struct ethhdr *eh;

        skb_reset_mac_header(skb);
        eh = eth_hdr(skb);

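        /*
         * Values of h_proto at or above ETH_P_802_3_MIN (0x0600) are real
         * EtherTypes; smaller values are 802.3 frame lengths, so those
         * frames are handed up as raw 802.2.
         */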
        if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
                skb->protocol = eh->h_proto;
        else
                skb->protocol = htons(ETH_P_802_2);

        skb_dst_drop(skb);
        nf_reset(skb);
        skb_clear_rxhash(skb);
        secpath_reset(skb);
        vlan_set_tci(skb, 0);

        if (unlikely(compute_ip_summed(skb, false))) {
                kfree_skb(skb);
                return;
        }

        ovs_vport_receive(vport, skb, tun_key);
}
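
/*
 * A typical caller is a tunnel protocol's receive handler, which strips its
 * own encapsulation before handing the inner frame over.  A minimal sketch,
 * with hypothetical names (tunnel_hdr_len is not defined in this file):
 *
 *      __skb_pull(skb, tunnel_hdr_len);        // drop the outer headers
 *      ovs_tnl_rcv(vport, skb, &tun_key);
 */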

static struct rtable *find_route(struct net *net,
                __be32 *saddr, __be32 daddr, u8 ipproto,
                u8 tos, u32 skb_mark)
{
        struct rtable *rt;
        /* The tunnel configuration keeps the DSCP part of the ToS bits, but
         * the Linux routing code expects RT_TOS bits only. */

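        /*
         * Two compat paths: before 2.6.39, ip_route_output_key() takes a
         * struct flowi and an output parameter and returns an error code;
         * from 2.6.39 on it takes a struct flowi4 and returns the rtable
         * directly (or an ERR_PTR on failure).
         */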
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
        struct flowi fl = { .nl_u = { .ip4_u = {
                                        .daddr = daddr,
                                        .saddr = *saddr,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
                                        .fwmark = skb_mark,
#endif
                                        .tos   = RT_TOS(tos) } },
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
                                        .mark = skb_mark,
#endif
                                        .proto = ipproto };

        if (unlikely(ip_route_output_key(net, &rt, &fl)))
                return ERR_PTR(-EADDRNOTAVAIL);
        *saddr = fl.nl_u.ip4_u.saddr;
        return rt;
#else
        struct flowi4 fl = { .daddr = daddr,
                             .saddr = *saddr,
                             .flowi4_tos = RT_TOS(tos),
                             .flowi4_mark = skb_mark,
                             .flowi4_proto = ipproto };

        rt = ip_route_output_key(net, &fl);
        *saddr = fl.saddr;
        return rt;
#endif
}

static bool need_linearize(const struct sk_buff *skb)
{
        int i;

        if (unlikely(skb_shinfo(skb)->frag_list))
                return true;

        /*
         * Generally speaking we should linearize if there are paged frags.
         * However, if all of the refcounts are 1 we know nobody else can
         * change them from underneath us and we can skip the linearization.
         */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
                        return true;

        return false;
}

static struct sk_buff *handle_offloads(struct sk_buff *skb)
{
        int err;

        forward_ip_summed(skb, true);

        if (skb_is_gso(skb)) {
                struct sk_buff *nskb;

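                /*
                 * Software GSO: segment the over-sized skb into a chain of
                 * MTU-sized skbs linked through skb->next; the original is
                 * consumed below and the caller transmits the whole chain.
                 */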
                nskb = __skb_gso_segment(skb, 0, false);
                if (IS_ERR(nskb)) {
                        err = PTR_ERR(nskb);
                        goto error;
                }

                consume_skb(skb);
                skb = nskb;
        } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
                /* Pages aren't locked and could change at any time.
                 * If this happens after we compute the checksum, the
                 * checksum will be wrong.  We linearize now to avoid
                 * this problem.
                 */
                if (unlikely(need_linearize(skb))) {
                        err = __skb_linearize(skb);
                        if (unlikely(err))
                                goto error;
                }

                err = skb_checksum_help(skb);
                if (unlikely(err))
                        goto error;
        }

        set_ip_summed(skb, OVS_CSUM_NONE);

        return skb;

error:
        return ERR_PTR(err);
}

/* Compute the source UDP port for an outgoing packet.
 * Currently we use the flow hash.
 */
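/*
 * Example with hypothetical numbers: with the default local port range of
 * [32768, 61000], range == 28233.  A flow hash of 0x80000000 gives
 * ((u64)0x80000000 * 28233) >> 32 == 14116, so the source port is
 * 14116 + 32768 == 46884.  The multiply-and-shift maps the 32-bit hash
 * onto the range without a division or modulo.
 */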
u16 ovs_tnl_get_src_port(struct sk_buff *skb)
{
        int low;
        int high;
        unsigned int range;
        u32 hash = OVS_CB(skb)->flow->hash;

        inet_get_local_port_range(&low, &high);
        range = (high - low) + 1;
        return (((u64) hash * range) >> 32) + low;
}

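/**
 *      ovs_tnl_send - encapsulate and transmit a packet on a tunnel vport
 *
 * @vport: port the packet leaves through
 * @skb: packet to encapsulate and send
 * @ipproto: IP protocol number of the tunnel encapsulation
 * @tunnel_hlen: length of the tunnel header; the outer IP header is added
 *               on top of it here
 * @build_header: callback that writes the tunnel header onto each segment
 *
 * Returns the number of inner payload bytes sent, or a negative errno if
 * route lookup, headroom expansion, or offload handling fails.
 */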
int ovs_tnl_send(struct vport *vport, struct sk_buff *skb,
                 u8 ipproto, int tunnel_hlen,
                 void (*build_header)(const struct vport *,
                                      struct sk_buff *,
                                      int tunnel_hlen))
{
        int min_headroom;
        struct rtable *rt;
        __be32 saddr;
        int sent_len = 0;
        int err;
        struct sk_buff *nskb;

        /* Route lookup */
        saddr = OVS_CB(skb)->tun_key->ipv4_src;
        rt = find_route(ovs_dp_get_net(vport->dp),
                        &saddr,
                        OVS_CB(skb)->tun_key->ipv4_dst,
                        ipproto,
                        OVS_CB(skb)->tun_key->ipv4_tos,
                        skb_get_mark(skb));
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                goto error;
        }

        tunnel_hlen += sizeof(struct iphdr);

        min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
                        + tunnel_hlen
                        + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
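                /*
                 * Grow the headroom to fit the link-layer, tunnel, and
                 * outer IP headers, with 16 bytes of slack, rounded up by
                 * SKB_DATA_ALIGN.  Expansion also un-clones a shared
                 * header so it can be written safely.
                 */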
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) +
                                                16);

                err = pskb_expand_head(skb, max_t(int, head_delta, 0),
                                        0, GFP_ATOMIC);
                if (unlikely(err))
                        goto err_free_rt;
        }

        /* Offloading */
        nskb = handle_offloads(skb);
        if (IS_ERR(nskb)) {
                err = PTR_ERR(nskb);
                goto err_free_rt;
        }
        skb = nskb;

        /* Reset SKB */
        nf_reset(skb);
        secpath_reset(skb);
        skb_dst_drop(skb);
        skb_clear_rxhash(skb);

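        /*
         * handle_offloads() may have turned the skb into a chain of GSO
         * segments linked through skb->next; encapsulate and transmit each
         * segment in turn.
         */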
        while (skb) {
                struct sk_buff *next_skb = skb->next;
                struct iphdr *iph;
                int frag_len;

                skb->next = NULL;

                if (unlikely(vlan_deaccel_tag(skb)))
                        goto next;

                frag_len = skb->len;
                skb_push(skb, tunnel_hlen);
                skb_reset_network_header(skb);
                skb_set_transport_header(skb, sizeof(struct iphdr));

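                /*
                 * Every segment needs its own reference on the route; the
                 * last one takes over the reference returned by
                 * find_route().
                 */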
                if (next_skb)
                        skb_dst_set(skb, dst_clone(&rt_dst(rt)));
                else
                        skb_dst_set(skb, &rt_dst(rt));

                /* Push the tunnel header. */
                build_header(vport, skb, tunnel_hlen);

                /* Push the IP header. */
                iph = ip_hdr(skb);
                iph->version    = 4;
                iph->ihl        = sizeof(struct iphdr) >> 2;
                iph->protocol   = ipproto;
                iph->daddr      = OVS_CB(skb)->tun_key->ipv4_dst;
                iph->saddr      = saddr;
                iph->tos        = OVS_CB(skb)->tun_key->ipv4_tos;
                iph->ttl        = OVS_CB(skb)->tun_key->ipv4_ttl;
                iph->frag_off   = OVS_CB(skb)->tun_key->tun_flags &
                                  OVS_TNL_F_DONT_FRAGMENT ? htons(IP_DF) : 0;
                /*
                 * As a last resort, allow our local IP stack to fragment
                 * the outer packet even if the DF bit is set.  We also need
                 * to force selection of an IP ID here with
                 * __ip_select_ident(), as ip_select_ident() assumes a
                 * proper ID is not needed when the DF bit is set.
                 */
                skb->local_df = 1;
                __ip_select_ident(iph, skb_dst(skb), 0);

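                /*
                 * Clear the IP control block: skb->cb still holds OVS
                 * metadata, but from here on the IP output path treats it
                 * as an IPCB.
                 */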
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

                err = ip_local_out(skb);
                if (unlikely(net_xmit_eval(err)))
                        goto next;

                sent_len += frag_len;

next:
                skb = next_skb;
        }

        return sent_len;

err_free_rt:
        ip_rt_put(rt);
error:
        return err;
}