/* datapath: Factor out common code from *_build_header() to ovs_tnl_send().
 * [sliver-openvswitch.git] / datapath / tunnel.c
 */
1 /*
2  * Copyright (c) 2007-2012 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/in.h>
22 #include <linux/in_route.h>
23 #include <linux/inetdevice.h>
24 #include <linux/jhash.h>
25 #include <linux/list.h>
26 #include <linux/kernel.h>
27 #include <linux/version.h>
28 #include <linux/workqueue.h>
29 #include <linux/rculist.h>
30 #include <net/route.h>
31 #include <net/xfrm.h>
32
33 #include "checksum.h"
34 #include "compat.h"
35 #include "datapath.h"
36 #include "tunnel.h"
37 #include "vlan.h"
38 #include "vport.h"
39
40 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
41 #define rt_dst(rt) (rt->dst)
42 #else
43 #define rt_dst(rt) (rt->u.dst)
44 #endif
45
46 /**
47  *      ovs_tnl_rcv - ingress point for generic tunnel code
48  *
49  * @vport: port this packet was received on
50  * @skb: received packet
51  * @tos: ToS from encapsulating IP packet, used to copy ECN bits
52  *
53  * Must be called with rcu_read_lock.
54  *
55  * Packets received by this function are in the following state:
56  * - skb->data points to the inner Ethernet header.
57  * - The inner Ethernet header is in the linear data area.
58  * - skb->csum does not include the inner Ethernet header.
59  * - The layer pointers are undefined.
60  */
61 void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb)
62 {
63         struct ethhdr *eh;
64
65         skb_reset_mac_header(skb);
66         eh = eth_hdr(skb);
67
68         if (likely(ntohs(eh->h_proto) >= 1536))
69                 skb->protocol = eh->h_proto;
70         else
71                 skb->protocol = htons(ETH_P_802_2);
72
73         skb_dst_drop(skb);
74         nf_reset(skb);
75         skb_clear_rxhash(skb);
76         secpath_reset(skb);
77         vlan_set_tci(skb, 0);
78
79         if (unlikely(compute_ip_summed(skb, false))) {
80                 kfree_skb(skb);
81                 return;
82         }
83
84         ovs_vport_receive(vport, skb);
85 }
86
/* Look up an IPv4 route for the tunnel's outer header.
 *
 * @net: network namespace to perform the lookup in.
 * @saddr: in/out.  On input, the requested source address (may be 0 to let
 *	the routing code choose); on success it is overwritten with the
 *	source address the kernel selected.
 * @daddr: outer destination address.
 * @ipproto: outer IP protocol (e.g. IPPROTO_GRE).
 * @tos: full ToS byte from the tunnel config; masked to RT_TOS bits below.
 * @skb_mark: skb mark used as the routing fwmark.
 *
 * Returns a routing table entry on success (caller owns the reference and
 * must release it, e.g. via ip_rt_put() or by attaching it to an skb) or an
 * ERR_PTR() on failure.
 */
static struct rtable *find_route(struct net *net,
		__be32 *saddr, __be32 daddr, u8 ipproto,
		u8 tos, u32 skb_mark)
{
	struct rtable *rt;
	/* Tunnel configuration keeps DSCP part of TOS bits, But Linux
	 * router expect RT_TOS bits only. */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
	/* Pre-2.6.39 flowi layout: address/tos live in the nl_u.ip4_u
	 * union; the fwmark field moved out of it in 2.6.20. */
	struct flowi fl = { .nl_u = { .ip4_u = {
					.daddr = daddr,
					.saddr = *saddr,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
					.fwmark = skb_mark,
#endif
					.tos   = RT_TOS(tos) } },
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
					.mark = skb_mark,
#endif
					.proto = ipproto };

	if (unlikely(ip_route_output_key(net, &rt, &fl)))
		return ERR_PTR(-EADDRNOTAVAIL);
	*saddr = fl.nl_u.ip4_u.saddr;
	return rt;
#else
	/* 2.6.39+ uses the dedicated flowi4 type, and
	 * ip_route_output_key() returns the route (or ERR_PTR) directly. */
	struct flowi4 fl = { .daddr = daddr,
			     .saddr = *saddr,
			     .flowi4_tos = RT_TOS(tos),
			     .flowi4_mark = skb_mark,
			     .flowi4_proto = ipproto };

	rt = ip_route_output_key(net, &fl);
	*saddr = fl.saddr;
	return rt;
#endif
}
124
125 static bool need_linearize(const struct sk_buff *skb)
126 {
127         int i;
128
129         if (unlikely(skb_shinfo(skb)->frag_list))
130                 return true;
131
132         /*
133          * Generally speaking we should linearize if there are paged frags.
134          * However, if all of the refcounts are 1 we know nobody else can
135          * change them from underneath us and we can skip the linearization.
136          */
137         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
138                 if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
139                         return true;
140
141         return false;
142 }
143
/* Prepare @skb for tunnel transmission: guarantee enough headroom for the
 * outer headers and resolve any pending offloads (GSO segmentation or
 * partial checksums) that the encapsulated path cannot carry.
 *
 * @skb: packet to transmit; ownership passes to this function.
 * @rt: route the packet will take (used to size link-layer headroom).
 * @tunnel_hlen: total encapsulation header length (tunnel + outer IP).
 *
 * Returns the (possibly replaced) skb on success -- for GSO input this is
 * the head of a segment list linked via skb->next -- or an ERR_PTR() on
 * failure, in which case the input skb has already been freed.
 */
static struct sk_buff *handle_offloads(struct sk_buff *skb,
				       const struct rtable *rt,
				       int tunnel_hlen)
{
	int min_headroom;
	int err;

	/* Worst-case headroom: output device's link-layer space, any
	 * route-level header, the encapsulation, and a VLAN tag if one is
	 * pending in the skb. */
	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ tunnel_hlen
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		/* +16 slack avoids reallocating again for small pushes. */
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
					0, GFP_ATOMIC);
		if (unlikely(err))
			goto error_free;
	}

	forward_ip_summed(skb, true);

	if (skb_is_gso(skb)) {
		/* Segment in software: the outer encapsulation defeats
		 * hardware GSO.  nskb is a list linked via skb->next. */
		struct sk_buff *nskb;

		nskb = __skb_gso_segment(skb, 0, false);
		if (IS_ERR(nskb)) {
			kfree_skb(skb);
			err = PTR_ERR(nskb);
			goto error;
		}

		consume_skb(skb);
		skb = nskb;
	} else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
		/* Pages aren't locked and could change at any time.
		 * If this happens after we compute the checksum, the
		 * checksum will be wrong.  We linearize now to avoid
		 * this problem.
		 */
		if (unlikely(need_linearize(skb))) {
			err = __skb_linearize(skb);
			if (unlikely(err))
				goto error_free;
		}

		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error_free;
	}

	/* All offloads are resolved; the packet's checksum is complete. */
	set_ip_summed(skb, OVS_CSUM_NONE);

	return skb;

error_free:
	kfree_skb(skb);
error:
	return ERR_PTR(err);
}
205
206 /* Compute source UDP port for outgoing packet.
207  * Currently we use the flow hash.
208  */
209 u16 ovs_tnl_get_src_port(struct sk_buff *skb)
210 {
211         int low;
212         int high;
213         unsigned int range;
214         u32 hash = OVS_CB(skb)->flow->hash;
215
216         inet_get_local_port_range(&low, &high);
217         range = (high - low) + 1;
218         return (((u64) hash * range) >> 32) + low;
219 }
220
221 int ovs_tnl_send(struct vport *vport, struct sk_buff *skb)
222 {
223         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
224         struct rtable *rt;
225         __be32 saddr;
226         int sent_len = 0;
227         int tunnel_hlen;
228
229         if (unlikely(!OVS_CB(skb)->tun_key))
230                 goto error_free;
231
232         /* Route lookup */
233         saddr = OVS_CB(skb)->tun_key->ipv4_src;
234         rt = find_route(ovs_dp_get_net(vport->dp),
235                         &saddr,
236                         OVS_CB(skb)->tun_key->ipv4_dst,
237                         tnl_vport->tnl_ops->ipproto,
238                         OVS_CB(skb)->tun_key->ipv4_tos,
239                         skb_get_mark(skb));
240         if (IS_ERR(rt))
241                 goto error_free;
242
243         /* Offloading */
244         tunnel_hlen = tnl_vport->tnl_ops->hdr_len(OVS_CB(skb)->tun_key);
245         tunnel_hlen += sizeof(struct iphdr);
246
247         skb = handle_offloads(skb, rt, tunnel_hlen);
248         if (IS_ERR(skb)) {
249                 skb = NULL;
250                 goto err_free_rt;
251         }
252
253         /* Reset SKB */
254         nf_reset(skb);
255         secpath_reset(skb);
256         skb_dst_drop(skb);
257         skb_clear_rxhash(skb);
258
259         while (skb) {
260                 struct sk_buff *next_skb = skb->next;
261                 struct iphdr *iph;
262                 int frag_len;
263                 int err;
264
265                 skb->next = NULL;
266
267                 if (unlikely(vlan_deaccel_tag(skb)))
268                         goto next;
269
270                 frag_len = skb->len;
271                 skb_push(skb, tunnel_hlen);
272                 skb_reset_network_header(skb);
273                 skb_set_transport_header(skb, sizeof(struct iphdr));
274
275                 if (next_skb)
276                         skb_dst_set(skb, dst_clone(&rt_dst(rt)));
277                 else
278                         skb_dst_set(skb, &rt_dst(rt));
279
280                 /* Push Tunnel header. */
281                 tnl_vport->tnl_ops->build_header(vport, skb, tunnel_hlen);
282
283                 /*
284                  * Allow our local IP stack to fragment the outer packet even
285                  * if the DF bit is set as a last resort.  We also need to
286                  * force selection of an IP ID here because Linux will
287                  * otherwise leave it at 0 if the packet originally had DF set.
288                  */
289                 skb->local_df = 1;
290                 __ip_select_ident(ip_hdr(skb), skb_dst(skb), 0);
291
292                 /* Push IP header. */
293                 iph = ip_hdr(skb);
294                 iph->version    = 4;
295                 iph->ihl        = sizeof(struct iphdr) >> 2;
296                 iph->protocol   = tnl_vport->tnl_ops->ipproto;
297                 iph->daddr      = OVS_CB(skb)->tun_key->ipv4_dst;
298                 iph->saddr      = saddr;
299                 iph->tos        = OVS_CB(skb)->tun_key->ipv4_tos;
300                 iph->ttl        = OVS_CB(skb)->tun_key->ipv4_ttl;
301                 iph->frag_off   = OVS_CB(skb)->tun_key->tun_flags &
302                                   OVS_TNL_F_DONT_FRAGMENT ?  htons(IP_DF) : 0;
303                 ip_select_ident(iph, &rt_dst(rt), NULL);
304
305                 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
306
307                 err = ip_local_out(skb);
308                 if (unlikely(net_xmit_eval(err)))
309                         goto next;
310
311                 sent_len += frag_len;
312
313 next:
314                 skb = next_skb;
315         }
316
317         if (unlikely(sent_len == 0))
318                 ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
319
320         return sent_len;
321
322 err_free_rt:
323         ip_rt_put(rt);
324 error_free:
325         kfree_skb(skb);
326         ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
327         return sent_len;
328 }
329
330 struct vport *ovs_tnl_create(const struct vport_parms *parms,
331                              const struct vport_ops *vport_ops,
332                              const struct tnl_ops *tnl_ops)
333 {
334         struct vport *vport;
335         struct tnl_vport *tnl_vport;
336         int err;
337
338         vport = ovs_vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
339         if (IS_ERR(vport)) {
340                 err = PTR_ERR(vport);
341                 goto error;
342         }
343
344         tnl_vport = tnl_vport_priv(vport);
345
346         strcpy(tnl_vport->name, parms->name);
347         tnl_vport->tnl_ops = tnl_ops;
348
349         return vport;
350
351 error:
352         return ERR_PTR(err);
353 }
354
355 static void free_port_rcu(struct rcu_head *rcu)
356 {
357         struct tnl_vport *tnl_vport = container_of(rcu,
358                                                    struct tnl_vport, rcu);
359
360         ovs_vport_free(vport_from_priv(tnl_vport));
361 }
362
363 void ovs_tnl_destroy(struct vport *vport)
364 {
365         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
366
367         call_rcu(&tnl_vport->rcu, free_port_rcu);
368 }
369
370 const char *ovs_tnl_get_name(const struct vport *vport)
371 {
372         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
373         return tnl_vport->name;
374 }