datapath/vport-gre.c
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>
#include <linux/if_vlan.h>
#include <linux/in.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/protocol.h>

#include "datapath.h"
#include "tunnel.h"
#include "vport.h"
#include "vport-generic.h"

/*
 * The GRE header is composed of a series of 4-byte sections: a base header
 * followed by a variable number of options.
 */
#define GRE_HEADER_SECTION 4

struct gre_base_hdr {
        __be16 flags;
        __be16 protocol;
};

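/*
 * Returns the length of the GRE header that will be built for this tunnel
 * configuration: the 4-byte base, plus 4 bytes each for the optional
 * checksum, key, and (for GRE64) sequence number fields.
 */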
static int gre_hdr_len(const struct tnl_mutable_config *mutable)
{
        int len;

        len = GRE_HEADER_SECTION;

        if (mutable->flags & TNL_F_CSUM)
                len += GRE_HEADER_SECTION;

        if (mutable->out_key || mutable->flags & TNL_F_OUT_KEY_ACTION) {
                len += GRE_HEADER_SECTION;
                if (mutable->key.tunnel_type & TNL_T_PROTO_GRE64)
                        len += GRE_HEADER_SECTION;
        }
        return len;
}


/* Returns the least-significant 32 bits of a __be64. */
static __be32 be64_get_low32(__be64 x)
{
#ifdef __BIG_ENDIAN
        return (__force __be32)x;
#else
        return (__force __be32)((__force u64)x >> 32);
#endif
}

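/* Returns the most-significant 32 bits of a __be64. */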
static __be32 be64_get_high32(__be64 x)
{
#ifdef __BIG_ENDIAN
        return (__force __be32)((__force u64)x >> 32);
#else
        return (__force __be32)x;
#endif
}

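/*
 * Writes the static part of the GRE header: flags, protocol, and the key if
 * it is fixed by the tunnel configuration.  Fields that depend on the
 * individual packet, such as the checksum and a per-flow key, are filled in
 * later by gre_update_header().
 */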
static void gre_build_header(const struct vport *vport,
                             const struct tnl_mutable_config *mutable,
                             void *header)
{
        struct gre_base_hdr *greh = header;
        __be32 *options = (__be32 *)(greh + 1);

        greh->protocol = htons(ETH_P_TEB);
        greh->flags = 0;

        if (mutable->flags & TNL_F_CSUM) {
                greh->flags |= GRE_CSUM;
                *options = 0;
                options++;
        }

        if (mutable->flags & TNL_F_OUT_KEY_ACTION) {
                greh->flags |= GRE_KEY;
                if (mutable->key.tunnel_type & TNL_T_PROTO_GRE64)
                        greh->flags |= GRE_SEQ;

        } else if (mutable->out_key) {
                greh->flags |= GRE_KEY;
                *options = be64_get_low32(mutable->out_key);
                if (mutable->key.tunnel_type & TNL_T_PROTO_GRE64) {
                        options++;
                        *options = be64_get_high32(mutable->out_key);
                        greh->flags |= GRE_SEQ;
                }
        }
}

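/*
 * Fills in the per-packet parts of an already-built GRE header: the key and
 * sequence number (when the tunnel ID comes from the flow actions) and the
 * checksum, which covers the GRE header and payload.
 */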
static struct sk_buff *gre_update_header(const struct vport *vport,
                                         const struct tnl_mutable_config *mutable,
                                         struct dst_entry *dst,
                                         struct sk_buff *skb)
{
        __be32 *options = (__be32 *)(skb_network_header(skb) + mutable->tunnel_hlen
                                               - GRE_HEADER_SECTION);

        /* Work backwards over the options so the checksum is last. */
        if (mutable->flags & TNL_F_OUT_KEY_ACTION) {
                if (mutable->key.tunnel_type & TNL_T_PROTO_GRE64) {
                        /* Set higher 32 bits to seq. */
                        *options = be64_get_high32(OVS_CB(skb)->tun_id);
                        options--;
                }
                *options = be64_get_low32(OVS_CB(skb)->tun_id);
                options--;
        } else if (mutable->out_key) {
                options--;
                if (mutable->key.tunnel_type & TNL_T_PROTO_GRE64)
                        options--;
        }

        if (mutable->flags & TNL_F_CSUM)
                *(__sum16 *)options = csum_fold(skb_checksum(skb,
                                                skb_transport_offset(skb),
                                                skb->len - skb_transport_offset(skb),
                                                0));
        /*
         * Allow our local IP stack to fragment the outer packet even if the
         * DF bit is set as a last resort.  We also need to force selection of
         * an IP ID here because Linux will otherwise leave it at 0 if the
         * packet originally had DF set.
         */
        skb->local_df = 1;
        __ip_select_ident(ip_hdr(skb), dst, 0);

        return skb;
}

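/*
 * Combines a 32-bit GRE key and sequence number into a 64-bit tunnel ID,
 * with the sequence number supplying the upper 32 bits (the GRE64 layout).
 */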
static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
{
#ifdef __BIG_ENDIAN
        return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
#else
        return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
#endif
}

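/*
 * Parses a received GRE header.  On success, returns the GRE header length
 * in bytes and fills in the flags, tunnel ID, and tunnel type (GRE or
 * GRE64); returns -EINVAL if the header uses unsupported options.
 */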
static int parse_header(struct iphdr *iph, __be16 *flags, __be64 *tun_id,
                        u32 *tunnel_type)
{
        /* IP and ICMP protocol handlers check that the IHL is valid. */
        struct gre_base_hdr *greh = (struct gre_base_hdr *)((u8 *)iph + (iph->ihl << 2));
        __be32 *options = (__be32 *)(greh + 1);
        int hdr_len;

        *flags = greh->flags;

        if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
                return -EINVAL;

        if (unlikely(greh->protocol != htons(ETH_P_TEB)))
                return -EINVAL;

        hdr_len = GRE_HEADER_SECTION;

        if (greh->flags & GRE_CSUM) {
                hdr_len += GRE_HEADER_SECTION;
                options++;
        }

        if (greh->flags & GRE_KEY) {
                __be32 seq;
                __be32 gre_key;

                gre_key = *options;
                hdr_len += GRE_HEADER_SECTION;
                options++;

                if (greh->flags & GRE_SEQ) {
                        seq = *options;
                        *tunnel_type = TNL_T_PROTO_GRE64;
                } else {
                        seq = 0;
                        *tunnel_type = TNL_T_PROTO_GRE;
                }
                *tun_id = key_to_tunnel_id(gre_key, seq);
        } else {
                *tun_id = 0;
                /* Ignore GRE seq if there is no key present. */
                *tunnel_type = TNL_T_PROTO_GRE;
        }

        if (greh->flags & GRE_SEQ)
                hdr_len += GRE_HEADER_SECTION;

        return hdr_len;
}

/* Called with rcu_read_lock and BH disabled. */
static void gre_err(struct sk_buff *skb, u32 info)
{
        struct vport *vport;
        const struct tnl_mutable_config *mutable;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        int mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);
        u32 tunnel_type;

        struct iphdr *iph;
        __be16 flags;
        __be64 key;
        int tunnel_hdr_len, tot_hdr_len;
        unsigned int orig_mac_header;
        unsigned int orig_nw_header;

        if (type != ICMP_DEST_UNREACH || code != ICMP_FRAG_NEEDED)
                return;

        /*
         * The minimum size packet that we would actually be able to process:
         * encapsulating IP header, minimum GRE header, Ethernet header,
         * inner IPv4 header.
         */
        if (!pskb_may_pull(skb, sizeof(struct iphdr) + GRE_HEADER_SECTION +
                                ETH_HLEN + sizeof(struct iphdr)))
                return;

        iph = (struct iphdr *)skb->data;
        if (ipv4_is_multicast(iph->daddr))
                return;

        tunnel_hdr_len = parse_header(iph, &flags, &key, &tunnel_type);
        if (tunnel_hdr_len < 0)
                return;

        vport = ovs_tnl_find_port(dev_net(skb->dev), iph->saddr, iph->daddr, key,
                                  tunnel_type, &mutable);
        if (!vport)
                return;

        /*
         * Packets received by this function were previously sent by us, so
         * any comparisons should be to the output values, not the input.
         * However, it's not really worth it to have a hash table based on
         * output keys (especially since ICMP error handling of tunneled packets
         * isn't that reliable anyway).  Therefore, we do a lookup based on the
         * out key as if it were the in key and then check to see if the input
         * and output keys are the same.
         */
        if (mutable->key.in_key != mutable->out_key)
                return;

        if (!!(mutable->flags & TNL_F_IN_KEY_MATCH) !=
            !!(mutable->flags & TNL_F_OUT_KEY_ACTION))
                return;

        if ((mutable->flags & TNL_F_CSUM) && !(flags & GRE_CSUM))
                return;

        tunnel_hdr_len += iph->ihl << 2;

        orig_mac_header = skb_mac_header(skb) - skb->data;
        orig_nw_header = skb_network_header(skb) - skb->data;
        skb_set_mac_header(skb, tunnel_hdr_len);

        tot_hdr_len = tunnel_hdr_len + ETH_HLEN;

        skb->protocol = eth_hdr(skb)->h_proto;
        if (skb->protocol == htons(ETH_P_8021Q)) {
                tot_hdr_len += VLAN_HLEN;
                skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
        }

        skb_set_network_header(skb, tot_hdr_len);
        mtu -= tot_hdr_len;

        if (skb->protocol == htons(ETH_P_IP))
                tot_hdr_len += sizeof(struct iphdr);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6))
                tot_hdr_len += sizeof(struct ipv6hdr);
#endif
        else
                goto out;

        if (!pskb_may_pull(skb, tot_hdr_len))
                goto out;

        if (skb->protocol == htons(ETH_P_IP)) {
                if (mtu < IP_MIN_MTU) {
                        if (ntohs(ip_hdr(skb)->tot_len) >= IP_MIN_MTU)
                                mtu = IP_MIN_MTU;
                        else
                                goto out;
                }

        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (mtu < IPV6_MIN_MTU) {
                        unsigned int packet_length = sizeof(struct ipv6hdr) +
                                              ntohs(ipv6_hdr(skb)->payload_len);

                        if (packet_length >= IPV6_MIN_MTU
                            || ntohs(ipv6_hdr(skb)->payload_len) == 0)
                                mtu = IPV6_MIN_MTU;
                        else
                                goto out;
                }
        }
#endif

        __skb_pull(skb, tunnel_hdr_len);
        ovs_tnl_frag_needed(vport, mutable, skb, mtu, key);
        __skb_push(skb, tunnel_hdr_len);

out:
        skb_set_mac_header(skb, orig_mac_header);
        skb_set_network_header(skb, orig_nw_header);
        skb->protocol = htons(ETH_P_IP);
}

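/*
 * Verifies the GRE checksum, if one is present, by checksumming everything
 * from the start of the GRE header onward.  Returns true if the checksum is
 * valid or absent.
 */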
static bool check_checksum(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);
        struct gre_base_hdr *greh = (struct gre_base_hdr *)(iph + 1);
        __sum16 csum = 0;

        if (greh->flags & GRE_CSUM) {
                switch (skb->ip_summed) {
                case CHECKSUM_COMPLETE:
                        csum = csum_fold(skb->csum);

                        if (!csum)
                                break;
                        /* Fall through. */

                case CHECKSUM_NONE:
                        skb->csum = 0;
                        csum = __skb_checksum_complete(skb);
                        skb->ip_summed = CHECKSUM_COMPLETE;
                        break;
                }
        }

        return (csum == 0);
}

/* Called with rcu_read_lock and BH disabled. */
static int gre_rcv(struct sk_buff *skb)
{
        struct vport *vport;
        const struct tnl_mutable_config *mutable;
        int hdr_len;
        struct iphdr *iph;
        __be16 flags;
        __be64 key;
        u32 tunnel_type;

        if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr) + ETH_HLEN)))
                goto error;
        if (unlikely(!check_checksum(skb)))
                goto error;

        hdr_len = parse_header(ip_hdr(skb), &flags, &key, &tunnel_type);
        if (unlikely(hdr_len < 0))
                goto error;

        if (unlikely(!pskb_may_pull(skb, hdr_len + ETH_HLEN)))
                goto error;

        iph = ip_hdr(skb);
        vport = ovs_tnl_find_port(dev_net(skb->dev), iph->daddr, iph->saddr, key,
                                  tunnel_type, &mutable);
        if (unlikely(!vport)) {
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
                goto error;
        }

        if (mutable->flags & TNL_F_IN_KEY_MATCH)
                OVS_CB(skb)->tun_id = key;
        else
                OVS_CB(skb)->tun_id = 0;

        __skb_pull(skb, hdr_len);
        skb_postpull_rcsum(skb, skb_transport_header(skb), hdr_len + ETH_HLEN);

        ovs_tnl_rcv(vport, skb, iph->tos);
        return 0;

error:
        kfree_skb(skb);
        return 0;
}

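/*
 * The GRE and GRE64 vports share the same header-building and parsing code;
 * they differ only in the tunnel type, which selects whether a 32-bit key or
 * a 64-bit key (key plus sequence number) is carried on the wire.
 */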
static const struct tnl_ops gre_tnl_ops = {
        .tunnel_type    = TNL_T_PROTO_GRE,
        .ipproto        = IPPROTO_GRE,
        .hdr_len        = gre_hdr_len,
        .build_header   = gre_build_header,
        .update_header  = gre_update_header,
};

static struct vport *gre_create(const struct vport_parms *parms)
{
        return ovs_tnl_create(parms, &ovs_gre_vport_ops, &gre_tnl_ops);
}

static const struct tnl_ops gre64_tnl_ops = {
        .tunnel_type    = TNL_T_PROTO_GRE64,
        .ipproto        = IPPROTO_GRE,
        .hdr_len        = gre_hdr_len,
        .build_header   = gre_build_header,
        .update_header  = gre_update_header,
};

static struct vport *gre_create64(const struct vport_parms *parms)
{
        return ovs_tnl_create(parms, &ovs_gre64_vport_ops, &gre64_tnl_ops);
}

static const struct net_protocol gre_protocol_handlers = {
        .handler        =       gre_rcv,
        .err_handler    =       gre_err,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
        .netns_ok       =       1,
#endif
};

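/*
 * Both vport types register the same IPPROTO_GRE protocol handlers, so the
 * "inited" flag keeps init/exit from registering or unregistering them more
 * than once when both types are in use.
 */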
static bool inited;

static int gre_init(void)
{
        int err;

        if (inited)
                return 0;

        inited = true;
        err = inet_add_protocol(&gre_protocol_handlers, IPPROTO_GRE);
        if (err)
                pr_warn("cannot register gre protocol handler\n");

        return err;
}

static void gre_exit(void)
{
        if (!inited)
                return;

        inited = false;

        inet_del_protocol(&gre_protocol_handlers, IPPROTO_GRE);
}

const struct vport_ops ovs_gre_vport_ops = {
        .type           = OVS_VPORT_TYPE_GRE,
        .flags          = VPORT_F_TUN_ID,
        .init           = gre_init,
        .exit           = gre_exit,
        .create         = gre_create,
        .destroy        = ovs_tnl_destroy,
        .set_addr       = ovs_tnl_set_addr,
        .get_name       = ovs_tnl_get_name,
        .get_addr       = ovs_tnl_get_addr,
        .get_options    = ovs_tnl_get_options,
        .set_options    = ovs_tnl_set_options,
        .get_dev_flags  = ovs_vport_gen_get_dev_flags,
        .is_running     = ovs_vport_gen_is_running,
        .get_operstate  = ovs_vport_gen_get_operstate,
        .send           = ovs_tnl_send,
};

const struct vport_ops ovs_gre64_vport_ops = {
        .type           = OVS_VPORT_TYPE_GRE64,
        .flags          = VPORT_F_TUN_ID,
        .init           = gre_init,
        .exit           = gre_exit,
        .create         = gre_create64,
        .destroy        = ovs_tnl_destroy,
        .set_addr       = ovs_tnl_set_addr,
        .get_name       = ovs_tnl_get_name,
        .get_addr       = ovs_tnl_get_addr,
        .get_options    = ovs_tnl_get_options,
        .set_options    = ovs_tnl_set_options,
        .get_dev_flags  = ovs_vport_gen_get_dev_flags,
        .is_running     = ovs_vport_gen_is_running,
        .get_operstate  = ovs_vport_gen_get_operstate,
        .send           = ovs_tnl_send,
};