/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>
#include <linux/if_vlan.h>
#include <linux/in.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/protocol.h>

#include "tunnel.h"
#include "vport.h"
#include "vport-generic.h"

/*
 * The GRE header is composed of a series of sections: a base and then a variable
 * number of options.
 */
#define GRE_HEADER_SECTION 4
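
/*
 * Layout of the fixed part of a GRE header as used below.  Newer kernels
 * define an equivalent structure in a system header; this local definition
 * is an assumption for trees where one is not available.
 */
struct gre_base_hdr {
	__be16 flags;
	__be16 protocol;
};

/*
 * Returns the number of bytes of GRE header that will be prepended to
 * packets sent on this tunnel, given its current configuration.
 */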
static int gre_hdr_len(const struct tnl_mutable_config *mutable)
{
	int len;

	len = GRE_HEADER_SECTION;

	if (mutable->flags & TNL_F_CSUM)
		len += GRE_HEADER_SECTION;

	if (mutable->out_key || mutable->flags & TNL_F_OUT_KEY_ACTION)
		len += GRE_HEADER_SECTION;

	return len;
}

/* Returns the least-significant 32 bits of a __be64. */
static __be32 be64_get_low32(__be64 x)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)x;
#else
	return (__force __be32)((__force u64)x >> 32);
#endif
}
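
/*
 * Builds the static portion of the GRE header: the flags, the protocol
 * (Transparent Ethernet Bridging) and, when the outgoing key is fixed,
 * the key option itself.  Per-packet fields are filled in later by
 * gre_update_header().
 */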
static void gre_build_header(const struct vport *vport,
			     const struct tnl_mutable_config *mutable,
			     void *header)
{
	struct gre_base_hdr *greh = header;
	__be32 *options = (__be32 *)(greh + 1);

	greh->protocol = htons(ETH_P_TEB);
	greh->flags = 0;

	if (mutable->flags & TNL_F_CSUM) {
		greh->flags |= GRE_CSUM;
		*options = 0;
		options++;
	}

	if (mutable->out_key || mutable->flags & TNL_F_OUT_KEY_ACTION)
		greh->flags |= GRE_KEY;

	if (mutable->out_key)
		*options = be64_get_low32(mutable->out_key);
}
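
/*
 * Completes the GRE header of a packet that is about to be transmitted:
 * writes the per-flow key when the key comes from the flow actions and
 * computes the GRE checksum over the payload when checksums are enabled.
 */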
static struct sk_buff *gre_update_header(const struct vport *vport,
					 const struct tnl_mutable_config *mutable,
					 struct dst_entry *dst,
					 struct sk_buff *skb)
{
	__be32 *options = (__be32 *)(skb_network_header(skb) + mutable->tunnel_hlen
					       - GRE_HEADER_SECTION);

	/* Work backwards over the options so the checksum is last. */
	if (mutable->flags & TNL_F_OUT_KEY_ACTION)
		*options = be64_get_low32(OVS_CB(skb)->tun_id);

	if (mutable->out_key || mutable->flags & TNL_F_OUT_KEY_ACTION)
		options--;

	if (mutable->flags & TNL_F_CSUM)
		*(__sum16 *)options = csum_fold(skb_checksum(skb,
						skb_transport_offset(skb),
						skb->len - skb_transport_offset(skb),
						0));

	/*
	 * Allow our local IP stack to fragment the outer packet even if the
	 * DF bit is set as a last resort.  We also need to force selection of
	 * an IP ID here because Linux will otherwise leave it at 0 if the
	 * packet originally had DF set.
	 */
	skb->local_df = 1;
	__ip_select_ident(ip_hdr(skb), dst, 0);

	return skb;
}

/* Zero-extends a __be32 into the least-significant 32 bits of a __be64. */
static __be64 be32_extend_to_be64(__be32 x)
{
#ifdef __BIG_ENDIAN
	return (__force __be64)x;
#else
	return (__force __be64)((__force u64)x << 32);
#endif
}
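
/*
 * Parses the GRE header of an incoming packet, returning its flags and key
 * through the out parameters.  Returns the length of the GRE header in bytes
 * on success or a negative error code if the header is not one we can handle.
 */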
static int parse_header(struct iphdr *iph, __be16 *flags, __be64 *key)
{
	/* IP and ICMP protocol handlers check that the IHL is valid. */
	struct gre_base_hdr *greh = (struct gre_base_hdr *)((u8 *)iph + (iph->ihl << 2));
	__be32 *options = (__be32 *)(greh + 1);
	int hdr_len;

	*flags = greh->flags;

	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
		return -EINVAL;

	if (unlikely(greh->protocol != htons(ETH_P_TEB)))
		return -EINVAL;

	hdr_len = GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM) {
		hdr_len += GRE_HEADER_SECTION;
		options++;
	}

	if (greh->flags & GRE_KEY) {
		hdr_len += GRE_HEADER_SECTION;
		*key = be32_extend_to_be64(*options);
		options++;
	} else
		*key = 0;

	if (unlikely(greh->flags & GRE_SEQ))
		hdr_len += GRE_HEADER_SECTION;

	return hdr_len;
}

/* Called with rcu_read_lock and BH disabled. */
static void gre_err(struct sk_buff *skb, u32 info)
{
	struct vport *vport;
	const struct tnl_mutable_config *mutable;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	int mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);

	struct iphdr *iph;
	__be16 flags;
	__be64 key;
	int tunnel_hdr_len, tot_hdr_len;
	unsigned int orig_mac_header;
	unsigned int orig_nw_header;

	if (type != ICMP_DEST_UNREACH || code != ICMP_FRAG_NEEDED)
		return;

	/*
	 * The minimum size packet that we would actually be able to process:
	 * encapsulating IP header, minimum GRE header, Ethernet header,
	 * inner IPv4 header.
	 */
	if (!pskb_may_pull(skb, sizeof(struct iphdr) + GRE_HEADER_SECTION +
				ETH_HLEN + sizeof(struct iphdr)))
		return;

	iph = (struct iphdr *)skb->data;
	if (ipv4_is_multicast(iph->daddr))
		return;

	tunnel_hdr_len = parse_header(iph, &flags, &key);
	if (tunnel_hdr_len < 0)
		return;

	vport = ovs_tnl_find_port(dev_net(skb->dev), iph->saddr, iph->daddr, key,
				  TNL_T_PROTO_GRE, &mutable);
	if (!vport)
		return;

	/*
	 * Packets received by this function were previously sent by us, so
	 * any comparisons should be to the output values, not the input.
	 * However, it's not really worth it to have a hash table based on
	 * output keys (especially since ICMP error handling of tunneled packets
	 * isn't that reliable anyways).  Therefore, we do a lookup based on the
	 * out key as if it were the in key and then check to see if the input
	 * and output keys are the same.
	 */
	if (mutable->key.in_key != mutable->out_key)
		return;

	if (!!(mutable->flags & TNL_F_IN_KEY_MATCH) !=
	    !!(mutable->flags & TNL_F_OUT_KEY_ACTION))
		return;

	if ((mutable->flags & TNL_F_CSUM) && !(flags & GRE_CSUM))
		return;

	tunnel_hdr_len += iph->ihl << 2;

	orig_mac_header = skb_mac_header(skb) - skb->data;
	orig_nw_header = skb_network_header(skb) - skb->data;
	skb_set_mac_header(skb, tunnel_hdr_len);

	tot_hdr_len = tunnel_hdr_len + ETH_HLEN;

	skb->protocol = eth_hdr(skb)->h_proto;
	if (skb->protocol == htons(ETH_P_8021Q)) {
		tot_hdr_len += VLAN_HLEN;
		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	}

	skb_set_network_header(skb, tot_hdr_len);
	mtu -= tot_hdr_len;

	if (skb->protocol == htons(ETH_P_IP))
		tot_hdr_len += sizeof(struct iphdr);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		tot_hdr_len += sizeof(struct ipv6hdr);
#endif
	else
		goto out;

	if (!pskb_may_pull(skb, tot_hdr_len))
		goto out;

	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU) {
			if (ntohs(ip_hdr(skb)->tot_len) >= IP_MIN_MTU)
				mtu = IP_MIN_MTU;
			else
				goto out;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU) {
			unsigned int packet_length = sizeof(struct ipv6hdr) +
				ntohs(ipv6_hdr(skb)->payload_len);

			if (packet_length >= IPV6_MIN_MTU
			    || ntohs(ipv6_hdr(skb)->payload_len) == 0)
				mtu = IPV6_MIN_MTU;
			else
				goto out;
		}
	}
#endif

	__skb_pull(skb, tunnel_hdr_len);
	ovs_tnl_frag_needed(vport, mutable, skb, mtu, key);
	__skb_push(skb, tunnel_hdr_len);

out:
	skb_set_mac_header(skb, orig_mac_header);
	skb_set_network_header(skb, orig_nw_header);
	skb->protocol = htons(ETH_P_IP);
}
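
/*
 * Validates the GRE checksum of an incoming packet, if the checksum bit is
 * set in the GRE flags.  Returns true when the checksum is absent or correct.
 */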
static bool check_checksum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(iph + 1);
	__sum16 csum = 0;

	if (greh->flags & GRE_CSUM) {
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			csum = csum_fold(skb->csum);
			if (!csum)
				break;
			/* Fall through. */
		case CHECKSUM_NONE:
			skb->csum = 0;
			csum = __skb_checksum_complete(skb);
			skb->ip_summed = CHECKSUM_COMPLETE;
			break;
		}
	}

	return (csum == 0);
}

/* Called with rcu_read_lock and BH disabled. */
static int gre_rcv(struct sk_buff *skb)
{
	struct vport *vport;
	const struct tnl_mutable_config *mutable;
	struct iphdr *iph;
	int hdr_len;
	__be16 flags;
	__be64 key;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr) + ETH_HLEN)))
		goto error;

	if (unlikely(!check_checksum(skb)))
		goto error;

	hdr_len = parse_header(ip_hdr(skb), &flags, &key);
	if (unlikely(hdr_len < 0))
		goto error;

	if (unlikely(!pskb_may_pull(skb, hdr_len + ETH_HLEN)))
		goto error;

	iph = ip_hdr(skb);
	vport = ovs_tnl_find_port(dev_net(skb->dev), iph->daddr, iph->saddr, key,
				  TNL_T_PROTO_GRE, &mutable);
	if (unlikely(!vport)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
		goto error;
	}

	if (mutable->flags & TNL_F_IN_KEY_MATCH)
		OVS_CB(skb)->tun_id = key;
	else
		OVS_CB(skb)->tun_id = 0;

	__skb_pull(skb, hdr_len);
	skb_postpull_rcsum(skb, skb_transport_header(skb), hdr_len + ETH_HLEN);

	ovs_tnl_rcv(vport, skb, iph->tos);
	return 0;

error:
	kfree_skb(skb);
	return 0;
}
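
/*
 * Hooks through which the generic tunnel code builds and finishes GRE
 * headers on transmit.
 */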
static const struct tnl_ops gre_tnl_ops = {
	.tunnel_type	= TNL_T_PROTO_GRE,
	.ipproto	= IPPROTO_GRE,
	.hdr_len	= gre_hdr_len,
	.build_header	= gre_build_header,
	.update_header	= gre_update_header,
};
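
/* Creates a GRE vport on top of the generic tunnel vport code. */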
static struct vport *gre_create(const struct vport_parms *parms)
{
	return ovs_tnl_create(parms, &ovs_gre_vport_ops, &gre_tnl_ops);
}
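
/*
 * IP protocol handler used to receive GRE packets and the ICMP errors that
 * they trigger.
 */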
static const struct net_protocol gre_protocol_handlers = {
	.handler	= gre_rcv,
	.err_handler	= gre_err,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
	.netns_ok	= 1,
#endif
};
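
/* Registers the GRE protocol handler with the IP stack. */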
static int gre_init(void)
{
	int err;

	err = inet_add_protocol(&gre_protocol_handlers, IPPROTO_GRE);
	if (err)
		pr_warn("cannot register gre protocol handler\n");

	return err;
}
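
/* Unregisters the GRE protocol handler. */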
static void gre_exit(void)
{
	inet_del_protocol(&gre_protocol_handlers, IPPROTO_GRE);
}
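
/*
 * Vport operations for GRE ports; everything except creation and the GRE
 * protocol registration is delegated to the generic tunnel and vport code.
 */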
const struct vport_ops ovs_gre_vport_ops = {
	.type		= OVS_VPORT_TYPE_GRE,
	.flags		= VPORT_F_TUN_ID,
	.init		= gre_init,
	.exit		= gre_exit,
	.create		= gre_create,
	.destroy	= ovs_tnl_destroy,
	.set_addr	= ovs_tnl_set_addr,
	.get_name	= ovs_tnl_get_name,
	.get_addr	= ovs_tnl_get_addr,
	.get_options	= ovs_tnl_get_options,
	.set_options	= ovs_tnl_set_options,
	.get_dev_flags	= ovs_vport_gen_get_dev_flags,
	.is_running	= ovs_vport_gen_is_running,
	.get_operstate	= ovs_vport_gen_get_operstate,
	.send		= ovs_tnl_send,
};