/*
 * Copyright (c) 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_tunnel.h>
#include <linux/if_vlan.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/protocol.h>

#include "tunnel.h"
#include "vport.h"
#include "vport-generic.h"

/*
 * The GRE header is composed of a series of sections: a base and then a
 * variable number of options.
 */
#define GRE_HEADER_SECTION 4

/* Flags/version and protocol type: the fixed 4-byte GRE base header. */
struct gre_base_hdr {
	__be16 flags;
	__be16 protocol;
};

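/*
 * For reference (RFC 2784/2890), every piece of the GRE header is a multiple
 * of four bytes, which is what GRE_HEADER_SECTION counts:
 *
 *   bytes 0-3: base header (16 bits of flags/version, 16-bit protocol type)
 *   + 4 bytes: checksum and reserved field, present iff GRE_CSUM is set
 *   + 4 bytes: key, present iff GRE_KEY is set
 *   + 4 bytes: sequence number, present iff GRE_SEQ is set
 */
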
static int gre_hdr_len(const struct tnl_port_config *port_config)
{
	int len = GRE_HEADER_SECTION;

	if (port_config->flags & TNL_F_CSUM)
		len += GRE_HEADER_SECTION;

	if (port_config->out_key ||
	    port_config->flags & TNL_F_OUT_KEY_ACTION)
		len += GRE_HEADER_SECTION;

	return len;
}

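/*
 * Worked example: a port configured with TNL_F_CSUM and a nonzero out_key
 * needs the 4-byte base plus a 4-byte checksum section plus a 4-byte key
 * section, so gre_hdr_len() reports 12 bytes of GRE header on top of the
 * outer IP header.
 */
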
static struct sk_buff *gre_build_header(struct sk_buff *skb,
					const struct vport *vport,
					const struct tnl_mutable_config *mutable,
					struct dst_entry *dst)
{
	struct gre_base_hdr *greh = (struct gre_base_hdr *)skb_transport_header(skb);
	__be32 *options = (__be32 *)(skb_network_header(skb) + mutable->tunnel_hlen
				     - GRE_HEADER_SECTION);

	greh->protocol = htons(ETH_P_TEB);
	greh->flags = 0;

	/* Work backwards over the options so the checksum is last. */
	if (mutable->port_config.out_key ||
	    mutable->port_config.flags & TNL_F_OUT_KEY_ACTION) {
		greh->flags |= GRE_KEY;

		if (mutable->port_config.flags & TNL_F_OUT_KEY_ACTION)
			*options = OVS_CB(skb)->tun_id;
		else
			*options = mutable->port_config.out_key;

		options--;
	}

	if (mutable->port_config.flags & TNL_F_CSUM) {
		greh->flags |= GRE_CSUM;

		*options = 0;
		*(__sum16 *)options = csum_fold(skb_checksum(skb,
						sizeof(struct iphdr),
						skb->len - sizeof(struct iphdr),
						0));
	}

	/*
	 * As a last resort, allow our local IP stack to fragment the outer
	 * packet even if the DF bit is set.
	 */
	skb->local_df = 1;

	return skb;
}

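/*
 * With both a key and checksumming configured, the packet built above is laid
 * out as: outer IP | GRE base | checksum + reserved | key | inner Ethernet
 * frame, i.e. the option order required by the GRE RFCs even though the
 * options are written back to front.
 */
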
static int parse_header(struct iphdr *iph, __be16 *flags, __be32 *key)
{
	/* IP and ICMP protocol handlers check that the IHL is valid. */
	struct gre_base_hdr *greh = (struct gre_base_hdr *)((u8 *)iph + (iph->ihl << 2));
	__be32 *options = (__be32 *)(greh + 1);
	int hdr_len;

	*flags = greh->flags;

	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
		return -EINVAL;

	if (unlikely(greh->protocol != htons(ETH_P_TEB)))
		return -EINVAL;

	hdr_len = GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM) {
		hdr_len += GRE_HEADER_SECTION;
		options++;
	}

	if (greh->flags & GRE_KEY) {
		hdr_len += GRE_HEADER_SECTION;
		*key = *options;
		options++;
	} else
		*key = 0;

	if (unlikely(greh->flags & GRE_SEQ))
		hdr_len += GRE_HEADER_SECTION;

	return hdr_len;
}

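/*
 * Worked example: for a header carrying GRE_CSUM | GRE_KEY and no sequence
 * number, parse_header() returns 4 + 4 + 4 = 12 and stores the third 32-bit
 * word of the GRE header in *key.
 */
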
/* Called with rcu_read_lock and BH disabled. */
static void gre_err(struct sk_buff *skb, u32 info)
{
	struct vport *vport;
	const struct tnl_mutable_config *mutable;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	int mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);

	struct iphdr *iph;
	__be16 flags;
	__be32 key;
	int tunnel_hdr_len, tot_hdr_len;
	unsigned int orig_mac_header;
	unsigned int orig_nw_header;

	if (type != ICMP_DEST_UNREACH || code != ICMP_FRAG_NEEDED)
		return;

	/*
	 * The minimum size packet that we would actually be able to process:
	 * encapsulating IP header, minimum GRE header, Ethernet header,
	 * inner IPv4 header.
	 */
	if (!pskb_may_pull(skb, sizeof(struct iphdr) + GRE_HEADER_SECTION +
				ETH_HLEN + sizeof(struct iphdr)))
		return;

	iph = (struct iphdr *)skb->data;

	tunnel_hdr_len = parse_header(iph, &flags, &key);
	if (tunnel_hdr_len < 0)
		return;

	vport = tnl_find_port(iph->saddr, iph->daddr, key,
			      TNL_T_PROTO_GRE | TNL_T_KEY_EITHER, &mutable);
	if (!vport)
		return;

	/*
	 * Packets received by this function were previously sent by us, so
	 * any comparisons should be to the output values, not the input.
	 * However, it's not really worth it to have a hash table based on
	 * output keys (especially since ICMP error handling of tunneled
	 * packets isn't that reliable anyway).  Therefore, we do a lookup
	 * based on the out key as if it were the in key and then check to see
	 * if the input and output keys are the same.
	 */
	if (mutable->port_config.in_key != mutable->port_config.out_key)
		return;

	if (!!(mutable->port_config.flags & TNL_F_IN_KEY_MATCH) !=
	    !!(mutable->port_config.flags & TNL_F_OUT_KEY_ACTION))
		return;

	if ((mutable->port_config.flags & TNL_F_CSUM) && !(flags & GRE_CSUM))
		return;

	tunnel_hdr_len += iph->ihl << 2;

	orig_mac_header = skb_mac_header(skb) - skb->data;
	orig_nw_header = skb_network_header(skb) - skb->data;
	skb_set_mac_header(skb, tunnel_hdr_len);

	tot_hdr_len = tunnel_hdr_len + ETH_HLEN;

	skb->protocol = eth_hdr(skb)->h_proto;
	if (skb->protocol == htons(ETH_P_8021Q)) {
		tot_hdr_len += VLAN_HLEN;
		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	}

	skb_set_network_header(skb, tot_hdr_len);

	if (skb->protocol == htons(ETH_P_IP))
		tot_hdr_len += sizeof(struct iphdr);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		tot_hdr_len += sizeof(struct ipv6hdr);
#endif
	else
		goto out;

	if (!pskb_may_pull(skb, tot_hdr_len))
		goto out;

	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU) {
			if (ntohs(ip_hdr(skb)->tot_len) >= IP_MIN_MTU)
				mtu = IP_MIN_MTU;
			else
				goto out;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU) {
			unsigned int packet_length = sizeof(struct ipv6hdr) +
					ntohs(ipv6_hdr(skb)->payload_len);

			if (packet_length >= IPV6_MIN_MTU
			    || ntohs(ipv6_hdr(skb)->payload_len) == 0)
				mtu = IPV6_MIN_MTU;
			else
				goto out;
		}
	}
#endif

	__skb_pull(skb, tunnel_hdr_len);
	tnl_frag_needed(vport, mutable, skb, mtu, key);
	__skb_push(skb, tunnel_hdr_len);

out:
	skb_set_mac_header(skb, orig_mac_header);
	skb_set_network_header(skb, orig_nw_header);
	skb->protocol = htons(ETH_P_IP);
}

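/*
 * Note that the MTU checks above only ever raise a too-small value to the
 * protocol minimum (IP_MIN_MTU for IPv4, IPV6_MIN_MTU, i.e. 1280, for IPv6),
 * so a bogus or truncated ICMP fragmentation-needed message never makes us
 * advertise an MTU the inner protocol could not legally use.
 */
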
static bool check_checksum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(iph + 1);
	__sum16 csum = 0;

	if (greh->flags & GRE_CSUM) {
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			csum = csum_fold(skb->csum);
			if (!csum)
				break;
			/* Fall through. */
		case CHECKSUM_NONE:
			skb->csum = 0;
			csum = __skb_checksum_complete(skb);
			skb->ip_summed = CHECKSUM_COMPLETE;
			break;
		}
	}

	return (csum == 0);
}

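/*
 * CHECKSUM_COMPLETE means the NIC already summed the packet, so folding
 * skb->csum is enough; CHECKSUM_NONE forces a full software pass via
 * __skb_checksum_complete().  In both cases a result of zero means the GRE
 * checksum verified correctly.
 */
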
/* Called with rcu_read_lock and BH disabled. */
static int gre_rcv(struct sk_buff *skb)
{
	struct vport *vport;
	const struct tnl_mutable_config *mutable;
	int hdr_len;
	struct iphdr *iph;
	__be16 flags;
	__be32 key;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr) + ETH_HLEN)))
		goto error;

	if (unlikely(!check_checksum(skb)))
		goto error;

	hdr_len = parse_header(ip_hdr(skb), &flags, &key);
	if (unlikely(hdr_len < 0))
		goto error;

	if (unlikely(!pskb_may_pull(skb, hdr_len + ETH_HLEN)))
		goto error;

	iph = ip_hdr(skb);
	vport = tnl_find_port(iph->daddr, iph->saddr, key,
			      TNL_T_PROTO_GRE | TNL_T_KEY_EITHER, &mutable);
	if (unlikely(!vport)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
		goto error;
	}

	if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH)
		OVS_CB(skb)->tun_id = key;
	else
		OVS_CB(skb)->tun_id = 0;

	__skb_pull(skb, hdr_len);
	skb_postpull_rcsum(skb, skb_transport_header(skb), hdr_len + ETH_HLEN);

	tnl_rcv(vport, skb);
	return 0;

error:
	kfree_skb(skb);
	return 0;
}

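/*
 * Receive path summary: make sure the base GRE header and an inner Ethernet
 * header are present, verify the optional GRE checksum, parse the header to
 * get its length and key, look up the vport for this (daddr, saddr, key)
 * tuple, record the tunnel ID for flow matching, and strip the GRE header
 * before handing the inner frame to the shared tunnel receive code.
 */
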
struct tnl_ops gre_tnl_ops = {
	.tunnel_type = TNL_T_PROTO_GRE,
	.ipproto = IPPROTO_GRE,
	.hdr_len = gre_hdr_len,
	.build_header = gre_build_header,
};

static struct vport *gre_create(const char *name, const void __user *config)
{
	return tnl_create(name, config, &gre_vport_ops, &gre_tnl_ops);
}

static struct net_protocol gre_protocol_handlers = {
	.handler = gre_rcv,
	.err_handler = gre_err,
};

static int gre_init(void)
{
	int err;

	err = inet_add_protocol(&gre_protocol_handlers, IPPROTO_GRE);
	if (err)
		pr_warn("cannot register gre protocol handler\n");

	return err;
}

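/*
 * inet_add_protocol() will not register a second handler for IPPROTO_GRE, so
 * this fails if another GRE implementation (for example the kernel's ip_gre
 * module) already owns the protocol number.
 */
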
static void gre_exit(void)
{
	inet_del_protocol(&gre_protocol_handlers, IPPROTO_GRE);
}

struct vport_ops gre_vport_ops = {
	.type = "gre",
	.flags = VPORT_F_GEN_STATS | VPORT_F_TUN_ID,
	.init = gre_init,
	.exit = gre_exit,
	.create = gre_create,
	.modify = tnl_modify,
	.destroy = tnl_destroy,
	.set_mtu = tnl_set_mtu,
	.set_addr = tnl_set_addr,
	.get_name = tnl_get_name,
	.get_addr = tnl_get_addr,
	.get_dev_flags = vport_gen_get_dev_flags,
	.is_running = vport_gen_is_running,
	.get_operstate = vport_gen_get_operstate,
	.get_mtu = tnl_get_mtu,
	.send = tnl_send,
};