/*
 * Copyright (c) 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tunnel.h>
#include <linux/if_vlan.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/version.h>

#include <net/dsfield.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/protocol.h>
#include <net/route.h>

#include "datapath.h"
#include "openvswitch/gre.h"
#include "table.h"
#include "vport.h"
#include "vport-generic.h"
/* The absolute minimum fragment size.  Note that there are many other
 * definitions of the minimum MTU. */
#define IP_MIN_MTU 68
/* The GRE header is composed of a series of sections: a base and then a
 * variable number of options. */
#define GRE_HEADER_SECTION 4
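/*
 * For reference, a sketch of the wire layout (per RFC 2784/2890; this
 * diagram is an illustrative addition, not part of the original file).
 * The base header is a single 4-byte section and each enabled option
 * appends one more:
 *
 *	 0              15 16             31
 *	+-----------------+----------------+
 *	| flags / version | protocol type  |  base (always present)
 *	+-----------------+----------------+
 *	|    checksum     |    reserved    |  if GRE_CSUM
 *	+-----------------+----------------+
 *	|               key                |  if GRE_KEY
 *	+----------------------------------+
 *	|          sequence number         |  if GRE_SEQ
 *	+----------------------------------+
 */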
struct mutable_config {
	struct rcu_head rcu;

	unsigned char eth_addr[ETH_ALEN];
	unsigned int mtu;
	struct gre_port_config port_config;

	int tunnel_hlen; /* Tunnel header length. */
};
struct gre_vport {
	struct tbl_node tbl_node;

	char name[IFNAMSIZ];
	struct rcu_head rcu;

	/* Protected by RCU. */
	struct mutable_config *mutable;
};
/* Protected by RCU. */
static struct tbl *port_table;
/* These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened. */
static unsigned int key_local_remote_ports;
static unsigned int key_remote_ports;
static unsigned int local_remote_ports;
static unsigned int remote_ports;
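/*
 * Example (an illustrative note, derived from the lookup logic below): if
 * every configured port uses a fixed key and a specific local address, only
 * key_local_remote_ports is nonzero, so find_port() performs exactly one
 * hash lookup per received packet and skips the other three pools.
 */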
static inline struct gre_vport *gre_vport_priv(const struct vport *vport)
{
	return vport_priv(vport);
}

static inline struct vport *gre_vport_to_vport(const struct gre_vport *gre_vport)
{
	return vport_from_priv(gre_vport);
}

static inline struct gre_vport *gre_vport_table_cast(const struct tbl_node *node)
{
	return container_of(node, struct gre_vport, tbl_node);
}
static void free_config(struct rcu_head *rcu)
{
	struct mutable_config *c = container_of(rcu, struct mutable_config, rcu);

	kfree(c);
}
static void assign_config_rcu(struct vport *vport,
			      struct mutable_config *new_config)
{
	struct gre_vport *gre_vport = gre_vport_priv(vport);
	struct mutable_config *old_config;

	old_config = rcu_dereference(gre_vport->mutable);
	rcu_assign_pointer(gre_vport->mutable, new_config);
	call_rcu(&old_config->rcu, free_config);
}
static unsigned int *find_port_pool(const struct mutable_config *mutable)
{
	if (mutable->port_config.flags & GRE_F_IN_KEY_MATCH) {
		if (mutable->port_config.saddr)
			return &local_remote_ports;
		else
			return &remote_ports;
	} else {
		if (mutable->port_config.saddr)
			return &key_local_remote_ports;
		else
			return &key_remote_ports;
	}
}
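/*
 * Sketch of the mapping implemented above (derived from the code, for
 * illustration only):
 *
 *	GRE_F_IN_KEY_MATCH	saddr set	pool
 *	yes			yes		local_remote_ports
 *	yes			no		remote_ports
 *	no			yes		key_local_remote_ports
 *	no			no		key_remote_ports
 */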
enum lookup_key {
	LOOKUP_SADDR		= 0,
	LOOKUP_DADDR		= 1,
	LOOKUP_KEY		= 2,
	LOOKUP_KEY_MATCH	= 3,
};

struct port_lookup_key {
	u32 vals[4];		/* Indexed by enum lookup_key. */
	const struct mutable_config *mutable;
};
/* Modifies 'target' to store the rcu_dereferenced pointer that was used to do
 * the comparison. */
static int port_cmp(const struct tbl_node *node, void *target)
{
	const struct gre_vport *gre_vport = gre_vport_table_cast(node);
	struct port_lookup_key *lookup = target;

	lookup->mutable = rcu_dereference(gre_vport->mutable);

	return ((lookup->mutable->port_config.flags & GRE_F_IN_KEY_MATCH) ==
			lookup->vals[LOOKUP_KEY_MATCH]) &&
	       lookup->mutable->port_config.daddr == lookup->vals[LOOKUP_DADDR] &&
	       lookup->mutable->port_config.in_key == lookup->vals[LOOKUP_KEY] &&
	       lookup->mutable->port_config.saddr == lookup->vals[LOOKUP_SADDR];
}
static u32 port_hash(struct port_lookup_key *lookup)
{
	return jhash2(lookup->vals, ARRAY_SIZE(lookup->vals), 0);
}
static int add_port(struct vport *vport)
{
	struct gre_vport *gre_vport = gre_vport_priv(vport);
	struct port_lookup_key lookup;
	int err;

	if (!port_table) {
		struct tbl *new_table;

		new_table = tbl_create(0);
		if (!new_table)
			return -ENOMEM;

		rcu_assign_pointer(port_table, new_table);

	} else if (tbl_count(port_table) > tbl_n_buckets(port_table)) {
		struct tbl *old_table = port_table;
		struct tbl *new_table;

		new_table = tbl_expand(old_table);
		if (IS_ERR(new_table))
			return PTR_ERR(new_table);

		rcu_assign_pointer(port_table, new_table);
		tbl_deferred_destroy(old_table, NULL);
	}

	lookup.vals[LOOKUP_SADDR] = gre_vport->mutable->port_config.saddr;
	lookup.vals[LOOKUP_DADDR] = gre_vport->mutable->port_config.daddr;
	lookup.vals[LOOKUP_KEY] = gre_vport->mutable->port_config.in_key;
	lookup.vals[LOOKUP_KEY_MATCH] = gre_vport->mutable->port_config.flags & GRE_F_IN_KEY_MATCH;

	err = tbl_insert(port_table, &gre_vport->tbl_node, port_hash(&lookup));
	if (err)
		return err;

	(*find_port_pool(gre_vport->mutable))++;

	return 0;
}
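/*
 * Sizing note (derived from the code above, not a comment from the original
 * file): the table is expanded once the entry count exceeds the bucket
 * count, i.e. at an average chain length of one, keeping lookups close to
 * O(1) as ports are added.
 */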
static int del_port(struct vport *vport)
{
	struct gre_vport *gre_vport = gre_vport_priv(vport);
	int err;

	err = tbl_remove(port_table, &gre_vport->tbl_node);
	if (err)
		return err;

	(*find_port_pool(gre_vport->mutable))--;

	return 0;
}
#define FIND_PORT_KEY	(1 << 0)
#define FIND_PORT_MATCH	(1 << 1)
#define FIND_PORT_ANY	(FIND_PORT_KEY | FIND_PORT_MATCH)
static struct vport *find_port(__be32 saddr, __be32 daddr, __be32 key,
			       int port_type,
			       const struct mutable_config **mutable)
{
	struct port_lookup_key lookup;
	struct tbl *table = rcu_dereference(port_table);
	struct tbl_node *tbl_node;

	if (!table)
		return NULL;

	lookup.vals[LOOKUP_SADDR] = saddr;
	lookup.vals[LOOKUP_DADDR] = daddr;

	if (port_type & FIND_PORT_KEY) {
		lookup.vals[LOOKUP_KEY] = key;
		lookup.vals[LOOKUP_KEY_MATCH] = 0;

		if (key_local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}

		if (key_remote_ports) {
			lookup.vals[LOOKUP_SADDR] = 0;

			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;

			lookup.vals[LOOKUP_SADDR] = saddr;
		}
	}

	if (port_type & FIND_PORT_MATCH) {
		lookup.vals[LOOKUP_KEY] = 0;
		lookup.vals[LOOKUP_KEY_MATCH] = GRE_F_IN_KEY_MATCH;

		if (local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}

		if (remote_ports) {
			lookup.vals[LOOKUP_SADDR] = 0;

			tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}
	}

	return NULL;

found:
	*mutable = lookup.mutable;
	return gre_vport_to_vport(gre_vport_table_cast(tbl_node));
}
static bool check_ipv4_address(__be32 addr)
{
	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
	    || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
		return false;

	return true;
}
static bool ipv4_should_icmp(struct sk_buff *skb)
{
	struct iphdr *old_iph = ip_hdr(skb);

	/* Don't respond to L2 broadcast. */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	/* Don't respond to L3 broadcast or invalid addresses. */
	if (!check_ipv4_address(old_iph->daddr) ||
	    !check_ipv4_address(old_iph->saddr))
		return false;

	/* Only respond to the first fragment. */
	if (old_iph->frag_off & htons(IP_OFFSET))
		return false;

	/* Don't respond to ICMP error messages. */
	if (old_iph->protocol == IPPROTO_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
						(old_iph->ihl << 2) +
						offsetof(struct icmphdr, type) -
						skb->data, sizeof(icmp_type),
						&icmp_type);
		if (!icmp_typep)
			return false;

		if (*icmp_typep > NR_ICMP_TYPES
			|| (*icmp_typep <= ICMP_PARAMETERPROB
			&& *icmp_typep != ICMP_ECHOREPLY
			&& *icmp_typep != ICMP_ECHO))
			return false;
	}

	return true;
}
static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct iphdr *iph, *old_iph = ip_hdr(skb);
	struct icmphdr *icmph;
	u8 *payload;

	iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	payload = skb_put(nskb, payload_length);

	/* IP */
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
		   IPTOS_PREC_INTERNETCONTROL;
	iph->tot_len = htons(sizeof(struct iphdr)
			     + sizeof(struct icmphdr)
			     + payload_length);
	get_random_bytes(&iph->id, sizeof(iph->id));
	iph->frag_off = 0;
	iph->ttl = IPDEFTTL;
	iph->protocol = IPPROTO_ICMP;
	iph->daddr = old_iph->saddr;
	iph->saddr = old_iph->daddr;

	ip_send_check(iph);

	/* ICMP */
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = ICMP_FRAG_NEEDED;
	icmph->un.gateway = htonl(mtu);
	icmph->checksum = 0;

	nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmph->checksum = csum_fold(nskb->csum);
}
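/*
 * Note on the checksum sequence above (an explanatory addition):
 * csum_partial() seeds the running sum with the ICMP header,
 * skb_copy_and_csum_bits() then copies the quoted payload out of the
 * original packet while folding it into the same sum, and a single
 * csum_fold() produces the final ICMP checksum.
 */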
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
	struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
	int addr_type;
	int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	/* Check source address is valid. */
	addr_type = ipv6_addr_type(&old_ipv6h->saddr);
	if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
		return false;

	/* Don't reply to unspecified addresses. */
	if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
		return false;

	/* Don't respond to ICMP error messages. */
	payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
	if (payload_off < 0)
		return false;

	if (nexthdr == NEXTHDR_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, payload_off +
						offsetof(struct icmp6hdr,
							 icmp6_type),
						sizeof(icmp_type), &icmp_type);

		if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
			return false;
	}

	return true;
}
static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
	struct icmp6hdr *icmp6h;
	u8 *payload;

	ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	payload = skb_put(nskb, payload_length);

	/* IPv6 */
	ipv6h->version = 6;
	ipv6h->priority = 0;
	memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
	ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
				   + payload_length);
	ipv6h->nexthdr = NEXTHDR_ICMP;
	ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
	ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

	/* ICMPv6 */
	icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
	icmp6h->icmp6_code = 0;
	icmp6h->icmp6_cksum = 0;
	icmp6h->icmp6_mtu = htonl(mtu);

	nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					      sizeof(struct icmp6hdr)
					      + payload_length,
					      ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */
static bool send_frag_needed(struct vport *vport,
			     const struct mutable_config *mutable,
			     struct sk_buff *skb, unsigned int mtu,
			     __be32 flow_key)
{
	unsigned int eth_hdr_len = ETH_HLEN;
	unsigned int total_length = 0, header_length = 0, payload_length;
	struct ethhdr *eh, *old_eh = eth_hdr(skb);
	struct sk_buff *nskb;

	/* Sanity check */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU)
			return false;

		if (!ipv4_should_icmp(skb))
			return true;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)
			return false;

		/* In theory we should do PMTUD on IPv6 multicast messages, but
		 * we don't have an address to send from, so just fragment. */
		if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
			return false;

		if (!ipv6_should_icmp(skb))
			return true;
	}
#endif
	else
		return false;

	/* Allocate */
	if (old_eh->h_proto == htons(ETH_P_8021Q))
		eth_hdr_len = VLAN_ETH_HLEN;

	payload_length = skb->len - eth_hdr_len;
	if (skb->protocol == htons(ETH_P_IP)) {
		header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, 576);
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else {
		header_length = sizeof(struct ipv6hdr) +
				sizeof(struct icmp6hdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, IPV6_MIN_MTU);
	}
#endif

	total_length = min(total_length, mutable->mtu);
	payload_length = total_length - header_length;

	nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
			     payload_length);
	if (!nskb)
		return false;

	skb_reserve(nskb, NET_IP_ALIGN);

	/* Ethernet / VLAN */
	eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
	memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
	memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
	nskb->protocol = eh->h_proto = old_eh->h_proto;
	if (old_eh->h_proto == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

		vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
		vh->h_vlan_encapsulated_proto = skb->protocol;
	}
	skb_reset_mac_header(nskb);

	/* Protocol */
	if (skb->protocol == htons(ETH_P_IP))
		ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else
		ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

	/* Assume that flow based keys are symmetric with respect to input
	 * and output and use the key that we were going to put on the
	 * outgoing packet for the fake received packet.  If the keys are
	 * not symmetric then PMTUD needs to be disabled since we won't have
	 * any way of synthesizing packets. */
	if (mutable->port_config.flags & GRE_F_IN_KEY_MATCH &&
	    mutable->port_config.flags & GRE_F_OUT_KEY_ACTION)
		OVS_CB(nskb)->tun_id = flow_key;

	compute_ip_summed(nskb, false);
	vport_receive(vport, nskb);

	return true;
}
static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
{
	if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
		if (!nskb) {
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}

		set_skb_csum_bits(skb, nskb);

		if (skb->sk)
			skb_set_owner_w(nskb, skb->sk);

		dev_kfree_skb(skb);
		return nskb;
	}

	return skb;
}
static void create_gre_header(struct sk_buff *skb,
			      const struct mutable_config *mutable)
{
	struct iphdr *iph = ip_hdr(skb);
	__be16 *flags = (__be16 *)(iph + 1);
	__be16 *protocol = flags + 1;
	__be32 *options = (__be32 *)((u8 *)iph + mutable->tunnel_hlen
					       - GRE_HEADER_SECTION);

	*protocol = htons(ETH_P_TEB);
	*flags = 0;

	/* Work backwards over the options so the checksum is last. */
	if (mutable->port_config.out_key ||
	    mutable->port_config.flags & GRE_F_OUT_KEY_ACTION) {
		*flags |= GRE_KEY;

		if (mutable->port_config.flags & GRE_F_OUT_KEY_ACTION)
			*options = OVS_CB(skb)->tun_id;
		else
			*options = mutable->port_config.out_key;

		options--;
	}

	if (mutable->port_config.flags & GRE_F_OUT_CSUM) {
		*flags |= GRE_CSUM;

		*options = 0;
		*(__sum16 *)options = csum_fold(skb_checksum(skb,
						sizeof(struct iphdr),
						skb->len - sizeof(struct iphdr),
						0));
	}
}
static int check_checksum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	__be16 flags = *(__be16 *)(iph + 1);
	__sum16 csum = 0;

	if (flags & GRE_CSUM) {
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			csum = csum_fold(skb->csum);
			if (!csum)
				break;
			/* Fall through. */
		case CHECKSUM_NONE:
			skb->csum = 0;
			csum = __skb_checksum_complete(skb);
			skb->ip_summed = CHECKSUM_COMPLETE;
			break;
		}
	}

	return (csum == 0);
}
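/*
 * Behavior sketch (derived from the code above): when GRE_CSUM is absent the
 * function reports success immediately; otherwise a hardware-verified
 * CHECKSUM_COMPLETE sum is trusted if it folds to zero, and anything else
 * falls through to a full __skb_checksum_complete() pass.
 */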
static int parse_gre_header(struct iphdr *iph, __be16 *flags, __be32 *key)
{
	/* IP and ICMP protocol handlers check that the IHL is valid. */
	__be16 *flagsp = (__be16 *)((u8 *)iph + (iph->ihl << 2));
	__be16 *protocol = flagsp + 1;
	__be32 *options = (__be32 *)(protocol + 1);
	int hdr_len;

	*flags = *flagsp;

	if (*flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	if (*protocol != htons(ETH_P_TEB))
		return -EINVAL;

	hdr_len = GRE_HEADER_SECTION;

	if (*flags & GRE_CSUM) {
		hdr_len += GRE_HEADER_SECTION;
		options++;
	}

	if (*flags & GRE_KEY) {
		hdr_len += GRE_HEADER_SECTION;

		*key = *options;
		options++;
	} else
		*key = 0;

	if (*flags & GRE_SEQ)
		hdr_len += GRE_HEADER_SECTION;

	return hdr_len;
}
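/*
 * Worked example (illustrative): for a header carrying GRE_CSUM and GRE_KEY,
 * the sections present are base + checksum + key, so parse_gre_header()
 * returns 3 * GRE_HEADER_SECTION == 12 and *key holds the option word that
 * follows the checksum.
 */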
static inline u8 ecn_encapsulate(u8 tos, struct sk_buff *skb)
{
	u8 inner;

	if (skb->protocol == htons(ETH_P_IP))
		inner = ((struct iphdr *)skb_network_header(skb))->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ipv6_get_dsfield((struct ipv6hdr *)skb_network_header(skb));
#endif
	else
		inner = 0;

	return INET_ECN_encapsulate(tos, inner);
}
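/*
 * Worked example (a sketch of how INET_ECN_encapsulate() is understood here,
 * not a normative statement): the outer TOS keeps the DSCP bits of 'tos' and
 * takes its ECN bits from the inner header, except that an inner CE mark
 * becomes ECT(0) so congestion can be re-marked on decapsulation.  E.g. tos
 * 0x28 (AF11) with inner ECT(1) yields 0x29, while the same tos with inner
 * CE yields 0x2a.
 */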
static inline void ecn_decapsulate(u8 tos, struct sk_buff *skb)
{
	if (INET_ECN_is_ce(tos)) {
		__be16 protocol = skb->protocol;
		unsigned int nw_header = skb_network_header(skb) - skb->data;

		if (skb->protocol == htons(ETH_P_8021Q)) {
			if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
				return;

			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
			nw_header += VLAN_HLEN;
		}

		if (protocol == htons(ETH_P_IP)) {
			if (unlikely(!pskb_may_pull(skb, nw_header
			    + sizeof(struct iphdr))))
				return;

			IP_ECN_set_ce((struct iphdr *)(nw_header + skb->data));
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (protocol == htons(ETH_P_IPV6)) {
			if (unlikely(!pskb_may_pull(skb, nw_header
			    + sizeof(struct ipv6hdr))))
				return;

			IP6_ECN_set_ce((struct ipv6hdr *)(nw_header
							  + skb->data));
		}
#endif
	}
}
static struct sk_buff *handle_gso(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, 0);

		dev_kfree_skb(skb);
		return nskb;
	}

	return skb;
}
static int handle_csum_offload(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		return skb_checksum_help(skb);

	skb->ip_summed = CHECKSUM_NONE;
	return 0;
}
/* Called with rcu_read_lock. */
static void gre_err(struct sk_buff *skb, u32 info)
{
	struct vport *vport;
	const struct mutable_config *mutable;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	int mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);

	struct iphdr *iph;
	__be16 flags;
	__be32 key;
	int tunnel_hdr_len, tot_hdr_len;
	unsigned int orig_mac_header;
	unsigned int orig_nw_header;

	if (type != ICMP_DEST_UNREACH || code != ICMP_FRAG_NEEDED)
		return;

	/* The minimum size packet that we would actually be able to process:
	 * encapsulating IP header, minimum GRE header, Ethernet header,
	 * inner IPv4 header (20 + 4 + 14 + 20 = 58 bytes). */
	if (!pskb_may_pull(skb, sizeof(struct iphdr) + GRE_HEADER_SECTION +
			   ETH_HLEN + sizeof(struct iphdr)))
		return;

	iph = (struct iphdr *)skb->data;

	tunnel_hdr_len = parse_gre_header(iph, &flags, &key);
	if (tunnel_hdr_len < 0)
		return;

	vport = find_port(iph->saddr, iph->daddr, key, FIND_PORT_ANY, &mutable);
	if (!vport)
		return;

	/* Packets received by this function were previously sent by us, so
	 * any comparisons should be to the output values, not the input.
	 * However, it's not really worth it to have a hash table based on
	 * output keys (especially since ICMP error handling of tunneled
	 * packets isn't that reliable anyway).  Therefore, we do a lookup
	 * based on the out key as if it were the in key and then check to
	 * see if the input and output keys are the same. */
	if (mutable->port_config.in_key != mutable->port_config.out_key)
		return;

	if (!!(mutable->port_config.flags & GRE_F_IN_KEY_MATCH) !=
	    !!(mutable->port_config.flags & GRE_F_OUT_KEY_ACTION))
		return;

	if ((mutable->port_config.flags & GRE_F_OUT_CSUM) && !(flags & GRE_CSUM))
		return;

	tunnel_hdr_len += iph->ihl << 2;

	orig_mac_header = skb_mac_header(skb) - skb->data;
	orig_nw_header = skb_network_header(skb) - skb->data;
	skb_set_mac_header(skb, tunnel_hdr_len);

	tot_hdr_len = tunnel_hdr_len + ETH_HLEN;

	skb->protocol = eth_hdr(skb)->h_proto;
	if (skb->protocol == htons(ETH_P_8021Q)) {
		tot_hdr_len += VLAN_HLEN;
		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	}

	skb_set_network_header(skb, tot_hdr_len);
	mtu -= tot_hdr_len;

	if (skb->protocol == htons(ETH_P_IP))
		tot_hdr_len += sizeof(struct iphdr);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		tot_hdr_len += sizeof(struct ipv6hdr);
#endif
	else
		goto out;

	if (!pskb_may_pull(skb, tot_hdr_len))
		goto out;

	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU) {
			if (ntohs(ip_hdr(skb)->tot_len) >= IP_MIN_MTU)
				mtu = IP_MIN_MTU;
			else
				goto out;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU) {
			unsigned int packet_length = sizeof(struct ipv6hdr) +
					ntohs(ipv6_hdr(skb)->payload_len);

			if (packet_length >= IPV6_MIN_MTU
			    || ntohs(ipv6_hdr(skb)->payload_len) == 0)
				mtu = IPV6_MIN_MTU;
			else
				goto out;
		}
	}
#endif

	__pskb_pull(skb, tunnel_hdr_len);
	send_frag_needed(vport, mutable, skb, mtu, key);
	skb_push(skb, tunnel_hdr_len);

out:
	skb_set_mac_header(skb, orig_mac_header);
	skb_set_network_header(skb, orig_nw_header);
	skb->protocol = htons(ETH_P_IP);
}
/* Called with rcu_read_lock. */
static int gre_rcv(struct sk_buff *skb)
{
	struct vport *vport;
	const struct mutable_config *mutable;
	int hdr_len;
	struct iphdr *iph;
	__be16 flags;
	__be32 key;

	if (!pskb_may_pull(skb, GRE_HEADER_SECTION + ETH_HLEN))
		goto error;

	if (!check_checksum(skb))
		goto error;

	iph = ip_hdr(skb);

	hdr_len = parse_gre_header(iph, &flags, &key);
	if (hdr_len < 0)
		goto error;

	vport = find_port(iph->daddr, iph->saddr, key, FIND_PORT_ANY, &mutable);
	if (!vport) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
		goto error;
	}

	if ((mutable->port_config.flags & GRE_F_IN_CSUM) && !(flags & GRE_CSUM)) {
		vport_record_error(vport, VPORT_E_RX_CRC);
		goto error;
	}

	if (!pskb_pull(skb, hdr_len) || !pskb_may_pull(skb, ETH_HLEN)) {
		vport_record_error(vport, VPORT_E_RX_ERROR);
		goto error;
	}

	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);
	skb_postpull_rcsum(skb, skb_transport_header(skb), hdr_len + ETH_HLEN);

	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);
	skb_reset_network_header(skb);

	ecn_decapsulate(iph->tos, skb);

	if (mutable->port_config.flags & GRE_F_IN_KEY_MATCH)
		OVS_CB(skb)->tun_id = key;
	else
		OVS_CB(skb)->tun_id = 0;

	skb_push(skb, ETH_HLEN);
	compute_ip_summed(skb, false);

	vport_receive(vport, skb);

	return 0;

error:
	kfree_skb(skb);
	return 0;
}
static int build_packet(struct vport *vport, const struct mutable_config *mutable,
			struct iphdr *iph, struct rtable *rt, int max_headroom,
			int mtu, struct sk_buff *skb)
{
	int err;
	struct iphdr *new_iph;
	int orig_len = skb->len;
	__be16 frag_off = iph->frag_off;

	skb = check_headroom(skb, max_headroom);
	if (unlikely(IS_ERR(skb)))
		goto error;

	err = handle_csum_offload(skb);
	if (err)
		goto error_free;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *old_iph = ip_hdr(skb);

		if ((old_iph->frag_off & htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			if (send_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
				goto error_free;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		unsigned int packet_length = skb->len - ETH_HLEN
			- (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);

		/* IPv6 requires PMTUD if the packet is above the minimum MTU. */
		if (packet_length > IPV6_MIN_MTU)
			frag_off = htons(IP_DF);

		if (mtu < packet_length) {
			if (send_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
				goto error_free;
		}
	}
#endif

	skb_reset_transport_header(skb);
	new_iph = (struct iphdr *)skb_push(skb, mutable->tunnel_hlen);
	skb_reset_network_header(skb);

	memcpy(new_iph, iph, sizeof(struct iphdr));
	new_iph->frag_off = frag_off;
	ip_select_ident(new_iph, &rt->u.dst, NULL);

	create_gre_header(skb, mutable);

	/* Allow our local IP stack to fragment the outer packet even if the
	 * DF bit is set as a last resort. */
	skb->local_df = 1;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags = 0;

	err = ip_local_out(skb);
	if (likely(net_xmit_eval(err) == 0))
		return orig_len;

	vport_record_error(vport, VPORT_E_TX_ERROR);
	return 0;

error_free:
	kfree_skb(skb);
error:
	vport_record_error(vport, VPORT_E_TX_DROPPED);

	return 0;
}
static int gre_send(struct vport *vport, struct sk_buff *skb)
{
	struct gre_vport *gre_vport = gre_vport_priv(vport);
	const struct mutable_config *mutable = rcu_dereference(gre_vport->mutable);

	struct iphdr *old_iph;
	struct iphdr iph;
	struct rtable *rt;
	int max_headroom;
	int mtu;
	int orig_len;

	/* Validate the protocol headers before we try to use them. */
	if (skb->protocol == htons(ETH_P_8021Q)) {
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			goto error_free;

		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, VLAN_ETH_HLEN);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_header(skb)
		    + sizeof(struct iphdr) - skb->data)))
			skb->protocol = 0;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_header(skb)
		    + sizeof(struct ipv6hdr) - skb->data)))
			skb->protocol = 0;
	}
#endif

	old_iph = ip_hdr(skb);

	iph.tos = mutable->port_config.tos;
	if (mutable->port_config.flags & GRE_F_TOS_INHERIT) {
		if (skb->protocol == htons(ETH_P_IP))
			iph.tos = old_iph->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph.tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
	}
	iph.tos = ecn_encapsulate(iph.tos, skb);

	{
		struct flowi fl = { .nl_u = { .ip4_u =
				    { .daddr = mutable->port_config.daddr,
				      .saddr = mutable->port_config.saddr,
				      .tos = RT_TOS(iph.tos) } },
				    .proto = IPPROTO_GRE };

		if (ip_route_output_key(&init_net, &rt, &fl))
			goto error_free;
	}

	iph.ttl = mutable->port_config.ttl;
	if (mutable->port_config.flags & GRE_F_TTL_INHERIT) {
		if (skb->protocol == htons(ETH_P_IP))
			iph.ttl = old_iph->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph.ttl = ipv6_hdr(skb)->hop_limit;
#endif
	}
	if (!iph.ttl)
		iph.ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);

	iph.frag_off = (mutable->port_config.flags & GRE_F_PMTUD) ? htons(IP_DF) : 0;
	if (iph.frag_off)
		mtu = dst_mtu(&rt->u.dst)
			- ETH_HLEN
			- mutable->tunnel_hlen
			- (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
	else
		mtu = mutable->mtu;

	if (skb->protocol == htons(ETH_P_IP)) {
		iph.frag_off |= old_iph->frag_off & htons(IP_DF);
		mtu = max(mtu, IP_MIN_MTU);
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		mtu = max(mtu, IPV6_MIN_MTU);
#endif

	iph.version = 4;
	iph.ihl = sizeof(struct iphdr) >> 2;
	iph.protocol = IPPROTO_GRE;
	iph.daddr = rt->rt_dst;
	iph.saddr = rt->rt_src;

	nf_reset(skb);
	secpath_reset(skb);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->u.dst);

	/* If we are doing GSO on a pskb it is better to make sure that the
	 * headroom is correct now.  We will only have to copy the portion in
	 * the linear data area and GSO will preserve headroom when it creates
	 * the segments.  This is particularly beneficial on Xen where we get
	 * lots of GSO pskbs.  Conversely, we delay copying if it is just to
	 * get our own writable clone because GSO may do the copy for us. */
	max_headroom = LL_RESERVED_SPACE(rt->u.dst.dev) + rt->u.dst.header_len
			+ mutable->tunnel_hlen;

	if (skb_headroom(skb) < max_headroom) {
		skb = check_headroom(skb, max_headroom);
		if (unlikely(IS_ERR(skb))) {
			vport_record_error(vport, VPORT_E_TX_DROPPED);
			goto error;
		}
	}

	forward_ip_summed(skb);

	if (unlikely(vswitch_skb_checksum_setup(skb)))
		goto error_free;

	skb = handle_gso(skb);
	if (unlikely(IS_ERR(skb))) {
		vport_record_error(vport, VPORT_E_TX_DROPPED);
		goto error;
	}

	/* Process GSO segments.  Try to do any work for the entire packet that
	 * doesn't involve actually writing to it before this point. */
	orig_len = 0;
	do {
		struct sk_buff *next_skb = skb->next;
		skb->next = NULL;

		orig_len += build_packet(vport, mutable, &iph, rt, max_headroom, mtu, skb);

		skb = next_skb;
	} while (skb);

	return orig_len;

error_free:
	kfree_skb(skb);
	vport_record_error(vport, VPORT_E_TX_ERROR);
error:
	return 0;
}
static struct net_protocol gre_protocol_handlers = {
	.handler	= gre_rcv,
	.err_handler	= gre_err,
};
static int gre_init(void)
{
	int err;

	err = inet_add_protocol(&gre_protocol_handlers, IPPROTO_GRE);
	if (err)
		printk(KERN_WARNING "openvswitch: cannot register gre protocol handler\n");

	return err;
}
static void gre_exit(void)
{
	tbl_destroy(port_table, NULL);
	inet_del_protocol(&gre_protocol_handlers, IPPROTO_GRE);
}
static int set_config(const struct vport *cur_vport,
		      struct mutable_config *mutable, const void __user *uconfig)
{
	const struct vport *old_vport;
	const struct mutable_config *old_mutable;
	int port_type;

	if (copy_from_user(&mutable->port_config, uconfig, sizeof(struct gre_port_config)))
		return -EFAULT;

	if (mutable->port_config.daddr == 0)
		return -EINVAL;

	if (mutable->port_config.flags & GRE_F_IN_KEY_MATCH) {
		port_type = FIND_PORT_MATCH;
		mutable->port_config.in_key = 0;
	} else
		port_type = FIND_PORT_KEY;

	old_vport = find_port(mutable->port_config.saddr,
			      mutable->port_config.daddr,
			      mutable->port_config.in_key, port_type,
			      &old_mutable);
	if (old_vport && old_vport != cur_vport)
		return -EEXIST;

	if (mutable->port_config.flags & GRE_F_OUT_KEY_ACTION)
		mutable->port_config.out_key = 0;

	mutable->tunnel_hlen = sizeof(struct iphdr) + GRE_HEADER_SECTION;

	if (mutable->port_config.flags & GRE_F_OUT_CSUM)
		mutable->tunnel_hlen += GRE_HEADER_SECTION;

	if (mutable->port_config.out_key ||
	    mutable->port_config.flags & GRE_F_OUT_KEY_ACTION)
		mutable->tunnel_hlen += GRE_HEADER_SECTION;

	return 0;
}
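/*
 * Illustrative only (not part of the original file): a caller might fill
 * struct gre_port_config along these lines before handing it to the
 * create/modify entry points.  Field and flag names follow openvswitch/gre.h
 * as used above; the specific addresses, keys, and flag combination are
 * assumptions, and values are in network byte order as set_config() expects:
 *
 *	struct gre_port_config config = {
 *		.saddr   = htonl(0x0a000001),	/- 10.0.0.1 (optional) -/
 *		.daddr   = htonl(0x0a000002),	/- 10.0.0.2 (required) -/
 *		.in_key  = htonl(100),
 *		.out_key = htonl(100),
 *		.flags   = GRE_F_OUT_CSUM | GRE_F_PMTUD,
 *	};
 */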
static struct vport *gre_create(const char *name, const void __user *config)
{
	struct vport *vport;
	struct gre_vport *gre_vport;
	int err;

	vport = vport_alloc(sizeof(struct gre_vport), &gre_vport_ops);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	gre_vport = gre_vport_priv(vport);

	strcpy(gre_vport->name, name);

	gre_vport->mutable = kmalloc(sizeof(struct mutable_config), GFP_KERNEL);
	if (!gre_vport->mutable) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	vport_gen_rand_ether_addr(gre_vport->mutable->eth_addr);
	gre_vport->mutable->mtu = ETH_DATA_LEN;

	err = set_config(NULL, gre_vport->mutable, config);
	if (err)
		goto error_free_mutable;

	err = add_port(vport);
	if (err)
		goto error_free_mutable;

	return vport;

error_free_mutable:
	kfree(gre_vport->mutable);
error_free_vport:
	vport_free(vport);
error:
	return ERR_PTR(err);
}
static int gre_modify(struct vport *vport, const void __user *config)
{
	struct gre_vport *gre_vport = gre_vport_priv(vport);
	struct mutable_config *mutable;
	int err;
	int update_hash = 0;

	mutable = kmemdup(gre_vport->mutable, sizeof(struct mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error;
	}

	err = set_config(vport, mutable, config);
	if (err)
		goto error_free;

	/* Only remove the port from the hash table if something that would
	 * affect the lookup has changed. */
	if (gre_vport->mutable->port_config.saddr != mutable->port_config.saddr ||
	    gre_vport->mutable->port_config.daddr != mutable->port_config.daddr ||
	    gre_vport->mutable->port_config.in_key != mutable->port_config.in_key ||
	    (gre_vport->mutable->port_config.flags & GRE_F_IN_KEY_MATCH) !=
	    (mutable->port_config.flags & GRE_F_IN_KEY_MATCH))
		update_hash = 1;

	/* This update is not atomic but the lookup uses the config, which
	 * serves as an inherent double check. */
	if (update_hash) {
		err = del_port(vport);
		if (err)
			goto error_free;
	}

	assign_config_rcu(vport, mutable);

	if (update_hash) {
		err = add_port(vport);
		if (err)
			/* The new config has already been assigned, so we
			 * cannot free it; gre_destroy() tolerates a port
			 * missing from the table after a failed modify. */
			return err;
	}

	return 0;

error_free:
	kfree(mutable);
error:
	return err;
}
static void free_port(struct rcu_head *rcu)
{
	struct gre_vport *gre_vport = container_of(rcu, struct gre_vport, rcu);

	kfree(gre_vport->mutable);
	vport_free(gre_vport_to_vport(gre_vport));
}
static int gre_destroy(struct vport *vport)
{
	struct gre_vport *gre_vport = gre_vport_priv(vport);
	int port_type;
	const struct mutable_config *old_mutable;

	/* Do a hash table lookup to make sure that the port exists.  It should
	 * exist but might not if a modify failed earlier. */
	if (gre_vport->mutable->port_config.flags & GRE_F_IN_KEY_MATCH)
		port_type = FIND_PORT_MATCH;
	else
		port_type = FIND_PORT_KEY;

	if (vport == find_port(gre_vport->mutable->port_config.saddr,
	    gre_vport->mutable->port_config.daddr,
	    gre_vport->mutable->port_config.in_key, port_type, &old_mutable))
		del_port(vport);

	call_rcu(&gre_vport->rcu, free_port);

	return 0;
}
static int gre_set_mtu(struct vport *vport, int mtu)
{
	struct gre_vport *gre_vport = gre_vport_priv(vport);
	struct mutable_config *mutable;

	mutable = kmemdup(gre_vport->mutable, sizeof(struct mutable_config), GFP_KERNEL);
	if (!mutable)
		return -ENOMEM;

	mutable->mtu = mtu;
	assign_config_rcu(vport, mutable);

	return 0;
}
static int gre_set_addr(struct vport *vport, const unsigned char *addr)
{
	struct gre_vport *gre_vport = gre_vport_priv(vport);
	struct mutable_config *mutable;

	mutable = kmemdup(gre_vport->mutable, sizeof(struct mutable_config), GFP_KERNEL);
	if (!mutable)
		return -ENOMEM;

	memcpy(mutable->eth_addr, addr, ETH_ALEN);
	assign_config_rcu(vport, mutable);

	return 0;
}
static const char *gre_get_name(const struct vport *vport)
{
	const struct gre_vport *gre_vport = gre_vport_priv(vport);
	return gre_vport->name;
}

static const unsigned char *gre_get_addr(const struct vport *vport)
{
	const struct gre_vport *gre_vport = gre_vport_priv(vport);
	return rcu_dereference(gre_vport->mutable)->eth_addr;
}

static int gre_get_mtu(const struct vport *vport)
{
	const struct gre_vport *gre_vport = gre_vport_priv(vport);
	return rcu_dereference(gre_vport->mutable)->mtu;
}
struct vport_ops gre_vport_ops = {
	.type		= "gre",
	.flags		= VPORT_F_GEN_STATS | VPORT_F_TUN_ID,
	.init		= gre_init,
	.exit		= gre_exit,
	.create		= gre_create,
	.modify		= gre_modify,
	.destroy	= gre_destroy,
	.set_mtu	= gre_set_mtu,
	.set_addr	= gre_set_addr,
	.get_name	= gre_get_name,
	.get_addr	= gre_get_addr,
	.get_dev_flags	= vport_gen_get_dev_flags,
	.is_running	= vport_gen_is_running,
	.get_operstate	= vport_gen_get_operstate,
	.get_mtu	= gre_get_mtu,
	.send		= gre_send,
};