/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "checksum.h"
#include "datapath.h"
#include "tunnel.h"
#include "vlan.h"
#include "vport.h"
#include "vport-internal_dev.h"
#define PORT_TABLE_SIZE 1024

static struct hlist_head *port_table __read_mostly;

/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int key_multicast_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;
static unsigned int null_ports __read_mostly;
static unsigned int multicast_ports __read_mostly;
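
/* Each of the counters above tracks how many configured ports fall into one
 * lookup "flavor" (exact-keyed vs. flow-keyed, with or without a local
 * address, multicast, or null).  ovs_tnl_find_port() checks them so it can
 * skip hash-table probes for flavors that currently have no ports. */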

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
	return vport_from_priv(tnl_vport);
}

static void free_config_rcu(struct rcu_head *rcu)
{
	struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);

	kfree(c);
}

/* Frees the portion of 'mutable' that requires RTNL and thus can't happen
 * within an RCU callback.  Fortunately this part doesn't require waiting for
 * an RCU grace period.
 */
static void free_mutable_rtnl(struct tnl_mutable_config *mutable)
{
	if (ipv4_is_multicast(mutable->key.daddr) && mutable->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(port_key_get_net(&mutable->key), mutable->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, mutable->key.daddr);
	}
}

static void assign_config_rcu(struct vport *vport,
			      struct tnl_mutable_config *new_config)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *old_config;

	old_config = rtnl_dereference(tnl_vport->mutable);
	rcu_assign_pointer(tnl_vport->mutable, new_config);

	free_mutable_rtnl(old_config);
	call_rcu(&old_config->rcu, free_config_rcu);
}
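
/* Maps a port's mutable configuration to the flavor counter (above) that
 * should be adjusted when the port is added to or removed from the table. */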
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
	bool is_multicast = ipv4_is_multicast(mutable->key.daddr);

	if (mutable->flags & TNL_F_IN_KEY_MATCH) {
		if (mutable->key.saddr)
			return &local_remote_ports;
		else if (is_multicast)
			return &multicast_ports;
		else
			return &remote_ports;
	} else {
		if (mutable->key.saddr)
			return &key_local_remote_ports;
		else if (is_multicast)
			return &key_multicast_ports;
		else if (mutable->key.daddr)
			return &key_remote_ports;
		else
			return &null_ports;
	}
}

static u32 port_hash(const struct port_lookup_key *key)
{
	return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
}

static struct hlist_head *find_bucket(u32 hash)
{
	return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
}
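
/* The port table is a fixed-size hash table over all tunnel vports, indexed
 * by port_hash() of each port's lookup key.  The add/move/remove helpers
 * below run under RTNL (note the rtnl_dereference() calls) and keep the
 * flavor counters in sync with the table contents. */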
static void port_table_add_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable;
	u32 hash;

	mutable = rtnl_dereference(tnl_vport->mutable);
	hash = port_hash(&mutable->key);
	hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}

static void port_table_move_port(struct vport *vport,
				 struct tnl_mutable_config *new_mutable)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	u32 hash;

	hash = port_hash(&new_mutable->key);
	hlist_del_init_rcu(&tnl_vport->hash_node);
	hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
	assign_config_rcu(vport, new_mutable);
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}

static void port_table_remove_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

	hlist_del_init_rcu(&tnl_vport->hash_node);

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
}
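
/* Finds a port whose lookup key matches 'key' exactly (the first PORT_KEY_LEN
 * bytes are compared).  Safe under rcu_read_lock or RTNL; on success
 * '*pmutable' is set to the matching port's current configuration. */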
static struct vport *port_table_lookup(struct port_lookup_key *key,
				       const struct tnl_mutable_config **pmutable)
{
	struct hlist_node *n;
	struct hlist_head *bucket;
	u32 hash = port_hash(key);
	struct tnl_vport *tnl_vport;

	bucket = find_bucket(hash);

	hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
		struct tnl_mutable_config *mutable;

		mutable = rcu_dereference_rtnl(tnl_vport->mutable);
		if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
			*pmutable = mutable;
			return tnl_vport_to_vport(tnl_vport);
		}
	}

	return NULL;
}
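
/* Receive-side port lookup.  Candidates are tried from most to least
 * specific: exact in_key match with both addresses configured, then with only
 * the remote address, then the same two cases with the key wildcarded, then
 * multicast ports, and finally ports with no addresses configured. */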
struct vport *ovs_tnl_find_port(struct net *net, __be32 saddr, __be32 daddr,
				__be64 key, int tunnel_type,
				const struct tnl_mutable_config **mutable)
{
	struct port_lookup_key lookup;
	struct vport *vport;
	bool is_multicast = ipv4_is_multicast(saddr);

	port_key_set_net(&lookup, net);
	lookup.saddr = saddr;
	lookup.daddr = daddr;

	/* First try for exact match on in_key. */
	lookup.in_key = key;
	lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
	if (!is_multicast && key_local_remote_ports) {
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;
	}
	if (key_remote_ports) {
		lookup.saddr = 0;
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;

		lookup.saddr = saddr;
	}

	/* Then try matches that wildcard in_key. */
	lookup.in_key = 0;
	lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
	if (!is_multicast && local_remote_ports) {
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;
	}
	if (remote_ports) {
		lookup.saddr = 0;
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;
	}

	if (is_multicast) {
		lookup.saddr = 0;
		lookup.daddr = saddr;
		if (key_multicast_ports) {
			lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
			lookup.in_key = key;
			vport = port_table_lookup(&lookup, mutable);
			if (vport)
				return vport;
		}
		if (multicast_ports) {
			lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
			lookup.in_key = 0;
			vport = port_table_lookup(&lookup, mutable);
			if (vport)
				return vport;
		}
	}

	if (null_ports) {
		lookup.daddr = 0;
		lookup.saddr = 0;
		lookup.tunnel_type = tunnel_type;
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;
	}

	return NULL;
}
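
/* If the outer IP header carried the CE (congestion experienced) mark,
 * propagate it into the encapsulated IPv4 or IPv6 header, skipping over a
 * single VLAN tag if one is present. */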
static void ecn_decapsulate(struct sk_buff *skb)
{
	if (unlikely(INET_ECN_is_ce(OVS_CB(skb)->tun_key->ipv4_tos))) {
		__be16 protocol = skb->protocol;

		skb_set_network_header(skb, ETH_HLEN);

		if (protocol == htons(ETH_P_8021Q)) {
			if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
				return;

			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
			skb_set_network_header(skb, VLAN_ETH_HLEN);
		}

		if (protocol == htons(ETH_P_IP)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct iphdr))))
				return;

			IP_ECN_set_ce(ip_hdr(skb));
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (protocol == htons(ETH_P_IPV6)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct ipv6hdr))))
				return;

			IP6_ECN_set_ce(ipv6_hdr(skb));
		}
#endif
	}
}

/**
 *	ovs_tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 *	- skb->data points to the inner Ethernet header.
 *	- The inner Ethernet header is in the linear data area.
 *	- skb->csum does not include the inner Ethernet header.
 *	- The layer pointers are undefined.
 */
void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb)
{
	struct ethhdr *eh;

	skb_reset_mac_header(skb);
	eh = eth_hdr(skb);
	if (likely(ntohs(eh->h_proto) >= 1536))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	skb_clear_rxhash(skb);

	ecn_decapsulate(skb);
	vlan_set_tci(skb, 0);

	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}

	ovs_vport_receive(vport, skb);
}
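
/* The helpers below implement path MTU discovery for tunneled traffic:
 * decide whether an ICMP "fragmentation needed" (IPv4) or "packet too big"
 * (IPv6) reply may be generated for a given inner packet, and build that
 * reply when it is. */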
static bool check_ipv4_address(__be32 addr)
{
	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
	    || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
		return false;

	return true;
}

static bool ipv4_should_icmp(struct sk_buff *skb)
{
	struct iphdr *old_iph = ip_hdr(skb);

	/* Don't respond to L2 broadcast. */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	/* Don't respond to L3 broadcast or invalid addresses. */
	if (!check_ipv4_address(old_iph->daddr) ||
	    !check_ipv4_address(old_iph->saddr))
		return false;

	/* Only respond to the first fragment. */
	if (old_iph->frag_off & htons(IP_OFFSET))
		return false;

	/* Don't respond to ICMP error messages. */
	if (old_iph->protocol == IPPROTO_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
						(old_iph->ihl << 2) +
						offsetof(struct icmphdr, type) -
						skb->data, sizeof(icmp_type),
						&icmp_type);
		if (!icmp_typep)
			return false;

		if (*icmp_typep > NR_ICMP_TYPES
			|| (*icmp_typep <= ICMP_PARAMETERPROB
			&& *icmp_typep != ICMP_ECHOREPLY
			&& *icmp_typep != ICMP_ECHO))
			return false;
	}

	return true;
}

static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct iphdr *iph, *old_iph = ip_hdr(skb);
	struct icmphdr *icmph;
	u8 *payload;

	iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	payload = skb_put(nskb, payload_length);

	/* IP */
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
		   IPTOS_PREC_INTERNETCONTROL;
	iph->tot_len = htons(sizeof(struct iphdr)
			     + sizeof(struct icmphdr)
			     + payload_length);
	get_random_bytes(&iph->id, sizeof(iph->id));
	iph->frag_off = 0;
	iph->ttl = IPDEFTTL;
	iph->protocol = IPPROTO_ICMP;
	iph->daddr = old_iph->saddr;
	iph->saddr = old_iph->daddr;

	ip_send_check(iph);

	/* ICMP */
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = ICMP_FRAG_NEEDED;
	icmph->un.gateway = htonl(mtu);
	icmph->checksum = 0;

	/* Payload */
	nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmph->checksum = csum_fold(nskb->csum);
}
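
/* IPv6 counterparts of the two helpers above: decide whether an ICMPv6
 * "packet too big" reply is appropriate and, if so, construct it. */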
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
	struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
	int addr_type;
	int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	__be16 frag_off;

	/* Check source address is valid. */
	addr_type = ipv6_addr_type(&old_ipv6h->saddr);
	if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
		return false;

	/* Don't reply to unspecified addresses. */
	if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
		return false;

	/* Don't respond to ICMP error messages. */
	payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr, &frag_off);
	if (payload_off < 0)
		return false;

	if (nexthdr == NEXTHDR_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, payload_off +
						offsetof(struct icmp6hdr,
							 icmp6_type),
						sizeof(icmp_type), &icmp_type);

		if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
			return false;
	}

	return true;
}

static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
	struct icmp6hdr *icmp6h;
	u8 *payload;

	ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	payload = skb_put(nskb, payload_length);

	/* IPv6 */
	ipv6h->version = 6;
	ipv6h->priority = 0;
	memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
	ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
				   + payload_length);
	ipv6h->nexthdr = NEXTHDR_ICMP;
	ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
	ipv6h->daddr = old_ipv6h->saddr;
	ipv6h->saddr = old_ipv6h->daddr;

	/* ICMPv6 */
	icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
	icmp6h->icmp6_code = 0;
	icmp6h->icmp6_cksum = 0;
	icmp6h->icmp6_mtu = htonl(mtu);

	nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					      sizeof(struct icmp6hdr)
					      + payload_length,
					      ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */
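
/* Builds an ICMP "fragmentation needed" / ICMPv6 "packet too big" reply for
 * an over-sized packet and feeds it back into the datapath as if it had been
 * received on this vport, so the original sender lowers its path MTU.
 * Returns true when the caller should drop the over-sized packet. */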
bool ovs_tnl_frag_needed(struct vport *vport,
			 const struct tnl_mutable_config *mutable,
			 struct sk_buff *skb, unsigned int mtu)
{
	unsigned int eth_hdr_len = ETH_HLEN;
	unsigned int total_length = 0, header_length = 0, payload_length;
	struct ethhdr *eh, *old_eh = eth_hdr(skb);
	struct sk_buff *nskb;

	/* Sanity check */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU)
			return false;

		if (!ipv4_should_icmp(skb))
			return true;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)
			return false;

		/*
		 * In theory we should do PMTUD on IPv6 multicast messages but
		 * we don't have an address to send from so just fragment.
		 */
		if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
			return false;

		if (!ipv6_should_icmp(skb))
			return true;
	}
#endif
	else
		return false;

	/* Allocate */
	if (old_eh->h_proto == htons(ETH_P_8021Q))
		eth_hdr_len = VLAN_ETH_HLEN;

	payload_length = skb->len - eth_hdr_len;
	if (skb->protocol == htons(ETH_P_IP)) {
		header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
		total_length = min_t(unsigned int, header_length +
				     payload_length, 576);
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else {
		header_length = sizeof(struct ipv6hdr) +
				sizeof(struct icmp6hdr);
		total_length = min_t(unsigned int, header_length +
				     payload_length, IPV6_MIN_MTU);
	}
#endif

	payload_length = total_length - header_length;

	nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
			     payload_length);
	if (!nskb)
		return false;

	skb_reserve(nskb, NET_IP_ALIGN);

	/* Ethernet / VLAN */
	eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
	memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
	memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
	nskb->protocol = eh->h_proto = old_eh->h_proto;
	if (old_eh->h_proto == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

		vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
		vh->h_vlan_encapsulated_proto = skb->protocol;
	} else
		vlan_set_tci(nskb, vlan_get_tci(skb));
	skb_reset_mac_header(nskb);

	/* Protocol */
	if (skb->protocol == htons(ETH_P_IP))
		ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else
		ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

	if (unlikely(compute_ip_summed(nskb, false))) {
		kfree_skb(nskb);
		return false;
	}

	ovs_vport_receive(vport, nskb);

	return true;
}
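
/* Decides whether 'skb' fits the tunnel path MTU and, via '*frag_offp', which
 * DF setting the outer IP header should carry.  Returns false if the packet
 * is too large while PMTUD is enabled, in which case a "fragmentation needed"
 * reply has already been generated and the packet should be dropped. */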
static bool check_mtu(struct sk_buff *skb,
		      struct vport *vport,
		      const struct tnl_mutable_config *mutable,
		      const struct rtable *rt, __be16 *frag_offp,
		      int tunnel_hlen)
{
	bool df_inherit;
	bool pmtud;
	__be16 frag_off;
	int mtu = 0;
	unsigned int packet_length = skb->len - ETH_HLEN;

	if (OVS_CB(skb)->tun_key->ipv4_dst) {
		df_inherit = false;
		pmtud = false;
		frag_off = OVS_CB(skb)->tun_key->tun_flags & OVS_TNL_F_DONT_FRAGMENT ?
			   htons(IP_DF) : 0;
	} else {
		df_inherit = mutable->flags & TNL_F_DF_INHERIT;
		pmtud = mutable->flags & TNL_F_PMTUD;
		frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
	}

	/* Allow for one level of tagging in the packet length. */
	if (!vlan_tx_tag_present(skb) &&
	    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		packet_length -= VLAN_HLEN;

	if (pmtud) {
		int vlan_header = 0;

		/* The tag needs to go in packet regardless of where it
		 * currently is, so subtract it from the MTU.
		 */
		if (vlan_tx_tag_present(skb) ||
		    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
			vlan_header = VLAN_HLEN;

		mtu = dst_mtu(&rt_dst(rt))
			- ETH_HLEN
			- tunnel_hlen
			- vlan_header;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		if (df_inherit)
			frag_off = iph->frag_off & htons(IP_DF);

		if (pmtud && iph->frag_off & htons(IP_DF)) {
			mtu = max(mtu, IP_MIN_MTU);

			if (packet_length > mtu &&
			    ovs_tnl_frag_needed(vport, mutable, skb, mtu))
				return false;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		/* IPv6 requires end hosts to do fragmentation
		 * if the packet is above the minimum MTU.
		 */
		if (df_inherit && packet_length > IPV6_MIN_MTU)
			frag_off = htons(IP_DF);

		if (pmtud) {
			mtu = max(mtu, IPV6_MIN_MTU);

			if (packet_length > mtu &&
			    ovs_tnl_frag_needed(vport, mutable, skb, mtu))
				return false;
		}
	}
#endif

	*frag_offp = frag_off;
	return true;
}

static struct rtable *find_route(struct net *net,
				 __be32 *saddr, __be32 daddr, u8 ipproto,
				 u8 tos)
{
	struct rtable *rt;
	/* Tunnel configuration keeps the DSCP part of the ToS bits, but the
	 * Linux routing code expects RT_TOS bits only. */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
	struct flowi fl = { .nl_u = { .ip4_u = {
					.daddr = daddr,
					.saddr = *saddr,
					.tos = RT_TOS(tos) } },
			    .proto = ipproto };

	if (unlikely(ip_route_output_key(net, &rt, &fl)))
		return ERR_PTR(-EADDRNOTAVAIL);
	*saddr = fl.nl_u.ip4_u.saddr;
	return rt;
#else
	struct flowi4 fl = { .daddr = daddr,
			     .saddr = *saddr,
			     .flowi4_tos = RT_TOS(tos),
			     .flowi4_proto = ipproto };

	rt = ip_route_output_key(net, &fl);
	*saddr = fl.saddr;
	return rt;
#endif
}

static bool need_linearize(const struct sk_buff *skb)
{
	int i;

	if (unlikely(skb_shinfo(skb)->frag_list))
		return true;

	/*
	 * Generally speaking we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1 we know nobody else can
	 * change them from underneath us and we can skip the linearization.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
			return true;

	return false;
}
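
/* Prepares 'skb' for encapsulation: guarantees enough headroom for the outer
 * headers, software-segments GSO packets (the outer header added later would
 * hide the inner one from the NIC), and completes any pending partial
 * checksum, linearizing first when shared pages could change underneath us. */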
static struct sk_buff *handle_offloads(struct sk_buff *skb,
				       const struct tnl_mutable_config *mutable,
				       const struct rtable *rt,
				       int tunnel_hlen)
{
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ tunnel_hlen
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto error_free;
	}

	forward_ip_summed(skb, true);

	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;

		nskb = skb_gso_segment(skb, 0);
		if (IS_ERR(nskb)) {
			kfree_skb(skb);
			err = PTR_ERR(nskb);
			goto error;
		}

		consume_skb(skb);
		skb = nskb;
	} else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
		/* Pages aren't locked and could change at any time.
		 * If this happens after we compute the checksum, the
		 * checksum will be wrong.  We linearize now to avoid
		 * this problem.
		 */
		if (unlikely(need_linearize(skb))) {
			err = __skb_linearize(skb);
			if (unlikely(err))
				goto error_free;
		}

		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error_free;
	}

	set_ip_summed(skb, OVS_CSUM_NONE);

	return skb;

error_free:
	kfree_skb(skb);
error:
	return ERR_PTR(err);
}

static int send_frags(struct sk_buff *skb,
		      int tunnel_hlen)
{
	int sent_len;

	sent_len = 0;
	while (skb) {
		struct sk_buff *next = skb->next;
		int frag_len = skb->len - tunnel_hlen;
		int err;

		skb->next = NULL;
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

		err = ip_local_out(skb);

		skb = next;
		if (unlikely(net_xmit_eval(err)))
			goto free_frags;
		sent_len += frag_len;
	}

	return sent_len;

free_frags:
	/*
	 * There's no point in continuing to send fragments once one has been
	 * dropped so just free the rest.  This may help improve the congestion
	 * that caused the first packet to be dropped.
	 */
	ovs_tnl_free_linked_skbs(skb);
	return sent_len;
}
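
/* Transmit entry point.  After validating the inner headers, the code below
 * picks the outer addresses, ToS and TTL (from the flow's tun_key when set,
 * otherwise from the port configuration), looks up a route, fixes up
 * offloads, enforces the MTU, prepends the outer IP header plus the
 * protocol-specific tunnel header, and hands the result to the IP stack via
 * send_frags().  Returns the number of bytes sent; errors are recorded
 * against the vport. */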
int ovs_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
	enum vport_err_type err = VPORT_E_TX_ERROR;
	struct rtable *rt;
	struct ovs_key_ipv4_tunnel tun_key;
	int sent_len = 0;
	int tunnel_hlen;
	__be16 frag_off = 0;
	__be32 daddr;
	__be32 saddr;
	u8 ttl;
	u8 tos;

	/* Validate the protocol headers before we try to use them. */
	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !vlan_tx_tag_present(skb)) {
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			goto error_free;

		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, VLAN_ETH_HLEN);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct iphdr))))
			skb->protocol = 0;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct ipv6hdr))))
			skb->protocol = 0;
	}
#endif

	/* If OVS_CB(skb)->tun_key is NULL, point it at the local tun_key here,
	 * and zero it out. */
	if (!OVS_CB(skb)->tun_key) {
		memset(&tun_key, 0, sizeof(tun_key));
		OVS_CB(skb)->tun_key = &tun_key;
	}

	tunnel_hlen = tnl_vport->tnl_ops->hdr_len(mutable, OVS_CB(skb)->tun_key);
	if (unlikely(tunnel_hlen < 0)) {
		err = VPORT_E_TX_DROPPED;
		goto error_free;
	}
	tunnel_hlen += sizeof(struct iphdr);

	if (OVS_CB(skb)->tun_key->ipv4_dst) {
		daddr = OVS_CB(skb)->tun_key->ipv4_dst;
		saddr = OVS_CB(skb)->tun_key->ipv4_src;
		tos = OVS_CB(skb)->tun_key->ipv4_tos;
		ttl = OVS_CB(skb)->tun_key->ipv4_ttl;
	} else {
		u8 inner_tos = 0;

		daddr = mutable->key.daddr;
		saddr = mutable->key.saddr;

		if (unlikely(!daddr)) {
			/* Trying to send a packet from a null port without
			 * tunnel info?  Drop this packet. */
			err = VPORT_E_TX_DROPPED;
			goto error_free;
		}

		/* ToS */
		if (skb->protocol == htons(ETH_P_IP))
			inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif

		if (mutable->flags & TNL_F_TOS_INHERIT)
			tos = inner_tos;
		else
			tos = mutable->tos;

		tos = INET_ECN_encapsulate(tos, inner_tos);

		/* TTL */
		ttl = mutable->ttl;
		if (mutable->flags & TNL_F_TTL_INHERIT) {
			if (skb->protocol == htons(ETH_P_IP))
				ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
			else if (skb->protocol == htons(ETH_P_IPV6))
				ttl = ipv6_hdr(skb)->hop_limit;
#endif
		}
	}

	rt = find_route(port_key_get_net(&mutable->key), &saddr, daddr,
			tnl_vport->tnl_ops->ipproto, tos);
	if (IS_ERR(rt))
		goto error_free;

	skb_clear_rxhash(skb);

	/* Offloading */
	skb = handle_offloads(skb, mutable, rt, tunnel_hlen);
	if (IS_ERR(skb)) {
		skb = NULL;
		goto err_free_rt;
	}

	/* MTU */
	if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off, tunnel_hlen))) {
		err = VPORT_E_TX_DROPPED;
		goto err_free_rt;
	}

	/* TTL fixup. */
	if (!OVS_CB(skb)->tun_key->ipv4_dst) {
		if (!(mutable->flags & TNL_F_TTL_INHERIT)) {
			if (!ttl)
				ttl = ip4_dst_hoplimit(&rt_dst(rt));
		}
	}

	while (skb) {
		struct iphdr *iph;
		struct sk_buff *next_skb = skb->next;

		skb->next = NULL;

		if (unlikely(vlan_deaccel_tag(skb)))
			goto next;

		skb_push(skb, tunnel_hlen);
		skb_reset_network_header(skb);
		skb_set_transport_header(skb, sizeof(struct iphdr));

		if (next_skb)
			skb_dst_set(skb, dst_clone(&rt_dst(rt)));
		else
			skb_dst_set(skb, &rt_dst(rt));

		/* Push IP header. */
		iph = ip_hdr(skb);
		iph->version = 4;
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->protocol = tnl_vport->tnl_ops->ipproto;
		iph->daddr = daddr;
		iph->saddr = saddr;
		iph->tos = tos;
		iph->ttl = ttl;
		iph->frag_off = frag_off;
		ip_select_ident(iph, &rt_dst(rt), NULL);

		/* Push Tunnel header. */
		skb = tnl_vport->tnl_ops->build_header(vport, mutable,
						       &rt_dst(rt), skb, tunnel_hlen);
		if (unlikely(!skb))
			goto next;

		sent_len += send_frags(skb, tunnel_hlen);

next:
		skb = next_skb;
	}

	if (unlikely(sent_len == 0))
		ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);

	return sent_len;

err_free_rt:
	ip_rt_put(rt);
error_free:
	ovs_tnl_free_linked_skbs(skb);
	ovs_vport_record_error(vport, err);
	return sent_len;
}
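
/* Netlink policy for the OVS_TUNNEL_ATTR_* options that userspace supplies
 * when creating or reconfiguring a tunnel vport. */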
static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
	[OVS_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
	[OVS_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
	[OVS_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
	[OVS_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
	[OVS_TUNNEL_ATTR_DST_PORT] = { .type = NLA_U16 },
};

/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be
 * zeroed. */
static int tnl_set_config(struct net *net, struct nlattr *options,
			  const struct tnl_ops *tnl_ops,
			  const struct vport *cur_vport,
			  struct tnl_mutable_config *mutable)
{
	const struct vport *old_vport;
	const struct tnl_mutable_config *old_mutable;
	struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
	int err;

	port_key_set_net(&mutable->key, net);
	mutable->key.tunnel_type = tnl_ops->tunnel_type;

	err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
	if (err)
		return err;

	/* Process attributes possibly useful for null_ports first */
	if (a[OVS_TUNNEL_ATTR_DST_PORT])
		mutable->dst_port =
			htons(nla_get_u16(a[OVS_TUNNEL_ATTR_DST_PORT]));

	if (a[OVS_TUNNEL_ATTR_DST_IPV4])
		mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);

	/* Skip the rest if configuring a null_port */
	if (!mutable->key.daddr)
		goto out;

	if (a[OVS_TUNNEL_ATTR_FLAGS])
		mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS])
			& TNL_F_PUBLIC;

	if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) {
		if (ipv4_is_multicast(mutable->key.daddr))
			return -EINVAL;
		mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
	}

	if (a[OVS_TUNNEL_ATTR_TOS]) {
		mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
		/* Reject ToS config with ECN bits set. */
		if (mutable->tos & INET_ECN_MASK)
			return -EINVAL;
	}

	if (a[OVS_TUNNEL_ATTR_TTL])
		mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);

	if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
		mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
		mutable->flags |= TNL_F_IN_KEY_MATCH;
	} else {
		mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
		mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
	}

	if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
		mutable->flags |= TNL_F_OUT_KEY_ACTION;
	else
		mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);

	if (ipv4_is_multicast(mutable->key.daddr)) {
		struct net_device *dev;
		struct rtable *rt;
		__be32 saddr = mutable->key.saddr;

		rt = find_route(port_key_get_net(&mutable->key),
				&saddr, mutable->key.daddr,
				tnl_ops->ipproto, mutable->tos);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt_dst(rt).dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		mutable->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), mutable->key.daddr);
	}

out:
	old_vport = port_table_lookup(&mutable->key, &old_mutable);
	if (old_vport && old_vport != cur_vport)
		return -EEXIST;

	return 0;
}
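
/* Allocates a tunnel vport, applies the netlink configuration and publishes
 * the port in the port table; on failure everything allocated so far is
 * unwound and an ERR_PTR is returned. */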
struct vport *ovs_tnl_create(const struct vport_parms *parms,
			     const struct vport_ops *vport_ops,
			     const struct tnl_ops *tnl_ops)
{
	struct vport *vport;
	struct tnl_vport *tnl_vport;
	struct tnl_mutable_config *mutable;
	int initial_frag_id;
	int err;

	vport = ovs_vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	tnl_vport = tnl_vport_priv(vport);

	strcpy(tnl_vport->name, parms->name);
	tnl_vport->tnl_ops = tnl_ops;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	random_ether_addr(mutable->eth_addr);

	get_random_bytes(&initial_frag_id, sizeof(int));
	atomic_set(&tnl_vport->frag_id, initial_frag_id);

	err = tnl_set_config(ovs_dp_get_net(parms->dp), parms->options, tnl_ops,
			     NULL, mutable);
	if (err)
		goto error_free_mutable;

	rcu_assign_pointer(tnl_vport->mutable, mutable);

	port_table_add_port(vport);
	return vport;

error_free_mutable:
	free_mutable_rtnl(mutable);
	kfree(mutable);
error_free_vport:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}

int ovs_tnl_set_options(struct vport *vport, struct nlattr *options)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *old_mutable;
	struct tnl_mutable_config *mutable;
	int err;

	old_mutable = rtnl_dereference(tnl_vport->mutable);
	if (!old_mutable->key.daddr)
		return -EINVAL;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error;
	}

	/* Copy fields whose values should be retained. */
	mutable->seq = old_mutable->seq + 1;
	memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);

	/* Parse the others configured by userspace. */
	err = tnl_set_config(ovs_dp_get_net(vport->dp), options, tnl_vport->tnl_ops,
			     vport, mutable);
	if (err)
		goto error_free;

	if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
		port_table_move_port(vport, mutable);
	else
		assign_config_rcu(vport, mutable);

	return 0;

error_free:
	free_mutable_rtnl(mutable);
	kfree(mutable);
error:
	return err;
}

int ovs_tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);

	if (mutable->dst_port && nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT,
					     ntohs(mutable->dst_port)))
		goto nla_put_failure;

	/* Skip the rest for null_ports */
	if (!mutable->key.daddr)
		return 0;

	if (nla_put_be32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr))
		goto nla_put_failure;
	if (nla_put_u32(skb, OVS_TUNNEL_ATTR_FLAGS,
			mutable->flags & TNL_F_PUBLIC))
		goto nla_put_failure;
	if (!(mutable->flags & TNL_F_IN_KEY_MATCH) &&
	    nla_put_be64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key))
		goto nla_put_failure;
	if (!(mutable->flags & TNL_F_OUT_KEY_ACTION) &&
	    nla_put_be64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key))
		goto nla_put_failure;
	if (mutable->key.saddr &&
	    nla_put_be32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr))
		goto nla_put_failure;
	if (mutable->tos && nla_put_u8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos))
		goto nla_put_failure;
	if (mutable->ttl && nla_put_u8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void free_port_rcu(struct rcu_head *rcu)
{
	struct tnl_vport *tnl_vport = container_of(rcu,
						   struct tnl_vport, rcu);

	kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
	ovs_vport_free(tnl_vport_to_vport(tnl_vport));
}

void ovs_tnl_destroy(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = rtnl_dereference(tnl_vport->mutable);
	port_table_remove_port(vport);
	free_mutable_rtnl(mutable);
	call_rcu(&tnl_vport->rcu, free_port_rcu);
}

int ovs_tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *old_mutable, *mutable;

	old_mutable = rtnl_dereference(tnl_vport->mutable);
	mutable = kmemdup(old_mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable)
		return -ENOMEM;

	old_mutable->mlink = 0;

	memcpy(mutable->eth_addr, addr, ETH_ALEN);
	assign_config_rcu(vport, mutable);

	return 0;
}

const char *ovs_tnl_get_name(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

	return tnl_vport->name;
}

const unsigned char *ovs_tnl_get_addr(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

	return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
}

void ovs_tnl_free_linked_skbs(struct sk_buff *skb)
{
	while (skb) {
		struct sk_buff *next = skb->next;

		kfree_skb(skb);
		skb = next;
	}
}

int ovs_tnl_init(void)
{
	int i;

	port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
			     GFP_KERNEL);
	if (!port_table)
		return -ENOMEM;

	for (i = 0; i < PORT_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&port_table[i]);

	return 0;
}

void ovs_tnl_exit(void)
{
	kfree(port_table);
}