/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "checksum.h"
#include "datapath.h"
#include "tunnel.h"
#include "vlan.h"
#include "vport.h"
#include "vport-internal_dev.h"

#define PORT_TABLE_SIZE  1024

static struct hlist_head *port_table __read_mostly;

/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int key_multicast_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;
static unsigned int null_ports __read_mostly;
static unsigned int multicast_ports __read_mostly;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif
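
/*
 * Compatibility shim: kernels before 2.6.36 reach the dst_entry embedded
 * in struct rtable as rt->u.dst, while newer kernels expose it directly
 * as rt->dst.  rt_dst() hides that difference for the code below.
 */
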
static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
	return vport_from_priv(tnl_vport);
}

static void free_config_rcu(struct rcu_head *rcu)
{
	struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);

	kfree(c);
}

/* Frees the portion of 'mutable' that requires RTNL and thus can't happen
 * within an RCU callback.  Fortunately this part doesn't require waiting for
 * an RCU grace period.
 */
static void free_mutable_rtnl(struct tnl_mutable_config *mutable)
{
	ASSERT_RTNL();
	if (ipv4_is_multicast(mutable->key.daddr) && mutable->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(port_key_get_net(&mutable->key), mutable->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, mutable->key.daddr);
	}
}

static void assign_config_rcu(struct vport *vport,
			      struct tnl_mutable_config *new_config)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *old_config;

	old_config = rtnl_dereference(tnl_vport->mutable);
	rcu_assign_pointer(tnl_vport->mutable, new_config);

	free_mutable_rtnl(old_config);
	call_rcu(&old_config->rcu, free_config_rcu);
}
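
/*
 * This is the writer side of the RCU scheme used throughout this file:
 * publish the new config with rcu_assign_pointer(), tear down the
 * RTNL-protected part of the old one immediately (RTNL is held here),
 * and defer the kfree() via call_rcu() until any readers that picked up
 * the old pointer with rcu_dereference() have finished.
 */
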
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
	bool is_multicast = ipv4_is_multicast(mutable->key.daddr);

	if (mutable->flags & TNL_F_IN_KEY_MATCH) {
		if (mutable->key.saddr)
			return &local_remote_ports;
		else if (is_multicast)
			return &multicast_ports;
		else
			return &remote_ports;
	} else {
		if (mutable->key.saddr)
			return &key_local_remote_ports;
		else if (is_multicast)
			return &key_multicast_ports;
		else if (mutable->key.daddr)
			return &key_remote_ports;
		else
			return &null_ports;
	}
}
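
/*
 * The counters returned by find_port_pool() exist so that
 * ovs_tnl_find_port() can skip whole classes of lookups: if, say, no
 * ports are configured with a remote IP and an exact in_key,
 * key_remote_ports stays zero and that hash probe is never attempted.
 */
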
static u32 port_hash(const struct port_lookup_key *key)
{
	return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
}

static struct hlist_head *find_bucket(u32 hash)
{
	return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
}
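
/*
 * Hashing assumptions: PORT_KEY_LEN must be a multiple of sizeof(u32),
 * since jhash2() consumes the key as 32-bit words, and PORT_TABLE_SIZE
 * must remain a power of two so the mask in find_bucket() always selects
 * a valid slot.
 */
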
static void port_table_add_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable;
	u32 hash;

	mutable = rtnl_dereference(tnl_vport->mutable);
	hash = port_hash(&mutable->key);
	hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}

static void port_table_move_port(struct vport *vport,
				 struct tnl_mutable_config *new_mutable)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	u32 hash;

	hash = port_hash(&new_mutable->key);
	hlist_del_init_rcu(&tnl_vport->hash_node);
	hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
	assign_config_rcu(vport, new_mutable);
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}

static void port_table_remove_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

	hlist_del_init_rcu(&tnl_vport->hash_node);

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
}

static struct vport *port_table_lookup(struct port_lookup_key *key,
				       const struct tnl_mutable_config **pmutable)
{
	struct hlist_node *n;
	struct hlist_head *bucket;
	u32 hash = port_hash(key);
	struct tnl_vport *tnl_vport;

	bucket = find_bucket(hash);

	hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
		struct tnl_mutable_config *mutable;

		mutable = rcu_dereference_rtnl(tnl_vport->mutable);
		if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
			*pmutable = mutable;
			return tnl_vport_to_vport(tnl_vport);
		}
	}

	return NULL;
}

struct vport *ovs_tnl_find_port(struct net *net, __be32 saddr, __be32 daddr,
				__be64 key, int tunnel_type,
				const struct tnl_mutable_config **mutable)
{
	struct port_lookup_key lookup;
	struct vport *vport;
	bool is_multicast = ipv4_is_multicast(saddr);

	port_key_set_net(&lookup, net);
	lookup.saddr = saddr;
	lookup.daddr = daddr;

	/* First try for exact match on in_key. */
	lookup.in_key = key;
	lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
	if (!is_multicast && key_local_remote_ports) {
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;
	}
	if (key_remote_ports) {
		lookup.saddr = 0;
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;

		lookup.saddr = saddr;
	}

	/* Then try matches that wildcard in_key. */
	lookup.in_key = 0;
	lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
	if (!is_multicast && local_remote_ports) {
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;
	}
	if (remote_ports) {
		lookup.saddr = 0;
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;
	}

	if (is_multicast) {
		lookup.saddr = 0;
		lookup.daddr = saddr;
		if (key_multicast_ports) {
			lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
			lookup.in_key = key;
			vport = port_table_lookup(&lookup, mutable);
			if (vport)
				return vport;
		}
		if (multicast_ports) {
			lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
			lookup.in_key = 0;
			vport = port_table_lookup(&lookup, mutable);
			if (vport)
				return vport;
		}
	}

	if (null_ports) {
		lookup.daddr = 0;
		lookup.saddr = 0;
		lookup.in_key = 0;
		lookup.tunnel_type = tunnel_type;
		vport = port_table_lookup(&lookup, mutable);
		if (vport)
			return vport;
	}

	return NULL;
}
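
/*
 * Illustrative caller (hypothetical; the real protocol handlers live in
 * files such as vport-gre.c).  A receive hook resolves the vport under
 * rcu_read_lock() and hands the decapsulated packet to ovs_tnl_rcv():
 *
 *	const struct tnl_mutable_config *mutable;
 *	struct vport *vport;
 *
 *	rcu_read_lock();
 *	vport = ovs_tnl_find_port(dev_net(skb->dev), iph->saddr,
 *				  iph->daddr, key, tunnel_type, &mutable);
 *	if (vport)
 *		ovs_tnl_rcv(vport, skb);
 *	rcu_read_unlock();
 */
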
static void ecn_decapsulate(struct sk_buff *skb)
{
	if (unlikely(INET_ECN_is_ce(OVS_CB(skb)->tun_key->ipv4_tos))) {
		__be16 protocol = skb->protocol;

		skb_set_network_header(skb, ETH_HLEN);

		if (protocol == htons(ETH_P_8021Q)) {
			if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
				return;

			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
			skb_set_network_header(skb, VLAN_ETH_HLEN);
		}

		if (protocol == htons(ETH_P_IP)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct iphdr))))
				return;

			IP_ECN_set_ce(ip_hdr(skb));
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (protocol == htons(ETH_P_IPV6)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct ipv6hdr))))
				return;

			IP6_ECN_set_ce(ipv6_hdr(skb));
		}
#endif
	}
}
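
/*
 * The above implements the decapsulation half of RFC 6040 ECN handling:
 * if the outer IP header arrived with Congestion Experienced set, the CE
 * mark is copied onto the inner IPv4 or IPv6 header so that congestion
 * signaling survives the tunnel.
 */
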
/*
 *	ovs_tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb)
{
	struct ethhdr *eh;

	skb_reset_mac_header(skb);
	eh = eth_hdr(skb);

	if (likely(ntohs(eh->h_proto) >= 1536))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	skb_dst_drop(skb);
	nf_reset(skb);
	skb_clear_rxhash(skb);
	secpath_reset(skb);

	ecn_decapsulate(skb);
	vlan_set_tci(skb, 0);

	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}

	ovs_vport_receive(vport, skb);
}

static struct rtable *find_route(struct net *net,
				 __be32 *saddr, __be32 daddr, u8 ipproto,
				 u8 tos)
{
	struct rtable *rt;
	/* The tunnel configuration keeps the DSCP part of the TOS bits,
	 * but the Linux routing code expects RT_TOS bits only. */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
	struct flowi fl = { .nl_u = { .ip4_u = {
					.daddr = daddr,
					.saddr = *saddr,
					.tos = RT_TOS(tos) } },
			    .proto = ipproto };

	if (unlikely(ip_route_output_key(net, &rt, &fl)))
		return ERR_PTR(-EADDRNOTAVAIL);
	*saddr = fl.nl_u.ip4_u.saddr;
	return rt;
#else
	struct flowi4 fl = { .daddr = daddr,
			     .saddr = *saddr,
			     .flowi4_tos = RT_TOS(tos),
			     .flowi4_proto = ipproto };

	rt = ip_route_output_key(net, &fl);
	*saddr = fl.saddr;
	return rt;
#endif
}
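
/*
 * Callers own the route returned by find_route() and must release it
 * with ip_rt_put() when done (see the error paths in ovs_tnl_send() and
 * the multicast setup in tnl_set_config()).  On pre-2.6.39 kernels the
 * lookup fills in the struct rtable through an out parameter; on newer
 * kernels ip_route_output_key() returns the route (or an ERR_PTR)
 * directly.
 */
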
static bool need_linearize(const struct sk_buff *skb)
{
	int i;

	if (unlikely(skb_shinfo(skb)->frag_list))
		return true;

	/*
	 * Generally speaking we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1 we know nobody else can
	 * change them from underneath us and we can skip the linearization.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
			return true;

	return false;
}

static struct sk_buff *handle_offloads(struct sk_buff *skb,
				       const struct tnl_mutable_config *mutable,
				       const struct rtable *rt,
				       int tunnel_hlen)
{
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ tunnel_hlen
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) + 16);

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto error_free;
	}

	forward_ip_summed(skb, true);

	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;

		nskb = __skb_gso_segment(skb, 0, false);
		if (IS_ERR(nskb)) {
			kfree_skb(skb);
			err = PTR_ERR(nskb);
			goto error;
		}

		consume_skb(skb);
		skb = nskb;
	} else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
		/* Pages aren't locked and could change at any time.
		 * If this happens after we compute the checksum, the
		 * checksum will be wrong.  We linearize now to avoid
		 * this problem. */
		if (unlikely(need_linearize(skb))) {
			err = __skb_linearize(skb);
			if (unlikely(err))
				goto error_free;
		}

		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error_free;
	}

	set_ip_summed(skb, OVS_CSUM_NONE);

	return skb;

error_free:
	kfree_skb(skb);
error:
	return ERR_PTR(err);
}
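
/*
 * Note that for GSO packets handle_offloads() may hand back a list of
 * segments chained through skb->next rather than a single skb; the send
 * loop in ovs_tnl_send() walks that chain and encapsulates each segment
 * separately.
 */
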
static int send_frags(struct sk_buff *skb,
		      int tunnel_hlen)
{
	int sent_len;

	sent_len = 0;
	while (skb) {
		struct sk_buff *next = skb->next;
		int frag_len = skb->len - tunnel_hlen;
		int err;

		skb->next = NULL;
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

		err = ip_local_out(skb);
		skb = next;
		if (unlikely(net_xmit_eval(err)))
			goto free_frags;
		sent_len += frag_len;
	}

	return sent_len;

free_frags:
	/*
	 * There's no point in continuing to send fragments once one has been
	 * dropped so just free the rest.  This may help improve the congestion
	 * that caused the first packet to be dropped.
	 */
	ovs_tnl_free_linked_skbs(skb);
	return sent_len;
}
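
/*
 * The value returned by send_frags() counts payload bytes only: each
 * fragment contributes skb->len minus the tunnel header, so the caller's
 * byte accounting reflects what the inner flow actually transmitted.
 */
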
int ovs_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
	enum vport_err_type err = VPORT_E_TX_ERROR;
	struct rtable *rt;
	struct ovs_key_ipv4_tunnel tun_key;
	int sent_len = 0;
	int tunnel_hlen;
	__be16 frag_off = 0;
	__be32 daddr;
	__be32 saddr;
	u8 ttl;
	u8 tos;

	/* Validate the protocol headers before we try to use them. */
	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !vlan_tx_tag_present(skb)) {
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			goto error_free;

		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, VLAN_ETH_HLEN);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct iphdr))))
			skb->protocol = 0;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct ipv6hdr))))
			skb->protocol = 0;
	}
#endif

	/* If OVS_CB(skb)->tun_key is NULL, point it at the local tun_key here,
	 * and zero it out.
	 */
	if (!OVS_CB(skb)->tun_key) {
		memset(&tun_key, 0, sizeof(tun_key));
		OVS_CB(skb)->tun_key = &tun_key;
	}

	tunnel_hlen = tnl_vport->tnl_ops->hdr_len(mutable, OVS_CB(skb)->tun_key);
	if (unlikely(tunnel_hlen < 0)) {
		err = VPORT_E_TX_DROPPED;
		goto error_free;
	}
	tunnel_hlen += sizeof(struct iphdr);

	if (OVS_CB(skb)->tun_key->ipv4_dst) {
		daddr = OVS_CB(skb)->tun_key->ipv4_dst;
		saddr = OVS_CB(skb)->tun_key->ipv4_src;
		tos = OVS_CB(skb)->tun_key->ipv4_tos;
		ttl = OVS_CB(skb)->tun_key->ipv4_ttl;
		frag_off = OVS_CB(skb)->tun_key->tun_flags &
				OVS_TNL_F_DONT_FRAGMENT ? htons(IP_DF) : 0;
	} else {
		u8 inner_tos;

		daddr = mutable->key.daddr;
		saddr = mutable->key.saddr;

		if (unlikely(!daddr)) {
			/* Trying to send a packet from a null port without
			 * tunnel info?  Drop this packet. */
			err = VPORT_E_TX_DROPPED;
			goto error_free;
		}

		/* ToS */
		if (skb->protocol == htons(ETH_P_IP))
			inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
		else
			inner_tos = 0;

		if (mutable->flags & TNL_F_TOS_INHERIT)
			tos = inner_tos;
		else
			tos = mutable->tos;

		tos = INET_ECN_encapsulate(tos, inner_tos);

		/* TTL */
		ttl = mutable->ttl;
		if (mutable->flags & TNL_F_TTL_INHERIT) {
			if (skb->protocol == htons(ETH_P_IP))
				ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
			else if (skb->protocol == htons(ETH_P_IPV6))
				ttl = ipv6_hdr(skb)->hop_limit;
#endif
		}

		frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
	}

	/* Route lookup */
	rt = find_route(port_key_get_net(&mutable->key), &saddr, daddr,
			tnl_vport->tnl_ops->ipproto, tos);
	if (IS_ERR(rt))
		goto error_free;

	/* Reset SKB */
	nf_reset(skb);
	secpath_reset(skb);
	skb_dst_drop(skb);
	skb_clear_rxhash(skb);

	/* Offloading */
	skb = handle_offloads(skb, mutable, rt, tunnel_hlen);
	if (IS_ERR(skb)) {
		skb = NULL;
		goto err_free_rt;
	}

	/* TTL Fixup. */
	if (!OVS_CB(skb)->tun_key->ipv4_dst) {
		if (!(mutable->flags & TNL_F_TTL_INHERIT)) {
			if (!ttl)
				ttl = ip4_dst_hoplimit(&rt_dst(rt));
		}
	}

	while (skb) {
		struct iphdr *iph;
		struct sk_buff *next_skb = skb->next;
		skb->next = NULL;

		if (unlikely(vlan_deaccel_tag(skb)))
			goto next;

		skb_push(skb, tunnel_hlen);
		skb_reset_network_header(skb);
		skb_set_transport_header(skb, sizeof(struct iphdr));

		if (next_skb)
			skb_dst_set(skb, dst_clone(&rt_dst(rt)));
		else
			skb_dst_set(skb, &rt_dst(rt));

		/* Push IP header. */
		iph = ip_hdr(skb);
		iph->version = 4;
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->protocol = tnl_vport->tnl_ops->ipproto;
		iph->daddr = daddr;
		iph->saddr = saddr;
		iph->tos = tos;
		iph->ttl = ttl;
		iph->frag_off = frag_off;
		ip_select_ident(iph, &rt_dst(rt), NULL);

		/* Push Tunnel header. */
		skb = tnl_vport->tnl_ops->build_header(vport, mutable,
						       &rt_dst(rt), skb, tunnel_hlen);
		if (unlikely(!skb))
			goto next;

		sent_len += send_frags(skb, tunnel_hlen);

next:
		skb = next_skb;
	}

	if (unlikely(sent_len == 0))
		ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);

	return sent_len;

err_free_rt:
	ip_rt_put(rt);
error_free:
	ovs_tnl_free_linked_skbs(skb);
	ovs_vport_record_error(vport, err);
	return sent_len;
}

static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
	[OVS_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
	[OVS_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
	[OVS_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
	[OVS_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
	[OVS_TUNNEL_ATTR_DST_PORT] = { .type = NLA_U16 },
};
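
/*
 * Sketch of an options block as userspace might supply it (values are
 * illustrative only):
 *
 *	OVS_TUNNEL_ATTR_DST_IPV4 = 192.0.2.1
 *	OVS_TUNNEL_ATTR_IN_KEY   = 100
 *	OVS_TUNNEL_ATTR_TTL      = 64
 *
 * Omitting OVS_TUNNEL_ATTR_IN_KEY puts the port in key-match (wildcard)
 * mode, and omitting OVS_TUNNEL_ATTR_OUT_KEY sets TNL_F_OUT_KEY_ACTION
 * so the output key comes from the flow action; see tnl_set_config().
 */
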
/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be
 * zeroed. */
static int tnl_set_config(struct net *net, struct nlattr *options,
			  const struct tnl_ops *tnl_ops,
			  const struct vport *cur_vport,
			  struct tnl_mutable_config *mutable)
{
	const struct vport *old_vport;
	const struct tnl_mutable_config *old_mutable;
	struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
	int err;

	port_key_set_net(&mutable->key, net);
	mutable->key.tunnel_type = tnl_ops->tunnel_type;

	if (!options)
		goto out;

	err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
	if (err)
		return err;

	/* Process attributes possibly useful for null_ports first */
	if (a[OVS_TUNNEL_ATTR_DST_PORT])
		mutable->dst_port =
			htons(nla_get_u16(a[OVS_TUNNEL_ATTR_DST_PORT]));

	if (a[OVS_TUNNEL_ATTR_DST_IPV4])
		mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);

	/* Skip the rest if configuring a null_port */
	if (!mutable->key.daddr)
		goto out;

	if (a[OVS_TUNNEL_ATTR_FLAGS])
		mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS])
			& TNL_F_PUBLIC;

	if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) {
		if (ipv4_is_multicast(mutable->key.daddr))
			return -EINVAL;
		mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
	}

	if (a[OVS_TUNNEL_ATTR_TOS]) {
		mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
		/* Reject ToS config with ECN bits set. */
		if (mutable->tos & INET_ECN_MASK)
			return -EINVAL;
	}

	if (a[OVS_TUNNEL_ATTR_TTL])
		mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);

	if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
		mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
		mutable->flags |= TNL_F_IN_KEY_MATCH;
	} else {
		mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
		mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
	}

	if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
		mutable->flags |= TNL_F_OUT_KEY_ACTION;
	else
		mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);

	if (ipv4_is_multicast(mutable->key.daddr)) {
		struct net_device *dev;
		struct rtable *rt;
		__be32 saddr = mutable->key.saddr;

		rt = find_route(port_key_get_net(&mutable->key),
				&saddr, mutable->key.daddr,
				tnl_ops->ipproto, mutable->tos);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt_dst(rt).dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		mutable->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), mutable->key.daddr);
	}

out:
	old_vport = port_table_lookup(&mutable->key, &old_mutable);
	if (old_vport && old_vport != cur_vport)
		return -EEXIST;

	return 0;
}

struct vport *ovs_tnl_create(const struct vport_parms *parms,
			     const struct vport_ops *vport_ops,
			     const struct tnl_ops *tnl_ops)
{
	struct vport *vport;
	struct tnl_vport *tnl_vport;
	struct tnl_mutable_config *mutable;
	int err;

	vport = ovs_vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	tnl_vport = tnl_vport_priv(vport);

	strcpy(tnl_vport->name, parms->name);
	tnl_vport->tnl_ops = tnl_ops;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	err = tnl_set_config(ovs_dp_get_net(parms->dp), parms->options, tnl_ops,
			     NULL, mutable);
	if (err)
		goto error_free_mutable;

	rcu_assign_pointer(tnl_vport->mutable, mutable);

	port_table_add_port(vport);
	return vport;

error_free_mutable:
	free_mutable_rtnl(mutable);
	kfree(mutable);
error_free_vport:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}

int ovs_tnl_set_options(struct vport *vport, struct nlattr *options)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *old_mutable;
	struct tnl_mutable_config *mutable;
	int err;

	old_mutable = rtnl_dereference(tnl_vport->mutable);
	if (!old_mutable->key.daddr)
		return -EINVAL;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error;
	}

	/* Parse the others configured by userspace. */
	err = tnl_set_config(ovs_dp_get_net(vport->dp), options, tnl_vport->tnl_ops,
			     vport, mutable);
	if (err)
		goto error_free;

	if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
		port_table_move_port(vport, mutable);
	else
		assign_config_rcu(vport, mutable);

	return 0;

error_free:
	free_mutable_rtnl(mutable);
	kfree(mutable);
error:
	return err;
}

int ovs_tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);

	if (mutable->dst_port && nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT,
					     ntohs(mutable->dst_port)))
		goto nla_put_failure;

	/* Skip the rest for null_ports */
	if (!mutable->key.daddr)
		return 0;

	if (nla_put_be32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr))
		goto nla_put_failure;
	if (nla_put_u32(skb, OVS_TUNNEL_ATTR_FLAGS,
			mutable->flags & TNL_F_PUBLIC))
		goto nla_put_failure;
	if (!(mutable->flags & TNL_F_IN_KEY_MATCH) &&
	    nla_put_be64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key))
		goto nla_put_failure;
	if (!(mutable->flags & TNL_F_OUT_KEY_ACTION) &&
	    nla_put_be64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key))
		goto nla_put_failure;
	if (mutable->key.saddr &&
	    nla_put_be32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr))
		goto nla_put_failure;
	if (mutable->tos && nla_put_u8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos))
		goto nla_put_failure;
	if (mutable->ttl && nla_put_u8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void free_port_rcu(struct rcu_head *rcu)
{
	struct tnl_vport *tnl_vport = container_of(rcu,
						   struct tnl_vport, rcu);

	kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
	ovs_vport_free(tnl_vport_to_vport(tnl_vport));
}

void ovs_tnl_destroy(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = rtnl_dereference(tnl_vport->mutable);
	port_table_remove_port(vport);
	free_mutable_rtnl(mutable);
	call_rcu(&tnl_vport->rcu, free_port_rcu);
}
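
/*
 * Removal from the port table takes effect immediately for new lookups,
 * but the vport and its config are freed through call_rcu() so that
 * readers still traversing the RCU-protected bucket lists never touch
 * freed memory.
 */
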
const char *ovs_tnl_get_name(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return tnl_vport->name;
}

void ovs_tnl_free_linked_skbs(struct sk_buff *skb)
{
	while (skb) {
		struct sk_buff *next = skb->next;

		kfree_skb(skb);
		skb = next;
	}
}

int ovs_tnl_init(void)
{
	int i;

	/* Allocate the bucket heads themselves, not an array of pointers. */
	port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
			     GFP_KERNEL);
	if (!port_table)
		return -ENOMEM;

	for (i = 0; i < PORT_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&port_table[i]);

	return 0;
}

void ovs_tnl_exit(void)
{
	kfree(port_table);
}