2 * Copyright (c) 2007-2011 Nicira Networks.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
19 #include <linux/if_arp.h>
20 #include <linux/if_ether.h>
22 #include <linux/if_vlan.h>
23 #include <linux/igmp.h>
25 #include <linux/in_route.h>
26 #include <linux/inetdevice.h>
27 #include <linux/jhash.h>
28 #include <linux/list.h>
29 #include <linux/kernel.h>
30 #include <linux/version.h>
31 #include <linux/workqueue.h>
32 #include <linux/rculist.h>
34 #include <net/dsfield.h>
37 #include <net/inet_ecn.h>
39 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
42 #include <net/route.h>
50 #include "vport-generic.h"
51 #include "vport-internal_dev.h"
53 #ifdef NEED_CACHE_TIMEOUT
 * On kernels where we can't quickly detect changes in the rest of the system,
 * we use an expiration time to invalidate the cache. A shorter expiration
 * reduces the window during which we may potentially blackhole packets, while
 * a longer time improves performance by reducing how often the cache needs to
 * be rebuilt. A variety of factors may cause the cache to be invalidated
 * before the expiration time, but this is the maximum. The time is expressed
 * in jiffies.
63 #define MAX_CACHE_EXP HZ
67 * Interval to check for and remove caches that are no longer valid. Caches
68 * are checked for validity before they are used for packet encapsulation and
69 * old caches are removed at that time. However, if no packets are sent through
70 * the tunnel then the cache will never be destroyed. Since it holds
71 * references to a number of system objects, the cache will continue to use
72 * system resources by not allowing those objects to be destroyed. The cache
 * cleaner therefore runs periodically to free invalid caches; it does not
 * significantly affect system performance. A lower interval releases
 * resources faster but itself consumes resources by requiring more frequent
 * checks. A longer interval may result in messages being printed to the
 * kernel message buffer about unreleased resources. The interval is
 * expressed in jiffies.
80 #define CACHE_CLEANER_INTERVAL (5 * HZ)
82 #define CACHE_DATA_ALIGN 16
83 #define PORT_TABLE_SIZE 1024
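/*
 * Hash table of all tunnel vports, indexed by port_hash() over the port
 * lookup key.  Writers run under RTNL; readers use RCU.  PORT_TABLE_SIZE
 * must be a power of two, since find_bucket() masks the hash with
 * PORT_TABLE_SIZE - 1.
 */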
85 static struct hlist_head *port_table __read_mostly;
86 static int port_table_count;
88 static void cache_cleaner(struct work_struct *work);
89 static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
92 * These are just used as an optimization: they don't require any kind of
93 * synchronization because we could have just as easily read the value before
94 * the port change happened.
96 static unsigned int key_local_remote_ports __read_mostly;
97 static unsigned int key_remote_ports __read_mostly;
98 static unsigned int key_multicast_ports __read_mostly;
99 static unsigned int local_remote_ports __read_mostly;
100 static unsigned int remote_ports __read_mostly;
101 static unsigned int multicast_ports __read_mostly;
103 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
104 #define rt_dst(rt) (rt->dst)
106 #define rt_dst(rt) (rt->u.dst)
109 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
110 static struct hh_cache *rt_hh(struct rtable *rt)
112 struct neighbour *neigh = dst_get_neighbour(&rt->dst);
113 if (!neigh || !(neigh->nud_state & NUD_CONNECTED) ||
119 #define rt_hh(rt) (rt_dst(rt).hh)
122 static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
124 return vport_from_priv(tnl_vport);
127 /* This is analogous to rtnl_dereference for the tunnel cache. It checks that
 * cache_lock is held, so it is only for use by update-side code.
130 static struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
132 return rcu_dereference_protected(tnl_vport->cache,
133 lockdep_is_held(&tnl_vport->cache_lock));
136 static void schedule_cache_cleaner(void)
138 schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
141 static void free_cache(struct tnl_cache *cache)
146 flow_put(cache->flow);
147 ip_rt_put(cache->rt);
151 static void free_config_rcu(struct rcu_head *rcu)
153 struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
157 static void free_cache_rcu(struct rcu_head *rcu)
159 struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
163 /* Frees the portion of 'mutable' that requires RTNL and thus can't happen
164 * within an RCU callback. Fortunately this part doesn't require waiting for
165 * an RCU grace period.
167 static void free_mutable_rtnl(struct tnl_mutable_config *mutable)
170 if (ipv4_is_multicast(mutable->key.daddr) && mutable->mlink) {
171 struct in_device *in_dev;
172 in_dev = inetdev_by_index(&init_net, mutable->mlink);
174 ip_mc_dec_group(in_dev, mutable->key.daddr);
178 static void assign_config_rcu(struct vport *vport,
179 struct tnl_mutable_config *new_config)
181 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
182 struct tnl_mutable_config *old_config;
184 old_config = rtnl_dereference(tnl_vport->mutable);
185 rcu_assign_pointer(tnl_vport->mutable, new_config);
187 free_mutable_rtnl(old_config);
188 call_rcu(&old_config->rcu, free_config_rcu);
191 static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
193 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
194 struct tnl_cache *old_cache;
196 old_cache = cache_dereference(tnl_vport);
197 rcu_assign_pointer(tnl_vport->cache, new_cache);
200 call_rcu(&old_cache->rcu, free_cache_rcu);
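/* Returns the per-class port counter that corresponds to 'mutable': keyed
 * vs. flow-keyed, and local+remote vs. remote-only vs. multicast.
 * tnl_find_port() consults these counters to skip lookups for classes with
 * no configured ports. */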
203 static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
205 bool is_multicast = ipv4_is_multicast(mutable->key.daddr);
207 if (mutable->flags & TNL_F_IN_KEY_MATCH) {
208 if (mutable->key.saddr)
209 return &local_remote_ports;
210 else if (is_multicast)
211 return &multicast_ports;
213 return &remote_ports;
215 if (mutable->key.saddr)
216 return &key_local_remote_ports;
217 else if (is_multicast)
218 return &key_multicast_ports;
220 return &key_remote_ports;
224 static u32 port_hash(const struct port_lookup_key *key)
226 return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
229 static struct hlist_head *find_bucket(u32 hash)
231 return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
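/* Inserts a vport into the hash table.  The cache cleaner is started when
 * the first port is added, and the matching per-class counter is bumped. */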
234 static void port_table_add_port(struct vport *vport)
236 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
237 const struct tnl_mutable_config *mutable;
240 if (port_table_count == 0)
241 schedule_cache_cleaner();
243 mutable = rtnl_dereference(tnl_vport->mutable);
244 hash = port_hash(&mutable->key);
245 hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
248 (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
251 static void port_table_move_port(struct vport *vport,
252 struct tnl_mutable_config *new_mutable)
254 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
257 hash = port_hash(&new_mutable->key);
258 hlist_del_init_rcu(&tnl_vport->hash_node);
259 hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
261 (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
262 assign_config_rcu(vport, new_mutable);
263 (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
266 static void port_table_remove_port(struct vport *vport)
268 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
270 hlist_del_init_rcu(&tnl_vport->hash_node);
273 if (port_table_count == 0)
274 cancel_delayed_work_sync(&cache_cleaner_wq);
276 (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
279 static struct vport *port_table_lookup(struct port_lookup_key *key,
280 const struct tnl_mutable_config **pmutable)
282 struct hlist_node *n;
283 struct hlist_head *bucket;
284 u32 hash = port_hash(key);
285 struct tnl_vport *tnl_vport;
287 bucket = find_bucket(hash);
289 hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
290 struct tnl_mutable_config *mutable;
292 mutable = rcu_dereference_rtnl(tnl_vport->mutable);
293 if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
295 return tnl_vport_to_vport(tnl_vport);
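/*
 * Finds the tunnel vport that should receive a packet with the given outer
 * addresses, key and tunnel type.  Exact in_key matches (TNL_T_KEY_EXACT) are
 * preferred over flow-keyed matches (TNL_T_KEY_MATCH), and unicast
 * configurations are tried before multicast ones.  On success, '*mutable' is
 * set to the matching port's configuration.
 */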
302 struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
304 const struct tnl_mutable_config **mutable)
306 struct port_lookup_key lookup;
308 bool is_multicast = ipv4_is_multicast(saddr);
310 lookup.saddr = saddr;
311 lookup.daddr = daddr;
313 /* First try for exact match on in_key. */
315 lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
316 if (!is_multicast && key_local_remote_ports) {
317 vport = port_table_lookup(&lookup, mutable);
321 if (key_remote_ports) {
323 vport = port_table_lookup(&lookup, mutable);
327 lookup.saddr = saddr;
330 /* Then try matches that wildcard in_key. */
332 lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
333 if (!is_multicast && local_remote_ports) {
334 vport = port_table_lookup(&lookup, mutable);
340 vport = port_table_lookup(&lookup, mutable);
347 lookup.daddr = saddr;
348 if (key_multicast_ports) {
349 lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
351 vport = port_table_lookup(&lookup, mutable);
355 if (multicast_ports) {
356 lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
358 vport = port_table_lookup(&lookup, mutable);
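/* If the outer IP header was marked with ECN Congestion Experienced, set CE
 * on the inner IPv4 or IPv6 header as well so that the congestion signal
 * survives decapsulation. */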
367 static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
369 if (unlikely(INET_ECN_is_ce(tos))) {
370 __be16 protocol = skb->protocol;
372 skb_set_network_header(skb, ETH_HLEN);
374 if (protocol == htons(ETH_P_8021Q)) {
375 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
378 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
379 skb_set_network_header(skb, VLAN_ETH_HLEN);
382 if (protocol == htons(ETH_P_IP)) {
383 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
384 + sizeof(struct iphdr))))
387 IP_ECN_set_ce(ip_hdr(skb));
389 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
390 else if (protocol == htons(ETH_P_IPV6)) {
391 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
392 + sizeof(struct ipv6hdr))))
395 IP6_ECN_set_ce(ipv6_hdr(skb));
402 * tnl_rcv - ingress point for generic tunnel code
404 * @vport: port this packet was received on
405 * @skb: received packet
406 * @tos: ToS from encapsulating IP packet, used to copy ECN bits
408 * Must be called with rcu_read_lock.
410 * Packets received by this function are in the following state:
411 * - skb->data points to the inner Ethernet header.
412 * - The inner Ethernet header is in the linear data area.
413 * - skb->csum does not include the inner Ethernet header.
414 * - The layer pointers are undefined.
416 void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
420 skb_reset_mac_header(skb);
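	/* An EtherType of 1536 or greater identifies the payload protocol;
	 * smaller values are 802.3 length fields. */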
423 if (likely(ntohs(eh->h_proto) >= 1536))
424 skb->protocol = eh->h_proto;
426 skb->protocol = htons(ETH_P_802_2);
430 skb_clear_rxhash(skb);
433 ecn_decapsulate(skb, tos);
434 vlan_set_tci(skb, 0);
436 if (unlikely(compute_ip_summed(skb, false))) {
441 vport_receive(vport, skb);
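/* Returns true only for addresses that are safe to use in an ICMP reply,
 * i.e. not multicast, limited broadcast, loopback or zero-network. */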
444 static bool check_ipv4_address(__be32 addr)
446 if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
447 || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
453 static bool ipv4_should_icmp(struct sk_buff *skb)
455 struct iphdr *old_iph = ip_hdr(skb);
	/* Don't respond to L2 broadcast or multicast. */
458 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
461 /* Don't respond to L3 broadcast or invalid addresses. */
462 if (!check_ipv4_address(old_iph->daddr) ||
463 !check_ipv4_address(old_iph->saddr))
466 /* Only respond to the first fragment. */
467 if (old_iph->frag_off & htons(IP_OFFSET))
470 /* Don't respond to ICMP error messages. */
471 if (old_iph->protocol == IPPROTO_ICMP) {
472 u8 icmp_type, *icmp_typep;
474 icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
475 (old_iph->ihl << 2) +
476 offsetof(struct icmphdr, type) -
477 skb->data, sizeof(icmp_type),
483 if (*icmp_typep > NR_ICMP_TYPES
484 || (*icmp_typep <= ICMP_PARAMETERPROB
485 && *icmp_typep != ICMP_ECHOREPLY
486 && *icmp_typep != ICMP_ECHO))
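/* Constructs an ICMP "fragmentation needed" message in 'nskb', advertising
 * 'mtu' and quoting 'payload_length' bytes of the offending packet in 'skb',
 * then fills in the ICMP checksum. */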
493 static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
494 unsigned int mtu, unsigned int payload_length)
496 struct iphdr *iph, *old_iph = ip_hdr(skb);
497 struct icmphdr *icmph;
500 iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
501 icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
502 payload = skb_put(nskb, payload_length);
506 iph->ihl = sizeof(struct iphdr) >> 2;
507 iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
508 IPTOS_PREC_INTERNETCONTROL;
509 iph->tot_len = htons(sizeof(struct iphdr)
510 + sizeof(struct icmphdr)
512 get_random_bytes(&iph->id, sizeof(iph->id));
515 iph->protocol = IPPROTO_ICMP;
516 iph->daddr = old_iph->saddr;
517 iph->saddr = old_iph->daddr;
522 icmph->type = ICMP_DEST_UNREACH;
523 icmph->code = ICMP_FRAG_NEEDED;
524 icmph->un.gateway = htonl(mtu);
527 nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
528 nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
529 payload, payload_length,
531 icmph->checksum = csum_fold(nskb->csum);
534 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
535 static bool ipv6_should_icmp(struct sk_buff *skb)
537 struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
539 int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
540 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
542 /* Check source address is valid. */
543 addr_type = ipv6_addr_type(&old_ipv6h->saddr);
544 if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
547 /* Don't reply to unspecified addresses. */
548 if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
551 /* Don't respond to ICMP error messages. */
552 payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
556 if (nexthdr == NEXTHDR_ICMP) {
557 u8 icmp_type, *icmp_typep;
559 icmp_typep = skb_header_pointer(skb, payload_off +
560 offsetof(struct icmp6hdr,
562 sizeof(icmp_type), &icmp_type);
564 if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
571 static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
572 unsigned int mtu, unsigned int payload_length)
574 struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
575 struct icmp6hdr *icmp6h;
578 ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
579 icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
580 payload = skb_put(nskb, payload_length);
585 memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
586 ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
588 ipv6h->nexthdr = NEXTHDR_ICMP;
589 ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
590 ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
591 ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);
594 icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
595 icmp6h->icmp6_code = 0;
596 icmp6h->icmp6_cksum = 0;
597 icmp6h->icmp6_mtu = htonl(mtu);
599 nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
600 nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
601 payload, payload_length,
603 icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
604 sizeof(struct icmp6hdr)
606 ipv6h->nexthdr, nskb->csum);
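/*
 * Generates an ICMP "fragmentation needed" (IPv4) or ICMPv6 "packet too big"
 * reply for 'skb' advertising 'mtu', and injects it back into the datapath as
 * if it had been received on 'vport'.  Returns true if a reply was sent;
 * false when a reply is not possible or not appropriate.
 */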
610 bool tnl_frag_needed(struct vport *vport,
611 const struct tnl_mutable_config *mutable,
612 struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
614 unsigned int eth_hdr_len = ETH_HLEN;
615 unsigned int total_length = 0, header_length = 0, payload_length;
616 struct ethhdr *eh, *old_eh = eth_hdr(skb);
617 struct sk_buff *nskb;
620 if (skb->protocol == htons(ETH_P_IP)) {
621 if (mtu < IP_MIN_MTU)
624 if (!ipv4_should_icmp(skb))
627 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
628 else if (skb->protocol == htons(ETH_P_IPV6)) {
629 if (mtu < IPV6_MIN_MTU)
		 * In theory we should do PMTUD on IPv6 multicast messages, but
		 * we don't have an address to send from, so just fragment.
636 if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
639 if (!ipv6_should_icmp(skb))
647 if (old_eh->h_proto == htons(ETH_P_8021Q))
648 eth_hdr_len = VLAN_ETH_HLEN;
650 payload_length = skb->len - eth_hdr_len;
651 if (skb->protocol == htons(ETH_P_IP)) {
652 header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
653 total_length = min_t(unsigned int, header_length +
654 payload_length, 576);
656 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
658 header_length = sizeof(struct ipv6hdr) +
659 sizeof(struct icmp6hdr);
660 total_length = min_t(unsigned int, header_length +
661 payload_length, IPV6_MIN_MTU);
665 payload_length = total_length - header_length;
667 nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
672 skb_reserve(nskb, NET_IP_ALIGN);
674 /* Ethernet / VLAN */
675 eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
676 memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
677 memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
678 nskb->protocol = eh->h_proto = old_eh->h_proto;
679 if (old_eh->h_proto == htons(ETH_P_8021Q)) {
680 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;
682 vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
683 vh->h_vlan_encapsulated_proto = skb->protocol;
685 vlan_set_tci(nskb, vlan_get_tci(skb));
686 skb_reset_mac_header(nskb);
689 if (skb->protocol == htons(ETH_P_IP))
690 ipv4_build_icmp(skb, nskb, mtu, payload_length);
691 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
693 ipv6_build_icmp(skb, nskb, mtu, payload_length);
	 * Assume that flow-based keys are symmetric with respect to input
	 * and output and use the key that we were going to put on the
	 * outgoing packet for the fake received packet. If the keys are
	 * not symmetric then PMTUD needs to be disabled, since we won't have
	 * any way of synthesizing packets.
703 if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
704 (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
705 OVS_CB(nskb)->tun_id = flow_key;
707 if (unlikely(compute_ip_summed(nskb, false))) {
712 vport_receive(vport, nskb);
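/* Computes the MTU available to the encapsulated frame (path MTU minus the
 * tunnel and VLAN overhead) and decides whether the outer header should set
 * DF.  When PMTUD applies and the packet exceeds the MTU, a "fragmentation
 * needed" reply is generated and false is returned so the packet is dropped. */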
717 static bool check_mtu(struct sk_buff *skb,
719 const struct tnl_mutable_config *mutable,
720 const struct rtable *rt, __be16 *frag_offp)
722 bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
723 bool pmtud = mutable->flags & TNL_F_PMTUD;
724 __be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
726 unsigned int packet_length = skb->len - ETH_HLEN;
728 /* Allow for one level of tagging in the packet length. */
729 if (!vlan_tx_tag_present(skb) &&
730 eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
731 packet_length -= VLAN_HLEN;
		/* The tag needs to go in the packet regardless of where it
		 * currently is, so subtract it from the MTU.
739 if (vlan_tx_tag_present(skb) ||
740 eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
741 vlan_header = VLAN_HLEN;
743 mtu = dst_mtu(&rt_dst(rt))
745 - mutable->tunnel_hlen
749 if (skb->protocol == htons(ETH_P_IP)) {
750 struct iphdr *iph = ip_hdr(skb);
753 frag_off = iph->frag_off & htons(IP_DF);
755 if (pmtud && iph->frag_off & htons(IP_DF)) {
756 mtu = max(mtu, IP_MIN_MTU);
758 if (packet_length > mtu &&
759 tnl_frag_needed(vport, mutable, skb, mtu,
760 OVS_CB(skb)->tun_id))
764 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
765 else if (skb->protocol == htons(ETH_P_IPV6)) {
766 /* IPv6 requires end hosts to do fragmentation
767 * if the packet is above the minimum MTU.
769 if (df_inherit && packet_length > IPV6_MIN_MTU)
770 frag_off = htons(IP_DF);
773 mtu = max(mtu, IPV6_MIN_MTU);
775 if (packet_length > mtu &&
776 tnl_frag_needed(vport, mutable, skb, mtu,
777 OVS_CB(skb)->tun_id))
783 *frag_offp = frag_off;
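/* Fills in the outer IPv4 header at 'header' and then lets the protocol's
 * build_header() hook append its own encapsulation header after it. */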
787 static void create_tunnel_header(const struct vport *vport,
788 const struct tnl_mutable_config *mutable,
789 const struct rtable *rt, void *header)
791 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
792 struct iphdr *iph = header;
795 iph->ihl = sizeof(struct iphdr) >> 2;
796 iph->frag_off = htons(IP_DF);
797 iph->protocol = tnl_vport->tnl_ops->ipproto;
798 iph->tos = mutable->tos;
799 iph->daddr = rt->rt_dst;
800 iph->saddr = rt->rt_src;
801 iph->ttl = mutable->ttl;
803 iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));
805 tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
808 static void *get_cached_header(const struct tnl_cache *cache)
810 return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
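/* A header cache is valid only while the route, hard header, flow and mutable
 * configuration that it was built from are all still current (and, on kernels
 * that need it, while the cache has not passed its expiration time). */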
813 static bool check_cache_valid(const struct tnl_cache *cache,
814 const struct tnl_mutable_config *mutable)
821 hh = rt_hh(cache->rt);
823 #ifdef NEED_CACHE_TIMEOUT
824 time_before(jiffies, cache->expiration) &&
827 atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
830 hh->hh_lock.sequence == cache->hh_seq &&
832 mutable->seq == cache->mutable_seq &&
833 (!is_internal_dev(rt_dst(cache->rt).dev) ||
834 (cache->flow && !cache->flow->dead));
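/* Drops a single vport's header cache if it is no longer valid.  The lock is
 * only tried, never waited on, so the cleaner cannot stall the transmit path. */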
837 static void __cache_cleaner(struct tnl_vport *tnl_vport)
839 const struct tnl_mutable_config *mutable =
840 rcu_dereference(tnl_vport->mutable);
841 const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);
843 if (cache && !check_cache_valid(cache, mutable) &&
844 spin_trylock_bh(&tnl_vport->cache_lock)) {
845 assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
846 spin_unlock_bh(&tnl_vport->cache_lock);
850 static void cache_cleaner(struct work_struct *work)
854 schedule_cache_cleaner();
857 for (i = 0; i < PORT_TABLE_SIZE; i++) {
858 struct hlist_node *n;
859 struct hlist_head *bucket;
860 struct tnl_vport *tnl_vport;
862 bucket = &port_table[i];
863 hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
864 __cache_cleaner(tnl_vport);
869 static void create_eth_hdr(struct tnl_cache *cache, struct hh_cache *hh)
871 void *cache_data = get_cached_header(cache);
878 hh_seq = read_seqbegin(&hh->hh_lock);
879 hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
880 memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
881 cache->hh_len = hh->hh_len;
882 } while (read_seqretry(&hh->hh_lock, hh_seq));
884 cache->hh_seq = hh_seq;
886 read_lock(&hh->hh_lock);
887 hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
888 memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
889 cache->hh_len = hh->hh_len;
890 read_unlock(&hh->hh_lock);
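/*
 * Builds a header cache for 'vport': the cached link-layer header followed by
 * a precomputed tunnel header, so that tnl_send() can prepend the whole thing
 * with a single copy.  If the egress device is an internal device, the
 * corresponding flow is looked up and cached as well so the packet can be
 * handed directly back to the datapath.
 */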
894 static struct tnl_cache *build_cache(struct vport *vport,
895 const struct tnl_mutable_config *mutable,
898 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
899 struct tnl_cache *cache;
904 if (!(mutable->flags & TNL_F_HDR_CACHE))
908 * If there is no entry in the ARP cache or if this device does not
	 * support hard header caching, just fall back to the IP stack.
	 * If the lock is contended, fall back to directly building the header.
918 * We're not going to help performance by sitting here spinning.
920 if (!spin_trylock(&tnl_vport->cache_lock))
923 cache = cache_dereference(tnl_vport);
924 if (check_cache_valid(cache, mutable))
929 cache_len = LL_RESERVED_SPACE(rt_dst(rt).dev) + mutable->tunnel_hlen;
931 cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
932 cache_len, GFP_ATOMIC);
936 create_eth_hdr(cache, hh);
937 cache_data = get_cached_header(cache) + cache->hh_len;
938 cache->len = cache->hh_len + mutable->tunnel_hlen;
940 create_tunnel_header(vport, mutable, rt, cache_data);
942 cache->mutable_seq = mutable->seq;
944 #ifdef NEED_CACHE_TIMEOUT
945 cache->expiration = jiffies + tnl_vport->cache_exp_interval;
948 if (is_internal_dev(rt_dst(rt).dev)) {
949 struct sw_flow_key flow_key;
950 struct vport *dst_vport;
954 struct sw_flow *flow;
956 dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
960 skb = alloc_skb(cache->len, GFP_ATOMIC);
964 __skb_put(skb, cache->len);
965 memcpy(skb->data, get_cached_header(cache), cache->len);
967 err = flow_extract(skb, dst_vport->port_no, &flow_key,
974 flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
975 &flow_key, flow_key_len);
983 assign_cache_rcu(vport, cache);
986 spin_unlock(&tnl_vport->cache_lock);
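/* Looks up a route to the tunnel's remote endpoint, using the older flowi
 * API on kernels before 2.6.39 and struct flowi4 on newer ones. */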
991 static struct rtable *__find_route(const struct tnl_mutable_config *mutable,
994 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
995 struct flowi fl = { .nl_u = { .ip4_u = {
996 .daddr = mutable->key.daddr,
997 .saddr = mutable->key.saddr,
1002 if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
1003 return ERR_PTR(-EADDRNOTAVAIL);
1007 struct flowi4 fl = { .daddr = mutable->key.daddr,
1008 .saddr = mutable->key.saddr,
1010 .flowi4_proto = ipproto };
1012 return ip_route_output_key(&init_net, &fl);
1016 static struct rtable *find_route(struct vport *vport,
1017 const struct tnl_mutable_config *mutable,
1018 u8 tos, struct tnl_cache **cache)
1020 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1021 struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);
1026 if (likely(tos == mutable->tos &&
1027 check_cache_valid(cur_cache, mutable))) {
1029 return cur_cache->rt;
1033 rt = __find_route(mutable, tnl_vport->tnl_ops->ipproto, tos);
1037 if (likely(tos == mutable->tos))
1038 *cache = build_cache(vport, mutable, rt);
1044 static bool need_linearize(const struct sk_buff *skb)
1048 if (unlikely(skb_shinfo(skb)->frag_list))
	 * Generally speaking, we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1, we know nobody else can
1054 * change them from underneath us and we can skip the linearization.
1056 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1057 if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
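/* Prepares 'skb' for encapsulation: guarantees enough headroom for the
 * link-layer and tunnel headers, segments GSO packets in software, and
 * completes any pending partial checksum.  Returns the (possibly new) skb, a
 * list of segments linked through skb->next, or an ERR_PTR() on failure. */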
1063 static struct sk_buff *handle_offloads(struct sk_buff *skb,
1064 const struct tnl_mutable_config *mutable,
1065 const struct rtable *rt)
1070 min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
1071 + mutable->tunnel_hlen
1072 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
1074 if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
1075 int head_delta = SKB_DATA_ALIGN(min_headroom -
1078 err = pskb_expand_head(skb, max_t(int, head_delta, 0),
1084 forward_ip_summed(skb, true);
1086 if (skb_is_gso(skb)) {
1087 struct sk_buff *nskb;
1089 nskb = skb_gso_segment(skb, 0);
1092 err = PTR_ERR(nskb);
1098 } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
1099 /* Pages aren't locked and could change at any time.
1100 * If this happens after we compute the checksum, the
		 * checksum will be wrong. We linearize now to avoid this.
1104 if (unlikely(need_linearize(skb))) {
1105 err = __skb_linearize(skb);
1110 err = skb_checksum_help(skb);
1115 set_ip_summed(skb, OVS_CSUM_NONE);
1122 return ERR_PTR(err);
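/* Pushes each fragment/segment in the linked list into the IP stack and
 * returns the number of inner payload bytes (excluding the tunnel header)
 * that were successfully sent. */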
1125 static int send_frags(struct sk_buff *skb,
1126 const struct tnl_mutable_config *mutable)
1132 struct sk_buff *next = skb->next;
1133 int frag_len = skb->len - mutable->tunnel_hlen;
1137 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
1139 err = ip_local_out(skb);
1141 if (unlikely(net_xmit_eval(err)))
1143 sent_len += frag_len;
	 * There's no point in continuing to send fragments once one has been
	 * dropped, so just free the rest. This may help improve the congestion
1152 * that caused the first packet to be dropped.
1154 tnl_free_linked_skbs(skb);
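/*
 * Common transmit path for all tunnel vports: validates the inner protocol
 * headers, selects the outer ToS and TTL, finds a route (or a prebuilt header
 * cache), performs offload and MTU handling, prepends the encapsulation
 * header and finally transmits, either through a local internal device or via
 * the IP stack.  Returns the number of bytes sent.
 */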
1158 int tnl_send(struct vport *vport, struct sk_buff *skb)
1160 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1161 const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
1163 enum vport_err_type err = VPORT_E_TX_ERROR;
1165 struct dst_entry *unattached_dst = NULL;
1166 struct tnl_cache *cache;
1168 __be16 frag_off = 0;
1173 /* Validate the protocol headers before we try to use them. */
1174 if (skb->protocol == htons(ETH_P_8021Q) &&
1175 !vlan_tx_tag_present(skb)) {
1176 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
1179 skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
1180 skb_set_network_header(skb, VLAN_ETH_HLEN);
1183 if (skb->protocol == htons(ETH_P_IP)) {
1184 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1185 + sizeof(struct iphdr))))
1188 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1189 else if (skb->protocol == htons(ETH_P_IPV6)) {
1190 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1191 + sizeof(struct ipv6hdr))))
1197 if (skb->protocol == htons(ETH_P_IP))
1198 inner_tos = ip_hdr(skb)->tos;
1199 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1200 else if (skb->protocol == htons(ETH_P_IPV6))
1201 inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
1206 if (mutable->flags & TNL_F_TOS_INHERIT)
1211 tos = INET_ECN_encapsulate(tos, inner_tos);
1214 rt = find_route(vport, mutable, tos, &cache);
1217 if (unlikely(!cache))
1218 unattached_dst = &rt_dst(rt);
1224 skb_clear_rxhash(skb);
1227 skb = handle_offloads(skb, mutable, rt);
1232 if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
1233 err = VPORT_E_TX_DROPPED;
1238 * If we are over the MTU, allow the IP stack to handle fragmentation.
	 * Fragmentation is a slow path anyway.
1241 if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
1243 unattached_dst = &rt_dst(rt);
1244 dst_hold(unattached_dst);
1251 ttl = ip4_dst_hoplimit(&rt_dst(rt));
1253 if (mutable->flags & TNL_F_TTL_INHERIT) {
1254 if (skb->protocol == htons(ETH_P_IP))
1255 ttl = ip_hdr(skb)->ttl;
1256 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1257 else if (skb->protocol == htons(ETH_P_IPV6))
1258 ttl = ipv6_hdr(skb)->hop_limit;
1264 struct sk_buff *next_skb = skb->next;
1267 if (unlikely(vlan_deaccel_tag(skb)))
1270 if (likely(cache)) {
1271 skb_push(skb, cache->len);
1272 memcpy(skb->data, get_cached_header(cache), cache->len);
1273 skb_reset_mac_header(skb);
1274 skb_set_network_header(skb, cache->hh_len);
1277 skb_push(skb, mutable->tunnel_hlen);
1278 create_tunnel_header(vport, mutable, rt, skb->data);
1279 skb_reset_network_header(skb);
1282 skb_dst_set(skb, dst_clone(unattached_dst));
1284 skb_dst_set(skb, unattached_dst);
1285 unattached_dst = NULL;
1288 skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));
1293 iph->frag_off = frag_off;
1294 ip_select_ident(iph, &rt_dst(rt), NULL);
1296 skb = tnl_vport->tnl_ops->update_header(vport, mutable,
1301 if (likely(cache)) {
1302 int orig_len = skb->len - cache->len;
1303 struct vport *cache_vport;
1305 cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
1306 skb->protocol = htons(ETH_P_IP);
1308 iph->tot_len = htons(skb->len - skb_network_offset(skb));
1312 if (unlikely(compute_ip_summed(skb, true))) {
1317 OVS_CB(skb)->flow = cache->flow;
1318 vport_receive(cache_vport, skb);
1319 sent_len += orig_len;
1323 skb->dev = rt_dst(rt).dev;
1324 xmit_err = dev_queue_xmit(skb);
1326 if (likely(net_xmit_eval(xmit_err) == 0))
1327 sent_len += orig_len;
1330 sent_len += send_frags(skb, mutable);
1336 if (unlikely(sent_len == 0))
1337 vport_record_error(vport, VPORT_E_TX_DROPPED);
1342 tnl_free_linked_skbs(skb);
1344 vport_record_error(vport, err);
1346 dst_release(unattached_dst);
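/* Netlink policy for the OVS_TUNNEL_ATTR_* options parsed by
 * tnl_set_config(). */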
1350 static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
1351 [OVS_TUNNEL_ATTR_FLAGS] = { .type = NLA_U32 },
1352 [OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
1353 [OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
1354 [OVS_TUNNEL_ATTR_OUT_KEY] = { .type = NLA_U64 },
1355 [OVS_TUNNEL_ATTR_IN_KEY] = { .type = NLA_U64 },
1356 [OVS_TUNNEL_ATTR_TOS] = { .type = NLA_U8 },
1357 [OVS_TUNNEL_ATTR_TTL] = { .type = NLA_U8 },
/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be
 * zeroed. */
1362 static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
1363 const struct vport *cur_vport,
1364 struct tnl_mutable_config *mutable)
1366 const struct vport *old_vport;
1367 const struct tnl_mutable_config *old_mutable;
1368 struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
1374 err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
1378 if (!a[OVS_TUNNEL_ATTR_FLAGS] || !a[OVS_TUNNEL_ATTR_DST_IPV4])
1381 mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;
1383 mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);
1384 if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) {
1385 if (ipv4_is_multicast(mutable->key.daddr))
1387 mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
1390 if (a[OVS_TUNNEL_ATTR_TOS]) {
1391 mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
1392 if (mutable->tos != RT_TOS(mutable->tos))
1396 if (a[OVS_TUNNEL_ATTR_TTL])
1397 mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);
1399 mutable->key.tunnel_type = tnl_ops->tunnel_type;
1400 if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
1401 mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
1402 mutable->flags |= TNL_F_IN_KEY_MATCH;
1404 mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
1405 mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
1408 if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
1409 mutable->flags |= TNL_F_OUT_KEY_ACTION;
1411 mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);
1413 mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
1414 if (mutable->tunnel_hlen < 0)
1415 return mutable->tunnel_hlen;
1417 mutable->tunnel_hlen += sizeof(struct iphdr);
1419 old_vport = port_table_lookup(&mutable->key, &old_mutable);
1420 if (old_vport && old_vport != cur_vport)
1424 if (ipv4_is_multicast(mutable->key.daddr)) {
1425 struct net_device *dev;
1428 rt = __find_route(mutable, tnl_ops->ipproto, mutable->tos);
1430 return -EADDRNOTAVAIL;
1431 dev = rt_dst(rt).dev;
1433 if (__in_dev_get_rtnl(dev) == NULL)
1434 return -EADDRNOTAVAIL;
1435 mutable->mlink = dev->ifindex;
1436 ip_mc_inc_group(__in_dev_get_rtnl(dev), mutable->key.daddr);
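/* Creates a tunnel vport from 'parms' using the protocol-specific 'tnl_ops',
 * seeds the IP fragmentation ID, applies the userspace options and adds the
 * new port to the hash table. */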
1442 struct vport *tnl_create(const struct vport_parms *parms,
1443 const struct vport_ops *vport_ops,
1444 const struct tnl_ops *tnl_ops)
1446 struct vport *vport;
1447 struct tnl_vport *tnl_vport;
1448 struct tnl_mutable_config *mutable;
1449 int initial_frag_id;
1452 vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
1453 if (IS_ERR(vport)) {
1454 err = PTR_ERR(vport);
1458 tnl_vport = tnl_vport_priv(vport);
1460 strcpy(tnl_vport->name, parms->name);
1461 tnl_vport->tnl_ops = tnl_ops;
1463 mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1466 goto error_free_vport;
1469 random_ether_addr(mutable->eth_addr);
1471 get_random_bytes(&initial_frag_id, sizeof(int));
1472 atomic_set(&tnl_vport->frag_id, initial_frag_id);
1474 err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
1476 goto error_free_mutable;
1478 spin_lock_init(&tnl_vport->cache_lock);
1480 #ifdef NEED_CACHE_TIMEOUT
1481 tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
1482 (net_random() % (MAX_CACHE_EXP / 2));
1485 rcu_assign_pointer(tnl_vport->mutable, mutable);
1487 port_table_add_port(vport);
1491 free_mutable_rtnl(mutable);
1496 return ERR_PTR(err);
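/* Replaces the options of an existing tunnel vport.  The new configuration is
 * published to readers via RCU, and the port is rehashed in the port table if
 * its lookup key changed. */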
1499 int tnl_set_options(struct vport *vport, struct nlattr *options)
1501 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1502 const struct tnl_mutable_config *old_mutable;
1503 struct tnl_mutable_config *mutable;
1506 mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1512 /* Copy fields whose values should be retained. */
1513 old_mutable = rtnl_dereference(tnl_vport->mutable);
1514 mutable->seq = old_mutable->seq + 1;
1515 memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);
1517 /* Parse the others configured by userspace. */
1518 err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);
1522 if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
1523 port_table_move_port(vport, mutable);
1525 assign_config_rcu(vport, mutable);
1530 free_mutable_rtnl(mutable);
1536 int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
1538 const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1539 const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);
1541 NLA_PUT_U32(skb, OVS_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC);
1542 NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr);
1544 if (!(mutable->flags & TNL_F_IN_KEY_MATCH))
1545 NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key);
1546 if (!(mutable->flags & TNL_F_OUT_KEY_ACTION))
1547 NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key);
1548 if (mutable->key.saddr)
1549 NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr);
1551 NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos);
1553 NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl);
1561 static void free_port_rcu(struct rcu_head *rcu)
1563 struct tnl_vport *tnl_vport = container_of(rcu,
1564 struct tnl_vport, rcu);
1566 free_cache((struct tnl_cache __force *)tnl_vport->cache);
	kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
1568 vport_free(tnl_vport_to_vport(tnl_vport));
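/* Destroys a tunnel vport.  The port is removed from the hash table
 * immediately; the cache, mutable configuration and vport itself are freed
 * after an RCU grace period via free_port_rcu(). */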
1571 void tnl_destroy(struct vport *vport)
1573 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1574 struct tnl_mutable_config *mutable;
1576 mutable = rtnl_dereference(tnl_vport->mutable);
1577 port_table_remove_port(vport);
1578 free_mutable_rtnl(mutable);
1579 call_rcu(&tnl_vport->rcu, free_port_rcu);
1582 int tnl_set_addr(struct vport *vport, const unsigned char *addr)
1584 struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1585 struct tnl_mutable_config *old_mutable, *mutable;
1587 old_mutable = rtnl_dereference(tnl_vport->mutable);
1588 mutable = kmemdup(old_mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
1592 old_mutable->mlink = 0;
1594 memcpy(mutable->eth_addr, addr, ETH_ALEN);
1595 assign_config_rcu(vport, mutable);
1600 const char *tnl_get_name(const struct vport *vport)
1602 const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1603 return tnl_vport->name;
1606 const unsigned char *tnl_get_addr(const struct vport *vport)
1608 const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1609 return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
1612 void tnl_free_linked_skbs(struct sk_buff *skb)
1615 struct sk_buff *next = skb->next;
	port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
1630 for (i = 0; i < PORT_TABLE_SIZE; i++)
1631 INIT_HLIST_HEAD(&port_table[i]);
1640 for (i = 0; i < PORT_TABLE_SIZE; i++) {
1641 struct tnl_vport *tnl_vport;
1642 struct hlist_head *hash_head;
1643 struct hlist_node *n;
1645 hash_head = &port_table[i];
1646 hlist_for_each_entry(tnl_vport, n, hash_head, hash_node) {