/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "checksum.h"
#include "datapath.h"
#include "tunnel.h"
#include "vlan.h"
#include "vport.h"
#include "vport-generic.h"
#include "vport-internal_dev.h"

#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets while
 * a longer time increases performance by reducing the frequency that the
 * cache needs to be rebuilt.  A variety of factors may cause the cache to be
 * invalidated before the expiration time but this is the maximum.  The time
 * is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
#endif
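
/* The expiration time is stamped into each cache entry when it is built (see
 * build_cache()) and checked with time_before() in check_cache_valid() below. */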

/*
 * Interval to check for and remove caches that are no longer valid.  Caches
 * are checked for validity before they are used for packet encapsulation and
 * old caches are removed at that time.  However, if no packets are sent through
 * the tunnel then the cache will never be destroyed.  Since it holds
 * references to a number of system objects, the cache will continue to use
 * system resources by not allowing those objects to be destroyed.  The cache
 * cleaner is periodically run to free invalid caches.  It does not
 * significantly affect system performance.  A lower interval will release
 * resources faster but will itself consume resources by requiring more frequent
 * checks.  A longer interval may result in messages being printed to the kernel
 * message buffer about unreleased resources.  The interval is expressed in
 * jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)

#define CACHE_DATA_ALIGN 16
#define PORT_TABLE_SIZE  1024

static struct hlist_head *port_table __read_mostly;
static int port_table_count;

static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);

/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int key_multicast_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;
static unsigned int null_ports __read_mostly;
static unsigned int multicast_ports __read_mostly;
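/* ovs_tnl_find_port() consults these counters so that it can skip the hash
 * table lookup entirely for any port class that currently has no members. */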
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

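/* From 3.1 the cached hardware header hangs off the neighbour entry rather
 * than the dst, so it must be looked up and validated: the neighbour has to
 * be reachable and actually have a cached header before we may use it. */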
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
static struct hh_cache *rt_hh(struct rtable *rt)
{
        struct neighbour *neigh = dst_get_neighbour_noref(&rt->dst);
        if (!neigh || !(neigh->nud_state & NUD_CONNECTED) ||
                        !neigh->hh.hh_len)
                return NULL;
        return &neigh->hh;
}
#else
#define rt_hh(rt) (rt_dst(rt).hh)
#endif

static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
        return vport_from_priv(tnl_vport);
}

/* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
 * cache_lock is held, so it is only for update side code.
 */
static struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
{
        return rcu_dereference_protected(tnl_vport->cache,
                                 lockdep_is_held(&tnl_vport->cache_lock));
}

static void schedule_cache_cleaner(void)
{
        schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}

static void free_cache(struct tnl_cache *cache)
{
        if (!cache)
                return;

        ovs_flow_put(cache->flow);
        ip_rt_put(cache->rt);
        kfree(cache);
}

static void free_config_rcu(struct rcu_head *rcu)
{
        struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
        kfree(c);
}

static void free_cache_rcu(struct rcu_head *rcu)
{
        struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
        free_cache(c);
}

/* Frees the portion of 'mutable' that requires RTNL and thus can't happen
 * within an RCU callback.  Fortunately this part doesn't require waiting for
 * an RCU grace period.
 */
static void free_mutable_rtnl(struct tnl_mutable_config *mutable)
{
        ASSERT_RTNL();
        if (ipv4_is_multicast(mutable->key.daddr) && mutable->mlink) {
                struct in_device *in_dev;
                in_dev = inetdev_by_index(port_key_get_net(&mutable->key), mutable->mlink);
                if (in_dev)
                        ip_mc_dec_group(in_dev, mutable->key.daddr);
        }
}

static void assign_config_rcu(struct vport *vport,
                              struct tnl_mutable_config *new_config)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *old_config;

        old_config = rtnl_dereference(tnl_vport->mutable);
        rcu_assign_pointer(tnl_vport->mutable, new_config);

        free_mutable_rtnl(old_config);
        call_rcu(&old_config->rcu, free_config_rcu);
}

static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *old_cache;

        old_cache = cache_dereference(tnl_vport);
        rcu_assign_pointer(tnl_vport->cache, new_cache);

        if (old_cache)
                call_rcu(&old_cache->rcu, free_cache_rcu);
}

static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
        bool is_multicast = ipv4_is_multicast(mutable->key.daddr);

        if (mutable->flags & TNL_F_IN_KEY_MATCH) {
                if (mutable->key.saddr)
                        return &local_remote_ports;
                else if (is_multicast)
                        return &multicast_ports;
                else
                        return &remote_ports;
        } else {
                if (mutable->key.saddr)
                        return &key_local_remote_ports;
                else if (is_multicast)
                        return &key_multicast_ports;
                else if (mutable->key.daddr)
                        return &key_remote_ports;
                else
                        return &null_ports;
        }
}

static u32 port_hash(const struct port_lookup_key *key)
{
        return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
}

static struct hlist_head *find_bucket(u32 hash)
{
        return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
}
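
/* PORT_TABLE_SIZE is a power of two, so the mask with PORT_TABLE_SIZE - 1 in
 * find_bucket() is equivalent to hash % PORT_TABLE_SIZE. */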

static void port_table_add_port(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable;
        u32 hash;

        if (port_table_count == 0)
                schedule_cache_cleaner();

        mutable = rtnl_dereference(tnl_vport->mutable);
        hash = port_hash(&mutable->key);
        hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
        port_table_count++;

        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}

static void port_table_move_port(struct vport *vport,
                      struct tnl_mutable_config *new_mutable)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        u32 hash;

        hash = port_hash(&new_mutable->key);
        hlist_del_init_rcu(&tnl_vport->hash_node);
        hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));

        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
        assign_config_rcu(vport, new_mutable);
        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}

static void port_table_remove_port(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

        hlist_del_init_rcu(&tnl_vport->hash_node);

        port_table_count--;
        if (port_table_count == 0)
                cancel_delayed_work_sync(&cache_cleaner_wq);

        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
}

static struct vport *port_table_lookup(struct port_lookup_key *key,
                                       const struct tnl_mutable_config **pmutable)
{
        struct hlist_node *n;
        struct hlist_head *bucket;
        u32 hash = port_hash(key);
        struct tnl_vport *tnl_vport;

        bucket = find_bucket(hash);

        hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
                struct tnl_mutable_config *mutable;

                mutable = rcu_dereference_rtnl(tnl_vport->mutable);
                if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
                        *pmutable = mutable;
                        return tnl_vport_to_vport(tnl_vport);
                }
        }

        return NULL;
}

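/* Lookup precedence: exact in_key matches first (with, then without, a local
 * address), then matches that wildcard in_key, then multicast ports (which
 * are keyed on the remote sender's address), and finally ports that are
 * bound to no addresses at all. */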
struct vport *ovs_tnl_find_port(struct net *net, __be32 saddr, __be32 daddr,
                                __be64 key, int tunnel_type,
                                const struct tnl_mutable_config **mutable)
{
        struct port_lookup_key lookup;
        struct vport *vport;
        bool is_multicast = ipv4_is_multicast(saddr);

        port_key_set_net(&lookup, net);
        lookup.saddr = saddr;
        lookup.daddr = daddr;

        /* First try for exact match on in_key. */
        lookup.in_key = key;
        lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
        if (!is_multicast && key_local_remote_ports) {
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;
        }
        if (key_remote_ports) {
                lookup.saddr = 0;
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;

                lookup.saddr = saddr;
        }

        /* Then try matches that wildcard in_key. */
        lookup.in_key = 0;
        lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
        if (!is_multicast && local_remote_ports) {
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;
        }
        if (remote_ports) {
                lookup.saddr = 0;
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;
        }

        if (is_multicast) {
                lookup.saddr = 0;
                lookup.daddr = saddr;
                if (key_multicast_ports) {
                        lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
                        lookup.in_key = key;
                        vport = port_table_lookup(&lookup, mutable);
                        if (vport)
                                return vport;
                }
                if (multicast_ports) {
                        lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
                        lookup.in_key = 0;
                        vport = port_table_lookup(&lookup, mutable);
                        if (vport)
                                return vport;
                }
        }

        if (null_ports) {
                lookup.daddr = 0;
                lookup.saddr = 0;
                lookup.tunnel_type = tunnel_type;
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;
        }
        return NULL;
}

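/* If the outer IP header arrived with the CE mark set, propagate congestion
 * into the inner IPv4/IPv6 header (RFC 6040 style ECN decapsulation). */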
static void ecn_decapsulate(struct sk_buff *skb)
{
        if (unlikely(INET_ECN_is_ce(OVS_CB(skb)->tun_key->ipv4_tos))) {
                __be16 protocol = skb->protocol;

                skb_set_network_header(skb, ETH_HLEN);

                if (protocol == htons(ETH_P_8021Q)) {
                        if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                                return;

                        protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                        skb_set_network_header(skb, VLAN_ETH_HLEN);
                }

                if (protocol == htons(ETH_P_IP)) {
                        if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                            + sizeof(struct iphdr))))
                                return;

                        IP_ECN_set_ce(ip_hdr(skb));
                }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (protocol == htons(ETH_P_IPV6)) {
                        if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                            + sizeof(struct ipv6hdr))))
                                return;

                        IP6_ECN_set_ce(ipv6_hdr(skb));
                }
#endif
        }
}

/**
 *      ovs_tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb)
{
        struct ethhdr *eh;

        skb_reset_mac_header(skb);
        eh = eth_hdr(skb);

        if (likely(ntohs(eh->h_proto) >= 1536))
                skb->protocol = eh->h_proto;
        else
                skb->protocol = htons(ETH_P_802_2);

        skb_dst_drop(skb);
        nf_reset(skb);
        skb_clear_rxhash(skb);
        secpath_reset(skb);

        ecn_decapsulate(skb);
        vlan_set_tci(skb, 0);

        if (unlikely(compute_ip_summed(skb, false))) {
                kfree_skb(skb);
                return;
        }

        ovs_vport_receive(vport, skb);
}

static bool check_ipv4_address(__be32 addr)
{
        if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
            || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
                return false;

        return true;
}

static bool ipv4_should_icmp(struct sk_buff *skb)
{
        struct iphdr *old_iph = ip_hdr(skb);

        /* Don't respond to L2 broadcast. */
        if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
                return false;

        /* Don't respond to L3 broadcast or invalid addresses. */
        if (!check_ipv4_address(old_iph->daddr) ||
            !check_ipv4_address(old_iph->saddr))
                return false;

        /* Only respond to the first fragment. */
        if (old_iph->frag_off & htons(IP_OFFSET))
                return false;

        /* Don't respond to ICMP error messages. */
        if (old_iph->protocol == IPPROTO_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
                                                (old_iph->ihl << 2) +
                                                offsetof(struct icmphdr, type) -
                                                skb->data, sizeof(icmp_type),
                                                &icmp_type);

                if (!icmp_typep)
                        return false;

                if (*icmp_typep > NR_ICMP_TYPES
                        || (*icmp_typep <= ICMP_PARAMETERPROB
                                && *icmp_typep != ICMP_ECHOREPLY
                                && *icmp_typep != ICMP_ECHO))
                        return false;
        }

        return true;
}

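/* Builds the IPv4 + ICMP "fragmentation needed" reply in nskb: addresses are
 * swapped relative to the offending packet and up to payload_length bytes of
 * it are quoted back, with the checksum accumulated as the payload is copied. */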
static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                            unsigned int mtu, unsigned int payload_length)
{
        struct iphdr *iph, *old_iph = ip_hdr(skb);
        struct icmphdr *icmph;
        u8 *payload;

        iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
        icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
        payload = skb_put(nskb, payload_length);

        /* IP */
        iph->version            =       4;
        iph->ihl                =       sizeof(struct iphdr) >> 2;
        iph->tos                =       (old_iph->tos & IPTOS_TOS_MASK) |
                                        IPTOS_PREC_INTERNETCONTROL;
        iph->tot_len            =       htons(sizeof(struct iphdr)
                                              + sizeof(struct icmphdr)
                                              + payload_length);
        get_random_bytes(&iph->id, sizeof(iph->id));
        iph->frag_off           =       0;
        iph->ttl                =       IPDEFTTL;
        iph->protocol           =       IPPROTO_ICMP;
        iph->daddr              =       old_iph->saddr;
        iph->saddr              =       old_iph->daddr;

        ip_send_check(iph);

        /* ICMP */
        icmph->type             =       ICMP_DEST_UNREACH;
        icmph->code             =       ICMP_FRAG_NEEDED;
        icmph->un.gateway       =       htonl(mtu);
        icmph->checksum         =       0;

        nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmph->checksum = csum_fold(nskb->csum);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
        struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
        int addr_type;
        int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
        u8 nexthdr = ipv6_hdr(skb)->nexthdr;
        __be16 frag_off;

        /* Check source address is valid. */
        addr_type = ipv6_addr_type(&old_ipv6h->saddr);
        if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
                return false;

        /* Don't reply to unspecified addresses. */
        if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
                return false;

        /* Don't respond to ICMP error messages. */
        payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr, &frag_off);
        if (payload_off < 0)
                return false;

        if (nexthdr == NEXTHDR_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, payload_off +
                                                offsetof(struct icmp6hdr,
                                                        icmp6_type),
                                                sizeof(icmp_type), &icmp_type);

                if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
                        return false;
        }

        return true;
}

static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                            unsigned int mtu, unsigned int payload_length)
{
        struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
        struct icmp6hdr *icmp6h;
        u8 *payload;

        ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
        icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
        payload = skb_put(nskb, payload_length);

        /* IPv6 */
        ipv6h->version          =       6;
        ipv6h->priority         =       0;
        memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
        ipv6h->payload_len      =       htons(sizeof(struct icmp6hdr)
                                              + payload_length);
        ipv6h->nexthdr          =       NEXTHDR_ICMP;
        ipv6h->hop_limit        =       IPV6_DEFAULT_HOPLIMIT;
        ipv6h->daddr            =       old_ipv6h->saddr;
        ipv6h->saddr            =       old_ipv6h->daddr;

        /* ICMPv6 */
        icmp6h->icmp6_type      =       ICMPV6_PKT_TOOBIG;
        icmp6h->icmp6_code      =       0;
        icmp6h->icmp6_cksum     =       0;
        icmp6h->icmp6_mtu       =       htonl(mtu);

        nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
                                                sizeof(struct icmp6hdr)
                                                + payload_length,
                                                ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */

bool ovs_tnl_frag_needed(struct vport *vport,
                         const struct tnl_mutable_config *mutable,
                         struct sk_buff *skb, unsigned int mtu)
{
        unsigned int eth_hdr_len = ETH_HLEN;
        unsigned int total_length = 0, header_length = 0, payload_length;
        struct ethhdr *eh, *old_eh = eth_hdr(skb);
        struct sk_buff *nskb;

        /* Sanity check */
        if (skb->protocol == htons(ETH_P_IP)) {
                if (mtu < IP_MIN_MTU)
                        return false;

                if (!ipv4_should_icmp(skb))
                        return true;
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (mtu < IPV6_MIN_MTU)
                        return false;

                /*
                 * In theory we should do PMTUD on IPv6 multicast messages but
                 * we don't have an address to send from so just fragment.
                 */
                if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
                        return false;

                if (!ipv6_should_icmp(skb))
                        return true;
        }
#endif
        else
                return false;

        /* Allocate */
        if (old_eh->h_proto == htons(ETH_P_8021Q))
                eth_hdr_len = VLAN_ETH_HLEN;

        payload_length = skb->len - eth_hdr_len;
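        /* 576 bytes is the minimum datagram size every IPv4 host must accept
         * (RFC 791); IPV6_MIN_MTU plays the same role in the IPv6 branch. */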
        if (skb->protocol == htons(ETH_P_IP)) {
                header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
                total_length = min_t(unsigned int, header_length +
                                                   payload_length, 576);
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else {
                header_length = sizeof(struct ipv6hdr) +
                                sizeof(struct icmp6hdr);
                total_length = min_t(unsigned int, header_length +
                                                  payload_length, IPV6_MIN_MTU);
        }
#endif

        payload_length = total_length - header_length;

        nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
                             payload_length);
        if (!nskb)
                return false;

        skb_reserve(nskb, NET_IP_ALIGN);

        /* Ethernet / VLAN */
        eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
        memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
        memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
        nskb->protocol = eh->h_proto = old_eh->h_proto;
        if (old_eh->h_proto == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

                vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
                vh->h_vlan_encapsulated_proto = skb->protocol;
        } else
                vlan_set_tci(nskb, vlan_get_tci(skb));
        skb_reset_mac_header(nskb);

        /* Protocol */
        if (skb->protocol == htons(ETH_P_IP))
                ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else
                ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

        if (unlikely(compute_ip_summed(nskb, false))) {
                kfree_skb(nskb);
                return false;
        }

        ovs_vport_receive(vport, nskb);

        return true;
}

static bool check_mtu(struct sk_buff *skb,
                      struct vport *vport,
                      const struct tnl_mutable_config *mutable,
                      const struct rtable *rt, __be16 *frag_offp,
                      int tunnel_hlen)
{
        bool df_inherit;
        bool pmtud;
        __be16 frag_off;
        int mtu = 0;
        unsigned int packet_length = skb->len - ETH_HLEN;

        if (OVS_CB(skb)->tun_key->ipv4_dst) {
                df_inherit = false;
                pmtud = false;
                frag_off = OVS_CB(skb)->tun_key->tun_flags & OVS_FLOW_TNL_F_DONT_FRAGMENT ?
                                  htons(IP_DF) : 0;
        } else {
                df_inherit = mutable->flags & TNL_F_DF_INHERIT;
                pmtud = mutable->flags & TNL_F_PMTUD;
                frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
        }

        /* Allow for one level of tagging in the packet length. */
        if (!vlan_tx_tag_present(skb) &&
            eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
                packet_length -= VLAN_HLEN;

        if (pmtud) {
                int vlan_header = 0;

                /* The tag needs to go in the packet regardless of where it
                 * currently is, so subtract it from the MTU.
                 */
                if (vlan_tx_tag_present(skb) ||
                    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
                        vlan_header = VLAN_HLEN;

                mtu = dst_mtu(&rt_dst(rt))
                        - ETH_HLEN
                        - tunnel_hlen
                        - vlan_header;
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);

                if (df_inherit)
                        frag_off = iph->frag_off & htons(IP_DF);

                if (pmtud && iph->frag_off & htons(IP_DF)) {
                        mtu = max(mtu, IP_MIN_MTU);

                        if (packet_length > mtu &&
                            ovs_tnl_frag_needed(vport, mutable, skb, mtu))
                                return false;
                }
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                /* IPv6 requires end hosts to do fragmentation
                 * if the packet is above the minimum MTU.
                 */
                if (df_inherit && packet_length > IPV6_MIN_MTU)
                        frag_off = htons(IP_DF);

                if (pmtud) {
                        mtu = max(mtu, IPV6_MIN_MTU);

                        if (packet_length > mtu &&
                            ovs_tnl_frag_needed(vport, mutable, skb, mtu))
                                return false;
                }
        }
#endif

        *frag_offp = frag_off;
        return true;
}

static void create_tunnel_header(const struct vport *vport,
                                 const struct tnl_mutable_config *mutable,
                                 const struct ovs_key_ipv4_tunnel *tun_key,
                                 const struct rtable *rt, void *header)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct iphdr *iph = header;

        iph->version    = 4;
        iph->ihl        = sizeof(struct iphdr) >> 2;
        iph->frag_off   = htons(IP_DF);
        iph->protocol   = tnl_vport->tnl_ops->ipproto;
        iph->tos        = mutable->tos;
        iph->daddr      = rt->rt_dst;
        iph->saddr      = rt->rt_src;
        iph->ttl        = mutable->ttl;
        if (!iph->ttl)
                iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));

        tnl_vport->tnl_ops->build_header(vport, mutable, tun_key, iph + 1);
}

static void *get_cached_header(const struct tnl_cache *cache)
{
        return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}

#ifdef HAVE_RT_GENID
static inline int rt_genid(struct net *net)
{
        return atomic_read(&net->ipv4.rt_genid);
}
#endif

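/* A cache entry is usable only while every input it was built from is still
 * current: the hardware header exists, the entry hasn't timed out, the
 * routing generation and hh sequence numbers are unchanged, the port's
 * mutable config hasn't been replaced, and any cached flow is still alive. */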
static bool check_cache_valid(const struct tnl_cache *cache,
                              const struct tnl_mutable_config *mutable)
{
        struct hh_cache *hh;

        if (!cache)
                return false;

        hh = rt_hh(cache->rt);
        return hh &&
#ifdef NEED_CACHE_TIMEOUT
                time_before(jiffies, cache->expiration) &&
#endif
#ifdef HAVE_RT_GENID
                rt_genid(dev_net(rt_dst(cache->rt).dev)) == cache->rt->rt_genid &&
#endif
#ifdef HAVE_HH_SEQ
                hh->hh_lock.sequence == cache->hh_seq &&
#endif
                mutable->seq == cache->mutable_seq &&
                (!ovs_is_internal_dev(rt_dst(cache->rt).dev) ||
                (cache->flow && !cache->flow->dead));
}

static void __cache_cleaner(struct tnl_vport *tnl_vport)
{
        const struct tnl_mutable_config *mutable =
                        rcu_dereference(tnl_vport->mutable);
        const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

        if (cache && !check_cache_valid(cache, mutable) &&
            spin_trylock_bh(&tnl_vport->cache_lock)) {
                assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
                spin_unlock_bh(&tnl_vport->cache_lock);
        }
}

static void cache_cleaner(struct work_struct *work)
{
        int i;

        schedule_cache_cleaner();

        rcu_read_lock();
        for (i = 0; i < PORT_TABLE_SIZE; i++) {
                struct hlist_node *n;
                struct hlist_head *bucket;
                struct tnl_vport *tnl_vport;

                bucket = &port_table[i];
                hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
                        __cache_cleaner(tnl_vport);
        }
        rcu_read_unlock();
}

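/* Snapshot the cached L2 header out of the neighbour's hh_cache, under its
 * seqlock on kernels that have one (HAVE_HH_SEQ) or its read lock otherwise,
 * so the copy cannot be torn by a concurrent update. */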
static void create_eth_hdr(struct tnl_cache *cache, struct hh_cache *hh)
{
        void *cache_data = get_cached_header(cache);
        int hh_off;

#ifdef HAVE_HH_SEQ
        unsigned hh_seq;

        do {
                hh_seq = read_seqbegin(&hh->hh_lock);
                hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
                memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
                cache->hh_len = hh->hh_len;
        } while (read_seqretry(&hh->hh_lock, hh_seq));

        cache->hh_seq = hh_seq;
#else
        read_lock(&hh->hh_lock);
        hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
        memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
        cache->hh_len = hh->hh_len;
        read_unlock(&hh->hh_lock);
#endif
}

static struct tnl_cache *build_cache(struct vport *vport,
                                     const struct tnl_mutable_config *mutable,
                                     struct rtable *rt)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        static const struct ovs_key_ipv4_tunnel tun_key;
        struct tnl_cache *cache;
        void *cache_data;
        int cache_len;
        struct hh_cache *hh;
        int tunnel_hlen;

        if (!(mutable->flags & TNL_F_HDR_CACHE))
                return NULL;

        tunnel_hlen = tnl_vport->tnl_ops->hdr_len(mutable, &tun_key);
        if (tunnel_hlen < 0)
                return NULL;

        tunnel_hlen += sizeof(struct iphdr);

        /*
         * If there is no entry in the ARP cache or if this device does not
         * support hard header caching, just fall back to the IP stack.
         */

        hh = rt_hh(rt);
        if (!hh)
                return NULL;

        /*
         * If the lock is contended, fall back to directly building the header.
         * We're not going to help performance by sitting here spinning.
         */
        if (!spin_trylock(&tnl_vport->cache_lock))
                return NULL;

        cache = cache_dereference(tnl_vport);
        if (check_cache_valid(cache, mutable))
                goto unlock;
        else
                cache = NULL;

        cache_len = LL_RESERVED_SPACE(rt_dst(rt).dev) + tunnel_hlen;

        cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
                        cache_len, GFP_ATOMIC);
        if (!cache)
                goto unlock;

        create_eth_hdr(cache, hh);
        cache_data = get_cached_header(cache) + cache->hh_len;
        cache->len = cache->hh_len + tunnel_hlen;

        create_tunnel_header(vport, mutable, &tun_key, rt, cache_data);

        cache->mutable_seq = mutable->seq;
        cache->rt = rt;
#ifdef NEED_CACHE_TIMEOUT
        cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif

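        /* If the tunnel egresses via an OVS internal device, pre-resolve the
         * flow that the cached header would hit so the send path can hand
         * packets straight to that datapath without re-extracting the key. */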
        if (ovs_is_internal_dev(rt_dst(rt).dev)) {
                struct sw_flow_key flow_key;
                struct vport *dst_vport;
                struct sk_buff *skb;
                int err;
                int flow_key_len;
                struct sw_flow *flow;

                dst_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
                if (!dst_vport)
                        goto done;

                skb = alloc_skb(cache->len, GFP_ATOMIC);
                if (!skb)
                        goto done;

                __skb_put(skb, cache->len);
                memcpy(skb->data, get_cached_header(cache), cache->len);

                err = ovs_flow_extract(skb, dst_vport->port_no, &flow_key,
                                       &flow_key_len);

                consume_skb(skb);
                if (err)
                        goto done;

                flow = ovs_flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
                                           &flow_key, flow_key_len);
                if (flow) {
                        cache->flow = flow;
                        ovs_flow_hold(flow);
                }
        }

done:
        assign_cache_rcu(vport, cache);

unlock:
        spin_unlock(&tnl_vport->cache_lock);

        return cache;
}

static struct rtable *__find_route(const struct tnl_mutable_config *mutable,
                                   __be32 saddr, __be32 daddr, u8 ipproto,
                                   u8 tos)
{
        /* The tunnel configuration keeps the DSCP part of the TOS bits, but
         * the Linux routing code expects only the RT_TOS bits. */

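        /* ip_route_output_key() changed in 2.6.39: older kernels take a
         * struct flowi and return the route through an out parameter, newer
         * ones take a struct flowi4 and return the rtable (or an ERR_PTR). */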
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
        struct flowi fl = { .nl_u = { .ip4_u = {
                                        .daddr = daddr,
                                        .saddr = saddr,
                                        .tos   = RT_TOS(tos) } },
                                        .proto = ipproto };
        struct rtable *rt;

        if (unlikely(ip_route_output_key(port_key_get_net(&mutable->key), &rt, &fl)))
                return ERR_PTR(-EADDRNOTAVAIL);

        return rt;
#else
        struct flowi4 fl = { .daddr = daddr,
                             .saddr = saddr,
                             .flowi4_tos = RT_TOS(tos),
                             .flowi4_proto = ipproto };

        return ip_route_output_key(port_key_get_net(&mutable->key), &fl);
#endif
}

static struct rtable *find_route(struct vport *vport,
                                 const struct tnl_mutable_config *mutable,
                                 __be32 saddr, __be32 daddr, u8 tos,
                                 struct tnl_cache **cache)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

        *cache = NULL;
        tos = RT_TOS(tos);

        if (tos == RT_TOS(mutable->tos) &&
            check_cache_valid(cur_cache, mutable)) {
                *cache = cur_cache;
                return cur_cache->rt;
        } else {
                struct rtable *rt;

                rt = __find_route(mutable, saddr, daddr,
                                  tnl_vport->tnl_ops->ipproto, tos);
                if (IS_ERR(rt))
                        return NULL;
                if (likely(tos == RT_TOS(mutable->tos)))
                        *cache = build_cache(vport, mutable, rt);

                return rt;
        }
}

static bool need_linearize(const struct sk_buff *skb)
{
        int i;

        if (unlikely(skb_shinfo(skb)->frag_list))
                return true;

        /*
         * Generally speaking we should linearize if there are paged frags.
         * However, if all of the refcounts are 1 we know nobody else can
         * change them from underneath us and we can skip the linearization.
         */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
                        return true;

        return false;
}

static struct sk_buff *handle_offloads(struct sk_buff *skb,
                                       const struct tnl_mutable_config *mutable,
                                       const struct rtable *rt,
                                       int tunnel_hlen)
{
        int min_headroom;
        int err;

        min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
                        + tunnel_hlen
                        + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) +
                                                16);
                err = pskb_expand_head(skb, max_t(int, head_delta, 0),
                                        0, GFP_ATOMIC);
                if (unlikely(err))
                        goto error_free;
        }

        forward_ip_summed(skb, true);

        if (skb_is_gso(skb)) {
                struct sk_buff *nskb;

                nskb = skb_gso_segment(skb, 0);
                if (IS_ERR(nskb)) {
                        kfree_skb(skb);
                        err = PTR_ERR(nskb);
                        goto error;
                }

                consume_skb(skb);
                skb = nskb;
        } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
                /* Pages aren't locked and could change at any time.
                 * If this happens after we compute the checksum, the
                 * checksum will be wrong.  We linearize now to avoid
                 * this problem.
                 */
                if (unlikely(need_linearize(skb))) {
                        err = __skb_linearize(skb);
                        if (unlikely(err))
                                goto error_free;
                }

                err = skb_checksum_help(skb);
                if (unlikely(err))
                        goto error_free;
        }

        set_ip_summed(skb, OVS_CSUM_NONE);

        return skb;

error_free:
        kfree_skb(skb);
error:
        return ERR_PTR(err);
}

static int send_frags(struct sk_buff *skb,
                      int tunnel_hlen)
{
        int sent_len;

        sent_len = 0;
        while (skb) {
                struct sk_buff *next = skb->next;
                int frag_len = skb->len - tunnel_hlen;
                int err;

                skb->next = NULL;
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

                err = ip_local_out(skb);
                skb = next;
                if (unlikely(net_xmit_eval(err)))
                        goto free_frags;
                sent_len += frag_len;
        }

        return sent_len;

free_frags:
        /*
         * There's no point in continuing to send fragments once one has been
         * dropped so just free the rest.  This may help improve the congestion
         * that caused the first packet to be dropped.
         */
        ovs_tnl_free_linked_skbs(skb);
        return sent_len;
}

int ovs_tnl_send(struct vport *vport, struct sk_buff *skb)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
        enum vport_err_type err = VPORT_E_TX_ERROR;
        struct rtable *rt;
        struct dst_entry *unattached_dst = NULL;
        struct tnl_cache *cache;
        struct ovs_key_ipv4_tunnel tun_key;
        int sent_len = 0;
        int tunnel_hlen;
        __be16 frag_off = 0;
        __be32 daddr;
        __be32 saddr;
        u8 ttl;
        u8 tos;

        /* Validate the protocol headers before we try to use them. */
        if (skb->protocol == htons(ETH_P_8021Q) &&
            !vlan_tx_tag_present(skb)) {
                if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                        goto error_free;

                skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                skb_set_network_header(skb, VLAN_ETH_HLEN);
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                    + sizeof(struct iphdr))))
                        skb->protocol = 0;
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                    + sizeof(struct ipv6hdr))))
                        skb->protocol = 0;
        }
#endif

        /* If OVS_CB(skb)->tun_key is NULL, point it at the local tun_key here,
         * and zero it out.
         */
        if (!OVS_CB(skb)->tun_key) {
                memset(&tun_key, 0, sizeof(tun_key));
                OVS_CB(skb)->tun_key = &tun_key;
        }

        tunnel_hlen = tnl_vport->tnl_ops->hdr_len(mutable, OVS_CB(skb)->tun_key);
        if (unlikely(tunnel_hlen < 0)) {
                err = VPORT_E_TX_DROPPED;
                goto error_free;
        }
        tunnel_hlen += sizeof(struct iphdr);

        if (OVS_CB(skb)->tun_key->ipv4_dst) {
                daddr = OVS_CB(skb)->tun_key->ipv4_dst;
                saddr = OVS_CB(skb)->tun_key->ipv4_src;
                tos = OVS_CB(skb)->tun_key->ipv4_tos;
                ttl = OVS_CB(skb)->tun_key->ipv4_ttl;
        } else {
                u8 inner_tos;
                daddr = mutable->key.daddr;
                saddr = mutable->key.saddr;

                if (unlikely(!daddr)) {
                        /* Trying to send a packet from a null port without
                         * tunnel info?  Drop this packet. */
                        err = VPORT_E_TX_DROPPED;
                        goto error_free;
                }

                /* ToS */
                if (skb->protocol == htons(ETH_P_IP))
                        inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
                else
                        inner_tos = 0;

                if (mutable->flags & TNL_F_TOS_INHERIT)
                        tos = inner_tos;
                else
                        tos = mutable->tos;

                tos = INET_ECN_encapsulate(tos, inner_tos);

                /* TTL */
                ttl = mutable->ttl;
                if (mutable->flags & TNL_F_TTL_INHERIT) {
                        if (skb->protocol == htons(ETH_P_IP))
                                ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                        else if (skb->protocol == htons(ETH_P_IPV6))
                                ttl = ipv6_hdr(skb)->hop_limit;
#endif
                }
        }

        /* Route lookup */
        rt = find_route(vport, mutable, saddr, daddr, tos, &cache);
        if (unlikely(!rt))
                goto error_free;
        if (unlikely(!cache))
                unattached_dst = &rt_dst(rt);

        /* Reset SKB */
        nf_reset(skb);
        secpath_reset(skb);
        skb_dst_drop(skb);
        skb_clear_rxhash(skb);

        /* Offloading */
        skb = handle_offloads(skb, mutable, rt, tunnel_hlen);
        if (IS_ERR(skb))
                goto error;

        /* MTU */
        if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off, tunnel_hlen))) {
                err = VPORT_E_TX_DROPPED;
                goto error_free;
        }

        /*
         * If we are over the MTU, allow the IP stack to handle fragmentation.
         * Fragmentation is a slow path anyway.
         */
1324         if (unlikely(skb->len + tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
1325                      cache)) {
1326                 unattached_dst = &rt_dst(rt);
1327                 dst_hold(unattached_dst);
1328                 cache = NULL;
1329         }
1330
1331         /* TTL Fixup. */
1332         if (!OVS_CB(skb)->tun_key->ipv4_dst) {
1333                 if (!(mutable->flags & TNL_F_TTL_INHERIT)) {
1334                         if (!ttl)
1335                                 ttl = ip4_dst_hoplimit(&rt_dst(rt));
1336                 }
1337         }
1338
1339         while (skb) {
1340                 struct iphdr *iph;
1341                 struct sk_buff *next_skb = skb->next;
1342                 skb->next = NULL;
1343
1344                 if (unlikely(vlan_deaccel_tag(skb)))
1345                         goto next;
1346
1347                 if (likely(cache)) {
1348                         skb_push(skb, cache->len);
1349                         memcpy(skb->data, get_cached_header(cache), cache->len);
1350                         skb_reset_mac_header(skb);
1351                         skb_set_network_header(skb, cache->hh_len);
1352
1353                 } else {
1354                         skb_push(skb, tunnel_hlen);
1355                         create_tunnel_header(vport, mutable, OVS_CB(skb)->tun_key, rt, skb->data);
1356                         skb_reset_network_header(skb);
1357
1358                         if (next_skb)
1359                                 skb_dst_set(skb, dst_clone(unattached_dst));
1360                         else {
1361                                 skb_dst_set(skb, unattached_dst);
1362                                 unattached_dst = NULL;
1363                         }
1364                 }
1365                 skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));
1366
1367                 iph = ip_hdr(skb);
1368                 iph->tos = tos;
1369                 iph->ttl = ttl;
1370                 iph->frag_off = frag_off;
1371                 ip_select_ident(iph, &rt_dst(rt), NULL);
1372
1373                 skb = tnl_vport->tnl_ops->update_header(vport, mutable,
1374                                                         &rt_dst(rt), skb, tunnel_hlen);
1375                 if (unlikely(!skb))
1376                         goto next;
1377
1378                 if (likely(cache)) {
1379                         int orig_len = skb->len - cache->len;
1380                         struct vport *cache_vport;
1381
1382                         cache_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
1383                         skb->protocol = htons(ETH_P_IP);
1384                         iph = ip_hdr(skb);
1385                         iph->tot_len = htons(skb->len - skb_network_offset(skb));
1386                         ip_send_check(iph);
1387
1388                         if (cache_vport) {
1389                                 if (unlikely(compute_ip_summed(skb, true))) {
1390                                         kfree_skb(skb);
1391                                         goto next;
1392                                 }
1393
1394                                 OVS_CB(skb)->flow = cache->flow;
1395                                 ovs_vport_receive(cache_vport, skb);
1396                                 sent_len += orig_len;
1397                         } else {
1398                                 int xmit_err;
1399
1400                                 skb->dev = rt_dst(rt).dev;
1401                                 xmit_err = dev_queue_xmit(skb);
1402
1403                                 if (likely(net_xmit_eval(xmit_err) == 0))
1404                                         sent_len += orig_len;
1405                         }
1406                 } else
1407                         sent_len += send_frags(skb, tunnel_hlen);
1408
1409 next:
1410                 skb = next_skb;
1411         }
1412
1413         if (unlikely(sent_len == 0))
1414                 ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
1415
1416         goto out;
1417
1418 error_free:
1419         ovs_tnl_free_linked_skbs(skb);
1420 error:
1421         ovs_vport_record_error(vport, err);
1422 out:
1423         dst_release(unattached_dst);
1424         return sent_len;
1425 }
1426
1427 static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
1428         [OVS_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
1429         [OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
1430         [OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
1431         [OVS_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
1432         [OVS_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
1433         [OVS_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
1434         [OVS_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
1435 };
1436
1437 /* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be
1438  * zeroed. */
1439 static int tnl_set_config(struct net *net, struct nlattr *options,
1440                           const struct tnl_ops *tnl_ops,
1441                           const struct vport *cur_vport,
1442                           struct tnl_mutable_config *mutable)
1443 {
1444         const struct vport *old_vport;
1445         const struct tnl_mutable_config *old_mutable;
1446         struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
1447         int err;
1448
1449         port_key_set_net(&mutable->key, net);
1450         mutable->key.tunnel_type = tnl_ops->tunnel_type;
1451         if (!options)
1452                 goto out;
1453
1454         err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
1455         if (err)
1456                 return err;
1457
1458         if (!a[OVS_TUNNEL_ATTR_FLAGS] || !a[OVS_TUNNEL_ATTR_DST_IPV4])
1459                 return -EINVAL;
1460
1461         mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;
1462         mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);
1463
1464         if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) {
1465                 if (ipv4_is_multicast(mutable->key.daddr))
1466                         return -EINVAL;
1467                 mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
1468         }
1469
1470         if (a[OVS_TUNNEL_ATTR_TOS]) {
1471                 mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
1472                 /* Reject ToS config with ECN bits set. */
1473                 if (mutable->tos & INET_ECN_MASK)
1474                         return -EINVAL;
1475         }
1476
1477         if (a[OVS_TUNNEL_ATTR_TTL])
1478                 mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);
1479
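        /* Without a fixed in_key the port accepts any key and matching is
         * done per-flow (TNL_F_IN_KEY_MATCH); otherwise only the given key
         * is accepted. */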
1480         if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
1481                 mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
1482                 mutable->flags |= TNL_F_IN_KEY_MATCH;
1483         } else {
1484                 mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
1485                 mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
1486         }
1487
1488         if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
1489                 mutable->flags |= TNL_F_OUT_KEY_ACTION;
1490         else
1491                 mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);
1492
1493         mutable->mlink = 0;
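        /* A multicast destination requires joining the group on the route's
         * egress device so that encapsulated packets are delivered to us. */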
1494         if (ipv4_is_multicast(mutable->key.daddr)) {
1495                 struct net_device *dev;
1496                 struct rtable *rt;
1497
1498                 rt = __find_route(mutable, mutable->key.saddr, mutable->key.daddr,
1499                                   tnl_ops->ipproto, mutable->tos);
1500                 if (IS_ERR(rt))
1501                         return -EADDRNOTAVAIL;
1502                 dev = rt_dst(rt).dev;
1503                 ip_rt_put(rt);
1504                 if (__in_dev_get_rtnl(dev) == NULL)
1505                         return -EADDRNOTAVAIL;
1506                 mutable->mlink = dev->ifindex;
1507                 ip_mc_inc_group(__in_dev_get_rtnl(dev), mutable->key.daddr);
1508         }
1509
1510 out:
1511         old_vport = port_table_lookup(&mutable->key, &old_mutable);
1512         if (old_vport && old_vport != cur_vport)
1513                 return -EEXIST;
1514
1515         return 0;
1516 }
1517
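/* Allocates and configures a new tunnel vport and inserts it into the port
 * table.  On failure every intermediate allocation is unwound and an
 * ERR_PTR() is returned. */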
1518 struct vport *ovs_tnl_create(const struct vport_parms *parms,
1519                              const struct vport_ops *vport_ops,
1520                              const struct tnl_ops *tnl_ops)
1521 {
1522         struct vport *vport;
1523         struct tnl_vport *tnl_vport;
1524         struct tnl_mutable_config *mutable;
1525         int initial_frag_id;
1526         int err;
1527
1528         vport = ovs_vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
1529         if (IS_ERR(vport)) {
1530                 err = PTR_ERR(vport);
1531                 goto error;
1532         }
1533
1534         tnl_vport = tnl_vport_priv(vport);
1535
1536         strcpy(tnl_vport->name, parms->name);
1537         tnl_vport->tnl_ops = tnl_ops;
1538
1539         mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1540         if (!mutable) {
1541                 err = -ENOMEM;
1542                 goto error_free_vport;
1543         }
1544
1545         random_ether_addr(mutable->eth_addr);
1546
1547         get_random_bytes(&initial_frag_id, sizeof(int));
1548         atomic_set(&tnl_vport->frag_id, initial_frag_id);
1549
1550         err = tnl_set_config(ovs_dp_get_net(parms->dp), parms->options, tnl_ops,
1551                              NULL, mutable);
1552         if (err)
1553                 goto error_free_mutable;
1554
1555         spin_lock_init(&tnl_vport->cache_lock);
1556
1557 #ifdef NEED_CACHE_TIMEOUT
1558         tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
1559                                        (net_random() % (MAX_CACHE_EXP / 2));
1560 #endif
1561
1562         rcu_assign_pointer(tnl_vport->mutable, mutable);
1563
1564         port_table_add_port(vport);
1565         return vport;
1566
1567 error_free_mutable:
1568         free_mutable_rtnl(mutable);
1569         kfree(mutable);
1570 error_free_vport:
1571         ovs_vport_free(vport);
1572 error:
1573         return ERR_PTR(err);
1574 }
1575
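/* Replaces a tunnel vport's configuration from freshly parsed options.  Only
 * the sequence number and Ethernet address survive from the old config; the
 * result is published under RCU, rehashing the port if its key changed. */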
1576 int ovs_tnl_set_options(struct vport *vport, struct nlattr *options)
1577 {
1578         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1579         const struct tnl_mutable_config *old_mutable;
1580         struct tnl_mutable_config *mutable;
1581         int err;
1582
1583         old_mutable = rtnl_dereference(tnl_vport->mutable);
1584         if (!old_mutable->key.daddr)
1585                 return -EINVAL;
1586
1587         mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1588         if (!mutable) {
1589                 err = -ENOMEM;
1590                 goto error;
1591         }
1592
1593         /* Copy fields whose values should be retained. */
1594         mutable->seq = old_mutable->seq + 1;
1595         memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);
1596
1597         /* Parse the remaining options supplied by userspace. */
1598         err = tnl_set_config(ovs_dp_get_net(vport->dp), options, tnl_vport->tnl_ops,
1599                              vport, mutable);
1600         if (err)
1601                 goto error_free;
1602
1603         if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
1604                 port_table_move_port(vport, mutable);
1605         else
1606                 assign_config_rcu(vport, mutable);
1607
1608         return 0;
1609
1610 error_free:
1611         free_mutable_rtnl(mutable);
1612         kfree(mutable);
1613 error:
1614         return err;
1615 }
1616
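/* Fills 'skb' with the OVS_TUNNEL_ATTR_* attributes describing the current
 * configuration, omitting attributes that hold no explicit value. */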
1617 int ovs_tnl_get_options(const struct vport *vport, struct sk_buff *skb)
1618 {
1619         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1620         const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);
1621
1622         if (nla_put_u32(skb, OVS_TUNNEL_ATTR_FLAGS,
1623                         mutable->flags & TNL_F_PUBLIC) ||
1624             nla_put_be32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr))
1625                 goto nla_put_failure;
1626
1627         if (!(mutable->flags & TNL_F_IN_KEY_MATCH) &&
1628             nla_put_be64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key))
1629                 goto nla_put_failure;
1630         if (!(mutable->flags & TNL_F_OUT_KEY_ACTION) &&
1631             nla_put_be64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key))
1632                 goto nla_put_failure;
1633         if (mutable->key.saddr &&
1634             nla_put_be32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr))
1635                 goto nla_put_failure;
1636         if (mutable->tos && nla_put_u8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos))
1637                 goto nla_put_failure;
1638         if (mutable->ttl && nla_put_u8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl))
1639                 goto nla_put_failure;
1640
1641         return 0;
1642
1643 nla_put_failure:
1644         return -EMSGSIZE;
1645 }
1646
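/* RCU callback: by now no reader can hold the old pointers, so the cache,
 * the mutable config, and the vport itself can finally be freed. */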
1647 static void free_port_rcu(struct rcu_head *rcu)
1648 {
1649         struct tnl_vport *tnl_vport = container_of(rcu,
1650                                                    struct tnl_vport, rcu);
1651
1652         free_cache((struct tnl_cache __force *)tnl_vport->cache);
1653         kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
1654         ovs_vport_free(tnl_vport_to_vport(tnl_vport));
1655 }
1656
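/* Removes the vport from the port table and defers freeing its state until
 * after an RCU grace period. */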
1657 void ovs_tnl_destroy(struct vport *vport)
1658 {
1659         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1660         struct tnl_mutable_config *mutable;
1661
1662         mutable = rtnl_dereference(tnl_vport->mutable);
1663         port_table_remove_port(vport);
1664         free_mutable_rtnl(mutable);
1665         call_rcu(&tnl_vport->rcu, free_port_rcu);
1666 }
1667
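/* Changes the port's Ethernet address by duplicating the current config,
 * patching the copy, and publishing it under RCU. */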
1668 int ovs_tnl_set_addr(struct vport *vport, const unsigned char *addr)
1669 {
1670         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1671         struct tnl_mutable_config *old_mutable, *mutable;
1672
1673         old_mutable = rtnl_dereference(tnl_vport->mutable);
1674         mutable = kmemdup(old_mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
1675         if (!mutable)
1676                 return -ENOMEM;
1677
1678         /* The duplicated config inherits the multicast group reference
         * (mlink), so clear it in the old config to keep the group from
         * being released twice. */
        old_mutable->mlink = 0;
1679
1680         memcpy(mutable->eth_addr, addr, ETH_ALEN);
1681         assign_config_rcu(vport, mutable);
1682
1683         return 0;
1684 }
1685
1686 const char *ovs_tnl_get_name(const struct vport *vport)
1687 {
1688         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1689         return tnl_vport->name;
1690 }
1691
1692 const unsigned char *ovs_tnl_get_addr(const struct vport *vport)
1693 {
1694         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1695         return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
1696 }
1697
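/* Frees an entire chain of skbs linked through skb->next, e.g. the segments
 * produced on the tunnel send path. */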
1698 void ovs_tnl_free_linked_skbs(struct sk_buff *skb)
1699 {
1700         while (skb) {
1701                 struct sk_buff *next = skb->next;
1702                 kfree_skb(skb);
1703                 skb = next;
1704         }
1705 }
1706
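/* One-time module initialization: allocates the global port hash table. */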
1707 int ovs_tnl_init(void)
1708 {
1709         int i;
1710
1711         port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
1712                              GFP_KERNEL);
1713         if (!port_table)
1714                 return -ENOMEM;
1715
1716         for (i = 0; i < PORT_TABLE_SIZE; i++)
1717                 INIT_HLIST_HEAD(&port_table[i]);
1718
1719         return 0;
1720 }
1721
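/* Module teardown; assumes all tunnel vports have already been destroyed,
 * leaving only the table itself to free. */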
1722 void ovs_tnl_exit(void)
1723 {
1724         kfree(port_table);
1725 }