datapath: Allow the number of hash entries to exceed TBL_MAX_BUCKETS
[sliver-openvswitch.git] / datapath / tunnel.c
/*
 * Copyright (c) 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "actions.h"
#include "checksum.h"
#include "datapath.h"
#include "table.h"
#include "tunnel.h"
#include "vlan.h"
#include "vport.h"
#include "vport-generic.h"
#include "vport-internal_dev.h"

#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets while
 * a longer time increases performance by reducing the frequency that the
 * cache needs to be rebuilt.  A variety of factors may cause the cache to be
 * invalidated before the expiration time but this is the maximum.  The time
 * is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
#endif

/*
 * Interval to check for and remove caches that are no longer valid.  Caches
 * are checked for validity before they are used for packet encapsulation and
 * old caches are removed at that time.  However, if no packets are sent through
 * the tunnel then the cache will never be destroyed.  Since it holds
 * references to a number of system objects, the cache will continue to use
 * system resources by not allowing those objects to be destroyed.  The cache
 * cleaner is periodically run to free invalid caches.  It does not
 * significantly affect system performance.  A lower interval will release
 * resources faster but will itself consume resources by requiring more frequent
 * checks.  A longer interval may result in messages being printed to the kernel
 * message buffer about unreleased resources.  The interval is expressed in
 * jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)

#define CACHE_DATA_ALIGN 16

static struct tbl __rcu *port_table __read_mostly;

static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);

/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
	return vport_from_priv(tnl_vport);
}

static inline struct tnl_vport *tnl_vport_table_cast(const struct tbl_node *node)
{
	return container_of(node, struct tnl_vport, tbl_node);
}

/* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
 * cache_lock is held, so it is only for update side code.
 */
static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
{
	return rcu_dereference_protected(tnl_vport->cache,
					 lockdep_is_held(&tnl_vport->cache_lock));
}

static inline void schedule_cache_cleaner(void)
{
	schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}

static void free_cache(struct tnl_cache *cache)
{
	if (!cache)
		return;

	flow_put(cache->flow);
	ip_rt_put(cache->rt);
	kfree(cache);
}

static void free_config_rcu(struct rcu_head *rcu)
{
	struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
	kfree(c);
}

static void free_cache_rcu(struct rcu_head *rcu)
{
	struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
	free_cache(c);
}

static void assign_config_rcu(struct vport *vport,
			      struct tnl_mutable_config *new_config)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *old_config;

	old_config = rtnl_dereference(tnl_vport->mutable);
	rcu_assign_pointer(tnl_vport->mutable, new_config);
	call_rcu(&old_config->rcu, free_config_rcu);
}

static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *old_cache;

	old_cache = cache_dereference(tnl_vport);
	rcu_assign_pointer(tnl_vport->cache, new_cache);

	if (old_cache)
		call_rcu(&old_cache->rcu, free_cache_rcu);
}

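/*
 * Each port is counted in exactly one of these pools depending on whether it
 * has a fixed inbound key and a configured local address.  tnl_find_port()
 * consults the counters to skip hash lookups for combinations that no port
 * currently uses.
 */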
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
	if (mutable->flags & TNL_F_IN_KEY_MATCH) {
		if (mutable->saddr)
			return &local_remote_ports;
		else
			return &remote_ports;
	} else {
		if (mutable->saddr)
			return &key_local_remote_ports;
		else
			return &key_remote_ports;
	}
}

struct port_lookup_key {
	const struct tnl_mutable_config *mutable;
	__be64 key;
	u32 tunnel_type;
	__be32 saddr;
	__be32 daddr;
};

/*
 * Modifies 'target' to store the rcu_dereferenced pointer that was used to do
 * the comparison.
 */
static int port_cmp(const struct tbl_node *node, void *target, int unused)
{
	const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
	struct port_lookup_key *lookup = target;

	lookup->mutable = rcu_dereference_rtnl(tnl_vport->mutable);

	return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
		lookup->mutable->daddr == lookup->daddr &&
		lookup->mutable->in_key == lookup->key &&
		lookup->mutable->saddr == lookup->saddr);
}

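/*
 * Mixes the route endpoints and tunnel type with jhash_3words, then folds in
 * the 64-bit key one 32-bit half at a time so that the full key contributes
 * to the bucket choice.
 */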
static u32 port_hash(struct port_lookup_key *k)
{
	u32 x = jhash_3words((__force u32)k->saddr, (__force u32)k->daddr,
			     k->tunnel_type, 0);
	return jhash_2words((__force u64)k->key >> 32, (__force u32)k->key, x);
}

static u32 mutable_hash(const struct tnl_mutable_config *mutable)
{
	struct port_lookup_key lookup;

	lookup.saddr = mutable->saddr;
	lookup.daddr = mutable->daddr;
	lookup.key = mutable->in_key;
	lookup.tunnel_type = mutable->tunnel_type;

	return port_hash(&lookup);
}

static void check_table_empty(void)
{
	struct tbl *old_table = rtnl_dereference(port_table);

	if (tbl_count(old_table) == 0) {
		cancel_delayed_work_sync(&cache_cleaner_wq);
		rcu_assign_pointer(port_table, NULL);
		tbl_deferred_destroy(old_table, NULL);
	}
}

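/*
 * Expansion is attempted once the number of entries exceeds the number of
 * buckets.  If tbl_expand() reports -ENOSPC the table has presumably hit
 * TBL_MAX_BUCKETS, so we keep inserting into the current table and simply
 * let the entry count exceed the bucket count rather than failing the add.
 */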
static int add_port(struct vport *vport)
{
	struct tbl *cur_table = rtnl_dereference(port_table);
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	int err;

	if (!port_table) {
		struct tbl *new_table;

		new_table = tbl_create(TBL_MIN_BUCKETS);
		if (!new_table)
			return -ENOMEM;

		rcu_assign_pointer(port_table, new_table);
		schedule_cache_cleaner();

	} else if (tbl_count(cur_table) > tbl_n_buckets(cur_table)) {
		struct tbl *new_table;

		new_table = tbl_expand(cur_table);
		if (IS_ERR(new_table)) {
			if (PTR_ERR(new_table) != -ENOSPC)
				return PTR_ERR(new_table);
		} else {
			rcu_assign_pointer(port_table, new_table);
			tbl_deferred_destroy(cur_table, NULL);
		}
	}

	err = tbl_insert(rtnl_dereference(port_table), &tnl_vport->tbl_node,
			 mutable_hash(rtnl_dereference(tnl_vport->mutable)));
	if (err) {
		check_table_empty();
		return err;
	}

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;

	return 0;
}

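/*
 * Rehashes 'vport' under its new configuration.  If the hash is unchanged the
 * table entry can stay put; otherwise the node is removed and reinserted,
 * which briefly leaves a window where lookups miss the port (see the comment
 * in the body).  The port pool counters are rebalanced around the config swap.
 */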
static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
{
	int err;
	struct tbl *cur_table = rtnl_dereference(port_table);
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	u32 hash;

	hash = mutable_hash(new_mutable);
	if (hash == tnl_vport->tbl_node.hash)
		goto table_updated;

	/*
	 * Ideally we should make this move atomic to avoid having gaps in
	 * finding tunnels or the possibility of failure.  However, if we do
	 * find a tunnel it will always be consistent.
	 */
	err = tbl_remove(cur_table, &tnl_vport->tbl_node);
	if (err)
		return err;

	err = tbl_insert(cur_table, &tnl_vport->tbl_node, hash);
	if (err) {
		(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
		check_table_empty();
		return err;
	}

table_updated:
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
	assign_config_rcu(vport, new_mutable);
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;

	return 0;
}

static int del_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	int err;

	err = tbl_remove(rtnl_dereference(port_table), &tnl_vport->tbl_node);
	if (err)
		return err;

	check_table_empty();
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;

	return 0;
}

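/*
 * Looks up the tunnel port that should receive a packet, trying the most
 * specific match first: exact key with a local address, exact key with a
 * wildcarded local address, then the same pair again for key-matching
 * (flow-based) ports.  Returns the port and its mutable config, or NULL.
 */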
struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
			    int tunnel_type,
			    const struct tnl_mutable_config **mutable)
{
	struct port_lookup_key lookup;
	struct tbl *table = rcu_dereference_rtnl(port_table);
	struct tbl_node *tbl_node;

	if (unlikely(!table))
		return NULL;

	lookup.saddr = saddr;
	lookup.daddr = daddr;

	if (tunnel_type & TNL_T_KEY_EXACT) {
		lookup.key = key;
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;

		if (key_local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
					      port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}

		if (key_remote_ports) {
			lookup.saddr = 0;

			tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
					      port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;

			lookup.saddr = saddr;
		}
	}

	if (tunnel_type & TNL_T_KEY_MATCH) {
		lookup.key = 0;
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;

		if (local_remote_ports) {
			tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
					      port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}

		if (remote_ports) {
			lookup.saddr = 0;

			tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
					      port_hash(&lookup), port_cmp);
			if (tbl_node)
				goto found;
		}
	}

	return NULL;

found:
	*mutable = lookup.mutable;
	return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
}

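/*
 * If the outer IP header arrived with the Congestion Experienced codepoint
 * set, propagate it to the ECN field of the inner IPv4 or IPv6 header so
 * that the mark survives decapsulation.
 */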
static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
{
	if (unlikely(INET_ECN_is_ce(tos))) {
		__be16 protocol = skb->protocol;

		skb_set_network_header(skb, ETH_HLEN);

		if (protocol == htons(ETH_P_8021Q)) {
			if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
				return;

			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
			skb_set_network_header(skb, VLAN_ETH_HLEN);
		}

		if (protocol == htons(ETH_P_IP)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct iphdr))))
				return;

			IP_ECN_set_ce(ip_hdr(skb));
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (protocol == htons(ETH_P_IPV6)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct ipv6hdr))))
				return;

			IP6_ECN_set_ce(ipv6_hdr(skb));
		}
#endif
	}
}

/**
 *	tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 * @tos: ToS from encapsulating IP packet, used to copy ECN bits
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
{
	struct ethhdr *eh;

	skb_reset_mac_header(skb);
	eh = eth_hdr(skb);

	if (likely(ntohs(eh->h_proto) >= 1536))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	skb_dst_drop(skb);
	nf_reset(skb);
	skb_clear_rxhash(skb);
	secpath_reset(skb);

	ecn_decapsulate(skb, tos);
	vlan_set_tci(skb, 0);

	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}

	vport_receive(vport, skb);
}

static bool check_ipv4_address(__be32 addr)
{
	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
	    || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
		return false;

	return true;
}

static bool ipv4_should_icmp(struct sk_buff *skb)
{
	struct iphdr *old_iph = ip_hdr(skb);

	/* Don't respond to L2 broadcast. */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	/* Don't respond to L3 broadcast or invalid addresses. */
	if (!check_ipv4_address(old_iph->daddr) ||
	    !check_ipv4_address(old_iph->saddr))
		return false;

	/* Only respond to the first fragment. */
	if (old_iph->frag_off & htons(IP_OFFSET))
		return false;

	/* Don't respond to ICMP error messages. */
	if (old_iph->protocol == IPPROTO_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
						(old_iph->ihl << 2) +
						offsetof(struct icmphdr, type) -
						skb->data, sizeof(icmp_type),
						&icmp_type);

		if (!icmp_typep)
			return false;

		if (*icmp_typep > NR_ICMP_TYPES
			|| (*icmp_typep <= ICMP_PARAMETERPROB
				&& *icmp_typep != ICMP_ECHOREPLY
				&& *icmp_typep != ICMP_ECHO))
			return false;
	}

	return true;
}

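/*
 * Fills in the IP and ICMP headers of the synthesized "fragmentation needed"
 * reply in 'nskb', swapping the original source and destination addresses
 * and checksumming the payload copied over from 'skb'.
 */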
static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct iphdr *iph, *old_iph = ip_hdr(skb);
	struct icmphdr *icmph;
	u8 *payload;

	iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	payload = skb_put(nskb, payload_length);

	/* IP */
	iph->version		=	4;
	iph->ihl		=	sizeof(struct iphdr) >> 2;
	iph->tos		=	(old_iph->tos & IPTOS_TOS_MASK) |
					IPTOS_PREC_INTERNETCONTROL;
	iph->tot_len		=	htons(sizeof(struct iphdr)
					      + sizeof(struct icmphdr)
					      + payload_length);
	get_random_bytes(&iph->id, sizeof(iph->id));
	iph->frag_off		=	0;
	iph->ttl		=	IPDEFTTL;
	iph->protocol		=	IPPROTO_ICMP;
	iph->daddr		=	old_iph->saddr;
	iph->saddr		=	old_iph->daddr;

	ip_send_check(iph);

	/* ICMP */
	icmph->type		=	ICMP_DEST_UNREACH;
	icmph->code		=	ICMP_FRAG_NEEDED;
	icmph->un.gateway	=	htonl(mtu);
	icmph->checksum		=	0;

	nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmph->checksum = csum_fold(nskb->csum);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
	struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
	int addr_type;
	int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	/* Check source address is valid. */
	addr_type = ipv6_addr_type(&old_ipv6h->saddr);
	if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
		return false;

	/* Don't reply to unspecified addresses. */
	if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
		return false;

	/* Don't respond to ICMP error messages. */
	payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
	if (payload_off < 0)
		return false;

	if (nexthdr == NEXTHDR_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, payload_off +
						offsetof(struct icmp6hdr,
							icmp6_type),
						sizeof(icmp_type), &icmp_type);

		if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
			return false;
	}

	return true;
}

static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
	struct icmp6hdr *icmp6h;
	u8 *payload;

	ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	payload = skb_put(nskb, payload_length);

	/* IPv6 */
	ipv6h->version		=	6;
	ipv6h->priority		=	0;
	memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
	ipv6h->payload_len	=	htons(sizeof(struct icmp6hdr)
					      + payload_length);
	ipv6h->nexthdr		=	NEXTHDR_ICMP;
	ipv6h->hop_limit	=	IPV6_DEFAULT_HOPLIMIT;
	ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

	/* ICMPv6 */
	icmp6h->icmp6_type	=	ICMPV6_PKT_TOOBIG;
	icmp6h->icmp6_code	=	0;
	icmp6h->icmp6_cksum	=	0;
	icmp6h->icmp6_mtu	=	htonl(mtu);

	nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
						sizeof(struct icmp6hdr)
						+ payload_length,
						ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */

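/*
 * Builds a PMTUD reply (ICMP Fragmentation Needed or ICMPv6 Packet Too Big)
 * for a packet that was too large and injects it back into the datapath via
 * vport_receive(), as if it had arrived on 'vport'.  Returns true if the
 * caller should drop the oversized packet.
 */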
bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
		     struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
	unsigned int eth_hdr_len = ETH_HLEN;
	unsigned int total_length = 0, header_length = 0, payload_length;
	struct ethhdr *eh, *old_eh = eth_hdr(skb);
	struct sk_buff *nskb;

	/* Sanity check */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU)
			return false;

		if (!ipv4_should_icmp(skb))
			return true;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)
			return false;

		/*
		 * In theory we should do PMTUD on IPv6 multicast messages but
		 * we don't have an address to send from so just fragment.
		 */
		if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
			return false;

		if (!ipv6_should_icmp(skb))
			return true;
	}
#endif
	else
		return false;

	/* Allocate */
	if (old_eh->h_proto == htons(ETH_P_8021Q))
		eth_hdr_len = VLAN_ETH_HLEN;

	payload_length = skb->len - eth_hdr_len;
	if (skb->protocol == htons(ETH_P_IP)) {
		header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, 576);
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else {
		header_length = sizeof(struct ipv6hdr) +
				sizeof(struct icmp6hdr);
		total_length = min_t(unsigned int, header_length +
						  payload_length, IPV6_MIN_MTU);
	}
#endif

	payload_length = total_length - header_length;

	nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
			     payload_length);
	if (!nskb)
		return false;

	skb_reserve(nskb, NET_IP_ALIGN);

	/* Ethernet / VLAN */
	eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
	memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
	memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
	nskb->protocol = eh->h_proto = old_eh->h_proto;
	if (old_eh->h_proto == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

		vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
		vh->h_vlan_encapsulated_proto = skb->protocol;
	} else
		vlan_set_tci(nskb, vlan_get_tci(skb));
	skb_reset_mac_header(nskb);

	/* Protocol */
	if (skb->protocol == htons(ETH_P_IP))
		ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else
		ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

	/*
	 * Assume that flow based keys are symmetric with respect to input
	 * and output and use the key that we were going to put on the
	 * outgoing packet for the fake received packet.  If the keys are
	 * not symmetric then PMTUD needs to be disabled since we won't have
	 * any way of synthesizing packets.
	 */
	if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
	    (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
		OVS_CB(nskb)->tun_id = flow_key;

	if (unlikely(compute_ip_summed(nskb, false))) {
		kfree_skb(nskb);
		return false;
	}

	vport_receive(vport, nskb);

	return true;
}

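/*
 * Decides whether the packet fits through the tunnel and what to put in the
 * outer DF bit.  When path MTU discovery is enabled and the packet is too
 * big, a PMTUD reply is generated via tnl_frag_needed() and false is
 * returned so the caller drops the packet.
 */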
static bool check_mtu(struct sk_buff *skb,
		      struct vport *vport,
		      const struct tnl_mutable_config *mutable,
		      const struct rtable *rt, __be16 *frag_offp)
{
	bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
	bool pmtud = mutable->flags & TNL_F_PMTUD;
	__be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
	int mtu = 0;
	unsigned int packet_length = skb->len - ETH_HLEN;

	/* Allow for one level of tagging in the packet length. */
	if (!vlan_tx_tag_present(skb) &&
	    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		packet_length -= VLAN_HLEN;

	if (pmtud) {
		int vlan_header = 0;

		/* The tag needs to go in the packet regardless of where it
		 * currently is, so subtract it from the MTU.
		 */
		if (vlan_tx_tag_present(skb) ||
		    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
			vlan_header = VLAN_HLEN;

		mtu = dst_mtu(&rt_dst(rt))
			- ETH_HLEN
			- mutable->tunnel_hlen
			- vlan_header;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		if (df_inherit)
			frag_off = iph->frag_off & htons(IP_DF);

		if (pmtud && iph->frag_off & htons(IP_DF)) {
			mtu = max(mtu, IP_MIN_MTU);

			if (packet_length > mtu &&
			    tnl_frag_needed(vport, mutable, skb, mtu,
					    OVS_CB(skb)->tun_id))
				return false;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		/* IPv6 requires end hosts to do fragmentation
		 * if the packet is above the minimum MTU.
		 */
		if (df_inherit && packet_length > IPV6_MIN_MTU)
			frag_off = htons(IP_DF);

		if (pmtud) {
			mtu = max(mtu, IPV6_MIN_MTU);

			if (packet_length > mtu &&
			    tnl_frag_needed(vport, mutable, skb, mtu,
					    OVS_CB(skb)->tun_id))
				return false;
		}
	}
#endif

	*frag_offp = frag_off;
	return true;
}

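/*
 * Writes the outer IP header into 'header' from the route and the tunnel
 * configuration, then hands off to the protocol-specific build_header() for
 * the encapsulation header that follows it.
 */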
static void create_tunnel_header(const struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 const struct rtable *rt, void *header)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct iphdr *iph = header;

	iph->version	= 4;
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->frag_off	= htons(IP_DF);
	iph->protocol	= tnl_vport->tnl_ops->ipproto;
	iph->tos	= mutable->tos;
	iph->daddr	= rt->rt_dst;
	iph->saddr	= rt->rt_src;
	iph->ttl	= mutable->ttl;
	if (!iph->ttl)
		iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));

	tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
}

static inline void *get_cached_header(const struct tnl_cache *cache)
{
	return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}

static inline bool check_cache_valid(const struct tnl_cache *cache,
				     const struct tnl_mutable_config *mutable)
{
	return cache &&
#ifdef NEED_CACHE_TIMEOUT
		time_before(jiffies, cache->expiration) &&
#endif
#ifdef HAVE_RT_GENID
		atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
#endif
#ifdef HAVE_HH_SEQ
		rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
#endif
		mutable->seq == cache->mutable_seq &&
		(!is_internal_dev(rt_dst(cache->rt).dev) ||
		(cache->flow && !cache->flow->dead));
}

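/*
 * Per-port callback for the cache cleaner: drops a port's header cache if it
 * has gone stale.  The cache lock is only trylocked; if it is contended we
 * simply try again on the next pass rather than stalling the cleaner.
 */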
static int cache_cleaner_cb(struct tbl_node *tbl_node, void *aux)
{
	struct tnl_vport *tnl_vport = tnl_vport_table_cast(tbl_node);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
	const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

	if (cache && !check_cache_valid(cache, mutable) &&
	    spin_trylock_bh(&tnl_vport->cache_lock)) {
		assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
		spin_unlock_bh(&tnl_vport->cache_lock);
	}

	return 0;
}

static void cache_cleaner(struct work_struct *work)
{
	schedule_cache_cleaner();

	rcu_read_lock();
	tbl_foreach(rcu_dereference(port_table), cache_cleaner_cb, NULL);
	rcu_read_unlock();
}

static inline void create_eth_hdr(struct tnl_cache *cache,
				  const struct rtable *rt)
{
	void *cache_data = get_cached_header(cache);
	int hh_len = rt_dst(rt).hh->hh_len;
	int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;

#ifdef HAVE_HH_SEQ
	unsigned hh_seq;

	do {
		hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
		memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	} while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));

	cache->hh_seq = hh_seq;
#else
	read_lock_bh(&rt_dst(rt).hh->hh_lock);
	memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	read_unlock_bh(&rt_dst(rt).hh->hh_lock);
#endif
}

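/*
 * Precomputes the full encapsulation header (L2 header from the neighbour
 * cache plus the outer IP and tunnel headers) so the fast path can prepend
 * it with a single memcpy.  If the route egresses through an internal device
 * the matching datapath flow is also looked up and pinned, letting the send
 * path short-circuit straight into vport_receive().
 */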
static struct tnl_cache *build_cache(struct vport *vport,
				     const struct tnl_mutable_config *mutable,
				     struct rtable *rt)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cache;
	void *cache_data;
	int cache_len;

	if (!(mutable->flags & TNL_F_HDR_CACHE))
		return NULL;

	/*
	 * If there is no entry in the ARP cache or if this device does not
	 * support hard header caching just fall back to the IP stack.
	 */
	if (!rt_dst(rt).hh)
		return NULL;

	/*
	 * If lock is contended fall back to directly building the header.
	 * We're not going to help performance by sitting here spinning.
	 */
	if (!spin_trylock_bh(&tnl_vport->cache_lock))
		return NULL;

	cache = cache_dereference(tnl_vport);
	if (check_cache_valid(cache, mutable))
		goto unlock;
	else
		cache = NULL;

	cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;

	cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
			cache_len, GFP_ATOMIC);
	if (!cache)
		goto unlock;

	cache->len = cache_len;

	create_eth_hdr(cache, rt);
	cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;

	create_tunnel_header(vport, mutable, rt, cache_data);

	cache->mutable_seq = mutable->seq;
	cache->rt = rt;
#ifdef NEED_CACHE_TIMEOUT
	cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif

	if (is_internal_dev(rt_dst(rt).dev)) {
		struct sw_flow_key flow_key;
		struct tbl_node *flow_node;
		struct vport *dst_vport;
		struct sk_buff *skb;
		bool is_frag;
		int err;
		int flow_key_len;

		dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
		if (!dst_vport)
			goto done;

		skb = alloc_skb(cache->len, GFP_ATOMIC);
		if (!skb)
			goto done;

		__skb_put(skb, cache->len);
		memcpy(skb->data, get_cached_header(cache), cache->len);

		err = flow_extract(skb, dst_vport->port_no, &flow_key,
				   &flow_key_len, &is_frag);

		consume_skb(skb);
		if (err || is_frag)
			goto done;

		flow_node = tbl_lookup(rcu_dereference(dst_vport->dp->table),
				       &flow_key, flow_key_len,
				       flow_hash(&flow_key, flow_key_len),
				       flow_cmp);
		if (flow_node) {
			struct sw_flow *flow = flow_cast(flow_node);

			cache->flow = flow;
			flow_hold(flow);
		}
	}

done:
	assign_cache_rcu(vport, cache);

unlock:
	spin_unlock_bh(&tnl_vport->cache_lock);

	return cache;
}

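/*
 * Returns the route for the tunnel's remote endpoint, preferring the cached
 * route when the ToS matches and the cache is still valid.  On a cache miss
 * the route is looked up through the IP stack and, where possible, a fresh
 * header cache is built from it.
 */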
static struct rtable *find_route(struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 u8 tos, struct tnl_cache **cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

	*cache = NULL;
	tos = RT_TOS(tos);

	if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) {
		*cache = cur_cache;
		return cur_cache->rt;
	} else {
		struct rtable *rt;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = mutable->daddr,
						.saddr = mutable->saddr,
						.tos = tos } },
				    .proto = tnl_vport->tnl_ops->ipproto };

		if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
			return NULL;
#else
		struct flowi4 fl = { .daddr = mutable->daddr,
				     .saddr = mutable->saddr,
				     .flowi4_tos = tos,
				     .flowi4_proto = tnl_vport->tnl_ops->ipproto };

		rt = ip_route_output_key(&init_net, &fl);
		if (IS_ERR(rt))
			return NULL;
#endif

		if (likely(tos == mutable->tos))
			*cache = build_cache(vport, mutable, rt);

		return rt;
	}
}

static inline bool need_linearize(const struct sk_buff *skb)
{
	int i;

	if (unlikely(skb_shinfo(skb)->frag_list))
		return true;

	/*
	 * Generally speaking we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1 we know nobody else can
	 * change them from underneath us and we can skip the linearization.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
			return true;

	return false;
}

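/*
 * Prepares the packet for encapsulation: grows the headroom to fit the link
 * and tunnel headers, segments GSO packets into a list of linked skbs, and
 * completes any pending partial checksum in software (linearizing first when
 * shared pages could change under us).  Returns the (possibly replaced) skb
 * or an ERR_PTR.
 */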
static struct sk_buff *handle_offloads(struct sk_buff *skb,
				       const struct tnl_mutable_config *mutable,
				       const struct rtable *rt)
{
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ mutable->tunnel_hlen
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
					0, GFP_ATOMIC);
		if (unlikely(err))
			goto error_free;
	}

	forward_ip_summed(skb, true);

	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;

		nskb = skb_gso_segment(skb, 0);
		if (IS_ERR(nskb)) {
			kfree_skb(skb);
			err = PTR_ERR(nskb);
			goto error;
		}

		consume_skb(skb);
		skb = nskb;
	} else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
		/* Pages aren't locked and could change at any time.
		 * If this happens after we compute the checksum, the
		 * checksum will be wrong.  We linearize now to avoid
		 * this problem.
		 */
		if (unlikely(need_linearize(skb))) {
			err = __skb_linearize(skb);
			if (unlikely(err))
				goto error_free;
		}

		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error_free;
	}

	set_ip_summed(skb, OVS_CSUM_NONE);

	return skb;

error_free:
	kfree_skb(skb);
error:
	return ERR_PTR(err);
}

static int send_frags(struct sk_buff *skb,
		      const struct tnl_mutable_config *mutable)
{
	int sent_len;

	sent_len = 0;
	while (skb) {
		struct sk_buff *next = skb->next;
		int frag_len = skb->len - mutable->tunnel_hlen;
		int err;

		skb->next = NULL;
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

		err = ip_local_out(skb);
		skb = next;
		if (unlikely(net_xmit_eval(err)))
			goto free_frags;
		sent_len += frag_len;
	}

	return sent_len;

free_frags:
	/*
	 * There's no point in continuing to send fragments once one has been
	 * dropped so just free the rest.  This may help ease the congestion
	 * that caused the first packet to be dropped.
	 */
	tnl_free_linked_skbs(skb);
	return sent_len;
}

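/*
 * Transmit path for tunnel vports.  Validates the inner protocol headers,
 * computes the outer ToS and TTL, looks up a route (and header cache, when
 * available), applies offload fixups, checks the MTU, and then prepends the
 * encapsulation header(s) and sends each resulting packet.  Returns the
 * number of bytes sent; a vport error is recorded if nothing was sent.
 */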
int tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);

	enum vport_err_type err = VPORT_E_TX_ERROR;
	struct rtable *rt;
	struct dst_entry *unattached_dst = NULL;
	struct tnl_cache *cache;
	int sent_len = 0;
	__be16 frag_off = 0;
	u8 ttl;
	u8 inner_tos;
	u8 tos;

	/* Validate the protocol headers before we try to use them. */
	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !vlan_tx_tag_present(skb)) {
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			goto error_free;

		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, VLAN_ETH_HLEN);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct iphdr))))
			skb->protocol = 0;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct ipv6hdr))))
			skb->protocol = 0;
	}
#endif

	/* ToS */
	if (skb->protocol == htons(ETH_P_IP))
		inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
	else
		inner_tos = 0;

	if (mutable->flags & TNL_F_TOS_INHERIT)
		tos = inner_tos;
	else
		tos = mutable->tos;

	tos = INET_ECN_encapsulate(tos, inner_tos);

	/* Route lookup */
	rt = find_route(vport, mutable, tos, &cache);
	if (unlikely(!rt))
		goto error_free;
	if (unlikely(!cache))
		unattached_dst = &rt_dst(rt);

	/* Reset SKB */
	nf_reset(skb);
	secpath_reset(skb);
	skb_dst_drop(skb);
	skb_clear_rxhash(skb);

	/* Offloading */
	skb = handle_offloads(skb, mutable, rt);
	if (IS_ERR(skb))
		goto error;

	/* MTU */
	if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
		err = VPORT_E_TX_DROPPED;
		goto error_free;
	}

	/*
	 * If we are over the MTU, allow the IP stack to handle fragmentation.
	 * Fragmentation is a slow path anyway.
	 */
	if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
		     cache)) {
		unattached_dst = &rt_dst(rt);
		dst_hold(unattached_dst);
		cache = NULL;
	}

	/* TTL */
	ttl = mutable->ttl;
	if (!ttl)
		ttl = ip4_dst_hoplimit(&rt_dst(rt));

	if (mutable->flags & TNL_F_TTL_INHERIT) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ipv6_hdr(skb)->hop_limit;
#endif
	}

	while (skb) {
		struct iphdr *iph;
		struct sk_buff *next_skb = skb->next;
		skb->next = NULL;

		if (unlikely(vlan_deaccel_tag(skb)))
			goto next;

		if (likely(cache)) {
			skb_push(skb, cache->len);
			memcpy(skb->data, get_cached_header(cache), cache->len);
			skb_reset_mac_header(skb);
			skb_set_network_header(skb, rt_dst(rt).hh->hh_len);

		} else {
			skb_push(skb, mutable->tunnel_hlen);
			create_tunnel_header(vport, mutable, rt, skb->data);
			skb_reset_network_header(skb);

			if (next_skb)
				skb_dst_set(skb, dst_clone(unattached_dst));
			else {
				skb_dst_set(skb, unattached_dst);
				unattached_dst = NULL;
			}
		}
		skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

		iph = ip_hdr(skb);
		iph->tos = tos;
		iph->ttl = ttl;
		iph->frag_off = frag_off;
		ip_select_ident(iph, &rt_dst(rt), NULL);

		skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
		if (unlikely(!skb))
			goto next;

		if (likely(cache)) {
			int orig_len = skb->len - cache->len;
			struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);

			skb->protocol = htons(ETH_P_IP);
			iph = ip_hdr(skb);
			iph->tot_len = htons(skb->len - skb_network_offset(skb));
			ip_send_check(iph);

			if (cache_vport) {
				if (unlikely(compute_ip_summed(skb, true))) {
					kfree_skb(skb);
					goto next;
				}

				OVS_CB(skb)->flow = cache->flow;
				vport_receive(cache_vport, skb);
				sent_len += orig_len;
			} else {
				int xmit_err;

				skb->dev = rt_dst(rt).dev;
				xmit_err = dev_queue_xmit(skb);

				if (likely(net_xmit_eval(xmit_err) == 0))
					sent_len += orig_len;
			}
		} else
			sent_len += send_frags(skb, mutable);

next:
		skb = next_skb;
	}

	if (unlikely(sent_len == 0))
		vport_record_error(vport, VPORT_E_TX_DROPPED);

	goto out;

error_free:
	tnl_free_linked_skbs(skb);
error:
	vport_record_error(vport, err);
out:
	dst_release(unattached_dst);
	return sent_len;
}

static const struct nla_policy tnl_policy[ODP_TUNNEL_ATTR_MAX + 1] = {
	[ODP_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
	[ODP_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
	[ODP_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
	[ODP_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
	[ODP_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
	[ODP_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
	[ODP_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
};

/* Sets ODP_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */
static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
			  const struct vport *cur_vport,
			  struct tnl_mutable_config *mutable)
{
	const struct vport *old_vport;
	const struct tnl_mutable_config *old_mutable;
	struct nlattr *a[ODP_TUNNEL_ATTR_MAX + 1];
	int err;

	if (!options)
		return -EINVAL;

	err = nla_parse_nested(a, ODP_TUNNEL_ATTR_MAX, options, tnl_policy);
	if (err)
		return err;

	if (!a[ODP_TUNNEL_ATTR_FLAGS] || !a[ODP_TUNNEL_ATTR_DST_IPV4])
		return -EINVAL;

	mutable->flags = nla_get_u32(a[ODP_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;

	if (a[ODP_TUNNEL_ATTR_SRC_IPV4])
		mutable->saddr = nla_get_be32(a[ODP_TUNNEL_ATTR_SRC_IPV4]);
	mutable->daddr = nla_get_be32(a[ODP_TUNNEL_ATTR_DST_IPV4]);

	if (a[ODP_TUNNEL_ATTR_TOS]) {
		mutable->tos = nla_get_u8(a[ODP_TUNNEL_ATTR_TOS]);
		if (mutable->tos != RT_TOS(mutable->tos))
			return -EINVAL;
	}

	if (a[ODP_TUNNEL_ATTR_TTL])
		mutable->ttl = nla_get_u8(a[ODP_TUNNEL_ATTR_TTL]);

	mutable->tunnel_type = tnl_ops->tunnel_type;
	if (!a[ODP_TUNNEL_ATTR_IN_KEY]) {
		mutable->tunnel_type |= TNL_T_KEY_MATCH;
		mutable->flags |= TNL_F_IN_KEY_MATCH;
	} else {
		mutable->tunnel_type |= TNL_T_KEY_EXACT;
		mutable->in_key = nla_get_be64(a[ODP_TUNNEL_ATTR_IN_KEY]);
	}

	if (!a[ODP_TUNNEL_ATTR_OUT_KEY])
		mutable->flags |= TNL_F_OUT_KEY_ACTION;
	else
		mutable->out_key = nla_get_be64(a[ODP_TUNNEL_ATTR_OUT_KEY]);

	mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
	if (mutable->tunnel_hlen < 0)
		return mutable->tunnel_hlen;

	mutable->tunnel_hlen += sizeof(struct iphdr);

	old_vport = tnl_find_port(mutable->saddr, mutable->daddr,
				  mutable->in_key, mutable->tunnel_type,
				  &old_mutable);

	if (old_vport && old_vport != cur_vport)
		return -EEXIST;

	return 0;
}

struct vport *tnl_create(const struct vport_parms *parms,
			 const struct vport_ops *vport_ops,
			 const struct tnl_ops *tnl_ops)
{
	struct vport *vport;
	struct tnl_vport *tnl_vport;
	struct tnl_mutable_config *mutable;
	int initial_frag_id;
	int err;

	vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	tnl_vport = tnl_vport_priv(vport);

	strcpy(tnl_vport->name, parms->name);
	tnl_vport->tnl_ops = tnl_ops;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	vport_gen_rand_ether_addr(mutable->eth_addr);

	get_random_bytes(&initial_frag_id, sizeof(int));
	atomic_set(&tnl_vport->frag_id, initial_frag_id);

	err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
	if (err)
		goto error_free_mutable;

	spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
	tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
				       (net_random() % (MAX_CACHE_EXP / 2));
#endif

	rcu_assign_pointer(tnl_vport->mutable, mutable);

	err = add_port(vport);
	if (err)
		goto error_free_mutable;

	return vport;

error_free_mutable:
	kfree(mutable);
error_free_vport:
	vport_free(vport);
error:
	return ERR_PTR(err);
}

int tnl_set_options(struct vport *vport, struct nlattr *options)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *old_mutable;
	struct tnl_mutable_config *mutable;
	int err;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error;
	}

	/* Copy fields whose values should be retained. */
	old_mutable = rtnl_dereference(tnl_vport->mutable);
	mutable->seq = old_mutable->seq + 1;
	memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);

	/* Parse the others configured by userspace. */
	err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);
	if (err)
		goto error_free;

	err = move_port(vport, mutable);
	if (err)
		goto error_free;

	return 0;

error_free:
	kfree(mutable);
error:
	return err;
}

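/*
 * Serializes the current tunnel configuration back to userspace.  The
 * NLA_PUT_* macros jump to nla_put_failure if the skb runs out of room, in
 * which case -EMSGSIZE is returned.
 */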
int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);

	NLA_PUT_U32(skb, ODP_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC);
	NLA_PUT_BE32(skb, ODP_TUNNEL_ATTR_DST_IPV4, mutable->daddr);

	if (!(mutable->flags & TNL_F_IN_KEY_MATCH))
		NLA_PUT_BE64(skb, ODP_TUNNEL_ATTR_IN_KEY, mutable->in_key);
	if (!(mutable->flags & TNL_F_OUT_KEY_ACTION))
		NLA_PUT_BE64(skb, ODP_TUNNEL_ATTR_OUT_KEY, mutable->out_key);
	if (mutable->saddr)
		NLA_PUT_BE32(skb, ODP_TUNNEL_ATTR_SRC_IPV4, mutable->saddr);
	if (mutable->tos)
		NLA_PUT_U8(skb, ODP_TUNNEL_ATTR_TOS, mutable->tos);
	if (mutable->ttl)
		NLA_PUT_U8(skb, ODP_TUNNEL_ATTR_TTL, mutable->ttl);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void free_port_rcu(struct rcu_head *rcu)
{
	struct tnl_vport *tnl_vport = container_of(rcu,
						   struct tnl_vport, rcu);

	free_cache((struct tnl_cache __force *)tnl_vport->cache);
	kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
	vport_free(tnl_vport_to_vport(tnl_vport));
}

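/*
 * Removes the port from the table only if the table actually maps this
 * configuration to this vport, then frees everything after an RCU grace
 * period so concurrent readers remain safe.
 */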
int tnl_destroy(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable, *old_mutable;

	mutable = rtnl_dereference(tnl_vport->mutable);

	if (vport == tnl_find_port(mutable->saddr, mutable->daddr,
				   mutable->in_key, mutable->tunnel_type,
				   &old_mutable))
		del_port(vport);

	call_rcu(&tnl_vport->rcu, free_port_rcu);

	return 0;
}

int tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = kmemdup(rtnl_dereference(tnl_vport->mutable),
			  sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable)
		return -ENOMEM;

	memcpy(mutable->eth_addr, addr, ETH_ALEN);
	assign_config_rcu(vport, mutable);

	return 0;
}

const char *tnl_get_name(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return tnl_vport->name;
}

const unsigned char *tnl_get_addr(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
}

void tnl_free_linked_skbs(struct sk_buff *skb)
{
	while (skb) {
		struct sk_buff *next = skb->next;
		kfree_skb(skb);
		skb = next;
	}
}