datapath: Use vlan acceleration for vlan operations.
[sliver-openvswitch.git] / datapath / tunnel.c
/*
 * Copyright (c) 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "actions.h"
#include "checksum.h"
#include "datapath.h"
#include "table.h"
#include "tunnel.h"
#include "vlan.h"
#include "vport.h"
#include "vport-generic.h"
#include "vport-internal_dev.h"

#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets, while
 * a longer time increases performance by reducing the frequency that the
 * cache needs to be rebuilt.  A variety of factors may cause the cache to be
 * invalidated before the expiration time, but this is the maximum.  The time
 * is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
#endif
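
/*
 * Illustrative note (added commentary, not part of the original source):
 * tnl_create() below jitters the per-port expiration interval so that all
 * caches do not expire at once.  With MAX_CACHE_EXP == HZ the interval
 * lands in (HZ/2, HZ]:
 *
 *        exp = MAX_CACHE_EXP - (net_random() % (MAX_CACHE_EXP / 2));
 *        cache->expiration = jiffies + exp;
 *        stale = !time_before(jiffies, cache->expiration);
 */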

/*
 * Interval to check for and remove caches that are no longer valid.  Caches
 * are checked for validity before they are used for packet encapsulation and
 * old caches are removed at that time.  However, if no packets are sent through
 * the tunnel then the cache will never be destroyed.  Since it holds
 * references to a number of system objects, the cache will continue to use
 * system resources by not allowing those objects to be destroyed.  The cache
 * cleaner is periodically run to free invalid caches.  It does not
 * significantly affect system performance.  A lower interval will release
 * resources faster but will itself consume resources by requiring more frequent
 * checks.  A longer interval may result in messages being printed to the kernel
 * message buffer about unreleased resources.  The interval is expressed in
 * jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)

#define CACHE_DATA_ALIGN 16

static struct tbl __rcu *port_table __read_mostly;

static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);

/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
        return vport_from_priv(tnl_vport);
}

static inline struct tnl_vport *tnl_vport_table_cast(const struct tbl_node *node)
{
        return container_of(node, struct tnl_vport, tbl_node);
}

/* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
 * cache_lock is held, so it is only for update side code.
 */
static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
{
        return rcu_dereference_protected(tnl_vport->cache,
                                         lockdep_is_held(&tnl_vport->cache_lock));
}
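
/*
 * Usage sketch (added commentary): readers on the packet path access the
 * cache with rcu_dereference() under rcu_read_lock(), while update-side
 * code takes cache_lock and uses cache_dereference() so lockdep can verify
 * the protection, as build_cache() below does:
 *
 *        if (!spin_trylock_bh(&tnl_vport->cache_lock))
 *                return NULL;
 *        cache = cache_dereference(tnl_vport);
 *        ...
 *        spin_unlock_bh(&tnl_vport->cache_lock);
 */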

static inline void schedule_cache_cleaner(void)
{
        schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}

static void free_cache(struct tnl_cache *cache)
{
        if (!cache)
                return;

        flow_put(cache->flow);
        ip_rt_put(cache->rt);
        kfree(cache);
}

static void free_config_rcu(struct rcu_head *rcu)
{
        struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
        kfree(c);
}

static void free_cache_rcu(struct rcu_head *rcu)
{
        struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
        free_cache(c);
}

static void assign_config_rcu(struct vport *vport,
                              struct tnl_mutable_config *new_config)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *old_config;

        old_config = rtnl_dereference(tnl_vport->mutable);
        rcu_assign_pointer(tnl_vport->mutable, new_config);
        call_rcu(&old_config->rcu, free_config_rcu);
}

static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *old_cache;

        old_cache = cache_dereference(tnl_vport);
        rcu_assign_pointer(tnl_vport->cache, new_cache);

        if (old_cache)
                call_rcu(&old_cache->rcu, free_cache_rcu);
}

static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
        if (mutable->flags & TNL_F_IN_KEY_MATCH) {
                if (mutable->saddr)
                        return &local_remote_ports;
                else
                        return &remote_ports;
        } else {
                if (mutable->saddr)
                        return &key_local_remote_ports;
                else
                        return &key_remote_ports;
        }
}

struct port_lookup_key {
        const struct tnl_mutable_config *mutable;
        __be64 key;
        u32 tunnel_type;
        __be32 saddr;
        __be32 daddr;
};

/*
 * Modifies 'target' to store the rcu_dereferenced pointer that was used to do
 * the comparison.
 */
static int port_cmp(const struct tbl_node *node, void *target)
{
        const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
        struct port_lookup_key *lookup = target;

        lookup->mutable = rcu_dereference_rtnl(tnl_vport->mutable);

        return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
                lookup->mutable->daddr == lookup->daddr &&
                lookup->mutable->in_key == lookup->key &&
                lookup->mutable->saddr == lookup->saddr);
}

static u32 port_hash(struct port_lookup_key *k)
{
        u32 x = jhash_3words((__force u32)k->saddr, (__force u32)k->daddr,
                             k->tunnel_type, 0);
        return jhash_2words((__force u64)k->key >> 32, (__force u32)k->key, x);
}
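
/*
 * Worked example (added commentary): the 64-bit tunnel key is folded into
 * the hash by handing its two halves to jhash_2words().  For a raw stored
 * key of 0x1122334455667788 (ignoring byte order for the sake of
 * illustration):
 *
 *        (__force u64)k->key >> 32  ==  0x11223344   high word
 *        (__force u32)k->key        ==  0x55667788   low word
 *
 * so ports whose keys differ only in the upper 32 bits still hash
 * differently.
 */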

static u32 mutable_hash(const struct tnl_mutable_config *mutable)
{
        struct port_lookup_key lookup;

        lookup.saddr = mutable->saddr;
        lookup.daddr = mutable->daddr;
        lookup.key = mutable->in_key;
        lookup.tunnel_type = mutable->tunnel_type;

        return port_hash(&lookup);
}

static void check_table_empty(void)
{
        struct tbl *old_table = rtnl_dereference(port_table);

        if (tbl_count(old_table) == 0) {
                cancel_delayed_work_sync(&cache_cleaner_wq);
                rcu_assign_pointer(port_table, NULL);
                tbl_deferred_destroy(old_table, NULL);
        }
}

static int add_port(struct vport *vport)
{
        struct tbl *cur_table = rtnl_dereference(port_table);
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        int err;

        if (!cur_table) {
                struct tbl *new_table;

                new_table = tbl_create(TBL_MIN_BUCKETS);
                if (!new_table)
                        return -ENOMEM;

                rcu_assign_pointer(port_table, new_table);
                schedule_cache_cleaner();

        } else if (tbl_count(cur_table) > tbl_n_buckets(cur_table)) {
                struct tbl *new_table;

                new_table = tbl_expand(cur_table);
                if (IS_ERR(new_table))
                        return PTR_ERR(new_table);

                rcu_assign_pointer(port_table, new_table);
                tbl_deferred_destroy(cur_table, NULL);
        }

        err = tbl_insert(rtnl_dereference(port_table), &tnl_vport->tbl_node,
                         mutable_hash(rtnl_dereference(tnl_vport->mutable)));
        if (err) {
                check_table_empty();
                return err;
        }

        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;

        return 0;
}

static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
{
        int err;
        struct tbl *cur_table = rtnl_dereference(port_table);
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        u32 hash;

        hash = mutable_hash(new_mutable);
        if (hash == tnl_vport->tbl_node.hash)
                goto table_updated;

        /*
         * Ideally we should make this move atomic to avoid having gaps in
         * finding tunnels or the possibility of failure.  However, if we do
         * find a tunnel it will always be consistent.
         */
        err = tbl_remove(cur_table, &tnl_vport->tbl_node);
        if (err)
                return err;

        err = tbl_insert(cur_table, &tnl_vport->tbl_node, hash);
        if (err) {
                (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
                check_table_empty();
                return err;
        }

table_updated:
        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
        assign_config_rcu(vport, new_mutable);
        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;

        return 0;
}

static int del_port(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        int err;

        err = tbl_remove(rtnl_dereference(port_table), &tnl_vport->tbl_node);
        if (err)
                return err;

        check_table_empty();
        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;

        return 0;
}

struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
                            int tunnel_type,
                            const struct tnl_mutable_config **mutable)
{
        struct port_lookup_key lookup;
        struct tbl *table = rcu_dereference_rtnl(port_table);
        struct tbl_node *tbl_node;

        if (unlikely(!table))
                return NULL;

        lookup.saddr = saddr;
        lookup.daddr = daddr;

        if (tunnel_type & TNL_T_KEY_EXACT) {
                lookup.key = key;
                lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;

                if (key_local_remote_ports) {
                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }

                if (key_remote_ports) {
                        lookup.saddr = 0;

                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;

                        lookup.saddr = saddr;
                }
        }

        if (tunnel_type & TNL_T_KEY_MATCH) {
                lookup.key = 0;
                lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;

                if (local_remote_ports) {
                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }

                if (remote_ports) {
                        lookup.saddr = 0;

                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }
        }

        return NULL;

found:
        *mutable = lookup.mutable;
        return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
}
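
/*
 * Summary sketch (added commentary) of the precedence implemented above;
 * more specific ports win:
 *
 *        1. exact key, local + remote IP   (key_local_remote_ports)
 *        2. exact key, remote IP only      (key_remote_ports)
 *        3. key match, local + remote IP   (local_remote_ports)
 *        4. key match, remote IP only      (remote_ports)
 *
 * The per-pool counters let lookups skip hashing entirely when no port of
 * a given style exists.
 */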

static inline void ecn_decapsulate(struct sk_buff *skb)
{
        /* This is accessing the outer IP header of the tunnel, which we've
         * already validated to be OK.  skb->data is currently set to the start
         * of the inner Ethernet header, and we've validated ETH_HLEN.
         */
        if (unlikely(INET_ECN_is_ce(ip_hdr(skb)->tos))) {
                __be16 protocol = skb->protocol;

                skb_set_network_header(skb, ETH_HLEN);

                if (skb->protocol == htons(ETH_P_8021Q)) {
                        if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                                return;

                        protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                        skb_set_network_header(skb, VLAN_ETH_HLEN);
                }

                if (protocol == htons(ETH_P_IP)) {
                        if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                            + sizeof(struct iphdr))))
                                return;

                        IP_ECN_set_ce(ip_hdr(skb));
                }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (protocol == htons(ETH_P_IPV6)) {
                        if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                            + sizeof(struct ipv6hdr))))
                                return;

                        IP6_ECN_set_ce(ipv6_hdr(skb));
                }
#endif
        }
}

/* Called with rcu_read_lock. */
void tnl_rcv(struct vport *vport, struct sk_buff *skb)
{
        /* Packets received by this function are in the following state:
         * - skb->data points to the inner Ethernet header.
         * - The inner Ethernet header is in the linear data area.
         * - skb->csum does not include the inner Ethernet header.
         * - The layer pointers point at the outer headers.
         */

        struct ethhdr *eh = (struct ethhdr *)skb->data;

        if (likely(ntohs(eh->h_proto) >= 1536))
                skb->protocol = eh->h_proto;
        else
                skb->protocol = htons(ETH_P_802_2);

        skb_dst_drop(skb);
        nf_reset(skb);
        secpath_reset(skb);

        ecn_decapsulate(skb);
        compute_ip_summed(skb, false);
        vlan_set_tci(skb, 0);

        vport_receive(vport, skb);
}
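
/*
 * Background note (added commentary): an h_proto of 1536 (0x600) or more
 * is an Ethertype, while smaller values are 802.3 length fields, e.g.:
 *
 *        eh->h_proto == htons(0x0800)  ->  skb->protocol = htons(ETH_P_IP)
 *        eh->h_proto == htons(0x0123)  ->  skb->protocol = htons(ETH_P_802_2)
 */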

static bool check_ipv4_address(__be32 addr)
{
        if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
            || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
                return false;

        return true;
}

static bool ipv4_should_icmp(struct sk_buff *skb)
{
        struct iphdr *old_iph = ip_hdr(skb);

        /* Don't respond to L2 broadcast. */
        if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
                return false;

        /* Don't respond to L3 broadcast or invalid addresses. */
        if (!check_ipv4_address(old_iph->daddr) ||
            !check_ipv4_address(old_iph->saddr))
                return false;

        /* Only respond to the first fragment. */
        if (old_iph->frag_off & htons(IP_OFFSET))
                return false;

        /* Don't respond to ICMP error messages. */
        if (old_iph->protocol == IPPROTO_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
                                                (old_iph->ihl << 2) +
                                                offsetof(struct icmphdr, type) -
                                                skb->data, sizeof(icmp_type),
                                                &icmp_type);

                if (!icmp_typep)
                        return false;

                if (*icmp_typep > NR_ICMP_TYPES
                        || (*icmp_typep <= ICMP_PARAMETERPROB
                                && *icmp_typep != ICMP_ECHOREPLY
                                && *icmp_typep != ICMP_ECHO))
                        return false;
        }

        return true;
}

static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                            unsigned int mtu, unsigned int payload_length)
{
        struct iphdr *iph, *old_iph = ip_hdr(skb);
        struct icmphdr *icmph;
        u8 *payload;

        iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
        icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
        payload = skb_put(nskb, payload_length);

        /* IP */
        iph->version            =       4;
        iph->ihl                =       sizeof(struct iphdr) >> 2;
        iph->tos                =       (old_iph->tos & IPTOS_TOS_MASK) |
                                        IPTOS_PREC_INTERNETCONTROL;
        iph->tot_len            =       htons(sizeof(struct iphdr)
                                              + sizeof(struct icmphdr)
                                              + payload_length);
        get_random_bytes(&iph->id, sizeof(iph->id));
        iph->frag_off           =       0;
        iph->ttl                =       IPDEFTTL;
        iph->protocol           =       IPPROTO_ICMP;
        iph->daddr              =       old_iph->saddr;
        iph->saddr              =       old_iph->daddr;

        ip_send_check(iph);

        /* ICMP */
        icmph->type             =       ICMP_DEST_UNREACH;
        icmph->code             =       ICMP_FRAG_NEEDED;
        icmph->un.gateway       =       htonl(mtu);
        icmph->checksum         =       0;

        nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmph->checksum = csum_fold(nskb->csum);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
        struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
        int addr_type;
        int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
        u8 nexthdr = ipv6_hdr(skb)->nexthdr;

        /* Check source address is valid. */
        addr_type = ipv6_addr_type(&old_ipv6h->saddr);
        if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
                return false;

        /* Don't reply to unspecified addresses. */
        if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
                return false;

        /* Don't respond to ICMP error messages. */
        payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
        if (payload_off < 0)
                return false;

        if (nexthdr == NEXTHDR_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, payload_off +
                                                offsetof(struct icmp6hdr,
                                                        icmp6_type),
                                                sizeof(icmp_type), &icmp_type);

                if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
                        return false;
        }

        return true;
}

static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                            unsigned int mtu, unsigned int payload_length)
{
        struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
        struct icmp6hdr *icmp6h;
        u8 *payload;

        ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
        icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
        payload = skb_put(nskb, payload_length);

        /* IPv6 */
        ipv6h->version          =       6;
        ipv6h->priority         =       0;
        memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
        ipv6h->payload_len      =       htons(sizeof(struct icmp6hdr)
                                              + payload_length);
        ipv6h->nexthdr          =       NEXTHDR_ICMP;
        ipv6h->hop_limit        =       IPV6_DEFAULT_HOPLIMIT;
        ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
        ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

        /* ICMPv6 */
        icmp6h->icmp6_type      =       ICMPV6_PKT_TOOBIG;
        icmp6h->icmp6_code      =       0;
        icmp6h->icmp6_cksum     =       0;
        icmp6h->icmp6_mtu       =       htonl(mtu);

        nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
                                                sizeof(struct icmp6hdr)
                                                + payload_length,
                                                ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */

bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
                     struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
        unsigned int eth_hdr_len = ETH_HLEN;
        unsigned int total_length = 0, header_length = 0, payload_length;
        struct ethhdr *eh, *old_eh = eth_hdr(skb);
        struct sk_buff *nskb;

        /* Sanity check */
        if (skb->protocol == htons(ETH_P_IP)) {
                if (mtu < IP_MIN_MTU)
                        return false;

                if (!ipv4_should_icmp(skb))
                        return true;
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (mtu < IPV6_MIN_MTU)
                        return false;

                /*
                 * In theory we should do PMTUD on IPv6 multicast messages but
                 * we don't have an address to send from so just fragment.
                 */
                if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
                        return false;

                if (!ipv6_should_icmp(skb))
                        return true;
        }
#endif
        else
                return false;

        /* Allocate */
        if (old_eh->h_proto == htons(ETH_P_8021Q))
                eth_hdr_len = VLAN_ETH_HLEN;

        payload_length = skb->len - eth_hdr_len;
        if (skb->protocol == htons(ETH_P_IP)) {
                header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
                total_length = min_t(unsigned int, header_length +
                                                   payload_length, 576);
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else {
                header_length = sizeof(struct ipv6hdr) +
                                sizeof(struct icmp6hdr);
                total_length = min_t(unsigned int, header_length +
                                                  payload_length, IPV6_MIN_MTU);
        }
#endif

        payload_length = total_length - header_length;

        nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
                             payload_length);
        if (!nskb)
                return false;

        skb_reserve(nskb, NET_IP_ALIGN);

        /* Ethernet / VLAN */
        eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
        memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
        memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
        nskb->protocol = eh->h_proto = old_eh->h_proto;
        if (old_eh->h_proto == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

                vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
                vh->h_vlan_encapsulated_proto = skb->protocol;
        } else
                vlan_set_tci(nskb, vlan_get_tci(skb));
        skb_reset_mac_header(nskb);

        /* Protocol */
        if (skb->protocol == htons(ETH_P_IP))
                ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else
                ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

        /*
         * Assume that flow based keys are symmetric with respect to input
         * and output and use the key that we were going to put on the
         * outgoing packet for the fake received packet.  If the keys are
         * not symmetric then PMTUD needs to be disabled since we won't have
         * any way of synthesizing packets.
         */
        if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
            (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
                OVS_CB(nskb)->tun_id = flow_key;

        compute_ip_summed(nskb, false);
        vport_receive(vport, nskb);

        return true;
}
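
/*
 * Illustrative example (added commentary): with TNL_F_IN_KEY_MATCH and
 * TNL_F_OUT_KEY_ACTION both set, a packet that was about to leave with
 * tun_id K produces a synthetic "received" ICMP fragmentation-needed
 * packet carrying the same tun_id K, so the reply traverses the flow
 * table exactly as if the remote endpoint had sent it.  If keys were not
 * symmetric this synthesis would be impossible, which is why PMTUD is
 * only attempted in that configuration.
 */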

static bool check_mtu(struct sk_buff *skb,
                      struct vport *vport,
                      const struct tnl_mutable_config *mutable,
                      const struct rtable *rt, __be16 *frag_offp)
{
        bool pmtud = mutable->flags & TNL_F_PMTUD;
        __be16 frag_off = 0;
        int mtu = 0;
        unsigned int packet_length = skb->len - ETH_HLEN;

        /* Allow for one level of tagging in the packet length. */
        if (!vlan_tx_tag_present(skb) &&
            eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
                packet_length -= VLAN_HLEN;

        if (pmtud) {
                int vlan_header = 0;

                frag_off = htons(IP_DF);

                /* The tag needs to go in the packet regardless of where it
                 * currently is, so subtract it from the MTU.
                 */
                if (vlan_tx_tag_present(skb) ||
                    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
                        vlan_header = VLAN_HLEN;

                mtu = dst_mtu(&rt_dst(rt))
                        - ETH_HLEN
                        - mutable->tunnel_hlen
                        - vlan_header;
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);

                frag_off |= iph->frag_off & htons(IP_DF);

                if (pmtud && iph->frag_off & htons(IP_DF)) {
                        mtu = max(mtu, IP_MIN_MTU);

                        if (packet_length > mtu &&
                            tnl_frag_needed(vport, mutable, skb, mtu,
                                            OVS_CB(skb)->tun_id))
                                return false;
                }
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                /* IPv6 requires PMTUD if the packet is above the minimum MTU. */
                if (packet_length > IPV6_MIN_MTU)
                        frag_off = htons(IP_DF);

                if (pmtud) {
                        mtu = max(mtu, IPV6_MIN_MTU);

                        if (packet_length > mtu &&
                            tnl_frag_needed(vport, mutable, skb, mtu,
                                            OVS_CB(skb)->tun_id))
                                return false;
                }
        }
#endif

        *frag_offp = frag_off;
        return true;
}
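
/*
 * Worked example (added commentary, assumed header sizes): for a GRE-style
 * tunnel with a 24-byte tunnel_hlen (20-byte outer IP + 4-byte GRE) over a
 * route MTU of 1500, a tagged packet may carry at most:
 *
 *        mtu = 1500 - ETH_HLEN (14) - 24 - VLAN_HLEN (4) = 1458 bytes
 *
 * Anything larger with IP_DF set is answered by tnl_frag_needed().
 */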

static void create_tunnel_header(const struct vport *vport,
                                 const struct tnl_mutable_config *mutable,
                                 const struct rtable *rt, void *header)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct iphdr *iph = header;

        iph->version    = 4;
        iph->ihl        = sizeof(struct iphdr) >> 2;
        iph->frag_off   = htons(IP_DF);
        iph->protocol   = tnl_vport->tnl_ops->ipproto;
        iph->tos        = mutable->tos;
        iph->daddr      = rt->rt_dst;
        iph->saddr      = rt->rt_src;
        iph->ttl        = mutable->ttl;
        if (!iph->ttl)
                iph->ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

        tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
}

static inline void *get_cached_header(const struct tnl_cache *cache)
{
        return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}

static inline bool check_cache_valid(const struct tnl_cache *cache,
                                     const struct tnl_mutable_config *mutable)
{
        return cache &&
#ifdef NEED_CACHE_TIMEOUT
                time_before(jiffies, cache->expiration) &&
#endif
#ifdef HAVE_RT_GENID
                atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
#endif
#ifdef HAVE_HH_SEQ
                rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
#endif
                mutable->seq == cache->mutable_seq &&
                (!is_internal_dev(rt_dst(cache->rt).dev) ||
                (cache->flow && !cache->flow->dead));
}

static int cache_cleaner_cb(struct tbl_node *tbl_node, void *aux)
{
        struct tnl_vport *tnl_vport = tnl_vport_table_cast(tbl_node);
        const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
        const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

        if (cache && !check_cache_valid(cache, mutable) &&
            spin_trylock_bh(&tnl_vport->cache_lock)) {
                assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
                spin_unlock_bh(&tnl_vport->cache_lock);
        }

        return 0;
}

static void cache_cleaner(struct work_struct *work)
{
        schedule_cache_cleaner();

        rcu_read_lock();
        tbl_foreach(rcu_dereference(port_table), cache_cleaner_cb, NULL);
        rcu_read_unlock();
}

static inline void create_eth_hdr(struct tnl_cache *cache,
                                  const struct rtable *rt)
{
        void *cache_data = get_cached_header(cache);
        int hh_len = rt_dst(rt).hh->hh_len;
        int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;

#ifdef HAVE_HH_SEQ
        unsigned hh_seq;

        do {
                hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
                memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
        } while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));

        cache->hh_seq = hh_seq;
#else
        read_lock_bh(&rt_dst(rt).hh->hh_lock);
        memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
        read_unlock_bh(&rt_dst(rt).hh->hh_lock);
#endif
}

static struct tnl_cache *build_cache(struct vport *vport,
                                     const struct tnl_mutable_config *mutable,
                                     struct rtable *rt)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *cache;
        void *cache_data;
        int cache_len;

        if (!(mutable->flags & TNL_F_HDR_CACHE))
                return NULL;

        /*
         * If there is no entry in the ARP cache or if this device does not
         * support hard header caching, just fall back to the IP stack.
         */
        if (!rt_dst(rt).hh)
                return NULL;

        /*
         * If the lock is contended, fall back to directly building the header.
         * We're not going to help performance by sitting here spinning.
         */
        if (!spin_trylock_bh(&tnl_vport->cache_lock))
                return NULL;

        cache = cache_dereference(tnl_vport);
        if (check_cache_valid(cache, mutable))
                goto unlock;
        else
                cache = NULL;

        cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;

        cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
                        cache_len, GFP_ATOMIC);
        if (!cache)
                goto unlock;

        cache->len = cache_len;

        create_eth_hdr(cache, rt);
        cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;

        create_tunnel_header(vport, mutable, rt, cache_data);

        cache->mutable_seq = mutable->seq;
        cache->rt = rt;
#ifdef NEED_CACHE_TIMEOUT
        cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif

        if (is_internal_dev(rt_dst(rt).dev)) {
                struct sw_flow_key flow_key;
                struct tbl_node *flow_node;
                struct vport *dst_vport;
                struct sk_buff *skb;
                bool is_frag;
                int err;

                dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
                if (!dst_vport)
                        goto done;

                skb = alloc_skb(cache->len, GFP_ATOMIC);
                if (!skb)
                        goto done;

                __skb_put(skb, cache->len);
                memcpy(skb->data, get_cached_header(cache), cache->len);

                err = flow_extract(skb, dst_vport->port_no, &flow_key, &is_frag);

                kfree_skb(skb);
                if (err || is_frag)
                        goto done;

                flow_node = tbl_lookup(rcu_dereference(dst_vport->dp->table),
                                       &flow_key, flow_hash(&flow_key),
                                       flow_cmp);
                if (flow_node) {
                        struct sw_flow *flow = flow_cast(flow_node);

                        cache->flow = flow;
                        flow_hold(flow);
                }
        }

done:
        assign_cache_rcu(vport, cache);

unlock:
        spin_unlock_bh(&tnl_vport->cache_lock);

        return cache;
}

static struct rtable *find_route(struct vport *vport,
                                 const struct tnl_mutable_config *mutable,
                                 u8 tos, struct tnl_cache **cache)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

        *cache = NULL;
        tos = RT_TOS(tos);

        if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) {
                *cache = cur_cache;
                return cur_cache->rt;
        } else {
                struct rtable *rt;
                struct flowi fl = { .nl_u = { .ip4_u =
                                              { .daddr = mutable->daddr,
                                                .saddr = mutable->saddr,
                                                .tos = tos } },
                                    .proto = tnl_vport->tnl_ops->ipproto };

                if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
                        return NULL;

                if (likely(tos == mutable->tos))
                        *cache = build_cache(vport, mutable, rt);

                return rt;
        }
}

static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
{
        if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
                struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
                if (unlikely(!nskb)) {
                        kfree_skb(skb);
                        return ERR_PTR(-ENOMEM);
                }

                set_skb_csum_bits(skb, nskb);

                if (skb->sk)
                        skb_set_owner_w(nskb, skb->sk);

                kfree_skb(skb);
                return nskb;
        }

        return skb;
}

static inline bool need_linearize(const struct sk_buff *skb)
{
        int i;

        if (unlikely(skb_shinfo(skb)->frag_list))
                return true;

        /*
         * Generally speaking we should linearize if there are paged frags.
         * However, if all of the refcounts are 1 we know nobody else can
         * change them from underneath us and we can skip the linearization.
         */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
                        return true;

        return false;
}

static struct sk_buff *handle_offloads(struct sk_buff *skb,
                                       const struct tnl_mutable_config *mutable,
                                       const struct rtable *rt)
{
        int min_headroom;
        int err;

        forward_ip_summed(skb);

        err = vswitch_skb_checksum_setup(skb);
        if (unlikely(err))
                goto error_free;

        min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
                        + mutable->tunnel_hlen
                        + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

        skb = check_headroom(skb, min_headroom);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                goto error;
        }

        if (skb_is_gso(skb)) {
                struct sk_buff *nskb;

                nskb = skb_gso_segment(skb, 0);
                kfree_skb(skb);
                if (IS_ERR(nskb)) {
                        err = PTR_ERR(nskb);
                        goto error;
                }

                skb = nskb;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                /* Pages aren't locked and could change at any time.
                 * If this happens after we compute the checksum, the
                 * checksum will be wrong.  We linearize now to avoid
                 * this problem.
                 */
                if (unlikely(need_linearize(skb))) {
                        err = __skb_linearize(skb);
                        if (unlikely(err))
                                goto error_free;
                }

                err = skb_checksum_help(skb);
                if (unlikely(err))
                        goto error_free;
        } else if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->ip_summed = CHECKSUM_NONE;

        return skb;

error_free:
        kfree_skb(skb);
error:
        return ERR_PTR(err);
}
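
/*
 * Note (added commentary): skb_gso_segment() returns its segments chained
 * through skb->next, which is why tnl_send() below iterates:
 *
 *        while (skb) {
 *                struct sk_buff *next_skb = skb->next;
 *                skb->next = NULL;
 *                ... encapsulate and transmit one segment ...
 *                skb = next_skb;
 *        }
 */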

static int send_frags(struct sk_buff *skb,
                      const struct tnl_mutable_config *mutable)
{
        int sent_len;
        int err;

        sent_len = 0;
        while (skb) {
                struct sk_buff *next = skb->next;
                int frag_len = skb->len - mutable->tunnel_hlen;

                skb->next = NULL;
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

                err = ip_local_out(skb);
                if (likely(net_xmit_eval(err) == 0))
                        sent_len += frag_len;
                else {
                        skb = next;
                        goto free_frags;
                }

                skb = next;
        }

        return sent_len;

free_frags:
        /*
         * There's no point in continuing to send fragments once one has been
         * dropped so just free the rest.  This may help relieve the congestion
         * that caused the first packet to be dropped.
         */
        tnl_free_linked_skbs(skb);
        return sent_len;
}

int tnl_send(struct vport *vport, struct sk_buff *skb)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);

        enum vport_err_type err = VPORT_E_TX_ERROR;
        struct rtable *rt;
        struct dst_entry *unattached_dst = NULL;
        struct tnl_cache *cache;
        int sent_len = 0;
        __be16 frag_off = 0;
        u8 ttl;
        u8 inner_tos;
        u8 tos;

        /* Validate the protocol headers before we try to use them. */
        if (skb->protocol == htons(ETH_P_8021Q) &&
            !vlan_tx_tag_present(skb)) {
                if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                        goto error_free;

                skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                skb_set_network_header(skb, VLAN_ETH_HLEN);
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                    + sizeof(struct iphdr))))
                        skb->protocol = 0;
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                    + sizeof(struct ipv6hdr))))
                        skb->protocol = 0;
        }
#endif

        /* ToS */
        if (skb->protocol == htons(ETH_P_IP))
                inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6))
                inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
        else
                inner_tos = 0;

        if (mutable->flags & TNL_F_TOS_INHERIT)
                tos = inner_tos;
        else
                tos = mutable->tos;

        tos = INET_ECN_encapsulate(tos, inner_tos);

        /* Route lookup */
        rt = find_route(vport, mutable, tos, &cache);
        if (unlikely(!rt))
                goto error_free;
        if (unlikely(!cache))
                unattached_dst = &rt_dst(rt);

        /* Reset SKB */
        nf_reset(skb);
        secpath_reset(skb);
        skb_dst_drop(skb);

        /* Offloading */
        skb = handle_offloads(skb, mutable, rt);
        if (IS_ERR(skb))
                goto error;

        /* MTU */
        if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
                err = VPORT_E_TX_DROPPED;
                goto error_free;
        }

        /*
         * If we are over the MTU, allow the IP stack to handle fragmentation.
         * Fragmentation is a slow path anyway.
         */
        if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
                     cache)) {
                unattached_dst = &rt_dst(rt);
                dst_hold(unattached_dst);
                cache = NULL;
        }

        /* TTL */
        ttl = mutable->ttl;
        if (!ttl)
                ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

        if (mutable->flags & TNL_F_TTL_INHERIT) {
                if (skb->protocol == htons(ETH_P_IP))
                        ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        ttl = ipv6_hdr(skb)->hop_limit;
#endif
        }

        while (skb) {
                struct iphdr *iph;
                struct sk_buff *next_skb = skb->next;
                skb->next = NULL;

                if (unlikely(vlan_deaccel_tag(skb)))
                        goto next;

                if (likely(cache)) {
                        skb_push(skb, cache->len);
                        memcpy(skb->data, get_cached_header(cache), cache->len);
                        skb_reset_mac_header(skb);
                        skb_set_network_header(skb, rt_dst(rt).hh->hh_len);

                } else {
                        skb_push(skb, mutable->tunnel_hlen);
                        create_tunnel_header(vport, mutable, rt, skb->data);
                        skb_reset_network_header(skb);

                        if (next_skb)
                                skb_dst_set(skb, dst_clone(unattached_dst));
                        else {
                                skb_dst_set(skb, unattached_dst);
                                unattached_dst = NULL;
                        }
                }
                skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

                iph = ip_hdr(skb);
                iph->tos = tos;
                iph->ttl = ttl;
                iph->frag_off = frag_off;
                ip_select_ident(iph, &rt_dst(rt), NULL);

                skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
                if (unlikely(!skb))
                        goto next;

                if (likely(cache)) {
                        int orig_len = skb->len - cache->len;
                        struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);

                        skb->protocol = htons(ETH_P_IP);
                        iph = ip_hdr(skb);
                        iph->tot_len = htons(skb->len - skb_network_offset(skb));
                        ip_send_check(iph);

                        if (cache_vport) {
                                OVS_CB(skb)->flow = cache->flow;
                                compute_ip_summed(skb, true);
                                vport_receive(cache_vport, skb);
                                sent_len += orig_len;
                        } else {
                                int xmit_err;

                                skb->dev = rt_dst(rt).dev;
                                xmit_err = dev_queue_xmit(skb);

                                if (likely(net_xmit_eval(xmit_err) == 0))
                                        sent_len += orig_len;
                        }
                } else
                        sent_len += send_frags(skb, mutable);

next:
                skb = next_skb;
        }

        if (unlikely(sent_len == 0))
                vport_record_error(vport, VPORT_E_TX_DROPPED);

        goto out;

error_free:
        tnl_free_linked_skbs(skb);
error:
        dst_release(unattached_dst);
        vport_record_error(vport, err);
out:
        return sent_len;
}

static const struct nla_policy tnl_policy[ODP_TUNNEL_ATTR_MAX + 1] = {
        [ODP_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
        [ODP_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
        [ODP_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
        [ODP_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
        [ODP_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
        [ODP_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
        [ODP_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
};
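
/*
 * Illustrative configuration (added commentary, hypothetical values): a
 * nested options block validated by this policy might contain:
 *
 *        ODP_TUNNEL_ATTR_FLAGS    = TNL_F_PMTUD
 *        ODP_TUNNEL_ATTR_DST_IPV4 = 192.0.2.1
 *        ODP_TUNNEL_ATTR_IN_KEY   = 100
 *
 * Omitting ODP_TUNNEL_ATTR_IN_KEY makes tnl_set_config() below select
 * TNL_T_KEY_MATCH / TNL_F_IN_KEY_MATCH instead of an exact key.
 */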

/* Sets ODP_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */
static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
                          const struct vport *cur_vport,
                          struct tnl_mutable_config *mutable)
{
        const struct vport *old_vport;
        const struct tnl_mutable_config *old_mutable;
        struct nlattr *a[ODP_TUNNEL_ATTR_MAX + 1];
        int err;

        if (!options)
                return -EINVAL;

        err = nla_parse_nested(a, ODP_TUNNEL_ATTR_MAX, options, tnl_policy);
        if (err)
                return err;

        if (!a[ODP_TUNNEL_ATTR_FLAGS] || !a[ODP_TUNNEL_ATTR_DST_IPV4])
                return -EINVAL;

        mutable->flags = nla_get_u32(a[ODP_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;

        if (a[ODP_TUNNEL_ATTR_SRC_IPV4])
                mutable->saddr = nla_get_be32(a[ODP_TUNNEL_ATTR_SRC_IPV4]);
        mutable->daddr = nla_get_be32(a[ODP_TUNNEL_ATTR_DST_IPV4]);

        if (a[ODP_TUNNEL_ATTR_TOS]) {
                mutable->tos = nla_get_u8(a[ODP_TUNNEL_ATTR_TOS]);
                if (mutable->tos != RT_TOS(mutable->tos))
                        return -EINVAL;
        }

        if (a[ODP_TUNNEL_ATTR_TTL])
                mutable->ttl = nla_get_u8(a[ODP_TUNNEL_ATTR_TTL]);

        mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
        if (mutable->tunnel_hlen < 0)
                return mutable->tunnel_hlen;

        mutable->tunnel_hlen += sizeof(struct iphdr);

        mutable->tunnel_type = tnl_ops->tunnel_type;
        if (!a[ODP_TUNNEL_ATTR_IN_KEY]) {
                mutable->tunnel_type |= TNL_T_KEY_MATCH;
                mutable->flags |= TNL_F_IN_KEY_MATCH;
        } else {
                mutable->tunnel_type |= TNL_T_KEY_EXACT;
                mutable->in_key = nla_get_be64(a[ODP_TUNNEL_ATTR_IN_KEY]);
        }

        if (!a[ODP_TUNNEL_ATTR_OUT_KEY])
                mutable->flags |= TNL_F_OUT_KEY_ACTION;
        else
                mutable->out_key = nla_get_be64(a[ODP_TUNNEL_ATTR_OUT_KEY]);

        old_vport = tnl_find_port(mutable->saddr, mutable->daddr,
                                  mutable->in_key, mutable->tunnel_type,
                                  &old_mutable);

        if (old_vport && old_vport != cur_vport)
                return -EEXIST;

        return 0;
}

struct vport *tnl_create(const struct vport_parms *parms,
                         const struct vport_ops *vport_ops,
                         const struct tnl_ops *tnl_ops)
{
        struct vport *vport;
        struct tnl_vport *tnl_vport;
        struct tnl_mutable_config *mutable;
        int initial_frag_id;
        int err;

        vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                goto error;
        }

        tnl_vport = tnl_vport_priv(vport);

        strcpy(tnl_vport->name, parms->name);
        tnl_vport->tnl_ops = tnl_ops;

        mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable) {
                err = -ENOMEM;
                goto error_free_vport;
        }

        vport_gen_rand_ether_addr(mutable->eth_addr);

        get_random_bytes(&initial_frag_id, sizeof(int));
        atomic_set(&tnl_vport->frag_id, initial_frag_id);

        err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
        if (err)
                goto error_free_mutable;

        spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
        tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
                                       (net_random() % (MAX_CACHE_EXP / 2));
#endif

        rcu_assign_pointer(tnl_vport->mutable, mutable);

        err = add_port(vport);
        if (err)
                goto error_free_mutable;

        return vport;

error_free_mutable:
        kfree(mutable);
error_free_vport:
        vport_free(vport);
error:
        return ERR_PTR(err);
}
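
/*
 * Usage sketch (added commentary; caller and symbol names are assumed): a
 * protocol implementation such as vport-gre supplies its own ops tables:
 *
 *        static struct vport *gre_create(const struct vport_parms *parms)
 *        {
 *                return tnl_create(parms, &gre_vport_ops, &gre_tnl_ops);
 *        }
 */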

int tnl_set_options(struct vport *vport, struct nlattr *options)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *old_mutable;
        struct tnl_mutable_config *mutable;
        int err;

        mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable) {
                err = -ENOMEM;
                goto error;
        }

        /* Copy fields whose values should be retained. */
        old_mutable = rtnl_dereference(tnl_vport->mutable);
        mutable->seq = old_mutable->seq + 1;
        memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);

        /* Parse the others configured by userspace. */
        err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);
        if (err)
                goto error_free;

        err = move_port(vport, mutable);
        if (err)
                goto error_free;

        return 0;

error_free:
        kfree(mutable);
error:
        return err;
}

int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);

        NLA_PUT_U32(skb, ODP_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC);
        NLA_PUT_BE32(skb, ODP_TUNNEL_ATTR_DST_IPV4, mutable->daddr);

        if (!(mutable->flags & TNL_F_IN_KEY_MATCH))
                NLA_PUT_BE64(skb, ODP_TUNNEL_ATTR_IN_KEY, mutable->in_key);
        if (!(mutable->flags & TNL_F_OUT_KEY_ACTION))
                NLA_PUT_BE64(skb, ODP_TUNNEL_ATTR_OUT_KEY, mutable->out_key);
        if (mutable->saddr)
                NLA_PUT_BE32(skb, ODP_TUNNEL_ATTR_SRC_IPV4, mutable->saddr);
        if (mutable->tos)
                NLA_PUT_U8(skb, ODP_TUNNEL_ATTR_TOS, mutable->tos);
        if (mutable->ttl)
                NLA_PUT_U8(skb, ODP_TUNNEL_ATTR_TTL, mutable->ttl);

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static void free_port_rcu(struct rcu_head *rcu)
{
        struct tnl_vport *tnl_vport = container_of(rcu,
                                                   struct tnl_vport, rcu);

        free_cache((struct tnl_cache __force *)tnl_vport->cache);
        kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
        vport_free(tnl_vport_to_vport(tnl_vport));
}

int tnl_destroy(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable, *old_mutable;

        mutable = rtnl_dereference(tnl_vport->mutable);

        if (vport == tnl_find_port(mutable->saddr, mutable->daddr,
                                   mutable->in_key, mutable->tunnel_type,
                                   &old_mutable))
                del_port(vport);

        call_rcu(&tnl_vport->rcu, free_port_rcu);

        return 0;
}

int tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *mutable;

        mutable = kmemdup(rtnl_dereference(tnl_vport->mutable),
                          sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable)
                return -ENOMEM;

        memcpy(mutable->eth_addr, addr, ETH_ALEN);
        assign_config_rcu(vport, mutable);

        return 0;
}

const char *tnl_get_name(const struct vport *vport)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        return tnl_vport->name;
}

const unsigned char *tnl_get_addr(const struct vport *vport)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
}

void tnl_free_linked_skbs(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;

        while (skb) {
                struct sk_buff *next = skb->next;
                kfree_skb(skb);
                skb = next;
        }
}