/*
 * Copyright (c) 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "actions.h"
#include "checksum.h"
#include "datapath.h"
#include "table.h"
#include "tunnel.h"
#include "vport.h"
#include "vport-generic.h"
#include "vport-internal_dev.h"

#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets while
 * a longer time increases performance by reducing the frequency that the
 * cache needs to be rebuilt.  A variety of factors may cause the cache to be
 * invalidated before the expiration time but this is the maximum.  The time
 * is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
#endif

/*
 * Interval to check for and remove caches that are no longer valid.  Caches
 * are checked for validity before they are used for packet encapsulation and
 * old caches are removed at that time.  However, if no packets are sent through
 * the tunnel then the cache will never be destroyed.  Since it holds
 * references to a number of system objects, the cache will continue to use
 * system resources by not allowing those objects to be destroyed.  The cache
 * cleaner is periodically run to free invalid caches.  It does not
 * significantly affect system performance.  A lower interval will release
 * resources faster but will itself consume resources by requiring more frequent
 * checks.  A longer interval may result in messages being printed to the kernel
 * message buffer about unreleased resources.  The interval is expressed in
 * jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)

#define CACHE_DATA_ALIGN 16

static struct tbl __rcu *port_table __read_mostly;

static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);

/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

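/*
 * Linux 2.6.36 removed the union that wrapped struct dst_entry inside
 * struct rtable, renaming rt->u.dst to rt->dst; the rt_dst() macro papers
 * over that difference so the rest of this file stays version-agnostic.
 */
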
static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
        return vport_from_priv(tnl_vport);
}

static inline struct tnl_vport *tnl_vport_table_cast(const struct tbl_node *node)
{
        return container_of(node, struct tnl_vport, tbl_node);
}

static inline void schedule_cache_cleaner(void)
{
        schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}

static void free_cache(struct tnl_cache *cache)
{
        if (!cache)
                return;

        flow_put(cache->flow);
        ip_rt_put(cache->rt);
        kfree(cache);
}

static void free_config_rcu(struct rcu_head *rcu)
{
        struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
        kfree(c);
}

static void free_cache_rcu(struct rcu_head *rcu)
{
        struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
        free_cache(c);
}

static void assign_config_rcu(struct vport *vport,
                              struct tnl_mutable_config *new_config)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *old_config;

        old_config = rtnl_dereference(tnl_vport->mutable);
        rcu_assign_pointer(tnl_vport->mutable, new_config);
        call_rcu(&old_config->rcu, free_config_rcu);
}

static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *old_cache;

        old_cache = tnl_vport->cache;
        rcu_assign_pointer(tnl_vport->cache, new_cache);

        if (old_cache)
                call_rcu(&old_cache->rcu, free_cache_rcu);
}

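/*
 * Both assign_*_rcu() helpers follow the classic RCU publish/reclaim
 * pattern: the new object is fully initialized first, rcu_assign_pointer()
 * publishes it with the required memory barrier, and the old object is
 * only freed via call_rcu() after every reader that might still hold a
 * reference has left its RCU read-side critical section.  Readers pair
 * with this by using rcu_dereference() under rcu_read_lock().
 */
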
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
        if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
                if (mutable->port_config.saddr)
                        return &local_remote_ports;
                else
                        return &remote_ports;
        } else {
                if (mutable->port_config.saddr)
                        return &key_local_remote_ports;
                else
                        return &key_remote_ports;
        }
}

struct port_lookup_key {
        const struct tnl_mutable_config *mutable;
        __be64 key;
        u32 tunnel_type;
        __be32 saddr;
        __be32 daddr;
};

/*
 * Modifies 'target' to store the rcu_dereferenced pointer that was used to do
 * the comparison.
 */
static int port_cmp(const struct tbl_node *node, void *target)
{
        const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
        struct port_lookup_key *lookup = target;

        lookup->mutable = rcu_dereference(tnl_vport->mutable);

        return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
                lookup->mutable->port_config.daddr == lookup->daddr &&
                lookup->mutable->port_config.in_key == lookup->key &&
                lookup->mutable->port_config.saddr == lookup->saddr);
}

static u32 port_hash(struct port_lookup_key *k)
{
        u32 x = jhash_3words((__force u32)k->saddr, (__force u32)k->daddr,
                             k->tunnel_type, 0);
        return jhash_2words((__force u64)k->key >> 32, (__force u32)k->key, x);
}

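/*
 * port_hash() folds the lookup tuple into 32 bits in two steps: the source
 * address, destination address, and tunnel type go through jhash_3words(),
 * and that result then seeds a jhash_2words() over the high and low 32-bit
 * halves of the 64-bit tunnel key.  Splitting the key this way lets one
 * hash function cover both keyed and keyless ports; a keyless port simply
 * mixes in two zero words.
 */
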
static u32 mutable_hash(const struct tnl_mutable_config *mutable)
{
        struct port_lookup_key lookup;

        lookup.saddr = mutable->port_config.saddr;
        lookup.daddr = mutable->port_config.daddr;
        lookup.key = mutable->port_config.in_key;
        lookup.tunnel_type = mutable->tunnel_type;

        return port_hash(&lookup);
}

static void check_table_empty(void)
{
        struct tbl *old_table = rtnl_dereference(port_table);

        if (tbl_count(old_table) == 0) {
                cancel_delayed_work_sync(&cache_cleaner_wq);
                rcu_assign_pointer(port_table, NULL);
                tbl_deferred_destroy(old_table, NULL);
        }
}

static int add_port(struct vport *vport)
{
        struct tbl *cur_table = rtnl_dereference(port_table);
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        int err;

        if (!port_table) {
                struct tbl *new_table;

                new_table = tbl_create(0);
                if (!new_table)
                        return -ENOMEM;

                rcu_assign_pointer(port_table, new_table);
                schedule_cache_cleaner();

        } else if (tbl_count(cur_table) > tbl_n_buckets(cur_table)) {
                struct tbl *new_table;

                new_table = tbl_expand(cur_table);
                if (IS_ERR(new_table))
                        return PTR_ERR(new_table);

                rcu_assign_pointer(port_table, new_table);
                tbl_deferred_destroy(cur_table, NULL);
        }

        err = tbl_insert(rtnl_dereference(port_table), &tnl_vport->tbl_node,
                         mutable_hash(rtnl_dereference(tnl_vport->mutable)));
        if (err) {
                check_table_empty();
                return err;
        }

        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;

        return 0;
}

static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
{
        int err;
        struct tbl *cur_table = rtnl_dereference(port_table);
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        u32 hash;

        hash = mutable_hash(new_mutable);
        if (hash == tnl_vport->tbl_node.hash)
                goto table_updated;

        /*
         * Ideally we should make this move atomic to avoid having gaps in
         * finding tunnels or the possibility of failure.  However, if we do
         * find a tunnel it will always be consistent.
         */
        err = tbl_remove(cur_table, &tnl_vport->tbl_node);
        if (err)
                return err;

        err = tbl_insert(cur_table, &tnl_vport->tbl_node, hash);
        if (err) {
                (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
                check_table_empty();
                return err;
        }

table_updated:
        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
        assign_config_rcu(vport, new_mutable);
        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;

        return 0;
}

static int del_port(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        int err;

        err = tbl_remove(rtnl_dereference(port_table), &tnl_vport->tbl_node);
        if (err)
                return err;

        check_table_empty();
        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;

        return 0;
}

struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
                            int tunnel_type,
                            const struct tnl_mutable_config **mutable)
{
        struct port_lookup_key lookup;
        struct tbl *table = rcu_dereference(port_table);
        struct tbl_node *tbl_node;

        if (unlikely(!table))
                return NULL;

        lookup.saddr = saddr;
        lookup.daddr = daddr;

        if (tunnel_type & TNL_T_KEY_EXACT) {
                lookup.key = key;
                lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;

                if (key_local_remote_ports) {
                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }

                if (key_remote_ports) {
                        lookup.saddr = 0;

                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;

                        lookup.saddr = saddr;
                }
        }

        if (tunnel_type & TNL_T_KEY_MATCH) {
                lookup.key = 0;
                lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;

                if (local_remote_ports) {
                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }

                if (remote_ports) {
                        lookup.saddr = 0;

                        tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }
        }

        return NULL;

found:
        *mutable = lookup.mutable;
        return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
}

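/*
 * Lookup precedence in tnl_find_port(): ports configured with an exact
 * tunnel key are preferred over flow-based (TNL_T_KEY_MATCH) ports, and
 * within each class a port bound to a specific local address beats one
 * that wildcards it.  The four per-class counters (key_local_remote_ports
 * and friends) let us skip hash probes entirely for classes with no
 * configured ports, keeping the common single-tunnel case to one lookup.
 */
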
static inline void ecn_decapsulate(struct sk_buff *skb)
{
        /* This is accessing the outer IP header of the tunnel, which we've
         * already validated to be OK.  skb->data is currently set to the start
         * of the inner Ethernet header, and we've validated ETH_HLEN.
         */
        if (unlikely(INET_ECN_is_ce(ip_hdr(skb)->tos))) {
                __be16 protocol = skb->protocol;

                skb_set_network_header(skb, ETH_HLEN);

                if (skb->protocol == htons(ETH_P_8021Q)) {
                        if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                                return;

                        protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                        skb_set_network_header(skb, VLAN_ETH_HLEN);
                }

                if (protocol == htons(ETH_P_IP)) {
                        if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                            + sizeof(struct iphdr))))
                                return;

                        IP_ECN_set_ce(ip_hdr(skb));
                }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (protocol == htons(ETH_P_IPV6)) {
                        if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                            + sizeof(struct ipv6hdr))))
                                return;

                        IP6_ECN_set_ce(ipv6_hdr(skb));
                }
#endif
        }
}

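/*
 * This implements the decapsulation half of ECN tunneling (RFC 3168): if
 * the outer IP header carries the Congestion Experienced (CE) codepoint,
 * the mark is copied onto the inner IPv4 or IPv6 header so the congestion
 * signal survives decapsulation.  Non-IP inner payloads are left alone
 * since they have nowhere to carry the mark.
 */
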
/* Called with rcu_read_lock. */
void tnl_rcv(struct vport *vport, struct sk_buff *skb)
{
        /* Packets received by this function are in the following state:
         * - skb->data points to the inner Ethernet header.
         * - The inner Ethernet header is in the linear data area.
         * - skb->csum does not include the inner Ethernet header.
         * - The layer pointers point at the outer headers.
         */

        struct ethhdr *eh = (struct ethhdr *)skb->data;

        if (likely(ntohs(eh->h_proto) >= 1536))
                skb->protocol = eh->h_proto;
        else
                skb->protocol = htons(ETH_P_802_2);

        skb_dst_drop(skb);
        nf_reset(skb);
        secpath_reset(skb);

        ecn_decapsulate(skb);
        compute_ip_summed(skb, false);

        vport_receive(vport, skb);
}

static bool check_ipv4_address(__be32 addr)
{
        if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
            || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
                return false;

        return true;
}

static bool ipv4_should_icmp(struct sk_buff *skb)
{
        struct iphdr *old_iph = ip_hdr(skb);

        /* Don't respond to L2 broadcast. */
        if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
                return false;

        /* Don't respond to L3 broadcast or invalid addresses. */
        if (!check_ipv4_address(old_iph->daddr) ||
            !check_ipv4_address(old_iph->saddr))
                return false;

        /* Only respond to the first fragment. */
        if (old_iph->frag_off & htons(IP_OFFSET))
                return false;

        /* Don't respond to ICMP error messages. */
        if (old_iph->protocol == IPPROTO_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
                                                (old_iph->ihl << 2) +
                                                offsetof(struct icmphdr, type) -
                                                skb->data, sizeof(icmp_type),
                                                &icmp_type);

                if (!icmp_typep)
                        return false;

                if (*icmp_typep > NR_ICMP_TYPES
                        || (*icmp_typep <= ICMP_PARAMETERPROB
                                && *icmp_typep != ICMP_ECHOREPLY
                                && *icmp_typep != ICMP_ECHO))
                        return false;
        }

        return true;
}

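/*
 * These checks mirror the rules a router must follow before emitting an
 * ICMP error (RFC 1122 section 3.2.2, RFC 1812): never respond to
 * link-layer or network-layer broadcasts, to non-initial fragments, or to
 * another ICMP error, since any of those could trigger ICMP storms or
 * amplification.  Echo request/reply are exempt because they are
 * informational messages, not errors.
 */
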
static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                            unsigned int mtu, unsigned int payload_length)
{
        struct iphdr *iph, *old_iph = ip_hdr(skb);
        struct icmphdr *icmph;
        u8 *payload;

        iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
        icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
        payload = skb_put(nskb, payload_length);

        /* IP */
        iph->version            =       4;
        iph->ihl                =       sizeof(struct iphdr) >> 2;
        iph->tos                =       (old_iph->tos & IPTOS_TOS_MASK) |
                                        IPTOS_PREC_INTERNETCONTROL;
        iph->tot_len            =       htons(sizeof(struct iphdr)
                                              + sizeof(struct icmphdr)
                                              + payload_length);
        get_random_bytes(&iph->id, sizeof(iph->id));
        iph->frag_off           =       0;
        iph->ttl                =       IPDEFTTL;
        iph->protocol           =       IPPROTO_ICMP;
        iph->daddr              =       old_iph->saddr;
        iph->saddr              =       old_iph->daddr;

        ip_send_check(iph);

        /* ICMP */
        icmph->type             =       ICMP_DEST_UNREACH;
        icmph->code             =       ICMP_FRAG_NEEDED;
        icmph->un.gateway       =       htonl(mtu);
        icmph->checksum         =       0;

        nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmph->checksum = csum_fold(nskb->csum);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
        struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
        int addr_type;
        int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
        u8 nexthdr = ipv6_hdr(skb)->nexthdr;

        /* Check source address is valid. */
        addr_type = ipv6_addr_type(&old_ipv6h->saddr);
        if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
                return false;

        /* Don't reply to unspecified addresses. */
        if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
                return false;

        /* Don't respond to ICMP error messages. */
        payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
        if (payload_off < 0)
                return false;

        if (nexthdr == NEXTHDR_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, payload_off +
                                                offsetof(struct icmp6hdr,
                                                        icmp6_type),
                                                sizeof(icmp_type), &icmp_type);

                if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
                        return false;
        }

        return true;
}

static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                            unsigned int mtu, unsigned int payload_length)
{
        struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
        struct icmp6hdr *icmp6h;
        u8 *payload;

        ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
        icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
        payload = skb_put(nskb, payload_length);

        /* IPv6 */
        ipv6h->version          =       6;
        ipv6h->priority         =       0;
        memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
        ipv6h->payload_len      =       htons(sizeof(struct icmp6hdr)
                                              + payload_length);
        ipv6h->nexthdr          =       NEXTHDR_ICMP;
        ipv6h->hop_limit        =       IPV6_DEFAULT_HOPLIMIT;
        ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
        ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

        /* ICMPv6 */
        icmp6h->icmp6_type      =       ICMPV6_PKT_TOOBIG;
        icmp6h->icmp6_code      =       0;
        icmp6h->icmp6_cksum     =       0;
        icmp6h->icmp6_mtu       =       htonl(mtu);

        nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
                                                sizeof(struct icmp6hdr)
                                                + payload_length,
                                                ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */

bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
                     struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
        unsigned int eth_hdr_len = ETH_HLEN;
        unsigned int total_length = 0, header_length = 0, payload_length;
        struct ethhdr *eh, *old_eh = eth_hdr(skb);
        struct sk_buff *nskb;

        /* Sanity check */
        if (skb->protocol == htons(ETH_P_IP)) {
                if (mtu < IP_MIN_MTU)
                        return false;

                if (!ipv4_should_icmp(skb))
                        return true;
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (mtu < IPV6_MIN_MTU)
                        return false;

                /*
                 * In theory we should do PMTUD on IPv6 multicast messages but
                 * we don't have an address to send from, so just fragment.
                 */
                if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
                        return false;

                if (!ipv6_should_icmp(skb))
                        return true;
        }
#endif
        else
                return false;

        /* Allocate */
        if (old_eh->h_proto == htons(ETH_P_8021Q))
                eth_hdr_len = VLAN_ETH_HLEN;

        payload_length = skb->len - eth_hdr_len;
        if (skb->protocol == htons(ETH_P_IP)) {
                header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
                total_length = min_t(unsigned int, header_length +
                                                   payload_length, 576);
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else {
                header_length = sizeof(struct ipv6hdr) +
                                sizeof(struct icmp6hdr);
                total_length = min_t(unsigned int, header_length +
                                                  payload_length, IPV6_MIN_MTU);
        }
#endif

        total_length = min(total_length, mutable->mtu);
        payload_length = total_length - header_length;

        nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
                             payload_length);
        if (!nskb)
                return false;

        skb_reserve(nskb, NET_IP_ALIGN);

        /* Ethernet / VLAN */
        eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
        memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
        memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
        nskb->protocol = eh->h_proto = old_eh->h_proto;
        if (old_eh->h_proto == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

                vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
                vh->h_vlan_encapsulated_proto = skb->protocol;
        }
        skb_reset_mac_header(nskb);

        /* Protocol */
        if (skb->protocol == htons(ETH_P_IP))
                ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else
                ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

        /*
         * Assume that flow-based keys are symmetric with respect to input
         * and output and use the key that we were going to put on the
         * outgoing packet for the fake received packet.  If the keys are
         * not symmetric then PMTUD needs to be disabled since we won't have
         * any way of synthesizing packets.
         */
        if ((mutable->port_config.flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
            (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
                OVS_CB(nskb)->tun_id = flow_key;

        compute_ip_summed(nskb, false);
        vport_receive(vport, nskb);

        return true;
}

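/*
 * Rather than waiting for a router on the path to complain,
 * tnl_frag_needed() synthesizes the PMTUD signal locally: it fabricates
 * an ICMP_FRAG_NEEDED (or ICMPV6_PKT_TOOBIG) message that appears to
 * come from the inner packet's destination and feeds it back into the
 * datapath with vport_receive(), so the sender lowers its path MTU
 * immediately.  The 576-byte cap on the IPv4 reply keeps it within the
 * datagram size every IPv4 host is required to accept (RFC 791).
 */
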
static bool check_mtu(struct sk_buff *skb,
                      struct vport *vport,
                      const struct tnl_mutable_config *mutable,
                      const struct rtable *rt, __be16 *frag_offp)
{
        int mtu;
        __be16 frag_off;

        frag_off = (mutable->port_config.flags & TNL_F_PMTUD) ? htons(IP_DF) : 0;
        if (frag_off)
                mtu = dst_mtu(&rt_dst(rt))
                        - ETH_HLEN
                        - mutable->tunnel_hlen
                        - (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
        else
                mtu = mutable->mtu;

        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *old_iph = ip_hdr(skb);

                frag_off |= old_iph->frag_off & htons(IP_DF);
                mtu = max(mtu, IP_MIN_MTU);

                if ((old_iph->frag_off & htons(IP_DF)) &&
                    mtu < ntohs(old_iph->tot_len)) {
                        if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
                                goto drop;
                }
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                unsigned int packet_length = skb->len - ETH_HLEN
                        - (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);

                mtu = max(mtu, IPV6_MIN_MTU);

                /* IPv6 requires PMTUD if the packet is above the minimum MTU. */
                if (packet_length > IPV6_MIN_MTU)
                        frag_off = htons(IP_DF);

                if (mtu < packet_length) {
                        if (tnl_frag_needed(vport, mutable, skb, mtu, OVS_CB(skb)->tun_id))
                                goto drop;
                }
        }
#endif

        *frag_offp = frag_off;
        return true;

drop:
        *frag_offp = 0;
        return false;
}

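/*
 * Worked example of the PMTUD arithmetic above (illustrative numbers,
 * assuming a plain 4-byte GRE encapsulation, i.e. tunnel_hlen of
 * 20 + 4 bytes): with a 1500-byte route MTU and an untagged inner frame,
 *
 *     mtu = 1500 - ETH_HLEN (14) - tunnel_hlen (24) = 1462
 *
 * so an inner IPv4 packet of up to 1462 bytes with DF set passes, while
 * a larger one triggers the synthetic "fragmentation needed" reply
 * advertising 1462.
 */
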
static void create_tunnel_header(const struct vport *vport,
                                 const struct tnl_mutable_config *mutable,
                                 const struct rtable *rt, void *header)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct iphdr *iph = header;

        iph->version    = 4;
        iph->ihl        = sizeof(struct iphdr) >> 2;
        iph->frag_off   = htons(IP_DF);
        iph->protocol   = tnl_vport->tnl_ops->ipproto;
        iph->tos        = mutable->port_config.tos;
        iph->daddr      = rt->rt_dst;
        iph->saddr      = rt->rt_src;
        iph->ttl        = mutable->port_config.ttl;
        if (!iph->ttl)
                iph->ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

        tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
}

static inline void *get_cached_header(const struct tnl_cache *cache)
{
        return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}

static inline bool check_cache_valid(const struct tnl_cache *cache,
                                     const struct tnl_mutable_config *mutable)
{
        return cache &&
#ifdef NEED_CACHE_TIMEOUT
                time_before(jiffies, cache->expiration) &&
#endif
#ifdef HAVE_RT_GENID
                atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
#endif
#ifdef HAVE_HH_SEQ
                rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
#endif
                mutable->seq == cache->mutable_seq &&
                (!is_internal_dev(rt_dst(cache->rt).dev) ||
                (cache->flow && !cache->flow->dead));
}

static int cache_cleaner_cb(struct tbl_node *tbl_node, void *aux)
{
        struct tnl_vport *tnl_vport = tnl_vport_table_cast(tbl_node);
        const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
        const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

        if (cache && !check_cache_valid(cache, mutable) &&
            spin_trylock_bh(&tnl_vport->cache_lock)) {
                assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
                spin_unlock_bh(&tnl_vport->cache_lock);
        }

        return 0;
}

static void cache_cleaner(struct work_struct *work)
{
        schedule_cache_cleaner();

        rcu_read_lock();
        tbl_foreach(rcu_dereference(port_table), cache_cleaner_cb, NULL);
        rcu_read_unlock();
}

static inline void create_eth_hdr(struct tnl_cache *cache,
                                  const struct rtable *rt)
{
        void *cache_data = get_cached_header(cache);
        int hh_len = rt_dst(rt).hh->hh_len;
        int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;

#ifdef HAVE_HH_SEQ
        unsigned hh_seq;

        do {
                hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
                memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
        } while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));

        cache->hh_seq = hh_seq;
#else
        read_lock_bh(&rt_dst(rt).hh->hh_lock);
        memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
        read_unlock_bh(&rt_dst(rt).hh->hh_lock);
#endif
}

static struct tnl_cache *build_cache(struct vport *vport,
                                     const struct tnl_mutable_config *mutable,
                                     struct rtable *rt)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *cache;
        void *cache_data;
        int cache_len;

        if (!(mutable->port_config.flags & TNL_F_HDR_CACHE))
                return NULL;

        /*
         * If there is no entry in the ARP cache or if this device does not
         * support hard header caching just fall back to the IP stack.
         */
        if (!rt_dst(rt).hh)
                return NULL;

        /*
         * If lock is contended fall back to directly building the header.
         * We're not going to help performance by sitting here spinning.
         */
        if (!spin_trylock_bh(&tnl_vport->cache_lock))
                return NULL;

        cache = tnl_vport->cache;
        if (check_cache_valid(cache, mutable))
                goto unlock;
        else
                cache = NULL;

        cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;

        cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
                        cache_len, GFP_ATOMIC);
        if (!cache)
                goto unlock;

        cache->len = cache_len;

        create_eth_hdr(cache, rt);
        cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;

        create_tunnel_header(vport, mutable, rt, cache_data);

        cache->mutable_seq = mutable->seq;
        cache->rt = rt;
#ifdef NEED_CACHE_TIMEOUT
        cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif

        if (is_internal_dev(rt_dst(rt).dev)) {
                struct odp_flow_key flow_key;
                struct tbl_node *flow_node;
                struct vport *vport;
                struct sk_buff *skb;
                bool is_frag;
                int err;

                vport = internal_dev_get_vport(rt_dst(rt).dev);
                if (!vport)
                        goto done;

                skb = alloc_skb(cache->len, GFP_ATOMIC);
                if (!skb)
                        goto done;

                __skb_put(skb, cache->len);
                memcpy(skb->data, get_cached_header(cache), cache->len);

                err = flow_extract(skb, vport->port_no, &flow_key, &is_frag);

                kfree_skb(skb);
                if (err || is_frag)
                        goto done;

                flow_node = tbl_lookup(rcu_dereference(vport->dp->table),
                                       &flow_key, flow_hash(&flow_key),
                                       flow_cmp);
                if (flow_node) {
                        struct sw_flow *flow = flow_cast(flow_node);

                        cache->flow = flow;
                        flow_hold(flow);
                }
        }

done:
        assign_cache_rcu(vport, cache);

unlock:
        spin_unlock_bh(&tnl_vport->cache_lock);

        return cache;
}

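/*
 * The cache built here is one contiguous allocation:
 *
 *     [ struct tnl_cache | pad to CACHE_DATA_ALIGN | L2 header |
 *       outer IP header | protocol-specific encapsulation header ]
 *
 * so the fast path in tnl_send() can prepend the entire pre-built header
 * stack with a single skb_push() and memcpy().  When the route points at
 * one of our own internal devices, the matching flow is looked up once
 * and pinned with flow_hold(), letting the transmit path hand packets
 * straight back to the datapath without a per-packet flow lookup.
 */
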
static struct rtable *find_route(struct vport *vport,
                                 const struct tnl_mutable_config *mutable,
                                 u8 tos, struct tnl_cache **cache)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

        *cache = NULL;
        tos = RT_TOS(tos);

        if (likely(tos == mutable->port_config.tos &&
                   check_cache_valid(cur_cache, mutable))) {
                *cache = cur_cache;
                return cur_cache->rt;
        } else {
                struct rtable *rt;
                struct flowi fl = { .nl_u = { .ip4_u =
                                              { .daddr = mutable->port_config.daddr,
                                                .saddr = mutable->port_config.saddr,
                                                .tos = tos } },
                                    .proto = tnl_vport->tnl_ops->ipproto };

                if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
                        return NULL;

                if (likely(tos == mutable->port_config.tos))
                        *cache = build_cache(vport, mutable, rt);

                return rt;
        }
}

static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
{
        if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
                struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
                if (unlikely(!nskb)) {
                        kfree_skb(skb);
                        return ERR_PTR(-ENOMEM);
                }

                set_skb_csum_bits(skb, nskb);

                if (skb->sk)
                        skb_set_owner_w(nskb, skb->sk);

                kfree_skb(skb);
                return nskb;
        }

        return skb;
}

static inline bool need_linearize(const struct sk_buff *skb)
{
        int i;

        if (unlikely(skb_shinfo(skb)->frag_list))
                return true;

        /*
         * Generally speaking we should linearize if there are paged frags.
         * However, if all of the refcounts are 1 we know nobody else can
         * change them from underneath us and we can skip the linearization.
         */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
                        return true;

        return false;
}

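/*
 * Each paged fragment has its own page and therefore its own refcount,
 * which is why the loop checks every frags[i]: a page shared with another
 * owner (page_count > 1) can be rewritten between the moment we checksum
 * the data and the moment the device reads it, and that is precisely the
 * race that linearizing into freshly allocated private memory avoids.
 */
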
static struct sk_buff *handle_offloads(struct sk_buff *skb,
                                       const struct tnl_mutable_config *mutable,
                                       const struct rtable *rt)
{
        int min_headroom;
        int err;

        forward_ip_summed(skb);

        err = vswitch_skb_checksum_setup(skb);
        if (unlikely(err))
                goto error_free;

        min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
                        + mutable->tunnel_hlen;

        if (skb_is_gso(skb)) {
                struct sk_buff *nskb;

                /*
                 * If we are doing GSO on a pskb it is better to make sure that
                 * the headroom is correct now.  We will only have to copy the
                 * portion in the linear data area and GSO will preserve
                 * headroom when it creates the segments.  This is particularly
                 * beneficial on Xen where we get a lot of GSO pskbs.
                 * Conversely, we avoid copying if it is just to get our own
                 * writable clone because GSO will do the copy for us.
                 */
                if (skb_headroom(skb) < min_headroom) {
                        skb = check_headroom(skb, min_headroom);
                        if (IS_ERR(skb)) {
                                err = PTR_ERR(skb);
                                goto error;
                        }
                }

                nskb = skb_gso_segment(skb, 0);
                kfree_skb(skb);
                if (IS_ERR(nskb)) {
                        err = PTR_ERR(nskb);
                        goto error;
                }

                skb = nskb;
        } else {
                skb = check_headroom(skb, min_headroom);
                if (IS_ERR(skb)) {
                        err = PTR_ERR(skb);
                        goto error;
                }

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        /*
                         * Pages aren't locked and could change at any time.
                         * If this happens after we compute the checksum, the
                         * checksum will be wrong.  We linearize now to avoid
                         * this problem.
                         */
                        if (unlikely(need_linearize(skb))) {
                                err = __skb_linearize(skb);
                                if (unlikely(err))
                                        goto error_free;
                        }

                        err = skb_checksum_help(skb);
                        if (unlikely(err))
                                goto error_free;
                } else if (skb->ip_summed == CHECKSUM_COMPLETE)
                        skb->ip_summed = CHECKSUM_NONE;
        }

        return skb;

error_free:
        kfree_skb(skb);
error:
        return ERR_PTR(err);
}

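/*
 * handle_offloads() resolves everything the hardware can no longer do
 * once the packet is wrapped in a tunnel header: GSO packets are
 * segmented in software with skb_gso_segment() (which returns a list of
 * segments linked through skb->next), and pending CHECKSUM_PARTIAL
 * checksums are completed with skb_checksum_help(), since the NIC would
 * otherwise checksum the outer header rather than the inner one.
 */
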
static int send_frags(struct sk_buff *skb,
                      const struct tnl_mutable_config *mutable)
{
        int sent_len;
        int err;

        sent_len = 0;
        while (skb) {
                struct sk_buff *next = skb->next;
                int frag_len = skb->len - mutable->tunnel_hlen;

                skb->next = NULL;
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

                err = ip_local_out(skb);
                if (likely(net_xmit_eval(err) == 0))
                        sent_len += frag_len;
                else {
                        skb = next;
                        goto free_frags;
                }

                skb = next;
        }

        return sent_len;

free_frags:
        /*
         * There's no point in continuing to send fragments once one has been
         * dropped so just free the rest.  This may also help ease the
         * congestion that caused the first packet to be dropped.
         */
        tnl_free_linked_skbs(skb);
        return sent_len;
}

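/*
 * Each fragment is handed to ip_local_out(), so the stack performs its
 * usual output processing (netfilter LOCAL_OUT, further fragmentation if
 * still needed) on the fully encapsulated packet.  net_xmit_eval() treats
 * NET_XMIT_CN (congestion notification) as success, so only hard drops
 * abort the rest of the list.  The byte count reported back excludes the
 * tunnel header, matching what the caller accounts as payload.
 */
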
int tnl_send(struct vport *vport, struct sk_buff *skb)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);

        enum vport_err_type err = VPORT_E_TX_ERROR;
        struct rtable *rt;
        struct dst_entry *unattached_dst = NULL;
        struct tnl_cache *cache;
        int sent_len = 0;
        __be16 frag_off;
        u8 ttl;
        u8 inner_tos;
        u8 tos;

        /* Validate the protocol headers before we try to use them. */
        if (skb->protocol == htons(ETH_P_8021Q)) {
                if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                        goto error_free;

                skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                skb_set_network_header(skb, VLAN_ETH_HLEN);
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                    + sizeof(struct iphdr))))
                        skb->protocol = 0;
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                    + sizeof(struct ipv6hdr))))
                        skb->protocol = 0;
        }
#endif

        /* ToS */
        if (skb->protocol == htons(ETH_P_IP))
                inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6))
                inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
        else
                inner_tos = 0;

        if (mutable->port_config.flags & TNL_F_TOS_INHERIT)
                tos = inner_tos;
        else
                tos = mutable->port_config.tos;

        tos = INET_ECN_encapsulate(tos, inner_tos);

        /* Route lookup */
        rt = find_route(vport, mutable, tos, &cache);
        if (unlikely(!rt))
                goto error_free;
        if (unlikely(!cache))
                unattached_dst = &rt_dst(rt);

        /* Reset SKB */
        nf_reset(skb);
        secpath_reset(skb);
        skb_dst_drop(skb);

        /* Offloading */
        skb = handle_offloads(skb, mutable, rt);
        if (IS_ERR(skb))
                goto error;

        /* MTU */
        if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
                err = VPORT_E_TX_DROPPED;
                goto error_free;
        }

        /*
         * If we are over the MTU, allow the IP stack to handle fragmentation.
         * Fragmentation is a slow path anyway.
         */
        if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
                     cache)) {
                unattached_dst = &rt_dst(rt);
                dst_hold(unattached_dst);
                cache = NULL;
        }

        /* TTL */
        ttl = mutable->port_config.ttl;
        if (!ttl)
                ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);

        if (mutable->port_config.flags & TNL_F_TTL_INHERIT) {
                if (skb->protocol == htons(ETH_P_IP))
                        ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        ttl = ipv6_hdr(skb)->hop_limit;
#endif
        }

        while (skb) {
                struct iphdr *iph;
                struct sk_buff *next_skb = skb->next;
                skb->next = NULL;

                if (likely(cache)) {
                        skb_push(skb, cache->len);
                        memcpy(skb->data, get_cached_header(cache), cache->len);
                        skb_reset_mac_header(skb);
                        skb_set_network_header(skb, rt_dst(rt).hh->hh_len);

                } else {
                        skb_push(skb, mutable->tunnel_hlen);
                        create_tunnel_header(vport, mutable, rt, skb->data);
                        skb_reset_network_header(skb);

                        if (next_skb)
                                skb_dst_set(skb, dst_clone(unattached_dst));
                        else {
                                skb_dst_set(skb, unattached_dst);
                                unattached_dst = NULL;
                        }
                }
                skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

                iph = ip_hdr(skb);
                iph->tos = tos;
                iph->ttl = ttl;
                iph->frag_off = frag_off;
                ip_select_ident(iph, &rt_dst(rt), NULL);

                skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
                if (unlikely(!skb))
                        goto next;

                if (likely(cache)) {
                        int orig_len = skb->len - cache->len;
                        struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);

                        skb->protocol = htons(ETH_P_IP);
                        iph->tot_len = htons(skb->len - skb_network_offset(skb));
                        ip_send_check(iph);

                        if (cache_vport) {
                                OVS_CB(skb)->flow = cache->flow;
                                compute_ip_summed(skb, true);
                                vport_receive(cache_vport, skb);
                                sent_len += orig_len;
                        } else {
                                int err;

                                skb->dev = rt_dst(rt).dev;
                                err = dev_queue_xmit(skb);

                                if (likely(net_xmit_eval(err) == 0))
                                        sent_len += orig_len;
                        }
                } else
                        sent_len += send_frags(skb, mutable);

next:
                skb = next_skb;
        }

        if (unlikely(sent_len == 0))
                vport_record_error(vport, VPORT_E_TX_DROPPED);

        goto out;

error_free:
        tnl_free_linked_skbs(skb);
error:
        dst_release(unattached_dst);
        vport_record_error(vport, err);
out:
        return sent_len;
}

static int set_config(const void *config, const struct tnl_ops *tnl_ops,
                      const struct vport *cur_vport,
                      struct tnl_mutable_config *mutable)
{
        const struct vport *old_vport;
        const struct tnl_mutable_config *old_mutable;

        mutable->port_config = *(struct tnl_port_config *)config;

        if (mutable->port_config.daddr == 0)
                return -EINVAL;

        if (mutable->port_config.tos != RT_TOS(mutable->port_config.tos))
                return -EINVAL;

        mutable->tunnel_hlen = tnl_ops->hdr_len(&mutable->port_config);
        if (mutable->tunnel_hlen < 0)
                return mutable->tunnel_hlen;

        mutable->tunnel_hlen += sizeof(struct iphdr);

        mutable->tunnel_type = tnl_ops->tunnel_type;
        if (mutable->port_config.flags & TNL_F_IN_KEY_MATCH) {
                mutable->tunnel_type |= TNL_T_KEY_MATCH;
                mutable->port_config.in_key = 0;
        } else
                mutable->tunnel_type |= TNL_T_KEY_EXACT;

        old_vport = tnl_find_port(mutable->port_config.saddr,
                                  mutable->port_config.daddr,
                                  mutable->port_config.in_key,
                                  mutable->tunnel_type,
                                  &old_mutable);

        if (old_vport && old_vport != cur_vport)
                return -EEXIST;

        if (mutable->port_config.flags & TNL_F_OUT_KEY_ACTION)
                mutable->port_config.out_key = 0;

        return 0;
}

struct vport *tnl_create(const struct vport_parms *parms,
                         const struct vport_ops *vport_ops,
                         const struct tnl_ops *tnl_ops)
{
        struct vport *vport;
        struct tnl_vport *tnl_vport;
        int initial_frag_id;
        int err;

        vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                goto error;
        }

        tnl_vport = tnl_vport_priv(vport);

        strcpy(tnl_vport->name, parms->name);
        tnl_vport->tnl_ops = tnl_ops;

        tnl_vport->mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!tnl_vport->mutable) {
                err = -ENOMEM;
                goto error_free_vport;
        }

        vport_gen_rand_ether_addr(tnl_vport->mutable->eth_addr);
        tnl_vport->mutable->mtu = ETH_DATA_LEN;

        get_random_bytes(&initial_frag_id, sizeof(int));
        atomic_set(&tnl_vport->frag_id, initial_frag_id);

        err = set_config(parms->config, tnl_ops, NULL, tnl_vport->mutable);
        if (err)
                goto error_free_mutable;

        spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
        tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
                                        (net_random() % (MAX_CACHE_EXP / 2));
#endif

        err = add_port(vport);
        if (err)
                goto error_free_mutable;

        return vport;

error_free_mutable:
        kfree(tnl_vport->mutable);
error_free_vport:
        vport_free(vport);
error:
        return ERR_PTR(err);
}

int tnl_modify(struct vport *vport, struct odp_port *port)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *mutable;
        int err;

        mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable) {
                err = -ENOMEM;
                goto error;
        }

        err = set_config(port->config, tnl_vport->tnl_ops, vport, mutable);
        if (err)
                goto error_free;

        mutable->seq++;

        err = move_port(vport, mutable);
        if (err)
                goto error_free;

        return 0;

error_free:
        kfree(mutable);
error:
        return err;
}

static void free_port_rcu(struct rcu_head *rcu)
{
        struct tnl_vport *tnl_vport = container_of(rcu, struct tnl_vport, rcu);

        spin_lock_bh(&tnl_vport->cache_lock);
        free_cache(tnl_vport->cache);
        spin_unlock_bh(&tnl_vport->cache_lock);

        kfree(tnl_vport->mutable);
        vport_free(tnl_vport_to_vport(tnl_vport));
}

int tnl_destroy(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *old_mutable;

        if (vport == tnl_find_port(tnl_vport->mutable->port_config.saddr,
            tnl_vport->mutable->port_config.daddr,
            tnl_vport->mutable->port_config.in_key,
            tnl_vport->mutable->tunnel_type,
            &old_mutable))
                del_port(vport);

        call_rcu(&tnl_vport->rcu, free_port_rcu);

        return 0;
}

int tnl_set_mtu(struct vport *vport, int mtu)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *mutable;

        mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable)
                return -ENOMEM;

        mutable->mtu = mtu;
        assign_config_rcu(vport, mutable);

        return 0;
}

int tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *mutable;

        mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable)
                return -ENOMEM;

        memcpy(mutable->eth_addr, addr, ETH_ALEN);
        assign_config_rcu(vport, mutable);

        return 0;
}

const char *tnl_get_name(const struct vport *vport)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        return tnl_vport->name;
}

const unsigned char *tnl_get_addr(const struct vport *vport)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        return rcu_dereference(tnl_vport->mutable)->eth_addr;
}

int tnl_get_mtu(const struct vport *vport)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        return rcu_dereference(tnl_vport->mutable)->mtu;
}

void tnl_free_linked_skbs(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;

        while (skb) {
                struct sk_buff *next = skb->next;
                kfree_skb(skb);
                skb = next;
        }
}