/*
 * Copyright (c) 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "actions.h"
#include "checksum.h"
#include "datapath.h"
#include "tunnel.h"
#include "vlan.h"
#include "vport.h"
#include "vport-generic.h"
#include "vport-internal_dev.h"

#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system,
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets, while
 * a longer time increases performance by reducing the frequency with which
 * the cache needs to be rebuilt.  A variety of factors may cause the cache to
 * be invalidated before the expiration time, but this is the maximum.  The
 * time is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
#endif

/*
 * Interval to check for and remove caches that are no longer valid.  Caches
 * are checked for validity before they are used for packet encapsulation, and
 * old caches are removed at that time.  However, if no packets are sent
 * through the tunnel, the cache will never be destroyed.  Since it holds
 * references to a number of system objects, the cache will continue to use
 * system resources by not allowing those objects to be destroyed.  The cache
 * cleaner is periodically run to free invalid caches.  It does not
 * significantly affect system performance.  A lower interval will release
 * resources faster, but will itself consume resources by requiring more
 * frequent checks.  A longer interval may result in messages being printed to
 * the kernel message buffer about unreleased resources.  The interval is
 * expressed in jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)

#define CACHE_DATA_ALIGN 16
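/* The port table is a fixed-size hash table.  PORT_TABLE_SIZE must be a
 * power of two because find_bucket() below reduces a 32-bit jhash value to
 * a bucket index by masking with (PORT_TABLE_SIZE - 1).
 */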
#define PORT_TABLE_SIZE  1024

static struct hlist_head *port_table __read_mostly;
static int port_table_count;

static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);

/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

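/* Recover the struct vport that embeds this tunnel private area.  The
 * private data is allocated together with the vport by vport_alloc(), so
 * vport_from_priv() is simple pointer arithmetic.
 */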
static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
	return vport_from_priv(tnl_vport);
}

/* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
 * cache_lock is held, so it is only for update side code.
 */
static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
{
	return rcu_dereference_protected(tnl_vport->cache,
					 lockdep_is_held(&tnl_vport->cache_lock));
}

static inline void schedule_cache_cleaner(void)
{
	schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}

static void free_cache(struct tnl_cache *cache)
{
	if (!cache)
		return;

	flow_put(cache->flow);
	ip_rt_put(cache->rt);
	kfree(cache);
}

static void free_config_rcu(struct rcu_head *rcu)
{
	struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
	kfree(c);
}

static void free_cache_rcu(struct rcu_head *rcu)
{
	struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
	free_cache(c);
}

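/* Publish a new port configuration and reclaim the old one after an RCU
 * grace period.  Writers are serialized by RTNL (note rtnl_dereference());
 * readers access the config with rcu_dereference() and never observe a
 * partially updated structure.  A minimal reader-side sketch (illustrative
 * only):
 *
 *	rcu_read_lock();
 *	mutable = rcu_dereference(tnl_vport->mutable);
 *	... read fields of mutable ...
 *	rcu_read_unlock();
 */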
static void assign_config_rcu(struct vport *vport,
			      struct tnl_mutable_config *new_config)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *old_config;

	old_config = rtnl_dereference(tnl_vport->mutable);
	rcu_assign_pointer(tnl_vport->mutable, new_config);
	call_rcu(&old_config->rcu, free_config_rcu);
}

static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *old_cache;

	old_cache = cache_dereference(tnl_vport);
	rcu_assign_pointer(tnl_vport->cache, new_cache);

	if (old_cache)
		call_rcu(&old_cache->rcu, free_cache_rcu);
}

static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
	if (mutable->flags & TNL_F_IN_KEY_MATCH) {
		if (mutable->key.saddr)
			return &local_remote_ports;
		else
			return &remote_ports;
	} else {
		if (mutable->key.saddr)
			return &key_local_remote_ports;
		else
			return &key_remote_ports;
	}
}

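/* Hash the full lookup key with jhash2().  PORT_KEY_LEN must be a multiple
 * of sizeof(u32), and struct port_lookup_key must contain no uninitialized
 * padding, because port_table_lookup() compares candidate keys with
 * memcmp() over the same length.
 */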
static u32 port_hash(const struct port_lookup_key *key)
{
	return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
}

static inline struct hlist_head *find_bucket(u32 hash)
{
	return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
}

static void port_table_add_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable;
	u32 hash;

	if (port_table_count == 0)
		schedule_cache_cleaner();

	mutable = rtnl_dereference(tnl_vport->mutable);
	hash = port_hash(&mutable->key);
	hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
	port_table_count++;

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}

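/* Move a port to the bucket for its new lookup key.  The RCU delete/add
 * pair means a concurrent reader may transiently miss the port while it is
 * between buckets; that shows up as an ordinary lookup failure and is
 * harmless.
 */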
static void port_table_move_port(struct vport *vport,
				 struct tnl_mutable_config *new_mutable)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	u32 hash;

	hash = port_hash(&new_mutable->key);
	hlist_del_init_rcu(&tnl_vport->hash_node);
	hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
	assign_config_rcu(vport, new_mutable);
	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}

static void port_table_remove_port(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

	hlist_del_init_rcu(&tnl_vport->hash_node);

	port_table_count--;
	if (port_table_count == 0)
		cancel_delayed_work_sync(&cache_cleaner_wq);

	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
}

static struct vport *port_table_lookup(struct port_lookup_key *key,
				       const struct tnl_mutable_config **pmutable)
{
	struct hlist_node *n;
	struct hlist_head *bucket;
	u32 hash = port_hash(key);
	struct tnl_vport *tnl_vport;

	bucket = find_bucket(hash);

	hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
		struct tnl_mutable_config *mutable;

		mutable = rcu_dereference_rtnl(tnl_vport->mutable);
		if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
			*pmutable = mutable;
			return tnl_vport_to_vport(tnl_vport);
		}
	}

	return NULL;
}

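/* Find the tunnel vport that should receive a packet, most specific match
 * first: exact-key ports before flow-based (TNL_T_KEY_MATCH) ports, and
 * within each class a port bound to a specific local address before one
 * matched on the remote address alone.  A protocol module's receive path
 * might look the port up roughly like this (illustrative sketch; the
 * protocol flag TNL_T_PROTO_GRE is assumed for the example):
 *
 *	vport = tnl_find_port(iph->daddr, iph->saddr, key,
 *			      TNL_T_PROTO_GRE | TNL_T_KEY_EXACT |
 *			      TNL_T_KEY_MATCH, &mutable);
 */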
struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
			    int tunnel_type,
			    const struct tnl_mutable_config **mutable)
{
	struct port_lookup_key lookup;
	struct vport *vport;

	lookup.saddr = saddr;
	lookup.daddr = daddr;

	if (tunnel_type & TNL_T_KEY_EXACT) {
		lookup.in_key = key;
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;

		if (key_local_remote_ports) {
			vport = port_table_lookup(&lookup, mutable);
			if (vport)
				return vport;
		}

		if (key_remote_ports) {
			lookup.saddr = 0;
			vport = port_table_lookup(&lookup, mutable);
			if (vport)
				return vport;

			lookup.saddr = saddr;
		}
	}

	if (tunnel_type & TNL_T_KEY_MATCH) {
		lookup.in_key = 0;
		lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;

		if (local_remote_ports) {
			vport = port_table_lookup(&lookup, mutable);
			if (vport)
				return vport;
		}

		if (remote_ports) {
			lookup.saddr = 0;
			vport = port_table_lookup(&lookup, mutable);
			if (vport)
				return vport;
		}
	}

	return NULL;
}

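/* Propagate congestion marking from the outer IP header to the inner
 * packet (cf. RFC 6040): if the encapsulating header arrived with CE set,
 * mark the encapsulated IPv4 or IPv6 header as well.
 */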
static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
{
	if (unlikely(INET_ECN_is_ce(tos))) {
		__be16 protocol = skb->protocol;

		skb_set_network_header(skb, ETH_HLEN);

		if (protocol == htons(ETH_P_8021Q)) {
			if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
				return;

			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
			skb_set_network_header(skb, VLAN_ETH_HLEN);
		}

		if (protocol == htons(ETH_P_IP)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct iphdr))))
				return;

			IP_ECN_set_ce(ip_hdr(skb));
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (protocol == htons(ETH_P_IPV6)) {
			if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
			    + sizeof(struct ipv6hdr))))
				return;

			IP6_ECN_set_ce(ipv6_hdr(skb));
		}
#endif
	}
}

/**
 *	tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 * @tos: ToS from encapsulating IP packet, used to copy ECN bits
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
{
	struct ethhdr *eh;

	skb_reset_mac_header(skb);
	eh = eth_hdr(skb);

	if (likely(ntohs(eh->h_proto) >= 1536))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	skb_dst_drop(skb);
	nf_reset(skb);
	skb_clear_rxhash(skb);
	secpath_reset(skb);

	ecn_decapsulate(skb, tos);
	vlan_set_tci(skb, 0);

	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}

	vport_receive(vport, skb);
}

static bool check_ipv4_address(__be32 addr)
{
	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
	    || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
		return false;

	return true;
}

static bool ipv4_should_icmp(struct sk_buff *skb)
{
	struct iphdr *old_iph = ip_hdr(skb);

	/* Don't respond to L2 broadcast. */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	/* Don't respond to L3 broadcast or invalid addresses. */
	if (!check_ipv4_address(old_iph->daddr) ||
	    !check_ipv4_address(old_iph->saddr))
		return false;

	/* Only respond to the first fragment. */
	if (old_iph->frag_off & htons(IP_OFFSET))
		return false;

	/* Don't respond to ICMP error messages. */
	if (old_iph->protocol == IPPROTO_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
						(old_iph->ihl << 2) +
						offsetof(struct icmphdr, type) -
						skb->data, sizeof(icmp_type),
						&icmp_type);

		if (!icmp_typep)
			return false;

		if (*icmp_typep > NR_ICMP_TYPES
			|| (*icmp_typep <= ICMP_PARAMETERPROB
				&& *icmp_typep != ICMP_ECHOREPLY
				&& *icmp_typep != ICMP_ECHO))
			return false;
	}

	return true;
}

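/* Synthesize the IPv4 and ICMP portions of a "fragmentation needed" reply
 * into nskb: a fresh IP header with the original source and destination
 * swapped, an ICMP_DEST_UNREACH/ICMP_FRAG_NEEDED header carrying the MTU
 * in its gateway field, and the leading bytes of the offending packet as
 * payload.
 */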
static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct iphdr *iph, *old_iph = ip_hdr(skb);
	struct icmphdr *icmph;
	u8 *payload;

	iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	payload = skb_put(nskb, payload_length);

	/* IP */
	iph->version		=	4;
	iph->ihl		=	sizeof(struct iphdr) >> 2;
	iph->tos		=	(old_iph->tos & IPTOS_TOS_MASK) |
					IPTOS_PREC_INTERNETCONTROL;
	iph->tot_len		=	htons(sizeof(struct iphdr)
					      + sizeof(struct icmphdr)
					      + payload_length);
	get_random_bytes(&iph->id, sizeof(iph->id));
	iph->frag_off		=	0;
	iph->ttl		=	IPDEFTTL;
	iph->protocol		=	IPPROTO_ICMP;
	iph->daddr		=	old_iph->saddr;
	iph->saddr		=	old_iph->daddr;

	ip_send_check(iph);

	/* ICMP */
	icmph->type		=	ICMP_DEST_UNREACH;
	icmph->code		=	ICMP_FRAG_NEEDED;
	icmph->un.gateway	=	htonl(mtu);
	icmph->checksum		=	0;

	nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmph->checksum = csum_fold(nskb->csum);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
	struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
	int addr_type;
	int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	/* Check source address is valid. */
	addr_type = ipv6_addr_type(&old_ipv6h->saddr);
	if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
		return false;

	/* Don't reply to unspecified addresses. */
	if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
		return false;

	/* Don't respond to ICMP error messages. */
	payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
	if (payload_off < 0)
		return false;

	if (nexthdr == NEXTHDR_ICMP) {
		u8 icmp_type, *icmp_typep;

		icmp_typep = skb_header_pointer(skb, payload_off +
						offsetof(struct icmp6hdr,
							icmp6_type),
						sizeof(icmp_type), &icmp_type);

		if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
			return false;
	}

	return true;
}

static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
			    unsigned int mtu, unsigned int payload_length)
{
	struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
	struct icmp6hdr *icmp6h;
	u8 *payload;

	ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	payload = skb_put(nskb, payload_length);

	/* IPv6 */
	ipv6h->version		=	6;
	ipv6h->priority		=	0;
	memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
	ipv6h->payload_len	=	htons(sizeof(struct icmp6hdr)
					      + payload_length);
	ipv6h->nexthdr		=	NEXTHDR_ICMP;
	ipv6h->hop_limit	=	IPV6_DEFAULT_HOPLIMIT;
	ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

	/* ICMPv6 */
	icmp6h->icmp6_type	=	ICMPV6_PKT_TOOBIG;
	icmp6h->icmp6_code	=	0;
	icmp6h->icmp6_cksum	=	0;
	icmp6h->icmp6_mtu	=	htonl(mtu);

	nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
	nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
					    payload, payload_length,
					    nskb->csum);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
						sizeof(struct icmp6hdr)
						+ payload_length,
						ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */

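/* Generate a synthetic "packet too big" reply for a packet that exceeds
 * the tunnel path MTU: ICMP fragmentation needed for IPv4, ICMPv6 packet
 * too big for IPv6.  The reply is injected back into the datapath with
 * vport_receive(), as if it had arrived over the tunnel.  Returns true if
 * the caller should drop the original packet (a reply was sent, or the
 * packet is one we must not reply to), false if PMTUD does not apply and
 * normal processing should continue.
 */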
bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
		     struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
	unsigned int eth_hdr_len = ETH_HLEN;
	unsigned int total_length = 0, header_length = 0, payload_length;
	struct ethhdr *eh, *old_eh = eth_hdr(skb);
	struct sk_buff *nskb;

	/* Sanity check */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (mtu < IP_MIN_MTU)
			return false;

		if (!ipv4_should_icmp(skb))
			return true;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)
			return false;

		/*
		 * In theory we should do PMTUD on IPv6 multicast messages, but
		 * we don't have an address to send from, so just fragment.
		 */
		if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
			return false;

		if (!ipv6_should_icmp(skb))
			return true;
	}
#endif
	else
		return false;

	/* Allocate */
	if (old_eh->h_proto == htons(ETH_P_8021Q))
		eth_hdr_len = VLAN_ETH_HLEN;

	payload_length = skb->len - eth_hdr_len;
	if (skb->protocol == htons(ETH_P_IP)) {
		header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
		total_length = min_t(unsigned int, header_length +
						   payload_length, 576);
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else {
		header_length = sizeof(struct ipv6hdr) +
				sizeof(struct icmp6hdr);
		total_length = min_t(unsigned int, header_length +
						  payload_length, IPV6_MIN_MTU);
	}
#endif

	payload_length = total_length - header_length;

	nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
			     payload_length);
	if (!nskb)
		return false;

	skb_reserve(nskb, NET_IP_ALIGN);

	/* Ethernet / VLAN */
	eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
	memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
	memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
	nskb->protocol = eh->h_proto = old_eh->h_proto;
	if (old_eh->h_proto == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

		vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
		vh->h_vlan_encapsulated_proto = skb->protocol;
	} else
		vlan_set_tci(nskb, vlan_get_tci(skb));
	skb_reset_mac_header(nskb);

	/* Protocol */
	if (skb->protocol == htons(ETH_P_IP))
		ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else
		ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

	/*
	 * Assume that flow based keys are symmetric with respect to input
	 * and output and use the key that we were going to put on the
	 * outgoing packet for the fake received packet.  If the keys are
	 * not symmetric then PMTUD needs to be disabled since we won't have
	 * any way of synthesizing packets.
	 */
	if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
	    (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
		OVS_CB(nskb)->tun_id = flow_key;

	if (unlikely(compute_ip_summed(nskb, false))) {
		kfree_skb(nskb);
		return false;
	}

	vport_receive(vport, nskb);

	return true;
}

static bool check_mtu(struct sk_buff *skb,
		      struct vport *vport,
		      const struct tnl_mutable_config *mutable,
		      const struct rtable *rt, __be16 *frag_offp)
{
	bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
	bool pmtud = mutable->flags & TNL_F_PMTUD;
	__be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
	int mtu = 0;
	unsigned int packet_length = skb->len - ETH_HLEN;

	/* Allow for one level of tagging in the packet length. */
	if (!vlan_tx_tag_present(skb) &&
	    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		packet_length -= VLAN_HLEN;

	if (pmtud) {
		int vlan_header = 0;

		/* The tag needs to go in the packet regardless of where it
		 * currently is, so subtract it from the MTU.
		 */
		if (vlan_tx_tag_present(skb) ||
		    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
			vlan_header = VLAN_HLEN;

		mtu = dst_mtu(&rt_dst(rt))
			- ETH_HLEN
			- mutable->tunnel_hlen
			- vlan_header;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		if (df_inherit)
			frag_off = iph->frag_off & htons(IP_DF);

		if (pmtud && iph->frag_off & htons(IP_DF)) {
			mtu = max(mtu, IP_MIN_MTU);

			if (packet_length > mtu &&
			    tnl_frag_needed(vport, mutable, skb, mtu,
					    OVS_CB(skb)->tun_id))
				return false;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		/* IPv6 requires end hosts to do fragmentation
		 * if the packet is above the minimum MTU.
		 */
		if (df_inherit && packet_length > IPV6_MIN_MTU)
			frag_off = htons(IP_DF);

		if (pmtud) {
			mtu = max(mtu, IPV6_MIN_MTU);

			if (packet_length > mtu &&
			    tnl_frag_needed(vport, mutable, skb, mtu,
					    OVS_CB(skb)->tun_id))
				return false;
		}
	}
#endif

	*frag_offp = frag_off;
	return true;
}

static void create_tunnel_header(const struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 const struct rtable *rt, void *header)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct iphdr *iph = header;

	iph->version	= 4;
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->frag_off	= htons(IP_DF);
	iph->protocol	= tnl_vport->tnl_ops->ipproto;
	iph->tos	= mutable->tos;
	iph->daddr	= rt->rt_dst;
	iph->saddr	= rt->rt_src;
	iph->ttl	= mutable->ttl;
	if (!iph->ttl)
		iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));

	tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
}

static inline void *get_cached_header(const struct tnl_cache *cache)
{
	return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}

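/* A cached header is only usable while everything it was derived from is
 * unchanged: the entry has not timed out (NEED_CACHE_TIMEOUT), the route
 * generation and hardware header sequence still match where the kernel
 * exposes them, the port's configuration sequence number is the same, and
 * any flow cached for an internal device is still alive.
 */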
static inline bool check_cache_valid(const struct tnl_cache *cache,
				     const struct tnl_mutable_config *mutable)
{
	return cache &&
#ifdef NEED_CACHE_TIMEOUT
		time_before(jiffies, cache->expiration) &&
#endif
#ifdef HAVE_RT_GENID
		atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
#endif
#ifdef HAVE_HH_SEQ
		rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
#endif
		mutable->seq == cache->mutable_seq &&
		(!is_internal_dev(rt_dst(cache->rt).dev) ||
		(cache->flow && !cache->flow->dead));
}

static void __cache_cleaner(struct tnl_vport *tnl_vport)
{
	const struct tnl_mutable_config *mutable =
			rcu_dereference(tnl_vport->mutable);
	const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

	if (cache && !check_cache_valid(cache, mutable) &&
	    spin_trylock_bh(&tnl_vport->cache_lock)) {
		assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
		spin_unlock_bh(&tnl_vport->cache_lock);
	}
}

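/* Periodic work item: walk every bucket under RCU and drop caches that are
 * no longer valid, so the routes and flows they pin can be released even on
 * tunnels that are not passing traffic.
 */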
static void cache_cleaner(struct work_struct *work)
{
	int i;

	schedule_cache_cleaner();

	rcu_read_lock();
	for (i = 0; i < PORT_TABLE_SIZE; i++) {
		struct hlist_node *n;
		struct hlist_head *bucket;
		struct tnl_vport  *tnl_vport;

		bucket = &port_table[i];
		hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
			__cache_cleaner(tnl_vport);
	}
	rcu_read_unlock();
}

static inline void create_eth_hdr(struct tnl_cache *cache,
				  const struct rtable *rt)
{
	void *cache_data = get_cached_header(cache);
	int hh_len = rt_dst(rt).hh->hh_len;
	int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;

#ifdef HAVE_HH_SEQ
	unsigned hh_seq;

	do {
		hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
		memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	} while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));

	cache->hh_seq = hh_seq;
#else
	read_lock(&rt_dst(rt).hh->hh_lock);
	memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
	read_unlock(&rt_dst(rt).hh->hh_lock);
#endif
}

static struct tnl_cache *build_cache(struct vport *vport,
				     const struct tnl_mutable_config *mutable,
				     struct rtable *rt)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cache;
	void *cache_data;
	int cache_len;

	if (!(mutable->flags & TNL_F_HDR_CACHE))
		return NULL;

	/*
	 * If there is no entry in the ARP cache or if this device does not
	 * support hard header caching, just fall back to the IP stack.
	 */
	if (!rt_dst(rt).hh)
		return NULL;

	/*
	 * If the lock is contended, fall back to directly building the header.
	 * We're not going to help performance by sitting here spinning.
	 */
	if (!spin_trylock(&tnl_vport->cache_lock))
		return NULL;

	cache = cache_dereference(tnl_vport);
	if (check_cache_valid(cache, mutable))
		goto unlock;
	else
		cache = NULL;

	cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;

	cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
			cache_len, GFP_ATOMIC);
	if (!cache)
		goto unlock;

	cache->len = cache_len;

	create_eth_hdr(cache, rt);
	cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;

	create_tunnel_header(vport, mutable, rt, cache_data);

	cache->mutable_seq = mutable->seq;
	cache->rt = rt;
#ifdef NEED_CACHE_TIMEOUT
	cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif

	if (is_internal_dev(rt_dst(rt).dev)) {
		struct sw_flow_key flow_key;
		struct vport *dst_vport;
		struct sk_buff *skb;
		bool is_frag;
		int err;
		int flow_key_len;
		struct sw_flow *flow;

		dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
		if (!dst_vport)
			goto done;

		skb = alloc_skb(cache->len, GFP_ATOMIC);
		if (!skb)
			goto done;

		__skb_put(skb, cache->len);
		memcpy(skb->data, get_cached_header(cache), cache->len);

		err = flow_extract(skb, dst_vport->port_no, &flow_key,
				   &flow_key_len, &is_frag);

		consume_skb(skb);
		if (err || is_frag)
			goto done;

		flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
					 &flow_key, flow_key_len);
		if (flow) {
			cache->flow = flow;
			flow_hold(flow);
		}
	}

done:
	assign_cache_rcu(vport, cache);

unlock:
	spin_unlock(&tnl_vport->cache_lock);

	return cache;
}

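/* Resolve the route to the tunnel destination, preferring a still-valid
 * header cache.  On the fast path the cached route is returned without
 * consulting the routing tables; otherwise a full ip_route_output_key()
 * lookup is done and, if the ToS matches the configured one, the cache is
 * rebuilt around the new route.
 */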
static struct rtable *find_route(struct vport *vport,
				 const struct tnl_mutable_config *mutable,
				 u8 tos, struct tnl_cache **cache)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

	*cache = NULL;
	tos = RT_TOS(tos);

	if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) {
		*cache = cur_cache;
		return cur_cache->rt;
	} else {
		struct rtable *rt;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = mutable->key.daddr,
						.saddr = mutable->key.saddr,
						.tos = tos } },
				    .proto = tnl_vport->tnl_ops->ipproto };

		if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
			return NULL;
#else
		struct flowi4 fl = { .daddr = mutable->key.daddr,
				     .saddr = mutable->key.saddr,
				     .flowi4_tos = tos,
				     .flowi4_proto = tnl_vport->tnl_ops->ipproto };

		rt = ip_route_output_key(&init_net, &fl);
		if (IS_ERR(rt))
			return NULL;
#endif

		if (likely(tos == mutable->tos))
			*cache = build_cache(vport, mutable, rt);

		return rt;
	}
}

static inline bool need_linearize(const struct sk_buff *skb)
{
	int i;

	if (unlikely(skb_shinfo(skb)->frag_list))
		return true;

	/*
	 * Generally speaking we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1 we know nobody else can
	 * change them from underneath us and we can skip the linearization.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
			return true;

	return false;
}

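/* Prepare an skb for encapsulation: grow the headroom to fit the link and
 * tunnel headers, segment GSO packets in software, and resolve outstanding
 * partial checksums, since offloads cannot see through the tunnel header.
 * Returns the (possibly replaced) skb chain, or an ERR_PTR() on failure.
 */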
static struct sk_buff *handle_offloads(struct sk_buff *skb,
				       const struct tnl_mutable_config *mutable,
				       const struct rtable *rt)
{
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ mutable->tunnel_hlen
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
					0, GFP_ATOMIC);
		if (unlikely(err))
			goto error_free;
	}

	forward_ip_summed(skb, true);

	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;

		nskb = skb_gso_segment(skb, 0);
		if (IS_ERR(nskb)) {
			kfree_skb(skb);
			err = PTR_ERR(nskb);
			goto error;
		}

		consume_skb(skb);
		skb = nskb;
	} else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
		/* Pages aren't locked and could change at any time.
		 * If this happens after we compute the checksum, the
		 * checksum will be wrong.  We linearize now to avoid
		 * this problem.
		 */
		if (unlikely(need_linearize(skb))) {
			err = __skb_linearize(skb);
			if (unlikely(err))
				goto error_free;
		}

		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error_free;
	}

	set_ip_summed(skb, OVS_CSUM_NONE);

	return skb;

error_free:
	kfree_skb(skb);
error:
	return ERR_PTR(err);
}

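/* Transmit a chain of already-encapsulated IP fragments with
 * ip_local_out(), returning the number of payload bytes (tunnel header
 * excluded) successfully queued.
 */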
static int send_frags(struct sk_buff *skb,
		      const struct tnl_mutable_config *mutable)
{
	int sent_len;

	sent_len = 0;
	while (skb) {
		struct sk_buff *next = skb->next;
		int frag_len = skb->len - mutable->tunnel_hlen;
		int err;

		skb->next = NULL;
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

		err = ip_local_out(skb);
		skb = next;
		if (unlikely(net_xmit_eval(err)))
			goto free_frags;
		sent_len += frag_len;
	}

	return sent_len;

free_frags:
	/*
	 * There's no point in continuing to send fragments once one has been
	 * dropped, so just free the rest.  This may help ease the congestion
	 * that caused the first packet to be dropped.
	 */
	tnl_free_linked_skbs(skb);
	return sent_len;
}

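/**
 *	tnl_send - egress point for generic tunnel code
 *
 * @vport: port this packet will be sent out of
 * @skb: packet to encapsulate
 *
 * Must be called with rcu_read_lock.  Validates the inner protocol headers,
 * chooses ToS and TTL (inherited or configured), resolves the route and
 * optional header cache, then encapsulates and transmits each resulting
 * segment.  Returns the number of payload bytes sent; on error 0 is
 * returned and the failure is recorded against the vport.
 */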
int tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);

	enum vport_err_type err = VPORT_E_TX_ERROR;
	struct rtable *rt;
	struct dst_entry *unattached_dst = NULL;
	struct tnl_cache *cache;
	int sent_len = 0;
	__be16 frag_off = 0;
	u8 ttl;
	u8 inner_tos;
	u8 tos;

	/* Validate the protocol headers before we try to use them. */
	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !vlan_tx_tag_present(skb)) {
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			goto error_free;

		skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, VLAN_ETH_HLEN);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct iphdr))))
			skb->protocol = 0;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
		    + sizeof(struct ipv6hdr))))
			skb->protocol = 0;
	}
#endif

	/* ToS */
	if (skb->protocol == htons(ETH_P_IP))
		inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
	else
		inner_tos = 0;

	if (mutable->flags & TNL_F_TOS_INHERIT)
		tos = inner_tos;
	else
		tos = mutable->tos;

	tos = INET_ECN_encapsulate(tos, inner_tos);

	/* Route lookup */
	rt = find_route(vport, mutable, tos, &cache);
	if (unlikely(!rt))
		goto error_free;
	if (unlikely(!cache))
		unattached_dst = &rt_dst(rt);

	/* Reset SKB */
	nf_reset(skb);
	secpath_reset(skb);
	skb_dst_drop(skb);
	skb_clear_rxhash(skb);

	/* Offloading */
	skb = handle_offloads(skb, mutable, rt);
	if (IS_ERR(skb))
		goto error;

	/* MTU */
	if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
		err = VPORT_E_TX_DROPPED;
		goto error_free;
	}

	/*
	 * If we are over the MTU, allow the IP stack to handle fragmentation.
	 * Fragmentation is a slow path anyway.
	 */
	if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
		     cache)) {
		unattached_dst = &rt_dst(rt);
		dst_hold(unattached_dst);
		cache = NULL;
	}

	/* TTL */
	ttl = mutable->ttl;
	if (!ttl)
		ttl = ip4_dst_hoplimit(&rt_dst(rt));

	if (mutable->flags & TNL_F_TTL_INHERIT) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ipv6_hdr(skb)->hop_limit;
#endif
	}

	while (skb) {
		struct iphdr *iph;
		struct sk_buff *next_skb = skb->next;
		skb->next = NULL;

		if (unlikely(vlan_deaccel_tag(skb)))
			goto next;

		if (likely(cache)) {
			skb_push(skb, cache->len);
			memcpy(skb->data, get_cached_header(cache), cache->len);
			skb_reset_mac_header(skb);
			skb_set_network_header(skb, rt_dst(rt).hh->hh_len);

		} else {
			skb_push(skb, mutable->tunnel_hlen);
			create_tunnel_header(vport, mutable, rt, skb->data);
			skb_reset_network_header(skb);

			if (next_skb)
				skb_dst_set(skb, dst_clone(unattached_dst));
			else {
				skb_dst_set(skb, unattached_dst);
				unattached_dst = NULL;
			}
		}
		skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

		iph = ip_hdr(skb);
		iph->tos = tos;
		iph->ttl = ttl;
		iph->frag_off = frag_off;
		ip_select_ident(iph, &rt_dst(rt), NULL);

		skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
		if (unlikely(!skb))
			goto next;

		if (likely(cache)) {
			int orig_len = skb->len - cache->len;
			struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);

			skb->protocol = htons(ETH_P_IP);
			iph = ip_hdr(skb);
			iph->tot_len = htons(skb->len - skb_network_offset(skb));
			ip_send_check(iph);

			if (cache_vport) {
				if (unlikely(compute_ip_summed(skb, true))) {
					kfree_skb(skb);
					goto next;
				}

				OVS_CB(skb)->flow = cache->flow;
				vport_receive(cache_vport, skb);
				sent_len += orig_len;
			} else {
				int xmit_err;

				skb->dev = rt_dst(rt).dev;
				xmit_err = dev_queue_xmit(skb);

				if (likely(net_xmit_eval(xmit_err) == 0))
					sent_len += orig_len;
			}
		} else
			sent_len += send_frags(skb, mutable);

next:
		skb = next_skb;
	}

	if (unlikely(sent_len == 0))
		vport_record_error(vport, VPORT_E_TX_DROPPED);

	goto out;

error_free:
	tnl_free_linked_skbs(skb);
error:
	vport_record_error(vport, err);
out:
	dst_release(unattached_dst);
	return sent_len;
}

static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
	[OVS_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
	[OVS_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
	[OVS_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
	[OVS_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
	[OVS_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
};

/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */
static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
			  const struct vport *cur_vport,
			  struct tnl_mutable_config *mutable)
{
	const struct vport *old_vport;
	const struct tnl_mutable_config *old_mutable;
	struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
	int err;

	if (!options)
		return -EINVAL;

	err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
	if (err)
		return err;

	if (!a[OVS_TUNNEL_ATTR_FLAGS] || !a[OVS_TUNNEL_ATTR_DST_IPV4])
		return -EINVAL;

	mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;

	if (a[OVS_TUNNEL_ATTR_SRC_IPV4])
		mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
	mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);

	if (a[OVS_TUNNEL_ATTR_TOS]) {
		mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
		if (mutable->tos != RT_TOS(mutable->tos))
			return -EINVAL;
	}

	if (a[OVS_TUNNEL_ATTR_TTL])
		mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);

	mutable->key.tunnel_type = tnl_ops->tunnel_type;
	if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
		mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
		mutable->flags |= TNL_F_IN_KEY_MATCH;
	} else {
		mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
		mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
	}

	if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
		mutable->flags |= TNL_F_OUT_KEY_ACTION;
	else
		mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);

	mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
	if (mutable->tunnel_hlen < 0)
		return mutable->tunnel_hlen;

	mutable->tunnel_hlen += sizeof(struct iphdr);

	old_vport = tnl_find_port(mutable->key.saddr, mutable->key.daddr,
				  mutable->key.in_key, mutable->key.tunnel_type,
				  &old_mutable);

	if (old_vport && old_vport != cur_vport)
		return -EEXIST;

	return 0;
}

struct vport *tnl_create(const struct vport_parms *parms,
			 const struct vport_ops *vport_ops,
			 const struct tnl_ops *tnl_ops)
{
	struct vport *vport;
	struct tnl_vport *tnl_vport;
	struct tnl_mutable_config *mutable;
	int initial_frag_id;
	int err;

	vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	tnl_vport = tnl_vport_priv(vport);

	strcpy(tnl_vport->name, parms->name);
	tnl_vport->tnl_ops = tnl_ops;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	vport_gen_rand_ether_addr(mutable->eth_addr);

	get_random_bytes(&initial_frag_id, sizeof(int));
	atomic_set(&tnl_vport->frag_id, initial_frag_id);

	err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
	if (err)
		goto error_free_mutable;

	spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
	tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
				       (net_random() % (MAX_CACHE_EXP / 2));
#endif

	rcu_assign_pointer(tnl_vport->mutable, mutable);

	port_table_add_port(vport);
	return vport;

error_free_mutable:
	kfree(mutable);
error_free_vport:
	vport_free(vport);
error:
	return ERR_PTR(err);
}

int tnl_set_options(struct vport *vport, struct nlattr *options)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *old_mutable;
	struct tnl_mutable_config *mutable;
	int err;

	mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable) {
		err = -ENOMEM;
		goto error;
	}

	/* Copy fields whose values should be retained. */
	old_mutable = rtnl_dereference(tnl_vport->mutable);
	mutable->seq = old_mutable->seq + 1;
	memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);

	/* Parse the others configured by userspace. */
	err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);
	if (err)
		goto error_free;

	if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
		port_table_move_port(vport, mutable);
	else
		assign_config_rcu(vport, mutable);

	return 0;

error_free:
	kfree(mutable);
error:
	return err;
}

int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);

	NLA_PUT_U32(skb, OVS_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC);
	NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr);

	if (!(mutable->flags & TNL_F_IN_KEY_MATCH))
		NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key);
	if (!(mutable->flags & TNL_F_OUT_KEY_ACTION))
		NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key);
	if (mutable->key.saddr)
		NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr);
	if (mutable->tos)
		NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos);
	if (mutable->ttl)
		NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void free_port_rcu(struct rcu_head *rcu)
{
	struct tnl_vport *tnl_vport = container_of(rcu,
						   struct tnl_vport, rcu);

	free_cache((struct tnl_cache __force *)tnl_vport->cache);
	kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
	vport_free(tnl_vport_to_vport(tnl_vport));
}

void tnl_destroy(struct vport *vport)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

	port_table_remove_port(vport);
	call_rcu(&tnl_vport->rcu, free_port_rcu);
}

int tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	struct tnl_mutable_config *mutable;

	mutable = kmemdup(rtnl_dereference(tnl_vport->mutable),
			  sizeof(struct tnl_mutable_config), GFP_KERNEL);
	if (!mutable)
		return -ENOMEM;

	memcpy(mutable->eth_addr, addr, ETH_ALEN);
	assign_config_rcu(vport, mutable);

	return 0;
}

const char *tnl_get_name(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return tnl_vport->name;
}

const unsigned char *tnl_get_addr(const struct vport *vport)
{
	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
	return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
}

void tnl_free_linked_skbs(struct sk_buff *skb)
{
	while (skb) {
		struct sk_buff *next = skb->next;
		kfree_skb(skb);
		skb = next;
	}
}

int tnl_init(void)
{
	int i;

	port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
			GFP_KERNEL);
	if (!port_table)
		return -ENOMEM;

	for (i = 0; i < PORT_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&port_table[i]);

	return 0;
}

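/* All tunnel ports must have been destroyed before the module is removed;
 * an entry remaining in the table here indicates a reference leak, hence
 * the BUG() below.
 */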
void tnl_exit(void)
{
	int i;

	for (i = 0; i < PORT_TABLE_SIZE; i++) {
		struct tnl_vport *tnl_vport;
		struct hlist_head *hash_head;
		struct hlist_node *n;

		hash_head = &port_table[i];
		hlist_for_each_entry(tnl_vport, n, hash_head, hash_node) {
			BUG();
			goto out;
		}
	}
out:
	kfree(port_table);
}