datapath: Improve kernel hash table
[sliver-openvswitch.git] / datapath / tunnel.c
1 /*
2  * Copyright (c) 2010, 2011 Nicira Networks.
3  * Distributed under the terms of the GNU GPL version 2.
4  *
5  * Significant portions of this file may be copied from parts of the Linux
6  * kernel, by Linus Torvalds and others.
7  */
8
9 #include <linux/if_arp.h>
10 #include <linux/if_ether.h>
11 #include <linux/ip.h>
12 #include <linux/if_vlan.h>
13 #include <linux/in.h>
14 #include <linux/in_route.h>
15 #include <linux/jhash.h>
16 #include <linux/list.h>
17 #include <linux/kernel.h>
18 #include <linux/version.h>
19 #include <linux/workqueue.h>
20 #include <linux/rculist.h>
21
22 #include <net/dsfield.h>
23 #include <net/dst.h>
24 #include <net/icmp.h>
25 #include <net/inet_ecn.h>
26 #include <net/ip.h>
27 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
28 #include <net/ipv6.h>
29 #endif
30 #include <net/route.h>
31 #include <net/xfrm.h>
32
33 #include "actions.h"
34 #include "checksum.h"
35 #include "datapath.h"
36 #include "tunnel.h"
37 #include "vlan.h"
38 #include "vport.h"
39 #include "vport-generic.h"
40 #include "vport-internal_dev.h"
41
42 #ifdef NEED_CACHE_TIMEOUT
43 /*
44  * On kernels where we can't quickly detect changes in the rest of the system
45  * we use an expiration time to invalidate the cache.  A shorter expiration
46  * reduces the length of time that we may potentially blackhole packets, while
47  * a longer time increases performance by reducing how often the
48  * cache needs to be rebuilt.  A variety of factors may cause the cache to be
49  * invalidated before the expiration time but this is the maximum.  The time
50  * is expressed in jiffies.
51  */
52 #define MAX_CACHE_EXP HZ
53 #endif
54
55 /*
56  * Interval to check for and remove caches that are no longer valid.  Caches
57  * are checked for validity before they are used for packet encapsulation and
58  * old caches are removed at that time.  However, if no packets are sent through
59  * the tunnel then the cache will never be destroyed.  Since it holds
60  * references to a number of system objects, the cache will continue to use
61  * system resources by not allowing those objects to be destroyed.  The cache
62  * cleaner is periodically run to free invalid caches.  It does not
63  * significantly affect system performance.  A lower interval will release
64  * resources faster but will itself consume resources by requiring more frequent
65  * checks.  A longer interval may result in messages being printed to the kernel
66  * message buffer about unreleased resources.  The interval is expressed in
67  * jiffies.
68  */
69 #define CACHE_CLEANER_INTERVAL (5 * HZ)
70
71 #define CACHE_DATA_ALIGN 16
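/* Number of buckets in the tunnel port hash table.  find_bucket() masks the
 * hash with (PORT_TABLE_SIZE - 1), so this must stay a power of two. */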
72 #define PORT_TABLE_SIZE  1024
73
74 static struct hlist_head *port_table __read_mostly;
75 static int port_table_count;
76
77 static void cache_cleaner(struct work_struct *work);
78 static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
79
80 /*
81  * These count the ports in each lookup category (see find_port_pool()) and
82  * are just an optimization: they don't require any synchronization because we
83  * could have just as easily read the value before the port change happened.
84  */
85 static unsigned int key_local_remote_ports __read_mostly;
86 static unsigned int key_remote_ports __read_mostly;
87 static unsigned int local_remote_ports __read_mostly;
88 static unsigned int remote_ports __read_mostly;
89
90 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
91 #define rt_dst(rt) (rt->dst)
92 #else
93 #define rt_dst(rt) (rt->u.dst)
94 #endif
95
96 static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
97 {
98         return vport_from_priv(tnl_vport);
99 }
100
101 /* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
102  * cache_lock is held, so it is only for update side code.
103  */
104 static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
105 {
106         return rcu_dereference_protected(tnl_vport->cache,
107                                          lockdep_is_held(&tnl_vport->cache_lock));
108 }
109
110 static inline void schedule_cache_cleaner(void)
111 {
112         schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
113 }
114
115 static void free_cache(struct tnl_cache *cache)
116 {
117         if (!cache)
118                 return;
119
120         flow_put(cache->flow);
121         ip_rt_put(cache->rt);
122         kfree(cache);
123 }
124
125 static void free_config_rcu(struct rcu_head *rcu)
126 {
127         struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
128         kfree(c);
129 }
130
131 static void free_cache_rcu(struct rcu_head *rcu)
132 {
133         struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
134         free_cache(c);
135 }
136
137 static void assign_config_rcu(struct vport *vport,
138                               struct tnl_mutable_config *new_config)
139 {
140         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
141         struct tnl_mutable_config *old_config;
142
143         old_config = rtnl_dereference(tnl_vport->mutable);
144         rcu_assign_pointer(tnl_vport->mutable, new_config);
145         call_rcu(&old_config->rcu, free_config_rcu);
146 }
147
148 static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
149 {
150         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
151         struct tnl_cache *old_cache;
152
153         old_cache = cache_dereference(tnl_vport);
154         rcu_assign_pointer(tnl_vport->cache, new_cache);
155
156         if (old_cache)
157                 call_rcu(&old_cache->rcu, free_cache_rcu);
158 }
159
160 static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
161 {
162         if (mutable->flags & TNL_F_IN_KEY_MATCH) {
163                 if (mutable->saddr)
164                         return &local_remote_ports;
165                 else
166                         return &remote_ports;
167         } else {
168                 if (mutable->saddr)
169                         return &key_local_remote_ports;
170                 else
171                         return &key_remote_ports;
172         }
173 }
174
175 struct port_lookup_key {
176         const struct tnl_mutable_config *mutable;
177         __be64 key;
178         u32 tunnel_type;
179         __be32 saddr;
180         __be32 daddr;
181 };
182
183 /*
184  * Modifies 'lookup' to store the rcu_dereferenced pointer that was used to do
185  * the comparison.
186  */
187 static int port_cmp(const struct tnl_vport *tnl_vport,
188                     struct port_lookup_key *lookup)
189 {
190         lookup->mutable = rcu_dereference_rtnl(tnl_vport->mutable);
191
192         return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
193                 lookup->mutable->daddr == lookup->daddr &&
194                 lookup->mutable->in_key == lookup->key &&
195                 lookup->mutable->saddr == lookup->saddr);
196 }
197
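/* Hashes the fields that identify a tunnel port: local and remote addresses,
 * the tunnel type and the 64-bit key (mixed in as two 32-bit halves). */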
198 static u32 port_hash(struct port_lookup_key *k)
199 {
200         u32 x = jhash_3words((__force u32)k->saddr, (__force u32)k->daddr,
201                              k->tunnel_type, 0);
202         return jhash_2words((__force u64)k->key >> 32, (__force u32)k->key, x);
203 }
204
205 static u32 mutable_hash(const struct tnl_mutable_config *mutable)
206 {
207         struct port_lookup_key lookup;
208
209         lookup.saddr = mutable->saddr;
210         lookup.daddr = mutable->daddr;
211         lookup.key = mutable->in_key;
212         lookup.tunnel_type = mutable->tunnel_type;
213
214         return port_hash(&lookup);
215 }
216
217
218 static inline struct hlist_head *find_bucket(u32 hash)
219 {
220         return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
221 }
222
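/* Links a port into the hash table.  The mutable config is read with
 * rtnl_dereference(), so RTNL must be held by the caller; adding the first
 * port also kicks off the periodic cache cleaner. */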
223 static void port_table_add_port(struct vport *vport)
224 {
225         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
226         u32 hash = mutable_hash(rtnl_dereference(tnl_vport->mutable));
227
228         if (port_table_count == 0)
229                 schedule_cache_cleaner();
230
231         hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
232         port_table_count++;
233
234         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
235 }
236
237 static void port_table_move_port(struct vport *vport,
238                                  struct tnl_mutable_config *new_mutable)
239 {
240         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
241         u32 hash;
242
243         hash = mutable_hash(new_mutable);
244         hlist_del_init_rcu(&tnl_vport->hash_node);
245         hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
246
247         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
248         assign_config_rcu(vport, new_mutable);
249         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
250 }
251
252 static void port_table_remove_port(struct vport *vport)
253 {
254         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
255
256         hlist_del_init_rcu(&tnl_vport->hash_node);
257
258         port_table_count--;
259         if (port_table_count == 0)
260                 cancel_delayed_work_sync(&cache_cleaner_wq);
261
262         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
263 }
264
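/* RCU/RTNL hash table lookup.  When a match is found, lookup->mutable is left
 * pointing at the rcu_dereferenced config of the matching port (see
 * port_cmp()). */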
265 static struct tnl_vport *port_table_lookup(struct port_lookup_key *lookup)
266 {
267         struct hlist_node *n;
268         struct hlist_head *bucket;
269         u32 hash = port_hash(lookup);
270         struct tnl_vport *tnl_vport;
271
272         bucket = find_bucket(hash);
273
274         hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
275                 if (port_cmp(tnl_vport, lookup))
276                         return tnl_vport;
277         }
278
279         return NULL;
280 }
281
282 struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
283                             int tunnel_type,
284                             const struct tnl_mutable_config **mutable)
285 {
286         struct port_lookup_key lookup;
287         struct tnl_vport *tnl_vport;
288
289         lookup.saddr = saddr;
290         lookup.daddr = daddr;
291
292         if (tunnel_type & TNL_T_KEY_EXACT) {
293                 lookup.key = key;
294                 lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;
295
296                 if (key_local_remote_ports) {
297                         tnl_vport = port_table_lookup(&lookup);
298                         if (tnl_vport)
299                                 goto found;
300                 }
301
302                 if (key_remote_ports) {
303                         lookup.saddr = 0;
304                         tnl_vport = port_table_lookup(&lookup);
305                         if (tnl_vport)
306                                 goto found;
307
308                         lookup.saddr = saddr;
309                 }
310         }
311
312         if (tunnel_type & TNL_T_KEY_MATCH) {
313                 lookup.key = 0;
314                 lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;
315
316                 if (local_remote_ports) {
317                         tnl_vport = port_table_lookup(&lookup);
318                         if (tnl_vport)
319                                 goto found;
320                 }
321
322                 if (remote_ports) {
323                         lookup.saddr = 0;
324                         tnl_vport = port_table_lookup(&lookup);
325                         if (tnl_vport)
326                                 goto found;
327                 }
328         }
329
330         return NULL;
331
332 found:
333         *mutable = lookup.mutable;
334         return tnl_vport_to_vport(tnl_vport);
335 }
336
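/* If the outer IP header signalled congestion (CE), propagate the CE mark to
 * the inner IPv4 or IPv6 header of the decapsulated packet. */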
337 static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
338 {
339         if (unlikely(INET_ECN_is_ce(tos))) {
340                 __be16 protocol = skb->protocol;
341
342                 skb_set_network_header(skb, ETH_HLEN);
343
344                 if (protocol == htons(ETH_P_8021Q)) {
345                         if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
346                                 return;
347
348                         protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
349                         skb_set_network_header(skb, VLAN_ETH_HLEN);
350                 }
351
352                 if (protocol == htons(ETH_P_IP)) {
353                         if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
354                             + sizeof(struct iphdr))))
355                                 return;
356
357                         IP_ECN_set_ce(ip_hdr(skb));
358                 }
359 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
360                 else if (protocol == htons(ETH_P_IPV6)) {
361                         if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
362                             + sizeof(struct ipv6hdr))))
363                                 return;
364
365                         IP6_ECN_set_ce(ipv6_hdr(skb));
366                 }
367 #endif
368         }
369 }
370
371 /**
372  *      tnl_rcv - ingress point for generic tunnel code
373  *
374  * @vport: port this packet was received on
375  * @skb: received packet
376  * @tos: ToS from encapsulating IP packet, used to copy ECN bits
377  *
378  * Must be called with rcu_read_lock.
379  *
380  * Packets received by this function are in the following state:
381  * - skb->data points to the inner Ethernet header.
382  * - The inner Ethernet header is in the linear data area.
383  * - skb->csum does not include the inner Ethernet header.
384  * - The layer pointers are undefined.
385  */
386 void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
387 {
388         struct ethhdr *eh;
389
390         skb_reset_mac_header(skb);
391         eh = eth_hdr(skb);
392
393         if (likely(ntohs(eh->h_proto) >= 1536))
394                 skb->protocol = eh->h_proto;
395         else
396                 skb->protocol = htons(ETH_P_802_2);
397
398         skb_dst_drop(skb);
399         nf_reset(skb);
400         skb_clear_rxhash(skb);
401         secpath_reset(skb);
402
403         ecn_decapsulate(skb, tos);
404         vlan_set_tci(skb, 0);
405
406         if (unlikely(compute_ip_summed(skb, false))) {
407                 kfree_skb(skb);
408                 return;
409         }
410
411         vport_receive(vport, skb);
412 }
413
414 static bool check_ipv4_address(__be32 addr)
415 {
416         if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
417             || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
418                 return false;
419
420         return true;
421 }
422
423 static bool ipv4_should_icmp(struct sk_buff *skb)
424 {
425         struct iphdr *old_iph = ip_hdr(skb);
426
427         /* Don't respond to L2 broadcast. */
428         if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
429                 return false;
430
431         /* Don't respond to L3 broadcast or invalid addresses. */
432         if (!check_ipv4_address(old_iph->daddr) ||
433             !check_ipv4_address(old_iph->saddr))
434                 return false;
435
436         /* Only respond to the first fragment. */
437         if (old_iph->frag_off & htons(IP_OFFSET))
438                 return false;
439
440         /* Don't respond to ICMP error messages. */
441         if (old_iph->protocol == IPPROTO_ICMP) {
442                 u8 icmp_type, *icmp_typep;
443
444                 icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
445                                                 (old_iph->ihl << 2) +
446                                                 offsetof(struct icmphdr, type) -
447                                                 skb->data, sizeof(icmp_type),
448                                                 &icmp_type);
449
450                 if (!icmp_typep)
451                         return false;
452
453                 if (*icmp_typep > NR_ICMP_TYPES
454                         || (*icmp_typep <= ICMP_PARAMETERPROB
455                                 && *icmp_typep != ICMP_ECHOREPLY
456                                 && *icmp_typep != ICMP_ECHO))
457                         return false;
458         }
459
460         return true;
461 }
462
463 static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
464                             unsigned int mtu, unsigned int payload_length)
465 {
466         struct iphdr *iph, *old_iph = ip_hdr(skb);
467         struct icmphdr *icmph;
468         u8 *payload;
469
470         iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
471         icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
472         payload = skb_put(nskb, payload_length);
473
474         /* IP */
475         iph->version            =       4;
476         iph->ihl                =       sizeof(struct iphdr) >> 2;
477         iph->tos                =       (old_iph->tos & IPTOS_TOS_MASK) |
478                                         IPTOS_PREC_INTERNETCONTROL;
479         iph->tot_len            =       htons(sizeof(struct iphdr)
480                                               + sizeof(struct icmphdr)
481                                               + payload_length);
482         get_random_bytes(&iph->id, sizeof(iph->id));
483         iph->frag_off           =       0;
484         iph->ttl                =       IPDEFTTL;
485         iph->protocol           =       IPPROTO_ICMP;
486         iph->daddr              =       old_iph->saddr;
487         iph->saddr              =       old_iph->daddr;
488
489         ip_send_check(iph);
490
491         /* ICMP */
492         icmph->type             =       ICMP_DEST_UNREACH;
493         icmph->code             =       ICMP_FRAG_NEEDED;
494         icmph->un.gateway       =       htonl(mtu);
495         icmph->checksum         =       0;
496
497         nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
498         nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
499                                             payload, payload_length,
500                                             nskb->csum);
501         icmph->checksum = csum_fold(nskb->csum);
502 }
503
504 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
505 static bool ipv6_should_icmp(struct sk_buff *skb)
506 {
507         struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
508         int addr_type;
509         int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
510         u8 nexthdr = ipv6_hdr(skb)->nexthdr;
511
512         /* Check source address is valid. */
513         addr_type = ipv6_addr_type(&old_ipv6h->saddr);
514         if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
515                 return false;
516
517         /* Don't reply to unspecified addresses. */
518         if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
519                 return false;
520
521         /* Don't respond to ICMP error messages. */
522         payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
523         if (payload_off < 0)
524                 return false;
525
526         if (nexthdr == NEXTHDR_ICMP) {
527                 u8 icmp_type, *icmp_typep;
528
529                 icmp_typep = skb_header_pointer(skb, payload_off +
530                                                 offsetof(struct icmp6hdr,
531                                                         icmp6_type),
532                                                 sizeof(icmp_type), &icmp_type);
533
534                 if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
535                         return false;
536         }
537
538         return true;
539 }
540
541 static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
542                             unsigned int mtu, unsigned int payload_length)
543 {
544         struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
545         struct icmp6hdr *icmp6h;
546         u8 *payload;
547
548         ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
549         icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
550         payload = skb_put(nskb, payload_length);
551
552         /* IPv6 */
553         ipv6h->version          =       6;
554         ipv6h->priority         =       0;
555         memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
556         ipv6h->payload_len      =       htons(sizeof(struct icmp6hdr)
557                                               + payload_length);
558         ipv6h->nexthdr          =       NEXTHDR_ICMP;
559         ipv6h->hop_limit        =       IPV6_DEFAULT_HOPLIMIT;
560         ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
561         ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);
562
563         /* ICMPv6 */
564         icmp6h->icmp6_type      =       ICMPV6_PKT_TOOBIG;
565         icmp6h->icmp6_code      =       0;
566         icmp6h->icmp6_cksum     =       0;
567         icmp6h->icmp6_mtu       =       htonl(mtu);
568
569         nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
570         nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
571                                             payload, payload_length,
572                                             nskb->csum);
573         icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
574                                                 sizeof(struct icmp6hdr)
575                                                 + payload_length,
576                                                 ipv6h->nexthdr, nskb->csum);
577 }
578 #endif /* IPv6 */
579
580 bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
581                      struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
582 {
583         unsigned int eth_hdr_len = ETH_HLEN;
584         unsigned int total_length = 0, header_length = 0, payload_length;
585         struct ethhdr *eh, *old_eh = eth_hdr(skb);
586         struct sk_buff *nskb;
587
588         /* Sanity check */
589         if (skb->protocol == htons(ETH_P_IP)) {
590                 if (mtu < IP_MIN_MTU)
591                         return false;
592
593                 if (!ipv4_should_icmp(skb))
594                         return true;
595         }
596 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
597         else if (skb->protocol == htons(ETH_P_IPV6)) {
598                 if (mtu < IPV6_MIN_MTU)
599                         return false;
600
601                 /*
602                  * In theory we should do PMTUD on IPv6 multicast messages but
603                  * we don't have an address to send from so just fragment.
604                  */
605                 if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
606                         return false;
607
608                 if (!ipv6_should_icmp(skb))
609                         return true;
610         }
611 #endif
612         else
613                 return false;
614
615         /* Allocate */
616         if (old_eh->h_proto == htons(ETH_P_8021Q))
617                 eth_hdr_len = VLAN_ETH_HLEN;
618
619         payload_length = skb->len - eth_hdr_len;
620         if (skb->protocol == htons(ETH_P_IP)) {
621                 header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
622                 total_length = min_t(unsigned int, header_length +
623                                                    payload_length, 576);
624         }
625 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
626         else {
627                 header_length = sizeof(struct ipv6hdr) +
628                                 sizeof(struct icmp6hdr);
629                 total_length = min_t(unsigned int, header_length +
630                                                   payload_length, IPV6_MIN_MTU);
631         }
632 #endif
633
634         payload_length = total_length - header_length;
635
636         nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
637                              payload_length);
638         if (!nskb)
639                 return false;
640
641         skb_reserve(nskb, NET_IP_ALIGN);
642
643         /* Ethernet / VLAN */
644         eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
645         memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
646         memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
647         nskb->protocol = eh->h_proto = old_eh->h_proto;
648         if (old_eh->h_proto == htons(ETH_P_8021Q)) {
649                 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;
650
651                 vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
652                 vh->h_vlan_encapsulated_proto = skb->protocol;
653         } else
654                 vlan_set_tci(nskb, vlan_get_tci(skb));
655         skb_reset_mac_header(nskb);
656
657         /* Protocol */
658         if (skb->protocol == htons(ETH_P_IP))
659                 ipv4_build_icmp(skb, nskb, mtu, payload_length);
660 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
661         else
662                 ipv6_build_icmp(skb, nskb, mtu, payload_length);
663 #endif
664
665         /*
666          * Assume that flow based keys are symmetric with respect to input
667          * and output and use the key that we were going to put on the
668          * outgoing packet for the fake received packet.  If the keys are
669          * not symmetric then PMTUD needs to be disabled since we won't have
670          * any way of synthesizing packets.
671          */
672         if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
673             (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
674                 OVS_CB(nskb)->tun_id = flow_key;
675
676         if (unlikely(compute_ip_summed(nskb, false))) {
677                 kfree_skb(nskb);
678                 return false;
679         }
680
681         vport_receive(vport, nskb);
682
683         return true;
684 }
685
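/* Checks the packet against the path MTU.  Sets *frag_offp to the DF bit to
 * use in the outer IP header and returns false if the packet is too big to
 * send, in which case tnl_frag_needed() has already generated an ICMP
 * "fragmentation needed" / "packet too big" reply where appropriate. */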
686 static bool check_mtu(struct sk_buff *skb,
687                       struct vport *vport,
688                       const struct tnl_mutable_config *mutable,
689                       const struct rtable *rt, __be16 *frag_offp)
690 {
691         bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
692         bool pmtud = mutable->flags & TNL_F_PMTUD;
693         __be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
694         int mtu = 0;
695         unsigned int packet_length = skb->len - ETH_HLEN;
696
697         /* Allow for one level of tagging in the packet length. */
698         if (!vlan_tx_tag_present(skb) &&
699             eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
700                 packet_length -= VLAN_HLEN;
701
702         if (pmtud) {
703                 int vlan_header = 0;
704
705                 /* The tag needs to go in the packet regardless of where it
706                  * currently is, so subtract it from the MTU.
707                  */
708                 if (vlan_tx_tag_present(skb) ||
709                     eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
710                         vlan_header = VLAN_HLEN;
711
712                 mtu = dst_mtu(&rt_dst(rt))
713                         - ETH_HLEN
714                         - mutable->tunnel_hlen
715                         - vlan_header;
716         }
717
718         if (skb->protocol == htons(ETH_P_IP)) {
719                 struct iphdr *iph = ip_hdr(skb);
720
721                 if (df_inherit)
722                         frag_off = iph->frag_off & htons(IP_DF);
723
724                 if (pmtud && iph->frag_off & htons(IP_DF)) {
725                         mtu = max(mtu, IP_MIN_MTU);
726
727                         if (packet_length > mtu &&
728                             tnl_frag_needed(vport, mutable, skb, mtu,
729                                             OVS_CB(skb)->tun_id))
730                                 return false;
731                 }
732         }
733 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
734         else if (skb->protocol == htons(ETH_P_IPV6)) {
735                 /* IPv6 requires end hosts to do fragmentation
736                  * if the packet is above the minimum MTU.
737                  */
738                 if (df_inherit && packet_length > IPV6_MIN_MTU)
739                         frag_off = htons(IP_DF);
740
741                 if (pmtud) {
742                         mtu = max(mtu, IPV6_MIN_MTU);
743
744                         if (packet_length > mtu &&
745                             tnl_frag_needed(vport, mutable, skb, mtu,
746                                             OVS_CB(skb)->tun_id))
747                                 return false;
748                 }
749         }
750 #endif
751
752         *frag_offp = frag_off;
753         return true;
754 }
755
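/* Writes the outer IPv4 header for this tunnel into 'header', then lets the
 * protocol-specific code append its own header via tnl_ops->build_header(). */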
756 static void create_tunnel_header(const struct vport *vport,
757                                  const struct tnl_mutable_config *mutable,
758                                  const struct rtable *rt, void *header)
759 {
760         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
761         struct iphdr *iph = header;
762
763         iph->version    = 4;
764         iph->ihl        = sizeof(struct iphdr) >> 2;
765         iph->frag_off   = htons(IP_DF);
766         iph->protocol   = tnl_vport->tnl_ops->ipproto;
767         iph->tos        = mutable->tos;
768         iph->daddr      = rt->rt_dst;
769         iph->saddr      = rt->rt_src;
770         iph->ttl        = mutable->ttl;
771         if (!iph->ttl)
772                 iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));
773
774         tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
775 }
776
777 static inline void *get_cached_header(const struct tnl_cache *cache)
778 {
779         return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
780 }
781
782 static inline bool check_cache_valid(const struct tnl_cache *cache,
783                                      const struct tnl_mutable_config *mutable)
784 {
785         return cache &&
786 #ifdef NEED_CACHE_TIMEOUT
787                 time_before(jiffies, cache->expiration) &&
788 #endif
789 #ifdef HAVE_RT_GENID
790                 atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
791 #endif
792 #ifdef HAVE_HH_SEQ
793                 rt_dst(cache->rt).hh->hh_lock.sequence == cache->hh_seq &&
794 #endif
795                 mutable->seq == cache->mutable_seq &&
796                 (!is_internal_dev(rt_dst(cache->rt).dev) ||
797                 (cache->flow && !cache->flow->dead));
798 }
799
800 static void __cache_cleaner(struct tnl_vport *tnl_vport)
801 {
802         const struct tnl_mutable_config *mutable =
803                         rcu_dereference(tnl_vport->mutable);
804         const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);
805
806         if (cache && !check_cache_valid(cache, mutable) &&
807             spin_trylock_bh(&tnl_vport->cache_lock)) {
808                 assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
809                 spin_unlock_bh(&tnl_vport->cache_lock);
810         }
811 }
812
813 static void cache_cleaner(struct work_struct *work)
814 {
815         int i;
816
817         schedule_cache_cleaner();
818
819         rcu_read_lock();
820         for (i = 0; i < PORT_TABLE_SIZE; i++) {
821                 struct hlist_node *n;
822                 struct hlist_head *bucket;
823                 struct tnl_vport  *tnl_vport;
824
825                 bucket = &port_table[i];
826                 hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
827                         __cache_cleaner(tnl_vport);
828         }
829         rcu_read_unlock();
830 }
831
832 static inline void create_eth_hdr(struct tnl_cache *cache,
833                                   const struct rtable *rt)
834 {
835         void *cache_data = get_cached_header(cache);
836         int hh_len = rt_dst(rt).hh->hh_len;
837         int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;
838
839 #ifdef HAVE_HH_SEQ
840         unsigned hh_seq;
841
842         do {
843                 hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
844                 memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
845         } while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));
846
847         cache->hh_seq = hh_seq;
848 #else
849         read_lock_bh(&rt_dst(rt).hh->hh_lock);
850         memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
851         read_unlock_bh(&rt_dst(rt).hh->hh_lock);
852 #endif
853 }
854
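/* Returns a usable header cache for this route, building one if necessary.
 * The cache holds a preformed copy of the L2 and tunnel headers.  Returns
 * NULL, falling back to per-packet header construction, if header caching is
 * disabled, the route has no hard header cache, or the cache lock is
 * contended. */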
855 static struct tnl_cache *build_cache(struct vport *vport,
856                                      const struct tnl_mutable_config *mutable,
857                                      struct rtable *rt)
858 {
859         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
860         struct tnl_cache *cache;
861         void *cache_data;
862         int cache_len;
863
864         if (!(mutable->flags & TNL_F_HDR_CACHE))
865                 return NULL;
866
867         /*
868          * If there is no entry in the ARP cache or if this device does not
869          * support hard header caching, just fall back to the IP stack.
870          */
871         if (!rt_dst(rt).hh)
872                 return NULL;
873
874         /*
875          * If the lock is contended, fall back to building the header directly.
876          * We're not going to help performance by sitting here spinning.
877          */
878         if (!spin_trylock_bh(&tnl_vport->cache_lock))
879                 return NULL;
880
881         cache = cache_dereference(tnl_vport);
882         if (check_cache_valid(cache, mutable))
883                 goto unlock;
884         else
885                 cache = NULL;
886
887         cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;
888
889         cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
890                         cache_len, GFP_ATOMIC);
891         if (!cache)
892                 goto unlock;
893
894         cache->len = cache_len;
895
896         create_eth_hdr(cache, rt);
897         cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;
898
899         create_tunnel_header(vport, mutable, rt, cache_data);
900
901         cache->mutable_seq = mutable->seq;
902         cache->rt = rt;
903 #ifdef NEED_CACHE_TIMEOUT
904         cache->expiration = jiffies + tnl_vport->cache_exp_interval;
905 #endif
906
907         if (is_internal_dev(rt_dst(rt).dev)) {
908                 struct sw_flow_key flow_key;
909                 struct vport *dst_vport;
910                 struct sk_buff *skb;
911                 bool is_frag;
912                 int err;
913                 int flow_key_len;
914                 struct sw_flow *flow;
915
916                 dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
917                 if (!dst_vport)
918                         goto done;
919
920                 skb = alloc_skb(cache->len, GFP_ATOMIC);
921                 if (!skb)
922                         goto done;
923
924                 __skb_put(skb, cache->len);
925                 memcpy(skb->data, get_cached_header(cache), cache->len);
926
927                 err = flow_extract(skb, dst_vport->port_no, &flow_key,
928                                    &flow_key_len, &is_frag);
929
930                 consume_skb(skb);
931                 if (err || is_frag)
932                         goto done;
933
934                 flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
935                                          &flow_key, flow_key_len);
936                 if (flow) {
937                         cache->flow = flow;
938                         flow_hold(flow);
939                 }
940         }
941
942 done:
943         assign_cache_rcu(vport, cache);
944
945 unlock:
946         spin_unlock_bh(&tnl_vport->cache_lock);
947
948         return cache;
949 }
950
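/* Returns a route to the tunnel destination.  If the current header cache is
 * valid for the requested ToS, the cached route is returned and *cache is set;
 * otherwise a fresh route lookup is done and, when the ToS matches the
 * configured one, the header cache is rebuilt if possible. */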
951 static struct rtable *find_route(struct vport *vport,
952                                  const struct tnl_mutable_config *mutable,
953                                  u8 tos, struct tnl_cache **cache)
954 {
955         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
956         struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);
957
958         *cache = NULL;
959         tos = RT_TOS(tos);
960
961         if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) {
962                 *cache = cur_cache;
963                 return cur_cache->rt;
964         } else {
965                 struct rtable *rt;
966 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
967                 struct flowi fl = { .nl_u = { .ip4_u =
968                                               { .daddr = mutable->daddr,
969                                                 .saddr = mutable->saddr,
970                                                 .tos = tos } },
971                                     .proto = tnl_vport->tnl_ops->ipproto };
972
973                 if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
974                         return NULL;
975 #else
976                 struct flowi4 fl = { .daddr = mutable->daddr,
977                                      .saddr = mutable->saddr,
978                                      .flowi4_tos = tos,
979                                      .flowi4_proto = tnl_vport->tnl_ops->ipproto };
980
981                 rt = ip_route_output_key(&init_net, &fl);
982                 if (IS_ERR(rt))
983                         return NULL;
984 #endif
985
986                 if (likely(tos == mutable->tos))
987                         *cache = build_cache(vport, mutable, rt);
988
989                 return rt;
990         }
991 }
992
993 static inline bool need_linearize(const struct sk_buff *skb)
994 {
995         int i;
996
997         if (unlikely(skb_shinfo(skb)->frag_list))
998                 return true;
999
1000         /*
1001          * Generally speaking we should linearize if there are paged frags.
1002          * However, if all of the refcounts are 1 we know nobody else can
1003          * change them from underneath us and we can skip the linearization.
1004          */
1005         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1006                 if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
1007                         return true;
1008
1009         return false;
1010 }
1011
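/* Prepares a packet for encapsulation: ensures there is enough headroom for
 * the tunnel header, segments GSO packets in software and resolves any
 * pending partial checksums.  Returns the prepared skb (possibly a chain of
 * GSO segments) or an ERR_PTR on failure. */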
1012 static struct sk_buff *handle_offloads(struct sk_buff *skb,
1013                                        const struct tnl_mutable_config *mutable,
1014                                        const struct rtable *rt)
1015 {
1016         int min_headroom;
1017         int err;
1018
1019         min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
1020                         + mutable->tunnel_hlen
1021                         + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
1022
1023         if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
1024                 int head_delta = SKB_DATA_ALIGN(min_headroom -
1025                                                 skb_headroom(skb) +
1026                                                 16);
1027                 err = pskb_expand_head(skb, max_t(int, head_delta, 0),
1028                                         0, GFP_ATOMIC);
1029                 if (unlikely(err))
1030                         goto error_free;
1031         }
1032
1033         forward_ip_summed(skb, true);
1034
1035         if (skb_is_gso(skb)) {
1036                 struct sk_buff *nskb;
1037
1038                 nskb = skb_gso_segment(skb, 0);
1039                 if (IS_ERR(nskb)) {
1040                         kfree_skb(skb);
1041                         err = PTR_ERR(nskb);
1042                         goto error;
1043                 }
1044
1045                 consume_skb(skb);
1046                 skb = nskb;
1047         } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
1048                 /* Pages aren't locked and could change at any time.
1049                  * If this happens after we compute the checksum, the
1050                  * checksum will be wrong.  We linearize now to avoid
1051                  * this problem.
1052                  */
1053                 if (unlikely(need_linearize(skb))) {
1054                         err = __skb_linearize(skb);
1055                         if (unlikely(err))
1056                                 goto error_free;
1057                 }
1058
1059                 err = skb_checksum_help(skb);
1060                 if (unlikely(err))
1061                         goto error_free;
1062         }
1063
1064         set_ip_summed(skb, OVS_CSUM_NONE);
1065
1066         return skb;
1067
1068 error_free:
1069         kfree_skb(skb);
1070 error:
1071         return ERR_PTR(err);
1072 }
1073
1074 static int send_frags(struct sk_buff *skb,
1075                       const struct tnl_mutable_config *mutable)
1076 {
1077         int sent_len;
1078
1079         sent_len = 0;
1080         while (skb) {
1081                 struct sk_buff *next = skb->next;
1082                 int frag_len = skb->len - mutable->tunnel_hlen;
1083                 int err;
1084
1085                 skb->next = NULL;
1086                 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
1087
1088                 err = ip_local_out(skb);
1089                 skb = next;
1090                 if (unlikely(net_xmit_eval(err)))
1091                         goto free_frags;
1092                 sent_len += frag_len;
1093         }
1094
1095         return sent_len;
1096
1097 free_frags:
1098         /*
1099          * There's no point in continuing to send fragments once one has been
1100          * dropped, so just free the rest.  This may help ease the congestion
1101          * that caused the first packet to be dropped.
1102          */
1103         tnl_free_linked_skbs(skb);
1104         return sent_len;
1105 }
1106
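/* Transmit path shared by all tunnel vports.  Returns the number of bytes
 * successfully sent, not counting the tunnel header; if nothing could be
 * sent, a vport error is recorded and 0 is returned. */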
1107 int tnl_send(struct vport *vport, struct sk_buff *skb)
1108 {
1109         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1110         const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
1111
1112         enum vport_err_type err = VPORT_E_TX_ERROR;
1113         struct rtable *rt;
1114         struct dst_entry *unattached_dst = NULL;
1115         struct tnl_cache *cache;
1116         int sent_len = 0;
1117         __be16 frag_off = 0;
1118         u8 ttl;
1119         u8 inner_tos;
1120         u8 tos;
1121
1122         /* Validate the protocol headers before we try to use them. */
1123         if (skb->protocol == htons(ETH_P_8021Q) &&
1124             !vlan_tx_tag_present(skb)) {
1125                 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
1126                         goto error_free;
1127
1128                 skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
1129                 skb_set_network_header(skb, VLAN_ETH_HLEN);
1130         }
1131
1132         if (skb->protocol == htons(ETH_P_IP)) {
1133                 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1134                     + sizeof(struct iphdr))))
1135                         skb->protocol = 0;
1136         }
1137 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1138         else if (skb->protocol == htons(ETH_P_IPV6)) {
1139                 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1140                     + sizeof(struct ipv6hdr))))
1141                         skb->protocol = 0;
1142         }
1143 #endif
1144
1145         /* ToS */
1146         if (skb->protocol == htons(ETH_P_IP))
1147                 inner_tos = ip_hdr(skb)->tos;
1148 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1149         else if (skb->protocol == htons(ETH_P_IPV6))
1150                 inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
1151 #endif
1152         else
1153                 inner_tos = 0;
1154
1155         if (mutable->flags & TNL_F_TOS_INHERIT)
1156                 tos = inner_tos;
1157         else
1158                 tos = mutable->tos;
1159
1160         tos = INET_ECN_encapsulate(tos, inner_tos);
1161
1162         /* Route lookup */
1163         rt = find_route(vport, mutable, tos, &cache);
1164         if (unlikely(!rt))
1165                 goto error_free;
1166         if (unlikely(!cache))
1167                 unattached_dst = &rt_dst(rt);
1168
1169         /* Reset SKB */
1170         nf_reset(skb);
1171         secpath_reset(skb);
1172         skb_dst_drop(skb);
1173         skb_clear_rxhash(skb);
1174
1175         /* Offloading */
1176         skb = handle_offloads(skb, mutable, rt);
1177         if (IS_ERR(skb))
1178                 goto error;
1179
1180         /* MTU */
1181         if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
1182                 err = VPORT_E_TX_DROPPED;
1183                 goto error_free;
1184         }
1185
1186         /*
1187          * If we are over the MTU, allow the IP stack to handle fragmentation.
1188          * Fragmentation is a slow path anyway.
1189          */
1190         if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
1191                      cache)) {
1192                 unattached_dst = &rt_dst(rt);
1193                 dst_hold(unattached_dst);
1194                 cache = NULL;
1195         }
1196
1197         /* TTL */
1198         ttl = mutable->ttl;
1199         if (!ttl)
1200                 ttl = ip4_dst_hoplimit(&rt_dst(rt));
1201
1202         if (mutable->flags & TNL_F_TTL_INHERIT) {
1203                 if (skb->protocol == htons(ETH_P_IP))
1204                         ttl = ip_hdr(skb)->ttl;
1205 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1206                 else if (skb->protocol == htons(ETH_P_IPV6))
1207                         ttl = ipv6_hdr(skb)->hop_limit;
1208 #endif
1209         }
1210
1211         while (skb) {
1212                 struct iphdr *iph;
1213                 struct sk_buff *next_skb = skb->next;
1214                 skb->next = NULL;
1215
1216                 if (unlikely(vlan_deaccel_tag(skb)))
1217                         goto next;
1218
1219                 if (likely(cache)) {
1220                         skb_push(skb, cache->len);
1221                         memcpy(skb->data, get_cached_header(cache), cache->len);
1222                         skb_reset_mac_header(skb);
1223                         skb_set_network_header(skb, rt_dst(rt).hh->hh_len);
1224
1225                 } else {
1226                         skb_push(skb, mutable->tunnel_hlen);
1227                         create_tunnel_header(vport, mutable, rt, skb->data);
1228                         skb_reset_network_header(skb);
1229
1230                         if (next_skb)
1231                                 skb_dst_set(skb, dst_clone(unattached_dst));
1232                         else {
1233                                 skb_dst_set(skb, unattached_dst);
1234                                 unattached_dst = NULL;
1235                         }
1236                 }
1237                 skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));
1238
1239                 iph = ip_hdr(skb);
1240                 iph->tos = tos;
1241                 iph->ttl = ttl;
1242                 iph->frag_off = frag_off;
1243                 ip_select_ident(iph, &rt_dst(rt), NULL);
1244
1245                 skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
1246                 if (unlikely(!skb))
1247                         goto next;
1248
1249                 if (likely(cache)) {
1250                         int orig_len = skb->len - cache->len;
1251                         struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
1252
1253                         skb->protocol = htons(ETH_P_IP);
1254                         iph = ip_hdr(skb);
1255                         iph->tot_len = htons(skb->len - skb_network_offset(skb));
1256                         ip_send_check(iph);
1257
1258                         if (cache_vport) {
1259                                 if (unlikely(compute_ip_summed(skb, true))) {
1260                                         kfree_skb(skb);
1261                                         goto next;
1262                                 }
1263
1264                                 OVS_CB(skb)->flow = cache->flow;
1265                                 vport_receive(cache_vport, skb);
1266                                 sent_len += orig_len;
1267                         } else {
1268                                 int xmit_err;
1269
1270                                 skb->dev = rt_dst(rt).dev;
1271                                 xmit_err = dev_queue_xmit(skb);
1272
1273                                 if (likely(net_xmit_eval(xmit_err) == 0))
1274                                         sent_len += orig_len;
1275                         }
1276                 } else
1277                         sent_len += send_frags(skb, mutable);
1278
1279 next:
1280                 skb = next_skb;
1281         }
1282
1283         if (unlikely(sent_len == 0))
1284                 vport_record_error(vport, VPORT_E_TX_DROPPED);
1285
1286         goto out;
1287
1288 error_free:
1289         tnl_free_linked_skbs(skb);
1290 error:
1291         vport_record_error(vport, err);
1292 out:
1293         dst_release(unattached_dst);
1294         return sent_len;
1295 }
1296
1297 static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
1298         [OVS_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
1299         [OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
1300         [OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
1301         [OVS_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
1302         [OVS_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
1303         [OVS_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
1304         [OVS_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
1305 };
1306
1307 /* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */
1308 static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
1309                           const struct vport *cur_vport,
1310                           struct tnl_mutable_config *mutable)
1311 {
1312         const struct vport *old_vport;
1313         const struct tnl_mutable_config *old_mutable;
1314         struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
1315         int err;
1316
1317         if (!options)
1318                 return -EINVAL;
1319
1320         err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
1321         if (err)
1322                 return err;
1323
1324         if (!a[OVS_TUNNEL_ATTR_FLAGS] || !a[OVS_TUNNEL_ATTR_DST_IPV4])
1325                 return -EINVAL;
1326
1327         mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;
1328
1329         if (a[OVS_TUNNEL_ATTR_SRC_IPV4])
1330                 mutable->saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
1331         mutable->daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);
1332
1333         if (a[OVS_TUNNEL_ATTR_TOS]) {
1334                 mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
1335                 if (mutable->tos != RT_TOS(mutable->tos))
1336                         return -EINVAL;
1337         }
1338
1339         if (a[OVS_TUNNEL_ATTR_TTL])
1340                 mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);
1341
1342         mutable->tunnel_type = tnl_ops->tunnel_type;
1343         if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
1344                 mutable->tunnel_type |= TNL_T_KEY_MATCH;
1345                 mutable->flags |= TNL_F_IN_KEY_MATCH;
1346         } else {
1347                 mutable->tunnel_type |= TNL_T_KEY_EXACT;
1348                 mutable->in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
1349         }
1350
1351         if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
1352                 mutable->flags |= TNL_F_OUT_KEY_ACTION;
1353         else
1354                 mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);
1355
1356         mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
1357         if (mutable->tunnel_hlen < 0)
1358                 return mutable->tunnel_hlen;
1359
1360         mutable->tunnel_hlen += sizeof(struct iphdr);
1361
1362         old_vport = tnl_find_port(mutable->saddr, mutable->daddr,
1363                                   mutable->in_key, mutable->tunnel_type,
1364                                   &old_mutable);
1365
1366         if (old_vport && old_vport != cur_vport)
1367                 return -EEXIST;
1368
1369         return 0;
1370 }
1371
1372 struct vport *tnl_create(const struct vport_parms *parms,
1373                          const struct vport_ops *vport_ops,
1374                          const struct tnl_ops *tnl_ops)
1375 {
1376         struct vport *vport;
1377         struct tnl_vport *tnl_vport;
1378         struct tnl_mutable_config *mutable;
1379         int initial_frag_id;
1380         int err;
1381
1382         vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
1383         if (IS_ERR(vport)) {
1384                 err = PTR_ERR(vport);
1385                 goto error;
1386         }
1387
1388         tnl_vport = tnl_vport_priv(vport);
1389
1390         strcpy(tnl_vport->name, parms->name);
1391         tnl_vport->tnl_ops = tnl_ops;
1392
1393         mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1394         if (!mutable) {
1395                 err = -ENOMEM;
1396                 goto error_free_vport;
1397         }
1398
1399         vport_gen_rand_ether_addr(mutable->eth_addr);
1400
1401         get_random_bytes(&initial_frag_id, sizeof(int));
1402         atomic_set(&tnl_vport->frag_id, initial_frag_id);
1403
1404         err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
1405         if (err)
1406                 goto error_free_mutable;
1407
1408         spin_lock_init(&tnl_vport->cache_lock);
1409
1410 #ifdef NEED_CACHE_TIMEOUT
1411         tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
1412                                        (net_random() % (MAX_CACHE_EXP / 2));
1413 #endif
1414
1415         rcu_assign_pointer(tnl_vport->mutable, mutable);
1416
1417         port_table_add_port(vport);
1418         return vport;
1419
1420 error_free_mutable:
1421         kfree(mutable);
1422 error_free_vport:
1423         vport_free(vport);
1424 error:
1425         return ERR_PTR(err);
1426 }
1427
1428 int tnl_set_options(struct vport *vport, struct nlattr *options)
1429 {
1430         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1431         const struct tnl_mutable_config *old_mutable;
1432         struct tnl_mutable_config *mutable;
1433         int err;
1434
1435         mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1436         if (!mutable) {
1437                 err = -ENOMEM;
1438                 goto error;
1439         }
1440
1441         /* Copy fields whose values should be retained. */
1442         old_mutable = rtnl_dereference(tnl_vport->mutable);
1443         mutable->seq = old_mutable->seq + 1;
1444         memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);
1445
1446         /* Parse the other options configured by userspace. */
1447         err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);
1448         if (err)
1449                 goto error_free;
1450
1451         if (mutable_hash(mutable) != mutable_hash(old_mutable))
1452                 port_table_move_port(vport, mutable);
             else
                     assign_config_rcu(vport, mutable);
1453
1454         return 0;
1455
1456 error_free:
1457         kfree(mutable);
1458 error:
1459         return err;
1460 }
1461
1462 int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
1463 {
1464         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1465         const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);
1466
1467         NLA_PUT_U32(skb, OVS_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC);
1468         NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->daddr);
1469
1470         if (!(mutable->flags & TNL_F_IN_KEY_MATCH))
1471                 NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->in_key);
1472         if (!(mutable->flags & TNL_F_OUT_KEY_ACTION))
1473                 NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key);
1474         if (mutable->saddr)
1475                 NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->saddr);
1476         if (mutable->tos)
1477                 NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos);
1478         if (mutable->ttl)
1479                 NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl);
1480
1481         return 0;
1482
1483 nla_put_failure:
1484         return -EMSGSIZE;
1485 }
1486
1487 static void free_port_rcu(struct rcu_head *rcu)
1488 {
1489         struct tnl_vport *tnl_vport = container_of(rcu,
1490                                                    struct tnl_vport, rcu);
1491
1492         free_cache((struct tnl_cache __force *)tnl_vport->cache);
1493         kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
1494         vport_free(tnl_vport_to_vport(tnl_vport));
1495 }
1496
1497 void tnl_destroy(struct vport *vport)
1498 {
1499         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1500         const struct tnl_mutable_config *mutable;
1501
1502         mutable = rtnl_dereference(tnl_vport->mutable);
1503         port_table_remove_port(vport);
1504         call_rcu(&tnl_vport->rcu, free_port_rcu);
1505 }
1506
1507 int tnl_set_addr(struct vport *vport, const unsigned char *addr)
1508 {
1509         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1510         struct tnl_mutable_config *mutable;
1511
1512         mutable = kmemdup(rtnl_dereference(tnl_vport->mutable),
1513                           sizeof(struct tnl_mutable_config), GFP_KERNEL);
1514         if (!mutable)
1515                 return -ENOMEM;
1516
1517         memcpy(mutable->eth_addr, addr, ETH_ALEN);
1518         assign_config_rcu(vport, mutable);
1519
1520         return 0;
1521 }
1522
1523 const char *tnl_get_name(const struct vport *vport)
1524 {
1525         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1526         return tnl_vport->name;
1527 }
1528
1529 const unsigned char *tnl_get_addr(const struct vport *vport)
1530 {
1531         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1532         return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
1533 }
1534
1535 void tnl_free_linked_skbs(struct sk_buff *skb)
1536 {
1537         while (skb) {
1538                 struct sk_buff *next = skb->next;
1539                 kfree_skb(skb);
1540                 skb = next;
1541         }
1542 }
1543
1544 int tnl_init(void)
1545 {
1546         int i;
1547
1548         port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
1549                         GFP_KERNEL);
1550         if (!port_table)
1551                 return -ENOMEM;
1552
1553         for (i = 0; i < PORT_TABLE_SIZE; i++)
1554                 INIT_HLIST_HEAD(&port_table[i]);
1555
1556         return 0;
1557 }
1558
1559 void tnl_exit(void)
1560 {
1561         int i;
1562
1563         for (i = 0; i < PORT_TABLE_SIZE; i++) {
1564                 struct tnl_vport *tnl_vport;
1565                 struct hlist_head *hash_head;
1566                 struct hlist_node *n;
1567
1568                 hash_head = &port_table[i];
1569                 hlist_for_each_entry(tnl_vport, n, hash_head, hash_node) {
1570                         BUG();
1571                         goto out;
1572                 }
1573         }
1574 out:
1575         kfree(port_table);
1576 }