sliver-openvswitch.git: datapath/tunnel.c
1 /*
2  * Copyright (c) 2010, 2011 Nicira Networks.
3  * Distributed under the terms of the GNU GPL version 2.
4  *
5  * Significant portions of this file may be copied from parts of the Linux
6  * kernel, by Linus Torvalds and others.
7  */
8
9 #include <linux/if_arp.h>
10 #include <linux/if_ether.h>
11 #include <linux/ip.h>
12 #include <linux/if_vlan.h>
13 #include <linux/igmp.h>
14 #include <linux/in.h>
15 #include <linux/in_route.h>
16 #include <linux/inetdevice.h>
17 #include <linux/jhash.h>
18 #include <linux/list.h>
19 #include <linux/kernel.h>
20 #include <linux/version.h>
21 #include <linux/workqueue.h>
22 #include <linux/rculist.h>
23
24 #include <net/dsfield.h>
25 #include <net/dst.h>
26 #include <net/icmp.h>
27 #include <net/inet_ecn.h>
28 #include <net/ip.h>
29 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
30 #include <net/ipv6.h>
31 #endif
32 #include <net/route.h>
33 #include <net/xfrm.h>
34
35 #include "checksum.h"
36 #include "datapath.h"
37 #include "tunnel.h"
38 #include "vlan.h"
39 #include "vport.h"
40 #include "vport-generic.h"
41 #include "vport-internal_dev.h"
42
43 #ifdef NEED_CACHE_TIMEOUT
44 /*
45  * On kernels where we can't quickly detect changes in the rest of the system
46  * we use an expiration time to invalidate the cache.  A shorter expiration
47  * reduces the length of time that we may potentially blackhole packets, while
48  * a longer time increases performance by reducing the frequency with which the
49  * cache needs to be rebuilt.  A variety of factors may cause the cache to be
50  * invalidated before the expiration time but this is the maximum.  The time
51  * is expressed in jiffies.
52  */
53 #define MAX_CACHE_EXP HZ
54 #endif
55
56 /*
57  * Interval to check for and remove caches that are no longer valid.  Caches
58  * are checked for validity before they are used for packet encapsulation and
59  * old caches are removed at that time.  However, if no packets are sent through
60  * the tunnel then the cache will never be destroyed.  Since it holds
61  * references to a number of system objects, the cache will continue to use
62  * system resources by not allowing those objects to be destroyed.  The cache
63  * cleaner is periodically run to free invalid caches.  It does not
64  * significantly affect system performance.  A lower interval will release
65  * resources faster but will itself consume resources by requiring more frequent
66  * checks.  A longer interval may result in messages being printed to the kernel
67  * message buffer about unreleased resources.  The interval is expressed in
68  * jiffies.
69  */
70 #define CACHE_CLEANER_INTERVAL (5 * HZ)
71
72 #define CACHE_DATA_ALIGN 16
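/* PORT_TABLE_SIZE must be a power of two: find_bucket() picks a bucket by
 * masking the hash with (PORT_TABLE_SIZE - 1). */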
73 #define PORT_TABLE_SIZE  1024
74
75 static struct hlist_head *port_table __read_mostly;
76 static int port_table_count;
77
78 static void cache_cleaner(struct work_struct *work);
79 static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
80
81 /*
82  * These are just used as an optimization: they don't require any kind of
83  * synchronization because we could have just as easily read the value before
84  * the port change happened.
85  */
86 static unsigned int key_local_remote_ports __read_mostly;
87 static unsigned int key_remote_ports __read_mostly;
88 static unsigned int key_multicast_ports __read_mostly;
89 static unsigned int local_remote_ports __read_mostly;
90 static unsigned int remote_ports __read_mostly;
91 static unsigned int multicast_ports __read_mostly;
92
93 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
94 #define rt_dst(rt) (rt->dst)
95 #else
96 #define rt_dst(rt) (rt->u.dst)
97 #endif
98
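/*
 * On 3.1+ kernels the hardware header cache lives in the neighbour entry
 * rather than in the dst, so rt_hh() looks it up through the route's
 * neighbour and only returns it once the neighbour is reachable
 * (NUD_CONNECTED) and has a cached header.
 */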
99 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
100 static struct hh_cache *rt_hh(struct rtable *rt)
101 {
102         struct neighbour *neigh = dst_get_neighbour(&rt->dst);
103         if (!neigh || !(neigh->nud_state & NUD_CONNECTED) ||
104                         !neigh->hh.hh_len)
105                 return NULL;
106         return &neigh->hh;
107 }
108 #else
109 #define rt_hh(rt) (rt_dst(rt).hh)
110 #endif
111
112 static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
113 {
114         return vport_from_priv(tnl_vport);
115 }
116
117 /* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
118  * cache_lock is held, so it is only for update side code.
119  */
120 static struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
121 {
122         return rcu_dereference_protected(tnl_vport->cache,
123                                  lockdep_is_held(&tnl_vport->cache_lock));
124 }
125
126 static void schedule_cache_cleaner(void)
127 {
128         schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
129 }
130
131 static void free_cache(struct tnl_cache *cache)
132 {
133         if (!cache)
134                 return;
135
136         flow_put(cache->flow);
137         ip_rt_put(cache->rt);
138         kfree(cache);
139 }
140
141 static void free_config_rcu(struct rcu_head *rcu)
142 {
143         struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
144         kfree(c);
145 }
146
147 static void free_cache_rcu(struct rcu_head *rcu)
148 {
149         struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
150         free_cache(c);
151 }
152
153 /* Frees the portion of 'mutable' that requires RTNL and thus can't happen
154  * within an RCU callback.  Fortunately this part doesn't require waiting for
155  * an RCU grace period.
156  */
157 static void free_mutable_rtnl(struct tnl_mutable_config *mutable)
158 {
159         ASSERT_RTNL();
160         if (ipv4_is_multicast(mutable->key.daddr) && mutable->mlink) {
161                 struct in_device *in_dev;
162                 in_dev = inetdev_by_index(&init_net, mutable->mlink);
163                 if (in_dev)
164                         ip_mc_dec_group(in_dev, mutable->key.daddr);
165         }
166 }
167
168 static void assign_config_rcu(struct vport *vport,
169                               struct tnl_mutable_config *new_config)
170 {
171         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
172         struct tnl_mutable_config *old_config;
173
174         old_config = rtnl_dereference(tnl_vport->mutable);
175         rcu_assign_pointer(tnl_vport->mutable, new_config);
176
177         free_mutable_rtnl(old_config);
178         call_rcu(&old_config->rcu, free_config_rcu);
179 }
180
181 static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
182 {
183         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
184         struct tnl_cache *old_cache;
185
186         old_cache = cache_dereference(tnl_vport);
187         rcu_assign_pointer(tnl_vport->cache, new_cache);
188
189         if (old_cache)
190                 call_rcu(&old_cache->rcu, free_cache_rcu);
191 }
192
193 static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
194 {
195         bool is_multicast = ipv4_is_multicast(mutable->key.daddr);
196
197         if (mutable->flags & TNL_F_IN_KEY_MATCH) {
198                 if (mutable->key.saddr)
199                         return &local_remote_ports;
200                 else if (is_multicast)
201                         return &multicast_ports;
202                 else
203                         return &remote_ports;
204         } else {
205                 if (mutable->key.saddr)
206                         return &key_local_remote_ports;
207                 else if (is_multicast)
208                         return &key_multicast_ports;
209                 else
210                         return &key_remote_ports;
211         }
212 }
213
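/* Hash the full lookup key.  jhash2() hashes the key as an array of 32-bit
 * words, so PORT_KEY_LEN is assumed to be a multiple of sizeof(u32). */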
214 static u32 port_hash(const struct port_lookup_key *key)
215 {
216         return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
217 }
218
219 static struct hlist_head *find_bucket(u32 hash)
220 {
221         return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
222 }
223
224 static void port_table_add_port(struct vport *vport)
225 {
226         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
227         const struct tnl_mutable_config *mutable;
228         u32 hash;
229
230         if (port_table_count == 0)
231                 schedule_cache_cleaner();
232
233         mutable = rtnl_dereference(tnl_vport->mutable);
234         hash = port_hash(&mutable->key);
235         hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
236         port_table_count++;
237
238         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
239 }
240
241 static void port_table_move_port(struct vport *vport,
242                       struct tnl_mutable_config *new_mutable)
243 {
244         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
245         u32 hash;
246
247         hash = port_hash(&new_mutable->key);
248         hlist_del_init_rcu(&tnl_vport->hash_node);
249         hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
250
251         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
252         assign_config_rcu(vport, new_mutable);
253         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
254 }
255
256 static void port_table_remove_port(struct vport *vport)
257 {
258         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
259
260         hlist_del_init_rcu(&tnl_vport->hash_node);
261
262         port_table_count--;
263         if (port_table_count == 0)
264                 cancel_delayed_work_sync(&cache_cleaner_wq);
265
266         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
267 }
268
269 static struct vport *port_table_lookup(struct port_lookup_key *key,
270                                        const struct tnl_mutable_config **pmutable)
271 {
272         struct hlist_node *n;
273         struct hlist_head *bucket;
274         u32 hash = port_hash(key);
275         struct tnl_vport *tnl_vport;
276
277         bucket = find_bucket(hash);
278
279         hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
280                 struct tnl_mutable_config *mutable;
281
282                 mutable = rcu_dereference_rtnl(tnl_vport->mutable);
283                 if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
284                         *pmutable = mutable;
285                         return tnl_vport_to_vport(tnl_vport);
286                 }
287         }
288
289         return NULL;
290 }
291
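/*
 * Find the vport that should receive a tunnel packet, given the addresses and
 * key from its outer header.  Matches are tried from most to least specific:
 * exact in_key with both addresses, exact in_key with a wildcarded source,
 * the same two combinations with a wildcarded (flow-based) key, and finally
 * multicast ports.  Returns NULL if no configured port matches.
 */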
292 struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
293                             int tunnel_type,
294                             const struct tnl_mutable_config **mutable)
295 {
296         struct port_lookup_key lookup;
297         struct vport *vport;
298         bool is_multicast = ipv4_is_multicast(saddr);
299
300         lookup.saddr = saddr;
301         lookup.daddr = daddr;
302
303         /* First try for exact match on in_key. */
304         lookup.in_key = key;
305         lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
306         if (!is_multicast && key_local_remote_ports) {
307                 vport = port_table_lookup(&lookup, mutable);
308                 if (vport)
309                         return vport;
310         }
311         if (key_remote_ports) {
312                 lookup.saddr = 0;
313                 vport = port_table_lookup(&lookup, mutable);
314                 if (vport)
315                         return vport;
316
317                 lookup.saddr = saddr;
318         }
319
320         /* Then try matches that wildcard in_key. */
321         lookup.in_key = 0;
322         lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
323         if (!is_multicast && local_remote_ports) {
324                 vport = port_table_lookup(&lookup, mutable);
325                 if (vport)
326                         return vport;
327         }
328         if (remote_ports) {
329                 lookup.saddr = 0;
330                 vport = port_table_lookup(&lookup, mutable);
331                 if (vport)
332                         return vport;
333         }
334
335         if (is_multicast) {
336                 lookup.saddr = 0;
337                 lookup.daddr = saddr;
338                 if (key_multicast_ports) {
339                         lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
340                         lookup.in_key = key;
341                         vport = port_table_lookup(&lookup, mutable);
342                         if (vport)
343                                 return vport;
344                 }
345                 if (multicast_ports) {
346                         lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
347                         lookup.in_key = 0;
348                         vport = port_table_lookup(&lookup, mutable);
349                         if (vport)
350                                 return vport;
351                 }
352         }
353
354         return NULL;
355 }
356
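/* Propagate ECN on decapsulation: if the encapsulating IP header was marked
 * Congestion Experienced, set CE on the inner IPv4 or IPv6 header as well,
 * skipping over a single VLAN tag if one is present. */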
357 static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
358 {
359         if (unlikely(INET_ECN_is_ce(tos))) {
360                 __be16 protocol = skb->protocol;
361
362                 skb_set_network_header(skb, ETH_HLEN);
363
364                 if (protocol == htons(ETH_P_8021Q)) {
365                         if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
366                                 return;
367
368                         protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
369                         skb_set_network_header(skb, VLAN_ETH_HLEN);
370                 }
371
372                 if (protocol == htons(ETH_P_IP)) {
373                         if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
374                             + sizeof(struct iphdr))))
375                                 return;
376
377                         IP_ECN_set_ce(ip_hdr(skb));
378                 }
379 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
380                 else if (protocol == htons(ETH_P_IPV6)) {
381                         if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
382                             + sizeof(struct ipv6hdr))))
383                                 return;
384
385                         IP6_ECN_set_ce(ipv6_hdr(skb));
386                 }
387 #endif
388         }
389 }
390
391 /**
392  *      tnl_rcv - ingress point for generic tunnel code
393  *
394  * @vport: port this packet was received on
395  * @skb: received packet
396  * @tos: ToS from encapsulating IP packet, used to copy ECN bits
397  *
398  * Must be called with rcu_read_lock.
399  *
400  * Packets received by this function are in the following state:
401  * - skb->data points to the inner Ethernet header.
402  * - The inner Ethernet header is in the linear data area.
403  * - skb->csum does not include the inner Ethernet header.
404  * - The layer pointers are undefined.
405  */
406 void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
407 {
408         struct ethhdr *eh;
409
410         skb_reset_mac_header(skb);
411         eh = eth_hdr(skb);
412
413         if (likely(ntohs(eh->h_proto) >= 1536))
414                 skb->protocol = eh->h_proto;
415         else
416                 skb->protocol = htons(ETH_P_802_2);
417
418         skb_dst_drop(skb);
419         nf_reset(skb);
420         skb_clear_rxhash(skb);
421         secpath_reset(skb);
422
423         ecn_decapsulate(skb, tos);
424         vlan_set_tci(skb, 0);
425
426         if (unlikely(compute_ip_summed(skb, false))) {
427                 kfree_skb(skb);
428                 return;
429         }
430
431         vport_receive(vport, skb);
432 }
433
434 static bool check_ipv4_address(__be32 addr)
435 {
436         if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
437             || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
438                 return false;
439
440         return true;
441 }
442
443 static bool ipv4_should_icmp(struct sk_buff *skb)
444 {
445         struct iphdr *old_iph = ip_hdr(skb);
446
447         /* Don't respond to L2 broadcast or multicast. */
448         if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
449                 return false;
450
451         /* Don't respond to L3 broadcast or invalid addresses. */
452         if (!check_ipv4_address(old_iph->daddr) ||
453             !check_ipv4_address(old_iph->saddr))
454                 return false;
455
456         /* Only respond to the first fragment. */
457         if (old_iph->frag_off & htons(IP_OFFSET))
458                 return false;
459
460         /* Don't respond to ICMP error messages. */
461         if (old_iph->protocol == IPPROTO_ICMP) {
462                 u8 icmp_type, *icmp_typep;
463
464                 icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
465                                                 (old_iph->ihl << 2) +
466                                                 offsetof(struct icmphdr, type) -
467                                                 skb->data, sizeof(icmp_type),
468                                                 &icmp_type);
469
470                 if (!icmp_typep)
471                         return false;
472
473                 if (*icmp_typep > NR_ICMP_TYPES
474                         || (*icmp_typep <= ICMP_PARAMETERPROB
475                                 && *icmp_typep != ICMP_ECHOREPLY
476                                 && *icmp_typep != ICMP_ECHO))
477                         return false;
478         }
479
480         return true;
481 }
482
483 static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
484                             unsigned int mtu, unsigned int payload_length)
485 {
486         struct iphdr *iph, *old_iph = ip_hdr(skb);
487         struct icmphdr *icmph;
488         u8 *payload;
489
490         iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
491         icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
492         payload = skb_put(nskb, payload_length);
493
494         /* IP */
495         iph->version            =       4;
496         iph->ihl                =       sizeof(struct iphdr) >> 2;
497         iph->tos                =       (old_iph->tos & IPTOS_TOS_MASK) |
498                                         IPTOS_PREC_INTERNETCONTROL;
499         iph->tot_len            =       htons(sizeof(struct iphdr)
500                                               + sizeof(struct icmphdr)
501                                               + payload_length);
502         get_random_bytes(&iph->id, sizeof(iph->id));
503         iph->frag_off           =       0;
504         iph->ttl                =       IPDEFTTL;
505         iph->protocol           =       IPPROTO_ICMP;
506         iph->daddr              =       old_iph->saddr;
507         iph->saddr              =       old_iph->daddr;
508
509         ip_send_check(iph);
510
511         /* ICMP */
512         icmph->type             =       ICMP_DEST_UNREACH;
513         icmph->code             =       ICMP_FRAG_NEEDED;
514         icmph->un.gateway       =       htonl(mtu);
515         icmph->checksum         =       0;
516
517         nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
518         nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
519                                             payload, payload_length,
520                                             nskb->csum);
521         icmph->checksum = csum_fold(nskb->csum);
522 }
523
524 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
525 static bool ipv6_should_icmp(struct sk_buff *skb)
526 {
527         struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
528         int addr_type;
529         int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
530         u8 nexthdr = ipv6_hdr(skb)->nexthdr;
531
532         /* Check source address is valid. */
533         addr_type = ipv6_addr_type(&old_ipv6h->saddr);
534         if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
535                 return false;
536
537         /* Don't reply to unspecified addresses. */
538         if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
539                 return false;
540
541         /* Don't respond to ICMP error messages. */
542         payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
543         if (payload_off < 0)
544                 return false;
545
546         if (nexthdr == NEXTHDR_ICMP) {
547                 u8 icmp_type, *icmp_typep;
548
549                 icmp_typep = skb_header_pointer(skb, payload_off +
550                                                 offsetof(struct icmp6hdr,
551                                                         icmp6_type),
552                                                 sizeof(icmp_type), &icmp_type);
553
554                 if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
555                         return false;
556         }
557
558         return true;
559 }
560
561 static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
562                             unsigned int mtu, unsigned int payload_length)
563 {
564         struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
565         struct icmp6hdr *icmp6h;
566         u8 *payload;
567
568         ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
569         icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
570         payload = skb_put(nskb, payload_length);
571
572         /* IPv6 */
573         ipv6h->version          =       6;
574         ipv6h->priority         =       0;
575         memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
576         ipv6h->payload_len      =       htons(sizeof(struct icmp6hdr)
577                                               + payload_length);
578         ipv6h->nexthdr          =       NEXTHDR_ICMP;
579         ipv6h->hop_limit        =       IPV6_DEFAULT_HOPLIMIT;
580         ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
581         ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);
582
583         /* ICMPv6 */
584         icmp6h->icmp6_type      =       ICMPV6_PKT_TOOBIG;
585         icmp6h->icmp6_code      =       0;
586         icmp6h->icmp6_cksum     =       0;
587         icmp6h->icmp6_mtu       =       htonl(mtu);
588
589         nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
590         nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
591                                             payload, payload_length,
592                                             nskb->csum);
593         icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
594                                                 sizeof(struct icmp6hdr)
595                                                 + payload_length,
596                                                 ipv6h->nexthdr, nskb->csum);
597 }
598 #endif /* IPv6 */
599
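/*
 * Build an ICMP "fragmentation needed" (or ICMPv6 "packet too big") reply for
 * a packet that exceeds the tunnel MTU and inject it into the datapath as if
 * it had been received on this vport.  Returns true if PMTU signaling applies
 * and the caller should drop the oversized packet.
 */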
600 bool tnl_frag_needed(struct vport *vport,
601                      const struct tnl_mutable_config *mutable,
602                      struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
603 {
604         unsigned int eth_hdr_len = ETH_HLEN;
605         unsigned int total_length = 0, header_length = 0, payload_length;
606         struct ethhdr *eh, *old_eh = eth_hdr(skb);
607         struct sk_buff *nskb;
608
609         /* Sanity check */
610         if (skb->protocol == htons(ETH_P_IP)) {
611                 if (mtu < IP_MIN_MTU)
612                         return false;
613
614                 if (!ipv4_should_icmp(skb))
615                         return true;
616         }
617 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
618         else if (skb->protocol == htons(ETH_P_IPV6)) {
619                 if (mtu < IPV6_MIN_MTU)
620                         return false;
621
622                 /*
623                  * In theory we should do PMTUD on IPv6 multicast messages but
624                  * we don't have an address to send from so just fragment.
625                  */
626                 if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
627                         return false;
628
629                 if (!ipv6_should_icmp(skb))
630                         return true;
631         }
632 #endif
633         else
634                 return false;
635
636         /* Allocate */
637         if (old_eh->h_proto == htons(ETH_P_8021Q))
638                 eth_hdr_len = VLAN_ETH_HLEN;
639
640         payload_length = skb->len - eth_hdr_len;
641         if (skb->protocol == htons(ETH_P_IP)) {
642                 header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
643                 total_length = min_t(unsigned int, header_length +
644                                                    payload_length, 576);
645         }
646 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
647         else {
648                 header_length = sizeof(struct ipv6hdr) +
649                                 sizeof(struct icmp6hdr);
650                 total_length = min_t(unsigned int, header_length +
651                                                   payload_length, IPV6_MIN_MTU);
652         }
653 #endif
654
655         payload_length = total_length - header_length;
656
657         nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
658                              payload_length);
659         if (!nskb)
660                 return false;
661
662         skb_reserve(nskb, NET_IP_ALIGN);
663
664         /* Ethernet / VLAN */
665         eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
666         memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
667         memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
668         nskb->protocol = eh->h_proto = old_eh->h_proto;
669         if (old_eh->h_proto == htons(ETH_P_8021Q)) {
670                 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;
671
672                 vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
673                 vh->h_vlan_encapsulated_proto = skb->protocol;
674         } else
675                 vlan_set_tci(nskb, vlan_get_tci(skb));
676         skb_reset_mac_header(nskb);
677
678         /* Protocol */
679         if (skb->protocol == htons(ETH_P_IP))
680                 ipv4_build_icmp(skb, nskb, mtu, payload_length);
681 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
682         else
683                 ipv6_build_icmp(skb, nskb, mtu, payload_length);
684 #endif
685
686         /*
687          * Assume that flow based keys are symmetric with respect to input
688          * and output and use the key that we were going to put on the
689          * outgoing packet for the fake received packet.  If the keys are
690          * not symmetric then PMTUD needs to be disabled since we won't have
691          * any way of synthesizing packets.
692          */
693         if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
694             (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
695                 OVS_CB(nskb)->tun_id = flow_key;
696
697         if (unlikely(compute_ip_summed(nskb, false))) {
698                 kfree_skb(nskb);
699                 return false;
700         }
701
702         vport_receive(vport, nskb);
703
704         return true;
705 }
706
707 static bool check_mtu(struct sk_buff *skb,
708                       struct vport *vport,
709                       const struct tnl_mutable_config *mutable,
710                       const struct rtable *rt, __be16 *frag_offp)
711 {
712         bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
713         bool pmtud = mutable->flags & TNL_F_PMTUD;
714         __be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
715         int mtu = 0;
716         unsigned int packet_length = skb->len - ETH_HLEN;
717
718         /* Allow for one level of tagging in the packet length. */
719         if (!vlan_tx_tag_present(skb) &&
720             eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
721                 packet_length -= VLAN_HLEN;
722
723         if (pmtud) {
724                 int vlan_header = 0;
725
726                 /* The tag needs to go in the packet regardless of where it
727                  * currently is, so subtract it from the MTU.
728                  */
729                 if (vlan_tx_tag_present(skb) ||
730                     eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
731                         vlan_header = VLAN_HLEN;
732
733                 mtu = dst_mtu(&rt_dst(rt))
734                         - ETH_HLEN
735                         - mutable->tunnel_hlen
736                         - vlan_header;
737         }
738
739         if (skb->protocol == htons(ETH_P_IP)) {
740                 struct iphdr *iph = ip_hdr(skb);
741
742                 if (df_inherit)
743                         frag_off = iph->frag_off & htons(IP_DF);
744
745                 if (pmtud && iph->frag_off & htons(IP_DF)) {
746                         mtu = max(mtu, IP_MIN_MTU);
747
748                         if (packet_length > mtu &&
749                             tnl_frag_needed(vport, mutable, skb, mtu,
750                                             OVS_CB(skb)->tun_id))
751                                 return false;
752                 }
753         }
754 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
755         else if (skb->protocol == htons(ETH_P_IPV6)) {
756                 /* IPv6 requires end hosts to do fragmentation
757                  * if the packet is above the minimum MTU.
758                  */
759                 if (df_inherit && packet_length > IPV6_MIN_MTU)
760                         frag_off = htons(IP_DF);
761
762                 if (pmtud) {
763                         mtu = max(mtu, IPV6_MIN_MTU);
764
765                         if (packet_length > mtu &&
766                             tnl_frag_needed(vport, mutable, skb, mtu,
767                                             OVS_CB(skb)->tun_id))
768                                 return false;
769                 }
770         }
771 #endif
772
773         *frag_offp = frag_off;
774         return true;
775 }
776
777 static void create_tunnel_header(const struct vport *vport,
778                                  const struct tnl_mutable_config *mutable,
779                                  const struct rtable *rt, void *header)
780 {
781         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
782         struct iphdr *iph = header;
783
784         iph->version    = 4;
785         iph->ihl        = sizeof(struct iphdr) >> 2;
786         iph->frag_off   = htons(IP_DF);
787         iph->protocol   = tnl_vport->tnl_ops->ipproto;
788         iph->tos        = mutable->tos;
789         iph->daddr      = rt->rt_dst;
790         iph->saddr      = rt->rt_src;
791         iph->ttl        = mutable->ttl;
792         if (!iph->ttl)
793                 iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));
794
795         tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
796 }
797
798 static void *get_cached_header(const struct tnl_cache *cache)
799 {
800         return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
801 }
802
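/*
 * A cached header may be reused only while the neighbour's L2 header is still
 * available (and, where the kernel exposes a sequence number, unchanged), the
 * cache has not expired (on kernels that use a timeout), the IPv4 routing
 * generation and the port's configuration sequence number still match the
 * values captured when the cache was built, and any flow cached for an
 * internal device is still alive.
 */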
803 static bool check_cache_valid(const struct tnl_cache *cache,
804                               const struct tnl_mutable_config *mutable)
805 {
806         struct hh_cache *hh;
807
808         if (!cache)
809                 return false;
810
811         hh = rt_hh(cache->rt);
812         return hh &&
813 #ifdef NEED_CACHE_TIMEOUT
814                 time_before(jiffies, cache->expiration) &&
815 #endif
816 #ifdef HAVE_RT_GENID
817                 atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
818 #endif
819 #ifdef HAVE_HH_SEQ
820                 hh->hh_lock.sequence == cache->hh_seq &&
821 #endif
822                 mutable->seq == cache->mutable_seq &&
823                 (!is_internal_dev(rt_dst(cache->rt).dev) ||
824                 (cache->flow && !cache->flow->dead));
825 }
826
827 static void __cache_cleaner(struct tnl_vport *tnl_vport)
828 {
829         const struct tnl_mutable_config *mutable =
830                         rcu_dereference(tnl_vport->mutable);
831         const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);
832
833         if (cache && !check_cache_valid(cache, mutable) &&
834             spin_trylock_bh(&tnl_vport->cache_lock)) {
835                 assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
836                 spin_unlock_bh(&tnl_vport->cache_lock);
837         }
838 }
839
840 static void cache_cleaner(struct work_struct *work)
841 {
842         int i;
843
844         schedule_cache_cleaner();
845
846         rcu_read_lock();
847         for (i = 0; i < PORT_TABLE_SIZE; i++) {
848                 struct hlist_node *n;
849                 struct hlist_head *bucket;
850                 struct tnl_vport  *tnl_vport;
851
852                 bucket = &port_table[i];
853                 hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
854                         __cache_cleaner(tnl_vport);
855         }
856         rcu_read_unlock();
857 }
858
859 static void create_eth_hdr(struct tnl_cache *cache, struct hh_cache *hh)
860 {
861         void *cache_data = get_cached_header(cache);
862         int hh_off;
863
864 #ifdef HAVE_HH_SEQ
865         unsigned hh_seq;
866
867         do {
868                 hh_seq = read_seqbegin(&hh->hh_lock);
869                 hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
870                 memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
871                 cache->hh_len = hh->hh_len;
872         } while (read_seqretry(&hh->hh_lock, hh_seq));
873
874         cache->hh_seq = hh_seq;
875 #else
876         read_lock(&hh->hh_lock);
877         hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
878         memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
879         cache->hh_len = hh->hh_len;
880         read_unlock(&hh->hh_lock);
881 #endif
882 }
883
884 static struct tnl_cache *build_cache(struct vport *vport,
885                                      const struct tnl_mutable_config *mutable,
886                                      struct rtable *rt)
887 {
888         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
889         struct tnl_cache *cache;
890         void *cache_data;
891         int cache_len;
892         struct hh_cache *hh;
893
894         if (!(mutable->flags & TNL_F_HDR_CACHE))
895                 return NULL;
896
897         /*
898          * If there is no entry in the ARP cache or if this device does not
899          * support hard header caching, just fall back to the IP stack.
900          */
901
902         hh = rt_hh(rt);
903         if (!hh)
904                 return NULL;
905
906         /*
907          * If the lock is contended, fall back to directly building the header.
908          * We're not going to help performance by sitting here spinning.
909          */
910         if (!spin_trylock(&tnl_vport->cache_lock))
911                 return NULL;
912
913         cache = cache_dereference(tnl_vport);
914         if (check_cache_valid(cache, mutable))
915                 goto unlock;
916         else
917                 cache = NULL;
918
919         cache_len = LL_RESERVED_SPACE(rt_dst(rt).dev) + mutable->tunnel_hlen;
920
921         cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
922                         cache_len, GFP_ATOMIC);
923         if (!cache)
924                 goto unlock;
925
926         create_eth_hdr(cache, hh);
927         cache_data = get_cached_header(cache) + cache->hh_len;
928         cache->len = cache->hh_len + mutable->tunnel_hlen;
929
930         create_tunnel_header(vport, mutable, rt, cache_data);
931
932         cache->mutable_seq = mutable->seq;
933         cache->rt = rt;
934 #ifdef NEED_CACHE_TIMEOUT
935         cache->expiration = jiffies + tnl_vport->cache_exp_interval;
936 #endif
937
938         if (is_internal_dev(rt_dst(rt).dev)) {
939                 struct sw_flow_key flow_key;
940                 struct vport *dst_vport;
941                 struct sk_buff *skb;
942                 int err;
943                 int flow_key_len;
944                 struct sw_flow *flow;
945
946                 dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
947                 if (!dst_vport)
948                         goto done;
949
950                 skb = alloc_skb(cache->len, GFP_ATOMIC);
951                 if (!skb)
952                         goto done;
953
954                 __skb_put(skb, cache->len);
955                 memcpy(skb->data, get_cached_header(cache), cache->len);
956
957                 err = flow_extract(skb, dst_vport->port_no, &flow_key,
958                                    &flow_key_len);
959
960                 consume_skb(skb);
961                 if (err)
962                         goto done;
963
964                 flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
965                                          &flow_key, flow_key_len);
966                 if (flow) {
967                         cache->flow = flow;
968                         flow_hold(flow);
969                 }
970         }
971
972 done:
973         assign_cache_rcu(vport, cache);
974
975 unlock:
976         spin_unlock(&tnl_vport->cache_lock);
977
978         return cache;
979 }
980
981 static struct rtable *__find_route(const struct tnl_mutable_config *mutable,
982                                    u8 ipproto, u8 tos)
983 {
984 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
985         struct flowi fl = { .nl_u = { .ip4_u = {
986                                         .daddr = mutable->key.daddr,
987                                         .saddr = mutable->key.saddr,
988                                         .tos = tos } },
989                             .proto = ipproto };
990         struct rtable *rt;
991
992         if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
993                 return ERR_PTR(-EADDRNOTAVAIL);
994
995         return rt;
996 #else
997         struct flowi4 fl = { .daddr = mutable->key.daddr,
998                              .saddr = mutable->key.saddr,
999                              .flowi4_tos = tos,
1000                              .flowi4_proto = ipproto };
1001
1002         return ip_route_output_key(&init_net, &fl);
1003 #endif
1004 }
1005
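/* Return a route to the tunnel destination.  If the ToS matches the
 * configured one and the header cache is still valid, reuse the cached route;
 * otherwise do a fresh lookup and, when possible, rebuild the cache. */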
1006 static struct rtable *find_route(struct vport *vport,
1007                                  const struct tnl_mutable_config *mutable,
1008                                  u8 tos, struct tnl_cache **cache)
1009 {
1010         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1011         struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);
1012
1013         *cache = NULL;
1014         tos = RT_TOS(tos);
1015
1016         if (likely(tos == mutable->tos &&
1017             check_cache_valid(cur_cache, mutable))) {
1018                 *cache = cur_cache;
1019                 return cur_cache->rt;
1020         } else {
1021                 struct rtable *rt;
1022
1023                 rt = __find_route(mutable, tnl_vport->tnl_ops->ipproto, tos);
1024                 if (IS_ERR(rt))
1025                         return NULL;
1026
1027                 if (likely(tos == mutable->tos))
1028                         *cache = build_cache(vport, mutable, rt);
1029
1030                 return rt;
1031         }
1032 }
1033
1034 static bool need_linearize(const struct sk_buff *skb)
1035 {
1036         int i;
1037
1038         if (unlikely(skb_shinfo(skb)->frag_list))
1039                 return true;
1040
1041         /*
1042          * Generally speaking we should linearize if there are paged frags.
1043          * However, if all of the refcounts are 1 we know nobody else can
1044          * change them from underneath us and we can skip the linearization.
1045          */
1046         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1047                 if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
1048                         return true;
1049
1050         return false;
1051 }
1052
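/*
 * Prepare a packet for encapsulation: make sure there is enough headroom for
 * the link-layer and tunnel headers, segment GSO packets in software, and
 * complete any pending partial checksum (linearizing first if shared pages
 * might change underneath us).  Returns the resulting skb or segment list, or
 * an ERR_PTR() on failure.
 */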
1053 static struct sk_buff *handle_offloads(struct sk_buff *skb,
1054                                        const struct tnl_mutable_config *mutable,
1055                                        const struct rtable *rt)
1056 {
1057         int min_headroom;
1058         int err;
1059
1060         min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
1061                         + mutable->tunnel_hlen
1062                         + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
1063
1064         if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
1065                 int head_delta = SKB_DATA_ALIGN(min_headroom -
1066                                                 skb_headroom(skb) +
1067                                                 16);
1068                 err = pskb_expand_head(skb, max_t(int, head_delta, 0),
1069                                         0, GFP_ATOMIC);
1070                 if (unlikely(err))
1071                         goto error_free;
1072         }
1073
1074         forward_ip_summed(skb, true);
1075
1076         if (skb_is_gso(skb)) {
1077                 struct sk_buff *nskb;
1078
1079                 nskb = skb_gso_segment(skb, 0);
1080                 if (IS_ERR(nskb)) {
1081                         kfree_skb(skb);
1082                         err = PTR_ERR(nskb);
1083                         goto error;
1084                 }
1085
1086                 consume_skb(skb);
1087                 skb = nskb;
1088         } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
1089                 /* Pages aren't locked and could change at any time.
1090                  * If this happens after we compute the checksum, the
1091                  * checksum will be wrong.  We linearize now to avoid
1092                  * this problem.
1093                  */
1094                 if (unlikely(need_linearize(skb))) {
1095                         err = __skb_linearize(skb);
1096                         if (unlikely(err))
1097                                 goto error_free;
1098                 }
1099
1100                 err = skb_checksum_help(skb);
1101                 if (unlikely(err))
1102                         goto error_free;
1103         }
1104
1105         set_ip_summed(skb, OVS_CSUM_NONE);
1106
1107         return skb;
1108
1109 error_free:
1110         kfree_skb(skb);
1111 error:
1112         return ERR_PTR(err);
1113 }
1114
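/* Hand each segment of a chain to the IP stack with ip_local_out(), returning
 * the number of payload bytes sent (tunnel headers excluded).  On the first
 * transmit error the remaining segments are freed. */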
1115 static int send_frags(struct sk_buff *skb,
1116                       const struct tnl_mutable_config *mutable)
1117 {
1118         int sent_len;
1119
1120         sent_len = 0;
1121         while (skb) {
1122                 struct sk_buff *next = skb->next;
1123                 int frag_len = skb->len - mutable->tunnel_hlen;
1124                 int err;
1125
1126                 skb->next = NULL;
1127                 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
1128
1129                 err = ip_local_out(skb);
1130                 skb = next;
1131                 if (unlikely(net_xmit_eval(err)))
1132                         goto free_frags;
1133                 sent_len += frag_len;
1134         }
1135
1136         return sent_len;
1137
1138 free_frags:
1139         /*
1140          * There's no point in continuing to send fragments once one has been
1141          * dropped, so just free the rest.  This may also help relieve the congestion
1142          * that caused the first packet to be dropped.
1143          */
1144         tnl_free_linked_skbs(skb);
1145         return sent_len;
1146 }
1147
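/*
 * Encapsulate and transmit a packet on a tunnel vport: validate the inner
 * headers, pick ToS and TTL, look up a route (reusing the header cache when
 * possible), apply offload and MTU handling, then prepend the cached or
 * freshly built tunnel header to each segment and either loop it back into an
 * internal device or hand it to the IP stack.  Returns the number of payload
 * bytes sent.
 */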
1148 int tnl_send(struct vport *vport, struct sk_buff *skb)
1149 {
1150         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1151         const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
1152
1153         enum vport_err_type err = VPORT_E_TX_ERROR;
1154         struct rtable *rt;
1155         struct dst_entry *unattached_dst = NULL;
1156         struct tnl_cache *cache;
1157         int sent_len = 0;
1158         __be16 frag_off = 0;
1159         u8 ttl;
1160         u8 inner_tos;
1161         u8 tos;
1162
1163         /* Validate the protocol headers before we try to use them. */
1164         if (skb->protocol == htons(ETH_P_8021Q) &&
1165             !vlan_tx_tag_present(skb)) {
1166                 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
1167                         goto error_free;
1168
1169                 skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
1170                 skb_set_network_header(skb, VLAN_ETH_HLEN);
1171         }
1172
1173         if (skb->protocol == htons(ETH_P_IP)) {
1174                 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1175                     + sizeof(struct iphdr))))
1176                         skb->protocol = 0;
1177         }
1178 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1179         else if (skb->protocol == htons(ETH_P_IPV6)) {
1180                 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1181                     + sizeof(struct ipv6hdr))))
1182                         skb->protocol = 0;
1183         }
1184 #endif
1185
1186         /* ToS */
1187         if (skb->protocol == htons(ETH_P_IP))
1188                 inner_tos = ip_hdr(skb)->tos;
1189 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1190         else if (skb->protocol == htons(ETH_P_IPV6))
1191                 inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
1192 #endif
1193         else
1194                 inner_tos = 0;
1195
1196         if (mutable->flags & TNL_F_TOS_INHERIT)
1197                 tos = inner_tos;
1198         else
1199                 tos = mutable->tos;
1200
1201         tos = INET_ECN_encapsulate(tos, inner_tos);
1202
1203         /* Route lookup */
1204         rt = find_route(vport, mutable, tos, &cache);
1205         if (unlikely(!rt))
1206                 goto error_free;
1207         if (unlikely(!cache))
1208                 unattached_dst = &rt_dst(rt);
1209
1210         /* Reset SKB */
1211         nf_reset(skb);
1212         secpath_reset(skb);
1213         skb_dst_drop(skb);
1214         skb_clear_rxhash(skb);
1215
1216         /* Offloading */
1217         skb = handle_offloads(skb, mutable, rt);
1218         if (IS_ERR(skb))
1219                 goto error;
1220
1221         /* MTU */
1222         if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
1223                 err = VPORT_E_TX_DROPPED;
1224                 goto error_free;
1225         }
1226
1227         /*
1228          * If we are over the MTU, allow the IP stack to handle fragmentation.
1229          * Fragmentation is a slow path anyway.
1230          */
1231         if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
1232                      cache)) {
1233                 unattached_dst = &rt_dst(rt);
1234                 dst_hold(unattached_dst);
1235                 cache = NULL;
1236         }
1237
1238         /* TTL */
1239         ttl = mutable->ttl;
1240         if (!ttl)
1241                 ttl = ip4_dst_hoplimit(&rt_dst(rt));
1242
1243         if (mutable->flags & TNL_F_TTL_INHERIT) {
1244                 if (skb->protocol == htons(ETH_P_IP))
1245                         ttl = ip_hdr(skb)->ttl;
1246 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1247                 else if (skb->protocol == htons(ETH_P_IPV6))
1248                         ttl = ipv6_hdr(skb)->hop_limit;
1249 #endif
1250         }
1251
1252         while (skb) {
1253                 struct iphdr *iph;
1254                 struct sk_buff *next_skb = skb->next;
1255                 skb->next = NULL;
1256
1257                 if (unlikely(vlan_deaccel_tag(skb)))
1258                         goto next;
1259
1260                 if (likely(cache)) {
1261                         skb_push(skb, cache->len);
1262                         memcpy(skb->data, get_cached_header(cache), cache->len);
1263                         skb_reset_mac_header(skb);
1264                         skb_set_network_header(skb, cache->hh_len);
1265
1266                 } else {
1267                         skb_push(skb, mutable->tunnel_hlen);
1268                         create_tunnel_header(vport, mutable, rt, skb->data);
1269                         skb_reset_network_header(skb);
1270
1271                         if (next_skb)
1272                                 skb_dst_set(skb, dst_clone(unattached_dst));
1273                         else {
1274                                 skb_dst_set(skb, unattached_dst);
1275                                 unattached_dst = NULL;
1276                         }
1277                 }
1278                 skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));
1279
1280                 iph = ip_hdr(skb);
1281                 iph->tos = tos;
1282                 iph->ttl = ttl;
1283                 iph->frag_off = frag_off;
1284                 ip_select_ident(iph, &rt_dst(rt), NULL);
1285
1286                 skb = tnl_vport->tnl_ops->update_header(vport, mutable,
1287                                                         &rt_dst(rt), skb);
1288                 if (unlikely(!skb))
1289                         goto next;
1290
1291                 if (likely(cache)) {
1292                         int orig_len = skb->len - cache->len;
1293                         struct vport *cache_vport;
1294
1295                         cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
1296                         skb->protocol = htons(ETH_P_IP);
1297                         iph = ip_hdr(skb);
1298                         iph->tot_len = htons(skb->len - skb_network_offset(skb));
1299                         ip_send_check(iph);
1300
1301                         if (cache_vport) {
1302                                 if (unlikely(compute_ip_summed(skb, true))) {
1303                                         kfree_skb(skb);
1304                                         goto next;
1305                                 }
1306
1307                                 OVS_CB(skb)->flow = cache->flow;
1308                                 vport_receive(cache_vport, skb);
1309                                 sent_len += orig_len;
1310                         } else {
1311                                 int xmit_err;
1312
1313                                 skb->dev = rt_dst(rt).dev;
1314                                 xmit_err = dev_queue_xmit(skb);
1315
1316                                 if (likely(net_xmit_eval(xmit_err) == 0))
1317                                         sent_len += orig_len;
1318                         }
1319                 } else
1320                         sent_len += send_frags(skb, mutable);
1321
1322 next:
1323                 skb = next_skb;
1324         }
1325
1326         if (unlikely(sent_len == 0))
1327                 vport_record_error(vport, VPORT_E_TX_DROPPED);
1328
1329         goto out;
1330
1331 error_free:
1332         tnl_free_linked_skbs(skb);
1333 error:
1334         vport_record_error(vport, err);
1335 out:
1336         dst_release(unattached_dst);
1337         return sent_len;
1338 }
1339
1340 static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
1341         [OVS_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
1342         [OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
1343         [OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
1344         [OVS_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
1345         [OVS_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
1346         [OVS_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
1347         [OVS_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
1348 };
1349
1350 /* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be
1351  * zeroed. */
1352 static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
1353                           const struct vport *cur_vport,
1354                           struct tnl_mutable_config *mutable)
1355 {
1356         const struct vport *old_vport;
1357         const struct tnl_mutable_config *old_mutable;
1358         struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
1359         int err;
1360
1361         if (!options)
1362                 return -EINVAL;
1363
1364         err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
1365         if (err)
1366                 return err;
1367
1368         if (!a[OVS_TUNNEL_ATTR_FLAGS] || !a[OVS_TUNNEL_ATTR_DST_IPV4])
1369                 return -EINVAL;
1370
1371         mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;
1372
1373         mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);
1374         if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) {
1375                 if (ipv4_is_multicast(mutable->key.daddr))
1376                         return -EINVAL;
1377                 mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
1378         }
1379
1380         if (a[OVS_TUNNEL_ATTR_TOS]) {
1381                 mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
1382                 if (mutable->tos != RT_TOS(mutable->tos))
1383                         return -EINVAL;
1384         }
1385
1386         if (a[OVS_TUNNEL_ATTR_TTL])
1387                 mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);
1388
1389         mutable->key.tunnel_type = tnl_ops->tunnel_type;
1390         if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
1391                 mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
1392                 mutable->flags |= TNL_F_IN_KEY_MATCH;
1393         } else {
1394                 mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
1395                 mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
1396         }
1397
1398         if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
1399                 mutable->flags |= TNL_F_OUT_KEY_ACTION;
1400         else
1401                 mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);
1402
1403         mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
1404         if (mutable->tunnel_hlen < 0)
1405                 return mutable->tunnel_hlen;
1406
1407         mutable->tunnel_hlen += sizeof(struct iphdr);
1408
1409         old_vport = port_table_lookup(&mutable->key, &old_mutable);
1410         if (old_vport && old_vport != cur_vport)
1411                 return -EEXIST;
1412
1413         mutable->mlink = 0;
1414         if (ipv4_is_multicast(mutable->key.daddr)) {
1415                 struct net_device *dev;
1416                 struct rtable *rt;
1417
1418                 rt = __find_route(mutable, tnl_ops->ipproto, mutable->tos);
1419                 if (IS_ERR(rt))
1420                         return -EADDRNOTAVAIL;
1421                 dev = rt_dst(rt).dev;
1422                 ip_rt_put(rt);
1423                 if (__in_dev_get_rtnl(dev) == NULL)
1424                         return -EADDRNOTAVAIL;
1425                 mutable->mlink = dev->ifindex;
1426                 ip_mc_inc_group(__in_dev_get_rtnl(dev), mutable->key.daddr);
1427         }
1428
1429         return 0;
1430 }
1431
1432 struct vport *tnl_create(const struct vport_parms *parms,
1433                          const struct vport_ops *vport_ops,
1434                          const struct tnl_ops *tnl_ops)
1435 {
1436         struct vport *vport;
1437         struct tnl_vport *tnl_vport;
1438         struct tnl_mutable_config *mutable;
1439         int initial_frag_id;
1440         int err;
1441
1442         vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
1443         if (IS_ERR(vport)) {
1444                 err = PTR_ERR(vport);
1445                 goto error;
1446         }
1447
1448         tnl_vport = tnl_vport_priv(vport);
1449
1450         strcpy(tnl_vport->name, parms->name);
1451         tnl_vport->tnl_ops = tnl_ops;
1452
1453         mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1454         if (!mutable) {
1455                 err = -ENOMEM;
1456                 goto error_free_vport;
1457         }
1458
1459         random_ether_addr(mutable->eth_addr);
1460
1461         get_random_bytes(&initial_frag_id, sizeof(int));
1462         atomic_set(&tnl_vport->frag_id, initial_frag_id);
1463
1464         err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
1465         if (err)
1466                 goto error_free_mutable;
1467
1468         spin_lock_init(&tnl_vport->cache_lock);
1469
1470 #ifdef NEED_CACHE_TIMEOUT
1471         tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
1472                                        (net_random() % (MAX_CACHE_EXP / 2));
1473 #endif
1474
1475         rcu_assign_pointer(tnl_vport->mutable, mutable);
1476
1477         port_table_add_port(vport);
1478         return vport;
1479
1480 error_free_mutable:
1481         free_mutable_rtnl(mutable);
1482         kfree(mutable);
1483 error_free_vport:
1484         vport_free(vport);
1485 error:
1486         return ERR_PTR(err);
1487 }
1488
1489 int tnl_set_options(struct vport *vport, struct nlattr *options)
1490 {
1491         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1492         const struct tnl_mutable_config *old_mutable;
1493         struct tnl_mutable_config *mutable;
1494         int err;
1495
1496         mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1497         if (!mutable) {
1498                 err = -ENOMEM;
1499                 goto error;
1500         }
1501
1502         /* Copy fields whose values should be retained. */
1503         old_mutable = rtnl_dereference(tnl_vport->mutable);
1504         mutable->seq = old_mutable->seq + 1;
1505         memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);
1506
1507         /* Parse the others configured by userspace. */
1508         err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);
1509         if (err)
1510                 goto error_free;
1511
1512         if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
1513                 port_table_move_port(vport, mutable);
1514         else
1515                 assign_config_rcu(vport, mutable);
1516
1517         return 0;
1518
1519 error_free:
1520         free_mutable_rtnl(mutable);
1521         kfree(mutable);
1522 error:
1523         return err;
1524 }
1525
1526 int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
1527 {
1528         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1529         const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);
1530
1531         NLA_PUT_U32(skb, OVS_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC);
1532         NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr);
1533
1534         if (!(mutable->flags & TNL_F_IN_KEY_MATCH))
1535                 NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key);
1536         if (!(mutable->flags & TNL_F_OUT_KEY_ACTION))
1537                 NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key);
1538         if (mutable->key.saddr)
1539                 NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr);
1540         if (mutable->tos)
1541                 NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos);
1542         if (mutable->ttl)
1543                 NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl);
1544
1545         return 0;
1546
1547 nla_put_failure:
1548         return -EMSGSIZE;
1549 }
1550
1551 static void free_port_rcu(struct rcu_head *rcu)
1552 {
1553         struct tnl_vport *tnl_vport = container_of(rcu,
1554                                                    struct tnl_vport, rcu);
1555
1556         free_cache((struct tnl_cache __force *)tnl_vport->cache);
1557         kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
1558         vport_free(tnl_vport_to_vport(tnl_vport));
1559 }
1560
1561 void tnl_destroy(struct vport *vport)
1562 {
1563         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1564         struct tnl_mutable_config *mutable;
1565
1566         mutable = rtnl_dereference(tnl_vport->mutable);
1567         port_table_remove_port(vport);
1568         free_mutable_rtnl(mutable);
1569         call_rcu(&tnl_vport->rcu, free_port_rcu);
1570 }
1571
1572 int tnl_set_addr(struct vport *vport, const unsigned char *addr)
1573 {
1574         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1575         struct tnl_mutable_config *old_mutable, *mutable;
1576
1577         old_mutable = rtnl_dereference(tnl_vport->mutable);
1578         mutable = kmemdup(old_mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
1579         if (!mutable)
1580                 return -ENOMEM;
1581
1582         old_mutable->mlink = 0;     /* mc group membership moves to the new config */
1583
1584         memcpy(mutable->eth_addr, addr, ETH_ALEN);
1585         assign_config_rcu(vport, mutable);
1586
1587         return 0;
1588 }
1589
1590 const char *tnl_get_name(const struct vport *vport)
1591 {
1592         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1593         return tnl_vport->name;
1594 }
1595
1596 const unsigned char *tnl_get_addr(const struct vport *vport)
1597 {
1598         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1599         return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
1600 }
1601
1602 void tnl_free_linked_skbs(struct sk_buff *skb)
1603 {
1604         while (skb) {
1605                 struct sk_buff *next = skb->next;
1606                 kfree_skb(skb);
1607                 skb = next;
1608         }
1609 }
1610
1611 int tnl_init(void)
1612 {
1613         int i;
1614
1615         port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
1616                         GFP_KERNEL);
1617         if (!port_table)
1618                 return -ENOMEM;
1619
1620         for (i = 0; i < PORT_TABLE_SIZE; i++)
1621                 INIT_HLIST_HEAD(&port_table[i]);
1622
1623         return 0;
1624 }
1625
1626 void tnl_exit(void)
1627 {
1628         int i;
1629
1630         for (i = 0; i < PORT_TABLE_SIZE; i++) {
1631                 struct tnl_vport *tnl_vport;
1632                 struct hlist_head *hash_head;
1633                 struct hlist_node *n;
1634
1635                 hash_head = &port_table[i];
1636                 hlist_for_each_entry(tnl_vport, n, hash_head, hash_node) {
1637                         BUG();
1638                         goto out;
1639                 }
1640         }
1641 out:
1642         kfree(port_table);
1643 }