datapath: Update kernel support to 3.2.
datapath/tunnel.c
/*
 * Copyright (c) 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "actions.h"
#include "checksum.h"
#include "datapath.h"
#include "tunnel.h"
#include "vlan.h"
#include "vport.h"
#include "vport-generic.h"
#include "vport-internal_dev.h"

#ifdef NEED_CACHE_TIMEOUT
/*
 * On kernels where we can't quickly detect changes in the rest of the system,
 * we use an expiration time to invalidate the cache.  A shorter expiration
 * reduces the length of time that we may potentially blackhole packets, while
 * a longer time increases performance by reducing the frequency with which
 * the cache needs to be rebuilt.  A variety of factors may cause the cache to
 * be invalidated before the expiration time, but this is the maximum.  The
 * time is expressed in jiffies.
 */
#define MAX_CACHE_EXP HZ
#endif

/*
 * Interval at which to check for and remove caches that are no longer valid.
 * Caches are checked for validity before they are used for packet
 * encapsulation, and old caches are removed at that time.  However, if no
 * packets are sent through the tunnel, the cache will never be destroyed.
 * Since it holds references to a number of system objects, the cache
 * continues to consume system resources by preventing those objects from
 * being destroyed.  The cache cleaner is periodically run to free invalid
 * caches.  It does not significantly affect system performance.  A lower
 * interval releases resources faster but itself consumes resources by
 * requiring more frequent checks.  A longer interval may result in messages
 * being printed to the kernel message buffer about unreleased resources.
 * The interval is expressed in jiffies.
 */
#define CACHE_CLEANER_INTERVAL (5 * HZ)

#define CACHE_DATA_ALIGN 16
#define PORT_TABLE_SIZE  1024

static struct hlist_head *port_table __read_mostly;
static int port_table_count;

static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);

/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int key_multicast_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;
static unsigned int multicast_ports __read_mostly;

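/*
 * Compatibility shims: 'struct rtable' embeds its dst_entry directly as
 * 'dst' starting in 2.6.36 (it was 'u.dst' before that), and as of 3.1 the
 * hardware header cache lives in 'struct neighbour', reached through
 * dst_get_neighbour(), rather than directly in the dst_entry.
 */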
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
static struct hh_cache *rt_hh(struct rtable *rt)
{
        struct neighbour *neigh = dst_get_neighbour(&rt->dst);
        if (!neigh || !(neigh->nud_state & NUD_CONNECTED) ||
                        !neigh->hh.hh_len)
                return NULL;
        return &neigh->hh;
}
#else
#define rt_hh(rt) (rt_dst(rt).hh)
#endif

static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
        return vport_from_priv(tnl_vport);
}

/* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
 * cache_lock is held, so it is only for update-side code.
 */
static struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
{
        return rcu_dereference_protected(tnl_vport->cache,
                                 lockdep_is_held(&tnl_vport->cache_lock));
}

static void schedule_cache_cleaner(void)
{
        schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}

static void free_cache(struct tnl_cache *cache)
{
        if (!cache)
                return;

        flow_put(cache->flow);
        ip_rt_put(cache->rt);
        kfree(cache);
}

static void free_config_rcu(struct rcu_head *rcu)
{
        struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
        kfree(c);
}

static void free_cache_rcu(struct rcu_head *rcu)
{
        struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
        free_cache(c);
}

/* Frees the portion of 'mutable' that requires RTNL and thus can't happen
 * within an RCU callback.  Fortunately this part doesn't require waiting for
 * an RCU grace period.
 */
static void free_mutable_rtnl(struct tnl_mutable_config *mutable)
{
        ASSERT_RTNL();
        if (ipv4_is_multicast(mutable->key.daddr) && mutable->mlink) {
                struct in_device *in_dev;
                in_dev = inetdev_by_index(&init_net, mutable->mlink);
                if (in_dev)
                        ip_mc_dec_group(in_dev, mutable->key.daddr);
        }
}

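/* Publishes 'new_config' as the port's mutable configuration and disposes of
 * the old one after an RCU grace period.  Caller must hold RTNL.
 */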
static void assign_config_rcu(struct vport *vport,
                              struct tnl_mutable_config *new_config)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *old_config;

        old_config = rtnl_dereference(tnl_vport->mutable);
        rcu_assign_pointer(tnl_vport->mutable, new_config);

        free_mutable_rtnl(old_config);
        call_rcu(&old_config->rcu, free_config_rcu);
}

static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *old_cache;

        old_cache = cache_dereference(tnl_vport);
        rcu_assign_pointer(tnl_vport->cache, new_cache);

        if (old_cache)
                call_rcu(&old_cache->rcu, free_cache_rcu);
}

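/* Returns a pointer to the counter for the class of ports that 'mutable'
 * falls into: keyed vs. flow-keyed, crossed with local/remote, remote-only,
 * and multicast destination addresses.  tnl_find_port() consults these
 * counters to skip lookups for classes that have no ports.
 */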
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
        bool is_multicast = ipv4_is_multicast(mutable->key.daddr);

        if (mutable->flags & TNL_F_IN_KEY_MATCH) {
                if (mutable->key.saddr)
                        return &local_remote_ports;
                else if (is_multicast)
                        return &multicast_ports;
                else
                        return &remote_ports;
        } else {
                if (mutable->key.saddr)
                        return &key_local_remote_ports;
                else if (is_multicast)
                        return &key_multicast_ports;
                else
                        return &key_remote_ports;
        }
}

static u32 port_hash(const struct port_lookup_key *key)
{
        return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
}

static struct hlist_head *find_bucket(u32 hash)
{
        return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
}

static void port_table_add_port(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable;
        u32 hash;

        if (port_table_count == 0)
                schedule_cache_cleaner();

        mutable = rtnl_dereference(tnl_vport->mutable);
        hash = port_hash(&mutable->key);
        hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
        port_table_count++;

        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}

static void port_table_move_port(struct vport *vport,
                      struct tnl_mutable_config *new_mutable)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        u32 hash;

        hash = port_hash(&new_mutable->key);
        hlist_del_init_rcu(&tnl_vport->hash_node);
        hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));

        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
        assign_config_rcu(vport, new_mutable);
        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}

static void port_table_remove_port(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

        hlist_del_init_rcu(&tnl_vport->hash_node);

        port_table_count--;
        if (port_table_count == 0)
                cancel_delayed_work_sync(&cache_cleaner_wq);

        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
}

static struct vport *port_table_lookup(struct port_lookup_key *key,
                                       const struct tnl_mutable_config **pmutable)
{
        struct hlist_node *n;
        struct hlist_head *bucket;
        u32 hash = port_hash(key);
        struct tnl_vport *tnl_vport;

        bucket = find_bucket(hash);

        hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
                struct tnl_mutable_config *mutable;

                mutable = rcu_dereference_rtnl(tnl_vport->mutable);
                if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
                        *pmutable = mutable;
                        return tnl_vport_to_vport(tnl_vport);
                }
        }

        return NULL;
}

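/* Finds the tunnel vport that should receive a packet with the given outer
 * addresses and key.  Lookups go from most to least specific: exact key
 * match with and without a local address, then key-wildcard (flow-based)
 * match with and without a local address, and finally multicast, where the
 * packet's source address is matched against the configured multicast
 * destination.  The per-class counters let us skip empty classes entirely.
 */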
struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
                            int tunnel_type,
                            const struct tnl_mutable_config **mutable)
{
        struct port_lookup_key lookup;
        struct vport *vport;
        bool is_multicast = ipv4_is_multicast(saddr);

        lookup.saddr = saddr;
        lookup.daddr = daddr;

        /* First try for exact match on in_key. */
        lookup.in_key = key;
        lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
        if (!is_multicast && key_local_remote_ports) {
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;
        }
        if (key_remote_ports) {
                lookup.saddr = 0;
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;

                lookup.saddr = saddr;
        }

        /* Then try matches that wildcard in_key. */
        lookup.in_key = 0;
        lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
        if (!is_multicast && local_remote_ports) {
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;
        }
        if (remote_ports) {
                lookup.saddr = 0;
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;
        }

        if (is_multicast) {
                lookup.saddr = 0;
                lookup.daddr = saddr;
                if (key_multicast_ports) {
                        lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
                        lookup.in_key = key;
                        vport = port_table_lookup(&lookup, mutable);
                        if (vport)
                                return vport;
                }
                if (multicast_ports) {
                        lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
                        lookup.in_key = 0;
                        vport = port_table_lookup(&lookup, mutable);
                        if (vport)
                                return vport;
                }
        }

        return NULL;
}

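/* If the encapsulating IP header indicated congestion (CE), propagate the
 * CE mark into the inner IPv4 or IPv6 header so that ECN survives
 * decapsulation.
 */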
static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
{
        if (unlikely(INET_ECN_is_ce(tos))) {
                __be16 protocol = skb->protocol;

                skb_set_network_header(skb, ETH_HLEN);

                if (protocol == htons(ETH_P_8021Q)) {
                        if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                                return;

                        protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                        skb_set_network_header(skb, VLAN_ETH_HLEN);
                }

                if (protocol == htons(ETH_P_IP)) {
                        if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                            + sizeof(struct iphdr))))
                                return;

                        IP_ECN_set_ce(ip_hdr(skb));
                }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (protocol == htons(ETH_P_IPV6)) {
                        if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                            + sizeof(struct ipv6hdr))))
                                return;

                        IP6_ECN_set_ce(ipv6_hdr(skb));
                }
#endif
        }
}

/**
 *      tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 * @tos: ToS from encapsulating IP packet, used to copy ECN bits
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
{
        struct ethhdr *eh;

        skb_reset_mac_header(skb);
        eh = eth_hdr(skb);

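        /* Ethertype values start at 1536 (0x600); smaller values of h_proto
         * are 802.3 frame lengths, so treat those frames as raw 802.2.
         */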
        if (likely(ntohs(eh->h_proto) >= 1536))
                skb->protocol = eh->h_proto;
        else
                skb->protocol = htons(ETH_P_802_2);

        skb_dst_drop(skb);
        nf_reset(skb);
        skb_clear_rxhash(skb);
        secpath_reset(skb);

        ecn_decapsulate(skb, tos);
        vlan_set_tci(skb, 0);

        if (unlikely(compute_ip_summed(skb, false))) {
                kfree_skb(skb);
                return;
        }

        vport_receive(vport, skb);
}

static bool check_ipv4_address(__be32 addr)
{
        if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
            || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
                return false;

        return true;
}

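/* Applies the usual suppression rules for generating ICMP errors: don't
 * respond to L2 or L3 broadcasts, to non-initial fragments, or to ICMP
 * messages that are themselves errors.
 */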
static bool ipv4_should_icmp(struct sk_buff *skb)
{
        struct iphdr *old_iph = ip_hdr(skb);

        /* Don't respond to L2 broadcast. */
        if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
                return false;

        /* Don't respond to L3 broadcast or invalid addresses. */
        if (!check_ipv4_address(old_iph->daddr) ||
            !check_ipv4_address(old_iph->saddr))
                return false;

        /* Only respond to the first fragment. */
        if (old_iph->frag_off & htons(IP_OFFSET))
                return false;

        /* Don't respond to ICMP error messages. */
        if (old_iph->protocol == IPPROTO_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
                                                (old_iph->ihl << 2) +
                                                offsetof(struct icmphdr, type) -
                                                skb->data, sizeof(icmp_type),
                                                &icmp_type);

                if (!icmp_typep)
                        return false;

                if (*icmp_typep > NR_ICMP_TYPES
                        || (*icmp_typep <= ICMP_PARAMETERPROB
                                && *icmp_typep != ICMP_ECHOREPLY
                                && *icmp_typep != ICMP_ECHO))
                        return false;
        }

        return true;
}

static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                            unsigned int mtu, unsigned int payload_length)
{
        struct iphdr *iph, *old_iph = ip_hdr(skb);
        struct icmphdr *icmph;
        u8 *payload;

        iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
        icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
        payload = skb_put(nskb, payload_length);

        /* IP */
        iph->version            =       4;
        iph->ihl                =       sizeof(struct iphdr) >> 2;
        iph->tos                =       (old_iph->tos & IPTOS_TOS_MASK) |
                                        IPTOS_PREC_INTERNETCONTROL;
        iph->tot_len            =       htons(sizeof(struct iphdr)
                                              + sizeof(struct icmphdr)
                                              + payload_length);
        get_random_bytes(&iph->id, sizeof(iph->id));
        iph->frag_off           =       0;
        iph->ttl                =       IPDEFTTL;
        iph->protocol           =       IPPROTO_ICMP;
        iph->daddr              =       old_iph->saddr;
        iph->saddr              =       old_iph->daddr;

        ip_send_check(iph);

        /* ICMP */
        icmph->type             =       ICMP_DEST_UNREACH;
        icmph->code             =       ICMP_FRAG_NEEDED;
        icmph->un.gateway       =       htonl(mtu);
        icmph->checksum         =       0;

        nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmph->checksum = csum_fold(nskb->csum);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
        struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
        int addr_type;
        int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
        u8 nexthdr = ipv6_hdr(skb)->nexthdr;

        /* Check source address is valid. */
        addr_type = ipv6_addr_type(&old_ipv6h->saddr);
        if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
                return false;

        /* Don't reply to unspecified addresses. */
        if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
                return false;

        /* Don't respond to ICMP error messages. */
        payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
        if (payload_off < 0)
                return false;

        if (nexthdr == NEXTHDR_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, payload_off +
                                                offsetof(struct icmp6hdr,
                                                        icmp6_type),
                                                sizeof(icmp_type), &icmp_type);

                if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
                        return false;
        }

        return true;
}

static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                            unsigned int mtu, unsigned int payload_length)
{
        struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
        struct icmp6hdr *icmp6h;
        u8 *payload;

        ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
        icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
        payload = skb_put(nskb, payload_length);

        /* IPv6 */
        ipv6h->version          =       6;
        ipv6h->priority         =       0;
        memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
        ipv6h->payload_len      =       htons(sizeof(struct icmp6hdr)
                                              + payload_length);
        ipv6h->nexthdr          =       NEXTHDR_ICMP;
        ipv6h->hop_limit        =       IPV6_DEFAULT_HOPLIMIT;
        ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
        ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);

        /* ICMPv6 */
        icmp6h->icmp6_type      =       ICMPV6_PKT_TOOBIG;
        icmp6h->icmp6_code      =       0;
        icmp6h->icmp6_cksum     =       0;
        icmp6h->icmp6_mtu       =       htonl(mtu);

        nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
                                                sizeof(struct icmp6hdr)
                                                + payload_length,
                                                ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */

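/* Synthesizes an ICMP "fragmentation needed" (IPv4) or "packet too big"
 * (IPv6) message addressed to the sender of 'skb' and injects it back into
 * the datapath as if it had been received on 'vport'.  Returns true if the
 * oversized packet was reported to the sender (or the report was
 * deliberately suppressed), false if the caller should not treat it as
 * handled.
 */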
bool tnl_frag_needed(struct vport *vport,
                     const struct tnl_mutable_config *mutable,
                     struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
        unsigned int eth_hdr_len = ETH_HLEN;
        unsigned int total_length = 0, header_length = 0, payload_length;
        struct ethhdr *eh, *old_eh = eth_hdr(skb);
        struct sk_buff *nskb;

        /* Sanity check */
        if (skb->protocol == htons(ETH_P_IP)) {
                if (mtu < IP_MIN_MTU)
                        return false;

                if (!ipv4_should_icmp(skb))
                        return true;
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (mtu < IPV6_MIN_MTU)
                        return false;

                /*
                 * In theory we should do PMTUD on IPv6 multicast messages,
                 * but we don't have an address to send from, so just fragment.
                 */
                if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
                        return false;

                if (!ipv6_should_icmp(skb))
                        return true;
        }
#endif
        else
                return false;

        /* Allocate */
        if (old_eh->h_proto == htons(ETH_P_8021Q))
                eth_hdr_len = VLAN_ETH_HLEN;

        payload_length = skb->len - eth_hdr_len;
        if (skb->protocol == htons(ETH_P_IP)) {
                header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
                total_length = min_t(unsigned int, header_length +
                                                   payload_length, 576);
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else {
                header_length = sizeof(struct ipv6hdr) +
                                sizeof(struct icmp6hdr);
                total_length = min_t(unsigned int, header_length +
                                                  payload_length, IPV6_MIN_MTU);
        }
#endif

        payload_length = total_length - header_length;

        nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
                             payload_length);
        if (!nskb)
                return false;

        skb_reserve(nskb, NET_IP_ALIGN);

        /* Ethernet / VLAN */
        eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
        memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
        memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
        nskb->protocol = eh->h_proto = old_eh->h_proto;
        if (old_eh->h_proto == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

                vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
                vh->h_vlan_encapsulated_proto = skb->protocol;
        } else
                vlan_set_tci(nskb, vlan_get_tci(skb));
        skb_reset_mac_header(nskb);

        /* Protocol */
        if (skb->protocol == htons(ETH_P_IP))
                ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else
                ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

        /*
         * Assume that flow-based keys are symmetric with respect to input
         * and output and use the key that we were going to put on the
         * outgoing packet for the fake received packet.  If the keys are
         * not symmetric, then PMTUD needs to be disabled since we won't
         * have any way of synthesizing packets.
         */
        if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
            (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
                OVS_CB(nskb)->tun_id = flow_key;

        if (unlikely(compute_ip_summed(nskb, false))) {
                kfree_skb(nskb);
                return false;
        }

        vport_receive(vport, nskb);

        return true;
}

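/* Computes the DF bit for the outer IP header and, when path MTU discovery
 * is enabled, reports an oversized packet back to the sender via
 * tnl_frag_needed().  Returns false if the packet should be dropped rather
 * than transmitted.
 */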
static bool check_mtu(struct sk_buff *skb,
                      struct vport *vport,
                      const struct tnl_mutable_config *mutable,
                      const struct rtable *rt, __be16 *frag_offp)
{
        bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
        bool pmtud = mutable->flags & TNL_F_PMTUD;
        __be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
        int mtu = 0;
        unsigned int packet_length = skb->len - ETH_HLEN;

        /* Allow for one level of tagging in the packet length. */
        if (!vlan_tx_tag_present(skb) &&
            eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
                packet_length -= VLAN_HLEN;

        if (pmtud) {
                int vlan_header = 0;

                /* The tag needs to go in the packet regardless of where it
                 * currently is, so subtract it from the MTU.
                 */
                if (vlan_tx_tag_present(skb) ||
                    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
                        vlan_header = VLAN_HLEN;

                mtu = dst_mtu(&rt_dst(rt))
                        - ETH_HLEN
                        - mutable->tunnel_hlen
                        - vlan_header;
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);

                if (df_inherit)
                        frag_off = iph->frag_off & htons(IP_DF);

                if (pmtud && iph->frag_off & htons(IP_DF)) {
                        mtu = max(mtu, IP_MIN_MTU);

                        if (packet_length > mtu &&
                            tnl_frag_needed(vport, mutable, skb, mtu,
                                            OVS_CB(skb)->tun_id))
                                return false;
                }
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                /* IPv6 requires end hosts to do fragmentation
                 * if the packet is above the minimum MTU.
                 */
                if (df_inherit && packet_length > IPV6_MIN_MTU)
                        frag_off = htons(IP_DF);

                if (pmtud) {
                        mtu = max(mtu, IPV6_MIN_MTU);

                        if (packet_length > mtu &&
                            tnl_frag_needed(vport, mutable, skb, mtu,
                                            OVS_CB(skb)->tun_id))
                                return false;
                }
        }
#endif

        *frag_offp = frag_off;
        return true;
}

static void create_tunnel_header(const struct vport *vport,
                                 const struct tnl_mutable_config *mutable,
                                 const struct rtable *rt, void *header)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct iphdr *iph = header;

        iph->version    = 4;
        iph->ihl        = sizeof(struct iphdr) >> 2;
        iph->frag_off   = htons(IP_DF);
        iph->protocol   = tnl_vport->tnl_ops->ipproto;
        iph->tos        = mutable->tos;
        iph->daddr      = rt->rt_dst;
        iph->saddr      = rt->rt_src;
        iph->ttl        = mutable->ttl;
        if (!iph->ttl)
                iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));

        tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
}

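/* The cached header bytes are stored immediately after the tnl_cache
 * structure itself, at the next CACHE_DATA_ALIGN boundary.
 */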
static void *get_cached_header(const struct tnl_cache *cache)
{
        return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}

static bool check_cache_valid(const struct tnl_cache *cache,
                              const struct tnl_mutable_config *mutable)
{
        struct hh_cache *hh;

        if (!cache)
                return false;

        hh = rt_hh(cache->rt);
        return hh &&
#ifdef NEED_CACHE_TIMEOUT
                time_before(jiffies, cache->expiration) &&
#endif
#ifdef HAVE_RT_GENID
                atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
#endif
#ifdef HAVE_HH_SEQ
                hh->hh_lock.sequence == cache->hh_seq &&
#endif
                mutable->seq == cache->mutable_seq &&
                (!is_internal_dev(rt_dst(cache->rt).dev) ||
                (cache->flow && !cache->flow->dead));
}

static void __cache_cleaner(struct tnl_vport *tnl_vport)
{
        const struct tnl_mutable_config *mutable =
                        rcu_dereference(tnl_vport->mutable);
        const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);

        if (cache && !check_cache_valid(cache, mutable) &&
            spin_trylock_bh(&tnl_vport->cache_lock)) {
                assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
                spin_unlock_bh(&tnl_vport->cache_lock);
        }
}

static void cache_cleaner(struct work_struct *work)
{
        int i;

        schedule_cache_cleaner();

        rcu_read_lock();
        for (i = 0; i < PORT_TABLE_SIZE; i++) {
                struct hlist_node *n;
                struct hlist_head *bucket;
                struct tnl_vport  *tnl_vport;

                bucket = &port_table[i];
                hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
                        __cache_cleaner(tnl_vport);
        }
        rcu_read_unlock();
}

static void create_eth_hdr(struct tnl_cache *cache, struct hh_cache *hh)
{
        void *cache_data = get_cached_header(cache);
        int hh_off;

#ifdef HAVE_HH_SEQ
        unsigned hh_seq;

        do {
                hh_seq = read_seqbegin(&hh->hh_lock);
                hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
                memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
                cache->hh_len = hh->hh_len;
        } while (read_seqretry(&hh->hh_lock, hh_seq));

        cache->hh_seq = hh_seq;
#else
        read_lock(&hh->hh_lock);
        hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
        memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
        cache->hh_len = hh->hh_len;
        read_unlock(&hh->hh_lock);
#endif
}

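/* Builds a header cache entry for 'vport': the L2 header taken from the
 * neighbour's hardware header cache followed by the prebuilt tunnel IP
 * header.  If the route egresses through an internal device, the matching
 * flow is also cached so that looped-back packets can skip flow lookup.
 */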
static struct tnl_cache *build_cache(struct vport *vport,
                                     const struct tnl_mutable_config *mutable,
                                     struct rtable *rt)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *cache;
        void *cache_data;
        int cache_len;
        struct hh_cache *hh;

        if (!(mutable->flags & TNL_F_HDR_CACHE))
                return NULL;

        /*
         * If there is no entry in the ARP cache or if this device does not
         * support hard header caching, just fall back to the IP stack.
         */

        hh = rt_hh(rt);
        if (!hh)
                return NULL;

        /*
         * If the lock is contended, fall back to directly building the
         * header.  We're not going to help performance by sitting here
         * spinning.
         */
        if (!spin_trylock(&tnl_vport->cache_lock))
                return NULL;

        cache = cache_dereference(tnl_vport);
        if (check_cache_valid(cache, mutable))
                goto unlock;
        else
                cache = NULL;

        cache_len = LL_RESERVED_SPACE(rt_dst(rt).dev) + mutable->tunnel_hlen;

        cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
                        cache_len, GFP_ATOMIC);
        if (!cache)
                goto unlock;

        create_eth_hdr(cache, hh);
        cache_data = get_cached_header(cache) + cache->hh_len;
        cache->len = cache->hh_len + mutable->tunnel_hlen;

        create_tunnel_header(vport, mutable, rt, cache_data);

        cache->mutable_seq = mutable->seq;
        cache->rt = rt;
#ifdef NEED_CACHE_TIMEOUT
        cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif

        if (is_internal_dev(rt_dst(rt).dev)) {
                struct sw_flow_key flow_key;
                struct vport *dst_vport;
                struct sk_buff *skb;
                int err;
                int flow_key_len;
                struct sw_flow *flow;

                dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
                if (!dst_vport)
                        goto done;

                skb = alloc_skb(cache->len, GFP_ATOMIC);
                if (!skb)
                        goto done;

                __skb_put(skb, cache->len);
                memcpy(skb->data, get_cached_header(cache), cache->len);

                err = flow_extract(skb, dst_vport->port_no, &flow_key,
                                   &flow_key_len);

                consume_skb(skb);
                if (err)
                        goto done;

                flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
                                         &flow_key, flow_key_len);
                if (flow) {
                        cache->flow = flow;
                        flow_hold(flow);
                }
        }

done:
        assign_cache_rcu(vport, cache);

unlock:
        spin_unlock(&tnl_vport->cache_lock);

        return cache;
}

static struct rtable *__find_route(const struct tnl_mutable_config *mutable,
                                   u8 ipproto, u8 tos)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
        struct flowi fl = { .nl_u = { .ip4_u = {
                                        .daddr = mutable->key.daddr,
                                        .saddr = mutable->key.saddr,
                                        .tos = tos } },
                            .proto = ipproto };
        struct rtable *rt;

        if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
                return ERR_PTR(-EADDRNOTAVAIL);

        return rt;
#else
        struct flowi4 fl = { .daddr = mutable->key.daddr,
                             .saddr = mutable->key.saddr,
                             .flowi4_tos = tos,
                             .flowi4_proto = ipproto };

        return ip_route_output_key(&init_net, &fl);
#endif
}

static struct rtable *find_route(struct vport *vport,
                                 const struct tnl_mutable_config *mutable,
                                 u8 tos, struct tnl_cache **cache)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);

        *cache = NULL;
        tos = RT_TOS(tos);

        if (likely(tos == mutable->tos &&
            check_cache_valid(cur_cache, mutable))) {
                *cache = cur_cache;
                return cur_cache->rt;
        } else {
                struct rtable *rt;

                rt = __find_route(mutable, tnl_vport->tnl_ops->ipproto, tos);
                if (IS_ERR(rt))
                        return NULL;

                if (likely(tos == mutable->tos))
                        *cache = build_cache(vport, mutable, rt);

                return rt;
        }
}

static bool need_linearize(const struct sk_buff *skb)
{
        int i;

        if (unlikely(skb_shinfo(skb)->frag_list))
                return true;

        /*
         * Generally speaking, we should linearize if there are paged frags.
         * However, if all of the refcounts are 1, we know nobody else can
         * change them out from under us, and we can skip the linearization.
         */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
                        return true;

        return false;
}

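/* Prepares 'skb' for encapsulation: ensures enough headroom for the link,
 * tunnel, and (possibly) VLAN headers, segments GSO packets in software,
 * and resolves any pending partial checksum, linearizing first if shared
 * fragment pages could change beneath us.  May return a chain of skbs.
 */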
static struct sk_buff *handle_offloads(struct sk_buff *skb,
                                       const struct tnl_mutable_config *mutable,
                                       const struct rtable *rt)
{
        int min_headroom;
        int err;

        min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
                        + mutable->tunnel_hlen
                        + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) +
                                                16);
                err = pskb_expand_head(skb, max_t(int, head_delta, 0),
                                        0, GFP_ATOMIC);
                if (unlikely(err))
                        goto error_free;
        }

        forward_ip_summed(skb, true);

        if (skb_is_gso(skb)) {
                struct sk_buff *nskb;

                nskb = skb_gso_segment(skb, 0);
                if (IS_ERR(nskb)) {
                        kfree_skb(skb);
                        err = PTR_ERR(nskb);
                        goto error;
                }

                consume_skb(skb);
                skb = nskb;
        } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
                /* Pages aren't locked and could change at any time.
                 * If this happens after we compute the checksum, the
                 * checksum will be wrong.  We linearize now to avoid
                 * this problem.
                 */
                if (unlikely(need_linearize(skb))) {
                        err = __skb_linearize(skb);
                        if (unlikely(err))
                                goto error_free;
                }

                err = skb_checksum_help(skb);
                if (unlikely(err))
                        goto error_free;
        }

        set_ip_summed(skb, OVS_CSUM_NONE);

        return skb;

error_free:
        kfree_skb(skb);
error:
        return ERR_PTR(err);
}

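/* Transmits a chain of skbs linked through skb->next with ip_local_out(),
 * letting the IP stack fragment each one as needed.  Returns the number of
 * payload bytes sent, not counting the tunnel header of each fragment.
 */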
static int send_frags(struct sk_buff *skb,
                      const struct tnl_mutable_config *mutable)
{
        int sent_len;

        sent_len = 0;
        while (skb) {
                struct sk_buff *next = skb->next;
                int frag_len = skb->len - mutable->tunnel_hlen;
                int err;

                skb->next = NULL;
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

                err = ip_local_out(skb);
                skb = next;
                if (unlikely(net_xmit_eval(err)))
                        goto free_frags;
                sent_len += frag_len;
        }

        return sent_len;

free_frags:
        /*
         * There's no point in continuing to send fragments once one has been
         * dropped, so just free the rest.  This may help alleviate the
         * congestion that caused the first packet to be dropped.
         */
        tnl_free_linked_skbs(skb);
        return sent_len;
}

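/* Transmit path for tunnel vports: validates the inner protocol headers,
 * computes the outer ToS and TTL, looks up a route (using the header cache
 * when possible), applies offloads, checks the MTU, and then either hands
 * each packet to an internal device directly or pushes the cached or
 * freshly built tunnel header and sends it out through the IP stack.
 */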
int tnl_send(struct vport *vport, struct sk_buff *skb)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);

        enum vport_err_type err = VPORT_E_TX_ERROR;
        struct rtable *rt;
        struct dst_entry *unattached_dst = NULL;
        struct tnl_cache *cache;
        int sent_len = 0;
        __be16 frag_off = 0;
        u8 ttl;
        u8 inner_tos;
        u8 tos;

        /* Validate the protocol headers before we try to use them. */
        if (skb->protocol == htons(ETH_P_8021Q) &&
            !vlan_tx_tag_present(skb)) {
                if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                        goto error_free;

                skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                skb_set_network_header(skb, VLAN_ETH_HLEN);
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                    + sizeof(struct iphdr))))
                        skb->protocol = 0;
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                    + sizeof(struct ipv6hdr))))
                        skb->protocol = 0;
        }
#endif

        /* ToS */
        if (skb->protocol == htons(ETH_P_IP))
                inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6))
                inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
        else
                inner_tos = 0;

        if (mutable->flags & TNL_F_TOS_INHERIT)
                tos = inner_tos;
        else
                tos = mutable->tos;

        tos = INET_ECN_encapsulate(tos, inner_tos);

        /* Route lookup */
        rt = find_route(vport, mutable, tos, &cache);
        if (unlikely(!rt))
                goto error_free;
        if (unlikely(!cache))
                unattached_dst = &rt_dst(rt);

        /* Reset SKB */
        nf_reset(skb);
        secpath_reset(skb);
        skb_dst_drop(skb);
        skb_clear_rxhash(skb);

        /* Offloading */
        skb = handle_offloads(skb, mutable, rt);
        if (IS_ERR(skb))
                goto error;

        /* MTU */
        if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
                err = VPORT_E_TX_DROPPED;
                goto error_free;
        }

        /*
         * If we are over the MTU, allow the IP stack to handle fragmentation.
         * Fragmentation is a slow path anyway.
         */
        if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
                     cache)) {
                unattached_dst = &rt_dst(rt);
                dst_hold(unattached_dst);
                cache = NULL;
        }

        /* TTL */
        ttl = mutable->ttl;
        if (!ttl)
                ttl = ip4_dst_hoplimit(&rt_dst(rt));

        if (mutable->flags & TNL_F_TTL_INHERIT) {
                if (skb->protocol == htons(ETH_P_IP))
                        ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        ttl = ipv6_hdr(skb)->hop_limit;
#endif
        }

        while (skb) {
                struct iphdr *iph;
                struct sk_buff *next_skb = skb->next;
                skb->next = NULL;

                if (unlikely(vlan_deaccel_tag(skb)))
                        goto next;

                if (likely(cache)) {
                        skb_push(skb, cache->len);
                        memcpy(skb->data, get_cached_header(cache), cache->len);
                        skb_reset_mac_header(skb);
                        skb_set_network_header(skb, cache->hh_len);

                } else {
                        skb_push(skb, mutable->tunnel_hlen);
                        create_tunnel_header(vport, mutable, rt, skb->data);
                        skb_reset_network_header(skb);

                        if (next_skb)
                                skb_dst_set(skb, dst_clone(unattached_dst));
                        else {
                                skb_dst_set(skb, unattached_dst);
                                unattached_dst = NULL;
                        }
                }
                skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));

                iph = ip_hdr(skb);
                iph->tos = tos;
                iph->ttl = ttl;
                iph->frag_off = frag_off;
                ip_select_ident(iph, &rt_dst(rt), NULL);

                skb = tnl_vport->tnl_ops->update_header(vport, mutable,
                                                        &rt_dst(rt), skb);
                if (unlikely(!skb))
                        goto next;

                if (likely(cache)) {
                        int orig_len = skb->len - cache->len;
                        struct vport *cache_vport;

                        cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
                        skb->protocol = htons(ETH_P_IP);
                        iph = ip_hdr(skb);
                        iph->tot_len = htons(skb->len - skb_network_offset(skb));
                        ip_send_check(iph);

                        if (cache_vport) {
                                if (unlikely(compute_ip_summed(skb, true))) {
                                        kfree_skb(skb);
                                        goto next;
                                }

                                OVS_CB(skb)->flow = cache->flow;
                                vport_receive(cache_vport, skb);
                                sent_len += orig_len;
                        } else {
                                int xmit_err;

                                skb->dev = rt_dst(rt).dev;
                                xmit_err = dev_queue_xmit(skb);

                                if (likely(net_xmit_eval(xmit_err) == 0))
                                        sent_len += orig_len;
                        }
                } else
                        sent_len += send_frags(skb, mutable);

next:
                skb = next_skb;
        }

        if (unlikely(sent_len == 0))
                vport_record_error(vport, VPORT_E_TX_DROPPED);

        goto out;

error_free:
        tnl_free_linked_skbs(skb);
error:
        vport_record_error(vport, err);
out:
        dst_release(unattached_dst);
        return sent_len;
}

static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
        [OVS_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
        [OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
        [OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
        [OVS_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
        [OVS_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
        [OVS_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
        [OVS_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
};

/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be
 * zeroed. */
static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
                          const struct vport *cur_vport,
                          struct tnl_mutable_config *mutable)
{
        const struct vport *old_vport;
        const struct tnl_mutable_config *old_mutable;
        struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
        int err;

        if (!options)
                return -EINVAL;

        err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
        if (err)
                return err;

        if (!a[OVS_TUNNEL_ATTR_FLAGS] || !a[OVS_TUNNEL_ATTR_DST_IPV4])
                return -EINVAL;

        mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;

        mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);
        if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) {
                if (ipv4_is_multicast(mutable->key.daddr))
                        return -EINVAL;
                mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
        }

        if (a[OVS_TUNNEL_ATTR_TOS]) {
                mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
                if (mutable->tos != RT_TOS(mutable->tos))
                        return -EINVAL;
        }

        if (a[OVS_TUNNEL_ATTR_TTL])
                mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);

        mutable->key.tunnel_type = tnl_ops->tunnel_type;
        if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
                mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
                mutable->flags |= TNL_F_IN_KEY_MATCH;
        } else {
                mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
                mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
        }

        if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
                mutable->flags |= TNL_F_OUT_KEY_ACTION;
        else
                mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);

        mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
        if (mutable->tunnel_hlen < 0)
                return mutable->tunnel_hlen;

        mutable->tunnel_hlen += sizeof(struct iphdr);

        old_vport = port_table_lookup(&mutable->key, &old_mutable);
        if (old_vport && old_vport != cur_vport)
                return -EEXIST;

        mutable->mlink = 0;
        if (ipv4_is_multicast(mutable->key.daddr)) {
                struct net_device *dev;
                struct rtable *rt;

                rt = __find_route(mutable, tnl_ops->ipproto, mutable->tos);
                if (IS_ERR(rt))
                        return -EADDRNOTAVAIL;
                dev = rt_dst(rt).dev;
                ip_rt_put(rt);
                if (__in_dev_get_rtnl(dev) == NULL)
                        return -EADDRNOTAVAIL;
                mutable->mlink = dev->ifindex;
                ip_mc_inc_group(__in_dev_get_rtnl(dev), mutable->key.daddr);
        }

        return 0;
}

struct vport *tnl_create(const struct vport_parms *parms,
                         const struct vport_ops *vport_ops,
                         const struct tnl_ops *tnl_ops)
{
        struct vport *vport;
        struct tnl_vport *tnl_vport;
        struct tnl_mutable_config *mutable;
        int initial_frag_id;
        int err;

        vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                goto error;
        }

        tnl_vport = tnl_vport_priv(vport);

        strcpy(tnl_vport->name, parms->name);
        tnl_vport->tnl_ops = tnl_ops;

        mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable) {
                err = -ENOMEM;
                goto error_free_vport;
        }

        vport_gen_rand_ether_addr(mutable->eth_addr);

        get_random_bytes(&initial_frag_id, sizeof(int));
        atomic_set(&tnl_vport->frag_id, initial_frag_id);

        err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
        if (err)
                goto error_free_mutable;

        spin_lock_init(&tnl_vport->cache_lock);

#ifdef NEED_CACHE_TIMEOUT
        tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
                                       (net_random() % (MAX_CACHE_EXP / 2));
#endif

        rcu_assign_pointer(tnl_vport->mutable, mutable);

        port_table_add_port(vport);
        return vport;

error_free_mutable:
        free_mutable_rtnl(mutable);
        kfree(mutable);
error_free_vport:
        vport_free(vport);
error:
        return ERR_PTR(err);
}

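/* Reconfigures an existing tunnel vport: builds a fresh mutable config from
 * the netlink options, bumps the sequence number so cached headers are
 * rebuilt, and rehashes the port if its lookup key changed.
 */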
int tnl_set_options(struct vport *vport, struct nlattr *options)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *old_mutable;
        struct tnl_mutable_config *mutable;
        int err;

        mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable) {
                err = -ENOMEM;
                goto error;
        }

        /* Copy fields whose values should be retained. */
        old_mutable = rtnl_dereference(tnl_vport->mutable);
        mutable->seq = old_mutable->seq + 1;
        memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);

        /* Parse the others configured by userspace. */
        err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);
        if (err)
                goto error_free;

        if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
                port_table_move_port(vport, mutable);
        else
                assign_config_rcu(vport, mutable);

        return 0;

error_free:
        free_mutable_rtnl(mutable);
        kfree(mutable);
error:
        return err;
}

int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);

        NLA_PUT_U32(skb, OVS_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC);
        NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr);

        if (!(mutable->flags & TNL_F_IN_KEY_MATCH))
                NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key);
        if (!(mutable->flags & TNL_F_OUT_KEY_ACTION))
                NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key);
        if (mutable->key.saddr)
                NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr);
        if (mutable->tos)
                NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos);
        if (mutable->ttl)
                NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl);

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static void free_port_rcu(struct rcu_head *rcu)
{
        struct tnl_vport *tnl_vport = container_of(rcu,
                                                   struct tnl_vport, rcu);

        free_cache((struct tnl_cache __force *)tnl_vport->cache);
        kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
        vport_free(tnl_vport_to_vport(tnl_vport));
}

void tnl_destroy(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *mutable;

        mutable = rtnl_dereference(tnl_vport->mutable);
        port_table_remove_port(vport);
        free_mutable_rtnl(mutable);
        call_rcu(&tnl_vport->rcu, free_port_rcu);
}

int tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *old_mutable, *mutable;

        old_mutable = rtnl_dereference(tnl_vport->mutable);
        mutable = kmemdup(old_mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable)
                return -ENOMEM;

        old_mutable->mlink = 0;

        memcpy(mutable->eth_addr, addr, ETH_ALEN);
        assign_config_rcu(vport, mutable);

        return 0;
}

const char *tnl_get_name(const struct vport *vport)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        return tnl_vport->name;
}

const unsigned char *tnl_get_addr(const struct vport *vport)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
}

void tnl_free_linked_skbs(struct sk_buff *skb)
{
        while (skb) {
                struct sk_buff *next = skb->next;
                kfree_skb(skb);
                skb = next;
        }
}

int tnl_init(void)
{
        int i;

        port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
                        GFP_KERNEL);
        if (!port_table)
                return -ENOMEM;

        for (i = 0; i < PORT_TABLE_SIZE; i++)
                INIT_HLIST_HEAD(&port_table[i]);

        return 0;
}

void tnl_exit(void)
{
        int i;

        for (i = 0; i < PORT_TABLE_SIZE; i++) {
                struct tnl_vport *tnl_vport;
                struct hlist_head *hash_head;
                struct hlist_node *n;

                hash_head = &port_table[i];
                hlist_for_each_entry(tnl_vport, n, hash_head, hash_node) {
                        BUG();
                        goto out;
                }
        }
out:
        kfree(port_table);
}