[sliver-openvswitch.git] datapath/tunnel.c
1 /*
2  * Copyright (c) 2007-2011 Nicira Networks.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18
19 #include <linux/if_arp.h>
20 #include <linux/if_ether.h>
21 #include <linux/ip.h>
22 #include <linux/if_vlan.h>
23 #include <linux/igmp.h>
24 #include <linux/in.h>
25 #include <linux/in_route.h>
26 #include <linux/inetdevice.h>
27 #include <linux/jhash.h>
28 #include <linux/list.h>
29 #include <linux/kernel.h>
30 #include <linux/version.h>
31 #include <linux/workqueue.h>
32 #include <linux/rculist.h>
33
34 #include <net/dsfield.h>
35 #include <net/dst.h>
36 #include <net/icmp.h>
37 #include <net/inet_ecn.h>
38 #include <net/ip.h>
39 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
40 #include <net/ipv6.h>
41 #endif
42 #include <net/route.h>
43 #include <net/xfrm.h>
44
45 #include "checksum.h"
46 #include "datapath.h"
47 #include "tunnel.h"
48 #include "vlan.h"
49 #include "vport.h"
50 #include "vport-generic.h"
51 #include "vport-internal_dev.h"
52
53 #ifdef NEED_CACHE_TIMEOUT
54 /*
55  * On kernels where we can't quickly detect changes in the rest of the system,
56  * we use an expiration time to invalidate the cache.  A shorter expiration
57  * reduces the length of time that we may potentially blackhole packets, while
58  * a longer one improves performance by reducing how often the cache must be
59  * rebuilt.  A variety of factors may cause the cache to be invalidated before
60  * the expiration time, but this is the maximum.  The time is expressed in
61  * jiffies.
62  */
63 #define MAX_CACHE_EXP HZ
64 #endif
65
66 /*
67  * Interval at which to check for and remove caches that are no longer valid.
68  * Caches are checked for validity before they are used for packet
69  * encapsulation, and stale caches are removed at that time.  However, if no
70  * packets are sent through the tunnel, the cache will never be destroyed.
71  * Since it holds references to a number of system objects, a stale cache
72  * keeps consuming system resources by preventing those objects from being
73  * destroyed.  The cache cleaner runs periodically to free invalid caches; it
74  * does not significantly affect system performance.  A shorter interval
75  * releases resources faster but itself consumes resources by requiring more
76  * frequent checks.  A longer interval may result in messages about
77  * unreleased resources being printed to the kernel message buffer.  The
78  * interval is expressed in jiffies.
79  */
80 #define CACHE_CLEANER_INTERVAL (5 * HZ)
81
82 #define CACHE_DATA_ALIGN 16
83 #define PORT_TABLE_SIZE  1024
84
85 static struct hlist_head *port_table __read_mostly;
86 static int port_table_count;
87
88 static void cache_cleaner(struct work_struct *work);
89 static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
90
91 /*
92  * Per-category port counts, used by ovs_tnl_find_port() to skip lookups for
93  * empty categories.  They are only an optimization and need no synchronization:
94  * a stale value is no different from reading it just before the port changed.
95  */
96 static unsigned int key_local_remote_ports __read_mostly;
97 static unsigned int key_remote_ports __read_mostly;
98 static unsigned int key_multicast_ports __read_mostly;
99 static unsigned int local_remote_ports __read_mostly;
100 static unsigned int remote_ports __read_mostly;
101 static unsigned int multicast_ports __read_mostly;
102
103 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
104 #define rt_dst(rt) (rt->dst)
105 #else
106 #define rt_dst(rt) (rt->u.dst)
107 #endif
108
109 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
110 static struct hh_cache *rt_hh(struct rtable *rt)
111 {
112         struct neighbour *neigh = dst_get_neighbour(&rt->dst);
113         if (!neigh || !(neigh->nud_state & NUD_CONNECTED) ||
114                         !neigh->hh.hh_len)
115                 return NULL;
116         return &neigh->hh;
117 }
118 #else
119 #define rt_hh(rt) (rt_dst(rt).hh)
120 #endif
121
122 static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
123 {
124         return vport_from_priv(tnl_vport);
125 }
126
127 /* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
128  * cache_lock is held, so it is only for update-side code.
129  */
130 static struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
131 {
132         return rcu_dereference_protected(tnl_vport->cache,
133                                  lockdep_is_held(&tnl_vport->cache_lock));
134 }
135
136 static void schedule_cache_cleaner(void)
137 {
138         schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
139 }
140
141 static void free_cache(struct tnl_cache *cache)
142 {
143         if (!cache)
144                 return;
145
146         ovs_flow_put(cache->flow);
147         ip_rt_put(cache->rt);
148         kfree(cache);
149 }
150
151 static void free_config_rcu(struct rcu_head *rcu)
152 {
153         struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
154         kfree(c);
155 }
156
157 static void free_cache_rcu(struct rcu_head *rcu)
158 {
159         struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
160         free_cache(c);
161 }
162
163 /* Frees the portion of 'mutable' that requires RTNL and thus can't happen
164  * within an RCU callback.  Fortunately this part doesn't require waiting for
165  * an RCU grace period.
166  */
167 static void free_mutable_rtnl(struct tnl_mutable_config *mutable)
168 {
169         ASSERT_RTNL();
170         if (ipv4_is_multicast(mutable->key.daddr) && mutable->mlink) {
171                 struct in_device *in_dev;
172                 in_dev = inetdev_by_index(&init_net, mutable->mlink);
173                 if (in_dev)
174                         ip_mc_dec_group(in_dev, mutable->key.daddr);
175         }
176 }
177
178 static void assign_config_rcu(struct vport *vport,
179                               struct tnl_mutable_config *new_config)
180 {
181         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
182         struct tnl_mutable_config *old_config;
183
184         old_config = rtnl_dereference(tnl_vport->mutable);
185         rcu_assign_pointer(tnl_vport->mutable, new_config);
186
187         free_mutable_rtnl(old_config);
188         call_rcu(&old_config->rcu, free_config_rcu);
189 }
190
191 static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
192 {
193         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
194         struct tnl_cache *old_cache;
195
196         old_cache = cache_dereference(tnl_vport);
197         rcu_assign_pointer(tnl_vport->cache, new_cache);
198
199         if (old_cache)
200                 call_rcu(&old_cache->rcu, free_cache_rcu);
201 }
202
203 static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
204 {
205         bool is_multicast = ipv4_is_multicast(mutable->key.daddr);
206
207         if (mutable->flags & TNL_F_IN_KEY_MATCH) {
208                 if (mutable->key.saddr)
209                         return &local_remote_ports;
210                 else if (is_multicast)
211                         return &multicast_ports;
212                 else
213                         return &remote_ports;
214         } else {
215                 if (mutable->key.saddr)
216                         return &key_local_remote_ports;
217                 else if (is_multicast)
218                         return &key_multicast_ports;
219                 else
220                         return &key_remote_ports;
221         }
222 }
223
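    /* Hashes the full port lookup key so that tunnel vports can be stored in,
     * and later looked up from, the fixed-size port table.
     */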
224 static u32 port_hash(const struct port_lookup_key *key)
225 {
226         return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
227 }
228
229 static struct hlist_head *find_bucket(u32 hash)
230 {
231         return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
232 }
233
234 static void port_table_add_port(struct vport *vport)
235 {
236         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
237         const struct tnl_mutable_config *mutable;
238         u32 hash;
239
240         if (port_table_count == 0)
241                 schedule_cache_cleaner();
242
243         mutable = rtnl_dereference(tnl_vport->mutable);
244         hash = port_hash(&mutable->key);
245         hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
246         port_table_count++;
247
248         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
249 }
250
251 static void port_table_move_port(struct vport *vport,
252                       struct tnl_mutable_config *new_mutable)
253 {
254         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
255         u32 hash;
256
257         hash = port_hash(&new_mutable->key);
258         hlist_del_init_rcu(&tnl_vport->hash_node);
259         hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
260
261         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
262         assign_config_rcu(vport, new_mutable);
263         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
264 }
265
266 static void port_table_remove_port(struct vport *vport)
267 {
268         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
269
270         hlist_del_init_rcu(&tnl_vport->hash_node);
271
272         port_table_count--;
273         if (port_table_count == 0)
274                 cancel_delayed_work_sync(&cache_cleaner_wq);
275
276         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
277 }
278
279 static struct vport *port_table_lookup(struct port_lookup_key *key,
280                                        const struct tnl_mutable_config **pmutable)
281 {
282         struct hlist_node *n;
283         struct hlist_head *bucket;
284         u32 hash = port_hash(key);
285         struct tnl_vport *tnl_vport;
286
287         bucket = find_bucket(hash);
288
289         hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
290                 struct tnl_mutable_config *mutable;
291
292                 mutable = rcu_dereference_rtnl(tnl_vport->mutable);
293                 if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
294                         *pmutable = mutable;
295                         return tnl_vport_to_vport(tnl_vport);
296                 }
297         }
298
299         return NULL;
300 }
301
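    /* Finds the tunnel vport that should receive a packet, trying progressively
     * less specific matches: exact in_key with both addresses, exact key with
     * the remote address only, the same two with the key wildcarded, and
     * finally multicast entries.
     */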
302 struct vport *ovs_tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
303                                 int tunnel_type,
304                                 const struct tnl_mutable_config **mutable)
305 {
306         struct port_lookup_key lookup;
307         struct vport *vport;
308         bool is_multicast = ipv4_is_multicast(saddr);
309
310         lookup.saddr = saddr;
311         lookup.daddr = daddr;
312
313         /* First try for exact match on in_key. */
314         lookup.in_key = key;
315         lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
316         if (!is_multicast && key_local_remote_ports) {
317                 vport = port_table_lookup(&lookup, mutable);
318                 if (vport)
319                         return vport;
320         }
321         if (key_remote_ports) {
322                 lookup.saddr = 0;
323                 vport = port_table_lookup(&lookup, mutable);
324                 if (vport)
325                         return vport;
326
327                 lookup.saddr = saddr;
328         }
329
330         /* Then try matches that wildcard in_key. */
331         lookup.in_key = 0;
332         lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
333         if (!is_multicast && local_remote_ports) {
334                 vport = port_table_lookup(&lookup, mutable);
335                 if (vport)
336                         return vport;
337         }
338         if (remote_ports) {
339                 lookup.saddr = 0;
340                 vport = port_table_lookup(&lookup, mutable);
341                 if (vport)
342                         return vport;
343         }
344
345         if (is_multicast) {
346                 lookup.saddr = 0;
347                 lookup.daddr = saddr;
348                 if (key_multicast_ports) {
349                         lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
350                         lookup.in_key = key;
351                         vport = port_table_lookup(&lookup, mutable);
352                         if (vport)
353                                 return vport;
354                 }
355                 if (multicast_ports) {
356                         lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
357                         lookup.in_key = 0;
358                         vport = port_table_lookup(&lookup, mutable);
359                         if (vport)
360                                 return vport;
361                 }
362         }
363
364         return NULL;
365 }
366
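    /* If the outer IP header indicated congestion (CE), propagate the CE mark
     * to the inner IPv4 or IPv6 header, skipping over a VLAN tag if present.
     */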
367 static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
368 {
369         if (unlikely(INET_ECN_is_ce(tos))) {
370                 __be16 protocol = skb->protocol;
371
372                 skb_set_network_header(skb, ETH_HLEN);
373
374                 if (protocol == htons(ETH_P_8021Q)) {
375                         if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
376                                 return;
377
378                         protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
379                         skb_set_network_header(skb, VLAN_ETH_HLEN);
380                 }
381
382                 if (protocol == htons(ETH_P_IP)) {
383                         if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
384                             + sizeof(struct iphdr))))
385                                 return;
386
387                         IP_ECN_set_ce(ip_hdr(skb));
388                 }
389 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
390                 else if (protocol == htons(ETH_P_IPV6)) {
391                         if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
392                             + sizeof(struct ipv6hdr))))
393                                 return;
394
395                         IP6_ECN_set_ce(ipv6_hdr(skb));
396                 }
397 #endif
398         }
399 }
400
401 /**
402  *      ovs_tnl_rcv - ingress point for generic tunnel code
403  *
404  * @vport: port this packet was received on
405  * @skb: received packet
406  * @tos: ToS from encapsulating IP packet, used to copy ECN bits
407  *
408  * Must be called with rcu_read_lock.
409  *
410  * Packets received by this function are in the following state:
411  * - skb->data points to the inner Ethernet header.
412  * - The inner Ethernet header is in the linear data area.
413  * - skb->csum does not include the inner Ethernet header.
414  * - The layer pointers are undefined.
415  */
416 void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
417 {
418         struct ethhdr *eh;
419
420         skb_reset_mac_header(skb);
421         eh = eth_hdr(skb);
422
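            /* Values of 1536 (0x600) and above are EtherTypes; smaller values
             * are 802.3 frame lengths.
             */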
423         if (likely(ntohs(eh->h_proto) >= 1536))
424                 skb->protocol = eh->h_proto;
425         else
426                 skb->protocol = htons(ETH_P_802_2);
427
428         skb_dst_drop(skb);
429         nf_reset(skb);
430         skb_clear_rxhash(skb);
431         secpath_reset(skb);
432
433         ecn_decapsulate(skb, tos);
434         vlan_set_tci(skb, 0);
435
436         if (unlikely(compute_ip_summed(skb, false))) {
437                 kfree_skb(skb);
438                 return;
439         }
440
441         ovs_vport_receive(vport, skb);
442 }
443
444 static bool check_ipv4_address(__be32 addr)
445 {
446         if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
447             || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
448                 return false;
449
450         return true;
451 }
452
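    /* Decides whether it is safe to send an ICMP error in response to 'skb':
     * don't reply to L2 multicast/broadcast, invalid IPv4 addresses, non-first
     * fragments, or packets that are themselves ICMP errors.
     */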
453 static bool ipv4_should_icmp(struct sk_buff *skb)
454 {
455         struct iphdr *old_iph = ip_hdr(skb);
456
457         /* Don't respond to L2 broadcast. */
458         if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
459                 return false;
460
461         /* Don't respond to L3 broadcast or invalid addresses. */
462         if (!check_ipv4_address(old_iph->daddr) ||
463             !check_ipv4_address(old_iph->saddr))
464                 return false;
465
466         /* Only respond to the first fragment. */
467         if (old_iph->frag_off & htons(IP_OFFSET))
468                 return false;
469
470         /* Don't respond to ICMP error messages. */
471         if (old_iph->protocol == IPPROTO_ICMP) {
472                 u8 icmp_type, *icmp_typep;
473
474                 icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
475                                                 (old_iph->ihl << 2) +
476                                                 offsetof(struct icmphdr, type) -
477                                                 skb->data, sizeof(icmp_type),
478                                                 &icmp_type);
479
480                 if (!icmp_typep)
481                         return false;
482
483                 if (*icmp_typep > NR_ICMP_TYPES
484                         || (*icmp_typep <= ICMP_PARAMETERPROB
485                                 && *icmp_typep != ICMP_ECHOREPLY
486                                 && *icmp_typep != ICMP_ECHO))
487                         return false;
488         }
489
490         return true;
491 }
492
493 static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
494                             unsigned int mtu, unsigned int payload_length)
495 {
496         struct iphdr *iph, *old_iph = ip_hdr(skb);
497         struct icmphdr *icmph;
498         u8 *payload;
499
500         iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
501         icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
502         payload = skb_put(nskb, payload_length);
503
504         /* IP */
505         iph->version            =       4;
506         iph->ihl                =       sizeof(struct iphdr) >> 2;
507         iph->tos                =       (old_iph->tos & IPTOS_TOS_MASK) |
508                                         IPTOS_PREC_INTERNETCONTROL;
509         iph->tot_len            =       htons(sizeof(struct iphdr)
510                                               + sizeof(struct icmphdr)
511                                               + payload_length);
512         get_random_bytes(&iph->id, sizeof(iph->id));
513         iph->frag_off           =       0;
514         iph->ttl                =       IPDEFTTL;
515         iph->protocol           =       IPPROTO_ICMP;
516         iph->daddr              =       old_iph->saddr;
517         iph->saddr              =       old_iph->daddr;
518
519         ip_send_check(iph);
520
521         /* ICMP */
522         icmph->type             =       ICMP_DEST_UNREACH;
523         icmph->code             =       ICMP_FRAG_NEEDED;
524         icmph->un.gateway       =       htonl(mtu);
525         icmph->checksum         =       0;
526
527         nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
528         nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
529                                             payload, payload_length,
530                                             nskb->csum);
531         icmph->checksum = csum_fold(nskb->csum);
532 }
533
534 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
535 static bool ipv6_should_icmp(struct sk_buff *skb)
536 {
537         struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
538         int addr_type;
539         int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
540         u8 nexthdr = ipv6_hdr(skb)->nexthdr;
541         __be16 frag_off;
542
543         /* Check source address is valid. */
544         addr_type = ipv6_addr_type(&old_ipv6h->saddr);
545         if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
546                 return false;
547
548         /* Don't reply to unspecified addresses. */
549         if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
550                 return false;
551
552         /* Don't respond to ICMP error messages. */
553         payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr, &frag_off);
554         if (payload_off < 0)
555                 return false;
556
557         if (nexthdr == NEXTHDR_ICMP) {
558                 u8 icmp_type, *icmp_typep;
559
560                 icmp_typep = skb_header_pointer(skb, payload_off +
561                                                 offsetof(struct icmp6hdr,
562                                                         icmp6_type),
563                                                 sizeof(icmp_type), &icmp_type);
564
565                 if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
566                         return false;
567         }
568
569         return true;
570 }
571
572 static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
573                             unsigned int mtu, unsigned int payload_length)
574 {
575         struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
576         struct icmp6hdr *icmp6h;
577         u8 *payload;
578
579         ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
580         icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
581         payload = skb_put(nskb, payload_length);
582
583         /* IPv6 */
584         ipv6h->version          =       6;
585         ipv6h->priority         =       0;
586         memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
587         ipv6h->payload_len      =       htons(sizeof(struct icmp6hdr)
588                                               + payload_length);
589         ipv6h->nexthdr          =       NEXTHDR_ICMP;
590         ipv6h->hop_limit        =       IPV6_DEFAULT_HOPLIMIT;
591         ipv6h->daddr            =       old_ipv6h->saddr;
592         ipv6h->saddr            =       old_ipv6h->daddr;
593
594         /* ICMPv6 */
595         icmp6h->icmp6_type      =       ICMPV6_PKT_TOOBIG;
596         icmp6h->icmp6_code      =       0;
597         icmp6h->icmp6_cksum     =       0;
598         icmp6h->icmp6_mtu       =       htonl(mtu);
599
600         nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
601         nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
602                                             payload, payload_length,
603                                             nskb->csum);
604         icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
605                                                 sizeof(struct icmp6hdr)
606                                                 + payload_length,
607                                                 ipv6h->nexthdr, nskb->csum);
608 }
609 #endif /* IPv6 */
610
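    /* Synthesizes an ICMP "fragmentation needed" (IPv4) or ICMPv6 "packet too
     * big" reply addressed to the original sender and injects it as if it had
     * been received on 'vport', so that PMTUD works across the tunnel.
     */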
611 bool ovs_tnl_frag_needed(struct vport *vport,
612                          const struct tnl_mutable_config *mutable,
613                          struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
614 {
615         unsigned int eth_hdr_len = ETH_HLEN;
616         unsigned int total_length = 0, header_length = 0, payload_length;
617         struct ethhdr *eh, *old_eh = eth_hdr(skb);
618         struct sk_buff *nskb;
619
620         /* Sanity check */
621         if (skb->protocol == htons(ETH_P_IP)) {
622                 if (mtu < IP_MIN_MTU)
623                         return false;
624
625                 if (!ipv4_should_icmp(skb))
626                         return true;
627         }
628 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
629         else if (skb->protocol == htons(ETH_P_IPV6)) {
630                 if (mtu < IPV6_MIN_MTU)
631                         return false;
632
633                 /*
634                  * In theory we should do PMTUD on IPv6 multicast messages, but
635                  * we don't have an address to send from, so just fragment.
636                  */
637                 if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
638                         return false;
639
640                 if (!ipv6_should_icmp(skb))
641                         return true;
642         }
643 #endif
644         else
645                 return false;
646
647         /* Allocate */
648         if (old_eh->h_proto == htons(ETH_P_8021Q))
649                 eth_hdr_len = VLAN_ETH_HLEN;
650
651         payload_length = skb->len - eth_hdr_len;
652         if (skb->protocol == htons(ETH_P_IP)) {
653                 header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
654                 total_length = min_t(unsigned int, header_length +
655                                                    payload_length, 576);
656         }
657 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
658         else {
659                 header_length = sizeof(struct ipv6hdr) +
660                                 sizeof(struct icmp6hdr);
661                 total_length = min_t(unsigned int, header_length +
662                                                   payload_length, IPV6_MIN_MTU);
663         }
664 #endif
665
666         payload_length = total_length - header_length;
667
668         nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
669                              payload_length);
670         if (!nskb)
671                 return false;
672
673         skb_reserve(nskb, NET_IP_ALIGN);
674
675         /* Ethernet / VLAN */
676         eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
677         memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
678         memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
679         nskb->protocol = eh->h_proto = old_eh->h_proto;
680         if (old_eh->h_proto == htons(ETH_P_8021Q)) {
681                 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;
682
683                 vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
684                 vh->h_vlan_encapsulated_proto = skb->protocol;
685         } else
686                 vlan_set_tci(nskb, vlan_get_tci(skb));
687         skb_reset_mac_header(nskb);
688
689         /* Protocol */
690         if (skb->protocol == htons(ETH_P_IP))
691                 ipv4_build_icmp(skb, nskb, mtu, payload_length);
692 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
693         else
694                 ipv6_build_icmp(skb, nskb, mtu, payload_length);
695 #endif
696
697         /*
698          * Assume that flow-based keys are symmetric with respect to input
699          * and output, and use the key that we were going to put on the
700          * outgoing packet for the fake received packet.  If the keys are
701          * not symmetric, then PMTUD needs to be disabled since we won't have
702          * any way of synthesizing packets.
703          */
704         if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
705             (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
706                 OVS_CB(nskb)->tun_id = flow_key;
707
708         if (unlikely(compute_ip_summed(nskb, false))) {
709                 kfree_skb(nskb);
710                 return false;
711         }
712
713         ovs_vport_receive(vport, nskb);
714
715         return true;
716 }
717
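    /* Checks the packet against the path MTU when PMTUD is enabled, generating
     * a "fragmentation needed" reply and returning false if the packet should
     * be dropped.  Otherwise returns true and sets *frag_offp to the DF bit to
     * place in the outer IP header.
     */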
718 static bool check_mtu(struct sk_buff *skb,
719                       struct vport *vport,
720                       const struct tnl_mutable_config *mutable,
721                       const struct rtable *rt, __be16 *frag_offp)
722 {
723         bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
724         bool pmtud = mutable->flags & TNL_F_PMTUD;
725         __be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
726         int mtu = 0;
727         unsigned int packet_length = skb->len - ETH_HLEN;
728
729         /* Allow for one level of tagging in the packet length. */
730         if (!vlan_tx_tag_present(skb) &&
731             eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
732                 packet_length -= VLAN_HLEN;
733
734         if (pmtud) {
735                 int vlan_header = 0;
736
737                 /* The tag needs to go in the packet regardless of where it
738                  * currently is, so subtract it from the MTU.
739                  */
740                 if (vlan_tx_tag_present(skb) ||
741                     eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
742                         vlan_header = VLAN_HLEN;
743
744                 mtu = dst_mtu(&rt_dst(rt))
745                         - ETH_HLEN
746                         - mutable->tunnel_hlen
747                         - vlan_header;
748         }
749
750         if (skb->protocol == htons(ETH_P_IP)) {
751                 struct iphdr *iph = ip_hdr(skb);
752
753                 if (df_inherit)
754                         frag_off = iph->frag_off & htons(IP_DF);
755
756                 if (pmtud && iph->frag_off & htons(IP_DF)) {
757                         mtu = max(mtu, IP_MIN_MTU);
758
759                         if (packet_length > mtu &&
760                             ovs_tnl_frag_needed(vport, mutable, skb, mtu,
761                                                 OVS_CB(skb)->tun_id))
762                                 return false;
763                 }
764         }
765 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
766         else if (skb->protocol == htons(ETH_P_IPV6)) {
767                 /* IPv6 requires end hosts to do fragmentation
768                  * if the packet is above the minimum MTU.
769                  */
770                 if (df_inherit && packet_length > IPV6_MIN_MTU)
771                         frag_off = htons(IP_DF);
772
773                 if (pmtud) {
774                         mtu = max(mtu, IPV6_MIN_MTU);
775
776                         if (packet_length > mtu &&
777                             ovs_tnl_frag_needed(vport, mutable, skb, mtu,
778                                                 OVS_CB(skb)->tun_id))
779                                 return false;
780                 }
781         }
782 #endif
783
784         *frag_offp = frag_off;
785         return true;
786 }
787
788 static void create_tunnel_header(const struct vport *vport,
789                                  const struct tnl_mutable_config *mutable,
790                                  const struct rtable *rt, void *header)
791 {
792         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
793         struct iphdr *iph = header;
794
795         iph->version    = 4;
796         iph->ihl        = sizeof(struct iphdr) >> 2;
797         iph->frag_off   = htons(IP_DF);
798         iph->protocol   = tnl_vport->tnl_ops->ipproto;
799         iph->tos        = mutable->tos;
800         iph->daddr      = rt->rt_dst;
801         iph->saddr      = rt->rt_src;
802         iph->ttl        = mutable->ttl;
803         if (!iph->ttl)
804                 iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));
805
806         tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
807 }
808
809 static void *get_cached_header(const struct tnl_cache *cache)
810 {
811         return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
812 }
813
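    /* A cached header is usable only while the hardware header, the routing
     * generation, the mutable config sequence number and, for tunnels that
     * egress on an internal device, the cached flow are all still current.
     */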
814 static bool check_cache_valid(const struct tnl_cache *cache,
815                               const struct tnl_mutable_config *mutable)
816 {
817         struct hh_cache *hh;
818
819         if (!cache)
820                 return false;
821
822         hh = rt_hh(cache->rt);
823         return hh &&
824 #ifdef NEED_CACHE_TIMEOUT
825                 time_before(jiffies, cache->expiration) &&
826 #endif
827 #ifdef HAVE_RT_GENID
828                 atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
829 #endif
830 #ifdef HAVE_HH_SEQ
831                 hh->hh_lock.sequence == cache->hh_seq &&
832 #endif
833                 mutable->seq == cache->mutable_seq &&
834                 (!ovs_is_internal_dev(rt_dst(cache->rt).dev) ||
835                 (cache->flow && !cache->flow->dead));
836 }
837
838 static void __cache_cleaner(struct tnl_vport *tnl_vport)
839 {
840         const struct tnl_mutable_config *mutable =
841                         rcu_dereference(tnl_vport->mutable);
842         const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);
843
844         if (cache && !check_cache_valid(cache, mutable) &&
845             spin_trylock_bh(&tnl_vport->cache_lock)) {
846                 assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
847                 spin_unlock_bh(&tnl_vport->cache_lock);
848         }
849 }
850
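    /* Periodic work item: walks the entire port table and drops any header
     * caches that are no longer valid.
     */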
851 static void cache_cleaner(struct work_struct *work)
852 {
853         int i;
854
855         schedule_cache_cleaner();
856
857         rcu_read_lock();
858         for (i = 0; i < PORT_TABLE_SIZE; i++) {
859                 struct hlist_node *n;
860                 struct hlist_head *bucket;
861                 struct tnl_vport  *tnl_vport;
862
863                 bucket = &port_table[i];
864                 hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
865                         __cache_cleaner(tnl_vport);
866         }
867         rcu_read_unlock();
868 }
869
870 static void create_eth_hdr(struct tnl_cache *cache, struct hh_cache *hh)
871 {
872         void *cache_data = get_cached_header(cache);
873         int hh_off;
874
875 #ifdef HAVE_HH_SEQ
876         unsigned hh_seq;
877
878         do {
879                 hh_seq = read_seqbegin(&hh->hh_lock);
880                 hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
881                 memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
882                 cache->hh_len = hh->hh_len;
883         } while (read_seqretry(&hh->hh_lock, hh_seq));
884
885         cache->hh_seq = hh_seq;
886 #else
887         read_lock(&hh->hh_lock);
888         hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
889         memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
890         cache->hh_len = hh->hh_len;
891         read_unlock(&hh->hh_lock);
892 #endif
893 }
894
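    /* Builds a cached header (the L2 header from the neighbour cache plus the
     * outer tunnel IP header) so the fast path can prepend it with a single
     * memcpy.  If the route egresses on an OVS internal device, the flow that
     * the encapsulated packet would hit is looked up and cached as well.
     */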
895 static struct tnl_cache *build_cache(struct vport *vport,
896                                      const struct tnl_mutable_config *mutable,
897                                      struct rtable *rt)
898 {
899         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
900         struct tnl_cache *cache;
901         void *cache_data;
902         int cache_len;
903         struct hh_cache *hh;
904
905         if (!(mutable->flags & TNL_F_HDR_CACHE))
906                 return NULL;
907
908         /*
909          * If there is no entry in the ARP cache or if this device does not
910          * support hard header caching, just fall back to the IP stack.
911          */
912
913         hh = rt_hh(rt);
914         if (!hh)
915                 return NULL;
916
917         /*
918          * If the lock is contended, fall back to building the header directly.
919          * We're not going to help performance by sitting here spinning.
920          */
921         if (!spin_trylock(&tnl_vport->cache_lock))
922                 return NULL;
923
924         cache = cache_dereference(tnl_vport);
925         if (check_cache_valid(cache, mutable))
926                 goto unlock;
927         else
928                 cache = NULL;
929
930         cache_len = LL_RESERVED_SPACE(rt_dst(rt).dev) + mutable->tunnel_hlen;
931
932         cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
933                         cache_len, GFP_ATOMIC);
934         if (!cache)
935                 goto unlock;
936
937         create_eth_hdr(cache, hh);
938         cache_data = get_cached_header(cache) + cache->hh_len;
939         cache->len = cache->hh_len + mutable->tunnel_hlen;
940
941         create_tunnel_header(vport, mutable, rt, cache_data);
942
943         cache->mutable_seq = mutable->seq;
944         cache->rt = rt;
945 #ifdef NEED_CACHE_TIMEOUT
946         cache->expiration = jiffies + tnl_vport->cache_exp_interval;
947 #endif
948
949         if (ovs_is_internal_dev(rt_dst(rt).dev)) {
950                 struct sw_flow_key flow_key;
951                 struct vport *dst_vport;
952                 struct sk_buff *skb;
953                 int err;
954                 int flow_key_len;
955                 struct sw_flow *flow;
956
957                 dst_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
958                 if (!dst_vport)
959                         goto done;
960
961                 skb = alloc_skb(cache->len, GFP_ATOMIC);
962                 if (!skb)
963                         goto done;
964
965                 __skb_put(skb, cache->len);
966                 memcpy(skb->data, get_cached_header(cache), cache->len);
967
968                 err = ovs_flow_extract(skb, dst_vport->port_no, &flow_key,
969                                        &flow_key_len);
970
971                 consume_skb(skb);
972                 if (err)
973                         goto done;
974
975                 flow = ovs_flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
976                                            &flow_key, flow_key_len);
977                 if (flow) {
978                         cache->flow = flow;
979                         ovs_flow_hold(flow);
980                 }
981         }
982
983 done:
984         assign_cache_rcu(vport, cache);
985
986 unlock:
987         spin_unlock(&tnl_vport->cache_lock);
988
989         return cache;
990 }
991
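    /* Routing lookup toward the tunnel destination.  The two branches handle
     * the flowi -> flowi4 API change introduced around Linux 2.6.39.
     */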
992 static struct rtable *__find_route(const struct tnl_mutable_config *mutable,
993                                    u8 ipproto, u8 tos)
994 {
995 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
996         struct flowi fl = { .nl_u = { .ip4_u = {
997                                         .daddr = mutable->key.daddr,
998                                         .saddr = mutable->key.saddr,
999                                         .tos = tos } },
1000                             .proto = ipproto };
1001         struct rtable *rt;
1002
1003         if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
1004                 return ERR_PTR(-EADDRNOTAVAIL);
1005
1006         return rt;
1007 #else
1008         struct flowi4 fl = { .daddr = mutable->key.daddr,
1009                              .saddr = mutable->key.saddr,
1010                              .flowi4_tos = tos,
1011                              .flowi4_proto = ipproto };
1012
1013         return ip_route_output_key(&init_net, &fl);
1014 #endif
1015 }
1016
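     /* Returns the route for this tunnel, reusing the cached route when the
      * header cache is valid and the ToS matches; otherwise performs a fresh
      * lookup and, when possible, rebuilds the cache.
      */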
1017 static struct rtable *find_route(struct vport *vport,
1018                                  const struct tnl_mutable_config *mutable,
1019                                  u8 tos, struct tnl_cache **cache)
1020 {
1021         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1022         struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);
1023
1024         *cache = NULL;
1025         tos = RT_TOS(tos);
1026
1027         if (likely(tos == mutable->tos &&
1028             check_cache_valid(cur_cache, mutable))) {
1029                 *cache = cur_cache;
1030                 return cur_cache->rt;
1031         } else {
1032                 struct rtable *rt;
1033
1034                 rt = __find_route(mutable, tnl_vport->tnl_ops->ipproto, tos);
1035                 if (IS_ERR(rt))
1036                         return NULL;
1037
1038                 if (likely(tos == mutable->tos))
1039                         *cache = build_cache(vport, mutable, rt);
1040
1041                 return rt;
1042         }
1043 }
1044
1045 static bool need_linearize(const struct sk_buff *skb)
1046 {
1047         int i;
1048
1049         if (unlikely(skb_shinfo(skb)->frag_list))
1050                 return true;
1051
1052         /*
1053          * Generally speaking, we should linearize if there are paged frags.
1054          * However, if all of the refcounts are 1, we know nobody else can
1055          * change them out from under us, so we can skip the linearization.
1056          */
1057         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1058                 if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
1059                         return true;
1060
1061         return false;
1062 }
1063
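     /* Prepares the skb for encapsulation: expands headroom if needed,
      * software-segments GSO packets, and completes partial checksums,
      * linearizing first when shared pages could change under us.
      */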
1064 static struct sk_buff *handle_offloads(struct sk_buff *skb,
1065                                        const struct tnl_mutable_config *mutable,
1066                                        const struct rtable *rt)
1067 {
1068         int min_headroom;
1069         int err;
1070
1071         min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
1072                         + mutable->tunnel_hlen
1073                         + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
1074
1075         if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
1076                 int head_delta = SKB_DATA_ALIGN(min_headroom -
1077                                                 skb_headroom(skb) +
1078                                                 16);
1079                 err = pskb_expand_head(skb, max_t(int, head_delta, 0),
1080                                         0, GFP_ATOMIC);
1081                 if (unlikely(err))
1082                         goto error_free;
1083         }
1084
1085         forward_ip_summed(skb, true);
1086
1087         if (skb_is_gso(skb)) {
1088                 struct sk_buff *nskb;
1089
1090                 nskb = skb_gso_segment(skb, 0);
1091                 if (IS_ERR(nskb)) {
1092                         kfree_skb(skb);
1093                         err = PTR_ERR(nskb);
1094                         goto error;
1095                 }
1096
1097                 consume_skb(skb);
1098                 skb = nskb;
1099         } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
1100                 /* Pages aren't locked and could change at any time.
1101                  * If this happens after we compute the checksum, the
1102                  * checksum will be wrong.  We linearize now to avoid
1103                  * this problem.
1104                  */
1105                 if (unlikely(need_linearize(skb))) {
1106                         err = __skb_linearize(skb);
1107                         if (unlikely(err))
1108                                 goto error_free;
1109                 }
1110
1111                 err = skb_checksum_help(skb);
1112                 if (unlikely(err))
1113                         goto error_free;
1114         }
1115
1116         set_ip_summed(skb, OVS_CSUM_NONE);
1117
1118         return skb;
1119
1120 error_free:
1121         kfree_skb(skb);
1122 error:
1123         return ERR_PTR(err);
1124 }
1125
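     /* Transmits a chain of IP fragments via ip_local_out(), freeing the rest
      * of the chain if one fragment is dropped.  Returns the number of payload
      * bytes (excluding tunnel headers) that were successfully queued.
      */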
1126 static int send_frags(struct sk_buff *skb,
1127                       const struct tnl_mutable_config *mutable)
1128 {
1129         int sent_len;
1130
1131         sent_len = 0;
1132         while (skb) {
1133                 struct sk_buff *next = skb->next;
1134                 int frag_len = skb->len - mutable->tunnel_hlen;
1135                 int err;
1136
1137                 skb->next = NULL;
1138                 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
1139
1140                 err = ip_local_out(skb);
1141                 skb = next;
1142                 if (unlikely(net_xmit_eval(err)))
1143                         goto free_frags;
1144                 sent_len += frag_len;
1145         }
1146
1147         return sent_len;
1148
1149 free_frags:
1150         /*
1151          * There's no point in continuing to send fragments once one has been
1152          * dropped, so just free the rest.  This may also help relieve the
1153          * congestion that caused the first packet to be dropped.
1154          */
1155         ovs_tnl_free_linked_skbs(skb);
1156         return sent_len;
1157 }
1158
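     /* Encapsulation transmit path: validates the inner headers, computes the
      * outer ToS and TTL, finds a route (preferring the header cache), handles
      * offloads and the MTU check, then prepends the tunnel header to each
      * resulting skb and transmits it.  Returns the number of bytes sent and
      * records a vport error if nothing was sent.
      */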
1159 int ovs_tnl_send(struct vport *vport, struct sk_buff *skb)
1160 {
1161         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1162         const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
1163
1164         enum vport_err_type err = VPORT_E_TX_ERROR;
1165         struct rtable *rt;
1166         struct dst_entry *unattached_dst = NULL;
1167         struct tnl_cache *cache;
1168         int sent_len = 0;
1169         __be16 frag_off = 0;
1170         u8 ttl;
1171         u8 inner_tos;
1172         u8 tos;
1173
1174         /* Validate the protocol headers before we try to use them. */
1175         if (skb->protocol == htons(ETH_P_8021Q) &&
1176             !vlan_tx_tag_present(skb)) {
1177                 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
1178                         goto error_free;
1179
1180                 skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
1181                 skb_set_network_header(skb, VLAN_ETH_HLEN);
1182         }
1183
1184         if (skb->protocol == htons(ETH_P_IP)) {
1185                 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1186                     + sizeof(struct iphdr))))
1187                         skb->protocol = 0;
1188         }
1189 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1190         else if (skb->protocol == htons(ETH_P_IPV6)) {
1191                 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1192                     + sizeof(struct ipv6hdr))))
1193                         skb->protocol = 0;
1194         }
1195 #endif
1196
1197         /* ToS */
1198         if (skb->protocol == htons(ETH_P_IP))
1199                 inner_tos = ip_hdr(skb)->tos;
1200 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1201         else if (skb->protocol == htons(ETH_P_IPV6))
1202                 inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
1203 #endif
1204         else
1205                 inner_tos = 0;
1206
1207         if (mutable->flags & TNL_F_TOS_INHERIT)
1208                 tos = inner_tos;
1209         else
1210                 tos = mutable->tos;
1211
1212         tos = INET_ECN_encapsulate(tos, inner_tos);
1213
1214         /* Route lookup */
1215         rt = find_route(vport, mutable, tos, &cache);
1216         if (unlikely(!rt))
1217                 goto error_free;
1218         if (unlikely(!cache))
1219                 unattached_dst = &rt_dst(rt);
1220
1221         /* Reset SKB */
1222         nf_reset(skb);
1223         secpath_reset(skb);
1224         skb_dst_drop(skb);
1225         skb_clear_rxhash(skb);
1226
1227         /* Offloading */
1228         skb = handle_offloads(skb, mutable, rt);
1229         if (IS_ERR(skb))
1230                 goto error;
1231
1232         /* MTU */
1233         if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
1234                 err = VPORT_E_TX_DROPPED;
1235                 goto error_free;
1236         }
1237
1238         /*
1239          * If we are over the MTU, allow the IP stack to handle fragmentation.
1240          * Fragmentation is a slow path anyway.
1241          */
1242         if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
1243                      cache)) {
1244                 unattached_dst = &rt_dst(rt);
1245                 dst_hold(unattached_dst);
1246                 cache = NULL;
1247         }
1248
1249         /* TTL */
1250         ttl = mutable->ttl;
1251         if (!ttl)
1252                 ttl = ip4_dst_hoplimit(&rt_dst(rt));
1253
1254         if (mutable->flags & TNL_F_TTL_INHERIT) {
1255                 if (skb->protocol == htons(ETH_P_IP))
1256                         ttl = ip_hdr(skb)->ttl;
1257 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1258                 else if (skb->protocol == htons(ETH_P_IPV6))
1259                         ttl = ipv6_hdr(skb)->hop_limit;
1260 #endif
1261         }
1262
1263         while (skb) {
1264                 struct iphdr *iph;
1265                 struct sk_buff *next_skb = skb->next;
1266                 skb->next = NULL;
1267
1268                 if (unlikely(vlan_deaccel_tag(skb)))
1269                         goto next;
1270
1271                 if (likely(cache)) {
1272                         skb_push(skb, cache->len);
1273                         memcpy(skb->data, get_cached_header(cache), cache->len);
1274                         skb_reset_mac_header(skb);
1275                         skb_set_network_header(skb, cache->hh_len);
1276
1277                 } else {
1278                         skb_push(skb, mutable->tunnel_hlen);
1279                         create_tunnel_header(vport, mutable, rt, skb->data);
1280                         skb_reset_network_header(skb);
1281
1282                         if (next_skb)
1283                                 skb_dst_set(skb, dst_clone(unattached_dst));
1284                         else {
1285                                 skb_dst_set(skb, unattached_dst);
1286                                 unattached_dst = NULL;
1287                         }
1288                 }
1289                 skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));
1290
1291                 iph = ip_hdr(skb);
1292                 iph->tos = tos;
1293                 iph->ttl = ttl;
1294                 iph->frag_off = frag_off;
1295                 ip_select_ident(iph, &rt_dst(rt), NULL);
1296
1297                 skb = tnl_vport->tnl_ops->update_header(vport, mutable,
1298                                                         &rt_dst(rt), skb);
1299                 if (unlikely(!skb))
1300                         goto next;
1301
1302                 if (likely(cache)) {
1303                         int orig_len = skb->len - cache->len;
1304                         struct vport *cache_vport;
1305
1306                         cache_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
1307                         skb->protocol = htons(ETH_P_IP);
1308                         iph = ip_hdr(skb);
1309                         iph->tot_len = htons(skb->len - skb_network_offset(skb));
1310                         ip_send_check(iph);
1311
1312                         if (cache_vport) {
1313                                 if (unlikely(compute_ip_summed(skb, true))) {
1314                                         kfree_skb(skb);
1315                                         goto next;
1316                                 }
1317
1318                                 OVS_CB(skb)->flow = cache->flow;
1319                                 ovs_vport_receive(cache_vport, skb);
1320                                 sent_len += orig_len;
1321                         } else {
1322                                 int xmit_err;
1323
1324                                 skb->dev = rt_dst(rt).dev;
1325                                 xmit_err = dev_queue_xmit(skb);
1326
1327                                 if (likely(net_xmit_eval(xmit_err) == 0))
1328                                         sent_len += orig_len;
1329                         }
1330                 } else
1331                         sent_len += send_frags(skb, mutable);
1332
1333 next:
1334                 skb = next_skb;
1335         }
1336
1337         if (unlikely(sent_len == 0))
1338                 ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
1339
1340         goto out;
1341
1342 error_free:
1343         ovs_tnl_free_linked_skbs(skb);
1344 error:
1345         ovs_vport_record_error(vport, err);
1346 out:
1347         dst_release(unattached_dst);
1348         return sent_len;
1349 }
1350
1351 static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
1352         [OVS_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
1353         [OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
1354         [OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
1355         [OVS_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
1356         [OVS_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
1357         [OVS_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
1358         [OVS_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
1359 };
1360
1361 /* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be
1362  * zeroed. */
1363 static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
1364                           const struct vport *cur_vport,
1365                           struct tnl_mutable_config *mutable)
1366 {
1367         const struct vport *old_vport;
1368         const struct tnl_mutable_config *old_mutable;
1369         struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
1370         int err;
1371
1372         if (!options)
1373                 return -EINVAL;
1374
1375         err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
1376         if (err)
1377                 return err;
1378
1379         if (!a[OVS_TUNNEL_ATTR_FLAGS] || !a[OVS_TUNNEL_ATTR_DST_IPV4])
1380                 return -EINVAL;
1381
1382         mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;
1383
1384         mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);
1385         if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) {
1386                 if (ipv4_is_multicast(mutable->key.daddr))
1387                         return -EINVAL;
1388                 mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
1389         }
1390
1391         if (a[OVS_TUNNEL_ATTR_TOS]) {
1392                 mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
1393                 if (mutable->tos != RT_TOS(mutable->tos))
1394                         return -EINVAL;
1395         }
1396
1397         if (a[OVS_TUNNEL_ATTR_TTL])
1398                 mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);
1399
1400         mutable->key.tunnel_type = tnl_ops->tunnel_type;
1401         if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
1402                 mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
1403                 mutable->flags |= TNL_F_IN_KEY_MATCH;
1404         } else {
1405                 mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
1406                 mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
1407         }
1408
1409         if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
1410                 mutable->flags |= TNL_F_OUT_KEY_ACTION;
1411         else
1412                 mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);
1413
1414         mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
1415         if (mutable->tunnel_hlen < 0)
1416                 return mutable->tunnel_hlen;
1417
1418         mutable->tunnel_hlen += sizeof(struct iphdr);
1419
1420         old_vport = port_table_lookup(&mutable->key, &old_mutable);
1421         if (old_vport && old_vport != cur_vport)
1422                 return -EEXIST;
1423
1424         mutable->mlink = 0;
1425         if (ipv4_is_multicast(mutable->key.daddr)) {
1426                 struct net_device *dev;
1427                 struct rtable *rt;
1428
1429                 rt = __find_route(mutable, tnl_ops->ipproto, mutable->tos);
1430                 if (IS_ERR(rt))
1431                         return -EADDRNOTAVAIL;
1432                 dev = rt_dst(rt).dev;
1433                 ip_rt_put(rt);
1434                 if (__in_dev_get_rtnl(dev) == NULL)
1435                         return -EADDRNOTAVAIL;
1436                 mutable->mlink = dev->ifindex;
1437                 ip_mc_inc_group(__in_dev_get_rtnl(dev), mutable->key.daddr);
1438         }
1439
1440         return 0;
1441 }
1442
1443 struct vport *ovs_tnl_create(const struct vport_parms *parms,
1444                              const struct vport_ops *vport_ops,
1445                              const struct tnl_ops *tnl_ops)
1446 {
1447         struct vport *vport;
1448         struct tnl_vport *tnl_vport;
1449         struct tnl_mutable_config *mutable;
1450         int initial_frag_id;
1451         int err;
1452
1453         vport = ovs_vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
1454         if (IS_ERR(vport)) {
1455                 err = PTR_ERR(vport);
1456                 goto error;
1457         }
1458
1459         tnl_vport = tnl_vport_priv(vport);
1460
1461         strcpy(tnl_vport->name, parms->name);
1462         tnl_vport->tnl_ops = tnl_ops;
1463
1464         mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1465         if (!mutable) {
1466                 err = -ENOMEM;
1467                 goto error_free_vport;
1468         }
1469
1470         random_ether_addr(mutable->eth_addr);
1471
1472         get_random_bytes(&initial_frag_id, sizeof(int));
1473         atomic_set(&tnl_vport->frag_id, initial_frag_id);
1474
1475         err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
1476         if (err)
1477                 goto error_free_mutable;
1478
1479         spin_lock_init(&tnl_vport->cache_lock);
1480
1481 #ifdef NEED_CACHE_TIMEOUT
1482         tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
1483                                        (net_random() % (MAX_CACHE_EXP / 2));
1484 #endif
1485
1486         rcu_assign_pointer(tnl_vport->mutable, mutable);
1487
1488         port_table_add_port(vport);
1489         return vport;
1490
1491 error_free_mutable:
1492         free_mutable_rtnl(mutable);
1493         kfree(mutable);
1494 error_free_vport:
1495         ovs_vport_free(vport);
1496 error:
1497         return ERR_PTR(err);
1498 }
1499
1500 int ovs_tnl_set_options(struct vport *vport, struct nlattr *options)
1501 {
1502         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1503         const struct tnl_mutable_config *old_mutable;
1504         struct tnl_mutable_config *mutable;
1505         int err;
1506
1507         mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1508         if (!mutable) {
1509                 err = -ENOMEM;
1510                 goto error;
1511         }
1512
1513         /* Copy fields whose values should be retained. */
1514         old_mutable = rtnl_dereference(tnl_vport->mutable);
1515         mutable->seq = old_mutable->seq + 1;
1516         memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);
1517
1518         /* Parse the other attributes configured by userspace. */
1519         err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);
1520         if (err)
1521                 goto error_free;
1522
1523         if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
1524                 port_table_move_port(vport, mutable);
1525         else
1526                 assign_config_rcu(vport, mutable);
1527
1528         return 0;
1529
1530 error_free:
1531         free_mutable_rtnl(mutable);
1532         kfree(mutable);
1533 error:
1534         return err;
1535 }
1536
1537 int ovs_tnl_get_options(const struct vport *vport, struct sk_buff *skb)
1538 {
1539         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1540         const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);
1541
1542         NLA_PUT_U32(skb, OVS_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC);
1543         NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr);
1544
1545         if (!(mutable->flags & TNL_F_IN_KEY_MATCH))
1546                 NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key);
1547         if (!(mutable->flags & TNL_F_OUT_KEY_ACTION))
1548                 NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key);
1549         if (mutable->key.saddr)
1550                 NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr);
1551         if (mutable->tos)
1552                 NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos);
1553         if (mutable->ttl)
1554                 NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl);
1555
1556         return 0;
1557
1558 nla_put_failure:
1559         return -EMSGSIZE;
1560 }
1561
1562 static void free_port_rcu(struct rcu_head *rcu)
1563 {
1564         struct tnl_vport *tnl_vport = container_of(rcu,
1565                                                    struct tnl_vport, rcu);
1566
1567         free_cache((struct tnl_cache __force *)tnl_vport->cache);
1568         kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
1569         ovs_vport_free(tnl_vport_to_vport(tnl_vport));
1570 }
1571
1572 void ovs_tnl_destroy(struct vport *vport)
1573 {
1574         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1575         struct tnl_mutable_config *mutable;
1576
1577         mutable = rtnl_dereference(tnl_vport->mutable);
1578         port_table_remove_port(vport);
1579         free_mutable_rtnl(mutable);
1580         call_rcu(&tnl_vport->rcu, free_port_rcu);
1581 }
1582
1583 int ovs_tnl_set_addr(struct vport *vport, const unsigned char *addr)
1584 {
1585         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1586         struct tnl_mutable_config *old_mutable, *mutable;
1587
1588         old_mutable = rtnl_dereference(tnl_vport->mutable);
1589         mutable = kmemdup(old_mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
1590         if (!mutable)
1591                 return -ENOMEM;
1592
1593         old_mutable->mlink = 0;
1594
1595         memcpy(mutable->eth_addr, addr, ETH_ALEN);
1596         assign_config_rcu(vport, mutable);
1597
1598         return 0;
1599 }
1600
1601 const char *ovs_tnl_get_name(const struct vport *vport)
1602 {
1603         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1604         return tnl_vport->name;
1605 }
1606
1607 const unsigned char *ovs_tnl_get_addr(const struct vport *vport)
1608 {
1609         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1610         return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
1611 }
1612
1613 void ovs_tnl_free_linked_skbs(struct sk_buff *skb)
1614 {
1615         while (skb) {
1616                 struct sk_buff *next = skb->next;
1617                 kfree_skb(skb);
1618                 skb = next;
1619         }
1620 }
1621
1622 int ovs_tnl_init(void)
1623 {
1624         int i;
1625
1626         port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
1627                         GFP_KERNEL);
1628         if (!port_table)
1629                 return -ENOMEM;
1630
1631         for (i = 0; i < PORT_TABLE_SIZE; i++)
1632                 INIT_HLIST_HEAD(&port_table[i]);
1633
1634         return 0;
1635 }
1636
1637 void ovs_tnl_exit(void)
1638 {
1639         int i;
1640
1641         for (i = 0; i < PORT_TABLE_SIZE; i++) {
1642                 struct tnl_vport *tnl_vport;
1643                 struct hlist_head *hash_head;
1644                 struct hlist_node *n;
1645
1646                 hash_head = &port_table[i];
1647                 hlist_for_each_entry(tnl_vport, n, hash_head, hash_node) {
1648                         BUG();
1649                         goto out;
1650                 }
1651         }
1652 out:
1653         kfree(port_table);
1654 }