datapath: Add multicast tunnel support.
[sliver-openvswitch.git] / datapath / tunnel.c
1 /*
2  * Copyright (c) 2010, 2011 Nicira Networks.
3  * Distributed under the terms of the GNU GPL version 2.
4  *
5  * Significant portions of this file may be copied from parts of the Linux
6  * kernel, by Linus Torvalds and others.
7  */
8
9 #include <linux/if_arp.h>
10 #include <linux/if_ether.h>
11 #include <linux/ip.h>
12 #include <linux/if_vlan.h>
13 #include <linux/igmp.h>
14 #include <linux/in.h>
15 #include <linux/in_route.h>
16 #include <linux/inetdevice.h>
17 #include <linux/jhash.h>
18 #include <linux/list.h>
19 #include <linux/kernel.h>
20 #include <linux/version.h>
21 #include <linux/workqueue.h>
22 #include <linux/rculist.h>
23
24 #include <net/dsfield.h>
25 #include <net/dst.h>
26 #include <net/icmp.h>
27 #include <net/inet_ecn.h>
28 #include <net/ip.h>
29 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
30 #include <net/ipv6.h>
31 #endif
32 #include <net/route.h>
33 #include <net/xfrm.h>
34
35 #include "actions.h"
36 #include "checksum.h"
37 #include "datapath.h"
38 #include "tunnel.h"
39 #include "vlan.h"
40 #include "vport.h"
41 #include "vport-generic.h"
42 #include "vport-internal_dev.h"
43
44 #ifdef NEED_CACHE_TIMEOUT
45 /*
46  * On kernels where we can't quickly detect changes in the rest of the system
47  * we use an expiration time to invalidate the cache.  A shorter expiration
48  * reduces the length of time that we may potentially blackhole packets while
49  * a longer time increases performance by reducing the frequency that the
50  * cache needs to be rebuilt.  A variety of factors may cause the cache to be
51  * invalidated before the expiration time but this is the maximum.  The time
52  * is expressed in jiffies.
53  */
54 #define MAX_CACHE_EXP HZ
55 #endif
56
57 /*
58  * Interval to check for and remove caches that are no longer valid.  Caches
59  * are checked for validity before they are used for packet encapsulation and
60  * old caches are removed at that time.  However, if no packets are sent through
61  * the tunnel then the cache will never be destroyed.  Since it holds
62  * references to a number of system objects, the cache will continue to use
63  * system resources by not allowing those objects to be destroyed.  The cache
64  * cleaner is periodically run to free invalid caches.  It does not
65  * significantly affect system performance.  A lower interval will release
66  * resources faster but will itself consume resources by requiring more frequent
67  * checks.  A longer interval may result in messages being printed to the kernel
68  * message buffer about unreleased resources.  The interval is expressed in
69  * jiffies.
70  */
71 #define CACHE_CLEANER_INTERVAL (5 * HZ)
72
73 #define CACHE_DATA_ALIGN 16
74 #define PORT_TABLE_SIZE  1024
75
76 static struct hlist_head *port_table __read_mostly;
77 static int port_table_count;
78
79 static void cache_cleaner(struct work_struct *work);
80 static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
81
82 /*
83  * These are just used as an optimization: they don't require any kind of
84  * synchronization because we could have just as easily read the value before
85  * the port change happened.
86  */
87 static unsigned int key_local_remote_ports __read_mostly;
88 static unsigned int key_remote_ports __read_mostly;
89 static unsigned int local_remote_ports __read_mostly;
90 static unsigned int remote_ports __read_mostly;
91
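/* Compatibility shim: struct rtable embeds its dst_entry directly as 'dst'
 * starting with kernel 2.6.36; older kernels keep it inside the 'u' union,
 * so all accesses below go through rt_dst().
 */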
92 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
93 #define rt_dst(rt) (rt->dst)
94 #else
95 #define rt_dst(rt) (rt->u.dst)
96 #endif
97
98 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
99 static struct hh_cache *rt_hh(struct rtable *rt)
100 {
101         struct neighbour *neigh = dst_get_neighbour(&rt->dst);
102         if (!neigh || !(neigh->nud_state & NUD_CONNECTED) ||
103                         !neigh->hh.hh_len)
104                 return NULL;
105         return &neigh->hh;
106 }
107 #else
108 #define rt_hh(rt) (rt_dst(rt).hh)
109 #endif
110
111 static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
112 {
113         return vport_from_priv(tnl_vport);
114 }
115
116 /* This is analogous to rtnl_dereference for the tunnel cache.  It checks that
117  * cache_lock is held, so it is only for update-side code.
118  */
119 static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
120 {
121         return rcu_dereference_protected(tnl_vport->cache,
122                                          lockdep_is_held(&tnl_vport->cache_lock));
123 }
124
125 static inline void schedule_cache_cleaner(void)
126 {
127         schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
128 }
129
130 static void free_cache(struct tnl_cache *cache)
131 {
132         if (!cache)
133                 return;
134
135         flow_put(cache->flow);
136         ip_rt_put(cache->rt);
137         kfree(cache);
138 }
139
140 static void free_config_rcu(struct rcu_head *rcu)
141 {
142         struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
143         kfree(c);
144 }
145
146 static void free_cache_rcu(struct rcu_head *rcu)
147 {
148         struct tnl_cache *c = container_of(rcu, struct tnl_cache, rcu);
149         free_cache(c);
150 }
151
152 /* Frees the portion of 'mutable' that requires RTNL and thus can't happen
153  * within an RCU callback.  Fortunately this part doesn't require waiting for
154  * an RCU grace period.
155  */
156 static void free_mutable_rtnl(struct tnl_mutable_config *mutable)
157 {
158         ASSERT_RTNL();
159         if (ipv4_is_multicast(mutable->key.daddr) && mutable->mlink) {
160                 struct in_device *in_dev;
161                 in_dev = inetdev_by_index(&init_net, mutable->mlink);
162                 if (in_dev)
163                         ip_mc_dec_group(in_dev, mutable->key.daddr);
164         }
165 }
166
167 static void assign_config_rcu(struct vport *vport,
168                               struct tnl_mutable_config *new_config)
169 {
170         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
171         struct tnl_mutable_config *old_config;
172
173         old_config = rtnl_dereference(tnl_vport->mutable);
174         rcu_assign_pointer(tnl_vport->mutable, new_config);
175
176         free_mutable_rtnl(old_config);
177         call_rcu(&old_config->rcu, free_config_rcu);
178 }
179
180 static void assign_cache_rcu(struct vport *vport, struct tnl_cache *new_cache)
181 {
182         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
183         struct tnl_cache *old_cache;
184
185         old_cache = cache_dereference(tnl_vport);
186         rcu_assign_pointer(tnl_vport->cache, new_cache);
187
188         if (old_cache)
189                 call_rcu(&old_cache->rcu, free_cache_rcu);
190 }
191
192 static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
193 {
194         if (mutable->flags & TNL_F_IN_KEY_MATCH) {
195                 if (mutable->key.saddr)
196                         return &local_remote_ports;
197                 else
198                         return &remote_ports;
199         } else {
200                 if (mutable->key.saddr)
201                         return &key_local_remote_ports;
202                 else
203                         return &key_remote_ports;
204         }
205 }
206
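/* Hash the entire port_lookup_key (addresses, in_key, and tunnel type) so
 * that insertions and lookups agree on a bucket.  PORT_KEY_LEN must be a
 * multiple of sizeof(u32) for jhash2().
 */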
207 static u32 port_hash(const struct port_lookup_key *key)
208 {
209         return jhash2((u32*)key, (PORT_KEY_LEN / sizeof(u32)), 0);
210 }
211
212 static inline struct hlist_head *find_bucket(u32 hash)
213 {
214         return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
215 }
216
217 static void port_table_add_port(struct vport *vport)
218 {
219         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
220         const struct tnl_mutable_config *mutable;
221         u32 hash;
222
223         if (port_table_count == 0)
224                 schedule_cache_cleaner();
225
226         mutable = rtnl_dereference(tnl_vport->mutable);
227         hash = port_hash(&mutable->key);
228         hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
229         port_table_count++;
230
231         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
232 }
233
234 static void port_table_move_port(struct vport *vport,
235                       struct tnl_mutable_config *new_mutable)
236 {
237         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
238         u32 hash;
239
240         hash = port_hash(&new_mutable->key);
241         hlist_del_init_rcu(&tnl_vport->hash_node);
242         hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
243
244         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
245         assign_config_rcu(vport, new_mutable);
246         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
247 }
248
249 static void port_table_remove_port(struct vport *vport)
250 {
251         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
252
253         hlist_del_init_rcu(&tnl_vport->hash_node);
254
255         port_table_count--;
256         if (port_table_count == 0)
257                 cancel_delayed_work_sync(&cache_cleaner_wq);
258
259         (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
260 }
261
262 static struct vport *port_table_lookup(struct port_lookup_key *key,
263                                        const struct tnl_mutable_config **pmutable)
264 {
265         struct hlist_node *n;
266         struct hlist_head *bucket;
267         u32 hash = port_hash(key);
268         struct tnl_vport *tnl_vport;
269
270         bucket = find_bucket(hash);
271
272         hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
273                 struct tnl_mutable_config *mutable;
274
275                 mutable = rcu_dereference_rtnl(tnl_vport->mutable);
276                 if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
277                         *pmutable = mutable;
278                         return tnl_vport_to_vport(tnl_vport);
279                 }
280         }
281
282         return NULL;
283 }
284
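/* Find the tunnel vport that a received packet belongs to.  If 'saddr' is a
 * multicast group address, only ports whose configured remote address is
 * that group can match; such ports never have a local address (see
 * tnl_set_config()), so lookup.saddr stays zero.  Unicast lookups are tried
 * from most to least specific: exact in_key with a local address, exact
 * in_key without one, then the same pair with the key wildcarded.  The
 * port-pool counters above let us skip classes with no configured ports.
 */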
285 struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
286                             int tunnel_type,
287                             const struct tnl_mutable_config **mutable)
288 {
289         struct port_lookup_key lookup;
290         struct vport *vport;
291
292         if (ipv4_is_multicast(saddr)) {
293                 lookup.saddr = 0;
294                 lookup.daddr = saddr;
295                 if (key_remote_ports) {
296                         lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
297                         lookup.in_key = key;
298                         vport = port_table_lookup(&lookup, mutable);
299                         if (vport)
300                                 return vport;
301                 }
302                 if (remote_ports) {
303                         lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
304                         lookup.in_key = 0;
305                         vport = port_table_lookup(&lookup, mutable);
306                         if (vport)
307                                 return vport;
308                 }
309                 return NULL;
310         }
311
312         lookup.saddr = saddr;
313         lookup.daddr = daddr;
314
315         /* First try for exact match on in_key. */
316         lookup.in_key = key;
317         lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
318         if (key_local_remote_ports) {
319                 vport = port_table_lookup(&lookup, mutable);
320                 if (vport)
321                         return vport;
322         }
323         if (key_remote_ports) {
324                 lookup.saddr = 0;
325                 vport = port_table_lookup(&lookup, mutable);
326                 if (vport)
327                         return vport;
328
329                 lookup.saddr = saddr;
330         }
331
332         /* Then try matches that wildcard in_key. */
333         lookup.in_key = 0;
334         lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
335         if (local_remote_ports) {
336                 vport = port_table_lookup(&lookup, mutable);
337                 if (vport)
338                         return vport;
339         }
340         if (remote_ports) {
341                 lookup.saddr = 0;
342                 vport = port_table_lookup(&lookup, mutable);
343                 if (vport)
344                         return vport;
345         }
346
347         return NULL;
348 }
349
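/* If the outer IP header arrived with ECN CE set, propagate the congestion
 * mark onto the encapsulated IPv4/IPv6 header (looking through a single
 * VLAN tag if present) so that the signal is not lost by decapsulation.
 */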
350 static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
351 {
352         if (unlikely(INET_ECN_is_ce(tos))) {
353                 __be16 protocol = skb->protocol;
354
355                 skb_set_network_header(skb, ETH_HLEN);
356
357                 if (protocol == htons(ETH_P_8021Q)) {
358                         if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
359                                 return;
360
361                         protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
362                         skb_set_network_header(skb, VLAN_ETH_HLEN);
363                 }
364
365                 if (protocol == htons(ETH_P_IP)) {
366                         if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
367                             + sizeof(struct iphdr))))
368                                 return;
369
370                         IP_ECN_set_ce(ip_hdr(skb));
371                 }
372 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
373                 else if (protocol == htons(ETH_P_IPV6)) {
374                         if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
375                             + sizeof(struct ipv6hdr))))
376                                 return;
377
378                         IP6_ECN_set_ce(ipv6_hdr(skb));
379                 }
380 #endif
381         }
382 }
383
384 /**
385  *      tnl_rcv - ingress point for generic tunnel code
386  *
387  * @vport: port this packet was received on
388  * @skb: received packet
389  * @tos: ToS from encapsulating IP packet, used to copy ECN bits
390  *
391  * Must be called with rcu_read_lock.
392  *
393  * Packets received by this function are in the following state:
394  * - skb->data points to the inner Ethernet header.
395  * - The inner Ethernet header is in the linear data area.
396  * - skb->csum does not include the inner Ethernet header.
397  * - The layer pointers are undefined.
398  */
399 void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
400 {
401         struct ethhdr *eh;
402
403         skb_reset_mac_header(skb);
404         eh = eth_hdr(skb);
405
406         if (likely(ntohs(eh->h_proto) >= 1536))
407                 skb->protocol = eh->h_proto;
408         else
409                 skb->protocol = htons(ETH_P_802_2);
410
411         skb_dst_drop(skb);
412         nf_reset(skb);
413         skb_clear_rxhash(skb);
414         secpath_reset(skb);
415
416         ecn_decapsulate(skb, tos);
417         vlan_set_tci(skb, 0);
418
419         if (unlikely(compute_ip_summed(skb, false))) {
420                 kfree_skb(skb);
421                 return;
422         }
423
424         vport_receive(vport, skb);
425 }
426
427 static bool check_ipv4_address(__be32 addr)
428 {
429         if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
430             || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
431                 return false;
432
433         return true;
434 }
435
436 static bool ipv4_should_icmp(struct sk_buff *skb)
437 {
438         struct iphdr *old_iph = ip_hdr(skb);
439
440         /* Don't respond to L2 broadcast. */
441         if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
442                 return false;
443
444         /* Don't respond to L3 broadcast or invalid addresses. */
445         if (!check_ipv4_address(old_iph->daddr) ||
446             !check_ipv4_address(old_iph->saddr))
447                 return false;
448
449         /* Only respond to the first fragment. */
450         if (old_iph->frag_off & htons(IP_OFFSET))
451                 return false;
452
453         /* Don't respond to ICMP error messages. */
454         if (old_iph->protocol == IPPROTO_ICMP) {
455                 u8 icmp_type, *icmp_typep;
456
457                 icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
458                                                 (old_iph->ihl << 2) +
459                                                 offsetof(struct icmphdr, type) -
460                                                 skb->data, sizeof(icmp_type),
461                                                 &icmp_type);
462
463                 if (!icmp_typep)
464                         return false;
465
466                 if (*icmp_typep > NR_ICMP_TYPES
467                         || (*icmp_typep <= ICMP_PARAMETERPROB
468                                 && *icmp_typep != ICMP_ECHOREPLY
469                                 && *icmp_typep != ICMP_ECHO))
470                         return false;
471         }
472
473         return true;
474 }
475
476 static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
477                             unsigned int mtu, unsigned int payload_length)
478 {
479         struct iphdr *iph, *old_iph = ip_hdr(skb);
480         struct icmphdr *icmph;
481         u8 *payload;
482
483         iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
484         icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
485         payload = skb_put(nskb, payload_length);
486
487         /* IP */
488         iph->version            =       4;
489         iph->ihl                =       sizeof(struct iphdr) >> 2;
490         iph->tos                =       (old_iph->tos & IPTOS_TOS_MASK) |
491                                         IPTOS_PREC_INTERNETCONTROL;
492         iph->tot_len            =       htons(sizeof(struct iphdr)
493                                               + sizeof(struct icmphdr)
494                                               + payload_length);
495         get_random_bytes(&iph->id, sizeof(iph->id));
496         iph->frag_off           =       0;
497         iph->ttl                =       IPDEFTTL;
498         iph->protocol           =       IPPROTO_ICMP;
499         iph->daddr              =       old_iph->saddr;
500         iph->saddr              =       old_iph->daddr;
501
502         ip_send_check(iph);
503
504         /* ICMP */
505         icmph->type             =       ICMP_DEST_UNREACH;
506         icmph->code             =       ICMP_FRAG_NEEDED;
507         icmph->un.gateway       =       htonl(mtu);
508         icmph->checksum         =       0;
509
510         nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
511         nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
512                                             payload, payload_length,
513                                             nskb->csum);
514         icmph->checksum = csum_fold(nskb->csum);
515 }
516
517 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
518 static bool ipv6_should_icmp(struct sk_buff *skb)
519 {
520         struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
521         int addr_type;
522         int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
523         u8 nexthdr = ipv6_hdr(skb)->nexthdr;
524
525         /* Check source address is valid. */
526         addr_type = ipv6_addr_type(&old_ipv6h->saddr);
527         if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
528                 return false;
529
530         /* Don't reply to unspecified addresses. */
531         if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
532                 return false;
533
534         /* Don't respond to ICMP error messages. */
535         payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr);
536         if (payload_off < 0)
537                 return false;
538
539         if (nexthdr == NEXTHDR_ICMP) {
540                 u8 icmp_type, *icmp_typep;
541
542                 icmp_typep = skb_header_pointer(skb, payload_off +
543                                                 offsetof(struct icmp6hdr,
544                                                         icmp6_type),
545                                                 sizeof(icmp_type), &icmp_type);
546
547                 if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
548                         return false;
549         }
550
551         return true;
552 }
553
554 static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
555                             unsigned int mtu, unsigned int payload_length)
556 {
557         struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
558         struct icmp6hdr *icmp6h;
559         u8 *payload;
560
561         ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
562         icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
563         payload = skb_put(nskb, payload_length);
564
565         /* IPv6 */
566         ipv6h->version          =       6;
567         ipv6h->priority         =       0;
568         memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
569         ipv6h->payload_len      =       htons(sizeof(struct icmp6hdr)
570                                               + payload_length);
571         ipv6h->nexthdr          =       NEXTHDR_ICMP;
572         ipv6h->hop_limit        =       IPV6_DEFAULT_HOPLIMIT;
573         ipv6_addr_copy(&ipv6h->daddr, &old_ipv6h->saddr);
574         ipv6_addr_copy(&ipv6h->saddr, &old_ipv6h->daddr);
575
576         /* ICMPv6 */
577         icmp6h->icmp6_type      =       ICMPV6_PKT_TOOBIG;
578         icmp6h->icmp6_code      =       0;
579         icmp6h->icmp6_cksum     =       0;
580         icmp6h->icmp6_mtu       =       htonl(mtu);
581
582         nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
583         nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
584                                             payload, payload_length,
585                                             nskb->csum);
586         icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
587                                                 sizeof(struct icmp6hdr)
588                                                 + payload_length,
589                                                 ipv6h->nexthdr, nskb->csum);
590 }
591 #endif /* IPv6 */
592
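/* Synthesize an ICMP "fragmentation needed" (or ICMPv6 "packet too big")
 * reply for the over-sized packet in 'skb' and inject it back through
 * 'vport', as if it had arrived from the far end of the tunnel.  The sender
 * gets PMTU feedback without the packet ever leaving this host.  'flow_key'
 * is used as the tunnel ID of the fake packet when keys are flow based.
 */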
593 bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
594                      struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
595 {
596         unsigned int eth_hdr_len = ETH_HLEN;
597         unsigned int total_length = 0, header_length = 0, payload_length;
598         struct ethhdr *eh, *old_eh = eth_hdr(skb);
599         struct sk_buff *nskb;
600
601         /* Sanity check */
602         if (skb->protocol == htons(ETH_P_IP)) {
603                 if (mtu < IP_MIN_MTU)
604                         return false;
605
606                 if (!ipv4_should_icmp(skb))
607                         return true;
608         }
609 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
610         else if (skb->protocol == htons(ETH_P_IPV6)) {
611                 if (mtu < IPV6_MIN_MTU)
612                         return false;
613
614                 /*
615                  * In theory we should do PMTUD on IPv6 multicast messages, but
616                  * we don't have an address to send from, so just fragment.
617                  */
618                 if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
619                         return false;
620
621                 if (!ipv6_should_icmp(skb))
622                         return true;
623         }
624 #endif
625         else
626                 return false;
627
628         /* Allocate */
629         if (old_eh->h_proto == htons(ETH_P_8021Q))
630                 eth_hdr_len = VLAN_ETH_HLEN;
631
632         payload_length = skb->len - eth_hdr_len;
633         if (skb->protocol == htons(ETH_P_IP)) {
634                 header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
635                 total_length = min_t(unsigned int, header_length +
636                                                    payload_length, 576);
637         }
638 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
639         else {
640                 header_length = sizeof(struct ipv6hdr) +
641                                 sizeof(struct icmp6hdr);
642                 total_length = min_t(unsigned int, header_length +
643                                                   payload_length, IPV6_MIN_MTU);
644         }
645 #endif
646
647         payload_length = total_length - header_length;
648
649         nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
650                              payload_length);
651         if (!nskb)
652                 return false;
653
654         skb_reserve(nskb, NET_IP_ALIGN);
655
656         /* Ethernet / VLAN */
657         eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
658         memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
659         memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
660         nskb->protocol = eh->h_proto = old_eh->h_proto;
661         if (old_eh->h_proto == htons(ETH_P_8021Q)) {
662                 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;
663
664                 vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
665                 vh->h_vlan_encapsulated_proto = skb->protocol;
666         } else
667                 vlan_set_tci(nskb, vlan_get_tci(skb));
668         skb_reset_mac_header(nskb);
669
670         /* Protocol */
671         if (skb->protocol == htons(ETH_P_IP))
672                 ipv4_build_icmp(skb, nskb, mtu, payload_length);
673 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
674         else
675                 ipv6_build_icmp(skb, nskb, mtu, payload_length);
676 #endif
677
678         /*
679          * Assume that flow based keys are symmetric with respect to input
680          * and output and use the key that we were going to put on the
681          * outgoing packet for the fake received packet.  If the keys are
682          * not symmetric then PMTUD needs to be disabled since we won't have
683          * any way of synthesizing packets.
684          */
685         if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
686             (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
687                 OVS_CB(nskb)->tun_id = flow_key;
688
689         if (unlikely(compute_ip_summed(nskb, false))) {
690                 kfree_skb(nskb);
691                 return false;
692         }
693
694         vport_receive(vport, nskb);
695
696         return true;
697 }
698
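/* Decide whether 'skb' fits the path MTU of the tunnel.  Computes the
 * effective MTU (path MTU minus Ethernet, tunnel header, and any VLAN tag),
 * generates PMTU feedback via tnl_frag_needed() when the packet is too big
 * and DF semantics apply, and reports the DF bit to set on the outer header
 * through '*frag_offp'.  Returns false if the packet has been handled and
 * should not be transmitted.
 */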
699 static bool check_mtu(struct sk_buff *skb,
700                       struct vport *vport,
701                       const struct tnl_mutable_config *mutable,
702                       const struct rtable *rt, __be16 *frag_offp)
703 {
704         bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
705         bool pmtud = mutable->flags & TNL_F_PMTUD;
706         __be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
707         int mtu = 0;
708         unsigned int packet_length = skb->len - ETH_HLEN;
709
710         /* Allow for one level of tagging in the packet length. */
711         if (!vlan_tx_tag_present(skb) &&
712             eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
713                 packet_length -= VLAN_HLEN;
714
715         if (pmtud) {
716                 int vlan_header = 0;
717
718                 /* The tag needs to go in the packet regardless of where it
719                  * currently is, so subtract it from the MTU.
720                  */
721                 if (vlan_tx_tag_present(skb) ||
722                     eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
723                         vlan_header = VLAN_HLEN;
724
725                 mtu = dst_mtu(&rt_dst(rt))
726                         - ETH_HLEN
727                         - mutable->tunnel_hlen
728                         - vlan_header;
729         }
730
731         if (skb->protocol == htons(ETH_P_IP)) {
732                 struct iphdr *iph = ip_hdr(skb);
733
734                 if (df_inherit)
735                         frag_off = iph->frag_off & htons(IP_DF);
736
737                 if (pmtud && iph->frag_off & htons(IP_DF)) {
738                         mtu = max(mtu, IP_MIN_MTU);
739
740                         if (packet_length > mtu &&
741                             tnl_frag_needed(vport, mutable, skb, mtu,
742                                             OVS_CB(skb)->tun_id))
743                                 return false;
744                 }
745         }
746 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
747         else if (skb->protocol == htons(ETH_P_IPV6)) {
748                 /* IPv6 requires end hosts to do fragmentation
749                  * if the packet is above the minimum MTU.
750                  */
751                 if (df_inherit && packet_length > IPV6_MIN_MTU)
752                         frag_off = htons(IP_DF);
753
754                 if (pmtud) {
755                         mtu = max(mtu, IPV6_MIN_MTU);
756
757                         if (packet_length > mtu &&
758                             tnl_frag_needed(vport, mutable, skb, mtu,
759                                             OVS_CB(skb)->tun_id))
760                                 return false;
761                 }
762         }
763 #endif
764
765         *frag_offp = frag_off;
766         return true;
767 }
768
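/* Write the outer IPv4 header for this tunnel into 'header', using the
 * addresses from the cached route, then let the protocol-specific
 * build_header() op fill in its own header immediately after it.
 */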
769 static void create_tunnel_header(const struct vport *vport,
770                                  const struct tnl_mutable_config *mutable,
771                                  const struct rtable *rt, void *header)
772 {
773         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
774         struct iphdr *iph = header;
775
776         iph->version    = 4;
777         iph->ihl        = sizeof(struct iphdr) >> 2;
778         iph->frag_off   = htons(IP_DF);
779         iph->protocol   = tnl_vport->tnl_ops->ipproto;
780         iph->tos        = mutable->tos;
781         iph->daddr      = rt->rt_dst;
782         iph->saddr      = rt->rt_src;
783         iph->ttl        = mutable->ttl;
784         if (!iph->ttl)
785                 iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));
786
787         tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
788 }
789
790 static inline void *get_cached_header(const struct tnl_cache *cache)
791 {
792         return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
793 }
794
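/* A cached header is usable only while everything it was derived from is
 * still current: the neighbour/hard-header entry, the routing generation
 * (where the kernel exposes one), the hh seqlock value, the port's mutable
 * configuration, and, for routes into an internal device, the flow it
 * pinned.  On kernels without change notification the entry also ages out
 * after MAX_CACHE_EXP.
 */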
795 static inline bool check_cache_valid(const struct tnl_cache *cache,
796                                      const struct tnl_mutable_config *mutable)
797 {
798         struct hh_cache *hh;
799
800         if (!cache)
801                 return false;
802
803         hh = rt_hh(cache->rt);
804         return hh &&
805 #ifdef NEED_CACHE_TIMEOUT
806                 time_before(jiffies, cache->expiration) &&
807 #endif
808 #ifdef HAVE_RT_GENID
809                 atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
810 #endif
811 #ifdef HAVE_HH_SEQ
812                 hh->hh_lock.sequence == cache->hh_seq &&
813 #endif
814                 mutable->seq == cache->mutable_seq &&
815                 (!is_internal_dev(rt_dst(cache->rt).dev) ||
816                 (cache->flow && !cache->flow->dead));
817 }
818
819 static void __cache_cleaner(struct tnl_vport *tnl_vport)
820 {
821         const struct tnl_mutable_config *mutable =
822                         rcu_dereference(tnl_vport->mutable);
823         const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);
824
825         if (cache && !check_cache_valid(cache, mutable) &&
826             spin_trylock_bh(&tnl_vport->cache_lock)) {
827                 assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
828                 spin_unlock_bh(&tnl_vport->cache_lock);
829         }
830 }
831
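/* Periodic work item: walk every bucket of the port table under RCU,
 * dropping header caches that are no longer valid, and reschedule itself
 * CACHE_CLEANER_INTERVAL jiffies later.
 */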
832 static void cache_cleaner(struct work_struct *work)
833 {
834         int i;
835
836         schedule_cache_cleaner();
837
838         rcu_read_lock();
839         for (i = 0; i < PORT_TABLE_SIZE; i++) {
840                 struct hlist_node *n;
841                 struct hlist_head *bucket;
842                 struct tnl_vport  *tnl_vport;
843
844                 bucket = &port_table[i];
845                 hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
846                         __cache_cleaner(tnl_vport);
847         }
848         rcu_read_unlock();
849 }
850
851 static inline void create_eth_hdr(struct tnl_cache *cache,
852                                   struct hh_cache *hh)
853 {
854         void *cache_data = get_cached_header(cache);
855         int hh_off;
856
857 #ifdef HAVE_HH_SEQ
858         unsigned hh_seq;
859
860         do {
861                 hh_seq = read_seqbegin(&hh->hh_lock);
862                 hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
863                 memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
864                 cache->hh_len = hh->hh_len;
865         } while (read_seqretry(&hh->hh_lock, hh_seq));
866
867         cache->hh_seq = hh_seq;
868 #else
869         read_lock(&hh->hh_lock);
870         hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
871         memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
872         cache->hh_len = hh->hh_len;
873         read_unlock(&hh->hh_lock);
874 #endif
875 }
876
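/* Build (or reuse) the header cache for this port: a precomputed Ethernet +
 * outer IP + tunnel header that later packets can simply copy in front of
 * their payload.  If the route's output device is an OVS internal device,
 * also look up the flow that the encapsulated packet would hit and hold a
 * reference to it so transmission can short-circuit through vport_receive().
 * Returns NULL if no cache can be built (caching disabled, no hard header
 * yet, lock contended, or allocation failure); the caller then builds
 * headers per packet.
 */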
877 static struct tnl_cache *build_cache(struct vport *vport,
878                                      const struct tnl_mutable_config *mutable,
879                                      struct rtable *rt)
880 {
881         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
882         struct tnl_cache *cache;
883         void *cache_data;
884         int cache_len;
885         struct hh_cache *hh;
886
887         if (!(mutable->flags & TNL_F_HDR_CACHE))
888                 return NULL;
889
890         /*
891          * If there is no entry in the ARP cache or if this device does not
892  * support hard header caching, just fall back to the IP stack.
893          */
894
895         hh = rt_hh(rt);
896         if (!hh)
897                 return NULL;
898
899         /*
900          * If lock is contended fall back to directly building the header.
901          * We're not going to help performance by sitting here spinning.
902          */
903         if (!spin_trylock(&tnl_vport->cache_lock))
904                 return NULL;
905
906         cache = cache_dereference(tnl_vport);
907         if (check_cache_valid(cache, mutable))
908                 goto unlock;
909         else
910                 cache = NULL;
911
912         cache_len = LL_RESERVED_SPACE(rt_dst(rt).dev) + mutable->tunnel_hlen;
913
914         cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
915                         cache_len, GFP_ATOMIC);
916         if (!cache)
917                 goto unlock;
918
919         create_eth_hdr(cache, hh);
920         cache_data = get_cached_header(cache) + cache->hh_len;
921         cache->len = cache->hh_len + mutable->tunnel_hlen;
922
923         create_tunnel_header(vport, mutable, rt, cache_data);
924
925         cache->mutable_seq = mutable->seq;
926         cache->rt = rt;
927 #ifdef NEED_CACHE_TIMEOUT
928         cache->expiration = jiffies + tnl_vport->cache_exp_interval;
929 #endif
930
931         if (is_internal_dev(rt_dst(rt).dev)) {
932                 struct sw_flow_key flow_key;
933                 struct vport *dst_vport;
934                 struct sk_buff *skb;
935                 int err;
936                 int flow_key_len;
937                 struct sw_flow *flow;
938
939                 dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
940                 if (!dst_vport)
941                         goto done;
942
943                 skb = alloc_skb(cache->len, GFP_ATOMIC);
944                 if (!skb)
945                         goto done;
946
947                 __skb_put(skb, cache->len);
948                 memcpy(skb->data, get_cached_header(cache), cache->len);
949
950                 err = flow_extract(skb, dst_vport->port_no, &flow_key,
951                                    &flow_key_len);
952
953                 consume_skb(skb);
954                 if (err)
955                         goto done;
956
957                 flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
958                                          &flow_key, flow_key_len);
959                 if (flow) {
960                         cache->flow = flow;
961                         flow_hold(flow);
962                 }
963         }
964
965 done:
966         assign_cache_rcu(vport, cache);
967
968 unlock:
969         spin_unlock(&tnl_vport->cache_lock);
970
971         return cache;
972 }
973
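/* Route lookup towards the configured remote address.  Kernels before
 * 2.6.39 take a struct flowi and return the route through an out parameter;
 * newer ones use struct flowi4 and return it directly, so both forms are
 * wrapped here.
 */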
974 static struct rtable *__find_route(const struct tnl_mutable_config *mutable,
975                                    u8 ipproto, u8 tos)
976 {
977 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
978         struct flowi fl = { .nl_u = { .ip4_u =
979                                       { .daddr = mutable->key.daddr,
980                                         .saddr = mutable->key.saddr,
981                                         .tos = tos } },
982                             .proto = ipproto };
983         struct rtable *rt;
984
985         if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
986                 return ERR_PTR(-EADDRNOTAVAIL);
987
988         return rt;
989 #else
990         struct flowi4 fl = { .daddr = mutable->key.daddr,
991                              .saddr = mutable->key.saddr,
992                              .flowi4_tos = tos,
993                              .flowi4_proto = ipproto };
994
995         return ip_route_output_key(&init_net, &fl);
996 #endif
997 }
998
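/* Return a route to the tunnel destination for the given ToS.  When the ToS
 * matches the configured one and the existing header cache is still valid,
 * reuse the cached route (and hand the cache back through '*cache');
 * otherwise do a fresh lookup and, if possible, rebuild the cache.
 */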
999 static struct rtable *find_route(struct vport *vport,
1000                                  const struct tnl_mutable_config *mutable,
1001                                  u8 tos, struct tnl_cache **cache)
1002 {
1003         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1004         struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);
1005
1006         *cache = NULL;
1007         tos = RT_TOS(tos);
1008
1009         if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) {
1010                 *cache = cur_cache;
1011                 return cur_cache->rt;
1012         } else {
1013                 struct rtable *rt;
1014
1015                 rt = __find_route(mutable, tnl_vport->tnl_ops->ipproto, tos);
1016                 if (IS_ERR(rt))
1017                         return NULL;
1018
1019                 if (likely(tos == mutable->tos))
1020                         *cache = build_cache(vport, mutable, rt);
1021
1022                 return rt;
1023         }
1024 }
1025
1026 static inline bool need_linearize(const struct sk_buff *skb)
1027 {
1028         int i;
1029
1030         if (unlikely(skb_shinfo(skb)->frag_list))
1031                 return true;
1032
1033         /*
1034          * Generally speaking we should linearize if there are paged frags.
1035          * However, if all of the refcounts are 1 we know nobody else can
1036          * change them from underneath us and we can skip the linearization.
1037          */
1038         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1039                 if (unlikely(page_count(skb_shinfo(skb)->frags[i].page) > 1))
1040                         return true;
1041
1042         return false;
1043 }
1044
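/* Prepare 'skb' for encapsulation: make sure there is enough headroom for
 * the link-layer and tunnel headers, perform GSO segmentation in software
 * (hardware cannot segment once the outer header hides the inner protocol),
 * and resolve any pending partial checksum before the packet is wrapped.
 * May return a list of segments chained through skb->next, or an ERR_PTR()
 * on failure.
 */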
1045 static struct sk_buff *handle_offloads(struct sk_buff *skb,
1046                                        const struct tnl_mutable_config *mutable,
1047                                        const struct rtable *rt)
1048 {
1049         int min_headroom;
1050         int err;
1051
1052         min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
1053                         + mutable->tunnel_hlen
1054                         + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
1055
1056         if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
1057                 int head_delta = SKB_DATA_ALIGN(min_headroom -
1058                                                 skb_headroom(skb) +
1059                                                 16);
1060                 err = pskb_expand_head(skb, max_t(int, head_delta, 0),
1061                                         0, GFP_ATOMIC);
1062                 if (unlikely(err))
1063                         goto error_free;
1064         }
1065
1066         forward_ip_summed(skb, true);
1067
1068         if (skb_is_gso(skb)) {
1069                 struct sk_buff *nskb;
1070
1071                 nskb = skb_gso_segment(skb, 0);
1072                 if (IS_ERR(nskb)) {
1073                         kfree_skb(skb);
1074                         err = PTR_ERR(nskb);
1075                         goto error;
1076                 }
1077
1078                 consume_skb(skb);
1079                 skb = nskb;
1080         } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
1081                 /* Pages aren't locked and could change at any time.
1082                  * If this happens after we compute the checksum, the
1083                  * checksum will be wrong.  We linearize now to avoid
1084                  * this problem.
1085                  */
1086                 if (unlikely(need_linearize(skb))) {
1087                         err = __skb_linearize(skb);
1088                         if (unlikely(err))
1089                                 goto error_free;
1090                 }
1091
1092                 err = skb_checksum_help(skb);
1093                 if (unlikely(err))
1094                         goto error_free;
1095         }
1096
1097         set_ip_summed(skb, OVS_CSUM_NONE);
1098
1099         return skb;
1100
1101 error_free:
1102         kfree_skb(skb);
1103 error:
1104         return ERR_PTR(err);
1105 }
1106
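/* Hand each fragment in the skb list to ip_local_out(), stopping at the
 * first one the stack refuses.  Returns the number of payload bytes
 * (excluding tunnel headers) that were accepted for transmission.
 */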
1107 static int send_frags(struct sk_buff *skb,
1108                       const struct tnl_mutable_config *mutable)
1109 {
1110         int sent_len;
1111
1112         sent_len = 0;
1113         while (skb) {
1114                 struct sk_buff *next = skb->next;
1115                 int frag_len = skb->len - mutable->tunnel_hlen;
1116                 int err;
1117
1118                 skb->next = NULL;
1119                 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
1120
1121                 err = ip_local_out(skb);
1122                 skb = next;
1123                 if (unlikely(net_xmit_eval(err)))
1124                         goto free_frags;
1125                 sent_len += frag_len;
1126         }
1127
1128         return sent_len;
1129
1130 free_frags:
1131         /*
1132          * There's no point in continuing to send fragments once one has been
1133          * dropped, so just free the rest.  This may help improve the congestion
1134          * that caused the first packet to be dropped.
1135          */
1136         tnl_free_linked_skbs(skb);
1137         return sent_len;
1138 }
1139
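/* Transmit path for all tunnel vports: validate and classify the inner
 * headers, pick ToS/TTL (inherited or configured), find a route and header
 * cache, fix up offloads, enforce the MTU, then prepend the outer headers
 * and send each resulting packet, either straight to another OVS internal
 * device via the cached flow or out through the IP stack.  Returns the
 * number of bytes sent; errors are recorded against the vport.
 */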
1140 int tnl_send(struct vport *vport, struct sk_buff *skb)
1141 {
1142         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1143         const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
1144
1145         enum vport_err_type err = VPORT_E_TX_ERROR;
1146         struct rtable *rt;
1147         struct dst_entry *unattached_dst = NULL;
1148         struct tnl_cache *cache;
1149         int sent_len = 0;
1150         __be16 frag_off = 0;
1151         u8 ttl;
1152         u8 inner_tos;
1153         u8 tos;
1154
1155         /* Validate the protocol headers before we try to use them. */
1156         if (skb->protocol == htons(ETH_P_8021Q) &&
1157             !vlan_tx_tag_present(skb)) {
1158                 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
1159                         goto error_free;
1160
1161                 skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
1162                 skb_set_network_header(skb, VLAN_ETH_HLEN);
1163         }
1164
1165         if (skb->protocol == htons(ETH_P_IP)) {
1166                 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1167                     + sizeof(struct iphdr))))
1168                         skb->protocol = 0;
1169         }
1170 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1171         else if (skb->protocol == htons(ETH_P_IPV6)) {
1172                 if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
1173                     + sizeof(struct ipv6hdr))))
1174                         skb->protocol = 0;
1175         }
1176 #endif
1177
1178         /* ToS */
1179         if (skb->protocol == htons(ETH_P_IP))
1180                 inner_tos = ip_hdr(skb)->tos;
1181 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1182         else if (skb->protocol == htons(ETH_P_IPV6))
1183                 inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
1184 #endif
1185         else
1186                 inner_tos = 0;
1187
1188         if (mutable->flags & TNL_F_TOS_INHERIT)
1189                 tos = inner_tos;
1190         else
1191                 tos = mutable->tos;
1192
1193         tos = INET_ECN_encapsulate(tos, inner_tos);
1194
1195         /* Route lookup */
1196         rt = find_route(vport, mutable, tos, &cache);
1197         if (unlikely(!rt))
1198                 goto error_free;
1199         if (unlikely(!cache))
1200                 unattached_dst = &rt_dst(rt);
1201
1202         /* Reset SKB */
1203         nf_reset(skb);
1204         secpath_reset(skb);
1205         skb_dst_drop(skb);
1206         skb_clear_rxhash(skb);
1207
1208         /* Offloading */
1209         skb = handle_offloads(skb, mutable, rt);
1210         if (IS_ERR(skb))
1211                 goto error;
1212
1213         /* MTU */
1214         if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off))) {
1215                 err = VPORT_E_TX_DROPPED;
1216                 goto error_free;
1217         }
1218
1219         /*
1220          * If we are over the MTU, allow the IP stack to handle fragmentation.
1221          * Fragmentation is a slow path anyway.
1222          */
1223         if (unlikely(skb->len + mutable->tunnel_hlen > dst_mtu(&rt_dst(rt)) &&
1224                      cache)) {
1225                 unattached_dst = &rt_dst(rt);
1226                 dst_hold(unattached_dst);
1227                 cache = NULL;
1228         }
1229
1230         /* TTL */
1231         ttl = mutable->ttl;
1232         if (!ttl)
1233                 ttl = ip4_dst_hoplimit(&rt_dst(rt));
1234
1235         if (mutable->flags & TNL_F_TTL_INHERIT) {
1236                 if (skb->protocol == htons(ETH_P_IP))
1237                         ttl = ip_hdr(skb)->ttl;
1238 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1239                 else if (skb->protocol == htons(ETH_P_IPV6))
1240                         ttl = ipv6_hdr(skb)->hop_limit;
1241 #endif
1242         }
1243
1244         while (skb) {
1245                 struct iphdr *iph;
1246                 struct sk_buff *next_skb = skb->next;
1247                 skb->next = NULL;
1248
1249                 if (unlikely(vlan_deaccel_tag(skb)))
1250                         goto next;
1251
1252                 if (likely(cache)) {
1253                         skb_push(skb, cache->len);
1254                         memcpy(skb->data, get_cached_header(cache), cache->len);
1255                         skb_reset_mac_header(skb);
1256                         skb_set_network_header(skb, cache->hh_len);
1257
1258                 } else {
1259                         skb_push(skb, mutable->tunnel_hlen);
1260                         create_tunnel_header(vport, mutable, rt, skb->data);
1261                         skb_reset_network_header(skb);
1262
1263                         if (next_skb)
1264                                 skb_dst_set(skb, dst_clone(unattached_dst));
1265                         else {
1266                                 skb_dst_set(skb, unattached_dst);
1267                                 unattached_dst = NULL;
1268                         }
1269                 }
1270                 skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));
1271
1272                 iph = ip_hdr(skb);
1273                 iph->tos = tos;
1274                 iph->ttl = ttl;
1275                 iph->frag_off = frag_off;
1276                 ip_select_ident(iph, &rt_dst(rt), NULL);
1277
1278                 skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
1279                 if (unlikely(!skb))
1280                         goto next;
1281
1282                 if (likely(cache)) {
1283                         int orig_len = skb->len - cache->len;
1284                         struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
1285
1286                         skb->protocol = htons(ETH_P_IP);
1287                         iph = ip_hdr(skb);
1288                         iph->tot_len = htons(skb->len - skb_network_offset(skb));
1289                         ip_send_check(iph);
1290
1291                         if (cache_vport) {
1292                                 if (unlikely(compute_ip_summed(skb, true))) {
1293                                         kfree_skb(skb);
1294                                         goto next;
1295                                 }
1296
1297                                 OVS_CB(skb)->flow = cache->flow;
1298                                 vport_receive(cache_vport, skb);
1299                                 sent_len += orig_len;
1300                         } else {
1301                                 int xmit_err;
1302
1303                                 skb->dev = rt_dst(rt).dev;
1304                                 xmit_err = dev_queue_xmit(skb);
1305
1306                                 if (likely(net_xmit_eval(xmit_err) == 0))
1307                                         sent_len += orig_len;
1308                         }
1309                 } else
1310                         sent_len += send_frags(skb, mutable);
1311
1312 next:
1313                 skb = next_skb;
1314         }
1315
1316         if (unlikely(sent_len == 0))
1317                 vport_record_error(vport, VPORT_E_TX_DROPPED);
1318
1319         goto out;
1320
1321 error_free:
1322         tnl_free_linked_skbs(skb);
1323 error:
1324         vport_record_error(vport, err);
1325 out:
1326         dst_release(unattached_dst);
1327         return sent_len;
1328 }
1329
1330 static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
1331         [OVS_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
1332         [OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
1333         [OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
1334         [OVS_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
1335         [OVS_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
1336         [OVS_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
1337         [OVS_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
1338 };
1339
1340 /* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */
1341 static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
1342                           const struct vport *cur_vport,
1343                           struct tnl_mutable_config *mutable)
1344 {
1345         const struct vport *old_vport;
1346         const struct tnl_mutable_config *old_mutable;
1347         struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
1348         int err;
1349
1350         if (!options)
1351                 return -EINVAL;
1352
1353         err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
1354         if (err)
1355                 return err;
1356
1357         if (!a[OVS_TUNNEL_ATTR_FLAGS] || !a[OVS_TUNNEL_ATTR_DST_IPV4])
1358                 return -EINVAL;
1359
1360         mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;
1361
1362         mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);
1363         if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) {
1364                 if (ipv4_is_multicast(mutable->key.daddr))
1365                         return -EINVAL;
1366                 mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
1367         }
1368
1369         if (a[OVS_TUNNEL_ATTR_TOS]) {
1370                 mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
1371                 if (mutable->tos != RT_TOS(mutable->tos))
1372                         return -EINVAL;
1373         }
1374
1375         if (a[OVS_TUNNEL_ATTR_TTL])
1376                 mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);
1377
1378         mutable->key.tunnel_type = tnl_ops->tunnel_type;
1379         if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
1380                 mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
1381                 mutable->flags |= TNL_F_IN_KEY_MATCH;
1382         } else {
1383                 mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
1384                 mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
1385         }
1386
1387         if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
1388                 mutable->flags |= TNL_F_OUT_KEY_ACTION;
1389         else
1390                 mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);
1391
1392         mutable->tunnel_hlen = tnl_ops->hdr_len(mutable);
1393         if (mutable->tunnel_hlen < 0)
1394                 return mutable->tunnel_hlen;
1395
1396         mutable->tunnel_hlen += sizeof(struct iphdr);
1397
1398         old_vport = port_table_lookup(&mutable->key, &old_mutable);
1399         if (old_vport && old_vport != cur_vport)
1400                 return -EEXIST;
1401
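        /* For a multicast tunnel, join the group on the device that the
         * route to the group points at, so the IP stack will deliver tunnel
         * packets sent to that group.  Remember the ifindex in 'mlink' so
         * free_mutable_rtnl() can leave the group again later.
         */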
1402         mutable->mlink = 0;
1403         if (ipv4_is_multicast(mutable->key.daddr)) {
1404                 struct net_device *dev;
1405                 struct rtable *rt;
1406
1407                 rt = __find_route(mutable, tnl_ops->ipproto, mutable->tos);
1408                 if (IS_ERR(rt))
1409                         return -EADDRNOTAVAIL;
1410                 dev = rt_dst(rt).dev;
1411                 ip_rt_put(rt);
1412                 if (__in_dev_get_rtnl(dev) == NULL)
1413                         return -EADDRNOTAVAIL;
1414                 mutable->mlink = dev->ifindex;
1415                 ip_mc_inc_group(__in_dev_get_rtnl(dev), mutable->key.daddr);
1416         }
1417
1418         return 0;
1419 }
1420
1421 struct vport *tnl_create(const struct vport_parms *parms,
1422                          const struct vport_ops *vport_ops,
1423                          const struct tnl_ops *tnl_ops)
1424 {
1425         struct vport *vport;
1426         struct tnl_vport *tnl_vport;
1427         struct tnl_mutable_config *mutable;
1428         int initial_frag_id;
1429         int err;
1430
1431         vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
1432         if (IS_ERR(vport)) {
1433                 err = PTR_ERR(vport);
1434                 goto error;
1435         }
1436
1437         tnl_vport = tnl_vport_priv(vport);
1438
1439         strcpy(tnl_vport->name, parms->name);
1440         tnl_vport->tnl_ops = tnl_ops;
1441
1442         mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1443         if (!mutable) {
1444                 err = -ENOMEM;
1445                 goto error_free_vport;
1446         }
1447
1448         vport_gen_rand_ether_addr(mutable->eth_addr);
1449
1450         get_random_bytes(&initial_frag_id, sizeof(int));
1451         atomic_set(&tnl_vport->frag_id, initial_frag_id);
1452
1453         err = tnl_set_config(parms->options, tnl_ops, NULL, mutable);
1454         if (err)
1455                 goto error_free_mutable;
1456
1457         spin_lock_init(&tnl_vport->cache_lock);
1458
1459 #ifdef NEED_CACHE_TIMEOUT
1460         tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
1461                                        (net_random() % (MAX_CACHE_EXP / 2));
1462 #endif
1463
1464         rcu_assign_pointer(tnl_vport->mutable, mutable);
1465
1466         port_table_add_port(vport);
1467         return vport;
1468
1469 error_free_mutable:
1470         free_mutable_rtnl(mutable);
1471         kfree(mutable);
1472 error_free_vport:
1473         vport_free(vport);
1474 error:
1475         return ERR_PTR(err);
1476 }
1477
1478 int tnl_set_options(struct vport *vport, struct nlattr *options)
1479 {
1480         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1481         const struct tnl_mutable_config *old_mutable;
1482         struct tnl_mutable_config *mutable;
1483         int err;
1484
1485         mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
1486         if (!mutable) {
1487                 err = -ENOMEM;
1488                 goto error;
1489         }
1490
1491         /* Copy fields whose values should be retained. */
1492         old_mutable = rtnl_dereference(tnl_vport->mutable);
1493         mutable->seq = old_mutable->seq + 1;
1494         memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);
1495
1496         /* Parse the others configured by userspace. */
1497         err = tnl_set_config(options, tnl_vport->tnl_ops, vport, mutable);
1498         if (err)
1499                 goto error_free;
1500
1501         if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
1502                 port_table_move_port(vport, mutable);
1503         else
1504                 assign_config_rcu(vport, mutable);
1505
1506         return 0;
1507
1508 error_free:
1509         free_mutable_rtnl(mutable);
1510         kfree(mutable);
1511 error:
1512         return err;
1513 }
1514
1515 int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
1516 {
1517         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1518         const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);
1519
1520         NLA_PUT_U32(skb, OVS_TUNNEL_ATTR_FLAGS, mutable->flags & TNL_F_PUBLIC);
1521         NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr);
1522
1523         if (!(mutable->flags & TNL_F_IN_KEY_MATCH))
1524                 NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key);
1525         if (!(mutable->flags & TNL_F_OUT_KEY_ACTION))
1526                 NLA_PUT_BE64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key);
1527         if (mutable->key.saddr)
1528                 NLA_PUT_BE32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr);
1529         if (mutable->tos)
1530                 NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos);
1531         if (mutable->ttl)
1532                 NLA_PUT_U8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl);
1533
1534         return 0;
1535
1536 nla_put_failure:
1537         return -EMSGSIZE;
1538 }
1539
1540 static void free_port_rcu(struct rcu_head *rcu)
1541 {
1542         struct tnl_vport *tnl_vport = container_of(rcu,
1543                                                    struct tnl_vport, rcu);
1544
1545         free_cache((struct tnl_cache __force *)tnl_vport->cache);
1546         kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
1547         vport_free(tnl_vport_to_vport(tnl_vport));
1548 }
1549
1550 void tnl_destroy(struct vport *vport)
1551 {
1552         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1553         struct tnl_mutable_config *mutable;
1554
1555         mutable = rtnl_dereference(tnl_vport->mutable);
1556         port_table_remove_port(vport);
1557         free_mutable_rtnl(mutable);
1558         call_rcu(&tnl_vport->rcu, free_port_rcu);
1559 }
1560
1561 int tnl_set_addr(struct vport *vport, const unsigned char *addr)
1562 {
1563         struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1564         struct tnl_mutable_config *old_mutable, *mutable;
1565
1566         old_mutable = rtnl_dereference(tnl_vport->mutable);
1567         mutable = kmemdup(old_mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
1568         if (!mutable)
1569                 return -ENOMEM;
1570
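        /* The duplicated config inherits the multicast group reference, so
         * clear 'mlink' in the old copy to keep free_mutable_rtnl() from
         * dropping the group membership when the old config is released.
         */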
1571         old_mutable->mlink = 0;
1572
1573         memcpy(mutable->eth_addr, addr, ETH_ALEN);
1574         assign_config_rcu(vport, mutable);
1575
1576         return 0;
1577 }
1578
1579 const char *tnl_get_name(const struct vport *vport)
1580 {
1581         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1582         return tnl_vport->name;
1583 }
1584
1585 const unsigned char *tnl_get_addr(const struct vport *vport)
1586 {
1587         const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
1588         return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
1589 }
1590
1591 void tnl_free_linked_skbs(struct sk_buff *skb)
1592 {
1593         while (skb) {
1594                 struct sk_buff *next = skb->next;
1595                 kfree_skb(skb);
1596                 skb = next;
1597         }
1598 }
1599
1600 int tnl_init(void)
1601 {
1602         int i;
1603
1604         port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
1605                         GFP_KERNEL);
1606         if (!port_table)
1607                 return -ENOMEM;
1608
1609         for (i = 0; i < PORT_TABLE_SIZE; i++)
1610                 INIT_HLIST_HEAD(&port_table[i]);
1611
1612         return 0;
1613 }
1614
1615 void tnl_exit(void)
1616 {
1617         int i;
1618
1619         for (i = 0; i < PORT_TABLE_SIZE; i++) {
1620                 struct tnl_vport *tnl_vport;
1621                 struct hlist_head *hash_head;
1622                 struct hlist_node *n;
1623
1624                 hash_head = &port_table[i];
1625                 hlist_for_each_entry(tnl_vport, n, hash_head, hash_node) {
1626                         BUG();
1627                         goto out;
1628                 }
1629         }
1630 out:
1631         kfree(port_table);
1632 }