datapath: Remove support for Don't Fragment inheritance.
datapath/tunnel.c
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include <net/dsfield.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#endif
#include <net/route.h>
#include <net/xfrm.h>

#include "checksum.h"
#include "datapath.h"
#include "tunnel.h"
#include "vlan.h"
#include "vport.h"
#include "vport-internal_dev.h"

#define PORT_TABLE_SIZE  1024

static struct hlist_head *port_table __read_mostly;

/*
 * These are just used as an optimization: they don't require any kind of
 * synchronization because we could have just as easily read the value before
 * the port change happened.
 */
static unsigned int key_local_remote_ports __read_mostly;
static unsigned int key_remote_ports __read_mostly;
static unsigned int key_multicast_ports __read_mostly;
static unsigned int local_remote_ports __read_mostly;
static unsigned int remote_ports __read_mostly;
static unsigned int null_ports __read_mostly;
static unsigned int multicast_ports __read_mostly;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
        return vport_from_priv(tnl_vport);
}

static void free_config_rcu(struct rcu_head *rcu)
{
        struct tnl_mutable_config *c = container_of(rcu, struct tnl_mutable_config, rcu);
        kfree(c);
}

/* Frees the portion of 'mutable' that requires RTNL and thus can't happen
 * within an RCU callback.  Fortunately this part doesn't require waiting for
 * an RCU grace period.
 */
static void free_mutable_rtnl(struct tnl_mutable_config *mutable)
{
        ASSERT_RTNL();
        if (ipv4_is_multicast(mutable->key.daddr) && mutable->mlink) {
                struct in_device *in_dev;
                in_dev = inetdev_by_index(port_key_get_net(&mutable->key), mutable->mlink);
                if (in_dev)
                        ip_mc_dec_group(in_dev, mutable->key.daddr);
        }
}

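/* Replaces the port's mutable configuration.  The RTNL-only portion of the
 * old configuration is released immediately; the rest is freed after an RCU
 * grace period.  Caller must hold RTNL. */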
static void assign_config_rcu(struct vport *vport,
                              struct tnl_mutable_config *new_config)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *old_config;

        old_config = rtnl_dereference(tnl_vport->mutable);
        rcu_assign_pointer(tnl_vport->mutable, new_config);

        free_mutable_rtnl(old_config);
        call_rcu(&old_config->rcu, free_config_rcu);
}

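/* Returns the counter that tracks how many ports share the lookup pattern
 * of 'mutable': keyed vs. flow-based key, crossed with whether a local,
 * multicast, remote-only, or null destination address is configured. */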
static unsigned int *find_port_pool(const struct tnl_mutable_config *mutable)
{
        bool is_multicast = ipv4_is_multicast(mutable->key.daddr);

        if (mutable->flags & TNL_F_IN_KEY_MATCH) {
                if (mutable->key.saddr)
                        return &local_remote_ports;
                else if (is_multicast)
                        return &multicast_ports;
                else
                        return &remote_ports;
        } else {
                if (mutable->key.saddr)
                        return &key_local_remote_ports;
                else if (is_multicast)
                        return &key_multicast_ports;
                else if (mutable->key.daddr)
                        return &key_remote_ports;
                else
                        return &null_ports;
        }
}

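/* Hashes the whole lookup key so that ports with identical keys always map
 * to the same port_table bucket. */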
static u32 port_hash(const struct port_lookup_key *key)
{
        return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
}

static struct hlist_head *find_bucket(u32 hash)
{
        return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
}

static void port_table_add_port(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable;
        u32 hash;

        mutable = rtnl_dereference(tnl_vport->mutable);
        hash = port_hash(&mutable->key);
        hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));

        (*find_port_pool(mutable))++;
}

static void port_table_move_port(struct vport *vport,
                      struct tnl_mutable_config *new_mutable)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        u32 hash;

        hash = port_hash(&new_mutable->key);
        hlist_del_init_rcu(&tnl_vport->hash_node);
        hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));

        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
        assign_config_rcu(vport, new_mutable);
        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
}

static void port_table_remove_port(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);

        hlist_del_init_rcu(&tnl_vport->hash_node);

        (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
}

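/* Looks up a port whose configuration matches 'key' exactly.  May be called
 * under rcu_read_lock or RTNL; on a hit, '*pmutable' is set to the matching
 * port's configuration. */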
static struct vport *port_table_lookup(struct port_lookup_key *key,
                                       const struct tnl_mutable_config **pmutable)
{
        struct hlist_node *n;
        struct hlist_head *bucket;
        u32 hash = port_hash(key);
        struct tnl_vport *tnl_vport;

        bucket = find_bucket(hash);

        hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
                struct tnl_mutable_config *mutable;

                mutable = rcu_dereference_rtnl(tnl_vport->mutable);
                if (!memcmp(&mutable->key, key, PORT_KEY_LEN)) {
                        *pmutable = mutable;
                        return tnl_vport_to_vport(tnl_vport);
                }
        }

        return NULL;
}

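/* Finds the tunnel vport that should receive a packet with the given outer
 * addresses, key, and tunnel type.  Lookups proceed from most to least
 * specific: an exact key match with and without a local address, then a
 * wildcarded key, then multicast, and finally null ports.  The per-pattern
 * port counters let us skip lookups that cannot match anything. */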
struct vport *ovs_tnl_find_port(struct net *net, __be32 saddr, __be32 daddr,
                                __be64 key, int tunnel_type,
                                const struct tnl_mutable_config **mutable)
{
        struct port_lookup_key lookup;
        struct vport *vport;
        bool is_multicast = ipv4_is_multicast(saddr);

        port_key_set_net(&lookup, net);
        lookup.saddr = saddr;
        lookup.daddr = daddr;

        /* First try for exact match on in_key. */
        lookup.in_key = key;
        lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
        if (!is_multicast && key_local_remote_ports) {
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;
        }
        if (key_remote_ports) {
                lookup.saddr = 0;
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;

                lookup.saddr = saddr;
        }

        /* Then try matches that wildcard in_key. */
        lookup.in_key = 0;
        lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
        if (!is_multicast && local_remote_ports) {
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;
        }
        if (remote_ports) {
                lookup.saddr = 0;
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;
        }

        if (is_multicast) {
                lookup.saddr = 0;
                lookup.daddr = saddr;
                if (key_multicast_ports) {
                        lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
                        lookup.in_key = key;
                        vport = port_table_lookup(&lookup, mutable);
                        if (vport)
                                return vport;
                }
                if (multicast_ports) {
                        lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
                        lookup.in_key = 0;
                        vport = port_table_lookup(&lookup, mutable);
                        if (vport)
                                return vport;
                }
        }

        if (null_ports) {
                lookup.daddr = 0;
                lookup.saddr = 0;
                lookup.in_key = 0;
                lookup.tunnel_type = tunnel_type;
                vport = port_table_lookup(&lookup, mutable);
                if (vport)
                        return vport;
        }
        return NULL;
}

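/* If the outer IP header carried Congestion Experienced, propagate the CE
 * mark into the inner IPv4 or IPv6 header so that ECN signaling survives
 * decapsulation. */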
static void ecn_decapsulate(struct sk_buff *skb)
{
        if (unlikely(INET_ECN_is_ce(OVS_CB(skb)->tun_key->ipv4_tos))) {
                __be16 protocol = skb->protocol;

                skb_set_network_header(skb, ETH_HLEN);

                if (protocol == htons(ETH_P_8021Q)) {
                        if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                                return;

                        protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                        skb_set_network_header(skb, VLAN_ETH_HLEN);
                }

                if (protocol == htons(ETH_P_IP)) {
                        if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                            + sizeof(struct iphdr))))
                                return;

                        IP_ECN_set_ce(ip_hdr(skb));
                }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (protocol == htons(ETH_P_IPV6)) {
                        if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                            + sizeof(struct ipv6hdr))))
                                return;

                        IP6_ECN_set_ce(ipv6_hdr(skb));
                }
#endif
        }
}

/**
 *      ovs_tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb)
{
        struct ethhdr *eh;

        skb_reset_mac_header(skb);
        eh = eth_hdr(skb);

        if (likely(ntohs(eh->h_proto) >= 1536))
                skb->protocol = eh->h_proto;
        else
                skb->protocol = htons(ETH_P_802_2);

        skb_dst_drop(skb);
        nf_reset(skb);
        skb_clear_rxhash(skb);
        secpath_reset(skb);

        ecn_decapsulate(skb);
        vlan_set_tci(skb, 0);

        if (unlikely(compute_ip_summed(skb, false))) {
                kfree_skb(skb);
                return;
        }

        ovs_vport_receive(vport, skb);
}

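/* Returns true if 'addr' is a plausible unicast address, i.e. not
 * multicast, broadcast, loopback, or zeronet. */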
static bool check_ipv4_address(__be32 addr)
{
        if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr)
            || ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
                return false;

        return true;
}

static bool ipv4_should_icmp(struct sk_buff *skb)
{
        struct iphdr *old_iph = ip_hdr(skb);

        /* Don't respond to L2 broadcast. */
        if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
                return false;

        /* Don't respond to L3 broadcast or invalid addresses. */
        if (!check_ipv4_address(old_iph->daddr) ||
            !check_ipv4_address(old_iph->saddr))
                return false;

        /* Only respond to the first fragment. */
        if (old_iph->frag_off & htons(IP_OFFSET))
                return false;

        /* Don't respond to ICMP error messages. */
        if (old_iph->protocol == IPPROTO_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
                                                (old_iph->ihl << 2) +
                                                offsetof(struct icmphdr, type) -
                                                skb->data, sizeof(icmp_type),
                                                &icmp_type);

                if (!icmp_typep)
                        return false;

                if (*icmp_typep > NR_ICMP_TYPES
                        || (*icmp_typep <= ICMP_PARAMETERPROB
                                && *icmp_typep != ICMP_ECHOREPLY
                                && *icmp_typep != ICMP_ECHO))
                        return false;
        }

        return true;
}

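/* Appends an ICMP "fragmentation needed" message to 'nskb', quoting the
 * first 'payload_length' bytes of the offending packet in 'skb' and
 * advertising a next-hop MTU of 'mtu'. */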
static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                            unsigned int mtu, unsigned int payload_length)
{
        struct iphdr *iph, *old_iph = ip_hdr(skb);
        struct icmphdr *icmph;
        u8 *payload;

        iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
        icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
        payload = skb_put(nskb, payload_length);

        /* IP */
        iph->version            =       4;
        iph->ihl                =       sizeof(struct iphdr) >> 2;
        iph->tos                =       (old_iph->tos & IPTOS_TOS_MASK) |
                                        IPTOS_PREC_INTERNETCONTROL;
        iph->tot_len            =       htons(sizeof(struct iphdr)
                                              + sizeof(struct icmphdr)
                                              + payload_length);
        get_random_bytes(&iph->id, sizeof(iph->id));
        iph->frag_off           =       0;
        iph->ttl                =       IPDEFTTL;
        iph->protocol           =       IPPROTO_ICMP;
        iph->daddr              =       old_iph->saddr;
        iph->saddr              =       old_iph->daddr;

        ip_send_check(iph);

        /* ICMP */
        icmph->type             =       ICMP_DEST_UNREACH;
        icmph->code             =       ICMP_FRAG_NEEDED;
        icmph->un.gateway       =       htonl(mtu);
        icmph->checksum         =       0;

        nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmph->checksum = csum_fold(nskb->csum);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static bool ipv6_should_icmp(struct sk_buff *skb)
{
        struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
        int addr_type;
        int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
        u8 nexthdr = ipv6_hdr(skb)->nexthdr;
        __be16 frag_off;

        /* Check source address is valid. */
        addr_type = ipv6_addr_type(&old_ipv6h->saddr);
        if (addr_type & IPV6_ADDR_MULTICAST || addr_type == IPV6_ADDR_ANY)
                return false;

        /* Don't reply to unspecified addresses. */
        if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
                return false;

        /* Don't respond to ICMP error messages. */
        payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr, &frag_off);
        if (payload_off < 0)
                return false;

        if (nexthdr == NEXTHDR_ICMP) {
                u8 icmp_type, *icmp_typep;

                icmp_typep = skb_header_pointer(skb, payload_off +
                                                offsetof(struct icmp6hdr,
                                                        icmp6_type),
                                                sizeof(icmp_type), &icmp_type);

                if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
                        return false;
        }

        return true;
}

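/* Appends an ICMPv6 "packet too big" message to 'nskb', quoting the first
 * 'payload_length' bytes of the offending packet in 'skb' and advertising
 * an MTU of 'mtu'. */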
static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
                            unsigned int mtu, unsigned int payload_length)
{
        struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
        struct icmp6hdr *icmp6h;
        u8 *payload;

        ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
        icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
        payload = skb_put(nskb, payload_length);

        /* IPv6 */
        ipv6h->version          =       6;
        ipv6h->priority         =       0;
        memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
        ipv6h->payload_len      =       htons(sizeof(struct icmp6hdr)
                                              + payload_length);
        ipv6h->nexthdr          =       NEXTHDR_ICMP;
        ipv6h->hop_limit        =       IPV6_DEFAULT_HOPLIMIT;
        ipv6h->daddr            =       old_ipv6h->saddr;
        ipv6h->saddr            =       old_ipv6h->daddr;

        /* ICMPv6 */
        icmp6h->icmp6_type      =       ICMPV6_PKT_TOOBIG;
        icmp6h->icmp6_code      =       0;
        icmp6h->icmp6_cksum     =       0;
        icmp6h->icmp6_mtu       =       htonl(mtu);

        nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
        nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
                                            payload, payload_length,
                                            nskb->csum);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
                                                sizeof(struct icmp6hdr)
                                                + payload_length,
                                                ipv6h->nexthdr, nskb->csum);
}
#endif /* IPv6 */

bool ovs_tnl_frag_needed(struct vport *vport,
                         const struct tnl_mutable_config *mutable,
                         struct sk_buff *skb, unsigned int mtu)
{
        unsigned int eth_hdr_len = ETH_HLEN;
        unsigned int total_length = 0, header_length = 0, payload_length;
        struct ethhdr *eh, *old_eh = eth_hdr(skb);
        struct sk_buff *nskb;

        /* Sanity check */
        if (skb->protocol == htons(ETH_P_IP)) {
                if (mtu < IP_MIN_MTU)
                        return false;

                if (!ipv4_should_icmp(skb))
                        return true;
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (mtu < IPV6_MIN_MTU)
                        return false;

                /*
                 * In theory we should do PMTUD on IPv6 multicast messages but
                 * we don't have an address to send from so just fragment.
                 */
                if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
                        return false;

                if (!ipv6_should_icmp(skb))
                        return true;
        }
#endif
        else
                return false;

        /* Allocate */
        if (old_eh->h_proto == htons(ETH_P_8021Q))
                eth_hdr_len = VLAN_ETH_HLEN;

        payload_length = skb->len - eth_hdr_len;
        if (skb->protocol == htons(ETH_P_IP)) {
                header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
                total_length = min_t(unsigned int, header_length +
                                                   payload_length, 576);
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else {
                header_length = sizeof(struct ipv6hdr) +
                                sizeof(struct icmp6hdr);
                total_length = min_t(unsigned int, header_length +
                                                  payload_length, IPV6_MIN_MTU);
        }
#endif

        payload_length = total_length - header_length;

        nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
                             payload_length);
        if (!nskb)
                return false;

        skb_reserve(nskb, NET_IP_ALIGN);

        /* Ethernet / VLAN */
        eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
        memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
        memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
        nskb->protocol = eh->h_proto = old_eh->h_proto;
        if (old_eh->h_proto == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;

                vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
                vh->h_vlan_encapsulated_proto = skb->protocol;
        } else
                vlan_set_tci(nskb, vlan_get_tci(skb));
        skb_reset_mac_header(nskb);

        /* Protocol */
        if (skb->protocol == htons(ETH_P_IP))
                ipv4_build_icmp(skb, nskb, mtu, payload_length);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else
                ipv6_build_icmp(skb, nskb, mtu, payload_length);
#endif

        if (unlikely(compute_ip_summed(nskb, false))) {
                kfree_skb(nskb);
                return false;
        }

        ovs_vport_receive(vport, nskb);

        return true;
}

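/* Decides whether 'skb' can pass through the tunnel at the route's MTU and
 * what the outer DF bit should be.  Returns false if the packet exceeded
 * the path MTU and a "fragmentation needed" reply was generated; otherwise
 * returns true with the outer frag_off value stored in '*frag_offp'. */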
static bool check_mtu(struct sk_buff *skb,
                      struct vport *vport,
                      const struct tnl_mutable_config *mutable,
                      const struct rtable *rt, __be16 *frag_offp,
                      int tunnel_hlen)
{
        bool pmtud;
        __be16 frag_off;
        int mtu = 0;
        unsigned int packet_length = skb->len - ETH_HLEN;

        if (OVS_CB(skb)->tun_key->ipv4_dst) {
                pmtud = false;
                frag_off = OVS_CB(skb)->tun_key->tun_flags & OVS_TNL_F_DONT_FRAGMENT ?
                                  htons(IP_DF) : 0;
        } else {
                pmtud = mutable->flags & TNL_F_PMTUD;
                frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
        }

        /* Allow for one level of tagging in the packet length. */
        if (!vlan_tx_tag_present(skb) &&
            eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
                packet_length -= VLAN_HLEN;

        if (pmtud) {
                int vlan_header = 0;

                /* The tag needs to go in the packet regardless of where it
                 * currently is, so subtract it from the MTU.
                 */
                if (vlan_tx_tag_present(skb) ||
                    eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
                        vlan_header = VLAN_HLEN;

                mtu = dst_mtu(&rt_dst(rt))
                        - ETH_HLEN
                        - tunnel_hlen
                        - vlan_header;
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);

                if (pmtud && iph->frag_off & htons(IP_DF)) {
                        mtu = max(mtu, IP_MIN_MTU);

                        if (packet_length > mtu &&
                            ovs_tnl_frag_needed(vport, mutable, skb, mtu))
                                return false;
                }
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                /* IPv6 requires end hosts to do fragmentation
                 * if the packet is above the minimum MTU.
                 */
                if (packet_length > IPV6_MIN_MTU)
                        frag_off = htons(IP_DF);

                if (pmtud) {
                        mtu = max(mtu, IPV6_MIN_MTU);

                        if (packet_length > mtu &&
                            ovs_tnl_frag_needed(vport, mutable, skb, mtu))
                                return false;
                }
        }
#endif

        *frag_offp = frag_off;
        return true;
}

static struct rtable *find_route(struct net *net,
                __be32 *saddr, __be32 daddr, u8 ipproto,
                u8 tos)
{
        struct rtable *rt;
        /* The tunnel configuration keeps the full DSCP in the ToS byte, but
         * the Linux routing code expects only the RT_TOS bits. */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
        struct flowi fl = { .nl_u = { .ip4_u = {
                                        .daddr = daddr,
                                        .saddr = *saddr,
                                        .tos   = RT_TOS(tos) } },
                                        .proto = ipproto };

        if (unlikely(ip_route_output_key(net, &rt, &fl)))
                return ERR_PTR(-EADDRNOTAVAIL);
        *saddr = fl.nl_u.ip4_u.saddr;
        return rt;
#else
        struct flowi4 fl = { .daddr = daddr,
                             .saddr = *saddr,
                             .flowi4_tos = RT_TOS(tos),
                             .flowi4_proto = ipproto };

        rt = ip_route_output_key(net, &fl);
        *saddr = fl.saddr;
        return rt;
#endif
}

static bool need_linearize(const struct sk_buff *skb)
{
        int i;

        if (unlikely(skb_shinfo(skb)->frag_list))
                return true;

        /*
         * Generally speaking we should linearize if there are paged frags.
         * However, if all of the refcounts are 1 we know nobody else can
         * change them from underneath us and we can skip the linearization.
         */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
                        return true;

        return false;
}

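/* Prepares 'skb' for encapsulation: guarantees headroom for the outer
 * headers, software-segments GSO packets, and resolves pending partial
 * checksums.  May return a segment list linked through skb->next, or an
 * ERR_PTR on failure. */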
static struct sk_buff *handle_offloads(struct sk_buff *skb,
                                       const struct tnl_mutable_config *mutable,
                                       const struct rtable *rt,
                                       int tunnel_hlen)
{
        int min_headroom;
        int err;

        min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
                        + tunnel_hlen
                        + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) +
                                                16);
                err = pskb_expand_head(skb, max_t(int, head_delta, 0),
                                        0, GFP_ATOMIC);
                if (unlikely(err))
                        goto error_free;
        }

        forward_ip_summed(skb, true);

        if (skb_is_gso(skb)) {
                struct sk_buff *nskb;

                nskb = skb_gso_segment(skb, 0);
                if (IS_ERR(nskb)) {
                        kfree_skb(skb);
                        err = PTR_ERR(nskb);
                        goto error;
                }

                consume_skb(skb);
                skb = nskb;
        } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
                /* Pages aren't locked and could change at any time.
                 * If this happens after we compute the checksum, the
                 * checksum will be wrong.  We linearize now to avoid
                 * this problem.
                 */
                if (unlikely(need_linearize(skb))) {
                        err = __skb_linearize(skb);
                        if (unlikely(err))
                                goto error_free;
                }

                err = skb_checksum_help(skb);
                if (unlikely(err))
                        goto error_free;
        }

        set_ip_summed(skb, OVS_CSUM_NONE);

        return skb;

error_free:
        kfree_skb(skb);
error:
        return ERR_PTR(err);
}

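/* Transmits a chain of packets linked through skb->next, returning the
 * number of payload (non-tunnel-header) bytes handed off successfully.
 * Remaining fragments are freed as soon as one transmission fails. */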
static int send_frags(struct sk_buff *skb,
                      int tunnel_hlen)
{
        int sent_len;

        sent_len = 0;
        while (skb) {
                struct sk_buff *next = skb->next;
                int frag_len = skb->len - tunnel_hlen;
                int err;

                skb->next = NULL;
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

                err = ip_local_out(skb);
                skb = next;
                if (unlikely(net_xmit_eval(err)))
                        goto free_frags;
                sent_len += frag_len;
        }

        return sent_len;

free_frags:
        /*
         * There's no point in continuing to send fragments once one has been
         * dropped so just free the rest.  This may help improve the congestion
         * that caused the first packet to be dropped.
         */
        ovs_tnl_free_linked_skbs(skb);
        return sent_len;
}

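/* Egress point for generic tunnel code: validates the inner headers,
 * chooses the outer ToS, TTL, and DF bit, finds a route, prepends the outer
 * IP and tunnel headers, and transmits.  Returns the number of bytes sent,
 * recording a vport error if nothing could be sent. */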
int ovs_tnl_send(struct vport *vport, struct sk_buff *skb)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
        enum vport_err_type err = VPORT_E_TX_ERROR;
        struct rtable *rt;
        struct ovs_key_ipv4_tunnel tun_key;
        int sent_len = 0;
        int tunnel_hlen;
        __be16 frag_off = 0;
        __be32 daddr;
        __be32 saddr;
        u8 ttl;
        u8 tos;

        /* Validate the protocol headers before we try to use them. */
        if (skb->protocol == htons(ETH_P_8021Q) &&
            !vlan_tx_tag_present(skb)) {
                if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                        goto error_free;

                skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
                skb_set_network_header(skb, VLAN_ETH_HLEN);
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                    + sizeof(struct iphdr))))
                        skb->protocol = 0;
        }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
                    + sizeof(struct ipv6hdr))))
                        skb->protocol = 0;
        }
#endif

        /* If OVS_CB(skb)->tun_key is NULL, point it at the local tun_key here,
         * and zero it out.
         */
        if (!OVS_CB(skb)->tun_key) {
                memset(&tun_key, 0, sizeof(tun_key));
                OVS_CB(skb)->tun_key = &tun_key;
        }

        tunnel_hlen = tnl_vport->tnl_ops->hdr_len(mutable, OVS_CB(skb)->tun_key);
        if (unlikely(tunnel_hlen < 0)) {
                err = VPORT_E_TX_DROPPED;
                goto error_free;
        }
        tunnel_hlen += sizeof(struct iphdr);

        if (OVS_CB(skb)->tun_key->ipv4_dst) {
                daddr = OVS_CB(skb)->tun_key->ipv4_dst;
                saddr = OVS_CB(skb)->tun_key->ipv4_src;
                tos = OVS_CB(skb)->tun_key->ipv4_tos;
                ttl = OVS_CB(skb)->tun_key->ipv4_ttl;
        } else {
                u8 inner_tos;
                daddr = mutable->key.daddr;
                saddr = mutable->key.saddr;

                if (unlikely(!daddr)) {
                        /* Trying to send a packet from a null port without
                         * tunnel info?  Drop this packet. */
                        err = VPORT_E_TX_DROPPED;
                        goto error_free;
                }

                /* ToS */
                if (skb->protocol == htons(ETH_P_IP))
                        inner_tos = ip_hdr(skb)->tos;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        inner_tos = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
                else
                        inner_tos = 0;

                if (mutable->flags & TNL_F_TOS_INHERIT)
                        tos = inner_tos;
                else
                        tos = mutable->tos;

                tos = INET_ECN_encapsulate(tos, inner_tos);

                /* TTL */
                ttl = mutable->ttl;
                if (mutable->flags & TNL_F_TTL_INHERIT) {
                        if (skb->protocol == htons(ETH_P_IP))
                                ttl = ip_hdr(skb)->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                        else if (skb->protocol == htons(ETH_P_IPV6))
                                ttl = ipv6_hdr(skb)->hop_limit;
#endif
                }
        }

        /* Route lookup */
        rt = find_route(port_key_get_net(&mutable->key), &saddr, daddr,
                          tnl_vport->tnl_ops->ipproto, tos);
        if (IS_ERR(rt))
                goto error_free;

        /* Reset SKB */
        nf_reset(skb);
        secpath_reset(skb);
        skb_dst_drop(skb);
        skb_clear_rxhash(skb);

        /* Offloading */
        skb = handle_offloads(skb, mutable, rt, tunnel_hlen);
        if (IS_ERR(skb)) {
                skb = NULL;
                goto err_free_rt;
        }

        /* MTU */
        if (unlikely(!check_mtu(skb, vport, mutable, rt, &frag_off, tunnel_hlen))) {
                err = VPORT_E_TX_DROPPED;
                goto err_free_rt;
        }

        /* TTL fixup. */
        if (!OVS_CB(skb)->tun_key->ipv4_dst) {
                if (!(mutable->flags & TNL_F_TTL_INHERIT)) {
                        if (!ttl)
                                ttl = ip4_dst_hoplimit(&rt_dst(rt));
                }
        }

        while (skb) {
                struct iphdr *iph;
                struct sk_buff *next_skb = skb->next;
                skb->next = NULL;

                if (unlikely(vlan_deaccel_tag(skb)))
                        goto next;

                skb_push(skb, tunnel_hlen);
                skb_reset_network_header(skb);
                skb_set_transport_header(skb, sizeof(struct iphdr));

                if (next_skb)
                        skb_dst_set(skb, dst_clone(&rt_dst(rt)));
                else
                        skb_dst_set(skb, &rt_dst(rt));

                /* Push IP header. */
                iph = ip_hdr(skb);
                iph->version    = 4;
                iph->ihl        = sizeof(struct iphdr) >> 2;
                iph->protocol   = tnl_vport->tnl_ops->ipproto;
                iph->daddr      = daddr;
                iph->saddr      = saddr;
                iph->tos        = tos;
                iph->ttl        = ttl;
                iph->frag_off   = frag_off;
                ip_select_ident(iph, &rt_dst(rt), NULL);

                /* Push tunnel header. */
                skb = tnl_vport->tnl_ops->build_header(vport, mutable,
                                                        &rt_dst(rt), skb, tunnel_hlen);
                if (unlikely(!skb))
                        goto next;

                sent_len += send_frags(skb, tunnel_hlen);

next:
                skb = next_skb;
        }

        if (unlikely(sent_len == 0))
                ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);

        return sent_len;

err_free_rt:
        ip_rt_put(rt);
error_free:
        ovs_tnl_free_linked_skbs(skb);
        ovs_vport_record_error(vport, err);
        return sent_len;
}

static const struct nla_policy tnl_policy[OVS_TUNNEL_ATTR_MAX + 1] = {
        [OVS_TUNNEL_ATTR_FLAGS]    = { .type = NLA_U32 },
        [OVS_TUNNEL_ATTR_DST_IPV4] = { .type = NLA_U32 },
        [OVS_TUNNEL_ATTR_SRC_IPV4] = { .type = NLA_U32 },
        [OVS_TUNNEL_ATTR_OUT_KEY]  = { .type = NLA_U64 },
        [OVS_TUNNEL_ATTR_IN_KEY]   = { .type = NLA_U64 },
        [OVS_TUNNEL_ATTR_TOS]      = { .type = NLA_U8 },
        [OVS_TUNNEL_ATTR_TTL]      = { .type = NLA_U8 },
        [OVS_TUNNEL_ATTR_DST_PORT] = { .type = NLA_U16 },
};

/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be
 * zeroed. */
static int tnl_set_config(struct net *net, struct nlattr *options,
                          const struct tnl_ops *tnl_ops,
                          const struct vport *cur_vport,
                          struct tnl_mutable_config *mutable)
{
        const struct vport *old_vport;
        const struct tnl_mutable_config *old_mutable;
        struct nlattr *a[OVS_TUNNEL_ATTR_MAX + 1];
        int err;

        port_key_set_net(&mutable->key, net);
        mutable->key.tunnel_type = tnl_ops->tunnel_type;
        if (!options)
                goto out;

        err = nla_parse_nested(a, OVS_TUNNEL_ATTR_MAX, options, tnl_policy);
        if (err)
                return err;

        /* Process attributes possibly useful for null_ports first. */
        if (a[OVS_TUNNEL_ATTR_DST_PORT])
                mutable->dst_port =
                        htons(nla_get_u16(a[OVS_TUNNEL_ATTR_DST_PORT]));

        if (a[OVS_TUNNEL_ATTR_DST_IPV4])
                mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);

        /* Skip the rest if configuring a null_port. */
        if (!mutable->key.daddr)
                goto out;

        if (a[OVS_TUNNEL_ATTR_FLAGS])
                mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS])
                        & TNL_F_PUBLIC;

        if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) {
                if (ipv4_is_multicast(mutable->key.daddr))
                        return -EINVAL;
                mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
        }

        if (a[OVS_TUNNEL_ATTR_TOS]) {
                mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
                /* Reject ToS config with ECN bits set. */
                if (mutable->tos & INET_ECN_MASK)
                        return -EINVAL;
        }

        if (a[OVS_TUNNEL_ATTR_TTL])
                mutable->ttl = nla_get_u8(a[OVS_TUNNEL_ATTR_TTL]);

        if (!a[OVS_TUNNEL_ATTR_IN_KEY]) {
                mutable->key.tunnel_type |= TNL_T_KEY_MATCH;
                mutable->flags |= TNL_F_IN_KEY_MATCH;
        } else {
                mutable->key.tunnel_type |= TNL_T_KEY_EXACT;
                mutable->key.in_key = nla_get_be64(a[OVS_TUNNEL_ATTR_IN_KEY]);
        }

        if (!a[OVS_TUNNEL_ATTR_OUT_KEY])
                mutable->flags |= TNL_F_OUT_KEY_ACTION;
        else
                mutable->out_key = nla_get_be64(a[OVS_TUNNEL_ATTR_OUT_KEY]);

        mutable->mlink = 0;
        if (ipv4_is_multicast(mutable->key.daddr)) {
                struct net_device *dev;
                struct rtable *rt;
                __be32 saddr = mutable->key.saddr;

                rt = find_route(port_key_get_net(&mutable->key),
                             &saddr, mutable->key.daddr,
                             tnl_ops->ipproto, mutable->tos);
                if (IS_ERR(rt))
                        return -EADDRNOTAVAIL;
                dev = rt_dst(rt).dev;
                ip_rt_put(rt);
                if (__in_dev_get_rtnl(dev) == NULL)
                        return -EADDRNOTAVAIL;
                mutable->mlink = dev->ifindex;
                ip_mc_inc_group(__in_dev_get_rtnl(dev), mutable->key.daddr);
        }

out:
        old_vport = port_table_lookup(&mutable->key, &old_mutable);
        if (old_vport && old_vport != cur_vport)
                return -EEXIST;

        return 0;
}

struct vport *ovs_tnl_create(const struct vport_parms *parms,
                             const struct vport_ops *vport_ops,
                             const struct tnl_ops *tnl_ops)
{
        struct vport *vport;
        struct tnl_vport *tnl_vport;
        struct tnl_mutable_config *mutable;
        int initial_frag_id;
        int err;

        vport = ovs_vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                goto error;
        }

        tnl_vport = tnl_vport_priv(vport);

        strcpy(tnl_vport->name, parms->name);
        tnl_vport->tnl_ops = tnl_ops;

        mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable) {
                err = -ENOMEM;
                goto error_free_vport;
        }

        random_ether_addr(mutable->eth_addr);

        get_random_bytes(&initial_frag_id, sizeof(int));
        atomic_set(&tnl_vport->frag_id, initial_frag_id);

        err = tnl_set_config(ovs_dp_get_net(parms->dp), parms->options, tnl_ops,
                             NULL, mutable);
        if (err)
                goto error_free_mutable;

        rcu_assign_pointer(tnl_vport->mutable, mutable);

        port_table_add_port(vport);
        return vport;

error_free_mutable:
        free_mutable_rtnl(mutable);
        kfree(mutable);
error_free_vport:
        ovs_vport_free(vport);
error:
        return ERR_PTR(err);
}

int ovs_tnl_set_options(struct vport *vport, struct nlattr *options)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *old_mutable;
        struct tnl_mutable_config *mutable;
        int err;

        old_mutable = rtnl_dereference(tnl_vport->mutable);
        if (!old_mutable->key.daddr)
                return -EINVAL;

        mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable) {
                err = -ENOMEM;
                goto error;
        }

        /* Copy fields whose values should be retained. */
        mutable->seq = old_mutable->seq + 1;
        memcpy(mutable->eth_addr, old_mutable->eth_addr, ETH_ALEN);

        /* Parse the other attributes configured by userspace. */
        err = tnl_set_config(ovs_dp_get_net(vport->dp), options, tnl_vport->tnl_ops,
                             vport, mutable);
        if (err)
                goto error_free;

        if (port_hash(&mutable->key) != port_hash(&old_mutable->key))
                port_table_move_port(vport, mutable);
        else
                assign_config_rcu(vport, mutable);

        return 0;

error_free:
        free_mutable_rtnl(mutable);
        kfree(mutable);
error:
        return err;
}

int ovs_tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);

        if (mutable->dst_port && nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT,
                                             ntohs(mutable->dst_port)))
                goto nla_put_failure;

        /* Skip the rest for null_ports. */
        if (!mutable->key.daddr)
                return 0;

        if (nla_put_be32(skb, OVS_TUNNEL_ATTR_DST_IPV4, mutable->key.daddr))
                goto nla_put_failure;
        if (nla_put_u32(skb, OVS_TUNNEL_ATTR_FLAGS,
                        mutable->flags & TNL_F_PUBLIC))
                goto nla_put_failure;
        if (!(mutable->flags & TNL_F_IN_KEY_MATCH) &&
            nla_put_be64(skb, OVS_TUNNEL_ATTR_IN_KEY, mutable->key.in_key))
                goto nla_put_failure;
        if (!(mutable->flags & TNL_F_OUT_KEY_ACTION) &&
            nla_put_be64(skb, OVS_TUNNEL_ATTR_OUT_KEY, mutable->out_key))
                goto nla_put_failure;
        if (mutable->key.saddr &&
            nla_put_be32(skb, OVS_TUNNEL_ATTR_SRC_IPV4, mutable->key.saddr))
                goto nla_put_failure;
        if (mutable->tos && nla_put_u8(skb, OVS_TUNNEL_ATTR_TOS, mutable->tos))
                goto nla_put_failure;
        if (mutable->ttl && nla_put_u8(skb, OVS_TUNNEL_ATTR_TTL, mutable->ttl))
                goto nla_put_failure;

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static void free_port_rcu(struct rcu_head *rcu)
{
        struct tnl_vport *tnl_vport = container_of(rcu,
                                                   struct tnl_vport, rcu);

        kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
        ovs_vport_free(tnl_vport_to_vport(tnl_vport));
}

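/* Removes the port from the lookup table and schedules its memory to be
 * freed after an RCU grace period, so that concurrent readers can finish
 * safely. */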
void ovs_tnl_destroy(struct vport *vport)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *mutable;

        mutable = rtnl_dereference(tnl_vport->mutable);
        port_table_remove_port(vport);
        free_mutable_rtnl(mutable);
        call_rcu(&tnl_vport->rcu, free_port_rcu);
}

int ovs_tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        struct tnl_mutable_config *old_mutable, *mutable;

        old_mutable = rtnl_dereference(tnl_vport->mutable);
        mutable = kmemdup(old_mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
        if (!mutable)
                return -ENOMEM;

        old_mutable->mlink = 0;

        memcpy(mutable->eth_addr, addr, ETH_ALEN);
        assign_config_rcu(vport, mutable);

        return 0;
}

const char *ovs_tnl_get_name(const struct vport *vport)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        return tnl_vport->name;
}

const unsigned char *ovs_tnl_get_addr(const struct vport *vport)
{
        const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
        return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
}

void ovs_tnl_free_linked_skbs(struct sk_buff *skb)
{
        while (skb) {
                struct sk_buff *next = skb->next;
                kfree_skb(skb);
                skb = next;
        }
}

int ovs_tnl_init(void)
{
        int i;

        port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
                             GFP_KERNEL);
        if (!port_table)
                return -ENOMEM;

        for (i = 0; i < PORT_TABLE_SIZE; i++)
                INIT_HLIST_HEAD(&port_table[i]);

        return 0;
}

void ovs_tnl_exit(void)
{
        kfree(port_table);
}