7b23d1aa38baf99cc8b6c2b4a888b7908353878b
[sliver-openvswitch.git] / datapath / linux-2.6 / compat-2.6 / ip_gre.c
1 /* ip_gre driver port to Linux 2.6.18 and greater */
2
3 #include <linux/version.h>
4 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5 #define HAVE_NETDEV_STATS
6 #endif
7 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
8 #define HAVE_NETDEV_HEADER_OPS
9 #endif
10 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
11 #define HAVE_NETDEV_NEEDED_HEADROOM
12 #endif
13
14 /*
15  *      Linux NET3:     GRE over IP protocol decoder.
16  *
17  *      Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
18  *
19  *      This program is free software; you can redistribute it and/or
20  *      modify it under the terms of the GNU General Public License
21  *      as published by the Free Software Foundation; either version
22  *      2 of the License, or (at your option) any later version.
23  *
24  */
25
26 #include <linux/capability.h>
27 #include <linux/ethtool.h>
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/kernel.h>
31 #include <asm/uaccess.h>
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/in.h>
35 #include <linux/tcp.h>
36 #include <linux/udp.h>
37 #include <linux/if_arp.h>
38 #include <linux/mroute.h>
39 #include <linux/init.h>
40 #include <linux/in6.h>
41 #include <linux/inetdevice.h>
42 #include <linux/igmp.h>
43 #include <linux/netfilter_ipv4.h>
44 #include <linux/etherdevice.h>
45 #include <linux/if_ether.h>
46
47 #include <net/sock.h>
48 #include <net/ip.h>
49 #include <net/icmp.h>
50 #include <net/protocol.h>
51 #include <net/ipip.h>
52 #include <net/arp.h>
53 #include <net/checksum.h>
54 #include <net/dsfield.h>
55 #include <net/inet_ecn.h>
56 #include <net/xfrm.h>
57 #include <net/net_namespace.h>
58 #include <net/netns/generic.h>
59
60 #ifdef CONFIG_IPV6
61 #include <net/ipv6.h>
62 #include <net/ip6_fib.h>
63 #include <net/ip6_route.h>
64 #endif
65
66 #include "compat.h"
67 #include "openvswitch/gre.h"
68
69 #ifndef GRE_IOCTL_ONLY
70 #include <net/rtnetlink.h>
71 #endif
72
73 /*
74    Problems & solutions
75    --------------------
76
77    1. The most important issue is detecting local dead loops.
78    They would cause complete host lockup in transmit, which
79    would be "resolved" by stack overflow or, if queueing is enabled,
80    with infinite looping in net_bh.
81
82    We cannot track such dead loops during route installation,
83    it is infeasible task. The most general solutions would be
84    to keep skb->encapsulation counter (sort of local ttl),
85    and silently drop packet when it expires. It is the best
   solution, but it supposes maintaining a new variable in ALL
   skb, even if no tunneling is used.
88
89    Current solution: HARD_TX_LOCK lock breaks dead loops.
90
91
92
93    2. Networking dead loops would not kill routers, but would really
94    kill network. IP hop limit plays role of "t->recursion" in this case,
95    if we copy it from packet being encapsulated to upper header.
96    It is very good solution, but it introduces two problems:
97
98    - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
99      do not work over tunnels.
100    - traceroute does not work. I planned to relay ICMP from tunnel,
101      so that this problem would be solved and traceroute output
102      would even more informative. This idea appeared to be wrong:
103      only Linux complies to rfc1812 now (yes, guys, Linux is the only
104      true router now :-)), all routers (at least, in neighbourhood of mine)
105      return only 8 bytes of payload. It is the end.
106
107    Hence, if we want that OSPF worked or traceroute said something reasonable,
108    we should search for another solution.
109
110    One of them is to parse packet trying to detect inner encapsulation
111    made by our node. It is difficult or even impossible, especially,
   taking into account fragmentation. To be short, it is not a solution at all.
113
114    Current solution: The solution was UNEXPECTEDLY SIMPLE.
115    We force DF flag on tunnels with preconfigured hop limit,
116    that is ALL. :-) Well, it does not remove the problem completely,
117    but exponential growth of network traffic is changed to linear
118    (branches, that exceed pmtu are pruned) and tunnel mtu
   quickly degrades to a value <68, where looping stops.
120    Yes, it is not good if there exists a router in the loop,
121    which does not force DF, even when encapsulating packets have DF set.
122    But it is not our problem! Nobody could accuse us, we made
123    all that we could make. Even if it is your gated who injected
124    fatal route to network, even if it were you who configured
125    fatal static route: you are innocent. :-)
126
127
128
129    3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
130    practically identical code. It would be good to glue them
131    together, but it is not very evident, how to make them modular.
132    sit is integral part of IPv6, ipip and gre are naturally modular.
133    We could extract common parts (hash table, ioctl etc)
134    to a separate module (ip_tunnel.c).
135
136    Alexey Kuznetsov.
137  */
138
139 #ifndef GRE_IOCTL_ONLY
140 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
141 static struct rtnl_link_ops ipgre_tap_ops __read_mostly;
142 #endif
143 static int ipgre_tunnel_init(struct net_device *dev);
144 static void ipgre_tunnel_setup(struct net_device *dev);
145 static void ipgre_tap_setup(struct net_device *dev);
146 static int ipgre_tunnel_bind_dev(struct net_device *dev);
147
148 #define HASH_SIZE  16
149
/* Slot index into each namespace's generic-pointer array; assigned when
 * the pernet subsystem is registered. */
static int ipgre_net_id;

/* Per-network-namespace state: the four tunnel hash tables (indexed by
 * how specific the (remote, local) match is -- see the "4 hash tables"
 * comment below) plus the catch-all fallback tunnel device. */
struct ipgre_net {
	struct ip_tunnel *tunnels[4][HASH_SIZE];

	struct net_device *fb_tunnel_dev;
};
156
157 /* Tunnel hash table */
158
159 /*
160    4 hash tables:
161
162    3: (remote,local)
163    2: (remote,*)
164    1: (*,local)
165    0: (*,*)
166
167    We require exact key match i.e. if a key is present in packet
168    it will match only tunnel with the same key; if it is not present,
169    it will match only keyless tunnel.
170
   All keyless packets that do not match a configured keyless tunnel
   will match the fallback tunnel.
173  */
174
175 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
176
177 #define tunnels_r_l     tunnels[3]
178 #define tunnels_r       tunnels[2]
179 #define tunnels_l       tunnels[1]
180 #define tunnels_wc      tunnels[0]
181 /*
182  * Locking : hash tables are protected by RCU and a spinlock
183  */
184 static DEFINE_SPINLOCK(ipgre_lock);
185
186 #define for_each_ip_tunnel_rcu(start) \
187         for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
188
189 /* Given src, dst and key, find appropriate for input tunnel. */
190
/*
 * Given src, dst and key of a received packet, find the appropriate
 * input tunnel.
 *
 * The four hash tables are probed from most specific to least:
 * (remote,local), (remote,*), (*,local), (*,*).  An exact key match is
 * required in every case.  Within each bucket, a tunnel that matches
 * both the ingress ifindex and the device type wins immediately
 * (score 0); otherwise the best near-miss seen across all buckets is
 * remembered (score bit 0 set: link differs, bit 1 set: device type
 * differs).  If nothing matches, fall back to the namespace's fallback
 * device, provided it is up.
 *
 * NOTE: for_each_ip_tunnel_rcu() iterates using the local variable 't';
 * the caller must hold rcu_read_lock().
 */
static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
					      __be32 remote, __be32 local,
					      __be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned h0 = HASH(remote);
	unsigned h1 = HASH(key);
	struct ip_tunnel *t, *cand = NULL;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	/* ETH_P_TEB payloads belong to Ethernet (gretap) devices. */
	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
		       ARPHRD_ETHER : ARPHRD_IPGRE;
	int score, cand_score = 4;

	/* Pass 1: tunnels bound to both remote and local address. */
	for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		/* A plain ARPHRD_IPGRE device may carry either payload
		 * type; any other device type must match exactly. */
		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	/* Pass 2: tunnels bound to the remote address only. */
	for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
		if (remote != t->parms.iph.daddr ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	/* Pass 3: tunnels bound to the local address only.  A multicast
	 * destination configured as the tunnel's daddr also counts as
	 * "local" here. */
	for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
		if ((local != t->parms.iph.saddr &&
		     (local != t->parms.iph.daddr ||
		      !ipv4_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	/* Pass 4: wildcard tunnels -- key match only. */
	for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand != NULL)
		return cand;

	/* Nothing matched: hand the packet to the fallback device. */
	dev = ign->fb_tunnel_dev;
	if (dev->flags & IFF_UP)
		return netdev_priv(dev);

	return NULL;
}
312
313 static struct ip_tunnel **__ipgre_bucket(struct ipgre_net *ign,
314                 struct ip_tunnel_parm *parms)
315 {
316         __be32 remote = parms->iph.daddr;
317         __be32 local = parms->iph.saddr;
318         __be32 key = parms->i_key;
319         unsigned h = HASH(key);
320         int prio = 0;
321
322         if (local)
323                 prio |= 1;
324         if (remote && !ipv4_is_multicast(remote)) {
325                 prio |= 2;
326                 h ^= HASH(remote);
327         }
328
329         return &ign->tunnels[prio][h];
330 }
331
/* Hash bucket for tunnel 't', keyed on its configured parameters. */
static inline struct ip_tunnel **ipgre_bucket(struct ipgre_net *ign,
		struct ip_tunnel *t)
{
	return __ipgre_bucket(ign, &t->parms);
}
337
/*
 * Insert tunnel 't' at the head of its hash bucket.
 *
 * The plain "t->next = *tp" store happens before 't' is visible to
 * readers; rcu_assign_pointer() then publishes it with the required
 * memory barrier.  ipgre_lock serializes concurrent writers.
 */
static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel **tp = ipgre_bucket(ign, t);

	spin_lock_bh(&ipgre_lock);
	t->next = *tp;
	rcu_assign_pointer(*tp, t);
	spin_unlock_bh(&ipgre_lock);
}
347
/*
 * Remove tunnel 't' from its hash bucket, if present.
 *
 * Only the unlinking store itself is performed under ipgre_lock; the
 * chain walk is done without it.  Note the tunnel is not freed here --
 * RCU readers may still be traversing it.
 */
static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel **tp;

	for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) {
		if (t == *tp) {
			spin_lock_bh(&ipgre_lock);
			*tp = t->next;
			spin_unlock_bh(&ipgre_lock);
			break;
		}
	}
}
361
362 static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
363                                            struct ip_tunnel_parm *parms,
364                                            int type)
365 {
366         __be32 remote = parms->iph.daddr;
367         __be32 local = parms->iph.saddr;
368         __be32 key = parms->i_key;
369         int link = parms->link;
370         struct ip_tunnel *t, **tp;
371         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
372
373         for (tp = __ipgre_bucket(ign, parms); (t = *tp) != NULL; tp = &t->next)
374                 if (local == t->parms.iph.saddr &&
375                     remote == t->parms.iph.daddr &&
376                     key == t->parms.i_key &&
377                     link == t->parms.link &&
378                     type == t->dev->type)
379                         break;
380
381         return t;
382 }
383
/*
 * Look up a tunnel matching 'parms' (Ethernet type for gretap, IPGRE
 * otherwise); if none exists and 'create' is set, allocate, register
 * and link a new tunnel device.
 *
 * Returns the (existing or new) tunnel, or NULL when not found and
 * !create, or on allocation/registration failure.
 * Caller context: assumes RTNL-style exclusion for register_netdevice()
 * -- TODO confirm against callers.
 */
static struct ip_tunnel * ipgre_tunnel_locate(struct net *net,
		struct ip_tunnel_parm *parms, int gretap, int create)
{
	struct ip_tunnel *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	t = ipgre_tunnel_find(net, parms, gretap ? ARPHRD_ETHER : ARPHRD_IPGRE);
	if (t || !create)
		return t;

	/* Use the caller-supplied name, or the "gre%d" template so that
	 * dev_alloc_name() picks the next free index below. */
	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		sprintf(name, "gre%%d");

	dev = alloc_netdev(sizeof(*t), name, gretap ? ipgre_tap_setup
						    : ipgre_tunnel_setup);
	if (!dev)
	  return NULL;

	dev_net_set(dev, net);

	if (strchr(name, '%')) {
		if (dev_alloc_name(dev, name) < 0)
			goto failed_free;
	}

	/* gretap devices act like Ethernet ports; give them a MAC. */
	if (gretap)
		random_ether_addr(dev->dev_addr);

#ifndef GRE_IOCTL_ONLY
	dev->rtnl_link_ops = gretap ? &ipgre_tap_ops : &ipgre_link_ops;
#endif
	nt = netdev_priv(dev);
	nt->parms = *parms;

	dev->mtu = ipgre_tunnel_bind_dev(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	/* Reference is dropped in ipgre_tunnel_uninit(). */
	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}
435
/*
 * Device uninit hook: unlink the tunnel from the per-netns hash table
 * and drop the reference taken by ipgre_tunnel_locate().
 */
static void ipgre_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	ipgre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}
444
445
/*
 * ICMP error handler for the GRE protocol: parse the returned (outer)
 * IP + GRE header fragment, locate the tunnel it belongs to, and
 * record the error in the tunnel's soft state (err_count/err_time).
 */
static void ipgre_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. It means, that precise relaying of
   ICMP in the real Internet is absolutely infeasible.

   Moreover, Cisco "wise men" put GRE key to the third word
   in GRE header. It makes impossible maintaining even soft state for keyed
   GRE tunnels with enabled checksum. Tell them "thank you".

   Well, I wonder, rfc1812 was written by Cisco employee,
   what the hell these idiots break standards established
   by themselves???
 */

	/* skb->data points at the outer IP header quoted in the ICMP
	 * payload; 'p' is the start of the GRE header after it. */
	struct iphdr *iph = (struct iphdr *)skb->data;
	__be16       *p = (__be16*)(skb->data+(iph->ihl<<2));
	int grehlen = (iph->ihl<<2) + 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	__be16 flags;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		/* Only version 0, non-routing GRE is understood. */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		/* Account for the optional checksum and key words so the
		 * key can be located at the end of the header below. */
		if (flags&GRE_KEY) {
			grehlen += 4;
			if (flags&GRE_CSUM)
				grehlen += 4;
		}
	}

	/* If only 8 bytes returned, keyed message will be dropped here */
	if (skb_headlen(skb) < grehlen)
		return;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		case ICMP_FRAG_NEEDED:
			/* Soft state for pmtu is maintained by IP core. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;
	}

	rcu_read_lock();
	/* Outer daddr/saddr are swapped: we look up by what *we* sent. */
	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
				flags & GRE_KEY ?
				*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
				p[1]);
	/* Ignore errors for NBMA (daddr == 0) and multicast tunnels. */
	if (t == NULL || t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		goto out;

	/* With inherited TTL (ttl == 0), TTL-exceeded is expected. */
	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	/* Count errors within the timeout window, else restart at 1. */
	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	rcu_read_unlock();
	return;
}
534
535 static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
536 {
537         if (INET_ECN_is_ce(iph->tos)) {
538                 if (skb->protocol == htons(ETH_P_IP)) {
539                         IP_ECN_set_ce(ip_hdr(skb));
540                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
541                         IP6_ECN_set_ce(ipv6_hdr(skb));
542                 }
543         }
544 }
545
546 static inline u8
547 ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
548 {
549         u8 inner = 0;
550         if (skb->protocol == htons(ETH_P_IP))
551                 inner = old_iph->tos;
552         else if (skb->protocol == htons(ETH_P_IPV6))
553                 inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
554         return INET_ECN_encapsulate(tos, inner);
555 }
556
/*
 * GRE receive handler: validate the GRE header (version, optional
 * checksum/key/sequence fields), find the matching tunnel, strip the
 * header and re-inject the inner packet via netif_rx().  Sends
 * port-unreachable when no tunnel matches.  Always returns 0 and
 * consumes the skb.
 */
static int ipgre_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8     *h;
	__be16    flags;
	__sum16   csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip_tunnel *tunnel;
	int    offset = 4;		/* base GRE header: flags + proto */
	__be16 gre_proto;
	unsigned int len;

	/* 16 = largest header we parse directly: 4 base + 4 csum
	 * + 4 key + 4 seq. */
	if (!pskb_may_pull(skb, 16))
		goto drop_nolock;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16*)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop_nolock;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				/* csum == 0 means hardware already
				 * verified the packet checksum. */
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32*)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32*)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	rcu_read_lock();
	if ((tunnel = ipgre_tunnel_lookup(skb->dev,
					  iph->saddr, iph->daddr, key,
					  gre_proto))) {
		struct net_device_stats *stats;
#ifdef HAVE_NETDEV_STATS
		stats = &tunnel->dev->stats;
#else
		stats = &tunnel->stat;
#endif

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			/* First nibble != 4 => WCCPv2 redirect header. */
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		/* Strip the GRE header and fix up the receive checksum. */
		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (skb_rtable(skb)->fl.iif == 0)
				goto drop;
			stats->multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		/* Reject a bad checksum, or a missing checksum when the
		 * tunnel requires one. */
		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			stats->rx_crc_errors++;
			stats->rx_errors++;
			goto drop;
		}
		/* Enforce in-order delivery when sequencing is enabled. */
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				stats->rx_fifo_errors++;
				stats->rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		len = skb->len;

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				stats->rx_length_errors++;
				stats->rx_errors++;
				goto drop;
			}

			/* Re-read iph: pskb_may_pull may have moved data. */
			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		stats->rx_packets++;
		stats->rx_bytes += len;
		skb->dev = tunnel->dev;
		skb_dst_drop(skb);
		nf_reset(skb);

		skb_reset_network_header(skb);
		ipgre_ecn_decapsulate(iph, skb);

		netif_rx(skb);
		rcu_read_unlock();
		return(0);
	}
	/* No tunnel: tell the sender this GRE flow is unreachable. */
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	rcu_read_unlock();
drop_nolock:
	kfree_skb(skb);
	return(0);
}
700
701 static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
702 {
703         struct ip_tunnel *tunnel = netdev_priv(dev);
704         struct net_device_stats *stats;
705 #ifdef HAVE_NETDEV_QUEUE_STATS
706         struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
707 #endif
708         struct iphdr  *old_iph = ip_hdr(skb);
709         struct iphdr  *tiph;
710         u8     tos;
711         __be16 df;
712         struct rtable *rt;                      /* Route to the other host */
713         struct net_device *tdev;                /* Device to other host */
714         struct iphdr  *iph;                     /* Our new IP header */
715         unsigned int max_headroom;              /* The extra header space needed */
716         int    gre_hlen;
717         __be32 dst;
718         int    mtu;
719
720 #ifdef HAVE_NETDEV_STATS
721         stats = &dev->stats;
722 #else
723         stats = &tunnel->stat;
724 #endif
725
726         if (dev->type == ARPHRD_ETHER)
727                 IPCB(skb)->flags = 0;
728
729 #ifdef HAVE_NETDEV_HEADER_OPS
730         if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
731 #else
732         if (dev->hard_header && dev->type == ARPHRD_IPGRE) {
733 #endif
734                 gre_hlen = 0;
735                 tiph = (struct iphdr *)skb->data;
736         } else {
737                 gre_hlen = tunnel->hlen;
738                 tiph = &tunnel->parms.iph;
739         }
740
741         if ((dst = tiph->daddr) == 0) {
742                 /* NBMA tunnel */
743
744                 if (skb_dst(skb) == NULL) {
745                         stats->tx_fifo_errors++;
746                         goto tx_error;
747                 }
748
749                 if (skb->protocol == htons(ETH_P_IP)) {
750                         rt = skb_rtable(skb);
751                         if ((dst = rt->rt_gateway) == 0)
752                                 goto tx_error_icmp;
753                 }
754 #ifdef CONFIG_IPV6
755                 else if (skb->protocol == htons(ETH_P_IPV6)) {
756                         struct in6_addr *addr6;
757                         int addr_type;
758                         struct neighbour *neigh = skb_dst(skb)->neighbour;
759
760                         if (neigh == NULL)
761                                 goto tx_error;
762
763                         addr6 = (struct in6_addr *)&neigh->primary_key;
764                         addr_type = ipv6_addr_type(addr6);
765
766                         if (addr_type == IPV6_ADDR_ANY) {
767                                 addr6 = &ipv6_hdr(skb)->daddr;
768                                 addr_type = ipv6_addr_type(addr6);
769                         }
770
771                         if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
772                                 goto tx_error_icmp;
773
774                         dst = addr6->s6_addr32[3];
775                 }
776 #endif
777                 else
778                         goto tx_error;
779         }
780
781         tos = tiph->tos;
782         if (tos == 1) {
783                 tos = 0;
784                 if (skb->protocol == htons(ETH_P_IP))
785                         tos = old_iph->tos;
786         }
787
788         {
789                 struct flowi fl = { .oif = tunnel->parms.link,
790                                     .nl_u = { .ip4_u =
791                                               { .daddr = dst,
792                                                 .saddr = tiph->saddr,
793                                                 .tos = RT_TOS(tos) } },
794                                     .proto = IPPROTO_GRE };
795                 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
796                         stats->tx_carrier_errors++;
797                         goto tx_error;
798                 }
799         }
800         tdev = rt->u.dst.dev;
801
802         if (tdev == dev) {
803                 ip_rt_put(rt);
804                 stats->collisions++;
805                 goto tx_error;
806         }
807
808         df = tiph->frag_off;
809         if (df)
810 #ifdef HAVE_NETDEV_NEEDED_HEADROOM
811                 mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen;
812 #else
813                 mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
814 #endif
815         else
816                 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
817
818         if (skb_dst(skb))
819                 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
820
821         /* XXX: Temporarily allow fragmentation since DF doesn't
822          * do the right thing with bridging. */
823 /*
824         if (skb->protocol == htons(ETH_P_IP)) {
825                 df |= (old_iph->frag_off&htons(IP_DF));
826
827                 if ((old_iph->frag_off&htons(IP_DF)) &&
828                     mtu < ntohs(old_iph->tot_len)) {
829                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
830                         ip_rt_put(rt);
831                         goto tx_error;
832                 }
833         }
834 #ifdef CONFIG_IPV6
835         else if (skb->protocol == htons(ETH_P_IPV6)) {
836                 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
837
838                 if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
839                         if ((tunnel->parms.iph.daddr &&
840                              !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
841                             rt6->rt6i_dst.plen == 128) {
842                                 rt6->rt6i_flags |= RTF_MODIFIED;
843                                 skb_dst(skb)->metrics[RTAX_MTU-1] = mtu;
844                         }
845                 }
846
847                 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
848                         icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
849                         ip_rt_put(rt);
850                         goto tx_error;
851                 }
852         }
853 #endif
854 */
855         if (tunnel->err_count > 0) {
856                 if (time_before(jiffies,
857                                 tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
858                         tunnel->err_count--;
859
860                         dst_link_failure(skb);
861                 } else
862                         tunnel->err_count = 0;
863         }
864
865         max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
866
867         if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
868             (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
869                 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
870                 if (!new_skb) {
871                         ip_rt_put(rt);
872 #ifdef HAVE_NETDEV_QUEUE_STATS
873                         txq->tx_dropped++;
874 #else
875                         stats->tx_dropped++;
876 #endif
877                         dev_kfree_skb(skb);
878                         return NETDEV_TX_OK;
879                 }
880                 if (skb->sk)
881                         skb_set_owner_w(new_skb, skb->sk);
882                 dev_kfree_skb(skb);
883                 skb = new_skb;
884                 old_iph = ip_hdr(skb);
885         }
886
887         skb_reset_transport_header(skb);
888         skb_push(skb, gre_hlen);
889         skb_reset_network_header(skb);
890         memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
891         IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
892                               IPSKB_REROUTED);
893         skb_dst_drop(skb);
894         skb_dst_set(skb, &rt->u.dst);
895
896         /*
897          *      Push down and install the IPIP header.
898          */
899
900         iph                     =       ip_hdr(skb);
901         iph->version            =       4;
902         iph->ihl                =       sizeof(struct iphdr) >> 2;
903         iph->frag_off           =       df;
904         iph->protocol           =       IPPROTO_GRE;
905         iph->tos                =       ipgre_ecn_encapsulate(tos, old_iph, skb);
906         iph->daddr              =       rt->rt_dst;
907         iph->saddr              =       rt->rt_src;
908
909         if ((iph->ttl = tiph->ttl) == 0) {
910                 if (skb->protocol == htons(ETH_P_IP))
911                         iph->ttl = old_iph->ttl;
912 #ifdef CONFIG_IPV6
913                 else if (skb->protocol == htons(ETH_P_IPV6))
914                         iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
915 #endif
916                 else
917                         iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
918         }
919
920         ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
921         ((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
922                                    htons(ETH_P_TEB) : skb->protocol;
923
924         if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
925                 __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
926
927                 if (tunnel->parms.o_flags&GRE_SEQ) {
928                         ++tunnel->o_seqno;
929                         *ptr = htonl(tunnel->o_seqno);
930                         ptr--;
931                 }
932                 if (tunnel->parms.o_flags&GRE_KEY) {
933                         *ptr = tunnel->parms.o_key;
934                         ptr--;
935                 }
936                 if (tunnel->parms.o_flags&GRE_CSUM) {
937                         *ptr = 0;
938                         *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
939                 }
940         }
941
942         nf_reset(skb);
943
944         IPTUNNEL_XMIT();
945         return NETDEV_TX_OK;
946
947 tx_error_icmp:
948         dst_link_failure(skb);
949
950 tx_error:
951         stats->tx_errors++;
952         dev_kfree_skb(skb);
953         return NETDEV_TX_OK;
954 }
955
/* Determine the device underlying this tunnel and derive a suitable
 * MTU and link-layer headroom from it.  Returns the computed MTU;
 * the caller stores it in dev->mtu. */
static int ipgre_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;
	int hlen = LL_MAX_HEADER;	/* pessimistic default if no underlay is found */
	int mtu = ETH_DATA_LEN;
	int addend = sizeof(struct iphdr) + 4;	/* outer IP header + basic GRE header */

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */

	if (iph->daddr) {
		/* Route toward the configured remote endpoint to find the
		 * egress device. */
		struct flowi fl = { .oif = tunnel->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.saddr = iph->saddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_GRE };
		struct rtable *rt;
		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
			tdev = rt->u.dst.dev;
			ip_rt_put(rt);
		}

		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	/* No route found: fall back to the explicitly configured link. */
	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
#ifdef HAVE_NETDEV_NEEDED_HEADROOM
		hlen = tdev->hard_header_len + tdev->needed_headroom;
#else
		hlen = tdev->hard_header_len;
#endif
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
#ifdef HAVE_NETDEV_NEEDED_HEADROOM
	dev->needed_headroom = hlen + addend;
	mtu -= dev->hard_header_len + addend;
#else
	/* Without needed_headroom, encapsulation space is folded into
	 * hard_header_len instead. */
	dev->hard_header_len = hlen + addend;
	mtu -= addend;
#endif
	tunnel->hlen = addend;

	if (mtu < 68)
		mtu = 68;	/* RFC 791 minimum IPv4 MTU */

	/* XXX: Set MTU to the maximum possible value.  If we are bridged to a
	* device with a larger MTU then packets will be dropped. */
	mtu = 65482;

	return mtu;
}
1027
/* Tunnel configuration ioctls: get/add/change/delete GRE tunnels.  The
 * OVS-specific SIOCADDGRETAP/SIOCCHGGRETAP variants create or modify
 * Ethernet-over-GRE (gretap) devices.  Parameters are exchanged with
 * userspace as a struct ip_tunnel_parm via ifr->ifr_ifru.ifru_data. */
static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int add_tunnel, gretap;

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			/* Queries on the fallback device look up the tunnel
			 * matching the user-supplied parameters. */
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(net, &p, false, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
	case SIOCADDGRETAP:
	case SIOCCHGGRETAP:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		/* Reject anything that is not plain IPv4-encapsulated GRE
		 * without GRE routing/version extensions. */
		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;

		add_tunnel = (cmd == SIOCADDTUNNEL || cmd == SIOCADDGRETAP);
		gretap = (cmd == SIOCADDGRETAP || cmd == SIOCCHGGRETAP);

		/* A fixed TTL implies path-MTU discovery (DF set). */
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		/* Ignore keys that the flag words do not declare. */
		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(net, &p, gretap, add_tunnel);

		if (dev != ign->fb_tunnel_dev && !add_tunnel) {
			if (t != NULL) {
				/* Parameters collide with a different device. */
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned nflags = 0;

				t = netdev_priv(dev);

				if (ipv4_is_multicast(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				/* The point-to-point/broadcast nature of the
				 * device cannot change after creation. */
				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				/* Re-hash the tunnel under its new endpoint
				 * addresses and keys. */
				ipgre_tunnel_unlink(ign, t);
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (!add_tunnel) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					/* Underlay changed: recompute MTU. */
					t->parms.link = p.link;
					dev->mtu = ipgre_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			/* Return the effective parameters to userspace. */
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (add_tunnel ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(net, &p, false, 0)) == NULL)
				goto done;
			err = -EPERM;
			/* The fallback device itself may not be deleted. */
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
1164
1165 #ifndef HAVE_NETDEV_STATS
1166 static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
1167 {
1168         return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
1169 }
1170 #endif
1171
1172 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1173 {
1174         struct ip_tunnel *tunnel = netdev_priv(dev);
1175         if (new_mtu < 68 ||
1176 #ifdef HAVE_NETDEV_NEEDED_HEADROOM
1177         new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
1178 #else
1179         new_mtu > 0xFFF8 - tunnel->hlen)
1180 #endif
1181                 return -EINVAL;
1182         dev->mtu = new_mtu;
1183         return 0;
1184 }
1185
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.


   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).
1195
1196    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1197    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1198
1199    ping -t 255 224.66.66.66
1200
1201    If nobody answers, mbone does not work.
1202
1203    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
1204    ip addr add 10.66.66.<somewhat>/24 dev Universe
1205    ifconfig Universe up
1206    ifconfig Universe add fe80::<Your_real_addr>/10
1207    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
1208    ftp 10.66.66.66
1209    ...
1210    ftp fec0:6666:6666::193.233.7.65
1211    ...
1212
1213  */
1214
/* Build the tunnel's "link-layer" header: the outer IP header followed
 * by the GRE flag and protocol words.  Returns the pushed length, or
 * its negative when the destination is not yet resolvable (multicast
 * or unset remote). */
#ifdef HAVE_NETDEV_HEADER_OPS
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
		       unsigned short type,
		       const void *daddr, const void *saddr, unsigned len)
#else
static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
			void *daddr, void *saddr, unsigned len)
#endif
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16*)(iph+1);	/* GRE words follow the IP header */

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
	p[0]		= t->parms.o_flags;
	p[1]		= htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&iph->saddr, saddr, 4);

	if (daddr) {
		memcpy(&iph->daddr, daddr, 4);
		return t->hlen;
	}
	/* A configured unicast remote endpoint is usable as-is. */
	if (iph->daddr && !ipv4_is_multicast(iph->daddr))
		return t->hlen;

	return -t->hlen;
}
1248
1249 #ifdef HAVE_NETDEV_HEADER_OPS
1250 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1251 #else
1252 static int ipgre_header_parse(struct sk_buff *skb, unsigned char *haddr)
1253 #endif
1254 {
1255         struct iphdr *iph = (struct iphdr *) skb_mac_header(skb);
1256         memcpy(haddr, &iph->saddr, 4);
1257         return 4;
1258 }
1259
#ifdef HAVE_NETDEV_HEADER_OPS
/* Header ops for broadcast/NBMA tunnel devices (see ipgre_tunnel_init). */
static const struct header_ops ipgre_header_ops = {
	.create = ipgre_header,
	.parse  = ipgre_header_parse,
};
#endif
1266
#ifdef CONFIG_NET_IPGRE_BROADCAST
/* Open a broadcast-mode tunnel: join the multicast group on whichever
 * device routes toward the group address. */
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi fl = { .oif = t->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = t->parms.iph.daddr,
						.saddr = t->parms.iph.saddr,
						.tos = RT_TOS(t->parms.iph.tos) } },
				    .proto = IPPROTO_GRE };
		struct rtable *rt;
		if (ip_route_output_key(dev_net(dev), &rt, &fl))
			return -EADDRNOTAVAIL;
		/* NOTE: 'dev' is repointed at the egress device below. */
		dev = rt->u.dst.dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;	/* remembered for ipgre_close() */
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

/* Close a broadcast-mode tunnel: leave the group joined in ipgre_open(). */
static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;
		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
		if (in_dev) {
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
			in_dev_put(in_dev);
		}
	}
	return 0;
}

#endif
1308
1309 static void ethtool_getinfo(struct net_device *dev,
1310                             struct ethtool_drvinfo *info)
1311 {
1312         strcpy(info->driver, "ip_gre");
1313         strcpy(info->version, "Open vSwitch "VERSION BUILDNR);
1314         strcpy(info->bus_info, dev->type == ARPHRD_ETHER ? "gretap" : "gre");
1315 }
1316
/* Minimal ethtool support: report driver identity only. */
static struct ethtool_ops ethtool_ops = {
	.get_drvinfo = ethtool_getinfo,
};
1320
#ifdef HAVE_NET_DEVICE_OPS
/* Device operations for plain (layer-3) GRE tunnel devices. */
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
};
#endif
1334
/* netdev setup callback for layer-3 GRE devices: install operations
 * and the defaults appropriate for an ARPHRD_IPGRE link. */
static void ipgre_tunnel_setup(struct net_device *dev)
{
#ifdef HAVE_NET_DEVICE_OPS
	dev->netdev_ops		= &ipgre_netdev_ops;
#else
	/* Pre-net_device_ops kernels take individual callbacks. */
	dev->init		= ipgre_tunnel_init;
	dev->uninit		= ipgre_tunnel_uninit;
	dev->hard_start_xmit	= ipgre_tunnel_xmit;
#ifndef HAVE_NETDEV_STATS
	dev->get_stats		= ipgre_tunnel_get_stats;
#endif
	dev->do_ioctl		= ipgre_tunnel_ioctl;
	dev->change_mtu		= ipgre_tunnel_change_mtu;
#endif /* HAVE_NET_DEVICE_OPS */
	dev->destructor		= free_netdev;

	dev->type		= ARPHRD_IPGRE;
	/* Reserve room for the outer IP header plus basic GRE header. */
#ifdef HAVE_NETDEV_NEEDED_HEADROOM
	dev->needed_headroom	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
#else
	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
#endif
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;	/* addresses are IPv4 endpoints */
	dev->features		|= NETIF_F_NETNS_LOCAL;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;

	SET_ETHTOOL_OPS(dev, &ethtool_ops);
}
1366
/* ndo_init for layer-3 tunnels: record identity and select header ops
 * depending on whether the remote is fixed unicast, multicast, or unset. */
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	/* Expose the tunnel endpoints as the "hardware" addresses. */
	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Broadcast mode needs an explicit local address. */
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
#ifdef HAVE_NETDEV_HEADER_OPS
			dev->header_ops = &ipgre_header_ops;
#else
			dev->hard_header = ipgre_header;
			dev->hard_header_parse = ipgre_header_parse;
#endif
#ifndef HAVE_NET_DEVICE_OPS
			dev->open = ipgre_open;
			dev->stop = ipgre_close;
#endif
		}
#endif
	} else {
		/* No fixed remote: destination is supplied per packet. */
#ifdef HAVE_NETDEV_HEADER_OPS
		dev->header_ops = &ipgre_header_ops;
#else
		dev->hard_header = ipgre_header;
		dev->hard_header_parse = ipgre_header_parse;
#endif
	}

	return 0;
}
1410
/* Initialize the per-namespace fallback device: a wildcard, keyless
 * tunnel that anchors ioctl configuration.  Return type differs with
 * kernel version (dev->init expects int on older kernels). */
#ifdef HAVE_NET_DEVICE_OPS
static void ipgre_fb_tunnel_init(struct net_device *dev)
#else
static int ipgre_fb_tunnel_init(struct net_device *dev)
#endif
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version		= 4;
	iph->protocol		= IPPROTO_GRE;
	iph->ihl		= 5;
	tunnel->hlen		= sizeof(struct iphdr) + 4;

	/* The wildcard slot holds a reference on the fallback device. */
	dev_hold(dev);
	ign->tunnels_wc[0]	= tunnel;

#ifndef HAVE_NET_DEVICE_OPS
	return 0;
#endif
}
1436
/* GRE (IP protocol 47) receive/error hooks registered with the IP stack. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
static struct net_protocol ipgre_protocol = {
#else
static const struct net_protocol ipgre_protocol = {
#endif
	.handler	=	ipgre_rcv,
	.err_handler	=	ipgre_err,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	.netns_ok	=	1,
#endif
};
1448
1449 static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1450 {
1451         int prio;
1452
1453         for (prio = 0; prio < 4; prio++) {
1454                 int h;
1455                 for (h = 0; h < HASH_SIZE; h++) {
1456                         struct ip_tunnel *t = ign->tunnels[prio][h];
1457
1458                         while (t != NULL) {
1459                                 unregister_netdevice_queue(t->dev, head);
1460                                 t = t->next;
1461                         }
1462                 }
1463         }
1464 }
1465
/* Per-network-namespace init: allocate ipgre_net state and register
 * the fallback tunnel device used for ioctl-based configuration. */
static int ipgre_init_net(struct net *net)
{
	int err;
	struct ipgre_net *ign;

	err = -ENOMEM;
	ign = kzalloc(sizeof(struct ipgre_net), GFP_KERNEL);
	if (ign == NULL)
		goto err_alloc;

	err = net_assign_generic(net, ipgre_net_id, ign);
	if (err < 0)
		goto err_assign;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), GRE_IOCTL_DEVICE,
					   ipgre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);

#ifdef HAVE_NET_DEVICE_OPS
	ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
#else
	/* Older kernels invoke dev->init from register_netdev(). */
	ign->fb_tunnel_dev->init = ipgre_fb_tunnel_init;
#endif
#ifndef GRE_IOCTL_ONLY
	ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;
#endif

	if ((err = register_netdev(ign->fb_tunnel_dev)))
		goto err_reg_dev;

	return 0;

err_reg_dev:
	free_netdev(ign->fb_tunnel_dev);
err_alloc_dev:
	/* nothing */
err_assign:
	kfree(ign);
err_alloc:
	return err;
}
1511
1512 static void ipgre_exit_net(struct net *net)
1513 {
1514         struct ipgre_net *ign;
1515         LIST_HEAD(list);
1516
1517         ign = net_generic(net, ipgre_net_id);
1518         rtnl_lock();
1519         ipgre_destroy_tunnels(ign, &list);
1520         unregister_netdevice_many(&list);
1521         rtnl_unlock();
1522         kfree(ign);
1523 }
1524
/* Pernet hooks: one fallback device and tunnel table per namespace. */
static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
};
1529
1530 static int ipgre_tap_init(struct net_device *dev)
1531 {
1532         struct ip_tunnel *tunnel;
1533
1534         tunnel = netdev_priv(dev);
1535
1536         tunnel->dev = dev;
1537         strcpy(tunnel->parms.name, dev->name);
1538
1539         ipgre_tunnel_bind_dev(dev);
1540
1541         return 0;
1542 }
1543
#ifdef HAVE_NET_DEVICE_OPS
/* Device operations for Ethernet-over-GRE (gretap) devices. */
static const struct net_device_ops ipgre_tap_netdev_ops = {
	.ndo_init		= ipgre_tap_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
};
#endif
1555
/* netdev setup callback for gretap devices: Ethernet defaults from
 * ether_setup() with tunnel operations layered on top. */
static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

#ifdef HAVE_NET_DEVICE_OPS
	dev->netdev_ops		= &ipgre_tap_netdev_ops;
#else
	/* Pre-net_device_ops kernels take individual callbacks. */
	dev->init		= ipgre_tap_init;
	dev->uninit		= ipgre_tunnel_uninit;
	dev->hard_start_xmit	= ipgre_tunnel_xmit;
#ifndef HAVE_NETDEV_STATS
	dev->get_stats		= ipgre_tunnel_get_stats;
#endif
	dev->do_ioctl		= ipgre_tunnel_ioctl;
	dev->change_mtu		= ipgre_tunnel_change_mtu;
#endif /* HAVE_NET_DEVICE_OPS */
	dev->destructor		= free_netdev;

	dev->iflink		= 0;
	dev->features		|= NETIF_F_NETNS_LOCAL;

	SET_ETHTOOL_OPS(dev, &ethtool_ops);
}
1579
1580 #ifndef GRE_IOCTL_ONLY
1581 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
1582 {
1583         __be16 flags;
1584
1585         if (!data)
1586                 return 0;
1587
1588         flags = 0;
1589         if (data[IFLA_GRE_IFLAGS])
1590                 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1591         if (data[IFLA_GRE_OFLAGS])
1592                 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1593         if (flags & (GRE_VERSION|GRE_ROUTING))
1594                 return -EINVAL;
1595
1596         return 0;
1597 }
1598
1599 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
1600 {
1601         __be32 daddr;
1602
1603         if (tb[IFLA_ADDRESS]) {
1604                 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1605                         return -EINVAL;
1606                 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1607                         return -EADDRNOTAVAIL;
1608         }
1609
1610         if (!data)
1611                 goto out;
1612
1613         if (data[IFLA_GRE_REMOTE]) {
1614                 memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1615                 if (!daddr)
1616                         return -EINVAL;
1617         }
1618
1619 out:
1620         return ipgre_tunnel_validate(tb, data);
1621 }
1622
1623 static void ipgre_netlink_parms(struct nlattr *data[],
1624                                 struct ip_tunnel_parm *parms)
1625 {
1626         memset(parms, 0, sizeof(*parms));
1627
1628         parms->iph.protocol = IPPROTO_GRE;
1629
1630         if (!data)
1631                 return;
1632
1633         if (data[IFLA_GRE_LINK])
1634                 parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1635
1636         if (data[IFLA_GRE_IFLAGS])
1637                 parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
1638
1639         if (data[IFLA_GRE_OFLAGS])
1640                 parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
1641
1642         if (data[IFLA_GRE_IKEY])
1643                 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1644
1645         if (data[IFLA_GRE_OKEY])
1646                 parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1647
1648         if (data[IFLA_GRE_LOCAL])
1649                 parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);
1650
1651         if (data[IFLA_GRE_REMOTE])
1652                 parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);
1653
1654         if (data[IFLA_GRE_TTL])
1655                 parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1656
1657         if (data[IFLA_GRE_TOS])
1658                 parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1659
1660         if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
1661                 parms->iph.frag_off = htons(IP_DF);
1662 }
1663
/* rtnl_link "newlink" handler: create a tunnel device from netlink
 * attributes, register it, and insert it into the hash table. */
static int ipgre_newlink(struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &nt->parms);

	/* Refuse duplicate tunnels with identical parameters. */
	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		random_ether_addr(dev->dev_addr);

	mtu = ipgre_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;	/* no explicit MTU given: use computed value */

	err = register_netdevice(dev);
	if (err)
		goto out;

	/* Hold a reference for the hash-table linkage. */
	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);

out:
	return err;
}
1696
/* rtnl_link "changelink" handler: reconfigure an existing tunnel from
 * netlink attributes.  The fallback device cannot be changed. */
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &p);

	t = ipgre_tunnel_locate(net, &p, false, 0);

	if (t) {
		/* The new parameters collide with another tunnel device. */
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		if (dev->type != ARPHRD_ETHER) {
			unsigned nflags = 0;

			if (ipv4_is_multicast(p.iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p.iph.daddr)
				nflags = IFF_POINTOPOINT;

			/* The broadcast/point-to-point nature of a layer-3
			 * tunnel cannot change after creation. */
			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}

		/* Re-hash the tunnel under its new endpoints and key. */
		ipgre_tunnel_unlink(ign, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}
		ipgre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	t->parms.o_key = p.o_key;
	t->parms.iph.ttl = p.iph.ttl;
	t->parms.iph.tos = p.iph.tos;
	t->parms.iph.frag_off = p.iph.frag_off;

	if (t->parms.link != p.link) {
		/* Underlay changed: recompute MTU unless user pinned one. */
		t->parms.link = p.link;
		mtu = ipgre_tunnel_bind_dev(dev);
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}
1760
1761 static size_t ipgre_get_size(const struct net_device *dev)
1762 {
1763         return
1764                 /* IFLA_GRE_LINK */
1765                 nla_total_size(4) +
1766                 /* IFLA_GRE_IFLAGS */
1767                 nla_total_size(2) +
1768                 /* IFLA_GRE_OFLAGS */
1769                 nla_total_size(2) +
1770                 /* IFLA_GRE_IKEY */
1771                 nla_total_size(4) +
1772                 /* IFLA_GRE_OKEY */
1773                 nla_total_size(4) +
1774                 /* IFLA_GRE_LOCAL */
1775                 nla_total_size(4) +
1776                 /* IFLA_GRE_REMOTE */
1777                 nla_total_size(4) +
1778                 /* IFLA_GRE_TTL */
1779                 nla_total_size(1) +
1780                 /* IFLA_GRE_TOS */
1781                 nla_total_size(1) +
1782                 /* IFLA_GRE_PMTUDISC */
1783                 nla_total_size(1) +
1784                 0;
1785 }
1786
/*
 * ipgre_fill_info - dump tunnel parameters into a netlink message.
 *
 * Each NLA_PUT_* macro jumps to nla_put_failure when the skb runs out
 * of tailroom; the space reserved via ipgre_get_size() normally makes
 * that impossible.
 *
 * Returns 0 on success or -EMSGSIZE if the message buffer overflows.
 */
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link);
	NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags);
	NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags);
	NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key);
	NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key);
	NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr);
	NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr);
	NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl);
	NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos);
	/* Path-MTU discovery is reported as a boolean derived from DF. */
	NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)));

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
1808
/*
 * Validation policy for incoming IFLA_GRE_* attributes; rtnetlink checks
 * messages against it before newlink/changelink run.  LOCAL/REMOTE are
 * length-checked only, as they carry raw network-order IPv4 addresses.
 */
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
};
1821
/* rtnl_link_ops for layer-3 "gre" devices ("ip link add ... type gre"). */
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
1834
/* rtnl_link_ops for Ethernet-over-GRE "gretap" devices. */
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
1847 #endif
1848
1849 /*
1850  *      And now the modules code and kernel interface.
1851  */
1852
1853 static int __init ipgre_init(void)
1854 {
1855         int err;
1856
1857         printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
1858
1859         if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
1860                 printk(KERN_INFO "ipgre init: can't add protocol\n");
1861                 return -EAGAIN;
1862         }
1863
1864         err = register_pernet_gen_device(&ipgre_net_id, &ipgre_net_ops);
1865         if (err < 0)
1866                 goto gen_device_failed;
1867
1868 #ifndef GRE_IOCTL_ONLY
1869         err = rtnl_link_register(&ipgre_link_ops);
1870         if (err < 0)
1871                 goto rtnl_link_failed;
1872
1873         err = rtnl_link_register(&ipgre_tap_ops);
1874         if (err < 0)
1875                 goto tap_ops_failed;
1876 #endif
1877
1878 out:
1879         return err;
1880
1881 #ifndef GRE_IOCTL_ONLY
1882 tap_ops_failed:
1883         rtnl_link_unregister(&ipgre_link_ops);
1884 rtnl_link_failed:
1885         unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
1886 #endif
1887 gen_device_failed:
1888         inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
1889         goto out;
1890
1891 }
1892
1893 static void __exit ipgre_fini(void)
1894 {
1895 #ifndef GRE_IOCTL_ONLY
1896         rtnl_link_unregister(&ipgre_tap_ops);
1897         rtnl_link_unregister(&ipgre_link_ops);
1898 #endif
1899         unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
1900         if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
1901                 printk(KERN_INFO "ipgre close: can't remove protocol\n");
1902 }
1903
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_DESCRIPTION("GRE over IPv4 tunneling driver");
MODULE_LICENSE("GPL");
#ifndef GRE_IOCTL_ONLY
/* Let "ip link add ... type gre|gretap" autoload this module. */
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
#endif
1912