gre: Always set TTL on outer packet to 64.
[sliver-openvswitch.git] / datapath / linux-2.6 / compat-2.6 / ip_gre.c
1 /* ip_gre driver port to Linux 2.6.18 and greater */
2
3 #include <linux/version.h>
4 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5 #define HAVE_NETDEV_STATS
6 #endif
7 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
8 #define HAVE_NETDEV_HEADER_OPS
9 #endif
10 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
11 #define HAVE_NETDEV_NEEDED_HEADROOM
12 #endif
13
14 /*
15  *      Linux NET3:     GRE over IP protocol decoder.
16  *
17  *      Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
18  *
19  *      This program is free software; you can redistribute it and/or
20  *      modify it under the terms of the GNU General Public License
21  *      as published by the Free Software Foundation; either version
22  *      2 of the License, or (at your option) any later version.
23  *
24  */
25
26 #include <linux/capability.h>
27 #include <linux/ethtool.h>
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/kernel.h>
31 #include <asm/uaccess.h>
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/in.h>
35 #include <linux/tcp.h>
36 #include <linux/udp.h>
37 #include <linux/if_arp.h>
38 #include <linux/mroute.h>
39 #include <linux/init.h>
40 #include <linux/in6.h>
41 #include <linux/inetdevice.h>
42 #include <linux/igmp.h>
43 #include <linux/netfilter_ipv4.h>
44 #include <linux/etherdevice.h>
45 #include <linux/if_ether.h>
46
47 #include <net/sock.h>
48 #include <net/ip.h>
49 #include <net/icmp.h>
50 #include <net/protocol.h>
51 #include <net/ipip.h>
52 #include <net/arp.h>
53 #include <net/checksum.h>
54 #include <net/dsfield.h>
55 #include <net/inet_ecn.h>
56 #include <net/xfrm.h>
57 #include <net/net_namespace.h>
58 #include <net/netns/generic.h>
59
60 #ifdef CONFIG_IPV6
61 #include <net/ipv6.h>
62 #include <net/ip6_fib.h>
63 #include <net/ip6_route.h>
64 #endif
65
66 #include "compat.h"
67 #include "openvswitch/gre.h"
68
69 #ifndef GRE_IOCTL_ONLY
70 #include <net/rtnetlink.h>
71 #endif
72
73 /*
74    Problems & solutions
75    --------------------
76
77    1. The most important issue is detecting local dead loops.
78    They would cause a complete host lockup in transmit, which
79    would be "resolved" by stack overflow or, if queueing is enabled,
80    by infinite looping in net_bh.
81
82    We cannot track such dead loops during route installation;
83    it is an infeasible task. The most general solution would be
84    to keep an skb->encapsulation counter (a sort of local ttl)
85    and silently drop the packet when it expires. It is the best
86    solution, but it supposes maintaining a new variable in ALL
87    skbs, even if no tunneling is used.
88
89    Current solution: the HARD_TX_LOCK lock breaks dead loops.
90
91
92
93    2. Networking dead loops would not kill routers, but they would really
94    kill the network. The IP hop limit plays the role of "t->recursion" in this case,
95    if we copy it from the packet being encapsulated to the outer header.
96    It is a very good solution, but it introduces two problems:
97
98    - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
99      do not work over tunnels.
100    - traceroute does not work. I planned to relay ICMP from the tunnel,
101      so that this problem would be solved and the traceroute output
102      would be even more informative. This idea turned out to be wrong:
103      only Linux complies with rfc1812 now (yes, guys, Linux is the only
104      true router now :-)); all other routers (at least, in my neighbourhood)
105      return only 8 bytes of payload. That is the end of it.
106
107    Hence, if we want OSPF to work or traceroute to say something reasonable,
108    we must search for another solution.
109
110    One of them is to parse the packet, trying to detect an inner encapsulation
111    made by our node. It is difficult or even impossible, especially
112    taking fragmentation into account. To be short, it is not a solution at all.
113
114    Current solution: The solution was UNEXPECTEDLY SIMPLE.
115    We force the DF flag on tunnels with a preconfigured hop limit,
116    and that is ALL. :-) Well, it does not remove the problem completely,
117    but the exponential growth of network traffic is changed to linear
118    (branches that exceed the pmtu are pruned) and the tunnel mtu quickly
119    degrades to a value <68, where the looping stops; see the worked example after this comment.
120    Yes, it is not good if there is a router in the loop
121    which does not force DF, even when the encapsulated packets have DF set.
122    But that is not our problem! Nobody could accuse us; we did
123    all that we could. Even if it was your gated that injected the
124    fatal route into the network, even if it was you who configured the
125    fatal static route: you are innocent. :-)
126
127    XXX: Forcing the DF flag on was done only when setting up tunnels via the
128         ioctl interface and not Netlink.  Since it prevents some operations
129         and isn't very transparent, I removed it.  It seems nobody really
130         cared about it anyway.
131         Moral: don't create loops.
132
133    3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
134    practically identical code. It would be good to glue them
135    together, but it is not obvious how to make them modular.
136    sit is an integral part of IPv6, while ipip and gre are naturally modular.
137    We could extract the common parts (hash table, ioctl, etc.)
138    into a separate module (ip_tunnel.c).
139
140    Alexey Kuznetsov.
141  */
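/*
   A rough worked example of the pruning arithmetic above (figures are
   illustrative, not taken from this driver): each nested GRE
   encapsulation adds at least sizeof(struct iphdr) + 4 = 24 bytes of
   overhead, so with DF forced the usable tunnel mtu shrinks by at least
   24 bytes per loop iteration.  Starting from a 1500-byte path MTU it
   takes roughly (1500 - 68) / 24 ~= 60 iterations to fall below 68, at
   which point the looping traffic dies out instead of growing
   exponentially.
 */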
142
143 #ifndef GRE_IOCTL_ONLY
144 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
145 static struct rtnl_link_ops ipgre_tap_ops __read_mostly;
146 #endif
147 static int ipgre_tunnel_init(struct net_device *dev);
148 static void ipgre_tunnel_setup(struct net_device *dev);
149 static void ipgre_tap_setup(struct net_device *dev);
150 static int ipgre_tunnel_bind_dev(struct net_device *dev);
151
152 #define HASH_SIZE  16
153
154 static int ipgre_net_id __read_mostly;
155 struct ipgre_net {
156         struct ip_tunnel *tunnels[4][HASH_SIZE];
157
158         struct net_device *fb_tunnel_dev;
159 };
160
161 /* Tunnel hash table */
162
163 /*
164    4 hash tables:
165
166    3: (remote,local)
167    2: (remote,*)
168    1: (*,local)
169    0: (*,*)
170
171    We require an exact key match, i.e. if a key is present in the packet
172    it will match only a tunnel with the same key; if it is not present,
173    it will match only a keyless tunnel.
174
175    All keyless packets, if not matched by a configured keyless tunnel,
176    will match the fallback tunnel.
177  */
178
179 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
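/*
 * Worked example of the HASH() fold above (the address value is chosen
 * arbitrarily for illustration): for addr = 0x0a0b0c0d,
 *
 *   addr >> 4            = 0x00a0b0c0
 *   addr ^ (addr >> 4)   = 0x0aabbccd
 *   (...) & 0xF          = 0xd
 *
 * so the entry lands in bucket 13 of the 16 buckets.
 */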
180
181 #define tunnels_r_l     tunnels[3]
182 #define tunnels_r       tunnels[2]
183 #define tunnels_l       tunnels[1]
184 #define tunnels_wc      tunnels[0]
185 /*
186  * Locking : hash tables are protected by RCU and a spinlock
187  */
188 static DEFINE_SPINLOCK(ipgre_lock);
189
190 #define for_each_ip_tunnel_rcu(start) \
191         for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
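/*
 * A condensed sketch of how the locking scheme above is used by the
 * code below (see ipgre_tunnel_lookup()/ipgre_rcv() for the reader side
 * and ipgre_tunnel_link() for the writer side):
 *
 *   reader:                                writer:
 *     rcu_read_lock();                       spin_lock_bh(&ipgre_lock);
 *     for_each_ip_tunnel_rcu(bucket)         t->next = *tp;
 *             ...match on t...               rcu_assign_pointer(*tp, t);
 *     rcu_read_unlock();                     spin_unlock_bh(&ipgre_lock);
 */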
192
193 /* Given src, dst and key, find the appropriate tunnel for input. */
194
195 static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
196                                               __be32 remote, __be32 local,
197                                               __be32 key, __be16 gre_proto)
198 {
199         struct net *net = dev_net(dev);
200         int link = dev->ifindex;
201         unsigned h0 = HASH(remote);
202         unsigned h1 = HASH(key);
203         struct ip_tunnel *t, *cand = NULL;
204         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
205         int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
206                        ARPHRD_ETHER : ARPHRD_IPGRE;
207         int score, cand_score = 4;
208
209         for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
210                 if (local != t->parms.iph.saddr ||
211                     remote != t->parms.iph.daddr ||
212                     key != t->parms.i_key ||
213                     !(t->dev->flags & IFF_UP))
214                         continue;
215
216                 if (t->dev->type != ARPHRD_IPGRE &&
217                     t->dev->type != dev_type)
218                         continue;
219
220                 score = 0;
221                 if (t->parms.link != link)
222                         score |= 1;
223                 if (t->dev->type != dev_type)
224                         score |= 2;
225                 if (score == 0)
226                         return t;
227
228                 if (score < cand_score) {
229                         cand = t;
230                         cand_score = score;
231                 }
232         }
233
234         for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
235                 if (remote != t->parms.iph.daddr ||
236                     key != t->parms.i_key ||
237                     !(t->dev->flags & IFF_UP))
238                         continue;
239
240                 if (t->dev->type != ARPHRD_IPGRE &&
241                     t->dev->type != dev_type)
242                         continue;
243
244                 score = 0;
245                 if (t->parms.link != link)
246                         score |= 1;
247                 if (t->dev->type != dev_type)
248                         score |= 2;
249                 if (score == 0)
250                         return t;
251
252                 if (score < cand_score) {
253                         cand = t;
254                         cand_score = score;
255                 }
256         }
257
258         for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
259                 if ((local != t->parms.iph.saddr &&
260                      (local != t->parms.iph.daddr ||
261                       !ipv4_is_multicast(local))) ||
262                     key != t->parms.i_key ||
263                     !(t->dev->flags & IFF_UP))
264                         continue;
265
266                 if (t->dev->type != ARPHRD_IPGRE &&
267                     t->dev->type != dev_type)
268                         continue;
269
270                 score = 0;
271                 if (t->parms.link != link)
272                         score |= 1;
273                 if (t->dev->type != dev_type)
274                         score |= 2;
275                 if (score == 0)
276                         return t;
277
278                 if (score < cand_score) {
279                         cand = t;
280                         cand_score = score;
281                 }
282         }
283
284         for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
285                 if (t->parms.i_key != key ||
286                     !(t->dev->flags & IFF_UP))
287                         continue;
288
289                 if (t->dev->type != ARPHRD_IPGRE &&
290                     t->dev->type != dev_type)
291                         continue;
292
293                 score = 0;
294                 if (t->parms.link != link)
295                         score |= 1;
296                 if (t->dev->type != dev_type)
297                         score |= 2;
298                 if (score == 0)
299                         return t;
300
301                 if (score < cand_score) {
302                         cand = t;
303                         cand_score = score;
304                 }
305         }
306
307         if (cand != NULL)
308                 return cand;
309
310         dev = ign->fb_tunnel_dev;
311         if (dev->flags & IFF_UP)
312                 return netdev_priv(dev);
313
314         return NULL;
315 }
316
317 static struct ip_tunnel **__ipgre_bucket(struct ipgre_net *ign,
318                 struct ip_tunnel_parm *parms)
319 {
320         __be32 remote = parms->iph.daddr;
321         __be32 local = parms->iph.saddr;
322         __be32 key = parms->i_key;
323         unsigned h = HASH(key);
324         int prio = 0;
325
326         if (local)
327                 prio |= 1;
328         if (remote && !ipv4_is_multicast(remote)) {
329                 prio |= 2;
330                 h ^= HASH(remote);
331         }
332
333         return &ign->tunnels[prio][h];
334 }
335
336 static inline struct ip_tunnel **ipgre_bucket(struct ipgre_net *ign,
337                 struct ip_tunnel *t)
338 {
339         return __ipgre_bucket(ign, &t->parms);
340 }
341
342 static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
343 {
344         struct ip_tunnel **tp = ipgre_bucket(ign, t);
345
346         spin_lock_bh(&ipgre_lock);
347         t->next = *tp;
348         rcu_assign_pointer(*tp, t);
349         spin_unlock_bh(&ipgre_lock);
350 }
351
352 static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
353 {
354         struct ip_tunnel **tp;
355
356         for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) {
357                 if (t == *tp) {
358                         spin_lock_bh(&ipgre_lock);
359                         *tp = t->next;
360                         spin_unlock_bh(&ipgre_lock);
361                         break;
362                 }
363         }
364 }
365
366 static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
367                                            struct ip_tunnel_parm *parms,
368                                            int type)
369 {
370         __be32 remote = parms->iph.daddr;
371         __be32 local = parms->iph.saddr;
372         __be32 key = parms->i_key;
373         int link = parms->link;
374         struct ip_tunnel *t, **tp;
375         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
376
377         for (tp = __ipgre_bucket(ign, parms); (t = *tp) != NULL; tp = &t->next)
378                 if (local == t->parms.iph.saddr &&
379                     remote == t->parms.iph.daddr &&
380                     key == t->parms.i_key &&
381                     link == t->parms.link &&
382                     type == t->dev->type)
383                         break;
384
385         return t;
386 }
387
388 static struct ip_tunnel * ipgre_tunnel_locate(struct net *net,
389                 struct ip_tunnel_parm *parms, int gretap, int create)
390 {
391         struct ip_tunnel *t, *nt;
392         struct net_device *dev;
393         char name[IFNAMSIZ];
394         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
395
396         t = ipgre_tunnel_find(net, parms, gretap ? ARPHRD_ETHER : ARPHRD_IPGRE);
397         if (t || !create)
398                 return t;
399
400         if (parms->name[0])
401                 strlcpy(name, parms->name, IFNAMSIZ);
402         else
403                 sprintf(name, "gre%%d");
404
405         dev = alloc_netdev(sizeof(*t), name, gretap ? ipgre_tap_setup
406                                                     : ipgre_tunnel_setup);
407         if (!dev)
408                 return NULL;
409
410         dev_net_set(dev, net);
411
412         if (strchr(name, '%')) {
413                 if (dev_alloc_name(dev, name) < 0)
414                         goto failed_free;
415         }
416
417         if (gretap)
418                 random_ether_addr(dev->dev_addr);
419
420 #ifndef GRE_IOCTL_ONLY
421         dev->rtnl_link_ops = gretap ? &ipgre_tap_ops : &ipgre_link_ops;
422 #endif
423         nt = netdev_priv(dev);
424         nt->parms = *parms;
425
426         dev->mtu = ipgre_tunnel_bind_dev(dev);
427
428         if (register_netdevice(dev) < 0)
429                 goto failed_free;
430
431         dev_hold(dev);
432         ipgre_tunnel_link(ign, nt);
433         return nt;
434
435 failed_free:
436         free_netdev(dev);
437         return NULL;
438 }
439
440 static void ipgre_tunnel_uninit(struct net_device *dev)
441 {
442         struct net *net = dev_net(dev);
443         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
444
445         ipgre_tunnel_unlink(ign, netdev_priv(dev));
446         dev_put(dev);
447 }
448
449
450 static void ipgre_err(struct sk_buff *skb, u32 info)
451 {
452
453 /* All routers (except for Linux) return only
454    8 bytes of packet payload. It means that precise relaying of
455    ICMP in the real Internet is absolutely infeasible.
456
457    Moreover, Cisco's "wise men" put the GRE key in the third word
458    of the GRE header. That makes it impossible to maintain even soft state
459    for keyed GRE tunnels with checksums enabled. Tell them "thank you".
460
461    Well, I wonder: rfc1812 was written by a Cisco employee, so
462    why the hell do these idiots break the standards established
463    by themselves???
464  */
465
466         struct iphdr *iph = (struct iphdr *)skb->data;
467         __be16       *p = (__be16*)(skb->data+(iph->ihl<<2));
468         int grehlen = (iph->ihl<<2) + 4;
469         const int type = icmp_hdr(skb)->type;
470         const int code = icmp_hdr(skb)->code;
471         struct ip_tunnel *t;
472         __be16 flags;
473
474         if (skb_headlen(skb) < grehlen)
475                 return;
476
477         flags = p[0];
478         if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
479                 if (flags&(GRE_VERSION|GRE_ROUTING))
480                         return;
481                 if (flags&GRE_KEY) {
482                         grehlen += 4;
483                         if (flags&GRE_CSUM)
484                                 grehlen += 4;
485                 }
486         }
487
488         /* If only 8 bytes were returned, a keyed message will be dropped here */
489         if (skb_headlen(skb) < grehlen)
490                 return;
491
492         switch (type) {
493         default:
494         case ICMP_PARAMETERPROB:
495                 return;
496
497         case ICMP_DEST_UNREACH:
498                 switch (code) {
499                 case ICMP_SR_FAILED:
500                 case ICMP_PORT_UNREACH:
501                         /* Impossible event. */
502                         return;
503                 case ICMP_FRAG_NEEDED:
504                         /* Soft state for pmtu is maintained by IP core. */
505                         return;
506                 default:
507                         /* All others are translated to HOST_UNREACH.
508                            rfc2003 contains "deep thoughts" about NET_UNREACH,
509                            I believe they are just ether pollution. --ANK
510                          */
511                         break;
512                 }
513                 break;
514         case ICMP_TIME_EXCEEDED:
515                 if (code != ICMP_EXC_TTL)
516                         return;
517                 break;
518         }
519
520         rcu_read_lock();
521         t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
522                                 flags & GRE_KEY ?
523                                 *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
524                                 p[1]);
525         if (t == NULL || t->parms.iph.daddr == 0 ||
526             ipv4_is_multicast(t->parms.iph.daddr))
527                 goto out;
528
529         if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
530                 goto out;
531
532         if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
533                 t->err_count++;
534         else
535                 t->err_count = 1;
536         t->err_time = jiffies;
537 out:
538         rcu_read_unlock();
539         return;
540 }
541
542 static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
543 {
544         if (INET_ECN_is_ce(iph->tos)) {
545                 if (skb->protocol == htons(ETH_P_IP)) {
546                         if (unlikely(!pskb_may_pull(skb, skb_network_header(skb)
547                             + sizeof(struct iphdr) - skb->data)))
548                                 return;
549
550                         IP_ECN_set_ce(ip_hdr(skb));
551                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
552                         if (unlikely(!pskb_may_pull(skb, skb_network_header(skb)
553                             + sizeof(struct ipv6hdr) - skb->data)))
554                                 return;
555
556                         IP6_ECN_set_ce(ipv6_hdr(skb));
557                 }
558         }
559 }
560
561 static inline u8
562 ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
563 {
564         u8 inner = 0;
565         if (skb->protocol == htons(ETH_P_IP))
566                 inner = old_iph->tos;
567         else if (skb->protocol == htons(ETH_P_IPV6))
568                 inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
569         return INET_ECN_encapsulate(tos, inner);
570 }
571
572 static int ipgre_rcv(struct sk_buff *skb)
573 {
574         struct iphdr *iph;
575         u8     *h;
576         __be16    flags;
577         __sum16   csum = 0;
578         __be32 key = 0;
579         u32    seqno = 0;
580         struct ip_tunnel *tunnel;
581         int    offset = 4;
582         __be16 gre_proto;
583         unsigned int len;
584
585         if (!pskb_may_pull(skb, 16))
586                 goto drop_nolock;
587
588         iph = ip_hdr(skb);
589         h = skb->data;
590         flags = *(__be16*)h;
591
592         if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
593                 /* - Version must be 0.
594                    - We do not support routing headers.
595                  */
596                 if (flags&(GRE_VERSION|GRE_ROUTING))
597                         goto drop_nolock;
598
599                 if (flags&GRE_CSUM) {
600                         switch (skb->ip_summed) {
601                         case CHECKSUM_COMPLETE:
602                                 csum = csum_fold(skb->csum);
603                                 if (!csum)
604                                         break;
605                                 /* fall through */
606                         case CHECKSUM_NONE:
607                                 skb->csum = 0;
608                                 csum = __skb_checksum_complete(skb);
609                                 skb->ip_summed = CHECKSUM_COMPLETE;
610                         }
611                         offset += 4;
612                 }
613                 if (flags&GRE_KEY) {
614                         key = *(__be32*)(h + offset);
615                         offset += 4;
616                 }
617                 if (flags&GRE_SEQ) {
618                         seqno = ntohl(*(__be32*)(h + offset));
619                         offset += 4;
620                 }
621         }
622
623         gre_proto = *(__be16 *)(h + 2);
624
625         rcu_read_lock();
626         if ((tunnel = ipgre_tunnel_lookup(skb->dev,
627                                           iph->saddr, iph->daddr, key,
628                                           gre_proto))) {
629                 struct net_device_stats *stats;
630 #ifdef HAVE_NETDEV_STATS
631                 stats = &tunnel->dev->stats;
632 #else
633                 stats = &tunnel->stat;
634 #endif
635
636                 secpath_reset(skb);
637
638                 skb->protocol = gre_proto;
639                 /* WCCP version 1 and 2 protocol decoding.
640                  * - Change protocol to IP
641                  * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
642                  */
643                 if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
644                         skb->protocol = htons(ETH_P_IP);
645                         if ((*(h + offset) & 0xF0) != 0x40)
646                                 offset += 4;
647                 }
648
649                 skb->mac_header = skb->network_header;
650                 __pskb_pull(skb, offset);
651                 skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
652                 skb->pkt_type = PACKET_HOST;
653 #ifdef CONFIG_NET_IPGRE_BROADCAST
654                 if (ipv4_is_multicast(iph->daddr)) {
655                         /* Looped back packet, drop it! */
656                         if (skb_rtable(skb)->fl.iif == 0)
657                                 goto drop;
658                         stats->multicast++;
659                         skb->pkt_type = PACKET_BROADCAST;
660                 }
661 #endif
662
663                 if (((flags&GRE_CSUM) && csum) ||
664                     (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
665                         stats->rx_crc_errors++;
666                         stats->rx_errors++;
667                         goto drop;
668                 }
669                 if (tunnel->parms.i_flags&GRE_SEQ) {
670                         if (!(flags&GRE_SEQ) ||
671                             (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
672                                 stats->rx_fifo_errors++;
673                                 stats->rx_errors++;
674                                 goto drop;
675                         }
676                         tunnel->i_seqno = seqno + 1;
677                 }
678
679                 len = skb->len;
680
681                 /* Warning: All skb pointers will be invalidated! */
682                 if (tunnel->dev->type == ARPHRD_ETHER) {
683                         if (!pskb_may_pull(skb, ETH_HLEN)) {
684                                 stats->rx_length_errors++;
685                                 stats->rx_errors++;
686                                 goto drop;
687                         }
688
689                         iph = ip_hdr(skb);
690                         skb->protocol = eth_type_trans(skb, tunnel->dev);
691                         skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
692                 }
693
694                 stats->rx_packets++;
695                 stats->rx_bytes += len;
696                 skb->dev = tunnel->dev;
697                 skb_dst_drop(skb);
698                 nf_reset(skb);
699
700                 skb_reset_network_header(skb);
701
702                 /* Invalidates pointers. */
703                 ipgre_ecn_decapsulate(iph, skb);
704
705                 netif_rx(skb);
706                 rcu_read_unlock();
707                 return(0);
708         }
709         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
710
711 drop:
712         rcu_read_unlock();
713 drop_nolock:
714         kfree_skb(skb);
715         return(0);
716 }
717
718 static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
719 {
720         struct ip_tunnel *tunnel = netdev_priv(dev);
721         struct net_device_stats *stats;
722 #ifdef HAVE_NETDEV_QUEUE_STATS
723         struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
724 #endif
725         struct iphdr  *old_iph = ip_hdr(skb);
726         struct iphdr  *tiph;
727         u8     tos;
728         __be16 df;
729         struct rtable *rt;                      /* Route to the other host */
730         struct net_device *tdev;                /* Device to other host */
731         struct iphdr  *iph;                     /* Our new IP header */
732         unsigned int max_headroom;              /* The extra header space needed */
733         int    gre_hlen;
734         __be32 dst;
735         int    mtu;
736         __be16 original_protocol;
737
738 #ifdef HAVE_NETDEV_STATS
739         stats = &dev->stats;
740 #else
741         stats = &tunnel->stat;
742 #endif
743
744         /* Validate the protocol headers before we try to use them. */
745         original_protocol = skb->protocol;
746         if (skb->protocol == htons(ETH_P_IP)) {
747                 if (unlikely(!pskb_may_pull(skb, skb_network_header(skb)
748                     + sizeof(struct iphdr) - skb->data)))
749                         skb->protocol = 0;
750         } else if (skb->protocol == htons(ETH_P_IPV6)) {
751                 if (unlikely(!pskb_may_pull(skb, skb_network_header(skb)
752                     + sizeof(struct ipv6hdr) - skb->data)))
753                         skb->protocol = 0;
754         }
755
756         if (dev->type == ARPHRD_ETHER)
757                 IPCB(skb)->flags = 0;
758
759 #ifdef HAVE_NETDEV_HEADER_OPS
760         if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
761 #else
762         if (dev->hard_header && dev->type == ARPHRD_IPGRE) {
763 #endif
764                 gre_hlen = 0;
765                 tiph = (struct iphdr *)skb->data;
766         } else {
767                 gre_hlen = tunnel->hlen;
768                 tiph = &tunnel->parms.iph;
769         }
770
771         if ((dst = tiph->daddr) == 0) {
772                 /* NBMA tunnel */
773
774                 if (skb_dst(skb) == NULL) {
775                         stats->tx_fifo_errors++;
776                         goto tx_error;
777                 }
778
779                 if (skb->protocol == htons(ETH_P_IP)) {
780                         rt = skb_rtable(skb);
781                         if ((dst = rt->rt_gateway) == 0)
782                                 goto tx_error_icmp;
783                 }
784 #ifdef CONFIG_IPV6
785                 else if (skb->protocol == htons(ETH_P_IPV6)) {
786                         struct in6_addr *addr6;
787                         int addr_type;
788                         struct neighbour *neigh = skb_dst(skb)->neighbour;
789
790                         if (neigh == NULL)
791                                 goto tx_error;
792
793                         addr6 = (struct in6_addr *)&neigh->primary_key;
794                         addr_type = ipv6_addr_type(addr6);
795
796                         if (addr_type == IPV6_ADDR_ANY) {
797                                 addr6 = &ipv6_hdr(skb)->daddr;
798                                 addr_type = ipv6_addr_type(addr6);
799                         }
800
801                         if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
802                                 goto tx_error_icmp;
803
804                         dst = addr6->s6_addr32[3];
805                 }
806 #endif
807                 else
808                         goto tx_error;
809         }
810
811         tos = tiph->tos;
812         if (tos == 1) {
813                 tos = 0;
814                 if (skb->protocol == htons(ETH_P_IP))
815                         tos = old_iph->tos;
816         }
817
818         {
819                 struct flowi fl = { .oif = tunnel->parms.link,
820                                     .nl_u = { .ip4_u =
821                                               { .daddr = dst,
822                                                 .saddr = tiph->saddr,
823                                                 .tos = RT_TOS(tos) } },
824                                     .proto = IPPROTO_GRE };
825                 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
826                         stats->tx_carrier_errors++;
827                         goto tx_error;
828                 }
829         }
830         tdev = rt->u.dst.dev;
831
832         if (tdev == dev) {
833                 ip_rt_put(rt);
834                 stats->collisions++;
835                 goto tx_error;
836         }
837
838         df = tiph->frag_off;
839         if (df)
840 #ifdef HAVE_NETDEV_NEEDED_HEADROOM
841                 mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen;
842 #else
843                 mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
844 #endif
845         else
846                 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
847
848         if (skb_dst(skb))
849                 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
850
851         /* XXX: Temporarily allow fragmentation since DF doesn't
852          * do the right thing with bridging. */
853 /*
854         if (skb->protocol == htons(ETH_P_IP)) {
855                 df |= (old_iph->frag_off&htons(IP_DF));
856
857                 if ((old_iph->frag_off&htons(IP_DF)) &&
858                     mtu < ntohs(old_iph->tot_len)) {
859                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
860                         ip_rt_put(rt);
861                         goto tx_error;
862                 }
863         }
864 #ifdef CONFIG_IPV6
865         else if (skb->protocol == htons(ETH_P_IPV6)) {
866                 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
867
868                 if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
869                         if ((tunnel->parms.iph.daddr &&
870                              !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
871                             rt6->rt6i_dst.plen == 128) {
872                                 rt6->rt6i_flags |= RTF_MODIFIED;
873                                 skb_dst(skb)->metrics[RTAX_MTU-1] = mtu;
874                         }
875                 }
876
877                 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
878                         icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
879                         ip_rt_put(rt);
880                         goto tx_error;
881                 }
882         }
883 #endif
884 */
885         if (tunnel->err_count > 0) {
886                 if (time_before(jiffies,
887                                 tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
888                         tunnel->err_count--;
889
890                         dst_link_failure(skb);
891                 } else
892                         tunnel->err_count = 0;
893         }
894
895         max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
896
897         if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
898             (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
899                 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
900                 if (!new_skb) {
901                         ip_rt_put(rt);
902 #ifdef HAVE_NETDEV_QUEUE_STATS
903                         txq->tx_dropped++;
904 #else
905                         stats->tx_dropped++;
906 #endif
907                         dev_kfree_skb(skb);
908                         return NETDEV_TX_OK;
909                 }
910                 if (skb->sk)
911                         skb_set_owner_w(new_skb, skb->sk);
912                 dev_kfree_skb(skb);
913                 skb = new_skb;
914                 old_iph = ip_hdr(skb);
915         }
916
917         skb_reset_transport_header(skb);
918         skb_push(skb, gre_hlen);
919         skb_reset_network_header(skb);
920         memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
921         IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
922                               IPSKB_REROUTED);
923         skb_dst_drop(skb);
924         skb_dst_set(skb, &rt->u.dst);
925
926         /*
927          *      Push down and install the outer IP header.
928          */
929
930         iph                     =       ip_hdr(skb);
931         iph->version            =       4;
932         iph->ihl                =       sizeof(struct iphdr) >> 2;
933         iph->frag_off           =       df;
934         iph->protocol           =       IPPROTO_GRE;
935         iph->tos                =       ipgre_ecn_encapsulate(tos, old_iph, skb);
936         iph->daddr              =       rt->rt_dst;
937         iph->saddr              =       rt->rt_src;
938
939         if ((iph->ttl = tiph->ttl) == 0) {
940                 if (skb->protocol == htons(ETH_P_IP))
941                         iph->ttl = old_iph->ttl;
942 #ifdef CONFIG_IPV6
943                 else if (skb->protocol == htons(ETH_P_IPV6))
944                         iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
945 #endif
946                 else
947                         iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
948         }
949
950         skb->protocol = original_protocol;
951
952         ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
953         ((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
954                                    htons(ETH_P_TEB) : skb->protocol;
955
956         if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
957                 __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
958
959                 if (tunnel->parms.o_flags&GRE_SEQ) {
960                         ++tunnel->o_seqno;
961                         *ptr = htonl(tunnel->o_seqno);
962                         ptr--;
963                 }
964                 if (tunnel->parms.o_flags&GRE_KEY) {
965                         *ptr = tunnel->parms.o_key;
966                         ptr--;
967                 }
968                 if (tunnel->parms.o_flags&GRE_CSUM) {
969                         *ptr = 0;
970                         *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
971                 }
972         }
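        /*
         * For reference, the outer header laid out above follows the usual
         * GRE wire format (RFC 2784/2890); the optional fields are present
         * only when the corresponding o_flags bit is set:
         *
         *   IP(20) | flags(2) proto(2) | [csum(2)+reserved(2)] | [key(4)] | [seq(4)]
         *
         * which is why "ptr" starts at the last 4 bytes of the header
         * (iph + tunnel->hlen - 4) and is filled walking backwards:
         * seq first, then key, then checksum.
         */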
973
974         nf_reset(skb);
975
976         IPTUNNEL_XMIT();
977         return NETDEV_TX_OK;
978
979 tx_error_icmp:
980         dst_link_failure(skb);
981
982 tx_error:
983         stats->tx_errors++;
984         dev_kfree_skb(skb);
985         return NETDEV_TX_OK;
986 }
987
988 static int ipgre_tunnel_bind_dev(struct net_device *dev)
989 {
990         struct net_device *tdev = NULL;
991         struct ip_tunnel *tunnel;
992         struct iphdr *iph;
993         int hlen = LL_MAX_HEADER;
994         int mtu = ETH_DATA_LEN;
995         int addend = sizeof(struct iphdr) + 4;
996
997         tunnel = netdev_priv(dev);
998         iph = &tunnel->parms.iph;
999
1000         /* Guess the output device to choose a reasonable mtu and needed_headroom */
1001
1002         if (iph->daddr) {
1003                 struct flowi fl = { .oif = tunnel->parms.link,
1004                                     .nl_u = { .ip4_u =
1005                                               { .daddr = iph->daddr,
1006                                                 .saddr = iph->saddr,
1007                                                 .tos = RT_TOS(iph->tos) } },
1008                                     .proto = IPPROTO_GRE };
1009                 struct rtable *rt;
1010                 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
1011                         tdev = rt->u.dst.dev;
1012                         ip_rt_put(rt);
1013                 }
1014
1015                 if (dev->type != ARPHRD_ETHER)
1016                         dev->flags |= IFF_POINTOPOINT;
1017         }
1018
1019         if (!tdev && tunnel->parms.link)
1020                 tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
1021
1022         if (tdev) {
1023 #ifdef HAVE_NETDEV_NEEDED_HEADROOM
1024                 hlen = tdev->hard_header_len + tdev->needed_headroom;
1025 #else
1026                 hlen = tdev->hard_header_len;
1027 #endif
1028                 mtu = tdev->mtu;
1029         }
1030         dev->iflink = tunnel->parms.link;
1031
1032         /* Precalculate GRE options length */
1033         if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
1034                 if (tunnel->parms.o_flags&GRE_CSUM)
1035                         addend += 4;
1036                 if (tunnel->parms.o_flags&GRE_KEY)
1037                         addend += 4;
1038                 if (tunnel->parms.o_flags&GRE_SEQ)
1039                         addend += 4;
1040         }
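        /*
         * Illustrative sizes for the computation above: with GRE_CSUM,
         * GRE_KEY and GRE_SEQ all set, addend = sizeof(struct iphdr) + 4
         * + 4 + 4 + 4 = 36 bytes; with no options it stays at the
         * 24-byte minimum (20-byte IP header plus the 4-byte GRE base).
         */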
1041 #ifdef HAVE_NETDEV_NEEDED_HEADROOM
1042         dev->needed_headroom = hlen + addend;
1043         mtu -= dev->hard_header_len + addend;
1044 #else
1045         dev->hard_header_len = hlen + addend;
1046         mtu -= addend;
1047 #endif
1048         tunnel->hlen = addend;
1049
1050         if (mtu < 68)
1051                 mtu = 68;
1052
1053         /* XXX: Set the MTU to the maximum possible value.  If we are bridged to a
1054          * device with a larger MTU, then packets will be dropped. */
1055         mtu = 65482;
1056
1057         return mtu;
1058 }
1059
1060 static int
1061 ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1062 {
1063         int err = 0;
1064         struct ip_tunnel_parm p;
1065         struct ip_tunnel *t;
1066         struct net *net = dev_net(dev);
1067         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1068         int add_tunnel, gretap;
1069
1070         switch (cmd) {
1071         case SIOCGETTUNNEL:
1072                 t = NULL;
1073                 if (dev == ign->fb_tunnel_dev) {
1074                         if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1075                                 err = -EFAULT;
1076                                 break;
1077                         }
1078                         t = ipgre_tunnel_locate(net, &p, false, 0);
1079                 }
1080                 if (t == NULL)
1081                         t = netdev_priv(dev);
1082                 memcpy(&p, &t->parms, sizeof(p));
1083                 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1084                         err = -EFAULT;
1085                 break;
1086
1087         case SIOCADDTUNNEL:
1088         case SIOCCHGTUNNEL:
1089         case SIOCADDGRETAP:
1090         case SIOCCHGGRETAP:
1091                 err = -EPERM;
1092                 if (!capable(CAP_NET_ADMIN))
1093                         goto done;
1094
1095                 err = -EFAULT;
1096                 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1097                         goto done;
1098
1099                 err = -EINVAL;
1100                 if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
1101                     p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
1102                     ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
1103                         goto done;
1104
1105                 add_tunnel = (cmd == SIOCADDTUNNEL || cmd == SIOCADDGRETAP);
1106                 gretap = (cmd == SIOCADDGRETAP || cmd == SIOCCHGGRETAP);
1107
1108                 if (!(p.i_flags&GRE_KEY))
1109                         p.i_key = 0;
1110                 if (!(p.o_flags&GRE_KEY))
1111                         p.o_key = 0;
1112
1113                 t = ipgre_tunnel_locate(net, &p, gretap, add_tunnel);
1114
1115                 if (dev != ign->fb_tunnel_dev && !add_tunnel) {
1116                         if (t != NULL) {
1117                                 if (t->dev != dev) {
1118                                         err = -EEXIST;
1119                                         break;
1120                                 }
1121                         } else {
1122                                 unsigned nflags = 0;
1123
1124                                 t = netdev_priv(dev);
1125
1126                                 if (ipv4_is_multicast(p.iph.daddr))
1127                                         nflags = IFF_BROADCAST;
1128                                 else if (p.iph.daddr)
1129                                         nflags = IFF_POINTOPOINT;
1130
1131                                 if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
1132                                         err = -EINVAL;
1133                                         break;
1134                                 }
1135                                 ipgre_tunnel_unlink(ign, t);
1136                                 t->parms.iph.saddr = p.iph.saddr;
1137                                 t->parms.iph.daddr = p.iph.daddr;
1138                                 t->parms.i_key = p.i_key;
1139                                 t->parms.o_key = p.o_key;
1140                                 memcpy(dev->dev_addr, &p.iph.saddr, 4);
1141                                 memcpy(dev->broadcast, &p.iph.daddr, 4);
1142                                 ipgre_tunnel_link(ign, t);
1143                                 netdev_state_change(dev);
1144                         }
1145                 }
1146
1147                 if (t) {
1148                         err = 0;
1149                         if (!add_tunnel) {
1150                                 t->parms.iph.ttl = p.iph.ttl;
1151                                 t->parms.iph.tos = p.iph.tos;
1152                                 t->parms.iph.frag_off = p.iph.frag_off;
1153                                 if (t->parms.link != p.link) {
1154                                         t->parms.link = p.link;
1155                                         dev->mtu = ipgre_tunnel_bind_dev(dev);
1156                                         netdev_state_change(dev);
1157                                 }
1158                         }
1159                         if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
1160                                 err = -EFAULT;
1161                 } else
1162                         err = (add_tunnel ? -ENOBUFS : -ENOENT);
1163                 break;
1164
1165         case SIOCDELTUNNEL:
1166                 err = -EPERM;
1167                 if (!capable(CAP_NET_ADMIN))
1168                         goto done;
1169
1170                 if (dev == ign->fb_tunnel_dev) {
1171                         err = -EFAULT;
1172                         if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1173                                 goto done;
1174                         err = -ENOENT;
1175                         if ((t = ipgre_tunnel_locate(net, &p, false, 0)) == NULL)
1176                                 goto done;
1177                         err = -EPERM;
1178                         if (t == netdev_priv(ign->fb_tunnel_dev))
1179                                 goto done;
1180                         dev = t->dev;
1181                 }
1182                 unregister_netdevice(dev);
1183                 err = 0;
1184                 break;
1185
1186         default:
1187                 err = -EINVAL;
1188         }
1189
1190 done:
1191         return err;
1192 }
1193
1194 #ifndef HAVE_NETDEV_STATS
1195 static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
1196 {
1197         return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
1198 }
1199 #endif
1200
1201 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1202 {
1203         struct ip_tunnel *tunnel = netdev_priv(dev);
1204         if (new_mtu < 68 ||
1205 #ifdef HAVE_NETDEV_NEEDED_HEADROOM
1206         new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
1207 #else
1208         new_mtu > 0xFFF8 - tunnel->hlen)
1209 #endif
1210                 return -EINVAL;
1211         dev->mtu = new_mtu;
1212         return 0;
1213 }
1214
1215 /* A nice toy. Unfortunately, it is useless in real life :-)
1216    It allows one to construct a virtual multiprotocol broadcast "LAN"
1217    over the Internet, provided multicast routing is tuned.
1218
1219
1220    I have no idea whether this bicycle was invented before me,
1221    which is why I had to set ARPHRD_IPGRE to a random value.
1222    I have the impression that Cisco could make something similar,
1223    but this feature is apparently missing in IOS <= 11.2(8).
1224
1225    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1226    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1227
1228    ping -t 255 224.66.66.66
1229
1230    If nobody answers, mbone does not work.
1231
1232    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
1233    ip addr add 10.66.66.<somewhat>/24 dev Universe
1234    ifconfig Universe up
1235    ifconfig Universe add fe80::<Your_real_addr>/10
1236    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
1237    ftp 10.66.66.66
1238    ...
1239    ftp fec0:6666:6666::193.233.7.65
1240    ...
1241
1242  */
1243
1244 #ifdef HAVE_NETDEV_HEADER_OPS
1245 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1246                        unsigned short type,
1247                        const void *daddr, const void *saddr, unsigned len)
1248 #else
1249 static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
1250                         void *daddr, void *saddr, unsigned len)
1251 #endif
1252 {
1253         struct ip_tunnel *t = netdev_priv(dev);
1254         struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1255         __be16 *p = (__be16*)(iph+1);
1256
1257         memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1258         p[0]            = t->parms.o_flags;
1259         p[1]            = htons(type);
1260
1261         /*
1262          *      Set the source hardware address.
1263          */
1264
1265         if (saddr)
1266                 memcpy(&iph->saddr, saddr, 4);
1267
1268         if (daddr) {
1269                 memcpy(&iph->daddr, daddr, 4);
1270                 return t->hlen;
1271         }
1272         if (iph->daddr && !ipv4_is_multicast(iph->daddr))
1273                 return t->hlen;
1274
1275         return -t->hlen;
1276 }
1277
1278 #ifdef HAVE_NETDEV_HEADER_OPS
1279 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1280 #else
1281 static int ipgre_header_parse(struct sk_buff *skb, unsigned char *haddr)
1282 #endif
1283 {
1284         struct iphdr *iph = (struct iphdr *) skb_mac_header(skb);
1285         memcpy(haddr, &iph->saddr, 4);
1286         return 4;
1287 }
1288
1289 #ifdef HAVE_NETDEV_HEADER_OPS
1290 static const struct header_ops ipgre_header_ops = {
1291         .create = ipgre_header,
1292         .parse  = ipgre_header_parse,
1293 };
1294 #endif
1295
1296 #ifdef CONFIG_NET_IPGRE_BROADCAST
1297 static int ipgre_open(struct net_device *dev)
1298 {
1299         struct ip_tunnel *t = netdev_priv(dev);
1300
1301         if (ipv4_is_multicast(t->parms.iph.daddr)) {
1302                 struct flowi fl = { .oif = t->parms.link,
1303                                     .nl_u = { .ip4_u =
1304                                               { .daddr = t->parms.iph.daddr,
1305                                                 .saddr = t->parms.iph.saddr,
1306                                                 .tos = RT_TOS(t->parms.iph.tos) } },
1307                                     .proto = IPPROTO_GRE };
1308                 struct rtable *rt;
1309                 if (ip_route_output_key(dev_net(dev), &rt, &fl))
1310                         return -EADDRNOTAVAIL;
1311                 dev = rt->u.dst.dev;
1312                 ip_rt_put(rt);
1313                 if (__in_dev_get_rtnl(dev) == NULL)
1314                         return -EADDRNOTAVAIL;
1315                 t->mlink = dev->ifindex;
1316                 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
1317         }
1318         return 0;
1319 }
1320
1321 static int ipgre_close(struct net_device *dev)
1322 {
1323         struct ip_tunnel *t = netdev_priv(dev);
1324
1325         if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
1326                 struct in_device *in_dev;
1327                 in_dev = inetdev_by_index(dev_net(dev), t->mlink);
1328                 if (in_dev) {
1329                         ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1330                         in_dev_put(in_dev);
1331                 }
1332         }
1333         return 0;
1334 }
1335
1336 #endif
1337
1338 static void ethtool_getinfo(struct net_device *dev,
1339                             struct ethtool_drvinfo *info)
1340 {
1341         strcpy(info->driver, "ip_gre");
1342         strcpy(info->version, "Open vSwitch "VERSION BUILDNR);
1343         strcpy(info->bus_info, dev->type == ARPHRD_ETHER ? "gretap" : "gre");
1344 }
1345
1346 static struct ethtool_ops ethtool_ops = {
1347         .get_drvinfo = ethtool_getinfo,
1348 };
1349
1350 #ifdef HAVE_NET_DEVICE_OPS
1351 static const struct net_device_ops ipgre_netdev_ops = {
1352         .ndo_init               = ipgre_tunnel_init,
1353         .ndo_uninit             = ipgre_tunnel_uninit,
1354 #ifdef CONFIG_NET_IPGRE_BROADCAST
1355         .ndo_open               = ipgre_open,
1356         .ndo_stop               = ipgre_close,
1357 #endif
1358         .ndo_start_xmit         = ipgre_tunnel_xmit,
1359         .ndo_do_ioctl           = ipgre_tunnel_ioctl,
1360         .ndo_change_mtu         = ipgre_tunnel_change_mtu,
1361 };
1362 #endif
1363
1364 static void ipgre_tunnel_setup(struct net_device *dev)
1365 {
1366 #ifdef HAVE_NET_DEVICE_OPS
1367         dev->netdev_ops         = &ipgre_netdev_ops;
1368 #else
1369         dev->init               = ipgre_tunnel_init;
1370         dev->uninit             = ipgre_tunnel_uninit;
1371         dev->hard_start_xmit    = ipgre_tunnel_xmit;
1372 #ifndef HAVE_NETDEV_STATS
1373         dev->get_stats          = ipgre_tunnel_get_stats;
1374 #endif
1375         dev->do_ioctl           = ipgre_tunnel_ioctl;
1376         dev->change_mtu         = ipgre_tunnel_change_mtu;
1377 #endif /* HAVE_NET_DEVICE_OPS */
1378         dev->destructor         = free_netdev;
1379
1380         dev->type               = ARPHRD_IPGRE;
1381 #ifdef HAVE_NETDEV_NEEDED_HEADROOM
1382         dev->needed_headroom    = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
1383 #else
1384         dev->hard_header_len    = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
1385 #endif
1386         dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
1387         dev->flags              = IFF_NOARP;
1388         dev->iflink             = 0;
1389         dev->addr_len           = 4;
1390         dev->features           |= NETIF_F_NETNS_LOCAL;
1391         dev->priv_flags         &= ~IFF_XMIT_DST_RELEASE;
1392
1393         SET_ETHTOOL_OPS(dev, &ethtool_ops);
1394 }
1395
1396 static int ipgre_tunnel_init(struct net_device *dev)
1397 {
1398         struct ip_tunnel *tunnel;
1399         struct iphdr *iph;
1400
1401         tunnel = netdev_priv(dev);
1402         iph = &tunnel->parms.iph;
1403
1404         tunnel->dev = dev;
1405         strcpy(tunnel->parms.name, dev->name);
1406
1407         memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1408         memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1409
1410         if (iph->daddr) {
1411 #ifdef CONFIG_NET_IPGRE_BROADCAST
1412                 if (ipv4_is_multicast(iph->daddr)) {
1413                         if (!iph->saddr)
1414                                 return -EINVAL;
1415                         dev->flags = IFF_BROADCAST;
1416 #ifdef HAVE_NETDEV_HEADER_OPS
1417                         dev->header_ops = &ipgre_header_ops;
1418 #else
1419                         dev->hard_header = ipgre_header;
1420                         dev->hard_header_parse = ipgre_header_parse;
1421 #endif
1422 #ifndef HAVE_NET_DEVICE_OPS
1423                         dev->open = ipgre_open;
1424                         dev->stop = ipgre_close;
1425 #endif
1426                 }
1427 #endif
1428         } else {
1429 #ifdef HAVE_NETDEV_HEADER_OPS
1430                 dev->header_ops = &ipgre_header_ops;
1431 #else
1432                 dev->hard_header = ipgre_header;
1433                 dev->hard_header_parse = ipgre_header_parse;
1434 #endif
1435         }
1436
1437         return 0;
1438 }
1439
1440 #ifdef HAVE_NET_DEVICE_OPS
1441 static void ipgre_fb_tunnel_init(struct net_device *dev)
1442 #else
1443 static int ipgre_fb_tunnel_init(struct net_device *dev)
1444 #endif
1445 {
1446         struct ip_tunnel *tunnel = netdev_priv(dev);
1447         struct iphdr *iph = &tunnel->parms.iph;
1448         struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id);
1449
1450         tunnel->dev = dev;
1451         strcpy(tunnel->parms.name, dev->name);
1452
1453         iph->version            = 4;
1454         iph->protocol           = IPPROTO_GRE;
1455         iph->ihl                = 5;
1456         tunnel->hlen            = sizeof(struct iphdr) + 4;
1457
1458         dev_hold(dev);
1459         ign->tunnels_wc[0]      = tunnel;
1460
1461 #ifndef HAVE_NET_DEVICE_OPS
1462         return 0;
1463 #endif
1464 }
1465
1466 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
1467 static struct net_protocol ipgre_protocol = {
1468 #else
1469 static const struct net_protocol ipgre_protocol = {
1470 #endif
1471         .handler        =       ipgre_rcv,
1472         .err_handler    =       ipgre_err,
1473 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
1474         .netns_ok       =       1,
1475 #endif
1476 };
1477
1478 static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1479 {
1480         int prio;
1481
1482         for (prio = 0; prio < 4; prio++) {
1483                 int h;
1484                 for (h = 0; h < HASH_SIZE; h++) {
1485                         struct ip_tunnel *t = ign->tunnels[prio][h];
1486
1487                         while (t != NULL) {
1488                                 unregister_netdevice_queue(t->dev, head);
1489                                 t = t->next;
1490                         }
1491                 }
1492         }
1493 }
1494
1495 static int ipgre_init_net(struct net *net)
1496 {
1497         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1498         int err;
1499
1500         ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), GRE_IOCTL_DEVICE,
1501                                            ipgre_tunnel_setup);
1502         if (!ign->fb_tunnel_dev) {
1503                 err = -ENOMEM;
1504                 goto err_alloc_dev;
1505         }
1506         dev_net_set(ign->fb_tunnel_dev, net);
1507
1508 #ifdef HAVE_NET_DEVICE_OPS
1509         ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
1510 #else
1511         ign->fb_tunnel_dev->init = ipgre_fb_tunnel_init;
1512 #endif
1513 #ifndef GRE_IOCTL_ONLY
1514         ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;
1515 #endif
1516
1517         if ((err = register_netdev(ign->fb_tunnel_dev)))
1518                 goto err_reg_dev;
1519
1520         return 0;
1521
1522 err_reg_dev:
1523         free_netdev(ign->fb_tunnel_dev);
1524 err_alloc_dev:
1525         return err;
1526 }
1527
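     /*
      * Per-namespace teardown: collect every tunnel device under the RTNL
      * lock and unregister them in a single batch.
      */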
1528 static void ipgre_exit_net(struct net *net)
1529 {
1530         struct ipgre_net *ign;
1531         LIST_HEAD(list);
1532
1533         ign = net_generic(net, ipgre_net_id);
1534         rtnl_lock();
1535         ipgre_destroy_tunnels(ign, &list);
1536         unregister_netdevice_many(&list);
1537         rtnl_unlock();
1538 }
1539
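     /* Per-network-namespace state and init/exit hooks. */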
1540 static struct pernet_operations ipgre_net_ops = {
1541         .init = ipgre_init_net,
1542         .exit = ipgre_exit_net,
1543         .id   = &ipgre_net_id,
1544         .size = sizeof(struct ipgre_net),
1545 };
1546
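     /*
      * Initialize an Ethernet-style ("gretap") tunnel device and bind it
      * to its underlying route/device so headroom and MTU can be sized.
      */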
1547 static int ipgre_tap_init(struct net_device *dev)
1548 {
1549         struct ip_tunnel *tunnel;
1550
1551         tunnel = netdev_priv(dev);
1552
1553         tunnel->dev = dev;
1554         strcpy(tunnel->parms.name, dev->name);
1555
1556         ipgre_tunnel_bind_dev(dev);
1557
1558         return 0;
1559 }
1560
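     /*
      * On kernels with net_device_ops, gretap devices use this ops table;
      * older kernels get the same handlers assigned directly in
      * ipgre_tap_setup() below.
      */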
1561 #ifdef HAVE_NET_DEVICE_OPS
1562 static const struct net_device_ops ipgre_tap_netdev_ops = {
1563         .ndo_init               = ipgre_tap_init,
1564         .ndo_uninit             = ipgre_tunnel_uninit,
1565         .ndo_start_xmit         = ipgre_tunnel_xmit,
1566         .ndo_set_mac_address    = eth_mac_addr,
1567         .ndo_validate_addr      = eth_validate_addr,
1568         .ndo_do_ioctl           = ipgre_tunnel_ioctl,
1569         .ndo_change_mtu         = ipgre_tunnel_change_mtu,
1570 };
1571 #endif
1572
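     /*
      * netdev setup for "gretap": start from ether_setup() so the device
      * looks like an Ethernet NIC, then wire up the shared GRE transmit,
      * ioctl and MTU-change handlers.
      */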
1573 static void ipgre_tap_setup(struct net_device *dev)
1574 {
1575         ether_setup(dev);
1576
1577 #ifdef HAVE_NET_DEVICE_OPS
1578         dev->netdev_ops         = &ipgre_tap_netdev_ops;
1579 #else
1580         dev->init               = ipgre_tap_init;
1581         dev->uninit             = ipgre_tunnel_uninit;
1582         dev->hard_start_xmit    = ipgre_tunnel_xmit;
1583 #ifndef HAVE_NETDEV_STATS
1584         dev->get_stats          = ipgre_tunnel_get_stats;
1585 #endif
1586         dev->do_ioctl           = ipgre_tunnel_ioctl;
1587         dev->change_mtu         = ipgre_tunnel_change_mtu;
1588 #endif /* HAVE_NET_DEVICE_OPS */
1589         dev->destructor         = free_netdev;
1590
1591         dev->iflink             = 0;
1592         dev->features           |= NETIF_F_NETNS_LOCAL;
1593         dev->tx_queue_len       = 0;
1594
1595         SET_ETHTOOL_OPS(dev, &ethtool_ops);
1596 }
1597
1598 #ifndef GRE_IOCTL_ONLY
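     /*
      * rtnetlink support: attribute validation, parameter parsing and the
      * link ops for the "gre" and "gretap" kinds.  This whole block is
      * compiled out when only the ioctl interface is wanted.
      */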
1599 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
1600 {
1601         __be16 flags;
1602
1603         if (!data)
1604                 return 0;
1605
1606         flags = 0;
1607         if (data[IFLA_GRE_IFLAGS])
1608                 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1609         if (data[IFLA_GRE_OFLAGS])
1610                 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1611         if (flags & (GRE_VERSION|GRE_ROUTING))
1612                 return -EINVAL;
1613
1614         return 0;
1615 }
1616
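     /*
      * gretap additionally validates any supplied L2 address and rejects
      * an explicit all-zero remote address.
      */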
1617 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
1618 {
1619         __be32 daddr;
1620
1621         if (tb[IFLA_ADDRESS]) {
1622                 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1623                         return -EINVAL;
1624                 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1625                         return -EADDRNOTAVAIL;
1626         }
1627
1628         if (!data)
1629                 goto out;
1630
1631         if (data[IFLA_GRE_REMOTE]) {
1632                 memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1633                 if (!daddr)
1634                         return -EINVAL;
1635         }
1636
1637 out:
1638         return ipgre_tunnel_validate(tb, data);
1639 }
1640
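     /*
      * Translate IFLA_GRE_* attributes into an ip_tunnel_parm.  Path-MTU
      * discovery defaults to on: DF is set unless IFLA_GRE_PMTUDISC is
      * present and explicitly zero.
      */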
1641 static void ipgre_netlink_parms(struct nlattr *data[],
1642                                 struct ip_tunnel_parm *parms)
1643 {
1644         memset(parms, 0, sizeof(*parms));
1645
1646         parms->iph.protocol = IPPROTO_GRE;
1647
1648         if (!data)
1649                 return;
1650
1651         if (data[IFLA_GRE_LINK])
1652                 parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1653
1654         if (data[IFLA_GRE_IFLAGS])
1655                 parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
1656
1657         if (data[IFLA_GRE_OFLAGS])
1658                 parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
1659
1660         if (data[IFLA_GRE_IKEY])
1661                 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1662
1663         if (data[IFLA_GRE_OKEY])
1664                 parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1665
1666         if (data[IFLA_GRE_LOCAL])
1667                 parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);
1668
1669         if (data[IFLA_GRE_REMOTE])
1670                 parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);
1671
1672         if (data[IFLA_GRE_TTL])
1673                 parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1674
1675         if (data[IFLA_GRE_TOS])
1676                 parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1677
1678         if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
1679                 parms->iph.frag_off = htons(IP_DF);
1680 }
1681
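     /*
      * netlink "newlink": build the tunnel parameters from the attributes,
      * refuse to duplicate an existing tunnel, give gretap devices a random
      * MAC when none was supplied, and register the device before linking
      * it into the hash tables.
      */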
1682 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)
1683 static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
1684                          struct nlattr *data[])
1685 #else
1686 static int ipgre_newlink(struct net_device *dev, struct nlattr *tb[],
1687                          struct nlattr *data[])
1688 #endif
1689 {
1690         struct ip_tunnel *nt;
1691         struct net *net = dev_net(dev);
1692         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1693         int mtu;
1694         int err;
1695
1696         nt = netdev_priv(dev);
1697         ipgre_netlink_parms(data, &nt->parms);
1698
1699         if (ipgre_tunnel_find(net, &nt->parms, dev->type))
1700                 return -EEXIST;
1701
1702         if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
1703                 random_ether_addr(dev->dev_addr);
1704
1705         mtu = ipgre_tunnel_bind_dev(dev);
1706         if (!tb[IFLA_MTU])
1707                 dev->mtu = mtu;
1708
1709         err = register_netdevice(dev);
1710         if (err)
1711                 goto out;
1712
1713         dev_hold(dev);
1714         ipgre_tunnel_link(ign, nt);
1715
1716 out:
1717         return err;
1718 }
1719
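     /*
      * netlink "changelink": update an existing tunnel in place.  The
      * fallback device cannot be reconfigured, and when the addressing
      * changes the tunnel is unlinked and relinked so it lands in the
      * correct hash bucket.
      */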
1720 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1721                             struct nlattr *data[])
1722 {
1723         struct ip_tunnel *t, *nt;
1724         struct net *net = dev_net(dev);
1725         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1726         struct ip_tunnel_parm p;
1727         int mtu;
1728
1729         if (dev == ign->fb_tunnel_dev)
1730                 return -EINVAL;
1731
1732         nt = netdev_priv(dev);
1733         ipgre_netlink_parms(data, &p);
1734
1735         t = ipgre_tunnel_locate(net, &p, false, 0);
1736
1737         if (t) {
1738                 if (t->dev != dev)
1739                         return -EEXIST;
1740         } else {
1741                 t = nt;
1742
1743                 if (dev->type != ARPHRD_ETHER) {
1744                         unsigned nflags = 0;
1745
1746                         if (ipv4_is_multicast(p.iph.daddr))
1747                                 nflags = IFF_BROADCAST;
1748                         else if (p.iph.daddr)
1749                                 nflags = IFF_POINTOPOINT;
1750
1751                         if ((dev->flags ^ nflags) &
1752                             (IFF_POINTOPOINT | IFF_BROADCAST))
1753                                 return -EINVAL;
1754                 }
1755
1756                 ipgre_tunnel_unlink(ign, t);
1757                 t->parms.iph.saddr = p.iph.saddr;
1758                 t->parms.iph.daddr = p.iph.daddr;
1759                 t->parms.i_key = p.i_key;
1760                 if (dev->type != ARPHRD_ETHER) {
1761                         memcpy(dev->dev_addr, &p.iph.saddr, 4);
1762                         memcpy(dev->broadcast, &p.iph.daddr, 4);
1763                 }
1764                 ipgre_tunnel_link(ign, t);
1765                 netdev_state_change(dev);
1766         }
1767
1768         t->parms.o_key = p.o_key;
1769         t->parms.iph.ttl = p.iph.ttl;
1770         t->parms.iph.tos = p.iph.tos;
1771         t->parms.iph.frag_off = p.iph.frag_off;
1772
1773         if (t->parms.link != p.link) {
1774                 t->parms.link = p.link;
1775                 mtu = ipgre_tunnel_bind_dev(dev);
1776                 if (!tb[IFLA_MTU])
1777                         dev->mtu = mtu;
1778                 netdev_state_change(dev);
1779         }
1780
1781         return 0;
1782 }
1783
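     /* Worst-case netlink attribute payload produced by ipgre_fill_info(). */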
1784 static size_t ipgre_get_size(const struct net_device *dev)
1785 {
1786         return
1787                 /* IFLA_GRE_LINK */
1788                 nla_total_size(4) +
1789                 /* IFLA_GRE_IFLAGS */
1790                 nla_total_size(2) +
1791                 /* IFLA_GRE_OFLAGS */
1792                 nla_total_size(2) +
1793                 /* IFLA_GRE_IKEY */
1794                 nla_total_size(4) +
1795                 /* IFLA_GRE_OKEY */
1796                 nla_total_size(4) +
1797                 /* IFLA_GRE_LOCAL */
1798                 nla_total_size(4) +
1799                 /* IFLA_GRE_REMOTE */
1800                 nla_total_size(4) +
1801                 /* IFLA_GRE_TTL */
1802                 nla_total_size(1) +
1803                 /* IFLA_GRE_TOS */
1804                 nla_total_size(1) +
1805                 /* IFLA_GRE_PMTUDISC */
1806                 nla_total_size(1) +
1807                 0;
1808 }
1809
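     /*
      * Dump the current tunnel parameters.  The NLA_PUT_* macros jump to
      * nla_put_failure when the message runs out of room.
      */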
1810 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1811 {
1812         struct ip_tunnel *t = netdev_priv(dev);
1813         struct ip_tunnel_parm *p = &t->parms;
1814
1815         NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link);
1816         NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags);
1817         NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags);
1818         NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key);
1819         NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key);
1820         NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr);
1821         NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr);
1822         NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl);
1823         NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos);
1824         NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)));
1825
1826         return 0;
1827
1828 nla_put_failure:
1829         return -EMSGSIZE;
1830 }
1831
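     /* Attribute policy shared by the "gre" and "gretap" link ops. */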
1832 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1833         [IFLA_GRE_LINK]         = { .type = NLA_U32 },
1834         [IFLA_GRE_IFLAGS]       = { .type = NLA_U16 },
1835         [IFLA_GRE_OFLAGS]       = { .type = NLA_U16 },
1836         [IFLA_GRE_IKEY]         = { .type = NLA_U32 },
1837         [IFLA_GRE_OKEY]         = { .type = NLA_U32 },
1838         [IFLA_GRE_LOCAL]        = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
1839         [IFLA_GRE_REMOTE]       = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
1840         [IFLA_GRE_TTL]          = { .type = NLA_U8 },
1841         [IFLA_GRE_TOS]          = { .type = NLA_U8 },
1842         [IFLA_GRE_PMTUDISC]     = { .type = NLA_U8 },
1843 };
1844
1845 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1846         .kind           = "gre",
1847         .maxtype        = IFLA_GRE_MAX,
1848         .policy         = ipgre_policy,
1849         .priv_size      = sizeof(struct ip_tunnel),
1850         .setup          = ipgre_tunnel_setup,
1851         .validate       = ipgre_tunnel_validate,
1852         .newlink        = ipgre_newlink,
1853         .changelink     = ipgre_changelink,
1854         .get_size       = ipgre_get_size,
1855         .fill_info      = ipgre_fill_info,
1856 };
1857
1858 static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1859         .kind           = "gretap",
1860         .maxtype        = IFLA_GRE_MAX,
1861         .policy         = ipgre_policy,
1862         .priv_size      = sizeof(struct ip_tunnel),
1863         .setup          = ipgre_tap_setup,
1864         .validate       = ipgre_tap_validate,
1865         .newlink        = ipgre_newlink,
1866         .changelink     = ipgre_changelink,
1867         .get_size       = ipgre_get_size,
1868         .fill_info      = ipgre_fill_info,
1869 };
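     /*
      * With these link ops registered, tunnels can be managed from
      * userspace with iproute2; for example (illustrative addresses):
      *
      *   ip link add gre1 type gretap local 192.0.2.1 remote 192.0.2.2
      */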
1870 #endif
1871
1872 /*
1873  *      And now the module code and kernel interface.
1874  */
1875
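     /*
      * Module init: register the GRE protocol handler, the per-namespace
      * operations and (unless built ioctl-only) the rtnl link ops, undoing
      * each step in reverse order if a later one fails.
      */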
1876 static int __init ipgre_init(void)
1877 {
1878         int err;
1879
1880         printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
1881
1882         if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
1883                 printk(KERN_INFO "ipgre init: can't add protocol\n");
1884                 return -EAGAIN;
1885         }
1886
1887         err = register_pernet_device(&ipgre_net_ops);
1888         if (err < 0)
1889                 goto gen_device_failed;
1890
1891 #ifndef GRE_IOCTL_ONLY
1892         err = rtnl_link_register(&ipgre_link_ops);
1893         if (err < 0)
1894                 goto rtnl_link_failed;
1895
1896         err = rtnl_link_register(&ipgre_tap_ops);
1897         if (err < 0)
1898                 goto tap_ops_failed;
1899 #endif
1900
1901 out:
1902         return err;
1903
1904 #ifndef GRE_IOCTL_ONLY
1905 tap_ops_failed:
1906         rtnl_link_unregister(&ipgre_link_ops);
1907 rtnl_link_failed:
1908         unregister_pernet_device(&ipgre_net_ops);
1909 #endif
1910 gen_device_failed:
1911         inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
1912         goto out;
1913
1914 }
1915
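     /* Module exit: tear everything down in the reverse order of ipgre_init(). */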
1916 static void __exit ipgre_fini(void)
1917 {
1918 #ifndef GRE_IOCTL_ONLY
1919         rtnl_link_unregister(&ipgre_tap_ops);
1920         rtnl_link_unregister(&ipgre_link_ops);
1921 #endif
1922         unregister_pernet_device(&ipgre_net_ops);
1923         if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
1924                 printk(KERN_INFO "ipgre close: can't remove protocol\n");
1925 }
1926
1927 module_init(ipgre_init);
1928 module_exit(ipgre_fini);
1929 MODULE_DESCRIPTION("GRE over IPv4 tunneling driver");
1930 MODULE_LICENSE("GPL");
1931 #ifndef GRE_IOCTL_ONLY
1932 MODULE_ALIAS_RTNL_LINK("gre");
1933 MODULE_ALIAS_RTNL_LINK("gretap");
1934 #endif
1935