datapath: Handle packets with precomputed checksums.
[sliver-openvswitch.git] / datapath / linux-2.6 / compat-2.6 / ip_gre.c
1 /* ip_gre driver port to Linux 2.6.18 and greater */
2
3 #include <linux/version.h>
4 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5 #define HAVE_NETDEV_STATS
6 #endif
7 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
8 #define HAVE_NETDEV_HEADER_OPS
9 #endif
10 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
11 #define HAVE_NETDEV_NEEDED_HEADROOM
12 #endif
13
14 /*
15  *      Linux NET3:     GRE over IP protocol decoder.
16  *
17  *      Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
18  *
19  *      This program is free software; you can redistribute it and/or
20  *      modify it under the terms of the GNU General Public License
21  *      as published by the Free Software Foundation; either version
22  *      2 of the License, or (at your option) any later version.
23  *
24  */
25
26 #include <linux/capability.h>
27 #include <linux/ethtool.h>
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/kernel.h>
31 #include <asm/uaccess.h>
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/in.h>
35 #include <linux/tcp.h>
36 #include <linux/udp.h>
37 #include <linux/if_arp.h>
38 #include <linux/mroute.h>
39 #include <linux/init.h>
40 #include <linux/in6.h>
41 #include <linux/inetdevice.h>
42 #include <linux/igmp.h>
43 #include <linux/netfilter_ipv4.h>
44 #include <linux/etherdevice.h>
45 #include <linux/if_ether.h>
46
47 #include <net/sock.h>
48 #include <net/ip.h>
49 #include <net/icmp.h>
50 #include <net/protocol.h>
51 #include <net/ipip.h>
52 #include <net/arp.h>
53 #include <net/checksum.h>
54 #include <net/dsfield.h>
55 #include <net/inet_ecn.h>
56 #include <net/xfrm.h>
57 #include <net/net_namespace.h>
58 #include <net/netns/generic.h>
59
60 #ifdef CONFIG_IPV6
61 #include <net/ipv6.h>
62 #include <net/ip6_fib.h>
63 #include <net/ip6_route.h>
64 #endif
65
66 #include "compat.h"
67 #include "openvswitch/gre.h"
68
69 #ifndef GRE_IOCTL_ONLY
70 #include <net/rtnetlink.h>
71 #endif
72
73 /*
74    Problems & solutions
75    --------------------
76
77    1. The most important issue is detecting local dead loops.
78    They would cause a complete host lockup in transmit, which
79    would be "resolved" by stack overflow or, if queueing is enabled,
80    by infinite looping in net_bh.
81
82    We cannot track such dead loops during route installation;
83    it is an infeasible task. The most general solution would be
84    to keep an skb->encapsulation counter (a sort of local ttl)
85    and silently drop the packet when it expires. It is the best
86    solution, but it supposes maintaining a new variable in ALL
87    skbs, even if no tunneling is used.
88
89    Current solution: HARD_TX_LOCK lock breaks dead loops.
90
91
92
93    2. Networking dead loops would not kill routers, but would really
94    kill the network. The IP hop limit plays the role of "t->recursion" in this case,
95    if we copy it from the packet being encapsulated to the upper header.
96    It is a very good solution, but it introduces two problems:
97
98    - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
99      do not work over tunnels.
100    - traceroute does not work. I planned to relay ICMP from the tunnel,
101      so that this problem would be solved and the traceroute output
102      would be even more informative. This idea turned out to be wrong:
103      only Linux complies with rfc1812 now (yes, guys, Linux is the only
104      true router now :-)); all other routers (at least, those in my neighbourhood)
105      return only 8 bytes of payload. That is the end of it.
106
107    Hence, if we want OSPF to work or traceroute to say something reasonable,
108    we should search for another solution.
109
110    One of them is to parse the packet, trying to detect inner encapsulation
111    made by our own node. It is difficult or even impossible, especially
112    taking fragmentation into account. In short, it is not a solution at all.
113
114    Current solution: The solution was UNEXPECTEDLY SIMPLE.
115    We force the DF flag on tunnels with a preconfigured hop limit,
116    that is ALL. :-) Well, it does not remove the problem completely,
117    but the exponential growth of network traffic is changed to linear
118    (branches that exceed the pmtu are pruned) and the tunnel mtu
119    quickly degrades to a value <68, where looping stops.
120    Yes, it is not good if there is a router in the loop
121    which does not force DF, even when the encapsulating packets have DF set.
122    But it is not our problem! Nobody could accuse us; we did
123    all that we could. Even if it was your gated that injected the
124    fatal route into the network, even if it were you who configured the
125    fatal static route: you are innocent. :-)
126
127
128
129    3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
130    practically identical code. It would be good to glue them
131    together, but it is not very evident how to make them modular.
132    sit is an integral part of IPv6; ipip and gre are naturally modular.
133    We could extract the common parts (hash table, ioctl, etc.)
134    into a separate module (ip_tunnel.c).
135
136    Alexey Kuznetsov.
137  */
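/*
 * Editor's note -- a minimal sketch, not part of the original text: the
 * "force DF on tunnels with a preconfigured hop limit" rule from item 2
 * above is applied when tunnel parameters are configured; the ioctl
 * handler later in this file does essentially
 *
 *	if (p.iph.ttl)
 *		p.iph.frag_off |= htons(IP_DF);
 *
 * With DF set on the outer header, any looping branch that exceeds the
 * path MTU is pruned, so traffic growth stays linear instead of
 * exponential.
 */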
138
139 #ifndef GRE_IOCTL_ONLY
140 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
141 static struct rtnl_link_ops ipgre_tap_ops __read_mostly;
142 #endif
143 static int ipgre_tunnel_init(struct net_device *dev);
144 static void ipgre_tunnel_setup(struct net_device *dev);
145 static void ipgre_tap_setup(struct net_device *dev);
146 static int ipgre_tunnel_bind_dev(struct net_device *dev);
147
148 #define HASH_SIZE  16
149
150 static int ipgre_net_id;
151 struct ipgre_net {
152         struct ip_tunnel *tunnels[4][HASH_SIZE];
153
154         struct net_device *fb_tunnel_dev;
155 };
156
157 /* Tunnel hash table */
158
159 /*
160    4 hash tables:
161
162    3: (remote,local)
163    2: (remote,*)
164    1: (*,local)
165    0: (*,*)
166
167    We require an exact key match, i.e. if a key is present in the packet
168    it will match only a tunnel with the same key; if it is not present,
169    it will match only a keyless tunnel.
170
171    All keyless packets, if they do not match a configured keyless tunnel,
172    will match the fallback tunnel.
173  */
174
175 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
176
177 #define tunnels_r_l     tunnels[3]
178 #define tunnels_r       tunnels[2]
179 #define tunnels_l       tunnels[1]
180 #define tunnels_wc      tunnels[0]
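/*
 * Editor's sketch (not in the original source): how a tunnel is placed in
 * a bucket, mirroring __ipgre_bucket() below.  HASH() folds a 32-bit value
 * down to 4 bits; the key always contributes, and a unicast remote address
 * is XORed in as well:
 *
 *	unsigned h = HASH(key);
 *	int prio = 0;
 *
 *	if (local)
 *		prio |= 1;
 *	if (remote && !ipv4_is_multicast(remote)) {
 *		prio |= 2;
 *		h ^= HASH(remote);
 *	}
 *	return &ign->tunnels[prio][h];
 */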
181
182 static DEFINE_RWLOCK(ipgre_lock);
183
184 /* Given src, dst and key, find the appropriate tunnel for an input packet. */
185
186 static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
187                                               __be32 remote, __be32 local,
188                                               __be32 key, __be16 gre_proto)
189 {
190         struct net *net = dev_net(dev);
191         int link = dev->ifindex;
192         unsigned h0 = HASH(remote);
193         unsigned h1 = HASH(key);
194         struct ip_tunnel *t, *cand = NULL;
195         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
196         int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
197                        ARPHRD_ETHER : ARPHRD_IPGRE;
198         int score, cand_score = 4;
199
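        /* Each loop below prefers an exact match: a tunnel whose link and
         * device type both match scores 0 and is returned immediately.  A
         * mismatched link adds 1, a mismatched device type adds 2, and the
         * lowest-scoring candidate seen so far is kept as a fallback
         * (cand_score starts above the worst possible score of 3). */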
200         for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) {
201                 if (local != t->parms.iph.saddr ||
202                     remote != t->parms.iph.daddr ||
203                     key != t->parms.i_key ||
204                     !(t->dev->flags & IFF_UP))
205                         continue;
206
207                 if (t->dev->type != ARPHRD_IPGRE &&
208                     t->dev->type != dev_type)
209                         continue;
210
211                 score = 0;
212                 if (t->parms.link != link)
213                         score |= 1;
214                 if (t->dev->type != dev_type)
215                         score |= 2;
216                 if (score == 0)
217                         return t;
218
219                 if (score < cand_score) {
220                         cand = t;
221                         cand_score = score;
222                 }
223         }
224
225         for (t = ign->tunnels_r[h0^h1]; t; t = t->next) {
226                 if (remote != t->parms.iph.daddr ||
227                     key != t->parms.i_key ||
228                     !(t->dev->flags & IFF_UP))
229                         continue;
230
231                 if (t->dev->type != ARPHRD_IPGRE &&
232                     t->dev->type != dev_type)
233                         continue;
234
235                 score = 0;
236                 if (t->parms.link != link)
237                         score |= 1;
238                 if (t->dev->type != dev_type)
239                         score |= 2;
240                 if (score == 0)
241                         return t;
242
243                 if (score < cand_score) {
244                         cand = t;
245                         cand_score = score;
246                 }
247         }
248
249         for (t = ign->tunnels_l[h1]; t; t = t->next) {
250                 if ((local != t->parms.iph.saddr &&
251                      (local != t->parms.iph.daddr ||
252                       !ipv4_is_multicast(local))) ||
253                     key != t->parms.i_key ||
254                     !(t->dev->flags & IFF_UP))
255                         continue;
256
257                 if (t->dev->type != ARPHRD_IPGRE &&
258                     t->dev->type != dev_type)
259                         continue;
260
261                 score = 0;
262                 if (t->parms.link != link)
263                         score |= 1;
264                 if (t->dev->type != dev_type)
265                         score |= 2;
266                 if (score == 0)
267                         return t;
268
269                 if (score < cand_score) {
270                         cand = t;
271                         cand_score = score;
272                 }
273         }
274
275         for (t = ign->tunnels_wc[h1]; t; t = t->next) {
276                 if (t->parms.i_key != key ||
277                     !(t->dev->flags & IFF_UP))
278                         continue;
279
280                 if (t->dev->type != ARPHRD_IPGRE &&
281                     t->dev->type != dev_type)
282                         continue;
283
284                 score = 0;
285                 if (t->parms.link != link)
286                         score |= 1;
287                 if (t->dev->type != dev_type)
288                         score |= 2;
289                 if (score == 0)
290                         return t;
291
292                 if (score < cand_score) {
293                         cand = t;
294                         cand_score = score;
295                 }
296         }
297
298         if (cand != NULL)
299                 return cand;
300
301         if (ign->fb_tunnel_dev->flags & IFF_UP)
302                 return netdev_priv(ign->fb_tunnel_dev);
303
304         return NULL;
305 }
306
307 static struct ip_tunnel **__ipgre_bucket(struct ipgre_net *ign,
308                 struct ip_tunnel_parm *parms)
309 {
310         __be32 remote = parms->iph.daddr;
311         __be32 local = parms->iph.saddr;
312         __be32 key = parms->i_key;
313         unsigned h = HASH(key);
314         int prio = 0;
315
316         if (local)
317                 prio |= 1;
318         if (remote && !ipv4_is_multicast(remote)) {
319                 prio |= 2;
320                 h ^= HASH(remote);
321         }
322
323         return &ign->tunnels[prio][h];
324 }
325
326 static inline struct ip_tunnel **ipgre_bucket(struct ipgre_net *ign,
327                 struct ip_tunnel *t)
328 {
329         return __ipgre_bucket(ign, &t->parms);
330 }
331
332 static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
333 {
334         struct ip_tunnel **tp = ipgre_bucket(ign, t);
335
336         t->next = *tp;
337         write_lock_bh(&ipgre_lock);
338         *tp = t;
339         write_unlock_bh(&ipgre_lock);
340 }
341
342 static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
343 {
344         struct ip_tunnel **tp;
345
346         for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) {
347                 if (t == *tp) {
348                         write_lock_bh(&ipgre_lock);
349                         *tp = t->next;
350                         write_unlock_bh(&ipgre_lock);
351                         break;
352                 }
353         }
354 }
355
356 static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
357                                            struct ip_tunnel_parm *parms,
358                                            int type)
359 {
360         __be32 remote = parms->iph.daddr;
361         __be32 local = parms->iph.saddr;
362         __be32 key = parms->i_key;
363         int link = parms->link;
364         struct ip_tunnel *t, **tp;
365         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
366
367         for (tp = __ipgre_bucket(ign, parms); (t = *tp) != NULL; tp = &t->next)
368                 if (local == t->parms.iph.saddr &&
369                     remote == t->parms.iph.daddr &&
370                     key == t->parms.i_key &&
371                     link == t->parms.link &&
372                     type == t->dev->type)
373                         break;
374
375         return t;
376 }
377
378 static struct ip_tunnel * ipgre_tunnel_locate(struct net *net,
379                 struct ip_tunnel_parm *parms, int gretap, int create)
380 {
381         struct ip_tunnel *t, *nt;
382         struct net_device *dev;
383         char name[IFNAMSIZ];
384         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
385
386         t = ipgre_tunnel_find(net, parms, gretap ? ARPHRD_ETHER : ARPHRD_IPGRE);
387         if (t || !create)
388                 return t;
389
390         if (parms->name[0])
391                 strlcpy(name, parms->name, IFNAMSIZ);
392         else
393                 sprintf(name, "gre%%d");
394
395         dev = alloc_netdev(sizeof(*t), name, gretap ? ipgre_tap_setup
396                                                     : ipgre_tunnel_setup);
397         if (!dev)
398                 return NULL;
399
400         dev_net_set(dev, net);
401
402         if (strchr(name, '%')) {
403                 if (dev_alloc_name(dev, name) < 0)
404                         goto failed_free;
405         }
406
407         if (gretap)
408                 random_ether_addr(dev->dev_addr);
409
410 #ifndef GRE_IOCTL_ONLY
411         dev->rtnl_link_ops = gretap ? &ipgre_tap_ops : &ipgre_link_ops;
412 #endif
413         nt = netdev_priv(dev);
414         nt->parms = *parms;
415
416         dev->mtu = ipgre_tunnel_bind_dev(dev);
417
418         if (register_netdevice(dev) < 0)
419                 goto failed_free;
420
421         dev_hold(dev);
422         ipgre_tunnel_link(ign, nt);
423         return nt;
424
425 failed_free:
426         free_netdev(dev);
427         return NULL;
428 }
429
430 static void ipgre_tunnel_uninit(struct net_device *dev)
431 {
432         struct net *net = dev_net(dev);
433         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
434
435         ipgre_tunnel_unlink(ign, netdev_priv(dev));
436         dev_put(dev);
437 }
438
439
440 static void ipgre_err(struct sk_buff *skb, u32 info)
441 {
442
443 /* All the routers (except for Linux) return only
444    8 bytes of packet payload. This means that precise relaying of
445    ICMP in the real Internet is absolutely infeasible.
446
447    Moreover, Cisco "wise men" put the GRE key in the third word
448    of the GRE header. That makes it impossible to maintain even soft state for keyed
449    GRE tunnels with checksums enabled. Tell them "thank you".
450
451    Well, I wonder: rfc1812 was written by a Cisco employee, so
452    why the hell do these idiots break standards established
453    by themselves???
454  */
455
456         struct iphdr *iph = (struct iphdr *)skb->data;
457         __be16       *p = (__be16*)(skb->data+(iph->ihl<<2));
458         int grehlen = (iph->ihl<<2) + 4;
459         const int type = icmp_hdr(skb)->type;
460         const int code = icmp_hdr(skb)->code;
461         struct ip_tunnel *t;
462         __be16 flags;
463
464         flags = p[0];
465         if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
466                 if (flags&(GRE_VERSION|GRE_ROUTING))
467                         return;
468                 if (flags&GRE_KEY) {
469                         grehlen += 4;
470                         if (flags&GRE_CSUM)
471                                 grehlen += 4;
472                 }
473         }
474
475         /* If only 8 bytes were returned, a keyed message will be dropped here. */
476         if (skb_headlen(skb) < grehlen)
477                 return;
478
479         switch (type) {
480         default:
481         case ICMP_PARAMETERPROB:
482                 return;
483
484         case ICMP_DEST_UNREACH:
485                 switch (code) {
486                 case ICMP_SR_FAILED:
487                 case ICMP_PORT_UNREACH:
488                         /* Impossible event. */
489                         return;
490                 case ICMP_FRAG_NEEDED:
491                         /* Soft state for pmtu is maintained by IP core. */
492                         return;
493                 default:
494                         /* All others are translated to HOST_UNREACH.
495                            rfc2003 contains "deep thoughts" about NET_UNREACH,
496                            I believe they are just ether pollution. --ANK
497                          */
498                         break;
499                 }
500                 break;
501         case ICMP_TIME_EXCEEDED:
502                 if (code != ICMP_EXC_TTL)
503                         return;
504                 break;
505         }
506
507         read_lock(&ipgre_lock);
508         t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
509                                 flags & GRE_KEY ?
510                                 *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
511                                 p[1]);
512         if (t == NULL || t->parms.iph.daddr == 0 ||
513             ipv4_is_multicast(t->parms.iph.daddr))
514                 goto out;
515
516         if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
517                 goto out;
518
519         if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
520                 t->err_count++;
521         else
522                 t->err_count = 1;
523         t->err_time = jiffies;
524 out:
525         read_unlock(&ipgre_lock);
526         return;
527 }
528
529 static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
530 {
531         if (INET_ECN_is_ce(iph->tos)) {
532                 if (skb->protocol == htons(ETH_P_IP)) {
533                         IP_ECN_set_ce(ip_hdr(skb));
534                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
535                         IP6_ECN_set_ce(ipv6_hdr(skb));
536                 }
537         }
538 }
539
540 static inline u8
541 ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
542 {
543         u8 inner = 0;
544         if (skb->protocol == htons(ETH_P_IP))
545                 inner = old_iph->tos;
546         else if (skb->protocol == htons(ETH_P_IPV6))
547                 inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
548         return INET_ECN_encapsulate(tos, inner);
549 }
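/*
 * Editor's note on the two helpers above, assuming the stock
 * <net/inet_ecn.h> primitives: on decapsulation a CE mark on the outer
 * header is propagated into the inner IPv4/IPv6 header, and on
 * encapsulation the outer TOS keeps its configured DSCP bits while the
 * ECN field is copied from the inner header, except that an inner CE mark
 * is sent as ECT(0) so the outer path can apply its own congestion
 * marking.
 */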
550
551 static int ipgre_rcv(struct sk_buff *skb)
552 {
553         struct iphdr *iph;
554         u8     *h;
555         __be16    flags;
556         __sum16   csum = 0;
557         __be32 key = 0;
558         u32    seqno = 0;
559         struct ip_tunnel *tunnel;
560         int    offset = 4;
561         __be16 gre_proto;
562         unsigned int len;
563
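        /* 16 bytes is the largest GRE header parsed below: 4 bytes of
         * flags and protocol plus up to three optional 4-byte words
         * (checksum, key and sequence number). */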
564         if (!pskb_may_pull(skb, 16))
565                 goto drop_nolock;
566
567         iph = ip_hdr(skb);
568         h = skb->data;
569         flags = *(__be16*)h;
570
571         if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
572                 /* - Version must be 0.
573                    - We do not support routing headers.
574                  */
575                 if (flags&(GRE_VERSION|GRE_ROUTING))
576                         goto drop_nolock;
577
578                 if (flags&GRE_CSUM) {
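                        /* Verify the GRE checksum.  If the device already
                         * summed the whole packet (CHECKSUM_COMPLETE),
                         * folding skb->csum yields zero for a valid packet
                         * and we are done; otherwise fall through and
                         * checksum in software, caching the result as
                         * CHECKSUM_COMPLETE.  Other ip_summed values
                         * (e.g. packets arriving with a precomputed or
                         * already-verified checksum) leave csum at its
                         * initial zero and are accepted as-is. */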
579                         switch (skb->ip_summed) {
580                         case CHECKSUM_COMPLETE:
581                                 csum = csum_fold(skb->csum);
582                                 if (!csum)
583                                         break;
584                                 /* fall through */
585                         case CHECKSUM_NONE:
586                                 skb->csum = 0;
587                                 csum = __skb_checksum_complete(skb);
588                                 skb->ip_summed = CHECKSUM_COMPLETE;
589                         }
590                         offset += 4;
591                 }
592                 if (flags&GRE_KEY) {
593                         key = *(__be32*)(h + offset);
594                         offset += 4;
595                 }
596                 if (flags&GRE_SEQ) {
597                         seqno = ntohl(*(__be32*)(h + offset));
598                         offset += 4;
599                 }
600         }
601
602         gre_proto = *(__be16 *)(h + 2);
603
604         read_lock(&ipgre_lock);
605         if ((tunnel = ipgre_tunnel_lookup(skb->dev,
606                                           iph->saddr, iph->daddr, key,
607                                           gre_proto))) {
608                 struct net_device_stats *stats;
609 #ifdef HAVE_NETDEV_STATS
610                 stats = &tunnel->dev->stats;
611 #else
612                 stats = &tunnel->stat;
613 #endif
614
615                 secpath_reset(skb);
616
617                 skb->protocol = gre_proto;
618                 /* WCCP version 1 and 2 protocol decoding.
619                  * - Change protocol to IP
620                  * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
621                  */
622                 if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
623                         skb->protocol = htons(ETH_P_IP);
624                         if ((*(h + offset) & 0xF0) != 0x40)
625                                 offset += 4;
626                 }
627
628                 skb->mac_header = skb->network_header;
629                 __pskb_pull(skb, offset);
630                 skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
631                 skb->pkt_type = PACKET_HOST;
632 #ifdef CONFIG_NET_IPGRE_BROADCAST
633                 if (ipv4_is_multicast(iph->daddr)) {
634                         /* Looped back packet, drop it! */
635                         if (skb_rtable(skb)->fl.iif == 0)
636                                 goto drop;
637                         stats->multicast++;
638                         skb->pkt_type = PACKET_BROADCAST;
639                 }
640 #endif
641
642                 if (((flags&GRE_CSUM) && csum) ||
643                     (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
644                         stats->rx_crc_errors++;
645                         stats->rx_errors++;
646                         goto drop;
647                 }
648                 if (tunnel->parms.i_flags&GRE_SEQ) {
649                         if (!(flags&GRE_SEQ) ||
650                             (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
651                                 stats->rx_fifo_errors++;
652                                 stats->rx_errors++;
653                                 goto drop;
654                         }
655                         tunnel->i_seqno = seqno + 1;
656                 }
657
658                 len = skb->len;
659
660                 /* Warning: All skb pointers will be invalidated! */
661                 if (tunnel->dev->type == ARPHRD_ETHER) {
662                         if (!pskb_may_pull(skb, ETH_HLEN)) {
663                                 stats->rx_length_errors++;
664                                 stats->rx_errors++;
665                                 goto drop;
666                         }
667
668                         iph = ip_hdr(skb);
669                         skb->protocol = eth_type_trans(skb, tunnel->dev);
670                         skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
671                 }
672
673                 stats->rx_packets++;
674                 stats->rx_bytes += len;
675                 skb->dev = tunnel->dev;
676                 skb_dst_drop(skb);
677                 nf_reset(skb);
678
679                 skb_reset_network_header(skb);
680                 ipgre_ecn_decapsulate(iph, skb);
681
682                 netif_rx(skb);
683                 read_unlock(&ipgre_lock);
684                 return(0);
685         }
686         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
687
688 drop:
689         read_unlock(&ipgre_lock);
690 drop_nolock:
691         kfree_skb(skb);
692         return(0);
693 }
694
695 static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
696 {
697         struct ip_tunnel *tunnel = netdev_priv(dev);
698         struct net_device_stats *stats;
699         struct iphdr  *old_iph = ip_hdr(skb);
700         struct iphdr  *tiph;
701         u8     tos;
702         __be16 df;
703         struct rtable *rt;                      /* Route to the other host */
704         struct net_device *tdev;                /* Device to other host */
705         struct iphdr  *iph;                     /* Our new IP header */
706         unsigned int max_headroom;              /* The extra header space needed */
707         int    gre_hlen;
708         __be32 dst;
709         int    mtu;
710
711 #ifdef HAVE_NETDEV_STATS
712         stats = &tunnel->dev->stats;
713 #else
714         stats = &tunnel->stat;
715 #endif
716
717         if (dev->type == ARPHRD_ETHER)
718                 IPCB(skb)->flags = 0;
719
720 #ifdef HAVE_NETDEV_HEADER_OPS
721         if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
722 #else
723         if (dev->hard_header && dev->type == ARPHRD_IPGRE) {
724 #endif
725                 gre_hlen = 0;
726                 tiph = (struct iphdr *)skb->data;
727         } else {
728                 gre_hlen = tunnel->hlen;
729                 tiph = &tunnel->parms.iph;
730         }
731
732         if ((dst = tiph->daddr) == 0) {
733                 /* NBMA tunnel */
734
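                /* No fixed destination is configured (NBMA mode), so the
                 * outer destination is recovered per packet: for IPv4 it
                 * comes from the route's gateway, for IPv6 from an
                 * IPv4-compatible neighbour address (falling back to the
                 * inner destination address if the neighbour entry is
                 * unspecified). */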
735                 if (skb_dst(skb) == NULL) {
736                         stats->tx_fifo_errors++;
737                         goto tx_error;
738                 }
739
740                 if (skb->protocol == htons(ETH_P_IP)) {
741                         rt = skb_rtable(skb);
742                         if ((dst = rt->rt_gateway) == 0)
743                                 goto tx_error_icmp;
744                 }
745 #ifdef CONFIG_IPV6
746                 else if (skb->protocol == htons(ETH_P_IPV6)) {
747                         struct in6_addr *addr6;
748                         int addr_type;
749                         struct neighbour *neigh = skb_dst(skb)->neighbour;
750
751                         if (neigh == NULL)
752                                 goto tx_error;
753
754                         addr6 = (struct in6_addr *)&neigh->primary_key;
755                         addr_type = ipv6_addr_type(addr6);
756
757                         if (addr_type == IPV6_ADDR_ANY) {
758                                 addr6 = &ipv6_hdr(skb)->daddr;
759                                 addr_type = ipv6_addr_type(addr6);
760                         }
761
762                         if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
763                                 goto tx_error_icmp;
764
765                         dst = addr6->s6_addr32[3];
766                 }
767 #endif
768                 else
769                         goto tx_error;
770         }
771
772         tos = tiph->tos;
773         if (tos == 1) {
774                 tos = 0;
775                 if (skb->protocol == htons(ETH_P_IP))
776                         tos = old_iph->tos;
777         }
778
779         {
780                 struct flowi fl = { .oif = tunnel->parms.link,
781                                     .nl_u = { .ip4_u =
782                                               { .daddr = dst,
783                                                 .saddr = tiph->saddr,
784                                                 .tos = RT_TOS(tos) } },
785                                     .proto = IPPROTO_GRE };
786                 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
787                         stats->tx_carrier_errors++;
788                         goto tx_error;
789                 }
790         }
791         tdev = rt->u.dst.dev;
792
793         if (tdev == dev) {
794                 ip_rt_put(rt);
795                 stats->collisions++;
796                 goto tx_error;
797         }
798
799         df = tiph->frag_off;
800         if (df)
801 #ifdef HAVE_NETDEV_NEEDED_HEADROOM
802                 mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen;
803 #else
804                 mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
805 #endif
806         else
807                 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
808
809         if (skb_dst(skb))
810                 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
811
812         /* XXX: Temporarily allow fragmentation since DF doesn't
813          * do the right thing with bridging. */
814 /*
815         if (skb->protocol == htons(ETH_P_IP)) {
816                 df |= (old_iph->frag_off&htons(IP_DF));
817
818                 if ((old_iph->frag_off&htons(IP_DF)) &&
819                     mtu < ntohs(old_iph->tot_len)) {
820                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
821                         ip_rt_put(rt);
822                         goto tx_error;
823                 }
824         }
825 #ifdef CONFIG_IPV6
826         else if (skb->protocol == htons(ETH_P_IPV6)) {
827                 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
828
829                 if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
830                         if ((tunnel->parms.iph.daddr &&
831                              !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
832                             rt6->rt6i_dst.plen == 128) {
833                                 rt6->rt6i_flags |= RTF_MODIFIED;
834                                 skb_dst(skb)->metrics[RTAX_MTU-1] = mtu;
835                         }
836                 }
837
838                 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
839                         icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
840                         ip_rt_put(rt);
841                         goto tx_error;
842                 }
843         }
844 #endif
845 */
846         if (tunnel->err_count > 0) {
847                 if (time_before(jiffies,
848                                 tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
849                         tunnel->err_count--;
850
851                         dst_link_failure(skb);
852                 } else
853                         tunnel->err_count = 0;
854         }
855
856         max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
857
858         if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
859             (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
860                 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
861                 if (!new_skb) {
862                         ip_rt_put(rt);
863                         stats->tx_dropped++;
864                         dev_kfree_skb(skb);
865                         return NETDEV_TX_OK;
866                 }
867                 if (skb->sk)
868                         skb_set_owner_w(new_skb, skb->sk);
869                 dev_kfree_skb(skb);
870                 skb = new_skb;
871                 old_iph = ip_hdr(skb);
872         }
873
874         skb_reset_transport_header(skb);
875         skb_push(skb, gre_hlen);
876         skb_reset_network_header(skb);
877         memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
878         IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
879                               IPSKB_REROUTED);
880         skb_dst_drop(skb);
881         skb_dst_set(skb, &rt->u.dst);
882
883         /*
884          *      Push down and install the new IP header.
885          */
886
887         iph                     =       ip_hdr(skb);
888         iph->version            =       4;
889         iph->ihl                =       sizeof(struct iphdr) >> 2;
890         iph->frag_off           =       df;
891         iph->protocol           =       IPPROTO_GRE;
892         iph->tos                =       ipgre_ecn_encapsulate(tos, old_iph, skb);
893         iph->daddr              =       rt->rt_dst;
894         iph->saddr              =       rt->rt_src;
895
896         if ((iph->ttl = tiph->ttl) == 0) {
897                 if (skb->protocol == htons(ETH_P_IP))
898                         iph->ttl = old_iph->ttl;
899 #ifdef CONFIG_IPV6
900                 else if (skb->protocol == htons(ETH_P_IPV6))
901                         iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
902 #endif
903                 else
904                         iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
905         }
906
907         ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
908         ((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
909                                    htons(ETH_P_TEB) : skb->protocol;
910
911         if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
912                 __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
913
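                /* ptr starts at the last 32-bit word of the GRE header and
                 * walks backwards, matching the on-the-wire layout of
                 * flags/protocol, [checksum], [key], [sequence]: the
                 * sequence number (if present) occupies the final word,
                 * the key the word before it, and the checksum the word
                 * right after the flags. */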
914                 if (tunnel->parms.o_flags&GRE_SEQ) {
915                         ++tunnel->o_seqno;
916                         *ptr = htonl(tunnel->o_seqno);
917                         ptr--;
918                 }
919                 if (tunnel->parms.o_flags&GRE_KEY) {
920                         *ptr = tunnel->parms.o_key;
921                         ptr--;
922                 }
923                 if (tunnel->parms.o_flags&GRE_CSUM) {
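                        /* The GRE checksum covers everything after the
                         * outer IP header (GRE header plus payload), so
                         * the field is zeroed first and the sum is then
                         * computed in software. */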
924                         *ptr = 0;
925                         *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
926                 }
927         }
928
929         nf_reset(skb);
930
931         IPTUNNEL_XMIT();
932         return NETDEV_TX_OK;
933
934 tx_error_icmp:
935         dst_link_failure(skb);
936
937 tx_error:
938         stats->tx_errors++;
939         dev_kfree_skb(skb);
940         return NETDEV_TX_OK;
941 }
942
943 static int ipgre_tunnel_bind_dev(struct net_device *dev)
944 {
945         struct net_device *tdev = NULL;
946         struct ip_tunnel *tunnel;
947         struct iphdr *iph;
948         int hlen = LL_MAX_HEADER;
949         int mtu = ETH_DATA_LEN;
950         int addend = sizeof(struct iphdr) + 4;
951
952         tunnel = netdev_priv(dev);
953         iph = &tunnel->parms.iph;
954
955         /* Guess output device to choose reasonable mtu and needed_headroom */
956
957         if (iph->daddr) {
958                 struct flowi fl = { .oif = tunnel->parms.link,
959                                     .nl_u = { .ip4_u =
960                                               { .daddr = iph->daddr,
961                                                 .saddr = iph->saddr,
962                                                 .tos = RT_TOS(iph->tos) } },
963                                     .proto = IPPROTO_GRE };
964                 struct rtable *rt;
965                 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
966                         tdev = rt->u.dst.dev;
967                         ip_rt_put(rt);
968                 }
969
970                 if (dev->type != ARPHRD_ETHER)
971                         dev->flags |= IFF_POINTOPOINT;
972         }
973
974         if (!tdev && tunnel->parms.link)
975                 tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
976
977         if (tdev) {
978 #ifdef HAVE_NETDEV_NEEDED_HEADROOM
979                 hlen = tdev->hard_header_len + tdev->needed_headroom;
980 #else
981                 hlen = tdev->hard_header_len;
982 #endif
983                 mtu = tdev->mtu;
984         }
985         dev->iflink = tunnel->parms.link;
986
987         /* Precalculate GRE options length */
988         if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
989                 if (tunnel->parms.o_flags&GRE_CSUM)
990                         addend += 4;
991                 if (tunnel->parms.o_flags&GRE_KEY)
992                         addend += 4;
993                 if (tunnel->parms.o_flags&GRE_SEQ)
994                         addend += 4;
995         }
996 #ifdef HAVE_NETDEV_NEEDED_HEADROOM
997         dev->needed_headroom = hlen + addend;
998         mtu -= dev->hard_header_len + addend;
999 #else
1000         dev->hard_header_len = hlen + addend;
1001         mtu -= addend;
1002 #endif
1003         tunnel->hlen = addend;
1004
1005         if (mtu < 68)
1006                 mtu = 68;
1007
1008         /* XXX: Set MTU to the maximum possible value.  If we are bridged to a
1009         * device with a larger MTU then packets will be dropped. */
1010         mtu = 65482;
1011
1012         return mtu;
1013 }
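/*
 * Editor's note, a worked example of the overhead computed above (not in
 * the original source): with both a key and a checksum enabled, e.g.
 * o_flags = GRE_KEY|GRE_CSUM, the per-packet overhead is
 *
 *	addend = sizeof(struct iphdr) + 4	(outer IP + GRE flags/proto)
 *	       + 4				(checksum word)
 *	       + 4				(key word)
 *	       = 32 bytes,
 *
 * which becomes tunnel->hlen and is added to the underlying device's
 * headroom requirement.
 */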
1014
1015 static int
1016 ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1017 {
1018         int err = 0;
1019         struct ip_tunnel_parm p;
1020         struct ip_tunnel *t;
1021         struct net *net = dev_net(dev);
1022         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1023         int add_tunnel, gretap;
1024
1025         switch (cmd) {
1026         case SIOCGETTUNNEL:
1027                 t = NULL;
1028                 if (dev == ign->fb_tunnel_dev) {
1029                         if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1030                                 err = -EFAULT;
1031                                 break;
1032                         }
1033                         t = ipgre_tunnel_locate(net, &p, false, 0);
1034                 }
1035                 if (t == NULL)
1036                         t = netdev_priv(dev);
1037                 memcpy(&p, &t->parms, sizeof(p));
1038                 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1039                         err = -EFAULT;
1040                 break;
1041
1042         case SIOCADDTUNNEL:
1043         case SIOCCHGTUNNEL:
1044         case SIOCADDGRETAP:
1045         case SIOCCHGGRETAP:
1046                 err = -EPERM;
1047                 if (!capable(CAP_NET_ADMIN))
1048                         goto done;
1049
1050                 err = -EFAULT;
1051                 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1052                         goto done;
1053
1054                 err = -EINVAL;
1055                 if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
1056                     p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
1057                     ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
1058                         goto done;
1059
1060                 add_tunnel = (cmd == SIOCADDTUNNEL || cmd == SIOCADDGRETAP);
1061                 gretap = (cmd == SIOCADDGRETAP || cmd == SIOCCHGGRETAP);
1062
1063                 if (p.iph.ttl)
1064                         p.iph.frag_off |= htons(IP_DF);
1065
1066                 if (!(p.i_flags&GRE_KEY))
1067                         p.i_key = 0;
1068                 if (!(p.o_flags&GRE_KEY))
1069                         p.o_key = 0;
1070
1071                 t = ipgre_tunnel_locate(net, &p, gretap, add_tunnel);
1072
1073                 if (dev != ign->fb_tunnel_dev && !add_tunnel) {
1074                         if (t != NULL) {
1075                                 if (t->dev != dev) {
1076                                         err = -EEXIST;
1077                                         break;
1078                                 }
1079                         } else {
1080                                 unsigned nflags = 0;
1081
1082                                 t = netdev_priv(dev);
1083
1084                                 if (ipv4_is_multicast(p.iph.daddr))
1085                                         nflags = IFF_BROADCAST;
1086                                 else if (p.iph.daddr)
1087                                         nflags = IFF_POINTOPOINT;
1088
1089                                 if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
1090                                         err = -EINVAL;
1091                                         break;
1092                                 }
1093                                 ipgre_tunnel_unlink(ign, t);
1094                                 t->parms.iph.saddr = p.iph.saddr;
1095                                 t->parms.iph.daddr = p.iph.daddr;
1096                                 t->parms.i_key = p.i_key;
1097                                 t->parms.o_key = p.o_key;
1098                                 memcpy(dev->dev_addr, &p.iph.saddr, 4);
1099                                 memcpy(dev->broadcast, &p.iph.daddr, 4);
1100                                 ipgre_tunnel_link(ign, t);
1101                                 netdev_state_change(dev);
1102                         }
1103                 }
1104
1105                 if (t) {
1106                         err = 0;
1107                         if (!add_tunnel) {
1108                                 t->parms.iph.ttl = p.iph.ttl;
1109                                 t->parms.iph.tos = p.iph.tos;
1110                                 t->parms.iph.frag_off = p.iph.frag_off;
1111                                 if (t->parms.link != p.link) {
1112                                         t->parms.link = p.link;
1113                                         dev->mtu = ipgre_tunnel_bind_dev(dev);
1114                                         netdev_state_change(dev);
1115                                 }
1116                         }
1117                         if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
1118                                 err = -EFAULT;
1119                 } else
1120                         err = (add_tunnel ? -ENOBUFS : -ENOENT);
1121                 break;
1122
1123         case SIOCDELTUNNEL:
1124                 err = -EPERM;
1125                 if (!capable(CAP_NET_ADMIN))
1126                         goto done;
1127
1128                 if (dev == ign->fb_tunnel_dev) {
1129                         err = -EFAULT;
1130                         if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1131                                 goto done;
1132                         err = -ENOENT;
1133                         if ((t = ipgre_tunnel_locate(net, &p, false, 0)) == NULL)
1134                                 goto done;
1135                         err = -EPERM;
1136                         if (t == netdev_priv(ign->fb_tunnel_dev))
1137                                 goto done;
1138                         dev = t->dev;
1139                 }
1140                 unregister_netdevice(dev);
1141                 err = 0;
1142                 break;
1143
1144         default:
1145                 err = -EINVAL;
1146         }
1147
1148 done:
1149         return err;
1150 }
1151
1152 #ifndef HAVE_NETDEV_STATS
1153 static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
1154 {
1155         return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
1156 }
1157 #endif
1158
1159 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1160 {
1161         struct ip_tunnel *tunnel = netdev_priv(dev);
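        /* 68 is the minimum IPv4 MTU; 0xFFF8 is the largest multiple of 8
         * below the 64KB IPv4 length limit, and the tunnel header length is
         * subtracted so the encapsulated packet still fits. */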
1162         if (new_mtu < 68 ||
1163 #ifdef HAVE_NETDEV_NEEDED_HEADROOM
1164         new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
1165 #else
1166         new_mtu > 0xFFF8 - tunnel->hlen)
1167 #endif
1168                 return -EINVAL;
1169         dev->mtu = new_mtu;
1170         return 0;
1171 }
1172
1173 /* Nice toy. Unfortunately, useless in real life :-)
1174    It allows one to construct a virtual multiprotocol broadcast "LAN"
1175    over the Internet, provided multicast routing is tuned.
1176
1177
1178    I have no idea whether this bicycle was invented before me,
1179    so I had to set ARPHRD_IPGRE to a random value.
1180    I have the impression that Cisco could do something similar,
1181    but this feature is apparently missing in IOS<=11.2(8).
1182
1183    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1184    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1185
1186    ping -t 255 224.66.66.66
1187
1188    If nobody answers, mbone does not work.
1189
1190    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
1191    ip addr add 10.66.66.<somewhat>/24 dev Universe
1192    ifconfig Universe up
1193    ifconfig Universe add fe80::<Your_real_addr>/10
1194    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
1195    ftp 10.66.66.66
1196    ...
1197    ftp fec0:6666:6666::193.233.7.65
1198    ...
1199
1200  */
1201
1202 #ifdef HAVE_NETDEV_HEADER_OPS
1203 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1204                        unsigned short type,
1205                        const void *daddr, const void *saddr, unsigned len)
1206 #else
1207 static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
1208                         void *daddr, void *saddr, unsigned len)
1209 #endif
1210 {
1211         struct ip_tunnel *t = netdev_priv(dev);
1212         struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1213         __be16 *p = (__be16*)(iph+1);
1214
1215         memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1216         p[0]            = t->parms.o_flags;
1217         p[1]            = htons(type);
1218
1219         /*
1220          *      Set the source hardware address.
1221          */
1222
1223         if (saddr)
1224                 memcpy(&iph->saddr, saddr, 4);
1225
1226         if (daddr) {
1227                 memcpy(&iph->daddr, daddr, 4);
1228                 return t->hlen;
1229         }
1230         if (iph->daddr && !ipv4_is_multicast(iph->daddr))
1231                 return t->hlen;
1232
1233         return -t->hlen;
1234 }
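/*
 * Editor's note (an assumption about the header_ops convention, not stated
 * in this file): returning the positive header length above signals that
 * the link-layer header is complete, while the negative length tells the
 * caller that the destination is still unknown and must be resolved before
 * the packet can be sent.
 */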
1235
1236 #ifdef HAVE_NETDEV_HEADER_OPS
1237 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1238 #else
1239 static int ipgre_header_parse(struct sk_buff *skb, unsigned char *haddr)
1240 #endif
1241 {
1242         struct iphdr *iph = (struct iphdr *) skb_mac_header(skb);
1243         memcpy(haddr, &iph->saddr, 4);
1244         return 4;
1245 }
1246
1247 #ifdef HAVE_NETDEV_HEADER_OPS
1248 static const struct header_ops ipgre_header_ops = {
1249         .create = ipgre_header,
1250         .parse  = ipgre_header_parse,
1251 };
1252 #endif
1253
1254 #ifdef CONFIG_NET_IPGRE_BROADCAST
1255 static int ipgre_open(struct net_device *dev)
1256 {
1257         struct ip_tunnel *t = netdev_priv(dev);
1258
1259         if (ipv4_is_multicast(t->parms.iph.daddr)) {
1260                 struct flowi fl = { .oif = t->parms.link,
1261                                     .nl_u = { .ip4_u =
1262                                               { .daddr = t->parms.iph.daddr,
1263                                                 .saddr = t->parms.iph.saddr,
1264                                                 .tos = RT_TOS(t->parms.iph.tos) } },
1265                                     .proto = IPPROTO_GRE };
1266                 struct rtable *rt;
1267                 if (ip_route_output_key(dev_net(dev), &rt, &fl))
1268                         return -EADDRNOTAVAIL;
1269                 dev = rt->u.dst.dev;
1270                 ip_rt_put(rt);
1271                 if (__in_dev_get_rtnl(dev) == NULL)
1272                         return -EADDRNOTAVAIL;
1273                 t->mlink = dev->ifindex;
1274                 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
1275         }
1276         return 0;
1277 }
1278
1279 static int ipgre_close(struct net_device *dev)
1280 {
1281         struct ip_tunnel *t = netdev_priv(dev);
1282
1283         if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
1284                 struct in_device *in_dev;
1285                 in_dev = inetdev_by_index(dev_net(dev), t->mlink);
1286                 if (in_dev) {
1287                         ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1288                         in_dev_put(in_dev);
1289                 }
1290         }
1291         return 0;
1292 }
1293
1294 #endif
1295
1296 static void ethtool_getinfo(struct net_device *dev,
1297                             struct ethtool_drvinfo *info)
1298 {
1299         strcpy(info->driver, "ip_gre");
1300         strcpy(info->version, "Open vSwitch "VERSION BUILDNR);
1301         strcpy(info->bus_info, dev->type == ARPHRD_ETHER ? "gretap" : "gre");
1302 }
1303
1304 static struct ethtool_ops ethtool_ops = {
1305         .get_drvinfo = ethtool_getinfo,
1306 };
1307
1308 #ifdef HAVE_NET_DEVICE_OPS
1309 static const struct net_device_ops ipgre_netdev_ops = {
1310         .ndo_init               = ipgre_tunnel_init,
1311         .ndo_uninit             = ipgre_tunnel_uninit,
1312 #ifdef CONFIG_NET_IPGRE_BROADCAST
1313         .ndo_open               = ipgre_open,
1314         .ndo_stop               = ipgre_close,
1315 #endif
1316         .ndo_start_xmit         = ipgre_tunnel_xmit,
1317         .ndo_do_ioctl           = ipgre_tunnel_ioctl,
1318         .ndo_change_mtu         = ipgre_tunnel_change_mtu,
1319 };
1320 #endif
1321
1322 static void ipgre_tunnel_setup(struct net_device *dev)
1323 {
1324 #ifdef HAVE_NET_DEVICE_OPS
1325         dev->netdev_ops         = &ipgre_netdev_ops;
1326 #else
1327         dev->init               = ipgre_tunnel_init;
1328         dev->uninit             = ipgre_tunnel_uninit;
1329         dev->hard_start_xmit    = ipgre_tunnel_xmit;
1330 #ifndef HAVE_NETDEV_STATS
1331         dev->get_stats          = ipgre_tunnel_get_stats;
1332 #endif
1333         dev->do_ioctl           = ipgre_tunnel_ioctl;
1334         dev->change_mtu         = ipgre_tunnel_change_mtu;
1335 #endif /* HAVE_NET_DEVICE_OPS */
1336         dev->destructor         = free_netdev;
1337
1338         dev->type               = ARPHRD_IPGRE;
1339 #ifdef HAVE_NETDEV_NEEDED_HEADROOM
1340         dev->needed_headroom    = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
1341 #else
1342         dev->hard_header_len    = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
1343 #endif
1344         dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
1345         dev->flags              = IFF_NOARP;
1346         dev->iflink             = 0;
1347         dev->addr_len           = 4;
1348         dev->features           |= NETIF_F_NETNS_LOCAL;
1349         dev->priv_flags         &= ~IFF_XMIT_DST_RELEASE;
1350
1351         SET_ETHTOOL_OPS(dev, &ethtool_ops);
1352 }
1353
1354 static int ipgre_tunnel_init(struct net_device *dev)
1355 {
1356         struct ip_tunnel *tunnel;
1357         struct iphdr *iph;
1358
1359         tunnel = netdev_priv(dev);
1360         iph = &tunnel->parms.iph;
1361
1362         tunnel->dev = dev;
1363         strcpy(tunnel->parms.name, dev->name);
1364
1365         memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1366         memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1367
1368         if (iph->daddr) {
1369 #ifdef CONFIG_NET_IPGRE_BROADCAST
1370                 if (ipv4_is_multicast(iph->daddr)) {
1371                         if (!iph->saddr)
1372                                 return -EINVAL;
1373                         dev->flags = IFF_BROADCAST;
1374 #ifdef HAVE_NETDEV_HEADER_OPS
1375                         dev->header_ops = &ipgre_header_ops;
1376 #else
1377                         dev->hard_header = ipgre_header;
1378                         dev->hard_header_parse = ipgre_header_parse;
1379 #endif
1380 #ifndef HAVE_NET_DEVICE_OPS
1381                         dev->open = ipgre_open;
1382                         dev->stop = ipgre_close;
1383 #endif
1384                 }
1385 #endif
1386         } else {
1387 #ifdef HAVE_NETDEV_HEADER_OPS
1388                 dev->header_ops = &ipgre_header_ops;
1389 #else
1390                 dev->hard_header = ipgre_header;
1391                 dev->hard_header_parse = ipgre_header_parse;
1392 #endif
1393         }
1394
1395         return 0;
1396 }
1397
1398 #ifdef HAVE_NET_DEVICE_OPS
1399 static void ipgre_fb_tunnel_init(struct net_device *dev)
1400 #else
1401 static int ipgre_fb_tunnel_init(struct net_device *dev)
1402 #endif
1403 {
1404         struct ip_tunnel *tunnel = netdev_priv(dev);
1405         struct iphdr *iph = &tunnel->parms.iph;
1406         struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id);
1407
1408         tunnel->dev = dev;
1409         strcpy(tunnel->parms.name, dev->name);
1410
1411         iph->version            = 4;
1412         iph->protocol           = IPPROTO_GRE;
1413         iph->ihl                = 5;
1414         tunnel->hlen            = sizeof(struct iphdr) + 4;
1415
1416         dev_hold(dev);
1417         ign->tunnels_wc[0]      = tunnel;
1418
1419 #ifndef HAVE_NET_DEVICE_OPS
1420         return 0;
1421 #endif
1422 }
1423
1424 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
1425 static struct net_protocol ipgre_protocol = {
1426 #else
1427 static const struct net_protocol ipgre_protocol = {
1428 #endif
1429         .handler        =       ipgre_rcv,
1430         .err_handler    =       ipgre_err,
1431 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
1432         .netns_ok       =       1,
1433 #endif
1434 };
1435
1436 static void ipgre_destroy_tunnels(struct ipgre_net *ign)
1437 {
1438         int prio;
1439
1440         for (prio = 0; prio < 4; prio++) {
1441                 int h;
1442                 for (h = 0; h < HASH_SIZE; h++) {
1443                         struct ip_tunnel *t;
1444                         while ((t = ign->tunnels[prio][h]) != NULL)
1445                                 unregister_netdevice(t->dev);
1446                 }
1447         }
1448 }
1449
1450 static int ipgre_init_net(struct net *net)
1451 {
1452         int err;
1453         struct ipgre_net *ign;
1454
1455         err = -ENOMEM;
1456         ign = kzalloc(sizeof(struct ipgre_net), GFP_KERNEL);
1457         if (ign == NULL)
1458                 goto err_alloc;
1459
1460         err = net_assign_generic(net, ipgre_net_id, ign);
1461         if (err < 0)
1462                 goto err_assign;
1463
1464         ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), GRE_IOCTL_DEVICE,
1465                                            ipgre_tunnel_setup);
1466         if (!ign->fb_tunnel_dev) {
1467                 err = -ENOMEM;
1468                 goto err_alloc_dev;
1469         }
1470         dev_net_set(ign->fb_tunnel_dev, net);
1471
1472 #ifdef HAVE_NET_DEVICE_OPS
1473         ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
1474 #else
1475         ign->fb_tunnel_dev->init = ipgre_fb_tunnel_init;
1476 #endif
1477 #ifndef GRE_IOCTL_ONLY
1478         ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;
1479 #endif
1480
1481         if ((err = register_netdev(ign->fb_tunnel_dev)))
1482                 goto err_reg_dev;
1483
1484         return 0;
1485
1486 err_reg_dev:
1487         free_netdev(ign->fb_tunnel_dev);
1488 err_alloc_dev:
1489         /* nothing */
1490 err_assign:
1491         kfree(ign);
1492 err_alloc:
1493         return err;
1494 }
1495
1496 static void ipgre_exit_net(struct net *net)
1497 {
1498         struct ipgre_net *ign;
1499
1500         ign = net_generic(net, ipgre_net_id);
1501         rtnl_lock();
1502         ipgre_destroy_tunnels(ign);
1503         rtnl_unlock();
1504         kfree(ign);
1505 }
1506
1507 static struct pernet_operations ipgre_net_ops = {
1508         .init = ipgre_init_net,
1509         .exit = ipgre_exit_net,
1510 };
1511
1512 static int ipgre_tap_init(struct net_device *dev)
1513 {
1514         struct ip_tunnel *tunnel;
1515
1516         tunnel = netdev_priv(dev);
1517
1518         tunnel->dev = dev;
1519         strcpy(tunnel->parms.name, dev->name);
1520
1521         ipgre_tunnel_bind_dev(dev);
1522
1523         return 0;
1524 }
1525
#ifdef HAVE_NET_DEVICE_OPS
static const struct net_device_ops ipgre_tap_netdev_ops = {
	.ndo_init		= ipgre_tap_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
};
#endif

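/*
 * netdev setup for the "gretap" variant: start from ether_setup() so
 * the device looks like Ethernet, then install the tunnel operations.
 * The pre-net_device_ops branch assigns the individual function
 * pointers directly on the net_device.
 */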
static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

#ifdef HAVE_NET_DEVICE_OPS
	dev->netdev_ops		= &ipgre_tap_netdev_ops;
#else
	dev->init		= ipgre_tap_init;
	dev->uninit		= ipgre_tunnel_uninit;
	dev->hard_start_xmit	= ipgre_tunnel_xmit;
#ifndef HAVE_NETDEV_STATS
	dev->get_stats		= ipgre_tunnel_get_stats;
#endif
	dev->do_ioctl		= ipgre_tunnel_ioctl;
	dev->change_mtu		= ipgre_tunnel_change_mtu;
#endif /* HAVE_NET_DEVICE_OPS */
	dev->destructor		= free_netdev;

	dev->iflink		= 0;
	dev->features		|= NETIF_F_NETNS_LOCAL;

	SET_ETHTOOL_OPS(dev, &ethtool_ops);
}

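/*
 * rtnetlink support (compiled out when GRE_IOCTL_ONLY is set).
 *
 * ipgre_tunnel_validate() rejects configurations that set the
 * GRE_VERSION or GRE_ROUTING bits in either direction's flags, since
 * only version 0 GRE without routing is handled here.
 */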
#ifndef GRE_IOCTL_ONLY
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}

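/*
 * gretap validation adds two checks on top of the plain GRE ones: the
 * optional link-layer address must be a valid unicast Ethernet address,
 * and a remote endpoint, if given, must be non-zero.
 */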
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data);
}

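/*
 * Translate IFLA_GRE_* netlink attributes into an ip_tunnel_parm.
 * Path MTU discovery defaults to on: IP_DF is set unless the caller
 * passes IFLA_GRE_PMTUDISC with a value of 0.
 */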
static void ipgre_netlink_parms(struct nlattr *data[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);
}

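/*
 * rtnl_link_ops->newlink: create a tunnel from netlink attributes.
 * Creation fails with -EEXIST if a tunnel with the same parameters is
 * already registered; gretap devices get a random MAC address unless
 * one was supplied.
 */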
static int ipgre_newlink(struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &nt->parms);

	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		random_ether_addr(dev->dev_addr);

	mtu = ipgre_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);

out:
	return err;
}

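/*
 * rtnl_link_ops->changelink: update an existing tunnel.  The fallback
 * device cannot be reconfigured, and the new parameters must not
 * collide with a different existing tunnel.  Changes to the endpoints
 * or input key cause the tunnel to be unlinked and rehashed; TTL, TOS,
 * output key and DF handling are always updated in place.
 */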
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &p);

	t = ipgre_tunnel_locate(net, &p, false, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		if (dev->type != ARPHRD_ETHER) {
			unsigned nflags = 0;

			if (ipv4_is_multicast(p.iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p.iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}

		ipgre_tunnel_unlink(ign, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}
		ipgre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	t->parms.o_key = p.o_key;
	t->parms.iph.ttl = p.iph.ttl;
	t->parms.iph.tos = p.iph.tos;
	t->parms.iph.frag_off = p.iph.frag_off;

	if (t->parms.link != p.link) {
		t->parms.link = p.link;
		mtu = ipgre_tunnel_bind_dev(dev);
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		0;
}

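/*
 * Dump the current tunnel parameters back to user space.  The NLA_PUT_*
 * macros jump to nla_put_failure if the message runs out of room, in
 * which case the caller is told to retry with a larger buffer.
 */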
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link);
	NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags);
	NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags);
	NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key);
	NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key);
	NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr);
	NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr);
	NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl);
	NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos);
	NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)));

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
};

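/*
 * rtnl_link_ops for the "gre" (layer 3) and "gretap" (Ethernet) link
 * kinds.  With these registered, tunnels can be managed by any
 * rtnetlink client; with a stock iproute2 build one would expect
 * something along the lines of:
 *
 *	ip link add gre1 type gre local 192.0.2.1 remote 192.0.2.2 ttl 64
 *	ip link add tap1 type gretap local 192.0.2.1 remote 192.0.2.2
 *
 * (commands shown for illustration only; exact option support depends
 * on the iproute2 version).
 */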
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
#endif

/*
 *	Module init/exit and kernel interface registration.
 */

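/*
 * Module init: register the GRE protocol handler, the per-namespace
 * operations and (unless GRE_IOCTL_ONLY) both rtnl_link_ops.  Each
 * failure path unwinds whatever was registered before it.
 */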
static int __init ipgre_init(void)
{
	int err;

	printk(KERN_INFO "GRE over IPv4 tunneling driver\n");

	if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
		printk(KERN_INFO "ipgre init: can't add protocol\n");
		return -EAGAIN;
	}

	err = register_pernet_gen_device(&ipgre_net_id, &ipgre_net_ops);
	if (err < 0)
		goto gen_device_failed;

#ifndef GRE_IOCTL_ONLY
	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;
#endif

out:
	return err;

#ifndef GRE_IOCTL_ONLY
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
#endif
gen_device_failed:
	inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
	goto out;
}

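/*
 * Module exit: unregister everything in the reverse order of
 * ipgre_init(); a failure to remove the protocol handler is only
 * logged, since there is nothing else to do at this point.
 */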
static void __exit ipgre_fini(void)
{
#ifndef GRE_IOCTL_ONLY
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
#endif
	unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
	if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
		printk(KERN_INFO "ipgre close: can't remove protocol\n");
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_DESCRIPTION("GRE over IPv4 tunneling driver");
MODULE_LICENSE("GPL");
#ifndef GRE_IOCTL_ONLY
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
#endif
