/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>

#include <net/protocol.h>
#include <net/checksum.h>
#include <net/inet_ecn.h>

#include <net/ip6_fib.h>
#include <net/ip6_route.h>

/*
   Problems & solutions:

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local TTL)
   and silently drop the packet when it expires. It is the best
   solution, but it requires maintaining a new variable in EVERY
   skb, even if no tunneling is used.

   Current solution: the t->recursion lock breaks dead loops. It looks
   like the dev->tbusy flag, but I preferred a new variable, because
   the semantics are different. One day, when hard_start_xmit
   becomes multithreaded, we will have to switch to skb->encapsulation.
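
   A minimal sketch of the guard (the real thing lives in
   ipgre_tunnel_xmit() below; error accounting is simplified here):

	if (tunnel->recursion++) {	// re-entered our own xmit
		tunnel->stat.collisions++;
		dev_kfree_skb(skb);	// break the loop: drop the packet
		tunnel->recursion--;
		return 0;
	}
	// ... encapsulate and transmit ...
	tunnel->recursion--;
	return 0;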

   2. Networking dead loops would not kill routers, but they would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to the
   outer header. It is a very good solution, but it introduces two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output would
     be even more informative. This idea appeared to be wrong:
     only Linux complies with RFC 1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least, in my neighbourhood)
     return only 8 bytes of payload. That is the end of it.

   Hence, if we want OSPF to work, or traceroute to say something
   reasonable, we have to search for another solution.

   One of them is to parse the packet, trying to detect an inner
   encapsulation made by our node. That is difficult or even impossible,
   especially taking fragmentation into account. In short, it is no
   solution at all.

   Current solution: the solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit;
   that is ALL. :-) Well, it does not remove the problem completely,
   but the exponential growth of network traffic is reduced to linear
   (branches that exceed the PMTU are pruned), and the tunnel MTU
   rapidly degrades to a value below 68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF even when the encapsulated packets have DF set.
   But it is not our problem! Nobody could accuse us; we did
   everything we could. Even if it was your gated that injected the
   fatal route into the network, even if it was you who configured the
   fatal static route: you are innocent. :-)
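
   The enforcement is a single line in the configuration path (it
   appears again in ipgre_tunnel_ioctl() below): a tunnel with a fixed
   TTL gets DF forced onto its outer header.

	if (p.iph.ttl)
		p.iph.frag_off |= htons(IP_DF);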

   3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
   practically identical code. It would be good to glue them
   together, but it is not obvious how to make them modular.
   sit is an integral part of IPv6, while ipip and gre are naturally
   modular. We could extract the common parts (hash table, ioctls, etc.)
   into a separate module (ip_tunnel.c).
 */

static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);

/* Fallback tunnel: no source, no destination, no key, no options */

static int ipgre_fb_tunnel_init(struct net_device *dev);

static struct net_device *ipgre_fb_tunnel_dev;

/* Tunnel hash table */

/*
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched by a configured keyless tunnel,
   will match the fallback tunnel.
 */

#define HASH_SIZE	16
#define HASH(addr)	((addr^(addr>>4))&0xF)
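
/* HASH() folds the two low-order nibbles of the 32-bit value into one
   of the 16 buckets; a quick worked example:

	HASH(0x12345678) == (0x8 ^ 0x7) & 0xF == 0xF
 */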

static struct ip_tunnel *tunnels[4][HASH_SIZE];

#define tunnels_r_l	(tunnels[3])
#define tunnels_r	(tunnels[2])
#define tunnels_l	(tunnels[1])
#define tunnels_wc	(tunnels[0])

static rwlock_t ipgre_lock = RW_LOCK_UNLOCKED;

/* Given src, dst and key, find the appropriate tunnel for input. */

static struct ip_tunnel * ipgre_tunnel_lookup(u32 remote, u32 local, u32 key)
{
	unsigned h0 = HASH(remote);
	unsigned h1 = HASH(key);
	struct ip_tunnel *t;

	for (t = tunnels_r_l[h0^h1]; t; t = t->next) {
		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
			if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
				return t;
		}
	}
	for (t = tunnels_r[h0^h1]; t; t = t->next) {
		if (remote == t->parms.iph.daddr) {
			if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
				return t;
		}
	}
	for (t = tunnels_l[h1]; t; t = t->next) {
		if (local == t->parms.iph.saddr ||
		    (local == t->parms.iph.daddr && MULTICAST(local))) {
			if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
				return t;
		}
	}
	for (t = tunnels_wc[h1]; t; t = t->next) {
		if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
			return t;
	}

	if (ipgre_fb_tunnel_dev->flags&IFF_UP)
		return ipgre_fb_tunnel_dev->priv;
	return NULL;
}
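
/* Lookup precedence, most specific first: (remote,local) pairs, then
   remote-only, then local-only (or local multicast), then wildcard,
   and finally the fallback device if it is up. */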

static struct ip_tunnel **ipgre_bucket(struct ip_tunnel *t)
{
	u32 remote = t->parms.iph.daddr;
	u32 local = t->parms.iph.saddr;
	u32 key = t->parms.i_key;
	unsigned h = HASH(key);
	int prio = 0;

	if (local)
		prio |= 1;
	if (remote && !MULTICAST(remote)) {
		prio |= 2;
		h ^= HASH(remote);
	}

	return &tunnels[prio][h];
}

static void ipgre_tunnel_link(struct ip_tunnel *t)
{
	struct ip_tunnel **tp = ipgre_bucket(t);

	t->next = *tp;
	write_lock_bh(&ipgre_lock);
	*tp = t;
	write_unlock_bh(&ipgre_lock);
}

static void ipgre_tunnel_unlink(struct ip_tunnel *t)
{
	struct ip_tunnel **tp;

	for (tp = ipgre_bucket(t); *tp; tp = &(*tp)->next) {
		if (t == *tp) {
			write_lock_bh(&ipgre_lock);
			*tp = t->next;
			write_unlock_bh(&ipgre_lock);
			break;
		}
	}
}

static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int create)
{
	u32 remote = parms->iph.daddr;
	u32 local = parms->iph.saddr;
	u32 key = parms->i_key;
	struct ip_tunnel *t, **tp, *nt;
	struct net_device *dev;
	unsigned h = HASH(key);
	int prio = 0;
	char name[IFNAMSIZ];

	if (local)
		prio |= 1;
	if (remote && !MULTICAST(remote)) {
		prio |= 2;
		h ^= HASH(remote);
	}
	for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
			if (key == t->parms.i_key)
				return t;
		}
	}
	if (!create)
		return NULL;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else {
		int i;
		for (i=1; i<100; i++) {
			sprintf(name, "gre%d", i);
			if (__dev_get_by_name(name) == NULL)
				break;
		}
		if (i==100)
			return NULL;
	}

	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
	if (!dev)
		return NULL;

	dev->init = ipgre_tunnel_init;
	nt = (struct ip_tunnel*)dev->priv;
	nt->parms = *parms;

	if (register_netdevice(dev) < 0) {
		free_netdev(dev);
		return NULL;
	}

	dev_hold(dev);
	ipgre_tunnel_link(nt);
	/* Do not decrement MOD_USE_COUNT here. */
	return nt;
}

static void ipgre_tunnel_uninit(struct net_device *dev)
{
	ipgre_tunnel_unlink((struct ip_tunnel*)dev->priv);
	dev_put(dev);
}

void ipgre_err(struct sk_buff *skb, u32 info)
{
#ifndef I_WISH_WORLD_WERE_PERFECT

/* It is not :-( All the routers (except for Linux) return only
   8 bytes of packet payload. It means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.

   Moreover, Cisco "wise men" put the GRE key in the third word
   of the GRE header. That makes it impossible to maintain even soft
   state for keyed GRE tunnels with checksums enabled. Tell them
   "thank you".

   Well, I wonder: RFC 1812 was written by a Cisco employee.
   Why the hell do these idiots break the standards they
   established themselves?
 */

	struct iphdr *iph = (struct iphdr*)skb->data;
	u16 *p = (u16*)(skb->data+(iph->ihl<<2));
	int grehlen = (iph->ihl<<2) + 4;
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	struct ip_tunnel *t;
	u16 flags;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		if (flags&GRE_KEY) {
			if (flags&GRE_CSUM)
				grehlen += 4;
			grehlen += 4;
		}
	}

	/* If only 8 bytes were returned, a keyed message will be dropped here */
	if (skb_headlen(skb) < grehlen)
		return;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		case ICMP_FRAG_NEEDED:
			/* Soft state for pmtu is maintained by IP core. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   RFC 2003 contains "deep thoughts" about NET_UNREACH;
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;
	}

	read_lock(&ipgre_lock);
	t = ipgre_tunnel_lookup(iph->daddr, iph->saddr,
				(flags&GRE_KEY) ?
				*(((u32*)p) + (grehlen>>2) - 1) : 0);
	if (t == NULL || t->parms.iph.daddr == 0 || MULTICAST(t->parms.iph.daddr))
		goto out;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	read_unlock(&ipgre_lock);
	return;
#else
	struct iphdr *iph = (struct iphdr*)dp;
	struct iphdr *eiph;
	u16 *p = (u16*)(dp+(iph->ihl<<2));
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	int rel_type = 0;
	int rel_code = 0;
	int rel_info = 0;
	u16 flags;
	int grehlen = (iph->ihl<<2) + 4;
	struct sk_buff *skb2;
	struct flowi fl;
	struct rtable *rt;

	if (p[1] != htons(ETH_P_IP))
		return;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		if (flags&GRE_CSUM)
			grehlen += 4;
		if (flags&GRE_KEY)
			grehlen += 4;
		if (flags&GRE_SEQ)
			grehlen += 4;
	}
	if (len < grehlen + sizeof(struct iphdr))
		return;
	eiph = (struct iphdr*)(dp + grehlen);

	switch (type) {
	default:
		return;
	case ICMP_PARAMETERPROB:
		if (skb->h.icmph->un.gateway < (iph->ihl<<2))
			return;

		/* So... This guy found something strange INSIDE the
		   encapsulated packet. Well, he is a fool, but what
		   can we do? */
		rel_type = ICMP_PARAMETERPROB;
		rel_info = skb->h.icmph->un.gateway - grehlen;
		break;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		case ICMP_FRAG_NEEDED:
			/* And it is the only really necessary thing :-) */
			rel_info = ntohs(skb->h.icmph->un.frag.mtu);
			if (rel_info < grehlen+68)
				return;
			rel_info -= grehlen;
			/* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
			if (rel_info > ntohs(eiph->tot_len))
				return;
			break;
		default:
			/* All others are translated to HOST_UNREACH.
			   RFC 2003 contains "deep thoughts" about NET_UNREACH;
			   I believe, it is just ether pollution. --ANK
			 */
			rel_type = ICMP_DEST_UNREACH;
			rel_code = ICMP_HOST_UNREACH;
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;
	}

	/* Prepare a fake skb to feed to icmp_send */
	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (skb2 == NULL)
		return;
	dst_release(skb2->dst);
	skb2->dst = NULL;
	skb_pull(skb2, skb->data - (u8*)eiph);
	skb2->nh.raw = skb2->data;

	/* Try to guess incoming interface */
	memset(&fl, 0, sizeof(fl));
	fl.fl4_dst = eiph->saddr;
	fl.fl4_tos = RT_TOS(eiph->tos);
	fl.proto = IPPROTO_GRE;
	if (ip_route_output_key(&rt, &fl)) {
		kfree_skb(skb2);
		return;
	}
	skb2->dev = rt->u.dst.dev;

	/* route "incoming" packet */
	if (rt->rt_flags&RTCF_LOCAL) {
		ip_rt_put(rt);
		rt = NULL;
		fl.fl4_dst = eiph->daddr;
		fl.fl4_src = eiph->saddr;
		fl.fl4_tos = eiph->tos;
		if (ip_route_output_key(&rt, &fl) ||
		    rt->u.dst.dev->type != ARPHRD_IPGRE) {
			ip_rt_put(rt);
			kfree_skb(skb2);
			return;
		}
	} else {
		ip_rt_put(rt);
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
		    skb2->dst->dev->type != ARPHRD_IPGRE) {
			kfree_skb(skb2);
			return;
		}
	}

	/* change mtu on this route */
	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_pmtu(skb2->dst)) {
			kfree_skb(skb2);
			return;
		}
		skb2->dst->ops->update_pmtu(skb2->dst, rel_info);
		rel_info = htonl(rel_info);
	} else if (type == ICMP_TIME_EXCEEDED) {
		struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv;
		if (t->parms.iph.ttl) {
			rel_type = ICMP_DEST_UNREACH;
			rel_code = ICMP_HOST_UNREACH;
		}
	}

	icmp_send(skb2, rel_type, rel_code, rel_info);
	kfree_skb(skb2);
#endif
}

static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
{
	if (INET_ECN_is_ce(iph->tos)) {
		if (skb->protocol == htons(ETH_P_IP)) {
			if (INET_ECN_is_not_ce(skb->nh.iph->tos))
				IP_ECN_set_ce(skb->nh.iph);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			if (INET_ECN_is_not_ce(ip6_get_dsfield(skb->nh.ipv6h)))
				IP6_ECN_set_ce(skb->nh.ipv6h);
		}
	}
}

static inline u8
ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
{
	u8 inner = 0;

	if (skb->protocol == htons(ETH_P_IP))
		inner = old_iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ip6_get_dsfield((struct ipv6hdr*)old_iph);
	return INET_ECN_encapsulate(tos, inner);
}
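
/* A quick sketch of the decapsulation rule above, in plain C (the ECN
   field is the low two bits of the TOS byte, and 0x3 is CE, per
   RFC 3168; the helper name is illustrative only):

	#define ECN_MASK 0x03
	#define ECN_CE   0x03

	static unsigned char decap_inner_tos(unsigned char outer_tos,
					     unsigned char inner_tos)
	{
		if ((outer_tos & ECN_MASK) == ECN_CE &&
		    (inner_tos & ECN_MASK) != ECN_CE)
			inner_tos |= ECN_CE;	// congestion seen in transit
		return inner_tos;
	}

   Encapsulation is the dual operation: INET_ECN_encapsulate() merges
   the tunnel TOS with the ECN state of the inner header. */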

int ipgre_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8 *h;
	u16 flags;
	u16 csum = 0;
	u32 key = 0;
	u32 seqno = 0;
	struct ip_tunnel *tunnel;
	int offset = 4;

	if (!pskb_may_pull(skb, 16))
		goto drop_nolock;

	iph = skb->nh.iph;
	h = skb->data;
	flags = *(u16*)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop_nolock;

		if (flags&GRE_CSUM) {
			if (skb->ip_summed == CHECKSUM_HW) {
				csum = (u16)csum_fold(skb->csum);
				if (csum)
					skb->ip_summed = CHECKSUM_NONE;
			}
			if (skb->ip_summed == CHECKSUM_NONE) {
				skb->csum = skb_checksum(skb, 0, skb->len, 0);
				skb->ip_summed = CHECKSUM_HW;
				csum = (u16)csum_fold(skb->csum);
			}
			offset += 4;
		}
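
		/* csum_fold() reduces the 32-bit one's-complement
		   accumulator to 16 bits and complements it; over a
		   packet whose GRE checksum is correct the result is 0,
		   so any nonzero csum marks corruption (checked once the
		   tunnel has been looked up below). Roughly:

			u16 fold(u32 sum)
			{
				sum = (sum & 0xffff) + (sum >> 16);
				sum = (sum & 0xffff) + (sum >> 16);
				return (u16)~sum;
			}
		 */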
		if (flags&GRE_KEY) {
			key = *(u32*)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(u32*)(h + offset));
			offset += 4;
		}
	}

	read_lock(&ipgre_lock);
	if ((tunnel = ipgre_tunnel_lookup(iph->saddr, iph->daddr, key)) != NULL) {
		skb->mac.raw = skb->nh.raw;
		skb->nh.raw = __pskb_pull(skb, offset);
		memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
		if (skb->ip_summed == CHECKSUM_HW)
			skb->csum = csum_sub(skb->csum,
					     csum_partial(skb->mac.raw,
							  skb->nh.raw - skb->mac.raw, 0));
		skb->protocol = *(u16*)(h + 2);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (MULTICAST(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (((struct rtable*)skb->dst)->fl.iif == 0)
				goto drop;
			tunnel->stat.multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->stat.rx_crc_errors++;
			tunnel->stat.rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->stat.rx_fifo_errors++;
				tunnel->stat.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}
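
		/* The signed difference implements serial-number style
		   arithmetic, so a 32-bit wraparound does not stall the
		   tunnel. With i_seqno == 0xfffffffe:

			seqno 0xffffffff -> (s32)1,   in order, accepted
			seqno 0x00000001 -> (s32)3,   wrapped, accepted
			seqno 0xfffffff0 -> (s32)-14, old duplicate, dropped
		 */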

		tunnel->stat.rx_packets++;
		tunnel->stat.rx_bytes += skb->len;
		skb->dev = tunnel->dev;
		dst_release(skb->dst);
		skb->dst = NULL;
		ipgre_ecn_decapsulate(iph, skb);
		netif_rx(skb);
		read_unlock(&ipgre_lock);
		return 0;
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);

drop:
	read_unlock(&ipgre_lock);
drop_nolock:
	kfree_skb(skb);
	return 0;
}

static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv;
	struct net_device_stats *stats = &tunnel->stat;
	struct iphdr *old_iph = skb->nh.iph;
	struct iphdr *tiph;
	u8 tos;
	u16 df;
	struct rtable *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr *iph;			/* Our new IP header */
	int max_headroom;			/* The extra header space needed */
	int gre_hlen;
	u32 dst;
	int mtu;

	if (tunnel->recursion++) {
		tunnel->stat.collisions++;
		goto tx_error;
	}

	if (dev->hard_header) {
		gre_hlen = 0;
		tiph = (struct iphdr*)skb->data;
	} else {
		gre_hlen = tunnel->hlen;
		tiph = &tunnel->parms.iph;
	}

	if ((dst = tiph->daddr) == 0) {
		/* NBMA tunnel */
		if (skb->dst == NULL) {
			tunnel->stat.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = (struct rtable*)skb->dst;
			if ((dst = rt->rt_gateway) == 0)
				goto tx_error_icmp;
		}
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			int addr_type;
			struct neighbour *neigh = skb->dst->neighbour;

			if (neigh == NULL)
				goto tx_error;

			addr6 = (struct in6_addr*)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &skb->nh.ipv6h->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				goto tx_error_icmp;

			dst = addr6->s6_addr32[3];
		}
		else
			goto tx_error;
	}

	tos = tiph->tos;
	if (tos&1) {
		if (skb->protocol == htons(ETH_P_IP))
			tos = old_iph->tos;
		tos &= ~1;
	}

	{
		struct flowi fl = { .oif = tunnel->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = dst,
						.saddr = tiph->saddr,
						.tos = RT_TOS(tos) } },
				    .proto = IPPROTO_GRE };
		if (ip_route_output_key(&rt, &fl)) {
			tunnel->stat.tx_carrier_errors++;
			goto tx_error;
		}
	}
	tdev = rt->u.dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		tunnel->stat.collisions++;
		goto tx_error;
	}

	df = tiph->frag_off;
	if (df)
		mtu = dst_pmtu(&rt->u.dst) - tunnel->hlen;
	else
		mtu = skb->dst ? dst_pmtu(skb->dst) : dev->mtu;

	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		df |= (old_iph->frag_off&htons(IP_DF));

		if ((old_iph->frag_off&htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
	}
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info*)skb->dst;

		if (rt6 && mtu < dst_pmtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr && !MULTICAST(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				skb->dst->metrics[RTAX_MTU-1] = mtu;
			}
		}

		if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
			ip_rt_put(rt);
			goto tx_error;
		}
	}

	if (tunnel->err_count > 0) {
		if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			stats->tx_dropped++;
			dev_kfree_skb(skb);
			tunnel->recursion--;
			return 0;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = skb->nh.iph;
	}

	skb->h.raw = skb->nh.raw;
	skb->nh.raw = skb_push(skb, gre_hlen);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/*
	 *	Push down and install the IPIP header.
	 */

	iph = skb->nh.iph;
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = IPPROTO_GRE;
	iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;

	if ((iph->ttl = tiph->ttl) == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			iph->ttl = old_iph->ttl;
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit;
		else
			iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
	}

	((u16*)(iph+1))[0] = tunnel->parms.o_flags;
	((u16*)(iph+1))[1] = skb->protocol;

	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
		u32 *ptr = (u32*)(((u8*)iph) + tunnel->hlen - 4);

		if (tunnel->parms.o_flags&GRE_SEQ) {
			tunnel->o_seqno++;
			*ptr = htonl(tunnel->o_seqno);
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_KEY) {
			*ptr = tunnel->parms.o_key;
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_CSUM) {
			*ptr = 0;
			*(__u16*)ptr = ip_compute_csum((void*)(iph+1),
						       skb->len - sizeof(struct iphdr));
		}
	}
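
	/* The options are written back to front, producing the on-the-wire
	   layout of RFC 2890:

		flags(2) proto(2) [csum(2) reserved(2)] [key(4)] [seq(4)]

	   where each bracketed field is present only when the matching
	   GRE_* flag is set in o_flags. */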

	IPTUNNEL_XMIT();
	tunnel->recursion--;
	return 0;

tx_error_icmp:
	dst_link_failure(skb);

tx_error:
	stats->tx_errors++;
	dev_kfree_skb(skb);
	tunnel->recursion--;
	return 0;
}

static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ipgre_fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(&p, 0);
		}
		if (t == NULL)
			t = (struct ip_tunnel*)dev->priv;
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(&p, cmd == SIOCADDTUNNEL);

		if (dev != ipgre_fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned nflags = 0;

				t = (struct ip_tunnel*)dev->priv;

				if (MULTICAST(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				ipgre_tunnel_unlink(t);
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ipgre_fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(&p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == ipgre_fb_tunnel_dev->priv)
				goto done;
			dev = t->dev;
		}
		err = unregister_netdevice(dev);
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
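
/* A minimal userspace sketch of driving this ioctl (SIOCADDTUNNEL and
   struct ip_tunnel_parm come from <linux/if_tunnel.h>; "local" and
   "remote" are hypothetical u32 addresses in network byte order, and
   error handling is omitted). This mirrors what iproute2's
   "ip tunnel add" does:

	struct ip_tunnel_parm p;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&p, 0, sizeof(p));
	strcpy(p.name, "gre1");
	p.iph.version  = 4;
	p.iph.ihl      = 5;
	p.iph.protocol = IPPROTO_GRE;
	p.iph.saddr    = local;
	p.iph.daddr    = remote;

	strcpy(ifr.ifr_name, "gre0");	// talk to the fallback device
	ifr.ifr_ifru.ifru_data = (void*)&p;
	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */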

static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
{
	return &(((struct ip_tunnel*)dev->priv)->stat);
}

static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv;

	if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

#ifdef CONFIG_NET_IPGRE_BROADCAST
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could do something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96

   ftp fec0:6666:6666::193.233.7.65
 */

static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type, void *daddr, void *saddr,
			unsigned len)
{
	struct ip_tunnel *t = (struct ip_tunnel*)dev->priv;
	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
	u16 *p = (u16*)(iph+1);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&iph->saddr, saddr, 4);

	if (daddr) {
		memcpy(&iph->daddr, daddr, 4);
		return t->hlen;
	}
	if (iph->daddr && !MULTICAST(iph->daddr))
		return t->hlen;

	return -t->hlen;
}

static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = (struct ip_tunnel*)dev->priv;

	if (MULTICAST(t->parms.iph.daddr)) {
		struct flowi fl = { .oif = t->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = t->parms.iph.daddr,
						.saddr = t->parms.iph.saddr,
						.tos = RT_TOS(t->parms.iph.tos) } },
				    .proto = IPPROTO_GRE };
		struct rtable *rt;

		if (ip_route_output_key(&rt, &fl))
			return -EADDRNOTAVAIL;
		dev = rt->u.dst.dev;
		ip_rt_put(rt);
		if (__in_dev_get(dev) == NULL)
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = (struct ip_tunnel*)dev->priv;

	if (MULTICAST(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev = inetdev_by_index(t->mlink);
		if (in_dev) {
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
			in_dev_put(in_dev);
		}
	}
	return 0;
}

#endif

static void ipgre_tunnel_setup(struct net_device *dev)
{
	SET_MODULE_OWNER(dev);
	dev->uninit		= ipgre_tunnel_uninit;
	dev->destructor		= free_netdev;
	dev->hard_start_xmit	= ipgre_tunnel_xmit;
	dev->get_stats		= ipgre_tunnel_get_stats;
	dev->do_ioctl		= ipgre_tunnel_ioctl;
	dev->change_mtu		= ipgre_tunnel_change_mtu;

	dev->type		= ARPHRD_IPGRE;
	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
	dev->mtu		= 1500 - sizeof(struct iphdr) - 4;
	dev->flags		= IFF_NOARP;
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = 1500;
	int addend = sizeof(struct iphdr) + 4;

	tunnel = (struct ip_tunnel*)dev->priv;
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	/* Guess output device to choose a reasonable mtu and hard_header_len */

	if (iph->daddr) {
		struct flowi fl = { .oif = tunnel->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.saddr = iph->saddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_GRE };
		struct rtable *rt;

		if (!ip_route_output_key(&rt, &fl)) {
			tdev = rt->u.dst.dev;
			ip_rt_put(rt);
		}

		dev->flags |= IFF_POINTOPOINT;

#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (MULTICAST(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->hard_header = ipgre_header;
			dev->open = ipgre_open;
			dev->stop = ipgre_close;
		}
#endif
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
	dev->hard_header_len = hlen + addend;
	dev->mtu = mtu - addend;
	tunnel->hlen = addend;
	return 0;
}
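
/* Worked example of the precalculation above: with GRE_CSUM and GRE_KEY
   enabled, addend = 20 (outer iphdr) + 4 (GRE flags+proto)
   + 4 (csum+reserved) + 4 (key) = 32 bytes, so over plain 1500-byte
   ethernet dev->mtu becomes 1500 - 32 = 1468. */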

static int __init ipgre_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv;
	struct iphdr *iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version = 4;
	iph->protocol = IPPROTO_GRE;
	iph->ihl = 5;
	tunnel->hlen = sizeof(struct iphdr) + 4;

	dev_hold(dev);
	tunnels_wc[0] = tunnel;
	return 0;
}

static struct net_protocol ipgre_protocol = {
	.handler	= ipgre_rcv,
	.err_handler	= ipgre_err,
};

/*
 *	And now the modules code and kernel interface.
 */

static int __init ipgre_init(void)
{
	int err;

	printk(KERN_INFO "GRE over IPv4 tunneling driver\n");

	if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
		printk(KERN_INFO "ipgre init: can't add protocol\n");
		return -EAGAIN;
	}

	ipgre_fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
					   ipgre_tunnel_setup);
	if (!ipgre_fb_tunnel_dev) {
		err = -ENOMEM;
		goto fail;
	}

	ipgre_fb_tunnel_dev->init = ipgre_fb_tunnel_init;

	if ((err = register_netdev(ipgre_fb_tunnel_dev)))
		goto fail;
out:
	return err;
fail:
	inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
	if (ipgre_fb_tunnel_dev)
		free_netdev(ipgre_fb_tunnel_dev);
	goto out;
}

static void __exit ipgre_fini(void)
{
	if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
		printk(KERN_INFO "ipgre close: can't remove protocol\n");

	unregister_netdev(ipgre_fb_tunnel_dev);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");