1 diff -Nurb linux-2.6.27-660/drivers/net/Kconfig linux-2.6.27-700/drivers/net/Kconfig
2 --- linux-2.6.27-660/drivers/net/Kconfig 2009-04-16 10:27:01.000000000 -0400
3 +++ linux-2.6.27-700/drivers/net/Kconfig 2009-04-16 10:27:39.000000000 -0400
6 Look at the iproute2 documentation directory for usage etc
9 + tristate "EGRE module for Ethernet over GRE Tunnels"
12 tristate "Dummy net driver support"
14 diff -Nurb linux-2.6.27-660/drivers/net/Makefile linux-2.6.27-700/drivers/net/Makefile
15 --- linux-2.6.27-660/drivers/net/Makefile 2008-10-09 18:13:53.000000000 -0400
16 +++ linux-2.6.27-700/drivers/net/Makefile 2009-04-16 10:27:39.000000000 -0400
18 # Makefile for the Linux network (ethercard) device drivers.
21 +obj-$(CONFIG_EGRE) += gre.o
22 obj-$(CONFIG_E1000) += e1000/
23 obj-$(CONFIG_E1000E) += e1000e/
24 obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
25 diff -Nurb linux-2.6.27-660/drivers/net/gre.c linux-2.6.27-700/drivers/net/gre.c
26 --- linux-2.6.27-660/drivers/net/gre.c 1969-12-31 19:00:00.000000000 -0500
27 +++ linux-2.6.27-700/drivers/net/gre.c 2009-04-16 12:48:33.000000000 -0400
30 + * Linux NET3: GRE over IP protocol decoder.
32 + * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
34 + * This program is free software; you can redistribute it and/or
35 + * modify it under the terms of the GNU General Public License
36 + * as published by the Free Software Foundation; either version
37 + * 2 of the License, or (at your option) any later version.
41 +#include <linux/capability.h>
42 +#include <linux/module.h>
43 +#include <linux/types.h>
44 +#include <linux/sched.h>
45 +#include <linux/kernel.h>
46 +#include <asm/uaccess.h>
47 +#include <linux/skbuff.h>
48 +#include <linux/netdevice.h>
49 +#include <linux/in.h>
50 +#include <linux/tcp.h>
51 +#include <linux/udp.h>
52 +#include <linux/if_arp.h>
53 +#include <linux/mroute.h>
54 +#include <linux/init.h>
55 +#include <linux/in6.h>
56 +#include <linux/inetdevice.h>
57 +#include <linux/etherdevice.h> /**XXX added XXX */
58 +#include <linux/igmp.h>
59 +#include <linux/netfilter_ipv4.h>
60 +#include <linux/if_ether.h>
62 +#include <net/sock.h>
64 +#include <net/icmp.h>
65 +#include <net/protocol.h>
66 +#include <net/ipip.h>
68 +#include <net/checksum.h>
69 +#include <net/dsfield.h>
70 +#include <net/inet_ecn.h>
71 +#include <net/xfrm.h>
74 +#include <net/ipv6.h>
75 +#include <net/ip6_fib.h>
76 +#include <net/ip6_route.h>
79 +#define ipv4_is_multicast(x) (((x) & htonl(0xf0000000)) == htonl(0xe0000000))
81 +//#define GRE_DEBUG 1
84 + Problems & solutions
85 + --------------------
87 + 1. The most important issue is detecting local dead loops.
88 + They would cause complete host lockup in transmit, which
89 + would be "resolved" by stack overflow or, if queueing is enabled,
90 + with infinite looping in net_bh.
92 + We cannot track such dead loops during route installation,
93 + it is infeasible task. The most general solutions would be
94 + to keep skb->encapsulation counter (sort of local ttl),
95 + and silently drop packet when it expires. It is the best
96 + solution, but it supposes maintaining a new variable in ALL
97 + skb, even if no tunneling is used.
99 + Current solution: t->recursion lock breaks dead loops. It looks
100 + like dev->tbusy flag, but I preferred new variable, because
101 + the semantics is different. One day, when hard_start_xmit
102 + will be multithreaded we will have to use skb->encapsulation.
106 + 2. Networking dead loops would not kill routers, but would really
107 + kill network. IP hop limit plays role of "t->recursion" in this case,
108 + if we copy it from packet being encapsulated to upper header.
109 + It is very good solution, but it introduces two problems:
111 + - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
112 + do not work over tunnels.
113 + - traceroute does not work. I planned to relay ICMP from tunnel,
114 + so that this problem would be solved and traceroute output
115 + would be even more informative. This idea appeared to be wrong:
116 + only Linux complies to rfc1812 now (yes, guys, Linux is the only
117 + true router now :-)), all routers (at least, in neighbourhood of mine)
118 + return only 8 bytes of payload. It is the end.
120 + Hence, if we want that OSPF worked or traceroute said something reasonable,
121 + we should search for another solution.
123 + One of them is to parse packet trying to detect inner encapsulation
124 + made by our node. It is difficult or even impossible, especially,
125 + taking into account fragmentation. To be short, it is not a solution at all.
127 + Current solution: The solution was UNEXPECTEDLY SIMPLE.
128 + We force DF flag on tunnels with preconfigured hop limit,
129 + that is ALL. :-) Well, it does not remove the problem completely,
130 + but exponential growth of network traffic is changed to linear
131 + (branches, that exceed pmtu are pruned) and tunnel mtu
132 + quickly degrades to a value <68, where looping stops.
133 + Yes, it is not good if there exists a router in the loop,
134 + which does not force DF, even when encapsulating packets have DF set.
135 + But it is not our problem! Nobody could accuse us, we made
136 + all that we could make. Even if it is your gated who injected
137 + fatal route to network, even if it were you who configured
138 + fatal static route: you are innocent. :-)
142 + 3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
143 + practically identical code. It would be good to glue them
144 + together, but it is not very evident, how to make them modular.
145 + sit is integral part of IPv6, ipip and gre are naturally modular.
146 + We could extract common parts (hash table, ioctl etc)
147 + to a separate module (ip_tunnel.c).
152 +static int ipgre_tunnel_init(struct net_device *dev);
153 +static void ipgre_ip_tunnel_setup(struct net_device *dev);
154 +static void ipgre_eth_tunnel_setup(struct net_device *dev);
156 +/* Fallback tunnel: no source, no destination, no key, no options */
158 +static int ipgre_fb_tunnel_init(struct net_device *dev);
160 +static struct net_device *ipgre_fb_tunnel_dev;
162 +/* Tunnel hash table */
172 + We require exact key match i.e. if a key is present in packet
173 + it will match only tunnel with the same key; if it is not present,
174 + it will match only keyless tunnel.
176 + All keyless packets, if not matching configured keyless tunnels,
177 + will match fallback tunnel.
180 +#define HASH_SIZE 1024
181 +#define HASH(addr) (ntohl(addr)&1023)
183 +static struct ip_tunnel *tunnels[4][HASH_SIZE];
185 +#define tunnels_r_l (tunnels[3])
186 +#define tunnels_r (tunnels[2])
187 +#define tunnels_l (tunnels[1])
188 +#define tunnels_wc (tunnels[0])
190 +static DEFINE_RWLOCK(ipgre_lock);
192 +/* Given src, dst and key, find appropriate for input tunnel. */
194 +static struct ip_tunnel * ipgre_tunnel_lookup(__be32 remote, __be32 local, __be32 key)
197 + unsigned hash_value = HASH(key);
198 + struct ip_tunnel *t;
200 + t = tunnels_r_l[hash_value];
202 + if (t && (t->parms.i_key == key) && (t->dev->flags&IFF_UP)) {
206 + t = tunnels_r[hash_value];
207 + if (t && (t->parms.i_key == key) && (t->dev->flags&IFF_UP))
210 + t = tunnels_l[hash_value];
211 + if (t && (t->parms.i_key == key) && (t->dev->flags&IFF_UP))
213 + t = tunnels_wc[hash_value];
214 + if (t && (t->parms.i_key == key) && (t->dev->flags&IFF_UP))
216 + if (ipgre_fb_tunnel_dev->flags&IFF_UP)
217 + return netdev_priv(ipgre_fb_tunnel_dev);
221 +static struct ip_tunnel **ipgre_bucket(struct ip_tunnel *t)
223 + __be32 remote = t->parms.iph.daddr;
224 + __be32 local = t->parms.iph.saddr;
225 + __be32 key = t->parms.i_key;
226 + unsigned h = HASH(key);
231 + if (remote && !ipv4_is_multicast(remote)) {
233 + //h ^= HASH(remote);
236 + return &tunnels[prio][h];
239 +static void ipgre_tunnel_link(struct ip_tunnel *t)
241 + struct ip_tunnel **tp = ipgre_bucket(t);
244 + write_lock_bh(&ipgre_lock);
246 + write_unlock_bh(&ipgre_lock);
249 +static void ipgre_tunnel_unlink(struct ip_tunnel *t)
251 + struct ip_tunnel **tp;
253 + for (tp = ipgre_bucket(t); *tp; tp = &(*tp)->next) {
255 + write_lock_bh(&ipgre_lock);
257 + write_unlock_bh(&ipgre_lock);
263 +static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int create)
265 + __be32 remote = parms->iph.daddr;
266 + __be32 local = parms->iph.saddr;
267 + __be32 key = parms->i_key;
268 + __be16 proto = parms->proto_type;
269 + struct ip_tunnel *t, **tp, *nt;
270 + struct net_device *dev;
271 + unsigned h = HASH(key);
273 + char name[IFNAMSIZ];
277 + if (remote && !ipv4_is_multicast(remote)) {
279 + //h ^= HASH(remote);
281 + for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
282 + if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
283 + if (key == t->parms.i_key)
290 + printk(KERN_CRIT "Adding tunnel %s with key %d\n", parms->name, ntohl(key));
292 + if (parms->name[0])
293 + strlcpy(name, parms->name, IFNAMSIZ);
296 + for (i=1; i<100; i++) {
297 + sprintf(name, "gre%d", i);
298 + if (__dev_get_by_name(&init_net, name) == NULL)
305 + /* Tunnel creation: check payload type and call appropriate
310 + dev = alloc_netdev(sizeof(*t), name, ipgre_ip_tunnel_setup);
313 + dev = alloc_netdev(sizeof(*t), name, ipgre_eth_tunnel_setup);
322 + dev->init = ipgre_tunnel_init;
323 + nt = netdev_priv(dev);
324 + nt->parms = *parms;
326 + if (register_netdevice(dev) < 0) {
332 + ipgre_tunnel_link(nt);
339 +static void ipgre_tunnel_uninit(struct net_device *dev)
341 + ipgre_tunnel_unlink(netdev_priv(dev));
346 +static void ipgre_err(struct sk_buff *skb, u32 info)
348 +#ifndef I_WISH_WORLD_WERE_PERFECT
350 +/* It is not :-( All the routers (except for Linux) return only
351 + 8 bytes of packet payload. It means, that precise relaying of
352 + ICMP in the real Internet is absolutely infeasible.
354 + Moreover, Cisco "wise men" put GRE key to the third word
355 + in GRE header. It makes impossible maintaining even soft state for keyed
356 + GRE tunnels with enabled checksum. Tell them "thank you".
358 + Well, I wonder, rfc1812 was written by Cisco employee,
359 + what the hell these idiots break standards established
363 + struct iphdr *iph = (struct iphdr*)skb->data;
364 + __be16 *p = (__be16*)(skb->data+(iph->ihl<<2));
365 + int grehlen = (iph->ihl<<2) + 4;
366 + int type = icmp_hdr(skb)->type;
367 + int code = icmp_hdr(skb)->code;
368 + struct ip_tunnel *t;
372 + if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
373 + if (flags&(GRE_VERSION|GRE_ROUTING))
375 + if (flags&GRE_KEY) {
377 + if (flags&GRE_CSUM)
382 + /* If only 8 bytes returned, keyed message will be dropped here */
383 + if (skb_headlen(skb) < grehlen)
388 + case ICMP_PARAMETERPROB:
391 + case ICMP_DEST_UNREACH:
393 + case ICMP_SR_FAILED:
394 + case ICMP_PORT_UNREACH:
395 + /* Impossible event. */
397 + case ICMP_FRAG_NEEDED:
398 + /* Soft state for pmtu is maintained by IP core. */
401 + /* All others are translated to HOST_UNREACH.
402 + rfc2003 contains "deep thoughts" about NET_UNREACH,
403 + I believe they are just ether pollution. --ANK
408 + case ICMP_TIME_EXCEEDED:
409 + if (code != ICMP_EXC_TTL)
414 + read_lock(&ipgre_lock);
415 + t = ipgre_tunnel_lookup(iph->daddr, iph->saddr, (flags&GRE_KEY) ? *(((__be32*)p) + (grehlen>>2) - 1) : 0);
416 + if (t == NULL || t->parms.iph.daddr == 0 || ipv4_is_multicast(t->parms.iph.daddr))
419 + if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
422 + if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
426 + t->err_time = jiffies;
428 + read_unlock(&ipgre_lock);
431 + struct iphdr *iph = (struct iphdr*)dp;
432 + struct iphdr *eiph;
433 + __be16 *p = (__be16*)(dp+(iph->ihl<<2));
434 + int type = skb->h.icmph->type;
435 + int code = skb->h.icmph->code;
438 + __be32 rel_info = 0;
441 + int grehlen = (iph->ihl<<2) + 4;
442 + struct sk_buff *skb2;
446 + if (skb->dev->nd_net != &init_net)
449 + if (p[1] != htons(ETH_P_IP))
453 + if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
454 + if (flags&(GRE_VERSION|GRE_ROUTING))
456 + if (flags&GRE_CSUM)
463 + if (len < grehlen + sizeof(struct iphdr))
465 + eiph = (struct iphdr*)(dp + grehlen);
470 + case ICMP_PARAMETERPROB:
471 + n = ntohl(skb->h.icmph->un.gateway) >> 24;
472 + if (n < (iph->ihl<<2))
475 + /* So... This guy found something strange INSIDE encapsulated
476 + packet. Well, he is fool, but what can we do ?
478 + rel_type = ICMP_PARAMETERPROB;
480 + rel_info = htonl(n << 24);
483 + case ICMP_DEST_UNREACH:
485 + case ICMP_SR_FAILED:
486 + case ICMP_PORT_UNREACH:
487 + /* Impossible event. */
489 + case ICMP_FRAG_NEEDED:
490 + /* And it is the only really necessary thing :-) */
491 + n = ntohs(skb->h.icmph->un.frag.mtu);
492 + if (n < grehlen+68)
495 + /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
496 + if (n > ntohs(eiph->tot_len))
498 + rel_info = htonl(n);
501 + /* All others are translated to HOST_UNREACH.
502 + rfc2003 contains "deep thoughts" about NET_UNREACH,
503 + I believe, it is just ether pollution. --ANK
505 + rel_type = ICMP_DEST_UNREACH;
506 + rel_code = ICMP_HOST_UNREACH;
510 + case ICMP_TIME_EXCEEDED:
511 + if (code != ICMP_EXC_TTL)
516 + /* Prepare fake skb to feed it to icmp_send */
517 + skb2 = skb_clone(skb, GFP_ATOMIC);
520 + dst_release(skb2->dst);
522 + skb_pull(skb2, skb->data - (u8*)eiph);
523 + skb_reset_network_header(skb2);
525 + /* Try to guess incoming interface */
526 + memset(&fl, 0, sizeof(fl));
527 + //fl.fl_net = &init_net;
528 + fl.fl4_dst = eiph->saddr;
529 + fl.fl4_tos = RT_TOS(eiph->tos);
530 + fl.proto = IPPROTO_GRE;
531 + if (ip_route_output_key(dev_net(dev),&rt, &fl)) {
535 + skb2->dev = rt->u.dst.dev;
537 + /* route "incoming" packet */
538 + if (rt->rt_flags&RTCF_LOCAL) {
541 + fl.fl4_dst = eiph->daddr;
542 + fl.fl4_src = eiph->saddr;
543 + fl.fl4_tos = eiph->tos;
544 + if (ip_route_output_key(&rt, &fl) ||
545 + rt->u.dst.dev->type != ARPHRD_IPGRE) {
552 + if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
553 + skb2->dst->dev->type != ARPHRD_IPGRE) {
559 + /* change mtu on this route */
560 + if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
561 + if (n > dst_mtu(skb2->dst)) {
565 + skb2->dst->ops->update_pmtu(skb2->dst, n);
566 + } else if (type == ICMP_TIME_EXCEEDED) {
567 + struct ip_tunnel *t = netdev_priv(skb2->dev);
568 + if (t->parms.iph.ttl) {
569 + rel_type = ICMP_DEST_UNREACH;
570 + rel_code = ICMP_HOST_UNREACH;
574 + icmp_send(skb2, rel_type, rel_code, rel_info);
579 +static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
581 + if (INET_ECN_is_ce(iph->tos)) {
582 + if (skb->protocol == htons(ETH_P_IP)) {
583 + IP_ECN_set_ce(ip_hdr(skb));
584 + } else if (skb->protocol == htons(ETH_P_IPV6)) {
585 + IP6_ECN_set_ce(ipv6_hdr(skb));
591 +ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
594 + if (skb->protocol == htons(ETH_P_IP))
595 + inner = old_iph->tos;
596 + else if (skb->protocol == htons(ETH_P_IPV6))
597 + inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
598 + return INET_ECN_encapsulate(tos, inner);
601 +static int ipgre_rcv(struct sk_buff *skb)
609 + struct ip_tunnel *tunnel;
613 + if (skb->dev->nd_net != &init_net) {
617 + if (!pskb_may_pull(skb, 16))
622 + flags = *(__be16*)h;
625 + printk(KERN_DEBUG "gre.c [601] src:%x dst:%x proto:%d %x", iph->saddr, iph->daddr, iph->protocol, skb->data);
627 + proto = ntohs(*(__be16*)(h+2)); /* XXX added XXX */
629 + if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
630 + /* - Version must be 0.
631 + - We do not support routing headers.
633 + if (flags&(GRE_VERSION|GRE_ROUTING))
636 + if (flags&GRE_CSUM) {
637 + switch (skb->ip_summed) {
638 + case CHECKSUM_COMPLETE:
639 + csum = csum_fold(skb->csum);
643 + case CHECKSUM_NONE:
645 + csum = __skb_checksum_complete(skb);
646 + skb->ip_summed = CHECKSUM_COMPLETE;
650 + if (flags&GRE_KEY) {
651 + key = *(__be32*)(h + offset);
654 + if (flags&GRE_SEQ) {
655 + seqno = ntohl(*(__be32*)(h + offset));
660 + read_lock(&ipgre_lock);
661 + if ((tunnel = ipgre_tunnel_lookup(iph->saddr, iph->daddr, key)) != NULL) {
662 + secpath_reset(skb);
664 + skb->protocol = *(__be16*)(h + 2);
665 + /* WCCP version 1 and 2 protocol decoding.
666 + * - Change protocol to IP
667 + * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
670 + skb->protocol == htons(ETH_P_WCCP)) {
671 + skb->protocol = htons(ETH_P_IP);
672 + if ((*(h + offset) & 0xF0) != 0x40)
676 + //skb->mac.raw = skb->nh.raw;
677 + skb_reset_mac_header(skb);
678 + __pskb_pull(skb, offset);
679 + skb_reset_network_header(skb);
680 + skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
681 + if(proto == ETH_P_ETH)
684 + unsigned char* tmp_hdr = skb->data;
685 + printk(KERN_DEBUG "gre.c [658] %x %x %x %x %x %x\tskb %x\n", tmp_hdr[0], tmp_hdr[1], tmp_hdr[2], tmp_hdr[3], tmp_hdr[4], tmp_hdr[5], skb->data);
687 + skb->protocol = eth_type_trans(skb, tunnel->dev);
689 + /* XXX added these lines to make arp work? XXX */
690 + /*skb->mac.raw = skb->data;*/
691 + skb->network_header = skb->network_header + ETH_HLEN;
692 + /* XXX added these lines to make arp work? XXX */
695 + tmp_hdr = skb->data;
696 + printk(KERN_DEBUG "gre.c [669] %x %x %x %x %x %x\tskb %x\n", tmp_hdr[0], tmp_hdr[1], tmp_hdr[2], tmp_hdr[3], tmp_hdr[4], tmp_hdr[5], skb->data);
697 + printk(KERN_ALERT "gre.c [671] received ethernet on gre %x %x\n",skb->protocol, ((skb->nh).iph)->protocol);
699 + memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
702 + skb->pkt_type = PACKET_HOST;
703 +#ifdef CONFIG_NET_IPGRE_BROADCAST
704 + if (ipv4_is_multicast(iph->daddr)) {
705 + /* Looped back packet, drop it! */
706 + if (((struct rtable*)skb->dst)->fl.iif == 0)
708 + tunnel->dev->stats.multicast++;
709 + skb->pkt_type = PACKET_BROADCAST;
713 + if (((flags&GRE_CSUM) && csum) ||
714 + (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
715 + tunnel->dev->stats.rx_crc_errors++;
716 + tunnel->dev->stats.rx_errors++;
719 + if (tunnel->parms.i_flags&GRE_SEQ) {
720 + if (!(flags&GRE_SEQ) ||
721 + (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
722 + tunnel->dev->stats.rx_fifo_errors++;
723 + tunnel->dev->stats.rx_errors++;
726 + tunnel->i_seqno = seqno + 1;
728 + tunnel->dev->stats.rx_packets++;
729 + tunnel->dev->stats.rx_bytes += skb->len;
730 + skb->dev = tunnel->dev;
731 + dst_release(skb->dst);
734 + ipgre_ecn_decapsulate(iph, skb);
736 + read_unlock(&ipgre_lock);
739 + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
742 + read_unlock(&ipgre_lock);
748 +static int ipgre_ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
750 + struct ip_tunnel *tunnel = netdev_priv(dev);
751 + struct net_device_stats *stats = &tunnel->dev->stats;
752 + struct iphdr *old_iph = ip_hdr(skb);
753 + struct iphdr *tiph;
756 + struct rtable *rt; /* Route to the other host */
757 + struct net_device *tdev; /* Device to other host */
758 + struct iphdr *iph; /* Our new IP header */
759 + int max_headroom; /* The extra header space needed */
764 + if (tunnel->recursion++) {
765 + tunnel->dev->stats.collisions++;
769 + if (dev->header_ops) {
771 + tiph = (struct iphdr*)skb->data;
773 + gre_hlen = tunnel->hlen;
774 + tiph = &tunnel->parms.iph;
777 + if ((dst = tiph->daddr) == 0) {
780 + if (skb->dst == NULL) {
781 + tunnel->dev->stats.tx_fifo_errors++;
785 + if (skb->protocol == htons(ETH_P_IP)) {
786 + rt = (struct rtable*)skb->dst;
787 + if ((dst = rt->rt_gateway) == 0)
788 + goto tx_error_icmp;
791 + else if (skb->protocol == htons(ETH_P_IPV6)) {
792 + struct in6_addr *addr6;
794 + struct neighbour *neigh = skb->dst->neighbour;
799 + addr6 = (struct in6_addr*)&neigh->primary_key;
800 + addr_type = ipv6_addr_type(addr6);
802 + if (addr_type == IPV6_ADDR_ANY) {
803 + addr6 = &ipv6_hdr(skb)->daddr;
804 + addr_type = ipv6_addr_type(addr6);
807 + if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
808 + goto tx_error_icmp;
818 + if (skb->protocol == htons(ETH_P_IP))
819 + tos = old_iph->tos;
824 + struct flowi fl = { //.fl_net = &init_net,
825 + .oif = tunnel->parms.link,
828 + .saddr = tiph->saddr,
829 + .tos = RT_TOS(tos) } },
830 + .proto = IPPROTO_GRE };
831 + if (ip_route_output_key(dev_net(dev),&rt, &fl)) {
832 + tunnel->dev->stats.tx_carrier_errors++;
836 + tdev = rt->u.dst.dev;
841 + tunnel->dev->stats.collisions++;
845 + df = tiph->frag_off;
847 + mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
849 + mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
852 + skb->dst->ops->update_pmtu(skb->dst, mtu);
854 + if (skb->protocol == htons(ETH_P_IP)) {
855 + df |= (old_iph->frag_off&htons(IP_DF));
857 + if ((old_iph->frag_off&htons(IP_DF)) &&
858 + mtu < ntohs(old_iph->tot_len)) {
859 + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
865 + else if (skb->protocol == htons(ETH_P_IPV6)) {
866 + struct rt6_info *rt6 = (struct rt6_info*)skb->dst;
868 + if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
869 + if ((tunnel->parms.iph.daddr && !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
870 + rt6->rt6i_dst.plen == 128) {
871 + rt6->rt6i_flags |= RTF_MODIFIED;
872 + skb->dst->metrics[RTAX_MTU-1] = mtu;
876 + if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
877 + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
884 + if (tunnel->err_count > 0) {
885 + if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
886 + tunnel->err_count--;
888 + dst_link_failure(skb);
890 + tunnel->err_count = 0;
893 + max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
895 + if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
896 + struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
899 + stats->tx_dropped++;
900 + dev_kfree_skb(skb);
901 + tunnel->recursion--;
905 + skb_set_owner_w(new_skb, skb->sk);
906 + dev_kfree_skb(skb);
908 + old_iph = ip_hdr(skb);
911 + skb->transport_header = skb->network_header;
912 + skb_push(skb, gre_hlen);
913 + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
914 + IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
916 + dst_release(skb->dst);
917 + skb->dst = &rt->u.dst;
920 + * Push down and install the IPIP header.
925 + iph->ihl = sizeof(struct iphdr) >> 2;
926 + iph->frag_off = df;
927 + iph->protocol = IPPROTO_GRE;
928 + iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
929 + iph->daddr = rt->rt_dst;
930 + iph->saddr = rt->rt_src;
932 + if ((iph->ttl = tiph->ttl) == 0) {
933 + if (skb->protocol == htons(ETH_P_IP))
934 + iph->ttl = old_iph->ttl;
936 + else if (skb->protocol == htons(ETH_P_IPV6))
937 + iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit;
940 + iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
943 + ((__be16*)(iph+1))[0] = tunnel->parms.o_flags;
944 + ((__be16*)(iph+1))[1] = skb->protocol;
946 + if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
947 + __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
949 + if (tunnel->parms.o_flags&GRE_SEQ) {
951 + *ptr = htonl(tunnel->o_seqno);
954 + if (tunnel->parms.o_flags&GRE_KEY) {
955 + *ptr = tunnel->parms.o_key;
958 + if (tunnel->parms.o_flags&GRE_CSUM) {
960 + *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
967 + tunnel->recursion--;
971 + dst_link_failure(skb);
974 + stats->tx_errors++;
975 + dev_kfree_skb(skb);
976 + tunnel->recursion--;
980 +static int ipgre_eth_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
982 + struct ip_tunnel *tunnel = netdev_priv(dev);
983 + struct net_device_stats *stats = &tunnel->dev->stats;
984 + struct iphdr *old_iph = ip_hdr(skb);
985 + struct iphdr *tiph = &tunnel->parms.iph;
988 + struct rtable *rt; /* Route to the other host */
989 + struct net_device *tdev; /* Device to other host */
990 + int gre_hlen = tunnel->hlen; /* XXX changed XXX*/
991 + //struct etheriphdr *ethiph;
992 + struct iphdr *iph; /* Our new IP header */
993 + int max_headroom; /* The extra header space needed */
997 + printk(KERN_ALERT "gre.c:972 Starting xmit\n");
1000 + if (tunnel->recursion++) {
1001 + stats->collisions++;
1005 + /* Need valid non-ipv4_is_multicast daddr. */
1006 + if (tiph->daddr == 0 || ipv4_is_multicast(tiph->daddr))
1011 + if (skb->protocol == htons(ETH_P_IP))
1012 + tos = old_iph->tos;
1016 + printk(KERN_ALERT "gre.c:991 Passed tos assignment.\n");
1021 + struct flowi fl = { //.fl_net = &init_net,
1022 + .oif = tunnel->parms.link,
1023 + .nl_u = { .ip4_u =
1024 + { .daddr = tiph->daddr,
1025 + .saddr = tiph->saddr,
1026 + .tos = RT_TOS(tos) } },
1027 + .proto = IPPROTO_GRE };
1028 + if (ip_route_output_key(dev_net(dev),&rt, &fl)) {
1029 + stats->tx_carrier_errors++;
1030 + goto tx_error_icmp;
1033 + tdev = rt->u.dst.dev;
1035 + printk(KERN_ALERT "gre.c:1006 Passed the route retrieval\n");
1037 + if (tdev == dev) {
1039 + stats->collisions++;
1043 + printk(KERN_ALERT "gre.c:1018 Passed tdev collision check.\n");
1046 + /* Check MTU stuff if kernel panic */
1047 + df = tiph->frag_off;
1049 + mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
1051 + mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
1054 + skb->dst->ops->update_pmtu(skb->dst, mtu);
1057 + printk(KERN_ALERT "gre.c:1032 Passed the pmtu setting.\n");
1060 + if (skb->protocol == htons(ETH_P_IP)) {
1061 + df |= (old_iph->frag_off&htons(IP_DF));
1063 + if ((old_iph->frag_off & htons(IP_DF)) &&
1064 + mtu < ntohs(old_iph->tot_len)) {
1065 + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1071 + else if (skb->protocol == htons(ETH_P_IPV6)) {
1072 + struct rt6_info *rt6 = (struct rt6_info*)skb->dst;
1074 + if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
1075 + if (tiph->daddr || rt6->rt6i_dst.plen == 128) {
1076 + rt6->rt6i_flags |= RTF_MODIFIED;
1077 + skb->dst->metrics[RTAX_MTU-1] = mtu;
1081 + /* @@@ Is this correct? */
1082 + if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
1083 + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
1090 + printk(KERN_ALERT "gre.c:1065 Passed the fragmentation check.\n");
1093 + if (tunnel->err_count > 0) {
1094 + if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
1095 + tunnel->err_count--;
1096 + dst_link_failure(skb);
1098 + tunnel->err_count = 0;
1101 + max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
1103 + if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
1104 + struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
1107 + stats->tx_dropped++;
1108 + dev_kfree_skb(skb);
1109 + tunnel->recursion--;
1113 + skb_set_owner_w(new_skb, skb->sk);
1114 + dev_kfree_skb(skb);
1116 + old_iph = ip_hdr(skb);
1119 + printk(KERN_ALERT "gre.c:1094 Passed the headroom calculation\n");
1123 + skb->transport_header = skb->mac_header; // Added by valas
1124 + skb_push(skb, gre_hlen);
1125 + skb_reset_network_header(skb);
1126 + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1127 + dst_release(skb->dst);
1128 + skb->dst = &rt->u.dst;
1131 + * Push down and install the etherip header.
1134 + iph = ip_hdr(skb);
1136 + iph->ihl = sizeof(struct iphdr) >> 2;
1137 + iph->frag_off = df;
1138 + iph->protocol = IPPROTO_GRE;
1139 + iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
1140 + iph->daddr = rt->rt_dst;
1141 + iph->saddr = rt->rt_src;
1143 +/* ethiph->version = htons(ETHERIP_VERSION); */
1145 + printk(KERN_ALERT "gre.c:1121 Passed outer IP header construction.\n");
1148 + if ((iph->ttl = tiph->ttl) == 0) {
1149 + if (skb->protocol == htons(ETH_P_IP))
1150 + iph->ttl = old_iph->ttl;
1152 + else if (skb->protocol == htons(ETH_P_IPV6))
1153 + iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit;
1156 + iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
1159 + printk(KERN_ALERT "gre.c:1006 Passed the TTL check.\n");
1162 + ((__be16*)(iph+1))[0] = tunnel->parms.o_flags;
1163 + ((__be16*)(iph+1))[1] = htons(tunnel->parms.proto_type);
1165 + if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
1166 + __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
1168 + if (tunnel->parms.o_flags&GRE_SEQ) {
1169 + ++tunnel->o_seqno;
1170 + *ptr = htonl(tunnel->o_seqno);
1173 + if (tunnel->parms.o_flags&GRE_KEY) {
1174 + *ptr = tunnel->parms.o_key;
1177 + if (tunnel->parms.o_flags&GRE_CSUM) {
1179 + *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
1183 + printk(KERN_ALERT "gre.c:1006 Passed the tunnel transmit.\n");
1189 + tunnel->recursion--;
1193 + dst_link_failure(skb);
1196 + stats->tx_errors++;
1197 + dev_kfree_skb(skb);
1198 + tunnel->recursion--;
1204 +ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1207 + struct ip_tunnel_parm p;
1208 + struct ip_tunnel *t;
1210 + printk(KERN_ALERT "1174 GRE: entering gre ioctl. command is: %d\n", cmd);
1213 + case SIOCGETTUNNEL:
1215 + if (dev == ipgre_fb_tunnel_dev) {
1216 + if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1220 + t = ipgre_tunnel_locate(&p, 0);
1223 + t = netdev_priv(dev);
1224 + memcpy(&p, &t->parms, sizeof(p));
1225 + if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1229 + case SIOCADDTUNNEL:
1230 + case SIOCCHGTUNNEL:
1232 + if (!capable(CAP_NET_ADMIN))
1236 + if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1240 + if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
1241 + p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
1242 + ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
1245 + p.iph.frag_off |= htons(IP_DF);
1247 + if (!(p.i_flags&GRE_KEY))
1249 + if (!(p.o_flags&GRE_KEY))
1252 + t = ipgre_tunnel_locate(&p, cmd == SIOCADDTUNNEL);
1253 + if (t) printk(KERN_ALERT "1174 GRE: proto %s %d\n", p.name, p.proto_type);
1254 + if (dev != ipgre_fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1256 + if (t->dev != dev) {
1261 + unsigned nflags=0;
1263 + t = netdev_priv(dev);
1265 + if (ipv4_is_multicast(p.iph.daddr))
1266 + nflags = IFF_BROADCAST;
1267 + else if (p.iph.daddr)
1268 + nflags = IFF_POINTOPOINT;
1270 + /* XXX:Set back IFF_BROADCAST if
1271 + * transporting ethernet */
1272 + printk(KERN_ALERT "1193 GRE: proto %s %d\n", p.name, p.proto_type);
1273 + if (p.proto_type == ETH_P_ETH)
1274 + nflags = IFF_BROADCAST;
1276 + if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
1280 + ipgre_tunnel_unlink(t);
1281 + t->parms.iph.saddr = p.iph.saddr;
1282 + t->parms.iph.daddr = p.iph.daddr;
1283 + t->parms.i_key = p.i_key;
1284 + t->parms.o_key = p.o_key;
1285 + /* XXX:Copy in the protocol field */
1286 + t->parms.proto_type = p.proto_type;
1287 + if (t->parms.proto_type != ETH_P_ETH)
1289 + memcpy(dev->dev_addr, &p.iph.saddr, 4);
1290 + memcpy(dev->broadcast, &p.iph.daddr, 4);
1292 + ipgre_tunnel_link(t);
1293 + netdev_state_change(dev);
1299 + if (cmd == SIOCCHGTUNNEL) {
1300 + t->parms.iph.ttl = p.iph.ttl;
1301 + t->parms.iph.tos = p.iph.tos;
1302 + t->parms.iph.frag_off = p.iph.frag_off;
1304 + if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
1307 + err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1310 + case SIOCDELTUNNEL:
1312 + if (!capable(CAP_NET_ADMIN))
1315 + if (dev == ipgre_fb_tunnel_dev) {
1317 + if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1320 + if ((t = ipgre_tunnel_locate(&p, 0)) == NULL)
1323 + if (t == netdev_priv(ipgre_fb_tunnel_dev))
1327 + unregister_netdevice(dev); // added by Valas
1338 +static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
1340 + return &(((struct ip_tunnel*)netdev_priv(dev))->dev->stats);
1343 +static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1345 + struct ip_tunnel *tunnel = netdev_priv(dev);
1346 + if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen)
1348 + dev->mtu = new_mtu;
1352 +#ifdef CONFIG_NET_IPGRE_BROADCAST
1353 +/* Nice toy. Unfortunately, useless in real life :-)
1354 + It allows to construct virtual multiprotocol broadcast "LAN"
1355 + over the Internet, provided ipv4_is_multicast routing is tuned.
1358 + I have no idea was this bicycle invented before me,
1359 + so that I had to set ARPHRD_IPGRE to a random value.
1360 + I have an impression, that Cisco could make something similar,
1361 + but this feature is apparently missing in IOS<=11.2(8).
1363 + I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1364 + with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1366 + ping -t 255 224.66.66.66
1368 + If nobody answers, mbone does not work.
1370 + ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
1371 + ip addr add 10.66.66.<somewhat>/24 dev Universe
1372 + ifconfig Universe up
1373 + ifconfig Universe add fe80::<Your_real_addr>/10
1374 + ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
1377 + ftp fec0:6666:6666::193.233.7.65
1382 +static int ipgre_open(struct net_device *dev)
1384 + struct ip_tunnel *t = netdev_priv(dev);
1386 + if (ipv4_is_multicast(t->parms.iph.daddr)) {
1387 + struct flowi fl = { //.fl_net = &init_net,
1388 + .oif = t->parms.link,
1389 + .nl_u = { .ip4_u =
1390 + { .daddr = t->parms.iph.daddr,
1391 + .saddr = t->parms.iph.saddr,
1392 + .tos = RT_TOS(t->parms.iph.tos) } },
1393 + .proto = IPPROTO_GRE };
1394 + struct rtable *rt;
1395 + if (ip_route_output_key(dev_net(dev),&rt, &fl))
1396 + return -EADDRNOTAVAIL;
1397 + dev = rt->u.dst.dev;
1399 + if (__in_dev_get_rtnl(dev) == NULL)
1400 + return -EADDRNOTAVAIL;
1401 + t->mlink = dev->ifindex;
1402 + ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
1407 +static int ipgre_close(struct net_device *dev)
1409 + struct ip_tunnel *t = netdev_priv(dev);
1410 + if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
1411 + struct in_device *in_dev = inetdev_by_index(&init_net, t->mlink);
1413 + ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1414 + in_dev_put(in_dev);
1422 +static void ipgre_ip_tunnel_setup(struct net_device *dev)
1424 + //SET_MODULE_OWNER(dev);
1425 + dev->uninit = ipgre_tunnel_uninit;
1426 + dev->destructor = free_netdev;
1427 + dev->hard_start_xmit = ipgre_ip_tunnel_xmit;
1428 + dev->get_stats = ipgre_tunnel_get_stats;
1429 + dev->do_ioctl = ipgre_tunnel_ioctl;
1430 + dev->change_mtu = ipgre_tunnel_change_mtu;
1432 + dev->type = ARPHRD_IPGRE;
1433 + dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
1434 + dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
1435 + dev->flags = IFF_NOARP;
1437 + dev->addr_len = 4;
1440 +/* Tunnel setup for ipgre_eth */
1441 +static void ipgre_eth_tunnel_setup(struct net_device *dev)
1443 + //SET_MODULE_OWNER(dev);
1446 + dev->uninit = ipgre_tunnel_uninit;
1447 + dev->destructor = free_netdev;
1448 + dev->hard_start_xmit = ipgre_eth_tunnel_xmit;
1449 + dev->get_stats = ipgre_tunnel_get_stats;
1450 + dev->do_ioctl = ipgre_tunnel_ioctl;
1451 + dev->change_mtu = ipgre_tunnel_change_mtu;
1453 + dev->hard_header_len = ETH_HLEN + sizeof(struct iphdr) + 4;
1454 + dev->tx_queue_len = 0;
1455 + random_ether_addr(dev->dev_addr);
1458 + unsigned char* d = dev->dev_addr;
1459 + printk(KERN_ALERT "Here is the address we got:%x%x%x%x%x%x\n",d[0],d[1],d[2],d[3],d[4],d[5]);
1465 +static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1466 + unsigned short type,
1467 + const void *daddr, const void *saddr, unsigned len)
1469 + struct ip_tunnel *t = netdev_priv(dev);
1470 + struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1471 + __be16 *p = (__be16*)(iph+1);
1473 + memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1474 + p[0] = t->parms.o_flags;
1475 + p[1] = htons(type);
1478 + * Set the source hardware address.
1482 + memcpy(&iph->saddr, saddr, 4);
1485 + memcpy(&iph->daddr, daddr, 4);
1488 + if (iph->daddr && !ipv4_is_multicast(iph->daddr))
1494 +static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1496 + struct iphdr *iph = (struct iphdr*) skb_mac_header(skb);
1497 + memcpy(haddr, &iph->saddr, 4);
1501 +static const struct header_ops ipgre_header_ops = {
1502 + .create = ipgre_header,
1503 + .parse = ipgre_header_parse,
1506 +static int ipgre_tunnel_init(struct net_device *dev)
1508 + struct net_device *tdev = NULL;
1509 + struct ip_tunnel *tunnel;
1510 + struct iphdr *iph;
1511 + int hlen = LL_MAX_HEADER;
1512 + int mtu = ETH_DATA_LEN;
1513 + int addend = sizeof(struct iphdr) + 4;
1515 + tunnel = netdev_priv(dev);
1516 + iph = &tunnel->parms.iph;
1518 + tunnel->dev = dev;
1519 + strcpy(tunnel->parms.name, dev->name);
1521 + if (tunnel->parms.proto_type != ETH_P_ETH)
1523 + memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1524 + memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1527 + /* Guess output device to choose reasonable mtu and hard_header_len */
1530 + struct flowi fl = { //.fl_net = &init_net,
1531 + .oif = tunnel->parms.link,
1532 + .nl_u = { .ip4_u =
1533 + { .daddr = iph->daddr,
1534 + .saddr = iph->saddr,
1535 + .tos = RT_TOS(iph->tos) } },
1536 + .proto = IPPROTO_GRE };
1537 + struct rtable *rt;
1538 + if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
1539 + tdev = rt->u.dst.dev;
1543 + if (tunnel->parms.proto_type == ETH_P_ETH)
1545 + dev->flags |= IFF_BROADCAST;
1549 + dev->flags |= IFF_POINTOPOINT;
1552 +#ifdef CONFIG_NET_IPGRE_BROADCAST
1553 + if (ipv4_is_multicast(iph->daddr)) {
1556 + dev->flags = IFF_BROADCAST;
1557 + dev->header_ops = &ipgre_header_ops;
1558 + dev->open = ipgre_open;
1559 + dev->stop = ipgre_close;
1564 + if (!tdev && tunnel->parms.link)
1565 + tdev = __dev_get_by_index(&init_net, tunnel->parms.link);
1568 + hlen = tdev->hard_header_len;
1571 + dev->iflink = tunnel->parms.link;
1573 + /* Precalculate GRE options length */
1574 + if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
1575 + if (tunnel->parms.o_flags&GRE_CSUM)
1577 + if (tunnel->parms.o_flags&GRE_KEY)
1579 + if (tunnel->parms.o_flags&GRE_SEQ)
1582 + dev->hard_header_len = hlen + addend;
1583 + dev->mtu = mtu - addend;
1584 + tunnel->hlen = addend;
1588 +static int __init ipgre_fb_tunnel_init(struct net_device *dev)
1590 + struct ip_tunnel *tunnel = netdev_priv(dev);
1591 + struct iphdr *iph = &tunnel->parms.iph;
1593 + tunnel->dev = dev;
1594 + strcpy(tunnel->parms.name, dev->name);
1597 + iph->protocol = IPPROTO_GRE;
1599 + tunnel->hlen = sizeof(struct iphdr) + 4;
1602 + tunnels_wc[0] = tunnel;
1607 +static struct net_protocol ipgre_protocol = {
1608 + .handler = ipgre_rcv,
1609 + .err_handler = ipgre_err,
1614 + * And now the modules code and kernel interface.
1617 +static int __init ipgre_init(void)
1621 + printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
1623 + if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
1624 + printk(KERN_INFO "ipgre init: can't add protocol\n");
1628 + ipgre_fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
1629 + ipgre_ip_tunnel_setup);
1630 + if (!ipgre_fb_tunnel_dev) {
1635 + ipgre_fb_tunnel_dev->init = ipgre_fb_tunnel_init;
1637 + if ((err = register_netdev(ipgre_fb_tunnel_dev)))
1642 + free_netdev(ipgre_fb_tunnel_dev);
1644 + inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
1648 +static void __exit ipgre_destroy_tunnels(void)
1652 + for (prio = 0; prio < 4; prio++) {
1654 + for (h = 0; h < HASH_SIZE; h++) {
1655 + struct ip_tunnel *t;
1656 + while ((t = tunnels[prio][h]) != NULL)
1657 + unregister_netdevice(t->dev);
1662 +static void __exit ipgre_fini(void)
1664 + if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
1665 + printk(KERN_INFO "ipgre close: can't remove protocol\n");
1668 + ipgre_destroy_tunnels();
1672 +module_init(ipgre_init);
1673 +module_exit(ipgre_fini);
1674 +MODULE_LICENSE("GPL");
1675 diff -Nurb linux-2.6.27-660/include/linux/if_ether.h linux-2.6.27-700/include/linux/if_ether.h
1676 --- linux-2.6.27-660/include/linux/if_ether.h 2008-10-09 18:13:53.000000000 -0400
1677 +++ linux-2.6.27-700/include/linux/if_ether.h 2009-04-16 10:27:39.000000000 -0400
1679 #define ETH_P_DIAG 0x6005 /* DEC Diagnostics */
1680 #define ETH_P_CUST 0x6006 /* DEC Customer use */
1681 #define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */
1682 +#define ETH_P_ETH 0x6558 /* Ethernet in Ethernet */
1683 #define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */
1684 #define ETH_P_ATALK 0x809B /* Appletalk DDP */
1685 #define ETH_P_AARP 0x80F3 /* Appletalk AARP */
1686 diff -Nurb linux-2.6.27-660/include/linux/if_tunnel.h linux-2.6.27-700/include/linux/if_tunnel.h
1687 --- linux-2.6.27-660/include/linux/if_tunnel.h 2008-10-09 18:13:53.000000000 -0400
1688 +++ linux-2.6.27-700/include/linux/if_tunnel.h 2009-04-16 10:27:39.000000000 -0400
1693 + __be16 proto_type; /* Added: GRE payload EtherType (e.g. ETH_P_ETH) */
1697 diff -Nurb linux-2.6.27-660/net/ipv4/ip_gre.c linux-2.6.27-700/net/ipv4/ip_gre.c
1698 --- linux-2.6.27-660/net/ipv4/ip_gre.c 2008-10-09 18:13:53.000000000 -0400
1699 +++ linux-2.6.27-700/net/ipv4/ip_gre.c 2009-04-16 12:48:33.000000000 -0400
1701 #include <linux/init.h>
1702 #include <linux/in6.h>
1703 #include <linux/inetdevice.h>
1704 +#include <linux/etherdevice.h> /**XXX added XXX */
1705 #include <linux/igmp.h>
1706 #include <linux/netfilter_ipv4.h>
1707 #include <linux/if_ether.h>
1709 #include <net/ip6_route.h>
1712 +#define MULTICAST(x) (((x) & htonl(0xf0000000)) == htonl(0xe0000000))
1714 +//#define GRE_DEBUG 1
1717 Problems & solutions
1718 --------------------
1722 static int ipgre_tunnel_init(struct net_device *dev);
1723 -static void ipgre_tunnel_setup(struct net_device *dev);
1724 +static void ipgre_ip_tunnel_setup(struct net_device *dev);
1725 +static void ipgre_eth_tunnel_setup(struct net_device *dev);
1727 /* Fallback tunnel: no source, no destination, no key, no options */
1730 __be32 remote = parms->iph.daddr;
1731 __be32 local = parms->iph.saddr;
1732 __be32 key = parms->i_key;
1733 + __be16 proto = parms->proto_type;
1734 struct ip_tunnel *t, **tp, *nt;
1735 struct net_device *dev;
1736 char name[IFNAMSIZ];
1737 @@ -269,12 +276,28 @@
1741 + printk(KERN_CRIT "Adding tunnel %s with key %d\n", parms->name, ntohl(key));
1744 strlcpy(name, parms->name, IFNAMSIZ);
1746 sprintf(name, "gre%%d");
1748 - dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
1750 + /* Tunnel creation: check payload type and call appropriate
1755 + dev = alloc_netdev(sizeof(*t), name, ipgre_ip_tunnel_setup);
1758 + dev = alloc_netdev(sizeof(*t), name, ipgre_eth_tunnel_setup);
1769 struct ip_tunnel *tunnel;
1773 if (!pskb_may_pull(skb, 16))
1775 @@ -439,6 +463,11 @@
1777 flags = *(__be16*)h;
1780 + printk(KERN_DEBUG "gre.c [601] src:%x dst:%x proto:%d %p", iph->saddr, iph->daddr, iph->protocol, skb->data);
1782 + proto = ntohs(*(__be16*)(h+2)); /* XXX added XXX */
1784 if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
1785 /* - Version must be 0.
1786 - We do not support routing headers.
1787 @@ -493,7 +522,29 @@
1788 __pskb_pull(skb, offset);
1789 skb_reset_network_header(skb);
1790 skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
1791 + if(proto == ETH_P_ETH)
1794 + unsigned char* tmp_hdr = skb->data;
1795 + printk(KERN_DEBUG "gre.c [658] %x %x %x %x %x %x\tskb %p\n", tmp_hdr[0], tmp_hdr[1], tmp_hdr[2], tmp_hdr[3], tmp_hdr[4], tmp_hdr[5], skb->data);
1797 + skb->protocol = eth_type_trans(skb, tunnel->dev);
1799 + /* XXX added these lines to make arp work? XXX */
1800 + /*skb->mac.raw = skb->data;*/
1801 + skb->network_header = skb->network_header + ETH_HLEN;
1802 + /* XXX added these lines to make arp work? XXX */
1805 + tmp_hdr = skb->data;
1806 + printk(KERN_DEBUG "gre.c [669] %x %x %x %x %x %x\tskb %p\n", tmp_hdr[0], tmp_hdr[1], tmp_hdr[2], tmp_hdr[3], tmp_hdr[4], tmp_hdr[5], skb->data);
1807 + printk(KERN_ALERT "gre.c [671] received ethernet on gre %x\n",skb->protocol);
1809 + memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
1812 skb->pkt_type = PACKET_HOST;
1814 #ifdef CONFIG_NET_IPGRE_BROADCAST
1815 if (ipv4_is_multicast(iph->daddr)) {
1816 /* Looped back packet, drop it! */
1821 -static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
1822 +static int ipgre_ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
1824 struct ip_tunnel *tunnel = netdev_priv(dev);
1825 struct net_device_stats *stats = &tunnel->dev->stats;
1826 @@ -799,9 +850,17 @@
1827 tdev = rt->u.dst.dev;
1830 + if (tunnel->parms.proto_type == ETH_P_ETH)
1832 + dev->flags |= IFF_BROADCAST;
1836 dev->flags |= IFF_POINTOPOINT;
1841 if (!tdev && tunnel->parms.link)
1842 tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
1844 @@ -822,10 +881,234 @@
1846 dev->hard_header_len = hlen + addend;
1847 dev->mtu = mtu - addend;
1848 + if (tunnel->parms.proto_type == ETH_P_ETH)
1849 + dev->mtu -= ETH_HLEN;
1850 tunnel->hlen = addend;
1854 +static int ipgre_eth_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
1856 + struct ip_tunnel *tunnel = netdev_priv(dev);
1857 + struct net_device_stats *stats = &tunnel->dev->stats;
1858 + struct iphdr *old_iph = ip_hdr(skb);
1859 + struct iphdr *tiph = &tunnel->parms.iph;
1862 + struct rtable *rt; /* Route to the other host */
1863 + struct net_device *tdev; /* Device to other host */
1864 + int gre_hlen = tunnel->hlen; /* XXX changed XXX*/
1865 + //struct etheriphdr *ethiph;
1866 + struct iphdr *iph; /* Our new IP header */
1867 + int max_headroom; /* The extra header space needed */
1871 + printk(KERN_ALERT "gre.c:972 Starting xmit\n");
1874 + if (tunnel->recursion++) {
1875 + stats->collisions++;
1879 + /* Need valid non-multicast daddr. */
1880 + if (tiph->daddr == 0 || MULTICAST(tiph->daddr))
1885 + if (skb->protocol == htons(ETH_P_IP))
1886 + tos = old_iph->tos;
1890 + printk(KERN_ALERT "gre.c:991 Passed tos assignment.\n");
1895 + struct flowi fl = { //.fl_net = &init_net,
1896 + .oif = tunnel->parms.link,
1897 + .nl_u = { .ip4_u =
1898 + { .daddr = tiph->daddr,
1899 + .saddr = tiph->saddr,
1900 + .tos = RT_TOS(tos) } },
1901 + .proto = IPPROTO_GRE };
1902 + if (ip_route_output_key(dev_net(dev),&rt, &fl)) {
1903 + stats->tx_carrier_errors++;
1904 + goto tx_error_icmp;
1907 + tdev = rt->u.dst.dev;
1909 + printk(KERN_ALERT "gre.c:1006 Passed the route retrieval\n");
1911 + if (tdev == dev) {
1913 + stats->collisions++;
1917 + printk(KERN_ALERT "gre.c:1018 Passed tdev collision check.\n");
1920 + /* Check MTU stuff if kernel panic */
1921 + df = tiph->frag_off;
1923 + mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
1925 + mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
1928 + skb->dst->ops->update_pmtu(skb->dst, mtu);
1931 + printk(KERN_ALERT "gre.c:1032 Passed the pmtu setting.\n");
1934 + if (skb->protocol == htons(ETH_P_IP)) {
1935 + df |= (old_iph->frag_off&htons(IP_DF));
1937 + if ((old_iph->frag_off & htons(IP_DF)) &&
1938 + mtu < ntohs(old_iph->tot_len)) {
1939 + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1945 + else if (skb->protocol == htons(ETH_P_IPV6)) {
1946 + struct rt6_info *rt6 = (struct rt6_info*)skb->dst;
1948 + if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
1949 + if (tiph->daddr || rt6->rt6i_dst.plen == 128) {
1950 + rt6->rt6i_flags |= RTF_MODIFIED;
1951 + skb->dst->metrics[RTAX_MTU-1] = mtu;
1955 + /* @@@ Is this correct? */
1956 + if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
1957 + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
1964 + printk(KERN_ALERT "gre.c:1065 Passed the fragmentation check.\n");
1967 + if (tunnel->err_count > 0) {
1968 + if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
1969 + tunnel->err_count--;
1970 + dst_link_failure(skb);
1972 + tunnel->err_count = 0;
1975 + max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
1977 + if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
1978 + struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
1981 + stats->tx_dropped++;
1982 + dev_kfree_skb(skb);
1983 + tunnel->recursion--;
1987 + skb_set_owner_w(new_skb, skb->sk);
1988 + dev_kfree_skb(skb);
1990 + old_iph = ip_hdr(skb);
1993 + printk(KERN_ALERT "gre.c:1094 Passed the headroom calculation\n");
1996 + skb->transport_header = skb->data;
1997 + skb_push(skb, gre_hlen);
1998 + skb_reset_network_header(skb);
1999 + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
2000 + dst_release(skb->dst);
2001 + skb->dst = &rt->u.dst;
2004 + * Push down and install the etherip header.
2007 + iph = ip_hdr(skb);
2009 + iph->ihl = sizeof(struct iphdr) >> 2;
2010 + iph->frag_off = df;
2011 + iph->protocol = IPPROTO_GRE;
2012 + iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
2013 + iph->daddr = rt->rt_dst;
2014 + iph->saddr = rt->rt_src;
2016 +/* ethiph->version = htons(ETHERIP_VERSION); */
2018 + printk(KERN_ALERT "gre.c:1121 Passed outer IP header construction.\n");
2021 + if ((iph->ttl = tiph->ttl) == 0) {
2022 + if (skb->protocol == htons(ETH_P_IP))
2023 + iph->ttl = old_iph->ttl;
2025 + else if (skb->protocol == htons(ETH_P_IPV6))
2026 + iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit;
2029 + iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
2032 + printk(KERN_ALERT "gre.c:1006 Passed the TTL check.\n");
2035 + ((__be16*)(iph+1))[0] = tunnel->parms.o_flags;
2036 + ((__be16*)(iph+1))[1] = htons(tunnel->parms.proto_type);
2038 + if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
2039 + __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
2041 + if (tunnel->parms.o_flags&GRE_SEQ) {
2042 + ++tunnel->o_seqno;
2043 + *ptr = htonl(tunnel->o_seqno);
2046 + if (tunnel->parms.o_flags&GRE_KEY) {
2047 + *ptr = tunnel->parms.o_key;
2050 + if (tunnel->parms.o_flags&GRE_CSUM) {
2052 + *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
2056 + printk(KERN_ALERT "gre.c:1006 Passed the tunnel transmit.\n");
2062 + tunnel->recursion--;
2066 + dst_link_failure(skb);
2069 + stats->tx_errors++;
2070 + dev_kfree_skb(skb);
2071 + tunnel->recursion--;
2077 ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
2079 @@ -876,6 +1159,7 @@
2082 t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
2083 + if (t) printk(KERN_ALERT "1174 GRE: proto %s %x\n", p.name, p.proto_type);
2085 if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
2087 @@ -893,6 +1177,12 @@
2088 else if (p.iph.daddr)
2089 nflags = IFF_POINTOPOINT;
2091 + /* XXX:Set back IFF_BROADCAST if
2092 + * transporting ethernet */
2093 + printk(KERN_ALERT "1193 GRE: proto %s %d\n", p.name, p.proto_type);
2094 + if (p.proto_type == ETH_P_ETH)
2095 + nflags = IFF_BROADCAST;
2097 if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
2100 @@ -902,8 +1192,13 @@
2101 t->parms.iph.daddr = p.iph.daddr;
2102 t->parms.i_key = p.i_key;
2103 t->parms.o_key = p.o_key;
2104 + /* XXX: Copy in the protocol field */
2105 + t->parms.proto_type = p.proto_type;
2106 + if (t->parms.proto_type != ETH_P_ETH) {
2107 memcpy(dev->dev_addr, &p.iph.saddr, 4);
2108 memcpy(dev->broadcast, &p.iph.daddr, 4);
2111 ipgre_tunnel_link(ign, t);
2112 netdev_state_change(dev);
2114 @@ -1076,13 +1371,13 @@
2118 -static void ipgre_tunnel_setup(struct net_device *dev)
2119 +static void ipgre_ip_tunnel_setup(struct net_device *dev)
2121 dev->uninit = ipgre_tunnel_uninit;
2122 dev->destructor = free_netdev;
2123 - dev->hard_start_xmit = ipgre_tunnel_xmit;
2124 dev->do_ioctl = ipgre_tunnel_ioctl;
2125 dev->change_mtu = ipgre_tunnel_change_mtu;
2126 + dev->hard_start_xmit = ipgre_ip_tunnel_xmit;
2128 dev->type = ARPHRD_IPGRE;
2129 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
2130 @@ -1093,6 +1388,36 @@
2131 dev->features |= NETIF_F_NETNS_LOCAL;
2134 +/* Tunnel setup for ipgre_eth */
2135 +static void ipgre_eth_tunnel_setup(struct net_device *dev)
2137 + //SET_MODULE_OWNER(dev);
2139 + // Set default values for Ethernet device
2142 + dev->uninit = ipgre_tunnel_uninit;
2143 + dev->destructor = free_netdev;
2144 + dev->hard_start_xmit = ipgre_eth_tunnel_xmit;
2145 + //dev->get_stats = ipgre_tunnel_get_stats;
2146 + dev->do_ioctl = ipgre_tunnel_ioctl;
2147 + dev->change_mtu = ipgre_tunnel_change_mtu;
2149 + dev->hard_header_len = LL_MAX_HEADER + ETH_HLEN + sizeof(struct iphdr) + 4;
2150 + dev->mtu = ETH_DATA_LEN - ETH_HLEN - sizeof(struct iphdr) - 4;
2151 + dev->tx_queue_len = 0;
2153 + dev->features |= NETIF_F_NETNS_LOCAL;
2155 + random_ether_addr(dev->dev_addr);
2158 + { unsigned char* d = dev->dev_addr;
2159 + printk(KERN_ALERT "Here is the address we got:%x%x%x%x%x%x\n",d[0],d[1],d[2],d[3],d[4],d[5]); }
2164 static int ipgre_tunnel_init(struct net_device *dev)
2166 struct ip_tunnel *tunnel;
2167 @@ -1104,8 +1429,10 @@
2169 strcpy(tunnel->parms.name, dev->name);
2171 + if (tunnel->parms.proto_type != ETH_P_ETH) {
2172 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
2173 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
2176 ipgre_tunnel_bind_dev(dev);
2178 @@ -1181,7 +1508,7 @@
2181 ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
2182 - ipgre_tunnel_setup);
2183 + ipgre_ip_tunnel_setup);
2184 if (!ign->fb_tunnel_dev) {