[linux-2.6.git] / linux-2.6-700-egre.patch
1 diff -Nurb linux-2.6.27-660/drivers/net/Kconfig linux-2.6.27-700/drivers/net/Kconfig
2 --- linux-2.6.27-660/drivers/net/Kconfig        2009-04-16 10:27:01.000000000 -0400
3 +++ linux-2.6.27-700/drivers/net/Kconfig        2009-04-16 10:27:39.000000000 -0400
4 @@ -39,6 +39,9 @@
5           'ifb1' etc.
6           Look at the iproute2 documentation directory for usage etc
7  
8 +config EGRE
9 +       tristate "EGRE module for Ethernet over GRE Tunnels"
10 +      
11  config DUMMY
12         tristate "Dummy net driver support"
13         ---help---
14 diff -Nurb linux-2.6.27-660/drivers/net/Makefile linux-2.6.27-700/drivers/net/Makefile
15 --- linux-2.6.27-660/drivers/net/Makefile       2008-10-09 18:13:53.000000000 -0400
16 +++ linux-2.6.27-700/drivers/net/Makefile       2009-04-16 10:27:39.000000000 -0400
17 @@ -2,6 +2,7 @@
18  # Makefile for the Linux network (ethercard) device drivers.
19  #
20  
21 +obj-$(CONFIG_EGRE) += gre.o
22  obj-$(CONFIG_E1000) += e1000/
23  obj-$(CONFIG_E1000E) += e1000e/
24  obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
25 diff -Nurb linux-2.6.27-660/drivers/net/gre.c linux-2.6.27-700/drivers/net/gre.c
26 --- linux-2.6.27-660/drivers/net/gre.c  1969-12-31 19:00:00.000000000 -0500
27 +++ linux-2.6.27-700/drivers/net/gre.c  2009-04-16 12:48:33.000000000 -0400
28 @@ -0,0 +1,1646 @@
29 +/*
30 + *     Linux NET3:     GRE over IP protocol decoder.
31 + *
32 + *     Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
33 + *
34 + *     This program is free software; you can redistribute it and/or
35 + *     modify it under the terms of the GNU General Public License
36 + *     as published by the Free Software Foundation; either version
37 + *     2 of the License, or (at your option) any later version.
38 + *
39 + */
40 +
41 +#include <linux/capability.h>
42 +#include <linux/module.h>
43 +#include <linux/types.h>
44 +#include <linux/sched.h>
45 +#include <linux/kernel.h>
46 +#include <asm/uaccess.h>
47 +#include <linux/skbuff.h>
48 +#include <linux/netdevice.h>
49 +#include <linux/in.h>
50 +#include <linux/tcp.h>
51 +#include <linux/udp.h>
52 +#include <linux/if_arp.h>
53 +#include <linux/mroute.h>
54 +#include <linux/init.h>
55 +#include <linux/in6.h>
56 +#include <linux/inetdevice.h>
57 +#include <linux/etherdevice.h>   /**XXX added XXX */
58 +#include <linux/igmp.h>
59 +#include <linux/netfilter_ipv4.h>
60 +#include <linux/if_ether.h>
61 +
62 +#include <net/sock.h>
63 +#include <net/ip.h>
64 +#include <net/icmp.h>
65 +#include <net/protocol.h>
66 +#include <net/ipip.h>
67 +#include <net/arp.h>
68 +#include <net/checksum.h>
69 +#include <net/dsfield.h>
70 +#include <net/inet_ecn.h>
71 +#include <net/xfrm.h>
72 +
73 +#ifdef CONFIG_IPV6
74 +#include <net/ipv6.h>
75 +#include <net/ip6_fib.h>
76 +#include <net/ip6_route.h>
77 +#endif
78 +
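+/* Matches addresses in the IPv4 multicast range 224.0.0.0/4 (address in
+   network byte order). */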
79 +#define ipv4_is_multicast(x)    (((x) & htonl(0xf0000000)) == htonl(0xe0000000))
80 +
81 +//#define GRE_DEBUG 1
82 +
83 +/*
84 +   Problems & solutions
85 +   --------------------
86 +
87 +   1. The most important issue is detecting local dead loops.
88 +   They would cause complete host lockup in transmit, which
89 +   would be "resolved" by stack overflow or, if queueing is enabled,
90 +   by infinite looping in net_bh.
91 +
92 +   We cannot track such dead loops during route installation;
93 +   it is an infeasible task. The most general solution would be
94 +   to keep an skb->encapsulation counter (a sort of local ttl),
95 +   and silently drop the packet when it expires. It is the best
96 +   solution, but it supposes maintaining a new variable in ALL
97 +   skbs, even if no tunneling is used.
98 +
99 +   Current solution: the t->recursion lock breaks dead loops. It looks
100 +   like the dev->tbusy flag, but I preferred a new variable, because
101 +   the semantics are different. One day, when hard_start_xmit
102 +   becomes multithreaded, we will have to use skb->encapsulation.
103 +
104 +
105 +
106 +   2. Networking dead loops would not kill routers, but would really
107 +   kill the network. The IP hop limit plays the role of "t->recursion" in this case,
108 +   if we copy it from the packet being encapsulated to the upper header.
109 +   It is a very good solution, but it introduces two problems:
110 +
111 +   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
112 +     do not work over tunnels.
113 +   - traceroute does not work. I planned to relay ICMP from the tunnel,
114 +     so that this problem would be solved and traceroute output
115 +     would be even more informative. This idea turned out to be wrong:
116 +     only Linux complies with RFC 1812 now (yes, guys, Linux is the only
117 +     true router now :-)); all other routers (at least, in my neighbourhood)
118 +     return only 8 bytes of payload. That is the end of it.
119 +
120 +   Hence, if we want OSPF to work or traceroute to say something reasonable,
121 +   we should search for another solution.
122 +
123 +   One of them is to parse the packet, trying to detect inner encapsulation
124 +   made by our node. It is difficult or even impossible, especially
125 +   taking fragmentation into account. In short, it is no solution at all.
126 +
127 +   Current solution: The solution was UNEXPECTEDLY SIMPLE.
128 +   We force the DF flag on tunnels with a preconfigured hop limit;
129 +   that is ALL. :-) Well, it does not remove the problem completely,
130 +   but the exponential growth of network traffic is changed to linear
131 +   (branches that exceed the pmtu are pruned) and the tunnel mtu
132 +   quickly degrades to a value <68, where looping stops.
133 +   Yes, it is not good if there is a router in the loop
134 +   which does not force DF, even when the encapsulating packets have DF set.
135 +   But it is not our problem! Nobody could accuse us; we did
136 +   all that we could. Even if it was your gated that injected the
137 +   fatal route into the network, even if it was you who configured the
138 +   fatal static route: you are innocent. :-)
139 +
140 +
141 +
142 +   3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
143 +   practically identical code. It would be good to glue them
144 +   together, but it is not very evident how to make them modular.
145 +   sit is an integral part of IPv6; ipip and gre are naturally modular.
146 +   We could extract the common parts (hash table, ioctl etc.)
147 +   into a separate module (ip_tunnel.c).
148 +
149 +   Alexey Kuznetsov.
150 + */
151 +
152 +static int ipgre_tunnel_init(struct net_device *dev);
153 +static void ipgre_ip_tunnel_setup(struct net_device *dev);
154 +static void ipgre_eth_tunnel_setup(struct net_device *dev);
155 +
156 +/* Fallback tunnel: no source, no destination, no key, no options */
157 +
158 +static int ipgre_fb_tunnel_init(struct net_device *dev);
159 +
160 +static struct net_device *ipgre_fb_tunnel_dev;
161 +
162 +/* Tunnel hash table */
163 +
164 +/*
165 +   4 hash tables:
166 +
167 +   3: (remote,local)
168 +   2: (remote,*)
169 +   1: (*,local)
170 +   0: (*,*)
171 +
172 +   We require an exact key match, i.e. if a key is present in the packet
173 +   it will match only a tunnel with the same key; if it is not present,
174 +   it will match only a keyless tunnel.
175 +
176 +   All keyless packets that do not match a configured keyless tunnel
177 +   will match the fallback tunnel.
178 + */
179 +
180 +#define HASH_SIZE  1024
181 +#define HASH(addr) (ntohl(addr)&1023)
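+/* HASH() uses the low 10 bits of the value in host byte order, so with
+   HASH_SIZE 1024 a tunnel configured with key 5 always hashes to bucket 5.
+   The remote address is deliberately not mixed in (see the commented-out
+   "h ^= HASH(remote)" below): lookup is effectively by GRE key alone. */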
182 +
183 +static struct ip_tunnel *tunnels[4][HASH_SIZE];
184 +
185 +#define tunnels_r_l    (tunnels[3])
186 +#define tunnels_r      (tunnels[2])
187 +#define tunnels_l      (tunnels[1])
188 +#define tunnels_wc     (tunnels[0])
189 +
190 +static DEFINE_RWLOCK(ipgre_lock);
191 +
192 +/* Given src, dst and key, find the appropriate tunnel for input. */
193 +
194 +static struct ip_tunnel * ipgre_tunnel_lookup(__be32 remote, __be32 local, __be32 key)
195 +{
196 +       /* HACK */
197 +       unsigned hash_value = HASH(key);
198 +       struct ip_tunnel *t;
199 +
200 +       t = tunnels_r_l[hash_value];
201 +
202 +       if (t && (t->parms.i_key == key) && (t->dev->flags&IFF_UP)) {
203 +               return t;
204 +       }
205 +
206 +       t = tunnels_r[hash_value];
207 +       if (t && (t->parms.i_key == key) && (t->dev->flags&IFF_UP))
208 +               return t;
209 +
210 +       t = tunnels_l[hash_value];
211 +       if (t && (t->parms.i_key == key) && (t->dev->flags&IFF_UP))
212 +               return t;
213 +       t = tunnels_wc[hash_value];
214 +       if (t && (t->parms.i_key == key) && (t->dev->flags&IFF_UP))
215 +               return t;
216 +       if (ipgre_fb_tunnel_dev->flags&IFF_UP)
217 +               return netdev_priv(ipgre_fb_tunnel_dev);
218 +       return NULL;
219 +}
220 +
221 +static struct ip_tunnel **ipgre_bucket(struct ip_tunnel *t)
222 +{
223 +       __be32 remote = t->parms.iph.daddr;
224 +       __be32 local = t->parms.iph.saddr;
225 +       __be32 key = t->parms.i_key;
226 +       unsigned h = HASH(key);
227 +       int prio = 0;
228 +
229 +       if (local)
230 +               prio |= 1;
231 +       if (remote && !ipv4_is_multicast(remote)) {
232 +               prio |= 2;
233 +               //h ^= HASH(remote);
234 +       }
235 +
236 +       return &tunnels[prio][h];
237 +}
238 +
239 +static void ipgre_tunnel_link(struct ip_tunnel *t)
240 +{
241 +       struct ip_tunnel **tp = ipgre_bucket(t);
242 +
243 +       t->next = *tp;
244 +       write_lock_bh(&ipgre_lock);
245 +       *tp = t;
246 +       write_unlock_bh(&ipgre_lock);
247 +}
248 +
249 +static void ipgre_tunnel_unlink(struct ip_tunnel *t)
250 +{
251 +       struct ip_tunnel **tp;
252 +
253 +       for (tp = ipgre_bucket(t); *tp; tp = &(*tp)->next) {
254 +               if (t == *tp) {
255 +                       write_lock_bh(&ipgre_lock);
256 +                       *tp = t->next;
257 +                       write_unlock_bh(&ipgre_lock);
258 +                       break;
259 +               }
260 +       }
261 +}
262 +
263 +static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int create)
264 +{
265 +       __be32 remote = parms->iph.daddr;
266 +       __be32 local = parms->iph.saddr;
267 +       __be32 key = parms->i_key;
268 +       __be16 proto = parms->proto_type;
269 +       struct ip_tunnel *t, **tp, *nt;
270 +       struct net_device *dev;
271 +       unsigned h = HASH(key);
272 +       int prio = 0;
273 +       char name[IFNAMSIZ];
274 +
275 +       if (local)
276 +               prio |= 1;
277 +       if (remote && !ipv4_is_multicast(remote)) {
278 +               prio |= 2;
279 +               //h ^= HASH(remote);
280 +       }
281 +       for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
282 +               if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
283 +                       if (key == t->parms.i_key)
284 +                               return t;
285 +               }
286 +       }
287 +       if (!create)
288 +               return NULL;
289 +
290 +       printk(KERN_CRIT "Adding tunnel %s with key %d\n", parms->name, ntohl(key));
291 +
292 +       if (parms->name[0])
293 +               strlcpy(name, parms->name, IFNAMSIZ);
294 +       else {
295 +               int i;
296 +               for (i=1; i<100; i++) {
297 +                       sprintf(name, "gre%d", i);
298 +                       if (__dev_get_by_name(&init_net, name) == NULL)
299 +                               break;
300 +               }
301 +               if (i==100)
302 +                       goto failed;
303 +       }
304 +       
305 +       /* Tunnel creation: check payload type and call appropriate
306 +        * function */
307 +       switch (proto)
308 +       {
309 +           case ETH_P_IP:
310 +               dev = alloc_netdev(sizeof(*t), name, ipgre_ip_tunnel_setup);
311 +               break;
312 +           case ETH_P_ETH:
313 +               dev = alloc_netdev(sizeof(*t), name, ipgre_eth_tunnel_setup);
314 +               break;
315 +           default:
316 +               return NULL;
317 +       }
318 +
319 +       if (!dev)
320 +         return NULL;
321 +
322 +       dev->init = ipgre_tunnel_init;
323 +       nt = netdev_priv(dev);
324 +       nt->parms = *parms;
325 +
326 +       if (register_netdevice(dev) < 0) {
327 +               free_netdev(dev);
328 +               goto failed;
329 +       }
330 +
331 +       dev_hold(dev);
332 +       ipgre_tunnel_link(nt);
333 +       return nt;
334 +
335 +failed:
336 +       return NULL;
337 +}
338 +
339 +static void ipgre_tunnel_uninit(struct net_device *dev)
340 +{
341 +       ipgre_tunnel_unlink(netdev_priv(dev));
342 +       dev_put(dev);
343 +}
344 +
345 +
346 +static void ipgre_err(struct sk_buff *skb, u32 info)
347 +{
348 +#ifndef I_WISH_WORLD_WERE_PERFECT
349 +
350 +/* It is not :-( All the routers (except for Linux) return only
351 +   8 bytes of packet payload. It means that precise relaying of
352 +   ICMP in the real Internet is absolutely infeasible.
353 +
354 +   Moreover, Cisco "wise men" put the GRE key in the third word
355 +   of the GRE header. That makes it impossible to maintain even soft state for keyed
356 +   GRE tunnels with checksums enabled. Tell them "thank you".
357 +
358 +   Well, I wonder: RFC 1812 was written by a Cisco employee,
359 +   so why the hell do these idiots break standards established
360 +   by themselves???
361 + */
362 +
363 +       struct iphdr *iph = (struct iphdr*)skb->data;
364 +       __be16       *p = (__be16*)(skb->data+(iph->ihl<<2));
365 +       int grehlen = (iph->ihl<<2) + 4;
366 +       int type = icmp_hdr(skb)->type;
367 +       int code = icmp_hdr(skb)->code;
368 +       struct ip_tunnel *t;
369 +       __be16 flags;
370 +
371 +       flags = p[0];
372 +       if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
373 +               if (flags&(GRE_VERSION|GRE_ROUTING))
374 +                       return;
375 +               if (flags&GRE_KEY) {
376 +                       grehlen += 4;
377 +                       if (flags&GRE_CSUM)
378 +                               grehlen += 4;
379 +               }
380 +       }
381 +
382 +       /* If only 8 bytes returned, keyed message will be dropped here */
383 +       if (skb_headlen(skb) < grehlen)
384 +               return;
385 +
386 +       switch (type) {
387 +       default:
388 +       case ICMP_PARAMETERPROB:
389 +               return;
390 +
391 +       case ICMP_DEST_UNREACH:
392 +               switch (code) {
393 +               case ICMP_SR_FAILED:
394 +               case ICMP_PORT_UNREACH:
395 +                       /* Impossible event. */
396 +                       return;
397 +               case ICMP_FRAG_NEEDED:
398 +                       /* Soft state for pmtu is maintained by IP core. */
399 +                       return;
400 +               default:
401 +                       /* All others are translated to HOST_UNREACH.
402 +                          rfc2003 contains "deep thoughts" about NET_UNREACH,
403 +                          I believe they are just ether pollution. --ANK
404 +                        */
405 +                       break;
406 +               }
407 +               break;
408 +       case ICMP_TIME_EXCEEDED:
409 +               if (code != ICMP_EXC_TTL)
410 +                       return;
411 +               break;
412 +       }
413 +
414 +       read_lock(&ipgre_lock);
415 +       t = ipgre_tunnel_lookup(iph->daddr, iph->saddr, (flags&GRE_KEY) ? *(((__be32*)p) + (grehlen>>2) - 1) : 0);
416 +       if (t == NULL || t->parms.iph.daddr == 0 || ipv4_is_multicast(t->parms.iph.daddr))
417 +               goto out;
418 +
419 +       if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
420 +               goto out;
421 +
422 +       if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
423 +               t->err_count++;
424 +       else
425 +               t->err_count = 1;
426 +       t->err_time = jiffies;
427 +out:
428 +       read_unlock(&ipgre_lock);
429 +       return;
430 +#else
431 +       struct iphdr *iph = (struct iphdr*)dp;
432 +       struct iphdr *eiph;
433 +       __be16       *p = (__be16*)(dp+(iph->ihl<<2));
434 +       int type = skb->h.icmph->type;
435 +       int code = skb->h.icmph->code;
436 +       int rel_type = 0;
437 +       int rel_code = 0;
438 +       __be32 rel_info = 0;
439 +       __u32 n = 0;
440 +       __be16 flags;
441 +       int grehlen = (iph->ihl<<2) + 4;
442 +       struct sk_buff *skb2;
443 +       struct flowi fl;
444 +       struct rtable *rt;
445 +
446 +       if (skb->dev->nd_net != &init_net)
447 +               return;
448 +
449 +       if (p[1] != htons(ETH_P_IP))
450 +               return;
451 +
452 +       flags = p[0];
453 +       if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
454 +               if (flags&(GRE_VERSION|GRE_ROUTING))
455 +                       return;
456 +               if (flags&GRE_CSUM)
457 +                       grehlen += 4;
458 +               if (flags&GRE_KEY)
459 +                       grehlen += 4;
460 +               if (flags&GRE_SEQ)
461 +                       grehlen += 4;
462 +       }
463 +       if (len < grehlen + sizeof(struct iphdr))
464 +               return;
465 +       eiph = (struct iphdr*)(dp + grehlen);
466 +
467 +       switch (type) {
468 +       default:
469 +               return;
470 +       case ICMP_PARAMETERPROB:
471 +               n = ntohl(skb->h.icmph->un.gateway) >> 24;
472 +               if (n < (iph->ihl<<2))
473 +                       return;
474 +
475 +               /* So... This guy found something strange INSIDE the encapsulated
476 +                  packet. Well, he is a fool, but what can we do?
477 +                */
478 +               rel_type = ICMP_PARAMETERPROB;
479 +               n -= grehlen;
480 +               rel_info = htonl(n << 24);
481 +               break;
482 +
483 +       case ICMP_DEST_UNREACH:
484 +               switch (code) {
485 +               case ICMP_SR_FAILED:
486 +               case ICMP_PORT_UNREACH:
487 +                       /* Impossible event. */
488 +                       return;
489 +               case ICMP_FRAG_NEEDED:
490 +                       /* And it is the only really necessary thing :-) */
491 +                       n = ntohs(skb->h.icmph->un.frag.mtu);
492 +                       if (n < grehlen+68)
493 +                               return;
494 +                       n -= grehlen;
495 +                       /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
496 +                       if (n > ntohs(eiph->tot_len))
497 +                               return;
498 +                       rel_info = htonl(n);
499 +                       break;
500 +               default:
501 +                       /* All others are translated to HOST_UNREACH.
502 +                          rfc2003 contains "deep thoughts" about NET_UNREACH,
503 +                          I believe, it is just ether pollution. --ANK
504 +                        */
505 +                       rel_type = ICMP_DEST_UNREACH;
506 +                       rel_code = ICMP_HOST_UNREACH;
507 +                       break;
508 +               }
509 +               break;
510 +       case ICMP_TIME_EXCEEDED:
511 +               if (code != ICMP_EXC_TTL)
512 +                       return;
513 +               break;
514 +       }
515 +
516 +       /* Prepare fake skb to feed it to icmp_send */
517 +       skb2 = skb_clone(skb, GFP_ATOMIC);
518 +       if (skb2 == NULL)
519 +               return;
520 +       dst_release(skb2->dst);
521 +       skb2->dst = NULL;
522 +       skb_pull(skb2, skb->data - (u8*)eiph);
523 +       skb_reset_network_header(skb2);
524 +
525 +       /* Try to guess incoming interface */
526 +       memset(&fl, 0, sizeof(fl));
527 +       //fl.fl_net = &init_net;
528 +       fl.fl4_dst = eiph->saddr;
529 +       fl.fl4_tos = RT_TOS(eiph->tos);
530 +       fl.proto = IPPROTO_GRE;
531 +       if (ip_route_output_key(dev_net(dev),&rt, &fl)) {
532 +               kfree_skb(skb2);
533 +               return;
534 +       }
535 +       skb2->dev = rt->u.dst.dev;
536 +
537 +       /* route "incoming" packet */
538 +       if (rt->rt_flags&RTCF_LOCAL) {
539 +               ip_rt_put(rt);
540 +               rt = NULL;
541 +               fl.fl4_dst = eiph->daddr;
542 +               fl.fl4_src = eiph->saddr;
543 +               fl.fl4_tos = eiph->tos;
544 +               if (ip_route_output_key(&rt, &fl) ||
545 +                   rt->u.dst.dev->type != ARPHRD_IPGRE) {
546 +                       ip_rt_put(rt);
547 +                       kfree_skb(skb2);
548 +                       return;
549 +               }
550 +       } else {
551 +               ip_rt_put(rt);
552 +               if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
553 +                   skb2->dst->dev->type != ARPHRD_IPGRE) {
554 +                       kfree_skb(skb2);
555 +                       return;
556 +               }
557 +       }
558 +
559 +       /* change mtu on this route */
560 +       if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
561 +               if (n > dst_mtu(skb2->dst)) {
562 +                       kfree_skb(skb2);
563 +                       return;
564 +               }
565 +               skb2->dst->ops->update_pmtu(skb2->dst, n);
566 +       } else if (type == ICMP_TIME_EXCEEDED) {
567 +               struct ip_tunnel *t = netdev_priv(skb2->dev);
568 +               if (t->parms.iph.ttl) {
569 +                       rel_type = ICMP_DEST_UNREACH;
570 +                       rel_code = ICMP_HOST_UNREACH;
571 +               }
572 +       }
573 +
574 +       icmp_send(skb2, rel_type, rel_code, rel_info);
575 +       kfree_skb(skb2);
576 +#endif
577 +}
578 +
579 +static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
580 +{
581 +       if (INET_ECN_is_ce(iph->tos)) {
582 +               if (skb->protocol == htons(ETH_P_IP)) {
583 +                       IP_ECN_set_ce(ip_hdr(skb));
584 +               } else if (skb->protocol == htons(ETH_P_IPV6)) {
585 +                       IP6_ECN_set_ce(ipv6_hdr(skb));
586 +               }
587 +       }
588 +}
589 +
590 +static inline u8
591 +ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
592 +{
593 +       u8 inner = 0;
594 +       if (skb->protocol == htons(ETH_P_IP))
595 +               inner = old_iph->tos;
596 +       else if (skb->protocol == htons(ETH_P_IPV6))
597 +               inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
598 +       return INET_ECN_encapsulate(tos, inner);
599 +}
600 +
601 +static int ipgre_rcv(struct sk_buff *skb)
602 +{
603 +       struct iphdr *iph;
604 +       u8     *h;
605 +       __be16    flags;
606 +       __sum16   csum = 0;
607 +       __be32 key = 0;
608 +       u32    seqno = 0;
609 +       struct ip_tunnel *tunnel;
610 +       int    offset = 4;
611 +       __be16 proto;
612 +
613 +       if (skb->dev->nd_net != &init_net) {
614 +               kfree_skb(skb);
615 +               return 0;
616 +       }
617 +       if (!pskb_may_pull(skb, 16))
618 +               goto drop_nolock;
619 +
620 +       iph = ip_hdr(skb);
621 +       h = skb->data;
622 +       flags = *(__be16*)h;
623 +
624 +#ifdef GRE_DEBUG
625 +       printk(KERN_DEBUG "gre.c [601] src:%x dst:%x  proto:%d %p\n", iph->saddr, iph->daddr, iph->protocol, skb->data);
626 +#endif 
627 +       proto = ntohs(*(__be16*)(h+2)); /* XXX added XXX */
628 +       
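+       /* GRE header layout assumed by the offsets below (RFC 2784/2890):
+        * bytes 0-1 flags/version, bytes 2-3 protocol type, then, only when
+        * the matching flag bit is set and in this order: 4 bytes checksum +
+        * reserved (GRE_CSUM), 4 bytes key (GRE_KEY), 4 bytes sequence number
+        * (GRE_SEQ).  'offset' starts at 4 and is advanced past each optional
+        * field that is present. */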
629 +       if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
630 +               /* - Version must be 0.
631 +                  - We do not support routing headers.
632 +                */
633 +               if (flags&(GRE_VERSION|GRE_ROUTING))
634 +                       goto drop_nolock;
635 +
636 +               if (flags&GRE_CSUM) {
637 +                       switch (skb->ip_summed) {
638 +                       case CHECKSUM_COMPLETE:
639 +                               csum = csum_fold(skb->csum);
640 +                               if (!csum)
641 +                                       break;
642 +                               /* fall through */
643 +                       case CHECKSUM_NONE:
644 +                               skb->csum = 0;
645 +                               csum = __skb_checksum_complete(skb);
646 +                               skb->ip_summed = CHECKSUM_COMPLETE;
647 +                       }
648 +                       offset += 4;
649 +               }
650 +               if (flags&GRE_KEY) {
651 +                       key = *(__be32*)(h + offset);
652 +                       offset += 4;
653 +               }
654 +               if (flags&GRE_SEQ) {
655 +                       seqno = ntohl(*(__be32*)(h + offset));
656 +                       offset += 4;
657 +               }
658 +       }
659 +
660 +       read_lock(&ipgre_lock);
661 +       if ((tunnel = ipgre_tunnel_lookup(iph->saddr, iph->daddr, key)) != NULL) {
662 +               secpath_reset(skb);
663 +
664 +               skb->protocol = *(__be16*)(h + 2);
665 +               /* WCCP version 1 and 2 protocol decoding.
666 +                * - Change protocol to IP
667 +                * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
668 +                */
669 +               if (flags == 0 &&
670 +                   skb->protocol == htons(ETH_P_WCCP)) {
671 +                       skb->protocol = htons(ETH_P_IP);
672 +                       if ((*(h + offset) & 0xF0) != 0x40)
673 +                               offset += 4;
674 +               }
675 +
676 +               //skb->mac.raw = skb->nh.raw;
677 +               skb_reset_mac_header(skb);
678 +               __pskb_pull(skb, offset);
679 +               skb_reset_network_header(skb);
680 +               skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
681 +               if (proto == ETH_P_ETH)
682 +               {
683 +#ifdef GRE_DEBUG
684 +                       unsigned char *tmp_hdr = skb->data;
685 +                       printk(KERN_DEBUG "gre.c [658] %x %x %x %x %x %x\tskb %p\n", tmp_hdr[0], tmp_hdr[1], tmp_hdr[2], tmp_hdr[3], tmp_hdr[4], tmp_hdr[5], skb->data);
686 +#endif
687 +                       skb->protocol = eth_type_trans(skb, tunnel->dev);
688 +
689 +                       /* XXX added these lines to make arp work? XXX */
690 +                       /*skb->mac.raw = skb->data;*/
691 +                       skb->network_header = skb->network_header + ETH_HLEN;
692 +                       /* XXX added these lines to make arp work? XXX */
693 +
694 +#ifdef GRE_DEBUG
695 +                       tmp_hdr = skb->data;
696 +                       printk(KERN_DEBUG "gre.c [669] %x %x %x %x %x %x\tskb %p\n", tmp_hdr[0], tmp_hdr[1], tmp_hdr[2], tmp_hdr[3], tmp_hdr[4], tmp_hdr[5], skb->data);
697 +                       printk(KERN_ALERT "gre.c [671] received ethernet on gre %x %x\n", skb->protocol, ip_hdr(skb)->protocol);
698 +#endif
699 +                       memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
700 +               }
701 +               else
702 +                       skb->pkt_type = PACKET_HOST;
703 +#ifdef CONFIG_NET_IPGRE_BROADCAST
704 +               if (ipv4_is_multicast(iph->daddr)) {
705 +                       /* Looped back packet, drop it! */
706 +                       if (((struct rtable*)skb->dst)->fl.iif == 0)
707 +                               goto drop;
708 +                       tunnel->dev->stats.multicast++;
709 +                       skb->pkt_type = PACKET_BROADCAST;
710 +               }
711 +#endif
712 +
713 +               if (((flags&GRE_CSUM) && csum) ||
714 +                   (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
715 +                       tunnel->dev->stats.rx_crc_errors++;
716 +                       tunnel->dev->stats.rx_errors++;
717 +                       goto drop;
718 +               }
719 +               if (tunnel->parms.i_flags&GRE_SEQ) {
720 +                       if (!(flags&GRE_SEQ) ||
721 +                           (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
722 +                               tunnel->dev->stats.rx_fifo_errors++;
723 +                               tunnel->dev->stats.rx_errors++;
724 +                               goto drop;
725 +                       }
726 +                       tunnel->i_seqno = seqno + 1;
727 +               }
728 +               tunnel->dev->stats.rx_packets++;
729 +               tunnel->dev->stats.rx_bytes += skb->len;
730 +               skb->dev = tunnel->dev;
731 +               dst_release(skb->dst);
732 +               skb->dst = NULL;
733 +               nf_reset(skb);
734 +               ipgre_ecn_decapsulate(iph, skb);
735 +               netif_rx(skb);
736 +               read_unlock(&ipgre_lock);
737 +               return(0);
738 +       }
739 +       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
740 +
741 +drop:
742 +       read_unlock(&ipgre_lock);
743 +drop_nolock:
744 +       kfree_skb(skb);
745 +       return(0);
746 +}
747 +
748 +static int ipgre_ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
749 +{
750 +       struct ip_tunnel *tunnel = netdev_priv(dev);
751 +       struct net_device_stats *stats = &tunnel->dev->stats;
752 +       struct iphdr  *old_iph = ip_hdr(skb);
753 +       struct iphdr  *tiph;
754 +       u8     tos;
755 +       __be16 df;
756 +       struct rtable *rt;                      /* Route to the other host */
757 +       struct net_device *tdev;                        /* Device to other host */
758 +       struct iphdr  *iph;                     /* Our new IP header */
759 +       int    max_headroom;                    /* The extra header space needed */
760 +       int    gre_hlen;
761 +       __be32 dst;
762 +       int    mtu;
763 +
764 +       if (tunnel->recursion++) {
765 +               tunnel->dev->stats.collisions++;
766 +               goto tx_error;
767 +       }
768 +
769 +       if (dev->header_ops) {
770 +               gre_hlen = 0;
771 +               tiph = (struct iphdr*)skb->data;
772 +       } else {
773 +               gre_hlen = tunnel->hlen;
774 +               tiph = &tunnel->parms.iph;
775 +       }
776 +
777 +       if ((dst = tiph->daddr) == 0) {
778 +               /* NBMA tunnel */
779 +
780 +               if (skb->dst == NULL) {
781 +                       tunnel->dev->stats.tx_fifo_errors++;
782 +                       goto tx_error;
783 +               }
784 +
785 +               if (skb->protocol == htons(ETH_P_IP)) {
786 +                       rt = (struct rtable*)skb->dst;
787 +                       if ((dst = rt->rt_gateway) == 0)
788 +                               goto tx_error_icmp;
789 +               }
790 +#ifdef CONFIG_IPV6
791 +               else if (skb->protocol == htons(ETH_P_IPV6)) {
792 +                       struct in6_addr *addr6;
793 +                       int addr_type;
794 +                       struct neighbour *neigh = skb->dst->neighbour;
795 +
796 +                       if (neigh == NULL)
797 +                               goto tx_error;
798 +
799 +                       addr6 = (struct in6_addr*)&neigh->primary_key;
800 +                       addr_type = ipv6_addr_type(addr6);
801 +
802 +                       if (addr_type == IPV6_ADDR_ANY) {
803 +                               addr6 = &ipv6_hdr(skb)->daddr;
804 +                               addr_type = ipv6_addr_type(addr6);
805 +                       }
806 +
807 +                       if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
808 +                               goto tx_error_icmp;
809 +
810 +               }
811 +#endif
812 +               else
813 +                       goto tx_error;
814 +       }
815 +
816 +       tos = tiph->tos;
817 +       if (tos&1) {
818 +               if (skb->protocol == htons(ETH_P_IP))
819 +                       tos = old_iph->tos;
820 +               tos &= ~1;
821 +       }
822 +
823 +       {
824 +               struct flowi fl = { //.fl_net = &init_net,
825 +                                   .oif = tunnel->parms.link,
826 +                                   .nl_u = { .ip4_u =
827 +                                             { .daddr = dst,
828 +                                               .saddr = tiph->saddr,
829 +                                               .tos = RT_TOS(tos) } },
830 +                                   .proto = IPPROTO_GRE };
831 +               if (ip_route_output_key(dev_net(dev),&rt, &fl)) {
832 +                       tunnel->dev->stats.tx_carrier_errors++;
833 +                       goto tx_error;
834 +               }
835 +       }
836 +       tdev = rt->u.dst.dev;
837 +
838 +
839 +       if (tdev == dev) {
840 +               ip_rt_put(rt);
841 +               tunnel->dev->stats.collisions++;
842 +               goto tx_error;
843 +       }
844 +
845 +       df = tiph->frag_off;
846 +       if (df)
847 +               mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
848 +       else
849 +               mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
850 +
851 +       if (skb->dst)
852 +               skb->dst->ops->update_pmtu(skb->dst, mtu);
853 +
854 +       if (skb->protocol == htons(ETH_P_IP)) {
855 +               df |= (old_iph->frag_off&htons(IP_DF));
856 +
857 +               if ((old_iph->frag_off&htons(IP_DF)) &&
858 +                   mtu < ntohs(old_iph->tot_len)) {
859 +                       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
860 +                       ip_rt_put(rt);
861 +                       goto tx_error;
862 +               }
863 +       }
864 +#ifdef CONFIG_IPV6
865 +       else if (skb->protocol == htons(ETH_P_IPV6)) {
866 +               struct rt6_info *rt6 = (struct rt6_info*)skb->dst;
867 +
868 +               if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
869 +                       if ((tunnel->parms.iph.daddr && !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
870 +                           rt6->rt6i_dst.plen == 128) {
871 +                               rt6->rt6i_flags |= RTF_MODIFIED;
872 +                               skb->dst->metrics[RTAX_MTU-1] = mtu;
873 +                       }
874 +               }
875 +
876 +               if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
877 +                       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
878 +                       ip_rt_put(rt);
879 +                       goto tx_error;
880 +               }
881 +       }
882 +#endif
883 +
884 +       if (tunnel->err_count > 0) {
885 +               if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
886 +                       tunnel->err_count--;
887 +
888 +                       dst_link_failure(skb);
889 +               } else
890 +                       tunnel->err_count = 0;
891 +       }
892 +
893 +       max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
894 +
895 +       if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
896 +               struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
897 +               if (!new_skb) {
898 +                       ip_rt_put(rt);
899 +                       stats->tx_dropped++;
900 +                       dev_kfree_skb(skb);
901 +                       tunnel->recursion--;
902 +                       return 0;
903 +               }
904 +               if (skb->sk)
905 +                       skb_set_owner_w(new_skb, skb->sk);
906 +               dev_kfree_skb(skb);
907 +               skb = new_skb;
908 +               old_iph = ip_hdr(skb);
909 +       }
910 +
911 +       skb->transport_header = skb->network_header;
912 +       skb_push(skb, gre_hlen);
913 +       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
914 +       IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
915 +                             IPSKB_REROUTED);
916 +       dst_release(skb->dst);
917 +       skb->dst = &rt->u.dst;
918 +
919 +       /*
920 +        *      Push down and install the IPIP header.
921 +        */
922 +
923 +       iph                     =       ip_hdr(skb);
924 +       iph->version            =       4;
925 +       iph->ihl                =       sizeof(struct iphdr) >> 2;
926 +       iph->frag_off           =       df;
927 +       iph->protocol           =       IPPROTO_GRE;
928 +       iph->tos                =       ipgre_ecn_encapsulate(tos, old_iph, skb);
929 +       iph->daddr              =       rt->rt_dst;
930 +       iph->saddr              =       rt->rt_src;
931 +
932 +       if ((iph->ttl = tiph->ttl) == 0) {
933 +               if (skb->protocol == htons(ETH_P_IP))
934 +                       iph->ttl = old_iph->ttl;
935 +#ifdef CONFIG_IPV6
936 +               else if (skb->protocol == htons(ETH_P_IPV6))
937 +                       iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit;
938 +#endif
939 +               else
940 +                       iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
941 +       }
942 +
943 +       ((__be16*)(iph+1))[0] = tunnel->parms.o_flags;
944 +       ((__be16*)(iph+1))[1] = skb->protocol;
945 +
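+       /* The optional GRE fields are written back to front: 'ptr' starts at
+        * the last 32-bit word of the header (the sequence number when GRE_SEQ
+        * is set) and steps back to the key and then the checksum word, which
+        * yields the on-the-wire order checksum, key, sequence. */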
946 +       if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
947 +               __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
948 +
949 +               if (tunnel->parms.o_flags&GRE_SEQ) {
950 +                       ++tunnel->o_seqno;
951 +                       *ptr = htonl(tunnel->o_seqno);
952 +                       ptr--;
953 +               }
954 +               if (tunnel->parms.o_flags&GRE_KEY) {
955 +                       *ptr = tunnel->parms.o_key;
956 +                       ptr--;
957 +               }
958 +               if (tunnel->parms.o_flags&GRE_CSUM) {
959 +                       *ptr = 0;
960 +                       *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
961 +               }
962 +       }
963 +
964 +       nf_reset(skb);
965 +
966 +       IPTUNNEL_XMIT();
967 +       tunnel->recursion--;
968 +       return 0;
969 +
970 +tx_error_icmp:
971 +       dst_link_failure(skb);
972 +
973 +tx_error:
974 +       stats->tx_errors++;
975 +       dev_kfree_skb(skb);
976 +       tunnel->recursion--;
977 +       return 0;
978 +}
979 +
980 +static int ipgre_eth_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
981 +{
982 +       struct ip_tunnel *tunnel = netdev_priv(dev);
983 +       struct net_device_stats *stats = &tunnel->dev->stats;
984 +       struct iphdr *old_iph = ip_hdr(skb);
985 +       struct iphdr *tiph = &tunnel->parms.iph;
986 +       u8     tos;
987 +       __be16 df;
988 +       struct rtable *rt;              /* Route to the other host */
989 +       struct net_device *tdev;        /* Device to other host */
990 +       int    gre_hlen = tunnel->hlen; /* XXX changed XXX*/
991 +       //struct etheriphdr  *ethiph;
992 +       struct iphdr  *iph;             /* Our new IP header */
993 +       int    max_headroom;            /* The extra header space needed */
994 +       int    mtu;
995 +
996 +#ifdef GRE_DEBUG
997 +       printk(KERN_ALERT "gre.c:972 Starting xmit\n");
998 +#endif
999 +
1000 +       if (tunnel->recursion++) {
1001 +               stats->collisions++;
1002 +               goto tx_error;
1003 +       }
1004 +
1005 +       /* Need a valid, non-multicast daddr.  */
1006 +       if (tiph->daddr == 0 || ipv4_is_multicast(tiph->daddr))
1007 +               goto tx_error;
1008 +
1009 +       tos = tiph->tos;
1010 +       if (tos&1) {
1011 +               if (skb->protocol == htons(ETH_P_IP))
1012 +                       tos = old_iph->tos;
1013 +               tos &= ~1;
1014 +       }
1015 +#ifdef GRE_DEBUG
1016 +       printk(KERN_ALERT "gre.c:991 Passed tos assignment.\n");
1017 +#endif
1018 +
1019 +
1020 +       {
1021 +               struct flowi fl = { //.fl_net = &init_net,
1022 +                                   .oif = tunnel->parms.link,
1023 +                                   .nl_u = { .ip4_u =
1024 +                                             { .daddr = tiph->daddr,
1025 +                                               .saddr = tiph->saddr,
1026 +                                               .tos = RT_TOS(tos) } },
1027 +                                   .proto = IPPROTO_GRE };
1028 +               if (ip_route_output_key(dev_net(dev),&rt, &fl)) {
1029 +                       stats->tx_carrier_errors++;
1030 +                       goto tx_error_icmp;
1031 +               }
1032 +       }
1033 +       tdev = rt->u.dst.dev;
1034 +#ifdef GRE_DEBUG
1035 +       printk(KERN_ALERT "gre.c:1006 Passed the route retrieval\n");
1036 +#endif
1037 +       if (tdev == dev) {
1038 +               ip_rt_put(rt);
1039 +               stats->collisions++;
1040 +               goto tx_error;
1041 +       }
1042 +#ifdef GRE_DEBUG
1043 +       printk(KERN_ALERT "gre.c:1018 Passed tdev collision check.\n");
1044 +#endif
1045 +
1046 +       /* Check MTU stuff if kernel panic */
1047 +       df = tiph->frag_off;
1048 +       if (df)
1049 +               mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
1050 +       else
1051 +               mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
1052 +/*
1053 +       if (skb->dst)
1054 +               skb->dst->ops->update_pmtu(skb->dst, mtu);
1055 +        XXX */
1056 +#ifdef GRE_DEBUG
1057 +       printk(KERN_ALERT "gre.c:1032 Passed the pmtu setting.\n");
1058 +#endif
1059 +
1060 +       if (skb->protocol == htons(ETH_P_IP)) {
1061 +               df |= (old_iph->frag_off&htons(IP_DF));
1062 +
1063 +               if ((old_iph->frag_off & htons(IP_DF)) &&
1064 +                   mtu < ntohs(old_iph->tot_len)) {
1065 +                       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1066 +                       ip_rt_put(rt);
1067 +                       goto tx_error;
1068 +               }
1069 +       }
1070 +#ifdef CONFIG_IPV6
1071 +       else if (skb->protocol == htons(ETH_P_IPV6)) {
1072 +               struct rt6_info *rt6 = (struct rt6_info*)skb->dst;
1073 +
1074 +               if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
1075 +                       if (tiph->daddr || rt6->rt6i_dst.plen == 128) {
1076 +                               rt6->rt6i_flags |= RTF_MODIFIED;
1077 +                               skb->dst->metrics[RTAX_MTU-1] = mtu;
1078 +                       }
1079 +               }
1080 +
1081 +               /* @@@ Is this correct?  */
1082 +               if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
1083 +                       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
1084 +                       ip_rt_put(rt);
1085 +                       goto tx_error;
1086 +               }
1087 +       }
1088 +#endif
1089 +#ifdef GRE_DEBUG
1090 +       printk(KERN_ALERT "gre.c:1065 Passed the fragmentation check.\n");
1091 +#endif
1092 +
1093 +       if (tunnel->err_count > 0) {
1094 +               if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
1095 +                       tunnel->err_count--;
1096 +                       dst_link_failure(skb);
1097 +               } else
1098 +                       tunnel->err_count = 0;
1099 +       }
1100 +
1101 +       max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
1102 +
1103 +       if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
1104 +               struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
1105 +               if (!new_skb) {
1106 +                       ip_rt_put(rt);
1107 +                       stats->tx_dropped++;
1108 +                       dev_kfree_skb(skb);
1109 +                       tunnel->recursion--;
1110 +                       return 0;
1111 +               }
1112 +               if (skb->sk)
1113 +                       skb_set_owner_w(new_skb, skb->sk);
1114 +               dev_kfree_skb(skb);
1115 +               skb = new_skb;
1116 +               old_iph = ip_hdr(skb);
1117 +       }
1118 +#ifdef GRE_DEBUG
1119 +       printk(KERN_ALERT "gre.c:1094 Passed the headroom calculation\n");
1120 +#endif
1121 +
1122 +
1123 +       skb->transport_header = skb->mac_header; // Added by valas
1124 +       skb_push(skb, gre_hlen);
1125 +       skb_reset_network_header(skb);
1126 +       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1127 +       dst_release(skb->dst);
1128 +       skb->dst = &rt->u.dst;
1129 +
1130 +       /*
1131 +        *      Push down and install the etherip header.
1132 +        */
1133 +
1134 +       iph                     =       ip_hdr(skb);
1135 +       iph->version            =       4;
1136 +       iph->ihl                =       sizeof(struct iphdr) >> 2;
1137 +       iph->frag_off           =       df;
1138 +       iph->protocol           =       IPPROTO_GRE;
1139 +       iph->tos                =       ipgre_ecn_encapsulate(tos, old_iph, skb);
1140 +       iph->daddr              =       rt->rt_dst;
1141 +       iph->saddr              =       rt->rt_src;
1142 +
1143 +/*     ethiph->version         =       htons(ETHERIP_VERSION); */
1144 +#ifdef GRE_DEBUG
1145 +       printk(KERN_ALERT "gre.c:1121 Passed outer IP header construction.\n");
1146 +#endif
1147 +
1148 +       if ((iph->ttl = tiph->ttl) == 0) {
1149 +               if (skb->protocol == htons(ETH_P_IP))
1150 +                       iph->ttl = old_iph->ttl;
1151 +#ifdef CONFIG_IPV6
1152 +               else if (skb->protocol == htons(ETH_P_IPV6))
1153 +                       iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit;
1154 +#endif
1155 +               else
1156 +                       iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
1157 +       }
1158 +#ifdef GRE_DEBUG
1159 +       printk(KERN_ALERT "gre.c:1006 Passed the TTL check.\n");
1160 +#endif
1161 +
1162 +       ((__be16*)(iph+1))[0] = tunnel->parms.o_flags;
1163 +       ((__be16*)(iph+1))[1] = htons(tunnel->parms.proto_type);
1164 +
1165 +       if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
1166 +               __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
1167 +
1168 +               if (tunnel->parms.o_flags&GRE_SEQ) {
1169 +                       ++tunnel->o_seqno;
1170 +                       *ptr = htonl(tunnel->o_seqno);
1171 +                       ptr--;
1172 +               }
1173 +               if (tunnel->parms.o_flags&GRE_KEY) {
1174 +                       *ptr = tunnel->parms.o_key;
1175 +                       ptr--;
1176 +               }
1177 +               if (tunnel->parms.o_flags&GRE_CSUM) {
1178 +                       *ptr = 0;
1179 +                       *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
1180 +               }
1181 +       }
1182 +#ifdef GRE_DEBUG
1183 +       printk(KERN_ALERT "gre.c:1006 Passed the tunnel transmit.\n");
1184 +#endif
1185 +
1186 +       nf_reset(skb);
1187 +
1188 +       IPTUNNEL_XMIT();
1189 +       tunnel->recursion--;
1190 +       return 0;
1191 +
1192 +tx_error_icmp:
1193 +       dst_link_failure(skb);
1194 +
1195 +tx_error:
1196 +       stats->tx_errors++;
1197 +       dev_kfree_skb(skb);
1198 +       tunnel->recursion--;
1199 +       return 0;
1200 +}
1201 +
1202 +
1203 +static int
1204 +ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1205 +{
1206 +       int err = 0;
1207 +       struct ip_tunnel_parm p;
1208 +       struct ip_tunnel *t;
1209 +
1210 +        printk(KERN_ALERT "1174 GRE: entering gre ioctl. command is: %d\n", cmd);
1211 +
1212 +       switch (cmd) {
1213 +       case SIOCGETTUNNEL:
1214 +               t = NULL;
1215 +               if (dev == ipgre_fb_tunnel_dev) {
1216 +                       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1217 +                               err = -EFAULT;
1218 +                               break;
1219 +                       }
1220 +                       t = ipgre_tunnel_locate(&p, 0);
1221 +               }
1222 +               if (t == NULL)
1223 +                       t = netdev_priv(dev);
1224 +               memcpy(&p, &t->parms, sizeof(p));
1225 +               if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1226 +                       err = -EFAULT;
1227 +               break;
1228 +
1229 +       case SIOCADDTUNNEL:
1230 +       case SIOCCHGTUNNEL:
1231 +               err = -EPERM;
1232 +               if (!capable(CAP_NET_ADMIN))
1233 +                       goto done;
1234 +
1235 +               err = -EFAULT;
1236 +               if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1237 +                       goto done;
1238 +
1239 +               err = -EINVAL;
1240 +               if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
1241 +                   p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
1242 +                   ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
1243 +                       goto done;
1244 +               if (p.iph.ttl)
1245 +                       p.iph.frag_off |= htons(IP_DF);
1246 +
1247 +               if (!(p.i_flags&GRE_KEY))
1248 +                       p.i_key = 0;
1249 +               if (!(p.o_flags&GRE_KEY))
1250 +                       p.o_key = 0;
1251 +
1252 +               t = ipgre_tunnel_locate(&p, cmd == SIOCADDTUNNEL);
1253 +               if (t) printk(KERN_ALERT "1174 GRE: proto %s %d\n", p.name, p.proto_type);
1254 +               if (dev != ipgre_fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1255 +                       if (t != NULL) {
1256 +                               if (t->dev != dev) {
1257 +                                       err = -EEXIST;
1258 +                                       break;
1259 +                               }
1260 +                       } else {
1261 +                               unsigned nflags=0;
1262 +
1263 +                               t = netdev_priv(dev);
1264 +
1265 +                               if (ipv4_is_multicast(p.iph.daddr))
1266 +                                       nflags = IFF_BROADCAST;
1267 +                               else if (p.iph.daddr)
1268 +                                       nflags = IFF_POINTOPOINT;
1269 +                               
1270 +                               /* XXX:Set back IFF_BROADCAST if
1271 +                                * transporting ethernet */
1272 +                               printk(KERN_ALERT "1193 GRE: proto %s %d\n", p.name, p.proto_type);
1273 +                               if (p.proto_type == ETH_P_ETH)
1274 +                                       nflags = IFF_BROADCAST;
1275 +
1276 +                               if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
1277 +                                       err = -EINVAL;
1278 +                                       break;
1279 +                               }
1280 +                               ipgre_tunnel_unlink(t);
1281 +                               t->parms.iph.saddr = p.iph.saddr;
1282 +                               t->parms.iph.daddr = p.iph.daddr;
1283 +                               t->parms.i_key = p.i_key;
1284 +                               t->parms.o_key = p.o_key;
1285 +                               /* XXX:Copy in the protocol field */
1286 +                               t->parms.proto_type = p.proto_type;
1287 +                               if (t->parms.proto_type != ETH_P_ETH)
1288 +                               {
1289 +                                       memcpy(dev->dev_addr, &p.iph.saddr, 4);
1290 +                                       memcpy(dev->broadcast, &p.iph.daddr, 4);
1291 +                               }
1292 +                               ipgre_tunnel_link(t);
1293 +                               netdev_state_change(dev);
1294 +                       }
1295 +               }
1296 +
1297 +               if (t) {
1298 +                       err = 0;
1299 +                       if (cmd == SIOCCHGTUNNEL) {
1300 +                               t->parms.iph.ttl = p.iph.ttl;
1301 +                               t->parms.iph.tos = p.iph.tos;
1302 +                               t->parms.iph.frag_off = p.iph.frag_off;
1303 +                       }
1304 +                       if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
1305 +                               err = -EFAULT;
1306 +               } else
1307 +                       err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1308 +               break;
1309 +
1310 +       case SIOCDELTUNNEL:
1311 +               err = -EPERM;
1312 +               if (!capable(CAP_NET_ADMIN))
1313 +                       goto done;
1314 +
1315 +               if (dev == ipgre_fb_tunnel_dev) {
1316 +                       err = -EFAULT;
1317 +                       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1318 +                               goto done;
1319 +                       err = -ENOENT;
1320 +                       if ((t = ipgre_tunnel_locate(&p, 0)) == NULL)
1321 +                               goto done;
1322 +                       err = -EPERM;
1323 +                       if (t == netdev_priv(ipgre_fb_tunnel_dev))
1324 +                               goto done;
1325 +                       dev = t->dev;
1326 +               }
1327 +               unregister_netdevice(dev); // added by Valas
1328 +               break;
1329 +
1330 +       default:
1331 +               err = -EINVAL;
1332 +       }
1333 +
1334 +done:
1335 +       return err;
1336 +}
1337 +
1338 +static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
1339 +{
1340 +       return &(((struct ip_tunnel*)netdev_priv(dev))->dev->stats);
1341 +}
1342 +
1343 +static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1344 +{
1345 +       struct ip_tunnel *tunnel = netdev_priv(dev);
1346 +       if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen)
1347 +               return -EINVAL;
1348 +       dev->mtu = new_mtu;
1349 +       return 0;
1350 +}
1351 +
1352 +#ifdef CONFIG_NET_IPGRE_BROADCAST
1353 +/* Nice toy. Unfortunately, useless in real life :-)
1354 +   It allows one to construct a virtual multiprotocol broadcast "LAN"
1355 +   over the Internet, provided multicast routing is tuned.
1356 +
1357 +
1358 +   I have no idea whether this bicycle was invented before me,
1359 +   so I had to set ARPHRD_IPGRE to a random value.
1360 +   I have the impression that Cisco could make something similar,
1361 +   but this feature is apparently missing in IOS <= 11.2(8).
1362 +
1363 +   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1364 +   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1365 +
1366 +   ping -t 255 224.66.66.66
1367 +
1368 +   If nobody answers, mbone does not work.
1369 +
1370 +   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
1371 +   ip addr add 10.66.66.<somewhat>/24 dev Universe
1372 +   ifconfig Universe up
1373 +   ifconfig Universe add fe80::<Your_real_addr>/10
1374 +   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
1375 +   ftp 10.66.66.66
1376 +   ...
1377 +   ftp fec0:6666:6666::193.233.7.65
1378 +   ...
1379 +
1380 + */
1381 +
1382 +static int ipgre_open(struct net_device *dev)
1383 +{
1384 +       struct ip_tunnel *t = netdev_priv(dev);
1385 +
1386 +       if (ipv4_is_multicast(t->parms.iph.daddr)) {
1387 +               struct flowi fl = { //.fl_net = &init_net,
1388 +                                   .oif = t->parms.link,
1389 +                                   .nl_u = { .ip4_u =
1390 +                                             { .daddr = t->parms.iph.daddr,
1391 +                                               .saddr = t->parms.iph.saddr,
1392 +                                               .tos = RT_TOS(t->parms.iph.tos) } },
1393 +                                   .proto = IPPROTO_GRE };
1394 +               struct rtable *rt;
1395 +               if (ip_route_output_key(dev_net(dev),&rt, &fl))
1396 +                       return -EADDRNOTAVAIL;
1397 +               dev = rt->u.dst.dev;
1398 +               ip_rt_put(rt);
1399 +               if (__in_dev_get_rtnl(dev) == NULL)
1400 +                       return -EADDRNOTAVAIL;
1401 +               t->mlink = dev->ifindex;
1402 +               ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
1403 +       }
1404 +       return 0;
1405 +}
1406 +
1407 +static int ipgre_close(struct net_device *dev)
1408 +{
1409 +       struct ip_tunnel *t = netdev_priv(dev);
1410 +       if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
1411 +               struct in_device *in_dev = inetdev_by_index(&init_net, t->mlink);
1412 +               if (in_dev) {
1413 +                       ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1414 +                       in_dev_put(in_dev);
1415 +               }
1416 +       }
1417 +       return 0;
1418 +}
1419 +
1420 +#endif
1421 +
1422 +static void ipgre_ip_tunnel_setup(struct net_device *dev)
1423 +{
1424 +       //SET_MODULE_OWNER(dev);
1425 +       dev->uninit             = ipgre_tunnel_uninit;
1426 +       dev->destructor         = free_netdev;
1427 +       dev->hard_start_xmit    = ipgre_ip_tunnel_xmit;
1428 +       dev->get_stats          = ipgre_tunnel_get_stats;
1429 +       dev->do_ioctl           = ipgre_tunnel_ioctl;
1430 +       dev->change_mtu         = ipgre_tunnel_change_mtu;
1431 +
1432 +       dev->type               = ARPHRD_IPGRE;
1433 +       dev->hard_header_len    = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
1434 +       dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
1435 +       dev->flags              = IFF_NOARP;
1436 +       dev->iflink             = 0;
1437 +       dev->addr_len           = 4;
1438 +}
1439 +
1440 +/* Tunnel setup for ipgre_eth */
1441 +static void ipgre_eth_tunnel_setup(struct net_device *dev)
1442 +{
1443 +       //SET_MODULE_OWNER(dev);
1444 +       ether_setup(dev);
1445 +
1446 +       dev->uninit             = ipgre_tunnel_uninit;
1447 +       dev->destructor         = free_netdev;
1448 +       dev->hard_start_xmit    = ipgre_eth_tunnel_xmit;
1449 +       dev->get_stats          = ipgre_tunnel_get_stats;
1450 +       dev->do_ioctl           = ipgre_tunnel_ioctl;
1451 +       dev->change_mtu         = ipgre_tunnel_change_mtu;
1452 +
1453 +       dev->hard_header_len    = ETH_HLEN + sizeof(struct iphdr) + 4;
1454 +       dev->tx_queue_len       = 0;
1455 +       random_ether_addr(dev->dev_addr);
1456 +
1457 +#ifdef GRE_DEBUG
1458 +       { unsigned char *d = dev->dev_addr;
1459 +       printk(KERN_ALERT "Here is the address we got: %02x:%02x:%02x:%02x:%02x:%02x\n", d[0], d[1], d[2], d[3], d[4], d[5]); }
1460 +#endif
1461 +
1462 +       dev->iflink             = 0;
1463 +}
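Side by side, the two setup routines above leave the device with quite different defaults before ipgre_tunnel_init() rebinds it to a real route; the numbers below follow directly from the constants used in the code:

    ipgre_ip_tunnel_setup():   ARPHRD_IPGRE, IFF_NOARP, 4-byte address,
                               mtu = 1500 - 20 - 4 = 1476
    ipgre_eth_tunnel_setup():  ether_setup() defaults plus a random MAC,
                               hard_header_len = 14 + 20 + 4 = 38,
                               tx_queue_len = 0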
1464 +
1465 +static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1466 +            unsigned short type,
1467 +            const void *daddr, const void *saddr, unsigned len)
1468 +{
1469 +    struct ip_tunnel *t = netdev_priv(dev);
1470 +    struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1471 +    __be16 *p = (__be16*)(iph+1);
1472 +
1473 +    memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1474 +    p[0]        = t->parms.o_flags;
1475 +    p[1]        = htons(type);
1476 +
1477 +    /*
1478 +     *  Set the source hardware address.
1479 +     */
1480 +
1481 +    if (saddr)
1482 +        memcpy(&iph->saddr, saddr, 4);
1483 +
1484 +    if (daddr) {
1485 +        memcpy(&iph->daddr, daddr, 4);
1486 +        return t->hlen;
1487 +    }
1488 +    if (iph->daddr && !ipv4_is_multicast(iph->daddr))
1489 +        return t->hlen;
1490 +
1491 +    return -t->hlen;
1492 +}
1493 +
1494 +static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1495 +{
1496 +    struct iphdr *iph = (struct iphdr*) skb_mac_header(skb);
1497 +    memcpy(haddr, &iph->saddr, 4);
1498 +    return 4;
1499 +}
1500 +
1501 +static const struct header_ops ipgre_header_ops = {
1502 +    .create = ipgre_header,
1503 +    .parse  = ipgre_header_parse,
1504 +};
1505 +
1506 +static int ipgre_tunnel_init(struct net_device *dev)
1507 +{
1508 +       struct net_device *tdev = NULL;
1509 +       struct ip_tunnel *tunnel;
1510 +       struct iphdr *iph;
1511 +       int hlen = LL_MAX_HEADER;
1512 +       int mtu = ETH_DATA_LEN;
1513 +       int addend = sizeof(struct iphdr) + 4;
1514 +
1515 +       tunnel = netdev_priv(dev);
1516 +       iph = &tunnel->parms.iph;
1517 +
1518 +       tunnel->dev = dev;
1519 +       strcpy(tunnel->parms.name, dev->name);
1520 +
1521 +       if (tunnel->parms.proto_type != ETH_P_ETH)
1522 +       {
1523 +               memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1524 +               memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1525 +       }
1526 +
1527 +       /* Guess output device to choose reasonable mtu and hard_header_len */
1528 +
1529 +       if (iph->daddr) {
1530 +               struct flowi fl = { //.fl_net = &init_net,
1531 +                                   .oif = tunnel->parms.link,
1532 +                                   .nl_u = { .ip4_u =
1533 +                                             { .daddr = iph->daddr,
1534 +                                               .saddr = iph->saddr,
1535 +                                               .tos = RT_TOS(iph->tos) } },
1536 +                                   .proto = IPPROTO_GRE };
1537 +               struct rtable *rt;
1538 +               if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
1539 +                       tdev = rt->u.dst.dev;
1540 +                       ip_rt_put(rt);
1541 +               }
1542 +
1543 +               if (tunnel->parms.proto_type == ETH_P_ETH)
1544 +               {
1545 +                   dev->flags |= IFF_BROADCAST;
1546 +               }
1547 +               else
1548 +               {
1549 +                       dev->flags |= IFF_POINTOPOINT;
1550 +               }
1551 +
1552 +#ifdef CONFIG_NET_IPGRE_BROADCAST
1553 +               if (ipv4_is_multicast(iph->daddr)) {
1554 +                       if (!iph->saddr)
1555 +                               return -EINVAL;
1556 +                       dev->flags = IFF_BROADCAST;
1557 +                       dev->header_ops = &ipgre_header_ops;
1558 +                       dev->open = ipgre_open;
1559 +                       dev->stop = ipgre_close;
1560 +               }
1561 +#endif
1562 +       }
1563 +
1564 +       if (!tdev && tunnel->parms.link)
1565 +               tdev = __dev_get_by_index(&init_net, tunnel->parms.link);
1566 +
1567 +       if (tdev) {
1568 +               hlen = tdev->hard_header_len;
1569 +               mtu = tdev->mtu;
1570 +       }
1571 +       dev->iflink = tunnel->parms.link;
1572 +
1573 +       /* Precalculate GRE options length */
1574 +       if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
1575 +               if (tunnel->parms.o_flags&GRE_CSUM)
1576 +                       addend += 4;
1577 +               if (tunnel->parms.o_flags&GRE_KEY)
1578 +                       addend += 4;
1579 +               if (tunnel->parms.o_flags&GRE_SEQ)
1580 +                       addend += 4;
1581 +       }
1582 +       dev->hard_header_len = hlen + addend;
1583 +       dev->mtu = mtu - addend;
1584 +       tunnel->hlen = addend;
1585 +       return 0;
1586 +}
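To make the addend arithmetic above concrete, here is a worked example for a tunnel created with GRE_KEY set and routed over a plain Ethernet device (tdev->hard_header_len = 14 and tdev->mtu = 1500 are assumed, not taken from the patch):

    addend               = 20 (struct iphdr) + 4 (GRE flags/proto) + 4 (key) = 28
    dev->hard_header_len = 14 + 28 = 42
    dev->mtu             = 1500 - 28 = 1472
    tunnel->hlen         = 28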
1587 +
1588 +static int __init ipgre_fb_tunnel_init(struct net_device *dev)
1589 +{
1590 +       struct ip_tunnel *tunnel = netdev_priv(dev);
1591 +       struct iphdr *iph = &tunnel->parms.iph;
1592 +
1593 +       tunnel->dev = dev;
1594 +       strcpy(tunnel->parms.name, dev->name);
1595 +
1596 +       iph->version            = 4;
1597 +       iph->protocol           = IPPROTO_GRE;
1598 +       iph->ihl                = 5;
1599 +       tunnel->hlen            = sizeof(struct iphdr) + 4;
1600 +
1601 +       dev_hold(dev);
1602 +       tunnels_wc[0]           = tunnel;
1603 +       return 0;
1604 +}
1605 +
1606 +
1607 +static struct net_protocol ipgre_protocol = {
1608 +       .handler        =       ipgre_rcv,
1609 +       .err_handler    =       ipgre_err,
1610 +};
1611 +
1612 +
1613 +/*
1614 + *     And now the modules code and kernel interface.
1615 + */
1616 +
1617 +static int __init ipgre_init(void)
1618 +{
1619 +       int err;
1620 +
1621 +       printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
1622 +
1623 +       if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
1624 +               printk(KERN_INFO "ipgre init: can't add protocol\n");
1625 +               return -EAGAIN;
1626 +       }
1627 +
1628 +       ipgre_fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
1629 +                                          ipgre_ip_tunnel_setup);
1630 +       if (!ipgre_fb_tunnel_dev) {
1631 +               err = -ENOMEM;
1632 +               goto err1;
1633 +       }
1634 +
1635 +       ipgre_fb_tunnel_dev->init = ipgre_fb_tunnel_init;
1636 +
1637 +       if ((err = register_netdev(ipgre_fb_tunnel_dev)))
1638 +               goto err2;
1639 +out:
1640 +       return err;
1641 +err2:
1642 +       free_netdev(ipgre_fb_tunnel_dev);
1643 +err1:
1644 +       inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
1645 +       goto out;
1646 +}
1647 +
1648 +static void __exit ipgre_destroy_tunnels(void)
1649 +{
1650 +       int prio;
1651 +
1652 +       for (prio = 0; prio < 4; prio++) {
1653 +               int h;
1654 +               for (h = 0; h < HASH_SIZE; h++) {
1655 +                       struct ip_tunnel *t;
1656 +                       while ((t = tunnels[prio][h]) != NULL)
1657 +                               unregister_netdevice(t->dev);
1658 +               }
1659 +       }
1660 +}
1661 +
1662 +static void __exit ipgre_fini(void)
1663 +{
1664 +       if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
1665 +               printk(KERN_INFO "ipgre close: can't remove protocol\n");
1666 +
1667 +       rtnl_lock();
1668 +       ipgre_destroy_tunnels();
1669 +       rtnl_unlock();
1670 +}
1671 +
1672 +module_init(ipgre_init);
1673 +module_exit(ipgre_fini);
1674 +MODULE_LICENSE("GPL");
1675 diff -Nurb linux-2.6.27-660/include/linux/if_ether.h linux-2.6.27-700/include/linux/if_ether.h
1676 --- linux-2.6.27-660/include/linux/if_ether.h   2008-10-09 18:13:53.000000000 -0400
1677 +++ linux-2.6.27-700/include/linux/if_ether.h   2009-04-16 10:27:39.000000000 -0400
1678 @@ -56,6 +56,7 @@
1679  #define ETH_P_DIAG      0x6005          /* DEC Diagnostics              */
1680  #define ETH_P_CUST      0x6006          /* DEC Customer use             */
1681  #define ETH_P_SCA       0x6007          /* DEC Systems Comms Arch       */
1682 +#define ETH_P_ETH       0x6558          /* Ethernet in Ethernet         */
1683  #define ETH_P_RARP      0x8035         /* Reverse Addr Res packet      */
1684  #define ETH_P_ATALK    0x809B          /* Appletalk DDP                */
1685  #define ETH_P_AARP     0x80F3          /* Appletalk AARP               */
1686 diff -Nurb linux-2.6.27-660/include/linux/if_tunnel.h linux-2.6.27-700/include/linux/if_tunnel.h
1687 --- linux-2.6.27-660/include/linux/if_tunnel.h  2008-10-09 18:13:53.000000000 -0400
1688 +++ linux-2.6.27-700/include/linux/if_tunnel.h  2009-04-16 10:27:39.000000000 -0400
1689 @@ -29,6 +29,7 @@
1690         __be16                  o_flags;
1691         __be32                  i_key;
1692         __be32                  o_key;
1693 +        __be16                  proto_type;   /* Added */
1694         struct iphdr            iph;
1695  };
1696  
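With the two header changes above in place, userspace selects an Ethernet payload by filling in the new proto_type field before issuing the usual tunnel ioctl. The sketch below is hypothetical and not part of this patch (a patched iproute2 would normally drive this); it assumes the patched headers are installed, and everything in it other than proto_type and ETH_P_ETH comes from the stock kernel/userspace headers:

    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/if.h>
    #include <linux/sockios.h>
    #include <linux/ip.h>
    #include <linux/if_tunnel.h>
    #include <linux/if_ether.h>

    /* Hypothetical helper: ask the gre0 fallback device to create an
     * Ethernet-over-GRE tunnel.  Addresses are in network byte order;
     * the ioctl handler below compares proto_type against ETH_P_ETH in
     * host byte order, so no htons() is applied to it. */
    static int add_egre_tunnel(const char *name, in_addr_t local, in_addr_t remote)
    {
            struct ip_tunnel_parm p;
            struct ifreq ifr;
            int fd, err;

            memset(&p, 0, sizeof(p));
            strncpy(p.name, name, IFNAMSIZ - 1);
            p.proto_type   = ETH_P_ETH;     /* 0x6558, from the if_ether.h hunk */
            p.iph.version  = 4;
            p.iph.ihl      = 5;
            p.iph.protocol = IPPROTO_GRE;
            p.iph.ttl      = 64;
            p.iph.saddr    = local;
            p.iph.daddr    = remote;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "gre0", IFNAMSIZ - 1);  /* fallback device */
            ifr.ifr_data = (void *)&p;

            fd = socket(AF_INET, SOCK_DGRAM, 0);
            if (fd < 0)
                    return -1;
            err = ioctl(fd, SIOCADDTUNNEL, &ifr);
            close(fd);
            return err;
    }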
1697 diff -Nurb linux-2.6.27-660/net/ipv4/ip_gre.c linux-2.6.27-700/net/ipv4/ip_gre.c
1698 --- linux-2.6.27-660/net/ipv4/ip_gre.c  2008-10-09 18:13:53.000000000 -0400
1699 +++ linux-2.6.27-700/net/ipv4/ip_gre.c  2009-04-16 12:48:33.000000000 -0400
1700 @@ -25,6 +25,7 @@
1701  #include <linux/init.h>
1702  #include <linux/in6.h>
1703  #include <linux/inetdevice.h>
1704 +#include <linux/etherdevice.h>   /**XXX added XXX */
1705  #include <linux/igmp.h>
1706  #include <linux/netfilter_ipv4.h>
1707  #include <linux/if_ether.h>
1708 @@ -48,6 +49,10 @@
1709  #include <net/ip6_route.h>
1710  #endif
1711  
1712 +#define MULTICAST(x)    (((x) & htonl(0xf0000000)) == htonl(0xe0000000))
1713 +
1714 +//#define GRE_DEBUG 1
1715 +
1716  /*
1717     Problems & solutions
1718     --------------------
1719 @@ -118,7 +123,8 @@
1720   */
1721  
1722  static int ipgre_tunnel_init(struct net_device *dev);
1723 -static void ipgre_tunnel_setup(struct net_device *dev);
1724 +static void ipgre_ip_tunnel_setup(struct net_device *dev);
1725 +static void ipgre_eth_tunnel_setup(struct net_device *dev);
1726  
1727  /* Fallback tunnel: no source, no destination, no key, no options */
1728  
1729 @@ -255,6 +261,7 @@
1730         __be32 remote = parms->iph.daddr;
1731         __be32 local = parms->iph.saddr;
1732         __be32 key = parms->i_key;
1733 +       __be16 proto = parms->proto_type;
1734         struct ip_tunnel *t, **tp, *nt;
1735         struct net_device *dev;
1736         char name[IFNAMSIZ];
1737 @@ -269,12 +276,28 @@
1738         if (!create)
1739                 return NULL;
1740  
1741 +       printk(KERN_CRIT "Adding tunnel %s with key %u\n", parms->name, ntohl(key));
1742 +
1743         if (parms->name[0])
1744                 strlcpy(name, parms->name, IFNAMSIZ);
1745         else
1746                 sprintf(name, "gre%%d");
1747  
1748 -       dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
1749 +
1750 +       /* Tunnel creation: check payload type and call appropriate
1751 +        * function */
1752 +       switch (proto)
1753 +       {
1754 +           case ETH_P_IP:
1755 +               dev = alloc_netdev(sizeof(*t), name, ipgre_ip_tunnel_setup);
1756 +               break;
1757 +           case ETH_P_ETH:
1758 +               dev = alloc_netdev(sizeof(*t), name, ipgre_eth_tunnel_setup);
1759 +               break;
1760 +           default:
1761 +               return NULL;
1762 +       }
1763 +
1764         if (!dev)
1765           return NULL;
1766  
1767 @@ -431,6 +454,7 @@
1768         u32    seqno = 0;
1769         struct ip_tunnel *tunnel;
1770         int    offset = 4;
1771 +    __be16 proto;
1772  
1773         if (!pskb_may_pull(skb, 16))
1774                 goto drop_nolock;
1775 @@ -439,6 +463,11 @@
1776         h = skb->data;
1777         flags = *(__be16*)h;
1778  
1779 +#ifdef GRE_DEBUG
1780 +       printk(KERN_DEBUG "gre.c [601] src:%x dst:%x  proto:%d %p\n", iph->saddr, iph->daddr, iph->protocol, skb->data);
1781 +#endif 
1782 +       proto = ntohs(*(__be16*)(h+2)); /* XXX added XXX */
1783 +       
1784         if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
1785                 /* - Version must be 0.
1786                    - We do not support routing headers.
1787 @@ -493,7 +522,29 @@
1788                 __pskb_pull(skb, offset);
1789                 skb_reset_network_header(skb);
1790                 skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
1791 +               if(proto == ETH_P_ETH)
1792 +                 {
1793 + #ifdef GRE_DEBUG
1794 +                   unsigned char* tmp_hdr = skb->data;
1795 +                   printk(KERN_DEBUG "gre.c [658] %x %x %x %x %x %x\tskb %p\n", tmp_hdr[0], tmp_hdr[1], tmp_hdr[2], tmp_hdr[3], tmp_hdr[4], tmp_hdr[5], skb->data);
1796 + #endif                    
1797 +                   skb->protocol = eth_type_trans(skb, tunnel->dev);
1798 +
1799 +                   /* XXX added these lines to make arp work? XXX */
1800 +                   /*skb->mac.raw = skb->data;*/
1801 +                   skb->network_header = skb->network_header + ETH_HLEN;
1802 +                   /* XXX added these lines to make arp work? XXX */
1803 +
1804 + #ifdef GRE_DEBUG
1805 +                   tmp_hdr = skb->data;
1806 +                   printk(KERN_DEBUG "gre.c [669] %x %x %x %x %x %x\tskb %p\n", tmp_hdr[0], tmp_hdr[1], tmp_hdr[2], tmp_hdr[3], tmp_hdr[4], tmp_hdr[5], skb->data);
1807 +                   printk(KERN_ALERT "gre.c [671] received ethernet on gre %x\n",skb->protocol); 
1808 + #endif
1809 +                   memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
1810 +                 }
1811 +               else
1812                 skb->pkt_type = PACKET_HOST;
1813 +
1814  #ifdef CONFIG_NET_IPGRE_BROADCAST
1815                 if (ipv4_is_multicast(iph->daddr)) {
1816                         /* Looped back packet, drop it! */
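For reference while reading this receive hunk, the fixed GRE header that ipgre_rcv steps through looks like this (h = skb->data, i.e. the first byte after the outer IP header; the layout matches the code and RFC 2890):

    h[0..1]  flags     (GRE_CSUM | GRE_KEY | GRE_SEQ | ...)
    h[2..3]  protocol  (ETH_P_IP for plain GRE, ETH_P_ETH == 0x6558 for EGRE)
    h[4..]   optional checksum, key and sequence number, 4 bytes each,
             present in that order when the matching flag bit is set

The proto value read at h+2 is what selects the eth_type_trans() branch added above.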
1817 @@ -539,7 +590,7 @@
1818         return(0);
1819  }
1820  
1821 -static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
1822 +static int ipgre_ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
1823  {
1824         struct ip_tunnel *tunnel = netdev_priv(dev);
1825         struct net_device_stats *stats = &tunnel->dev->stats;
1826 @@ -799,9 +850,17 @@
1827                         tdev = rt->u.dst.dev;
1828                         ip_rt_put(rt);
1829                 }
1830 +               if (tunnel->parms.proto_type == ETH_P_ETH)
1831 +               {
1832 +                   dev->flags |= IFF_BROADCAST;
1833 +               }
1834 +               else
1835 +               {
1836                 dev->flags |= IFF_POINTOPOINT;
1837         }
1838  
1839 +       }
1840 +
1841         if (!tdev && tunnel->parms.link)
1842                 tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
1843  
1844 @@ -822,10 +881,234 @@
1845         }
1846         dev->hard_header_len = hlen + addend;
1847         dev->mtu = mtu - addend;
1848 +       if (tunnel->parms.proto_type == ETH_P_ETH)
1849 +               dev->mtu -= ETH_HLEN;
1850         tunnel->hlen = addend;
1851  
1852  }
1853  
1854 +static int ipgre_eth_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
1855 +{
1856 +       struct ip_tunnel *tunnel = netdev_priv(dev);
1857 +       struct net_device_stats *stats = &tunnel->dev->stats;
1858 +       struct iphdr *old_iph = ip_hdr(skb);
1859 +       struct iphdr *tiph = &tunnel->parms.iph;
1860 +       u8     tos;
1861 +       __be16 df;
1862 +       struct rtable *rt;              /* Route to the other host */
1863 +       struct net_device *tdev;        /* Device to other host */
1864 +       int    gre_hlen = tunnel->hlen; /* XXX changed XXX*/
1865 +       //struct etheriphdr  *ethiph;
1866 +       struct iphdr  *iph;             /* Our new IP header */
1867 +       int    max_headroom;            /* The extra header space needed */
1868 +       int    mtu;
1869 +
1870 +#ifdef GRE_DEBUG
1871 +       printk(KERN_ALERT "gre.c:972 Starting xmit\n");
1872 +#endif
1873 +
1874 +       if (tunnel->recursion++) {
1875 +               stats->collisions++;
1876 +               goto tx_error;
1877 +       }
1878 +
1879 +       /* Need valid non-multicast daddr.  */
1880 +       if (tiph->daddr == 0 || MULTICAST(tiph->daddr))
1881 +               goto tx_error;
1882 +
1883 +       tos = tiph->tos;
1884 +       if (tos&1) {
1885 +               if (skb->protocol == htons(ETH_P_IP))
1886 +                       tos = old_iph->tos;
1887 +               tos &= ~1;
1888 +       }
1889 +#ifdef GRE_DEBUG
1890 +       printk(KERN_ALERT "gre.c:991 Passed tos assignment.\n");
1891 +#endif
1892 +
1893 +
1894 +       {
1895 +               struct flowi fl = { //.fl_net = &init_net,
1896 +                                   .oif = tunnel->parms.link,
1897 +                                   .nl_u = { .ip4_u =
1898 +                                             { .daddr = tiph->daddr,
1899 +                                               .saddr = tiph->saddr,
1900 +                                               .tos = RT_TOS(tos) } },
1901 +                                   .proto = IPPROTO_GRE };
1902 +               if (ip_route_output_key(dev_net(dev),&rt, &fl)) {
1903 +                       stats->tx_carrier_errors++;
1904 +                       goto tx_error_icmp;
1905 +               }
1906 +       }
1907 +       tdev = rt->u.dst.dev;
1908 +#ifdef GRE_DEBUG
1909 +       printk(KERN_ALERT "gre.c:1006 Passed the route retrieval\n");
1910 +#endif
1911 +       if (tdev == dev) {
1912 +               ip_rt_put(rt);
1913 +               stats->collisions++;
1914 +               goto tx_error;
1915 +       }
1916 +#ifdef GRE_DEBUG
1917 +       printk(KERN_ALERT "gre.c:1018 Passed tdev collision check.\n");
1918 +#endif
1919 +
1920 +       /* MTU handling; if a kernel panic turns up, check this logic first */
1921 +       df = tiph->frag_off;
1922 +       if (df)
1923 +               mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
1924 +       else
1925 +               mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
1926 +/*
1927 +       if (skb->dst)
1928 +               skb->dst->ops->update_pmtu(skb->dst, mtu);
1929 +        XXX */
1930 +#ifdef GRE_DEBUG
1931 +       printk(KERN_ALERT "gre.c:1032 Passed the pmtu setting.\n");
1932 +#endif
1933 +
1934 +       if (skb->protocol == htons(ETH_P_IP)) {
1935 +               df |= (old_iph->frag_off&htons(IP_DF));
1936 +
1937 +               if ((old_iph->frag_off & htons(IP_DF)) &&
1938 +                   mtu < ntohs(old_iph->tot_len)) {
1939 +                       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1940 +                       ip_rt_put(rt);
1941 +                       goto tx_error;
1942 +               }
1943 +       }
1944 +#ifdef CONFIG_IPV6
1945 +       else if (skb->protocol == htons(ETH_P_IPV6)) {
1946 +               struct rt6_info *rt6 = (struct rt6_info*)skb->dst;
1947 +
1948 +               if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
1949 +                       if (tiph->daddr || rt6->rt6i_dst.plen == 128) {
1950 +                               rt6->rt6i_flags |= RTF_MODIFIED;
1951 +                               skb->dst->metrics[RTAX_MTU-1] = mtu;
1952 +                       }
1953 +               }
1954 +
1955 +               /* @@@ Is this correct?  */
1956 +               if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
1957 +                       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
1958 +                       ip_rt_put(rt);
1959 +                       goto tx_error;
1960 +               }
1961 +       }
1962 +#endif
1963 +#ifdef GRE_DEBUG
1964 +       printk(KERN_ALERT "gre.c:1065 Passed the fragmentation check.\n");
1965 +#endif
1966 +
1967 +       if (tunnel->err_count > 0) {
1968 +               if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
1969 +                       tunnel->err_count--;
1970 +                       dst_link_failure(skb);
1971 +               } else
1972 +                       tunnel->err_count = 0;
1973 +       }
1974 +
1975 +       max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
1976 +
1977 +       if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
1978 +               struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
1979 +               if (!new_skb) {
1980 +                       ip_rt_put(rt);
1981 +                       stats->tx_dropped++;
1982 +                       dev_kfree_skb(skb);
1983 +                       tunnel->recursion--;
1984 +                       return 0;
1985 +               }
1986 +               if (skb->sk)
1987 +                       skb_set_owner_w(new_skb, skb->sk);
1988 +               dev_kfree_skb(skb);
1989 +               skb = new_skb;
1990 +               old_iph = ip_hdr(skb);
1991 +       }
1992 +#ifdef GRE_DEBUG
1993 +       printk(KERN_ALERT "gre.c:1094 Passed the headroom calculation\n");
1994 +#endif
1995 +
1996 +       skb->transport_header = skb->data;
1997 +       skb_push(skb, gre_hlen);
1998 +       skb_reset_network_header(skb);
1999 +       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
2000 +       dst_release(skb->dst);
2001 +       skb->dst = &rt->u.dst;
2002 +
2003 +       /*
2004 +        *      Push down and install the outer IP header.
2005 +        */
2006 +
2007 +       iph                     =       ip_hdr(skb);
2008 +       iph->version            =       4;
2009 +       iph->ihl                =       sizeof(struct iphdr) >> 2;
2010 +       iph->frag_off           =       df;
2011 +       iph->protocol           =       IPPROTO_GRE;
2012 +       iph->tos                =       ipgre_ecn_encapsulate(tos, old_iph, skb);
2013 +       iph->daddr              =       rt->rt_dst;
2014 +       iph->saddr              =       rt->rt_src;
2015 +
2016 +/*     ethiph->version         =       htons(ETHERIP_VERSION); */
2017 +#ifdef GRE_DEBUG
2018 +       printk(KERN_ALERT "gre.c:1121 Passed outer IP header construction.\n");
2019 +#endif
2020 +
2021 +       if ((iph->ttl = tiph->ttl) == 0) {
2022 +               if (skb->protocol == htons(ETH_P_IP))
2023 +                       iph->ttl = old_iph->ttl;
2024 +#ifdef CONFIG_IPV6
2025 +               else if (skb->protocol == htons(ETH_P_IPV6))
2026 +                       iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit;
2027 +#endif
2028 +               else
2029 +                       iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
2030 +       }
2031 +#ifdef GRE_DEBUG
2032 +       printk(KERN_ALERT "gre.c:1006 Passed the TTL check.\n");
2033 +#endif
2034 +
2035 +       ((__be16*)(iph+1))[0] = tunnel->parms.o_flags;
2036 +       ((__be16*)(iph+1))[1] = htons(tunnel->parms.proto_type);
2037 +
2038 +       if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
2039 +               __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
2040 +
2041 +               if (tunnel->parms.o_flags&GRE_SEQ) {
2042 +                       ++tunnel->o_seqno;
2043 +                       *ptr = htonl(tunnel->o_seqno);
2044 +                       ptr--;
2045 +               }
2046 +               if (tunnel->parms.o_flags&GRE_KEY) {
2047 +                       *ptr = tunnel->parms.o_key;
2048 +                       ptr--;
2049 +               }
2050 +               if (tunnel->parms.o_flags&GRE_CSUM) {
2051 +                       *ptr = 0;
2052 +                       *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
2053 +               }
2054 +       }
2055 +#ifdef GRE_DEBUG
2056 +       printk(KERN_ALERT "gre.c:1006 Passed the tunnel transmit.\n");
2057 +#endif
2058 +
2059 +       nf_reset(skb);
2060 +
2061 +       IPTUNNEL_XMIT();
2062 +       tunnel->recursion--;
2063 +       return 0;
2064 +
2065 +tx_error_icmp:
2066 +       dst_link_failure(skb);
2067 +
2068 +tx_error:
2069 +       stats->tx_errors++;
2070 +       dev_kfree_skb(skb);
2071 +       tunnel->recursion--;
2072 +       return 0;
2073 +}
2074 +
2075 +
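Taken together, the transmit path above emits the following encapsulation for an Ethernet payload (a sketch for a keyed tunnel with GRE checksums and sequence numbers left off):

    outer link-layer header          added later by the underlying device
    outer struct iphdr, 20 bytes     protocol = IPPROTO_GRE (47)
    GRE flags + protocol, 4 bytes    protocol = ETH_P_ETH (0x6558)
    GRE key, 4 bytes                 o_key, present only when GRE_KEY is set
    inner Ethernet frame             the original skb handed to hard_start_xmit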
2076  static int
2077  ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
2078  {
2079 @@ -876,6 +1159,7 @@
2080                         p.o_key = 0;
2081  
2082                 t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
2083 +               if (t) printk(KERN_ALERT "1174 GRE: proto %s %x\n", p.name, p.proto_type);
2084  
2085                 if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
2086                         if (t != NULL) {
2087 @@ -893,6 +1177,12 @@
2088                                 else if (p.iph.daddr)
2089                                         nflags = IFF_POINTOPOINT;
2090  
2091 +                               /* XXX: Set IFF_BROADCAST back if
2092 +                                * transporting Ethernet */
2093 +                               printk(KERN_ALERT "1193 GRE: proto %s %d\n", p.name, p.proto_type);
2094 +                               if (p.proto_type == ETH_P_ETH)
2095 +                                       nflags = IFF_BROADCAST;
2096 +
2097                                 if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
2098                                         err = -EINVAL;
2099                                         break;
2100 @@ -902,8 +1192,13 @@
2101                                 t->parms.iph.daddr = p.iph.daddr;
2102                                 t->parms.i_key = p.i_key;
2103                                 t->parms.o_key = p.o_key;
2104 +                               /* XXX:Copy in the protocol field */
2105 +                               t->parms.proto_type = p.proto_type;
2106 +                               if (t->parms.proto_type != ETH_P_ETH) {
2107                                 memcpy(dev->dev_addr, &p.iph.saddr, 4);
2108                                 memcpy(dev->broadcast, &p.iph.daddr, 4);
2109 +                               }
2110 +
2111                                 ipgre_tunnel_link(ign, t);
2112                                 netdev_state_change(dev);
2113                         }
2114 @@ -1076,13 +1371,13 @@
2115  
2116  #endif
2117  
2118 -static void ipgre_tunnel_setup(struct net_device *dev)
2119 +static void ipgre_ip_tunnel_setup(struct net_device *dev)
2120  {
2121         dev->uninit             = ipgre_tunnel_uninit;
2122         dev->destructor         = free_netdev;
2123 -       dev->hard_start_xmit    = ipgre_tunnel_xmit;
2124         dev->do_ioctl           = ipgre_tunnel_ioctl;
2125         dev->change_mtu         = ipgre_tunnel_change_mtu;
2126 +       dev->hard_start_xmit    = ipgre_ip_tunnel_xmit;
2127  
2128         dev->type               = ARPHRD_IPGRE;
2129         dev->hard_header_len    = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
2130 @@ -1093,6 +1388,36 @@
2131         dev->features           |= NETIF_F_NETNS_LOCAL;
2132  }
2133  
2134 +/* Tunnel setup for ipgre_eth */
2135 +static void ipgre_eth_tunnel_setup(struct net_device *dev)
2136 +{
2137 +       //SET_MODULE_OWNER(dev);
2138 +
2139 +       // Set default values for Ethernet device
2140 +       ether_setup(dev);
2141 +
2142 +       dev->uninit             = ipgre_tunnel_uninit;
2143 +       dev->destructor         = free_netdev;
2144 +       dev->hard_start_xmit    = ipgre_eth_tunnel_xmit;
2145 +       //dev->get_stats                = ipgre_tunnel_get_stats;
2146 +       dev->do_ioctl           = ipgre_tunnel_ioctl;
2147 +       dev->change_mtu         = ipgre_tunnel_change_mtu;
2148 +
2149 +       dev->hard_header_len    = LL_MAX_HEADER + ETH_HLEN + sizeof(struct iphdr) + 4;
2150 +       dev->mtu                = ETH_DATA_LEN - ETH_HLEN - sizeof(struct iphdr) - 4;
2151 +       dev->tx_queue_len       = 0;
2152 +       dev->iflink             = 0;
2153 +       dev->features           |= NETIF_F_NETNS_LOCAL;
2154 +
2155 +       random_ether_addr(dev->dev_addr);
2156 +
2157 +#ifdef GRE_DEBUG
2158 +       { unsigned char* d = dev->dev_addr;
2159 +       printk(KERN_ALERT "Here is the address we got: %02x:%02x:%02x:%02x:%02x:%02x\n", d[0], d[1], d[2], d[3], d[4], d[5]); }
2160 +#endif 
2161 +}
2162 +
2163 +
2164  static int ipgre_tunnel_init(struct net_device *dev)
2165  {
2166         struct ip_tunnel *tunnel;
2167 @@ -1104,8 +1429,10 @@
2168         tunnel->dev = dev;
2169         strcpy(tunnel->parms.name, dev->name);
2170  
2171 +       if (tunnel->parms.proto_type != ETH_P_ETH) {
2172         memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
2173         memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
2174 +       } 
2175  
2176         ipgre_tunnel_bind_dev(dev);
2177  
2178 @@ -1181,7 +1508,7 @@
2179                 goto err_assign;
2180  
2181         ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
2182 -                                          ipgre_tunnel_setup);
2183 +                                          ipgre_ip_tunnel_setup);
2184         if (!ign->fb_tunnel_dev) {
2185                 err = -ENOMEM;
2186                 goto err_alloc_dev;