dist_modules += ip_gre
build_modules += $(if $(BUILD_GRE),ip_gre)
ip_gre_sources = \
+ linux-2.6/compat-2.6/dev-ip_gre.c \
linux-2.6/compat-2.6/ip_gre.c \
+ linux-2.6/compat-2.6/ip_output-ip_gre.c \
linux-2.6/compat-2.6/net_namespace-ip_gre.c
ip_gre_headers = \
linux-2.6/compat-2.6/compat26.h \
linux-2.6/compat-2.6/include/linux/tcp.h \
linux-2.6/compat-2.6/include/linux/types.h \
linux-2.6/compat-2.6/include/net/dst.h \
+ linux-2.6/compat-2.6/include/net/ip.h \
linux-2.6/compat-2.6/include/net/ipip.h \
linux-2.6/compat-2.6/include/net/netns/generic.h \
linux-2.6/compat-2.6/include/net/net_namespace.h \
--- /dev/null
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
+
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+
+/* Bookkeeping node used by this backport to queue a net_device for
+ * deferred unregistration on a caller-supplied list (emulates the
+ * 2.6.33 dev->unreg_list mechanism). */
+struct netdev_list {
+ struct list_head unreg_list;
+ struct net_device *dev;
+};
+
+/**
+ * unregister_netdevice_queue - remove device from the kernel
+ * @dev: device
+ * @head: list
+ *
+ * This function shuts down a device interface and removes it
+ * from the kernel tables.
+ * If head not NULL, device is queued to be unregistered later.
+ *
+ * Callers must hold the rtnl semaphore. You may want
+ * unregister_netdev() instead of this.
+ */
+
+void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
+{
+ ASSERT_RTNL();
+
+ if (head) {
+ struct netdev_list *list_item = kmalloc(sizeof *list_item,
+ GFP_KERNEL);
+ /* If we can't queue it, probably better to try to destroy it
+ * now. Either could potentially be bad but this is probably
+ * less likely to cause problems. */
+ if (!list_item) {
+ unregister_netdevice(dev);
+ return;
+ }
+
+ /* Queue node is consumed and freed by unregister_netdevice_many(). */
+ list_item->dev = dev;
+ list_add_tail(&list_item->unreg_list, head);
+ } else
+ unregister_netdevice(dev);
+}
+
+/**
+ * unregister_netdevice_many - unregister many devices
+ * @head: list of devices
+ *
+ * Unregisters every device previously queued on @head by
+ * unregister_netdevice_queue() and frees the queue nodes.
+ * Callers must hold the rtnl semaphore (unregister_netdevice()
+ * asserts it).
+ */
+void unregister_netdevice_many(struct list_head *head)
+{
+ if (!list_empty(head)) {
+ struct netdev_list *list_item, *next;
+
+ list_for_each_entry_safe(list_item, next, head, unreg_list) {
+ unregister_netdevice(list_item->dev);
+ kfree(list_item);
+ }
+ }
+}
+
+#endif /* kernel < 2.6.33 */
#define for_each_netdev(net,d) list_for_each_entry(d, &dev_base_head, dev_list)
#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+#define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e))
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
+extern void unregister_netdevice_queue(struct net_device *dev,
+ struct list_head *head);
+extern void unregister_netdevice_many(struct list_head *head);
+#endif
#endif
--- /dev/null
+#ifndef __NET_IP_WRAPPER_H
+#define __NET_IP_WRAPPER_H 1
+
+#include_next <net/ip.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+
+/* Backports of the 2.6.25 helpers; implemented in
+ * compat-2.6/ip_output-ip_gre.c. */
+extern int __ip_local_out(struct sk_buff *skb);
+extern int ip_local_out(struct sk_buff *skb);
+
+#endif /* linux kernel < 2.6.25 */
+
+#endif
#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+#define HAVE_NETDEV_QUEUE_STATS
+#endif
+
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
#include <linux/if_tunnel.h>
spinlock_t lock;
};
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
-#define IPTUNNEL_XMIT() do { \
- int err; \
- int pkt_len = skb->len - skb_transport_offset(skb); \
- \
- skb->ip_summed = CHECKSUM_NONE; \
- iph->tot_len = htons(skb->len); \
- ip_select_ident(iph, &rt->u.dst, NULL); \
- ip_send_check(iph); \
- \
- err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev, dst_output);\
- if (err == NET_XMIT_SUCCESS || err == NET_XMIT_CN) { \
- stats->tx_bytes += pkt_len; \
- stats->tx_packets++; \
- } else { \
- stats->tx_errors++; \
- stats->tx_aborted_errors++; \
- } \
-} while (0)
+#ifdef HAVE_NETDEV_QUEUE_STATS
+#define UPDATE_TX_STATS() \
+ txq->tx_bytes += pkt_len; \
+ txq->tx_packets++;
#else
+#define UPDATE_TX_STATS() \
+ stats->tx_bytes += pkt_len; \
+ stats->tx_packets++;
+#endif
+
#define IPTUNNEL_XMIT() do { \
int err; \
- int pkt_len = skb->len; \
+ int pkt_len = skb->len - skb_transport_offset(skb); \
\
skb->ip_summed = CHECKSUM_NONE; \
ip_select_ident(iph, &rt->u.dst, NULL); \
\
err = ip_local_out(skb); \
- if (net_xmit_eval(err) == 0) { \
- stats->tx_bytes += pkt_len; \
- stats->tx_packets++; \
+ if (likely(net_xmit_eval(err) == 0)) { \
+ UPDATE_TX_STATS(); \
} else { \
stats->tx_errors++; \
stats->tx_aborted_errors++; \
} \
} while (0)
-#endif
#else
#include_next <net/ipip.h>
#define __NET_NAMESPACE_WRAPPER_H 1
#include <linux/version.h>
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
#include_next <net/net_namespace.h>
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
struct net;
-struct pernet_operations {
+struct extended_pernet_operations {
struct list_head list;
int (*init)(struct net *net);
void (*exit)(struct net *net);
+ int *id;
+ size_t size;
};
-#endif /* linux kernel < 2.6.24 */
+#define pernet_operations extended_pernet_operations
+
+#define register_pernet_device rpl_register_pernet_device
+int rpl_register_pernet_device(struct extended_pernet_operations *ops);
-extern int register_pernet_gen_device(int *id, struct pernet_operations *);
-extern void unregister_pernet_gen_device(int id, struct pernet_operations *);
+#define unregister_pernet_device rpl_unregister_pernet_device
+void rpl_unregister_pernet_device(struct extended_pernet_operations *ops);
-#endif /* linux kernel < 2.6.26 */
+#endif /* linux kernel < 2.6.33 */
#endif
#define __NET_NETNS_GENERIC_WRAPPER_H 1
#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include_next <net/netns/generic.h>
+#endif
-struct net;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
-extern void *net_generic(struct net *net, int id);
-extern int net_assign_generic(struct net *net, int id, void *data);
+#define net_assign_generic rpl_net_assign_generic
+int rpl_net_assign_generic(struct net *net, int id, void *data);
-#else
-#include_next <net/netns/generic.h>
-#endif /* linux kernel < 2.6.26 */
+#define net_generic rpl_net_generic
+void *rpl_net_generic(struct net *net, int id);
+
+#endif /* linux kernel < 2.6.33 */
#endif
#define HASH_SIZE 16
-static int ipgre_net_id;
+static int ipgre_net_id __read_mostly;
struct ipgre_net {
struct ip_tunnel *tunnels[4][HASH_SIZE];
#define tunnels_r tunnels[2]
#define tunnels_l tunnels[1]
#define tunnels_wc tunnels[0]
+/*
+ * Locking : hash tables are protected by RCU and a spinlock
+ */
+static DEFINE_SPINLOCK(ipgre_lock);
-static DEFINE_RWLOCK(ipgre_lock);
+#define for_each_ip_tunnel_rcu(start) \
+ for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
/* Given src, dst and key, find appropriate for input tunnel. */
ARPHRD_ETHER : ARPHRD_IPGRE;
int score, cand_score = 4;
- for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) {
+ for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
if (local != t->parms.iph.saddr ||
remote != t->parms.iph.daddr ||
key != t->parms.i_key ||
}
}
- for (t = ign->tunnels_r[h0^h1]; t; t = t->next) {
+ for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
if (remote != t->parms.iph.daddr ||
key != t->parms.i_key ||
!(t->dev->flags & IFF_UP))
}
}
- for (t = ign->tunnels_l[h1]; t; t = t->next) {
+ for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
if ((local != t->parms.iph.saddr &&
(local != t->parms.iph.daddr ||
!ipv4_is_multicast(local))) ||
}
}
- for (t = ign->tunnels_wc[h1]; t; t = t->next) {
+ for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
if (t->parms.i_key != key ||
!(t->dev->flags & IFF_UP))
continue;
if (cand != NULL)
return cand;
- if (ign->fb_tunnel_dev->flags & IFF_UP)
- return netdev_priv(ign->fb_tunnel_dev);
+ dev = ign->fb_tunnel_dev;
+ if (dev->flags & IFF_UP)
+ return netdev_priv(dev);
return NULL;
}
{
struct ip_tunnel **tp = ipgre_bucket(ign, t);
+ spin_lock_bh(&ipgre_lock);
t->next = *tp;
- write_lock_bh(&ipgre_lock);
- *tp = t;
- write_unlock_bh(&ipgre_lock);
+ rcu_assign_pointer(*tp, t);
+ spin_unlock_bh(&ipgre_lock);
}
static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) {
if (t == *tp) {
- write_lock_bh(&ipgre_lock);
+ spin_lock_bh(&ipgre_lock);
*tp = t->next;
- write_unlock_bh(&ipgre_lock);
+ spin_unlock_bh(&ipgre_lock);
break;
}
}
break;
}
- read_lock(&ipgre_lock);
+ rcu_read_lock();
t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
flags & GRE_KEY ?
*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
t->err_count = 1;
t->err_time = jiffies;
out:
- read_unlock(&ipgre_lock);
+ rcu_read_unlock();
return;
}
gre_proto = *(__be16 *)(h + 2);
- read_lock(&ipgre_lock);
+ rcu_read_lock();
if ((tunnel = ipgre_tunnel_lookup(skb->dev,
iph->saddr, iph->daddr, key,
gre_proto))) {
ipgre_ecn_decapsulate(iph, skb);
netif_rx(skb);
- read_unlock(&ipgre_lock);
+ rcu_read_unlock();
return(0);
}
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
- read_unlock(&ipgre_lock);
+ rcu_read_unlock();
drop_nolock:
kfree_skb(skb);
return(0);
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct net_device_stats *stats;
+#ifdef HAVE_NETDEV_QUEUE_STATS
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+#endif
struct iphdr *old_iph = ip_hdr(skb);
struct iphdr *tiph;
u8 tos;
int mtu;
#ifdef HAVE_NETDEV_STATS
- stats = &tunnel->dev->stats;
+ stats = &dev->stats;
#else
stats = &tunnel->stat;
#endif
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
if (!new_skb) {
ip_rt_put(rt);
+#ifdef HAVE_NETDEV_QUEUE_STATS
+ txq->tx_dropped++;
+#else
stats->tx_dropped++;
+#endif
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
#endif
};
-static void ipgre_destroy_tunnels(struct ipgre_net *ign)
+static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
{
int prio;
for (prio = 0; prio < 4; prio++) {
int h;
for (h = 0; h < HASH_SIZE; h++) {
- struct ip_tunnel *t;
- while ((t = ign->tunnels[prio][h]) != NULL)
- unregister_netdevice(t->dev);
+ struct ip_tunnel *t = ign->tunnels[prio][h];
+
+ /* Queue each device on @head; actual unregistration is
+ * deferred to the caller's unregister_netdevice_many(),
+ * so walking t->next here is still safe. */
+ while (t != NULL) {
+ unregister_netdevice_queue(t->dev, head);
+ t = t->next;
+ }
}
}
}
static int ipgre_init_net(struct net *net)
{
+ struct ipgre_net *ign = net_generic(net, ipgre_net_id);
int err;
- struct ipgre_net *ign;
-
- err = -ENOMEM;
- ign = kzalloc(sizeof(struct ipgre_net), GFP_KERNEL);
- if (ign == NULL)
- goto err_alloc;
-
- err = net_assign_generic(net, ipgre_net_id, ign);
- if (err < 0)
- goto err_assign;
ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), GRE_IOCTL_DEVICE,
ipgre_tunnel_setup);
err_reg_dev:
free_netdev(ign->fb_tunnel_dev);
err_alloc_dev:
- /* nothing */
-err_assign:
- kfree(ign);
-err_alloc:
return err;
}
static void ipgre_exit_net(struct net *net)
{
struct ipgre_net *ign;
+ LIST_HEAD(list);
ign = net_generic(net, ipgre_net_id);
rtnl_lock();
- ipgre_destroy_tunnels(ign);
+ ipgre_destroy_tunnels(ign, &list);
+ /* Batch-unregister everything queued above, under the same rtnl hold. */
+ unregister_netdevice_many(&list);
rtnl_unlock();
- kfree(ign);
}
static struct pernet_operations ipgre_net_ops = {
.init = ipgre_init_net,
.exit = ipgre_exit_net,
+ .id = &ipgre_net_id,
+ .size = sizeof(struct ipgre_net),
};
static int ipgre_tap_init(struct net_device *dev)
parms->iph.frag_off = htons(IP_DF);
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)
+static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[])
+#else
static int ipgre_newlink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
+#endif
{
struct ip_tunnel *nt;
struct net *net = dev_net(dev);
return -EAGAIN;
}
- err = register_pernet_gen_device(&ipgre_net_id, &ipgre_net_ops);
+ err = register_pernet_device(&ipgre_net_ops);
if (err < 0)
goto gen_device_failed;
tap_ops_failed:
rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
- unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
+ unregister_pernet_device(&ipgre_net_ops);
#endif
gen_device_failed:
inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
rtnl_link_unregister(&ipgre_tap_ops);
rtnl_link_unregister(&ipgre_link_ops);
#endif
- unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
+ unregister_pernet_device(&ipgre_net_ops);
if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
printk(KERN_INFO "ipgre close: can't remove protocol\n");
}
--- /dev/null
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+
+#include <linux/netfilter_ipv4.h>
+#include <net/ip.h>
+
+/* Backport of __ip_local_out() (added in 2.6.25): finalize the IP header
+ * (total length + checksum) and run the LOCAL_OUT netfilter hook. */
+int __ip_local_out(struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+
+ iph->tot_len = htons(skb->len);
+ ip_send_check(iph);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+ /* Before 2.6.24, nf_hook() took a struct sk_buff **. */
+ return nf_hook(PF_INET, NF_IP_LOCAL_OUT, &skb, NULL, skb->dst->dev,
+ dst_output);
+#else
+ return nf_hook(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, skb->dst->dev,
+ dst_output);
+#endif /* kernel < 2.6.24 */
+}
+
+/* Backport of ip_local_out(): only a hook result of 1 (packet passed)
+ * proceeds to dst_output(); any other value is returned to the caller. */
+int ip_local_out(struct sk_buff *skb)
+{
+ int err;
+
+ err = __ip_local_out(skb);
+ if (likely(err == 1))
+ err = dst_output(skb);
+
+ return err;
+}
+
+#endif /* kernel < 2.6.25 */
#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
#include <linux/sched.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#undef pernet_operations
+#undef register_pernet_device
+#undef unregister_pernet_device
+#undef net_assign_generic
+#undef net_generic
+
/* This trivial implementation assumes that there is only a single pernet
- * generic device registered and that the caller is well behaved. It only
- * weakly attempts to check that these conditions are true. */
+ * device registered and that the caller is well behaved. It only weakly
+ * attempts to check that these conditions are true. */
-static bool device_registered;
+static struct extended_pernet_operations *dev_ops;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
static void *ng_data;
+#else
+static struct pernet_operations new_ops;
+#endif
-int register_pernet_gen_device(int *id, struct pernet_operations *ops)
+/* init hook installed on behalf of the wrapped ops: when id/size are set,
+ * allocate and assign the per-net private area (emulating the 2.6.33
+ * pernet core), then chain to the original init. */
+static int device_init_net(struct net *net)
+{
- BUG_ON(device_registered);
+ int err;
+ if (dev_ops->id && dev_ops->size) {
+ void *data = kzalloc(dev_ops->size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = rpl_net_assign_generic(net, *dev_ops->id, data);
+ if (err) {
+ kfree(data);
+ return err;
+ }
+ }
+ if (dev_ops->init)
+ return dev_ops->init(net);
+ return 0;
+}
- *id = 1;
- device_registered = true;
+/* exit hook installed on behalf of the wrapped ops: run the original exit
+ * FIRST -- it typically looks up its per-net state via net_generic()
+ * (e.g. ipgre_exit_net does) -- and only then free the per-net area.
+ * Freeing before the exit hook, as this patch previously did, handed the
+ * hook freed memory (use-after-free). */
+static void device_exit_net(struct net *net)
+{
+ if (dev_ops->exit)
+ dev_ops->exit(net);
+
+ if (dev_ops->id && dev_ops->size) {
+ int id = *dev_ops->id;
+ kfree(rpl_net_generic(net, id));
+ }
- if (ops->init == NULL)
- return 0;
- return ops->init(NULL);
}
-void unregister_pernet_gen_device(int id, struct pernet_operations *ops)
+/* Single-registration backport of 2.6.33 register_pernet_device() that
+ * honors the id/size fields of extended_pernet_operations. Only one
+ * device may be registered at a time (BUG_ON enforces this). */
+int rpl_register_pernet_device(struct extended_pernet_operations *ops)
+{
- device_registered = false;
- if (ops->exit)
- ops->exit(NULL);
+ BUG_ON(dev_ops);
+ dev_ops = ops;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
+ /* No pernet infrastructure at all: hand out the single fake id and
+ * run the init path once (net argument unused, passed as NULL). */
+ if (dev_ops->id)
+ *dev_ops->id = 1;
+
+ return device_init_net(NULL);
+#else
+ /* Wrap the caller's ops so our init/exit manage the per-net area. */
+ memcpy(&new_ops, dev_ops, sizeof new_ops);
+ new_ops.init = device_init_net;
+ new_ops.exit = device_exit_net;
+
+ if (ops->id)
+ return register_pernet_gen_device(dev_ops->id, &new_ops);
+ else
+ return register_pernet_device(&new_ops);
+#endif
+}
-int net_assign_generic(struct net *net, int id, void *data)
+/* Backport counterpart of unregister_pernet_device(); must receive the
+ * same ops previously passed to rpl_register_pernet_device(). */
+void rpl_unregister_pernet_device(struct extended_pernet_operations *ops)
+{
+ BUG_ON(!dev_ops);
+ BUG_ON(dev_ops != ops);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
+ device_exit_net(NULL);
+#else
+ if (ops->id)
+ unregister_pernet_gen_device(*dev_ops->id, &new_ops);
+ else
+ unregister_pernet_device(&new_ops);
+#endif
+
+ dev_ops = NULL;
+}
+
+/* Pre-2.6.26 kernels have no net_assign_generic(); emulate it for the
+ * single supported id. Later kernels delegate to the real call. */
+int rpl_net_assign_generic(struct net *net, int id, void *data)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
+ BUG_ON(id != 1);
+ ng_data = data;
+ return 0;
+#else
+ return net_assign_generic(net, id, data);
+#endif
+}
-void *net_generic(struct net *net, int id)
+/* Lookup matching rpl_net_assign_generic(); falls through to the real
+ * net_generic() on kernels that provide it. */
+void *rpl_net_generic(struct net *net, int id)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
+ BUG_ON(id != 1);
+ return ng_data;
+#else
+ return net_generic(net, id);
+#endif
+}
-#endif /* kernel < 2.6.26 */
+#endif /* kernel < 2.6.33 */