diff -Nurb linux-2.6.22-524/include/linux/netdevice.h linux-2.6.22-525/include/linux/netdevice.h
--- linux-2.6.22-524/include/linux/netdevice.h 2008-07-27 22:06:14.000000000 -0400
+++ linux-2.6.22-525/include/linux/netdevice.h 2008-07-27 22:17:30.000000000 -0400
 __be16 type; /* This is really htons(ether_type). */
 struct net_device *dev; /* NULL is wildcarded here */
+ unsigned char sknid_elevator;
 int (*func) (struct sk_buff *,
diff -Nurb linux-2.6.22-524/net/core/dev.c linux-2.6.22-525/net/core/dev.c
--- linux-2.6.22-524/net/core/dev.c 2008-07-27 22:06:20.000000000 -0400
+++ linux-2.6.22-525/net/core/dev.c 2008-07-28 09:26:45.000000000 -0400
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/stat.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
 #include <linux/if_bridge.h>
 #include <net/pkt_sched.h>
 if ((ptype->dev == dev || !ptype->dev) &&
 (ptype->af_packet_priv == NULL ||
 (struct sock *)ptype->af_packet_priv != skb->sk)) {
- struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
+ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 * the ingress scheduler, you just cant add policies on ingress.
 static int ing_filter(struct sk_buff *skb)
@@ -1832,13 +1835,20 @@
+/* The code already assumes that packet handlers run
+ * sequentially on the same CPU. -Sapan */
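+/* Values: 0 means no elevation is pending; -2 means a PF_PACKET handler
+ * rejected a packet and wants it back once xt_MARK has tagged it with a
+ * slice id; a positive value is the xid to copy into skb->skb_tag before
+ * the packet is re-delivered. */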
+DEFINE_PER_CPU(int, sknid_elevator) = 0;
 int netif_receive_skb(struct sk_buff *skb)
 struct packet_type *ptype, *pt_prev;
 struct net_device *orig_dev;
 int ret = NET_RX_DROP;
+ int *cur_elevator = &__get_cpu_var(sknid_elevator);
 /* if we've gotten here through NAPI, check netpoll */
 if (skb->dev->poll && netpoll_rx(skb))
 list_for_each_entry_rcu(ptype, &ptype_all, list) {
 if (!ptype->dev || ptype->dev == skb->dev) {
 ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -1913,7 +1924,27 @@
+ /* At this point, cur_elevator may be -2 or a positive value, if a
+ * previous protocol handler has marked it */
+ if (*cur_elevator) {
+ atomic_inc(&skb->users);
 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
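+ /* The protocol handler runs the netfilter hooks, where xt_MARK can
+ * replace the -2 mark with the slice xid; if that happened, tag the
+ * skb and re-deliver it to the handlers that opted in via
+ * sknid_elevator. */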
+ if ((*cur_elevator) > 0) {
+ skb->skb_tag = *cur_elevator;
+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ if ((!ptype->dev || ptype->dev == skb->dev) && (ptype->sknid_elevator)) {
+ ret = deliver_skb(skb, ptype, orig_dev);
+ if (*cur_elevator) {
+ /* We have a packet */
 /* Jamal, now you will not able to escape explaining
@@ -3780,6 +3811,7 @@
 EXPORT_SYMBOL(net_enable_timestamp);
 EXPORT_SYMBOL(net_disable_timestamp);
 EXPORT_SYMBOL(dev_get_flags);
+EXPORT_PER_CPU_SYMBOL(sknid_elevator);
 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
 EXPORT_SYMBOL(br_handle_frame_hook);
diff -Nurb linux-2.6.22-524/net/packet/af_packet.c linux-2.6.22-525/net/packet/af_packet.c
--- linux-2.6.22-524/net/packet/af_packet.c 2007-07-08 19:32:17.000000000 -0400
+++ linux-2.6.22-525/net/packet/af_packet.c 2008-07-27 22:06:27.000000000 -0400
 #include <linux/poll.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/vs_network.h>
 #include <net/inet_common.h>
@@ -246,10 +247,53 @@
 static const struct proto_ops packet_ops_spkt;
+DECLARE_PER_CPU(int, sknid_elevator);
+static inline unsigned int slice_check_and_elevate(struct sk_buff *skb, struct sock *sk)
+{
+ /* This mechanism is quite involved, and caused us a lot of pain
+ * including crashes and packet loss during the 4.2 rollout. This
+ * function decides if a slice is allowed to see a given packet.
+ * Unfortunately, the first time it is invoked for a packet it does not
+ * have enough information to make this call, since xt_MARK has not had
+ * a chance to tag it with the slice id. There is also no way of
+ * passing state between xt_MARK and this function through the packet
+ * itself, because the skb gets cloned quite a few times between these
+ * two points. I'd rather not use skb_shared_info because it's treated
+ * as a blob of memory, and so it would be quite hard to maintain.
+ *
+ * What we do is keep a per-CPU global variable that transfers the
+ * required state between xt_MARK and af_packet.c. As an optimization,
+ * this state transfer and the step that follows are only executed for
+ * packets that first get dropped here. When we drop a packet, we mark
+ * it for 'elevation' (that's what this trick is called). When xt_MARK
+ * tags the packet with the right slice, it intercepts this mark and
+ * sets the value of sknid_elevator. Next, the packet is sent back here
+ * for a second round, this time with the xid tag set.
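+ *
+ * So the typical flow is: this function rejects the untagged packet and
+ * sets the elevator to -2; the protocol handler then runs xt_MARK, which
+ * replaces the -2 with the slice xid; netif_receive_skb() tags the skb
+ * and re-delivers it, and on that second pass the check below lets the
+ * owning slice's socket see the packet.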
+ int *elevator = &__get_cpu_var(sknid_elevator);
+ int tag = skb->skb_tag;
+ if (sk->sk_nx_info && !(tag == 1 || sk->sk_nid == tag)) {
+ if (skb->pkt_type == PACKET_HOST) {
+ *elevator = -2; /* Rejecting this packet. Mark it for elevation in xt_MARK */
+ else if (!sk->sk_nx_info && (*elevator > 0)) {
+ /* Root has already seen this packet once, since it has been elevated */
 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
 struct sockaddr_pkt *spkt;
+ int tag = skb->skb_tag;
 * When we registered the protocol we saved the socket in the data
 * so that this procedure is noop.
+ * (18:05:41) daniel_hozac: where?
+ * (18:05:58) daniel_hozac: we already have filters on PF_PACKET, don't we?
+ * (18:05:58) er: in packet_rcv_skpt
+ * (18:07:33) daniel_hozac: oh, that's evil.
+ if (!slice_check_and_elevate(skb, sk))
 if (skb->pkt_type == PACKET_LOOPBACK)
+ if (!nx_capable(CAP_NET_RAW, NXC_RAW_SEND))
 * Get and verify the address.
@@ -416,11 +473,16 @@
 static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
 struct sk_filter *filter;
+ if (!slice_check_and_elevate(skb, sk))
 filter = rcu_dereference(sk->sk_filter);
 int ifindex, err, reserve = 0;
+ if (!nx_capable(CAP_NET_RAW, NXC_RAW_SEND))
 * Get and verify the address.
 po->prot_hook.type = protocol;
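+ /* Opt this handler in to the second (elevated) delivery pass in
+ * netif_receive_skb(). */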
+ po->prot_hook.sknid_elevator = 1;
 po->prot_hook.dev = dev;
 po->ifindex = dev ? dev->ifindex : 0;
 __be16 proto = (__force __be16)protocol; /* weird, but documented */
- if (!capable(CAP_NET_RAW))
+ if (!nx_capable(CAP_NET_RAW, NXC_RAW_SOCKET))
 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
 sock->type != SOCK_PACKET)
 return -ESOCKTNOSUPPORT;
@@ -1016,6 +1083,7 @@
 spin_lock_init(&po->bind_lock);
 po->prot_hook.func = packet_rcv;
+ po->prot_hook.sknid_elevator = 1;
 if (sock->type == SOCK_PACKET)
 po->prot_hook.func = packet_rcv_spkt;