1 Index: linux-2.6.27.y/include/linux/netdevice.h
2 ===================================================================
3 --- linux-2.6.27.y.orig/include/linux/netdevice.h
4 +++ linux-2.6.27.y/include/linux/netdevice.h
5 @@ -857,6 +857,7 @@ static inline void netif_napi_del(struct
7 __be16 type; /* This is really htons(ether_type). */
8 struct net_device *dev; /* NULL is wildcarded here */
9 + unsigned char sknid_elevator;
10 int (*func) (struct sk_buff *,
13 Index: linux-2.6.27.y/net/core/dev.c
14 ===================================================================
15 --- linux-2.6.27.y.orig/net/core/dev.c
16 +++ linux-2.6.27.y/net/core/dev.c
18 #include <linux/proc_fs.h>
19 #include <linux/seq_file.h>
20 #include <linux/stat.h>
21 +#include <linux/ip.h>
22 +#include <linux/tcp.h>
23 #include <linux/if_bridge.h>
24 #include <linux/if_macvlan.h>
26 @@ -1318,7 +1320,7 @@ static void dev_queue_xmit_nit(struct sk
27 if ((ptype->dev == dev || !ptype->dev) &&
28 (ptype->af_packet_priv == NULL ||
29 (struct sock *)ptype->af_packet_priv != skb->sk)) {
30 - struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
31 + struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
35 @@ -2170,6 +2172,10 @@ void netif_nit_deliver(struct sk_buff *s
39 +/* The code already makes the assumption that packet handlers run
40 + * sequentially on the same CPU. -Sapan */
41 +DEFINE_PER_CPU(int, sknid_elevator) = 0;
44 * netif_receive_skb - process receive buffer from network
45 * @skb: buffer to process
46 @@ -2191,8 +2197,11 @@ int netif_receive_skb(struct sk_buff *sk
47 struct net_device *orig_dev;
48 struct net_device *null_or_orig;
49 int ret = NET_RX_DROP;
50 + int *cur_elevator = &__get_cpu_var(sknid_elevator);
55 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
56 return NET_RX_SUCCESS;
58 @@ -2272,7 +2281,27 @@ ncls:
62 + /* At this point, cur_elevator may be -2 or a positive value, in
63 + * case a previous protocol handler marked it */
64 + if (*cur_elevator) {
65 + atomic_inc(&skb->users);
68 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
70 + if ((*cur_elevator)>0) {
71 + skb->skb_tag = *cur_elevator;
72 + list_for_each_entry_rcu(ptype, &ptype_all, list) {
73 + if ((!ptype->dev || ptype->dev == skb->dev) && (ptype->sknid_elevator)) {
74 + ret = deliver_skb(skb, ptype, orig_dev);
79 + if (*cur_elevator) {
80 + /* We have a packet */
85 /* Jamal, now you will not able to escape explaining
86 @@ -4895,6 +4924,7 @@ EXPORT_SYMBOL(unregister_netdevice_notif
87 EXPORT_SYMBOL(net_enable_timestamp);
88 EXPORT_SYMBOL(net_disable_timestamp);
89 EXPORT_SYMBOL(dev_get_flags);
90 +EXPORT_PER_CPU_SYMBOL(sknid_elevator);
92 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
93 EXPORT_SYMBOL(br_handle_frame_hook);
94 Index: linux-2.6.27.y/net/packet/af_packet.c
95 ===================================================================
96 --- linux-2.6.27.y.orig/net/packet/af_packet.c
97 +++ linux-2.6.27.y/net/packet/af_packet.c
99 #include <linux/poll.h>
100 #include <linux/module.h>
101 #include <linux/init.h>
102 +#include <linux/vs_network.h>
103 #include <linux/mutex.h>
106 @@ -278,10 +279,53 @@ static const struct proto_ops packet_ops
108 static const struct proto_ops packet_ops_spkt;
110 +extern DEFINE_PER_CPU(int, sknid_elevator);
112 +static inline unsigned int slice_check_and_elevate(struct sk_buff *skb, struct sock *sk) {
113 + /* This mechanism is quite involved, and caused us a lot of pain
114 + * including crashes and packet loss during the 4.2 rollout. This
115 + * function decides if a slice is allowed to see a given packet.
116 + * Unfortunately, the first time it is invoked for a packet it does not
117 + * have enough information to make this call, since xt_MARK has not had
118 + * a chance to tag it with the slice id. There is also no way of
119 + * passing state between xt_MARK and this function through a packet --
120 + * because the skb gets cloned quite a few times between these two
121 + * points. I'd rather not use skb_shared_info because it's treated as
122 + * a blob of memory, and so it would be quite hard to maintain.
124 + * What we do is to keep a global variable (per CPU) that transfers the
125 + * required state between xt_MARK and af_packet.c. As an optimization,
126 + * this state transfer and the step that follows is only executed for
127 + * packets that first get dropped here. When we drop a packet, we mark
128 + * it for 'elevation' (that's what this trick is called). When xt_MARK
129 + * tags the packet with the right slice, it intercepts this mark and
130 + * sets the value of sknid_elevator. Next, the packet is sent back here
131 + * for a second round, this time with the xid tag set.
134 + int *elevator=&__get_cpu_var(sknid_elevator);
135 + int tag = skb->skb_tag;
137 + if (sk->sk_nx_info && !(tag == 1 || sk->sk_nid == tag)) {
138 + if (skb->pkt_type==PACKET_HOST) {
139 + *elevator=-2; /* Rejecting this packet. Mark it for elevation in xt_MARK */
143 + else if (!sk->sk_nx_info && (*elevator>0)) {
144 + /* Root has already seen this packet once, since it has been elevated */
151 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
154 struct sockaddr_pkt *spkt;
155 + int tag = skb->skb_tag;
159 * When we registered the protocol we saved the socket in the data
160 @@ -301,6 +345,16 @@ static int packet_rcv_spkt(struct sk_buf
161 * so that this procedure is noop.
165 + * (18:05:41) daniel_hozac: where?
166 + * (18:05:58) daniel_hozac: we already have filters on PF_PACKET, don't we?
167 + * (18:05:58) er: in packet_rcv_spkt
168 + * (18:07:33) daniel_hozac: oh, that's evil.
171 + if (!slice_check_and_elevate(skb, sk))
174 if (skb->pkt_type == PACKET_LOOPBACK)
177 @@ -359,6 +413,9 @@ static int packet_sendmsg_spkt(struct ki
181 + if (!nx_capable(CAP_NET_RAW, NXC_RAW_SEND))
185 * Get and verify the address.
187 @@ -451,11 +508,16 @@ out_unlock:
193 static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
196 struct sk_filter *filter;
198 + if (!slice_check_and_elevate(skb, sk))
202 filter = rcu_dereference(sk->sk_filter);
204 @@ -775,6 +837,9 @@ static int packet_sendmsg(struct kiocb *
206 int ifindex, err, reserve = 0;
208 + if (!nx_capable(CAP_NET_RAW, NXC_RAW_SEND))
212 * Get and verify the address.
214 @@ -941,6 +1006,7 @@ static int packet_do_bind(struct sock *s
217 po->prot_hook.type = protocol;
218 + po->prot_hook.sknid_elevator = 1;
219 po->prot_hook.dev = dev;
221 po->ifindex = dev ? dev->ifindex : 0;
222 @@ -1039,8 +1105,9 @@ static int packet_create(struct net *net
223 __be16 proto = (__force __be16)protocol; /* weird, but documented */
226 - if (!capable(CAP_NET_RAW))
227 + if (!nx_capable(CAP_NET_RAW, NXC_RAW_SOCKET))
230 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
231 sock->type != SOCK_PACKET)
232 return -ESOCKTNOSUPPORT;
233 @@ -1072,6 +1139,7 @@ static int packet_create(struct net *net
234 spin_lock_init(&po->bind_lock);
235 mutex_init(&po->pg_vec_lock);
236 po->prot_hook.func = packet_rcv;
237 + po->prot_hook.sknid_elevator = 1;
239 if (sock->type == SOCK_PACKET)
240 po->prot_hook.func = packet_rcv_spkt;