1 commit a57d715bc58005cfae0fdf1626ebf11b11508025
2 Author: root <root@rhel6.(none)>
3 Date: Thu Apr 29 10:01:21 2010 -0400
5 linux-2.6-525-sknid-elevator.patch
7 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
8 index 4267c8b..3f36a91 100644
9 --- a/include/linux/netdevice.h
10 +++ b/include/linux/netdevice.h
11 @@ -1057,6 +1057,7 @@ struct napi_gro_cb {
13 __be16 type; /* This is really htons(ether_type). */
14 struct net_device *dev; /* NULL is wildcarded here */
15 + unsigned char sknid_elevator;
16 int (*func) (struct sk_buff *,
19 diff --git a/net/core/dev.c b/net/core/dev.c
20 index 8b6b941..651a1c3 100644
24 #include <linux/proc_fs.h>
25 #include <linux/seq_file.h>
26 #include <linux/stat.h>
27 +#include <linux/ip.h>
28 +#include <linux/tcp.h>
29 #include <linux/if_bridge.h>
30 #include <linux/if_macvlan.h>
32 @@ -2275,6 +2277,10 @@ void netif_nit_deliver(struct sk_buff *skb)
36 +/* The code already makes the assumption that packet handlers run
37 + * sequentially on the same CPU. -Sapan */
38 +DEFINE_PER_CPU(int, sknid_elevator) = 0;
41 * netif_receive_skb - process receive buffer from network
42 * @skb: buffer to process
43 @@ -2296,8 +2302,11 @@ int netif_receive_skb(struct sk_buff *skb)
44 struct net_device *orig_dev;
45 struct net_device *null_or_orig;
46 int ret = NET_RX_DROP;
47 + int *cur_elevator = &__get_cpu_var(sknid_elevator);
52 if (!skb->tstamp.tv64)
55 @@ -2373,7 +2382,27 @@ ncls:
59 + /* At this point, cur_elevator may be -2 or a positive value, in
60 + * case a previous protocol handler marked it */
61 + if (*cur_elevator) {
62 + atomic_inc(&skb->users);
65 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
67 + if ((*cur_elevator)>0) {
68 + skb->skb_tag = *cur_elevator;
69 + list_for_each_entry_rcu(ptype, &ptype_all, list) {
70 + if ((!ptype->dev || ptype->dev == skb->dev) && (ptype->sknid_elevator)) {
71 + ret = deliver_skb(skb, ptype, orig_dev);
76 + if (*cur_elevator) {
77 + /* We have a packet */
82 /* Jamal, now you will not able to escape explaining
83 @@ -4127,6 +4156,7 @@ unsigned dev_get_flags(const struct net_device *dev)
86 EXPORT_SYMBOL(dev_get_flags);
87 +EXPORT_PER_CPU_SYMBOL(sknid_elevator);
90 * dev_change_flags - change device settings
91 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
92 index 1bd109e..5c2e9ad 100644
93 --- a/net/packet/af_packet.c
94 +++ b/net/packet/af_packet.c
96 #include <linux/poll.h>
97 #include <linux/module.h>
98 #include <linux/init.h>
99 +#include <linux/vs_network.h>
100 #include <linux/mutex.h>
103 @@ -337,12 +338,54 @@ static const struct proto_ops packet_ops;
105 static const struct proto_ops packet_ops_spkt;
107 +extern DEFINE_PER_CPU(int, sknid_elevator);
109 +static inline unsigned int slice_check_and_elevate(struct sk_buff *skb, struct sock *sk) {
110 + /* This mechanism is quite involved, and caused us a lot of pain
111 + * including crashes and packet loss during the 4.2 rollout. This
112 + * function decides if a slice is allowed to see a given packet.
113 + * Unfortunately, the first time it is invoked for a packet it does not
114 + * have enough information to make this call, since xt_MARK has not had
115 + * a chance to tag it with the slice id. There is also no way of
116 + * passing state between xt_MARK and this function through a packet --
117 + * because the skb gets cloned quite a few times between these two
118 + * points. I'd rather not use skb_shared_info because it's treated as
119 + * a blob of memory, and so it would be quite hard to maintain.
121 + * What we do is to keep a global variable (per CPU) that transfers the
122 + * required state between xt_MARK and af_packet.c. As an optimization,
123 + * this state transfer and the step that follows is only executed for
124 + * packets that first get dropped here. When we drop a packet, we mark
125 + * it for 'elevation' (that's what this trick is called). When xt_MARK
126 + * tags the packet with the right slice, it intercepts this mark and
127 + * sets the value of sknid_elevator. Next, the packet is sent back here
128 + * for a second round, this time with the xid tag set.
131 + int *elevator=&__get_cpu_var(sknid_elevator);
132 + int tag = skb->skb_tag;
134 + if (sk->sk_nx_info && !(tag == 1 || sk->sk_nid == tag)) {
135 + if (skb->pkt_type==PACKET_HOST) {
136 + *elevator=-2; /* Rejecting this packet. Mark it for elevation in xt_MARK */
140 + else if (!sk->sk_nx_info && (*elevator>0)) {
141 + /* Root has already seen this packet once, since it has been elevated */
148 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
149 struct packet_type *pt, struct net_device *orig_dev)
152 struct sockaddr_pkt *spkt;
154 + int tag = skb->skb_tag;
157 * When we registered the protocol we saved the socket in the data
158 * field for just this event.
159 @@ -361,6 +404,16 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
160 * so that this procedure is noop.
164 + * (18:05:41) daniel_hozac: where?
165 + * (18:05:58) daniel_hozac: we already have filters on PF_PACKET, don't we?
 166 + * (18:05:58) er: in packet_rcv_spkt
167 + * (18:07:33) daniel_hozac: oh, that's evil.
170 + if (!slice_check_and_elevate(skb, sk))
173 if (skb->pkt_type == PACKET_LOOPBACK)
176 @@ -419,6 +472,9 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
180 + if (!nx_capable(CAP_NET_RAW, NXC_RAW_SEND))
184 * Get and verify the address.
186 @@ -509,11 +565,16 @@ out_unlock:
192 static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
195 struct sk_filter *filter;
197 + if (!slice_check_and_elevate(skb, sk))
201 filter = rcu_dereference(sk->sk_filter);
203 @@ -1063,6 +1124,9 @@ static int packet_snd(struct socket *sock,
205 int ifindex, err, reserve = 0;
207 + if (!nx_capable(CAP_NET_RAW, NXC_RAW_SEND))
211 * Get and verify the address.
213 @@ -1248,6 +1312,7 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
216 po->prot_hook.type = protocol;
217 + po->prot_hook.sknid_elevator = 1;
218 po->prot_hook.dev = dev;
220 po->ifindex = dev ? dev->ifindex : 0;
221 @@ -1348,8 +1413,9 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
222 __be16 proto = (__force __be16)protocol; /* weird, but documented */
225 - if (!capable(CAP_NET_RAW))
226 + if (!nx_capable(CAP_NET_RAW, NXC_RAW_SOCKET))
229 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
230 sock->type != SOCK_PACKET)
231 return -ESOCKTNOSUPPORT;
232 @@ -1381,6 +1447,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
233 spin_lock_init(&po->bind_lock);
234 mutex_init(&po->pg_vec_lock);
235 po->prot_hook.func = packet_rcv;
236 + po->prot_hook.sknid_elevator = 1;
238 if (sock->type == SOCK_PACKET)
239 po->prot_hook.func = packet_rcv_spkt;