++
++static inline unsigned int slice_check_and_elevate(struct sk_buff *skb, struct sock *sk)
++{
++	/* Decide whether a slice socket may see this packet.
++	 *
++	 * The difficulty: on the first pass a packet has not yet been
++	 * tagged with a slice id by xt_MARK, so we cannot yet tell whether
++	 * the slice is entitled to it.  The skb is cloned several times
++	 * between xt_MARK and this point, so no state can be carried in
++	 * the skb itself (and stashing it in skb_shared_info would be
++	 * fragile, since that area is treated as an opaque blob).
++	 *
++	 * Instead a per-CPU variable (sknid_elevator) shuttles state
++	 * between xt_MARK and af_packet.c.  As an optimisation, the extra
++	 * round trip only happens for packets we reject here: on rejection
++	 * we flag the packet for "elevation", xt_MARK notices the flag,
++	 * tags the packet with the proper slice id, records it in
++	 * sknid_elevator, and resubmits the packet for a second pass with
++	 * the xid tag in place.  This machinery caused crashes and packet
++	 * loss during the 4.2 rollout, so tread carefully.
++	 */
++
++	int *pcpu_elev = &__get_cpu_var(sknid_elevator);
++	int slice_tag = skb->skb_tag;
++
++	if (!sk->sk_nx_info) {
++		/* Root context: a positive elevator value means root has
++		 * already seen this (elevated) packet once -- suppress the
++		 * duplicate delivery.
++		 */
++		return (*pcpu_elev > 0) ? 0 : 1;
++	}
++
++	/* Slice socket: accept when the tag matches this slice's nid,
++	 * or when the tag is the wildcard value 1.
++	 */
++	if (slice_tag == 1 || sk->sk_nid == slice_tag)
++		return 1;
++
++	/* Wrong slice. For host-bound packets, ask xt_MARK (via the
++	 * per-CPU elevator) to tag and resubmit the packet.
++	 */
++	if (skb->pkt_type == PACKET_HOST)
++		*pcpu_elev = -2;
++
++	return 0;
++}
++