VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7d72cdb..c34a3a9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -49,6 +49,9 @@
 #include <linux/inet.h>
 #include <linux/slab.h>
 #include <linux/netdevice.h>
+#ifdef CONFIG_NET_CLS_ACT
+#include <net/pkt_sched.h>
+#endif
 #include <linux/string.h>
 #include <linux/skbuff.h>
 #include <linux/cache.h>
@@ -241,6 +244,15 @@ void __kfree_skb(struct sk_buff *skb)
        nf_bridge_put(skb->nf_bridge);
 #endif
 #endif
+/* XXX: Is this still necessary? - JHS */
+#ifdef CONFIG_NET_SCHED
+       skb->tc_index = 0;
+#ifdef CONFIG_NET_CLS_ACT
+       skb->tc_verd = 0;
+       skb->tc_classid = 0;
+#endif
+#endif
+
        kfree_skbmem(skb);
 }
 
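For context, the fields this hunk resets are the per-packet traffic-control state in struct sk_buff. A rough sketch of the relevant members (the field names match the patch; the types and comments are an assumption based on the 2.6.8-era header, not quoted from this diff):

struct sk_buff_tc_sketch {
#ifdef CONFIG_NET_SCHED
        unsigned int tc_index;          /* traffic control index (__u32 in the kernel) */
#ifdef CONFIG_NET_CLS_ACT
        unsigned int tc_verd;           /* traffic control verdict */
        unsigned int tc_classid;        /* traffic control classid */
#endif
#endif
};
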
@@ -312,6 +324,14 @@ struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
 #endif
 #ifdef CONFIG_NET_SCHED
        C(tc_index);
+#ifdef CONFIG_NET_CLS_ACT
+       n->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
+       n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
+       n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
+       C(input_dev);
+       C(tc_classid);
+#endif
+
 #endif
        C(truesize);
        atomic_set(&n->users, 1);
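
The SET_TC_VERD()/CLR_TC_*() macros used above are not defined in this hunk (presumably they arrive via the <net/pkt_sched.h> include added at the top of this patch); the reset follows the usual flags-packed-into-one-word pattern. A minimal illustration under assumed bit positions (the MY_* names and values are stand-ins, not the kernel's real layout):

#define MY_TC_VERD_MASK 0x0f    /* assumed: low bits hold the verdict */
#define MY_TC_MUNGED    0x10    /* assumed flag: payload was modified */
#define MY_TC_OK2MUNGE  0x20    /* assumed flag: modification permitted */

/* Replace the verdict field, preserving the flag bits. */
static inline unsigned int my_set_tc_verd(unsigned int v, unsigned int verd)
{
        return (v & ~MY_TC_VERD_MASK) | (verd & MY_TC_VERD_MASK);
}

/* What the three chained assignments above amount to: the clone starts
 * out with a zeroed verdict and both munge flags cleared. */
static inline unsigned int my_clone_tc_verd(unsigned int v)
{
        v = my_set_tc_verd(v, 0);
        v &= ~MY_TC_OK2MUNGE;
        v &= ~MY_TC_MUNGED;
        return v;
}
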
@@ -366,6 +386,9 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #endif
 #endif
 #ifdef CONFIG_NET_SCHED
+#ifdef CONFIG_NET_CLS_ACT
+       new->tc_verd = old->tc_verd;
+#endif
        new->tc_index   = old->tc_index;
 #endif
        atomic_set(&new->users, 1);
@@ -906,6 +929,70 @@ fault:
        return -EFAULT;
 }
 
+/* Start with skb_iter_first(); call skb_iter_next() until it returns 0. */
+void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i)
+{
+       i->len = skb_headlen(skb);
+       i->data = (unsigned char *)skb->data;
+       i->nextfrag = 0;
+       i->fraglist = NULL;
+}
+
+int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i)
+{
+       /* Unmap previous, if not head fragment. */
+       if (i->nextfrag)
+               kunmap_skb_frag(i->data);
+
+       if (i->fraglist) {
+       fraglist:
+               /* We're iterating through fraglist. */
+               if (i->nextfrag < skb_shinfo(i->fraglist)->nr_frags) {
+                       i->data = kmap_skb_frag(&skb_shinfo(i->fraglist)
+                                               ->frags[i->nextfrag]);
+                       i->len = skb_shinfo(i->fraglist)->frags[i->nextfrag]
+                               .size;
+                       i->nextfrag++;
+                       return 1;
+               }
+               /* Fragments with fragments?  Too hard! */
+               BUG_ON(skb_shinfo(i->fraglist)->frag_list);
+               i->fraglist = i->fraglist->next;
+               if (!i->fraglist)
+                       goto end;
+
+               i->len = skb_headlen(i->fraglist);
+               i->data = i->fraglist->data;
+               i->nextfrag = 0;
+               return 1;
+       }
+
+       if (i->nextfrag < skb_shinfo(skb)->nr_frags) {
+               i->data = kmap_skb_frag(&skb_shinfo(skb)->frags[i->nextfrag]);
+               i->len = skb_shinfo(skb)->frags[i->nextfrag].size;
+               i->nextfrag++;
+               return 1;
+       }
+
+       i->fraglist = skb_shinfo(skb)->frag_list;
+       if (i->fraglist)
+               goto fraglist;
+
+end:
+       /* Bug trap for callers */
+       i->data = NULL;
+       return 0;
+}
+
+void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i)
+{
+       /* Unmap previous, if not head fragment. */
+       if (i->data && i->nextfrag)
+               kunmap_skb_frag(i->data);
+       /* Bug trap for callers */
+       i->data = NULL;
+}
+
 /* Checksum skb data. */
 
 unsigned int skb_checksum(const struct sk_buff *skb, int offset,
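
A minimal sketch of the calling pattern the iterator above is designed for (skb_xor_bytes() and its XOR payload are illustrative only, not part of the patch; it assumes the struct skb_iter definition this patch adds to <linux/skbuff.h>):

#include <linux/skbuff.h>

/* Hypothetical helper: XOR together every payload byte of an skb,
 * covering the linear head, the page frags and the frag_list. */
static u8 skb_xor_bytes(const struct sk_buff *skb)
{
        struct skb_iter iter;
        unsigned int j;
        u8 acc = 0;

        skb_iter_first(skb, &iter);
        do {
                for (j = 0; j < iter.len; j++)
                        acc ^= iter.data[j];
        } while (skb_iter_next(skb, &iter));

        return acc;
}

A caller that stops before skb_iter_next() has returned 0 must call skb_iter_abort() instead, so that the last fragment mapping is unmapped.
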
@@ -1263,6 +1350,81 @@ void skb_add_mtu(int mtu)
 }
 #endif
 
+static inline void skb_split_inside_header(struct sk_buff *skb,
+                                          struct sk_buff *skb1,
+                                          const u32 len, const int pos)
+{
+       int i;
+
+       memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);
+
+       /* And move the paged data over as is. */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+               skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
+
+       skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
+       skb_shinfo(skb)->nr_frags  = 0;
+       skb1->data_len             = skb->data_len;
+       skb1->len                  += skb1->data_len;
+       skb->data_len              = 0;
+       skb->len                   = len;
+       skb->tail                  = skb->data + len;
+}
+
+static inline void skb_split_no_header(struct sk_buff *skb,
+                                      struct sk_buff *skb1,
+                                      const u32 len, int pos)
+{
+       int i, k = 0;
+       const int nfrags = skb_shinfo(skb)->nr_frags;
+
+       skb_shinfo(skb)->nr_frags = 0;
+       skb1->len                 = skb1->data_len = skb->len - len;
+       skb->len                  = len;
+       skb->data_len             = len - pos;
+
+       for (i = 0; i < nfrags; i++) {
+               int size = skb_shinfo(skb)->frags[i].size;
+
+               if (pos + size > len) {
+                       skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
+
+                       if (pos < len) {
+                               /* Split frag.
+                                * We have two variants in this case:
+                                * 1. Move the whole frag to the second
+                                *    part, if possible. E.g. this
+                                *    approach is mandatory for TUX,
+                                *    where splitting is expensive.
+                                * 2. Split accurately, as done here.
+                                */
+                               get_page(skb_shinfo(skb)->frags[i].page);
+                               skb_shinfo(skb1)->frags[0].page_offset += len - pos;
+                               skb_shinfo(skb1)->frags[0].size -= len - pos;
+                               skb_shinfo(skb)->frags[i].size  = len - pos;
+                               skb_shinfo(skb)->nr_frags++;
+                       }
+                       k++;
+               } else
+                       skb_shinfo(skb)->nr_frags++;
+               pos += size;
+       }
+       skb_shinfo(skb1)->nr_frags = k;
+}
+
+/**
+ * skb_split - Split fragmented skb into two parts at length len.
+ */
+void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
+{
+       int pos = skb_headlen(skb);
+
+       if (len < pos)  /* Split line is inside header. */
+               skb_split_inside_header(skb, skb1, len, pos);
+       else            /* Second chunk has no header, nothing to copy. */
+               skb_split_no_header(skb, skb1, len, pos);
+}
+
 void __init skb_init(void)
 {
        skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
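
A hedged sketch of how a transmit path might use skb_split() above (carve_off(), the GFP_ATOMIC choice and the allocation-size policy are assumptions for illustration, not taken from the patch):

#include <linux/skbuff.h>

/* Hypothetical caller: keep the first len bytes in skb and move the
 * remainder into a freshly allocated buffer.  Returns NULL, leaving
 * skb untouched, if the allocation fails. */
static struct sk_buff *carve_off(struct sk_buff *skb, u32 len)
{
        /* If the split point falls inside the linear header, skb1
         * needs linear room for the copied tail of the head;
         * skb_headlen() is a safe upper bound for that. */
        struct sk_buff *skb1 = alloc_skb(skb_headlen(skb), GFP_ATOMIC);

        if (!skb1)
                return NULL;

        skb_split(skb, skb1, len);      /* skb keeps bytes [0, len) */
        return skb1;                    /* skb1 holds bytes [len, old len) */
}
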
@@ -1300,3 +1462,7 @@ EXPORT_SYMBOL(skb_queue_head);
 EXPORT_SYMBOL(skb_queue_tail);
 EXPORT_SYMBOL(skb_unlink);
 EXPORT_SYMBOL(skb_append);
+EXPORT_SYMBOL(skb_split);
+EXPORT_SYMBOL(skb_iter_first);
+EXPORT_SYMBOL(skb_iter_next);
+EXPORT_SYMBOL(skb_iter_abort);