diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f774929..c83dd4c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -163,6 +163,59 @@ nodata:
        goto out;
 }
 
+/**
+ *     alloc_skb_from_cache    -       allocate a network buffer
+ *     @cp: kmem_cache from which to allocate the data area
+ *           (object size must be big enough for @size bytes + skb overheads)
+ *     @size: size to allocate
+ *     @gfp_mask: allocation mask
+ *
+ *     Allocate a new &sk_buff. The returned buffer has no headroom and
+ *     a tail room of @size bytes. The object has a reference count of one.
+ *     The return is the buffer. On a failure the return is %NULL.
+ *
+ *     Buffers may only be allocated from interrupts using a @gfp_mask of
+ *     %GFP_ATOMIC.
+ */
+struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+                                    unsigned int size, int gfp_mask)
+{
+       struct sk_buff *skb;
+       u8 *data;
+
+       /* Get the HEAD */
+       skb = kmem_cache_alloc(skbuff_head_cache,
+                              gfp_mask & ~__GFP_DMA);
+       if (!skb)
+               goto out;
+
+       /* Get the DATA. */
+       size = SKB_DATA_ALIGN(size);
+       data = kmem_cache_alloc(cp, gfp_mask);
+       if (!data)
+               goto nodata;
+
+       memset(skb, 0, offsetof(struct sk_buff, truesize));
+       skb->truesize = size + sizeof(struct sk_buff);
+       atomic_set(&skb->users, 1);
+       skb->head = data;
+       skb->data = data;
+       skb->tail = data;
+       skb->end  = data + size;
+
+       atomic_set(&(skb_shinfo(skb)->dataref), 1);
+       skb_shinfo(skb)->nr_frags  = 0;
+       skb_shinfo(skb)->tso_size = 0;
+       skb_shinfo(skb)->tso_segs = 0;
+       skb_shinfo(skb)->frag_list = NULL;
+out:
+       return skb;
+nodata:
+       kmem_cache_free(skbuff_head_cache, skb);
+       skb = NULL;
+       goto out;
+}
+
 
 static void skb_drop_fraglist(struct sk_buff *skb)
 {
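
For context, a minimal usage sketch (not part of the patch; the cache name, payload size, and init function are hypothetical): the backing cache must be sized for the aligned payload plus the struct skb_shared_info that alloc_skb_from_cache() places at skb->end.

/* Hypothetical caller, assuming the 2.6-era 6-argument kmem_cache_create(). */
#define MY_PKT_SIZE 2048                        /* illustrative payload size */

static kmem_cache_t *my_skb_cache;              /* hypothetical cache */

static int my_cache_init(void)
{
        /* object size = aligned data area + shared info at skb->end */
        my_skb_cache = kmem_cache_create("my_skb_cache",
                                         SKB_DATA_ALIGN(MY_PKT_SIZE) +
                                         sizeof(struct skb_shared_info),
                                         0, 0, NULL, NULL);
        return my_skb_cache ? 0 : -ENOMEM;
}

static struct sk_buff *my_alloc_skb(void)
{
        /* %GFP_ATOMIC keeps this safe from interrupt context */
        return alloc_skb_from_cache(my_skb_cache, MY_PKT_SIZE, GFP_ATOMIC);
}

Freeing can go through the normal kfree_skb() path: the slab allocator of this era resolves the owning cache from the page when kfree() is called on skb->head.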
@@ -394,6 +447,8 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        new->tc_index   = old->tc_index;
 #endif
        atomic_set(&new->users, 1);
+       skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
+       skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
 }
 
 /**
@@ -483,8 +538,6 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
                }
                skb_shinfo(n)->nr_frags = i;
        }
-       skb_shinfo(n)->tso_size = skb_shinfo(skb)->tso_size;
-       skb_shinfo(n)->tso_segs = skb_shinfo(skb)->tso_segs;
 
        if (skb_shinfo(skb)->frag_list) {
                skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
@@ -631,8 +684,6 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                BUG();
 
        copy_skb_header(n, skb);
-       skb_shinfo(n)->tso_size = skb_shinfo(skb)->tso_size;
-       skb_shinfo(n)->tso_segs = skb_shinfo(skb)->tso_segs;
 
        return n;
 }
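
These three hunks consolidate the TSO metadata copy: copy_skb_header() now carries tso_size and tso_segs across, so the duplicate assignments in pskb_copy() and skb_copy_expand() go away, and other callers of copy_skb_header() (skb_copy(), for one) pick the fields up as well. A hedged sketch of the resulting invariant (the skb_copy()/WARN_ON usage is illustrative, not from the patch):

        struct sk_buff *n = skb_copy(skb, GFP_ATOMIC);

        if (n) {
                /* any copy routed through copy_skb_header() keeps TSO state */
                WARN_ON(skb_shinfo(n)->tso_size != skb_shinfo(skb)->tso_size);
                WARN_ON(skb_shinfo(n)->tso_segs != skb_shinfo(skb)->tso_segs);
                kfree_skb(n);
        }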
@@ -1352,7 +1403,7 @@ void skb_add_mtu(int mtu)
 }
 #endif
 
-static void inline skb_split_inside_header(struct sk_buff *skb,
+static inline void skb_split_inside_header(struct sk_buff *skb,
                                           struct sk_buff* skb1,
                                           const u32 len, const int pos)
 {
@@ -1373,7 +1424,7 @@ static void inline skb_split_inside_header(struct sk_buff *skb,
        skb->tail                  = skb->data + len;
 }
 
-static void inline skb_split_no_header(struct sk_buff *skb,
+static inline void skb_split_no_header(struct sk_buff *skb,
                                       struct sk_buff* skb1,
                                       const u32 len, int pos)
 {
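
The last two hunks only reorder declaration specifiers. C accepts "static void inline", but GCC warns that 'inline' is not at the beginning of the declaration (under -Wold-style-declaration), and kernel style puts the storage class and inline ahead of the return type. A minimal sketch of the two forms (function names are illustrative):

        static void inline old_form(void) { }   /* legal, but draws the warning */
        static inline void new_form(void) { }   /* preferred specifier order */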