X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=net%2Fcore%2Fskbuff.c;fp=net%2Fcore%2Fskbuff.c;h=a26b2348bc9bce70bf69b094361cd8d2bcb0f975;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=750ec6f8a7760162b47e1bcd8769cc7583270fca;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 750ec6f8a..a26b2348b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -38,6 +38,7 @@
  *	The functions in this file will not compile correctly with gcc 2.4.x
  */
 
+#include 
 #include 
 #include 
 #include 
@@ -111,14 +112,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 	BUG();
 }
 
-void skb_truesize_bug(struct sk_buff *skb)
-{
-	printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
-	       "len=%u, sizeof(sk_buff)=%Zd\n",
-	       skb->truesize, skb->len, sizeof(struct sk_buff));
-}
-EXPORT_SYMBOL(skb_truesize_bug);
-
 /*	Allocate a new skbuff. We do this ourselves so we can fill in a few
  *	'private' fields and also do memory statistics to find all the
  *	[BEEP] leaks.
@@ -139,7 +132,6 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *	Buffers may only be allocated from interrupts using a @gfp_mask of
  *	%GFP_ATOMIC.
  */
-#ifndef CONFIG_HAVE_ARCH_ALLOC_SKB
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 			    int fclone)
 {
@@ -157,7 +149,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 
 	/* Get the DATA. Size must match skb_add_mtu(). */
 	size = SKB_DATA_ALIGN(size);
-	data = ____kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
 	if (!data)
 		goto nodata;
 
@@ -172,9 +164,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	shinfo = skb_shinfo(skb);
 	atomic_set(&shinfo->dataref, 1);
 	shinfo->nr_frags = 0;
-	shinfo->gso_size = 0;
-	shinfo->gso_segs = 0;
-	shinfo->gso_type = 0;
+	shinfo->tso_size = 0;
+	shinfo->tso_segs = 0;
+	shinfo->ufo_size = 0;
 	shinfo->ip6_frag_id = 0;
 	shinfo->frag_list = NULL;
 
@@ -194,7 +186,6 @@ nodata:
 	skb = NULL;
 	goto out;
 }
-#endif /* !CONFIG_HAVE_ARCH_ALLOC_SKB */
 
 /**
  *	alloc_skb_from_cache - allocate a network buffer
@@ -212,17 +203,14 @@ nodata:
  */
 struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
 				     unsigned int size,
-				     gfp_t gfp_mask,
-				     int fclone)
+				     gfp_t gfp_mask)
 {
-	kmem_cache_t *cache;
 	struct sk_buff *skb;
 	u8 *data;
 
-	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
-
 	/* Get the HEAD */
-	skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
+	skb = kmem_cache_alloc(skbuff_head_cache,
+			       gfp_mask & ~__GFP_DMA);
 	if (!skb)
 		goto out;
 
@@ -242,53 +230,18 @@ struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
 
 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
 	skb_shinfo(skb)->nr_frags = 0;
-	skb_shinfo(skb)->gso_size = 0;
-	skb_shinfo(skb)->gso_segs = 0;
-	skb_shinfo(skb)->gso_type = 0;
+	skb_shinfo(skb)->tso_size = 0;
+	skb_shinfo(skb)->tso_segs = 0;
+	skb_shinfo(skb)->ufo_size = 0;
 	skb_shinfo(skb)->frag_list = NULL;
-
-	if (fclone) {
-		struct sk_buff *child = skb + 1;
-		atomic_t *fclone_ref = (atomic_t *) (child + 1);
-
-		skb->fclone = SKB_FCLONE_ORIG;
-		atomic_set(fclone_ref, 1);
-
-		child->fclone = SKB_FCLONE_UNAVAILABLE;
-	}
 out:
 	return skb;
 nodata:
-	kmem_cache_free(cache, skb);
+	kmem_cache_free(skbuff_head_cache, skb);
 	skb = NULL;
 	goto out;
 }
 
-/**
- *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
- *	@dev: network device to receive on
- *	@length: length to allocate
- *	@gfp_mask: get_free_pages mask, passed to alloc_skb
- *
- *	Allocate a new &sk_buff and assign it a usage count of one. The
- *	buffer has unspecified headroom built in. Users should allocate
- *	the headroom they think they need without accounting for the
- *	built in space. The built in space is used for optimisations.
- *
- *	%NULL is returned if there is no free memory.
- */
-struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-		unsigned int length, gfp_t gfp_mask)
-{
-	struct sk_buff *skb;
-
-	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
-	if (likely(skb)) {
-		skb_reserve(skb, NET_SKB_PAD);
-		skb->dev = dev;
-	}
-	return skb;
-}
 
 static void skb_drop_list(struct sk_buff **listp)
 {
@@ -316,7 +269,7 @@ static void skb_clone_fraglist(struct sk_buff *skb)
 		skb_get(list);
 }
 
-static void skb_release_data(struct sk_buff *skb)
+void skb_release_data(struct sk_buff *skb)
 {
 	if (!skb->cloned ||
 	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
@@ -408,24 +361,6 @@ void __kfree_skb(struct sk_buff *skb)
 	kfree_skbmem(skb);
 }
 
-/**
- *	kfree_skb - free an sk_buff
- *	@skb: buffer to free
- *
- *	Drop a reference to the buffer and free it if the usage count has
- *	hit zero.
- */
-void kfree_skb(struct sk_buff *skb)
-{
-	if (unlikely(!skb))
-		return;
-	if (likely(atomic_read(&skb->users) == 1))
-		smp_rmb();
-	else if (likely(!atomic_dec_and_test(&skb->users)))
-		return;
-	__kfree_skb(skb);
-}
-
 /**
  *	skb_clone - duplicate an sk_buff
  *	@skb: buffer to clone
@@ -479,10 +414,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	C(local_df);
 	n->cloned = 1;
 	n->nohdr = 0;
-#ifdef CONFIG_XEN
-	C(proto_data_valid);
-	C(proto_csum_blank);
-#endif
 	C(pkt_type);
 	C(ip_summed);
 	C(priority);
@@ -513,7 +444,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
 	C(input_dev);
 #endif
-	skb_copy_secmark(n, skb);
+
 #endif
 	C(truesize);
 	atomic_set(&n->users, 1);
@@ -575,11 +506,10 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #endif
 	new->tc_index = old->tc_index;
 #endif
-	skb_copy_secmark(new, old);
 	atomic_set(&new->users, 1);
-	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
-	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
-	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
+	skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
+	skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
+	skb_shinfo(new)->ufo_size = skb_shinfo(old)->ufo_size;
 }
 
 /**
@@ -832,40 +762,24 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
  *	filled. Used by network drivers which may DMA or transfer data
  *	beyond the buffer end onto the wire.
  *
- *	May return error in out of memory cases. The skb is freed on error.
+ *	May return NULL in out of memory cases.
  */
 
-int skb_pad(struct sk_buff *skb, int pad)
+struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
 {
-	int err;
-	int ntail;
+	struct sk_buff *nskb;
 
 	/* If the skbuff is non linear tailroom is always zero.. */
-	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
+	if (skb_tailroom(skb) >= pad) {
 		memset(skb->data+skb->len, 0, pad);
-		return 0;
-	}
-
-	ntail = skb->data_len + pad - (skb->end - skb->tail);
-	if (likely(skb_cloned(skb) || ntail > 0)) {
-		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
-		if (unlikely(err))
-			goto free_skb;
+		return skb;
 	}
-
-	/* FIXME: The use of this function with non-linear skb's really needs
-	 * to be audited.
-	 */
-	err = skb_linearize(skb);
-	if (unlikely(err))
-		goto free_skb;
-
-	memset(skb->data + skb->len, 0, pad);
-	return 0;
-
-free_skb:
+
+	nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC);
 	kfree_skb(skb);
-	return err;
+	if (nskb)
+		memset(nskb->data+nskb->len, 0, pad);
+	return nskb;
 }
 
 /* Trims skb to length len. It can change skb pointers.
@@ -1828,15 +1742,12 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
 			   unsigned int to, struct ts_config *config,
 			   struct ts_state *state)
 {
-	unsigned int ret;
-
 	config->get_next_block = skb_ts_get_next_block;
 	config->finish = skb_ts_finish;
 
 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
 
-	ret = textsearch_find(config, state);
-	return (ret <= to - from ? ret : UINT_MAX);
+	return textsearch_find(config, state);
 }
 
 /**
@@ -1911,155 +1822,6 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 	return 0;
 }
 
-/**
- *	skb_pull_rcsum - pull skb and update receive checksum
- *	@skb: buffer to update
- *	@start: start of data before pull
- *	@len: length of data pulled
- *
- *	This function performs an skb_pull on the packet and updates
- *	update the CHECKSUM_HW checksum. It should be used on receive
- *	path processing instead of skb_pull unless you know that the
- *	checksum difference is zero (e.g., a valid IP header) or you
- *	are setting ip_summed to CHECKSUM_NONE.
- */
-unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
-{
-	BUG_ON(len > skb->len);
-	skb->len -= len;
-	BUG_ON(skb->len < skb->data_len);
-	skb_postpull_rcsum(skb, skb->data, len);
-	return skb->data += len;
-}
-
-EXPORT_SYMBOL_GPL(skb_pull_rcsum);
-
-/**
- *	skb_segment - Perform protocol segmentation on skb.
- *	@skb: buffer to segment
- *	@features: features for the output path (see dev->features)
- *
- *	This function performs segmentation on the given skb. It returns
- *	the segment at the given position. It returns NULL if there are
- *	no more segments to generate, or when an error is encountered.
- */
-struct sk_buff *skb_segment(struct sk_buff *skb, int features)
-{
-	struct sk_buff *segs = NULL;
-	struct sk_buff *tail = NULL;
-	unsigned int mss = skb_shinfo(skb)->gso_size;
-	unsigned int doffset = skb->data - skb->mac.raw;
-	unsigned int offset = doffset;
-	unsigned int headroom;
-	unsigned int len;
-	int sg = features & NETIF_F_SG;
-	int nfrags = skb_shinfo(skb)->nr_frags;
-	int err = -ENOMEM;
-	int i = 0;
-	int pos;
-
-	__skb_push(skb, doffset);
-	headroom = skb_headroom(skb);
-	pos = skb_headlen(skb);
-
-	do {
-		struct sk_buff *nskb;
-		skb_frag_t *frag;
-		int hsize;
-		int k;
-		int size;
-
-		len = skb->len - offset;
-		if (len > mss)
-			len = mss;
-
-		hsize = skb_headlen(skb) - offset;
-		if (hsize < 0)
-			hsize = 0;
-		if (hsize > len || !sg)
-			hsize = len;
-
-		nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
-		if (unlikely(!nskb))
-			goto err;
-
-		if (segs)
-			tail->next = nskb;
-		else
-			segs = nskb;
-		tail = nskb;
-
-		nskb->dev = skb->dev;
-		nskb->priority = skb->priority;
-		nskb->protocol = skb->protocol;
-		nskb->dst = dst_clone(skb->dst);
-		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
-		nskb->pkt_type = skb->pkt_type;
-		nskb->mac_len = skb->mac_len;
-
-		skb_reserve(nskb, headroom);
-		nskb->mac.raw = nskb->data;
-		nskb->nh.raw = nskb->data + skb->mac_len;
-		nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
-		memcpy(skb_put(nskb, doffset), skb->data, doffset);
-
-		if (!sg) {
-			nskb->csum = skb_copy_and_csum_bits(skb, offset,
-							    skb_put(nskb, len),
-							    len, 0);
-			continue;
-		}
-
-		frag = skb_shinfo(nskb)->frags;
-		k = 0;
-
-		nskb->ip_summed = CHECKSUM_HW;
-		nskb->csum = skb->csum;
-		memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
-
-		while (pos < offset + len) {
-			BUG_ON(i >= nfrags);
-
-			*frag = skb_shinfo(skb)->frags[i];
-			get_page(frag->page);
-			size = frag->size;
-
-			if (pos < offset) {
-				frag->page_offset += offset - pos;
-				frag->size -= offset - pos;
-			}
-
-			k++;
-
-			if (pos + size <= offset + len) {
-				i++;
-				pos += size;
-			} else {
-				frag->size -= pos + size - (offset + len);
-				break;
-			}
-
-			frag++;
-		}
-
-		skb_shinfo(nskb)->nr_frags = k;
-		nskb->data_len = len - hsize;
-		nskb->len += nskb->data_len;
-		nskb->truesize += nskb->data_len;
-	} while ((offset += len) < skb->len);
-
-	return segs;
-
-err:
-	while ((skb = segs)) {
-		segs = skb->next;
-		kfree(skb);
-	}
-	return ERR_PTR(err);
-}
-
-EXPORT_SYMBOL_GPL(skb_segment);
-
 void __init skb_init(void)
 {
 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
@@ -2082,10 +1844,8 @@ void __init skb_init(void)
 
 EXPORT_SYMBOL(___pskb_trim);
 EXPORT_SYMBOL(__kfree_skb);
-EXPORT_SYMBOL(kfree_skb);
 EXPORT_SYMBOL(__pskb_pull_tail);
 EXPORT_SYMBOL(__alloc_skb);
-EXPORT_SYMBOL(__netdev_alloc_skb);
EXPORT_SYMBOL(pskb_copy);
 EXPORT_SYMBOL(pskb_expand_head);
 EXPORT_SYMBOL(skb_checksum);
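A note on the skb_pad() hunk above: the patch moves from the int-returning skb_pad() (caller keeps its pointer, gets 0 or a negative error, and the skb is freed on error) back to the older variant that returns a possibly reallocated &sk_buff. The following is only an illustrative caller sketch under that restored signature; my_xmit_helper() and MY_MIN_FRAME_LEN are hypothetical names invented for the example and are not part of the patch.

/*
 * Illustrative sketch only (not part of the patch): padding a short
 * frame with the pointer-returning skb_pad() shown in the hunk above.
 */
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_MIN_FRAME_LEN 60	/* hypothetical minimum frame length */

static int my_xmit_helper(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->len < MY_MIN_FRAME_LEN) {
		/*
		 * skb_pad() returns the original skb when there is enough
		 * tailroom, a freshly copied skb otherwise (the original is
		 * freed), or NULL if the copy failed (the original is freed
		 * in that case too), so the old pointer must not be reused.
		 */
		skb = skb_pad(skb, MY_MIN_FRAME_LEN - skb->len);
		if (!skb)
			return -ENOMEM;	/* nothing left for the caller to free */
	}
	skb->dev = dev;
	/* ...hand skb to the hardware queue here... */
	return 0;
}

Under the int-returning version being removed here, the same call site would instead check the return code and simply stop using skb on error, since that variant frees the buffer in its error path.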