X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fxen%2Fcore%2Fskbuff.c;fp=drivers%2Fxen%2Fcore%2Fskbuff.c;h=f38f3cffedecb93e6ff4aeb658ff5f4d2a93fb56;hb=1db395853d4f30d6120458bd279ede1f882a8525;hp=0000000000000000000000000000000000000000;hpb=34a75f0025b9cf803b6a88db032e6ad6950c9313;p=linux-2.6.git

diff --git a/drivers/xen/core/skbuff.c b/drivers/xen/core/skbuff.c
new file mode 100644
index 000000000..f38f3cffe
--- /dev/null
+++ b/drivers/xen/core/skbuff.c
@@ -0,0 +1,141 @@
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/hypervisor.h>
+
+/* Referenced in netback.c. */
+/*static*/ kmem_cache_t *skbuff_cachep;
+EXPORT_SYMBOL(skbuff_cachep);
+
+#define MAX_SKBUFF_ORDER 4
+static kmem_cache_t *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1];
+
+static struct {
+        int size;
+        kmem_cache_t *cachep;
+} skbuff_small[] = { { 512, NULL }, { 2048, NULL } };
+
+struct sk_buff *__alloc_skb(unsigned int length, gfp_t gfp_mask,
+                            int fclone)
+{
+        int order, i;
+        kmem_cache_t *cachep;
+
+        length = SKB_DATA_ALIGN(length) + sizeof(struct skb_shared_info);
+
+        if (length <= skbuff_small[ARRAY_SIZE(skbuff_small)-1].size) {
+                for (i = 0; skbuff_small[i].size < length; i++)
+                        continue;
+                cachep = skbuff_small[i].cachep;
+        } else {
+                order = get_order(length);
+                if (order > MAX_SKBUFF_ORDER) {
+                        printk(KERN_ALERT "Attempt to allocate order %d "
+                               "skbuff. Increase MAX_SKBUFF_ORDER.\n", order);
+                        return NULL;
+                }
+                cachep = skbuff_order_cachep[order];
+        }
+
+        length -= sizeof(struct skb_shared_info);
+
+        return alloc_skb_from_cache(cachep, length, gfp_mask, fclone);
+}
+
+struct sk_buff *__dev_alloc_skb(unsigned int length, gfp_t gfp_mask)
+{
+        struct sk_buff *skb;
+        int order;
+
+        length = SKB_DATA_ALIGN(length + 16);
+        order = get_order(length + sizeof(struct skb_shared_info));
+        if (order > MAX_SKBUFF_ORDER) {
+                printk(KERN_ALERT "Attempt to allocate order %d skbuff. "
+                       "Increase MAX_SKBUFF_ORDER.\n", order);
+                return NULL;
+        }
+
+        skb = alloc_skb_from_cache(
+                skbuff_order_cachep[order], length, gfp_mask, 0);
+        if (skb != NULL)
+                skb_reserve(skb, 16);
+
+        return skb;
+}
+
+static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
+{
+        int order = 0;
+
+        while (skbuff_order_cachep[order] != cachep)
+                order++;
+
+        /* Do our best to allocate contiguous memory but fall back to IOMMU. */
+        if (order != 0)
+                (void)xen_create_contiguous_region(
+                        (unsigned long)buf, order, 0);
+
+        scrub_pages(buf, 1 << order);
+}
+
+static void skbuff_dtor(void *buf, kmem_cache_t *cachep, unsigned long unused)
+{
+        int order = 0;
+
+        while (skbuff_order_cachep[order] != cachep)
+                order++;
+
+        if (order != 0)
+                xen_destroy_contiguous_region((unsigned long)buf, order);
+}
+
+static int __init skbuff_init(void)
+{
+        static char name[MAX_SKBUFF_ORDER + 1][20];
+        static char small_name[ARRAY_SIZE(skbuff_small)][20];
+        unsigned long size;
+        int i, order;
+
+        for (i = 0; i < ARRAY_SIZE(skbuff_small); i++) {
+                size = skbuff_small[i].size;
+                sprintf(small_name[i], "xen-skb-%lu", size);
+                /*
+                 * No ctor/dtor: objects do not span page boundaries, and they
+                 * are only used on the transmit path so no need for scrubbing.
+                 */
+                skbuff_small[i].cachep = kmem_cache_create(
+                        small_name[i], size, size, 0, NULL, NULL);
+        }
+
+        for (order = 0; order <= MAX_SKBUFF_ORDER; order++) {
+                size = PAGE_SIZE << order;
+                sprintf(name[order], "xen-skb-%lu", size);
+                if (is_running_on_xen() &&
+                    (xen_start_info->flags & SIF_PRIVILEGED))
+                        skbuff_order_cachep[order] = kmem_cache_create(
+                                name[order], size, size, 0,
+                                skbuff_ctor, skbuff_dtor);
+                else
+                        skbuff_order_cachep[order] = kmem_cache_create(
+                                name[order], size, size, 0, NULL, NULL);
+
+        }
+
+        skbuff_cachep = skbuff_order_cachep[0];
+
+        return 0;
+}
+core_initcall(skbuff_init);
+
+EXPORT_SYMBOL(__dev_alloc_skb);
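
For context, a minimal caller sketch follows; it is hypothetical and not part of the patch above. It assumes only what the patch itself establishes: the exported skbuff_cachep (noted above as referenced by netback.c) and the four-argument alloc_skb_from_cache() helper that __alloc_skb() and __dev_alloc_skb() call. The function name backend_alloc_skb is invented for illustration, and data_len must be small enough that the aligned data plus struct skb_shared_info fits in the order-0 object size.

/*
 * Hypothetical caller sketch -- not part of this patch.  Pulls one
 * page-sized, machine-contiguous buffer from the cache set up above,
 * then reserves the usual 16 bytes of headroom for the link-layer
 * header, as __dev_alloc_skb() does.
 */
#include <linux/skbuff.h>

extern kmem_cache_t *skbuff_cachep;     /* exported by skbuff.c above */

static struct sk_buff *backend_alloc_skb(unsigned int data_len)
{
        struct sk_buff *skb;

        /* Same four-argument helper the patch itself uses (fclone = 0). */
        skb = alloc_skb_from_cache(skbuff_cachep, data_len, GFP_ATOMIC, 0);
        if (skb != NULL)
                skb_reserve(skb, 16);

        return skb;
}

The point of routing allocations through these caches is that skbuff_ctor() runs when the slab layer constructs objects, not on every packet: in a privileged domain, the xen_create_contiguous_region() hypercall and the page scrub are paid once per object construction, while the per-packet hot path remains an ordinary slab allocation.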