#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/hypervisor.h>
/* Referenced in netback.c. */
/*static*/ struct kmem_cache *skbuff_cachep;
EXPORT_SYMBOL(skbuff_cachep);
/* Allow up to 64kB or page-sized packets (whichever is greater). */
#if (PAGE_SHIFT < 16)
#define MAX_SKBUFF_ORDER (16 - PAGE_SHIFT)
#else
#define MAX_SKBUFF_ORDER 0
#endif
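/*
 * For example, with 4kB pages (PAGE_SHIFT == 12) this yields
 * MAX_SKBUFF_ORDER == 4, i.e. one cache per power-of-two size from
 * 4kB up to 64kB.
 */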
static struct kmem_cache *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1];
static struct {
	unsigned int size;
	struct kmem_cache *cachep;
} skbuff_small[] = { { 512, NULL }, { 2048, NULL } };
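/*
 * Replacement for the generic skb allocators: small requests are
 * satisfied from the fixed-size caches above (512B/2kB), while larger
 * ones are carved from a per-order cache of page blocks so the data
 * area occupies whole, physically contiguous pages.
 */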
struct sk_buff *__alloc_skb(unsigned int length, gfp_t gfp_mask,
			    int fclone)
{
	int order, i;
	struct kmem_cache *cachep;

	length = SKB_DATA_ALIGN(length) + sizeof(struct skb_shared_info);

	if (length <= skbuff_small[ARRAY_SIZE(skbuff_small)-1].size) {
		for (i = 0; skbuff_small[i].size < length; i++)
			continue;
		cachep = skbuff_small[i].cachep;
	} else {
		order = get_order(length);
		if (order > MAX_SKBUFF_ORDER) {
			printk(KERN_ALERT "Attempt to allocate order %d "
			       "skbuff. Increase MAX_SKBUFF_ORDER.\n", order);
			return NULL;
		}
		cachep = skbuff_order_cachep[order];
	}

	length -= sizeof(struct skb_shared_info);

	return alloc_skb_from_cache(cachep, length, gfp_mask, fclone);
}
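/*
 * As in the generic allocator of this kernel era, __dev_alloc_skb()
 * pads the request by 16 bytes and then reserves that space as
 * headroom for link-layer headers.
 */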
struct sk_buff *__dev_alloc_skb(unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;
	int order;

	length = SKB_DATA_ALIGN(length + 16);
	order = get_order(length + sizeof(struct skb_shared_info));
	if (order > MAX_SKBUFF_ORDER) {
		printk(KERN_ALERT "Attempt to allocate order %d skbuff. "
		       "Increase MAX_SKBUFF_ORDER.\n", order);
		return NULL;
	}

	skb = alloc_skb_from_cache(
		skbuff_order_cachep[order], length, gfp_mask, 0);
	if (skb != NULL)
		skb_reserve(skb, 16);

	return skb;
}
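/*
 * Under Xen, pages that are contiguous in pseudo-physical memory are
 * not necessarily contiguous in machine memory. The slab constructor
 * therefore asks the hypervisor to back multi-page buffers with
 * contiguous machine frames (needed for device DMA in the initial
 * domain), and scrubs the pages so stale data never crosses a domain
 * boundary inside an skb.
 */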
static void skbuff_ctor(void *buf, struct kmem_cache *cachep, unsigned long unused)
{
	int order = 0;

	while (skbuff_order_cachep[order] != cachep)
		order++;

	/* Do our best to allocate contiguous memory but fall back to IOMMU. */
	if (order != 0)
		(void)xen_create_contiguous_region(
			(unsigned long)buf, order, 0);

	scrub_pages(buf, 1 << order);
}
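/*
 * Destructor: return any exchanged machine-contiguous region to the
 * hypervisor before the slab hands the pages back to the allocator.
 */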
static void skbuff_dtor(void *buf, struct kmem_cache *cachep, unsigned long unused)
{
	int order = 0;

	while (skbuff_order_cachep[order] != cachep)
		order++;

	if (order != 0)
		xen_destroy_contiguous_region((unsigned long)buf, order);
}
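/*
 * Create one slab cache per supported buffer size. The contiguity
 * ctor/dtor pair is only wired up when running as the initial domain
 * on Xen, where buffers may be handed to real hardware.
 */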
static int __init skbuff_init(void)
{
	static char name[MAX_SKBUFF_ORDER + 1][20];
	static char small_name[ARRAY_SIZE(skbuff_small)][20];
	unsigned long size;
	int i, order;
	for (i = 0; i < ARRAY_SIZE(skbuff_small); i++) {
		size = skbuff_small[i].size;
		sprintf(small_name[i], "xen-skb-%lu", size);
		/*
		 * No ctor/dtor: objects do not span page boundaries, and they
		 * are only used on the transmit path so there is no need for
		 * scrubbing.
		 */
		skbuff_small[i].cachep = kmem_cache_create(
			small_name[i], size, size, 0, NULL, NULL);
	}
	for (order = 0; order <= MAX_SKBUFF_ORDER; order++) {
		size = PAGE_SIZE << order;
		sprintf(name[order], "xen-skb-%lu", size);
		if (is_running_on_xen() && is_initial_xendomain())
			skbuff_order_cachep[order] = kmem_cache_create(
				name[order], size, size, 0,
				skbuff_ctor, skbuff_dtor);
		else
			skbuff_order_cachep[order] = kmem_cache_create(
				name[order], size, size, 0, NULL, NULL);
	}

	skbuff_cachep = skbuff_order_cachep[0];

	return 0;
}
core_initcall(skbuff_init);

EXPORT_SYMBOL(__dev_alloc_skb);
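/*
 * Usage sketch: callers go through the normal skb API, which now lands
 * in the caches above. dev_alloc_skb() is the standard GFP_ATOMIC
 * wrapper around __dev_alloc_skb():
 *
 *	struct sk_buff *skb = dev_alloc_skb(1500);
 *	if (skb)
 *		... fill the buffer and hand it to the stack ...
 */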