/* drivers/xen/core/skbuff.c */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/hypervisor.h>

/* Referenced in netback.c. */
/*static*/ kmem_cache_t *skbuff_cachep;
EXPORT_SYMBOL(skbuff_cachep);

/* Allow up to 64kB or page-sized packets (whichever is greater). */
#if PAGE_SHIFT < 16
#define MAX_SKBUFF_ORDER (16 - PAGE_SHIFT)
#else
#define MAX_SKBUFF_ORDER 0
#endif
static kmem_cache_t *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1];

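/*
 * Small fixed-size caches for short packets.  Sizes must be listed in
 * ascending order: __alloc_skb() picks the first entry that is large
 * enough for the rounded-up request.
 */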
static struct {
	int size;
	kmem_cache_t *cachep;
} skbuff_small[] = { { 512, NULL }, { 2048, NULL } };

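/*
 * Xen replacement for the generic __alloc_skb(): round the request up to
 * include struct skb_shared_info, satisfy it from one of the small caches
 * when possible, and otherwise fall back to the page-order caches (up to
 * MAX_SKBUFF_ORDER).
 */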
struct sk_buff *__alloc_skb(unsigned int length, gfp_t gfp_mask,
			    int fclone)
{
	int order, i;
	kmem_cache_t *cachep;

	length = SKB_DATA_ALIGN(length) + sizeof(struct skb_shared_info);

	if (length <= skbuff_small[ARRAY_SIZE(skbuff_small)-1].size) {
		for (i = 0; skbuff_small[i].size < length; i++)
			continue;
		cachep = skbuff_small[i].cachep;
	} else {
		order = get_order(length);
		if (order > MAX_SKBUFF_ORDER) {
			printk(KERN_ALERT "Attempt to allocate order %d "
			       "skbuff. Increase MAX_SKBUFF_ORDER.\n", order);
			return NULL;
		}
		cachep = skbuff_order_cachep[order];
	}

	length -= sizeof(struct skb_shared_info);

	return alloc_skb_from_cache(cachep, length, gfp_mask, fclone);
}

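/*
 * As __alloc_skb(), but always allocates from the page-order caches and
 * reserves 16 bytes of headroom for the link-layer header.
 */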
struct sk_buff *__dev_alloc_skb(unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;
	int order;

	length = SKB_DATA_ALIGN(length + 16);
	order = get_order(length + sizeof(struct skb_shared_info));
	if (order > MAX_SKBUFF_ORDER) {
		printk(KERN_ALERT "Attempt to allocate order %d skbuff. "
		       "Increase MAX_SKBUFF_ORDER.\n", order);
		return NULL;
	}

	skb = alloc_skb_from_cache(
		skbuff_order_cachep[order], length, gfp_mask, 0);
	if (skb != NULL)
		skb_reserve(skb, 16);

	return skb;
}

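/*
 * Slab constructor: runs once per object when a cache grows.  The buffer's
 * order is recovered by matching the cache pointer against
 * skbuff_order_cachep[], multi-page buffers are made machine-contiguous
 * under Xen, and the pages are scrubbed before use.
 */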
static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
{
	int order = 0;

	while (skbuff_order_cachep[order] != cachep)
		order++;

	/* Do our best to allocate contiguous memory but fall back to IOMMU. */
	if (order != 0)
		(void)xen_create_contiguous_region(
			(unsigned long)buf, order, 0);

	scrub_pages(buf, 1 << order);
}

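/*
 * Slab destructor: undo the constructor's contiguous-region setup before
 * the pages go back to the page allocator.
 */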
static void skbuff_dtor(void *buf, kmem_cache_t *cachep, unsigned long unused)
{
	int order = 0;

	while (skbuff_order_cachep[order] != cachep)
		order++;

	if (order != 0)
		xen_destroy_contiguous_region((unsigned long)buf, order);
}

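/*
 * Create the small caches and one cache per page order at boot.  The
 * contiguous-region ctor/dtor pair is installed only in the initial Xen
 * domain, presumably because only dom0 hands these buffers to physical
 * devices that need machine-contiguous memory for DMA.
 */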
static int __init skbuff_init(void)
{
	static char name[MAX_SKBUFF_ORDER + 1][20];
	static char small_name[ARRAY_SIZE(skbuff_small)][20];
	unsigned long size;
	int i, order;

	for (i = 0; i < ARRAY_SIZE(skbuff_small); i++) {
		size = skbuff_small[i].size;
		sprintf(small_name[i], "xen-skb-%lu", size);
		/*
		 * No ctor/dtor: objects do not span page boundaries, and they
		 * are only used on transmit path so no need for scrubbing.
		 */
		skbuff_small[i].cachep = kmem_cache_create(
			small_name[i], size, size, 0, NULL, NULL);
	}

	for (order = 0; order <= MAX_SKBUFF_ORDER; order++) {
		size = PAGE_SIZE << order;
		sprintf(name[order], "xen-skb-%lu", size);
		if (is_running_on_xen() && is_initial_xendomain())
			skbuff_order_cachep[order] = kmem_cache_create(
				name[order], size, size, 0,
				skbuff_ctor, skbuff_dtor);
		else
			skbuff_order_cachep[order] = kmem_cache_create(
				name[order], size, size, 0, NULL, NULL);
	}

	skbuff_cachep = skbuff_order_cachep[0];

	return 0;
}
core_initcall(skbuff_init);

EXPORT_SYMBOL(__dev_alloc_skb);