This commit was manufactured by cvs2svn to create branch 'vserver'.
[linux-2.6.git] / drivers / xen / core / skbuff.c
1
2 #include <linux/config.h>
3 #include <linux/module.h>
4 #include <linux/version.h>
5 #include <linux/kernel.h>
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/netdevice.h>
9 #include <linux/inetdevice.h>
10 #include <linux/etherdevice.h>
11 #include <linux/skbuff.h>
12 #include <linux/init.h>
13 #include <asm/io.h>
14 #include <asm/page.h>
15 #include <asm/hypervisor.h>
16
/*
 * Order-0 cache, exported for netback.c which allocates its own skbs
 * from it.  Nominally static; see skbuff_init() where it is set to
 * skbuff_order_cachep[0].
 */
/*static*/ kmem_cache_t *skbuff_cachep;
EXPORT_SYMBOL(skbuff_cachep);

/* Largest supported allocation order: 2^MAX_SKBUFF_ORDER pages per skb. */
#define MAX_SKBUFF_ORDER 4
/* One slab cache per order, 0..MAX_SKBUFF_ORDER; populated in skbuff_init(). */
static kmem_cache_t *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1];

/*
 * Sub-page caches for small skbs.  Entries MUST be in ascending size
 * order: __alloc_skb() does a linear first-fit scan and relies on it.
 * cachep fields are filled in by skbuff_init().
 */
static struct {
	int size;
	kmem_cache_t *cachep;
} skbuff_small[] = { { 512, NULL }, { 2048, NULL } };
28
29 struct sk_buff *__alloc_skb(unsigned int length, gfp_t gfp_mask,
30                             int fclone)
31 {
32         int order, i;
33         kmem_cache_t *cachep;
34
35         length = SKB_DATA_ALIGN(length) + sizeof(struct skb_shared_info);
36
37         if (length <= skbuff_small[ARRAY_SIZE(skbuff_small)-1].size) {
38                 for (i = 0; skbuff_small[i].size < length; i++)
39                         continue;
40                 cachep = skbuff_small[i].cachep;
41         } else {
42                 order = get_order(length);
43                 if (order > MAX_SKBUFF_ORDER) {
44                         printk(KERN_ALERT "Attempt to allocate order %d "
45                                "skbuff. Increase MAX_SKBUFF_ORDER.\n", order);
46                         return NULL;
47                 }
48                 cachep = skbuff_order_cachep[order];
49         }
50
51         length -= sizeof(struct skb_shared_info);
52
53         return alloc_skb_from_cache(cachep, length, gfp_mask, fclone);
54 }
55
56 struct sk_buff *__dev_alloc_skb(unsigned int length, gfp_t gfp_mask)
57 {
58         struct sk_buff *skb;
59         int order;
60
61         length = SKB_DATA_ALIGN(length + 16);
62         order = get_order(length + sizeof(struct skb_shared_info));
63         if (order > MAX_SKBUFF_ORDER) {
64                 printk(KERN_ALERT "Attempt to allocate order %d skbuff. "
65                        "Increase MAX_SKBUFF_ORDER.\n", order);
66                 return NULL;
67         }
68
69         skb = alloc_skb_from_cache(
70                 skbuff_order_cachep[order], length, gfp_mask, 0);
71         if (skb != NULL)
72                 skb_reserve(skb, 16);
73
74         return skb;
75 }
76
77 static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
78 {
79         int order = 0;
80
81         while (skbuff_order_cachep[order] != cachep)
82                 order++;
83
84         /* Do our best to allocate contiguous memory but fall back to IOMMU. */
85         if (order != 0)
86                 (void)xen_create_contiguous_region(
87                         (unsigned long)buf, order, 0);
88
89         scrub_pages(buf, 1 << order);
90 }
91
92 static void skbuff_dtor(void *buf, kmem_cache_t *cachep, unsigned long unused)
93 {
94         int order = 0;
95
96         while (skbuff_order_cachep[order] != cachep)
97                 order++;
98
99         if (order != 0)
100                 xen_destroy_contiguous_region((unsigned long)buf, order);
101 }
102
103 static int __init skbuff_init(void)
104 {
105         static char name[MAX_SKBUFF_ORDER + 1][20];
106         static char small_name[ARRAY_SIZE(skbuff_small)][20];
107         unsigned long size;
108         int i, order;
109
110         for (i = 0; i < ARRAY_SIZE(skbuff_small); i++) {
111                 size = skbuff_small[i].size;
112                 sprintf(small_name[i], "xen-skb-%lu", size);
113                 /*
114                  * No ctor/dtor: objects do not span page boundaries, and they
115                  * are only used on transmit path so no need for scrubbing.
116                  */
117                 skbuff_small[i].cachep = kmem_cache_create(
118                         small_name[i], size, size, 0, NULL, NULL);
119         }
120
121         for (order = 0; order <= MAX_SKBUFF_ORDER; order++) {
122                 size = PAGE_SIZE << order;
123                 sprintf(name[order], "xen-skb-%lu", size);
124                 if (is_running_on_xen() &&
125                     (xen_start_info->flags & SIF_PRIVILEGED))
126                         skbuff_order_cachep[order] = kmem_cache_create(
127                                 name[order], size, size, 0,
128                                 skbuff_ctor, skbuff_dtor);
129                 else
130                         skbuff_order_cachep[order] = kmem_cache_create(
131                                 name[order], size, size, 0, NULL, NULL);
132                         
133         }
134
135         skbuff_cachep = skbuff_order_cachep[0];
136
137         return 0;
138 }
139 core_initcall(skbuff_init);
140
141 EXPORT_SYMBOL(__dev_alloc_skb);