X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Flinux%2Fslab.h;fp=include%2Flinux%2Fslab.h;h=8cf52939d0ab676698083140e8cb80384ee6b263;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=45ad55b70d1c73e2eac26da896e4e2c2850d6431;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 45ad55b70..8cf52939d 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -11,6 +11,7 @@
 
 typedef struct kmem_cache kmem_cache_t;
 
+#include <linux/config.h>	/* kmalloc_sizes.h needs CONFIG_ options */
 #include <linux/gfp.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -37,6 +38,7 @@ typedef struct kmem_cache kmem_cache_t;
 #define SLAB_DEBUG_INITIAL	0x00000200UL	/* Call constructor (as verifier) */
 #define SLAB_RED_ZONE		0x00000400UL	/* Red zone objs in a cache */
 #define SLAB_POISON		0x00000800UL	/* Poison objects */
+#define SLAB_NO_REAP		0x00001000UL	/* never reap from the cache */
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on a h/w cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* use GFP_DMA memory */
 #define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* force alignment */
@@ -45,7 +47,6 @@ typedef struct kmem_cache kmem_cache_t;
 						   what is reclaimable later*/
 #define SLAB_PANIC		0x00040000UL	/* panic if kmem_cache_create() fails */
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* defer freeing pages to RCU */
-#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 
 /* flags passed to a constructor func */
 #define SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then deconstructor */
@@ -63,7 +64,6 @@ extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned lo
 extern int kmem_cache_destroy(kmem_cache_t *);
 extern int kmem_cache_shrink(kmem_cache_t *);
 extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t);
-extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
 extern void kmem_cache_free(kmem_cache_t *, void *);
 extern unsigned int kmem_cache_size(kmem_cache_t *);
 extern const char *kmem_cache_name(kmem_cache_t *);
@@ -77,60 +77,14 @@ struct cache_sizes {
 };
 extern struct cache_sizes malloc_sizes[];
 
-extern void *__kmalloc(size_t, gfp_t);
 #ifndef CONFIG_DEBUG_SLAB
-#define ____kmalloc(size, flags) __kmalloc(size, flags)
+extern void *__kmalloc(size_t, gfp_t);
 #else
 extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
-#define ____kmalloc(size, flags) \
+#define __kmalloc(size, flags) \
     __kmalloc_track_caller(size, flags, __builtin_return_address(0))
 #endif
 
-/**
- * kmalloc - allocate memory
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * kmalloc is the normal method of allocating memory
- * in the kernel.
- *
- * The @flags argument may be one of:
- *
- * %GFP_USER - Allocate memory on behalf of user. May sleep.
- *
- * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
- *
- * %GFP_ATOMIC - Allocation will not sleep.
- *   For example, use this inside interrupt handlers.
- *
- * %GFP_HIGHUSER - Allocate pages from high memory.
- *
- * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
- *
- * %GFP_NOFS - Do not make any fs calls while trying to get memory.
- *
- * Also it is possible to set different flags by OR'ing
- * in one or more of the following additional @flags:
- *
- * %__GFP_COLD - Request cache-cold pages instead of
- *   trying to return cache-warm pages.
- *
- * %__GFP_DMA - Request memory from the DMA-capable zone.
- *
- * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
- *
- * %__GFP_HIGHMEM - Allocated memory may be from highmem.
- *
- * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
- *   (think twice before using).
- *
- * %__GFP_NORETRY - If memory is not immediately available,
- *   then give up at once.
- *
- * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
- *
- * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
- */
 static inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
@@ -154,35 +108,7 @@ found:
 	return __kmalloc(size, flags);
 }
 
-extern void *__kzalloc(size_t, gfp_t);
-
-/**
- * kzalloc - allocate memory. The memory is set to zero.
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- */
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size)) {
-		int i = 0;
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-		{
-			extern void __you_cannot_kzalloc_that_much(void);
-			__you_cannot_kzalloc_that_much();
-		}
-found:
-		return kmem_cache_zalloc((flags & GFP_DMA) ?
-			malloc_sizes[i].cs_dmacachep :
-			malloc_sizes[i].cs_cachep, flags);
-	}
-	return __kzalloc(size, flags);
-}
+extern void *kzalloc(size_t, gfp_t);
 
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
@@ -192,14 +118,13 @@ found:
  */
 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 {
-	if (n != 0 && size > ULONG_MAX / n)
+	if (n != 0 && size > INT_MAX / n)
 		return NULL;
 	return kzalloc(n * size, flags);
 }
 
 extern void kfree(const void *);
 extern unsigned int ksize(const void *);
-extern int slab_is_available(void);
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
@@ -230,18 +155,17 @@ struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
 	void (*)(void *, struct kmem_cache *, unsigned long));
 int kmem_cache_destroy(struct kmem_cache *c);
 void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags);
-void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
 void kmem_cache_free(struct kmem_cache *c, void *b);
 const char *kmem_cache_name(struct kmem_cache *);
 void *kmalloc(size_t size, gfp_t flags);
-void *__kzalloc(size_t size, gfp_t flags);
+void *kzalloc(size_t size, gfp_t flags);
 void kfree(const void *m);
 unsigned int ksize(const void *m);
 unsigned int kmem_cache_size(struct kmem_cache *c);
 
 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 {
-	return __kzalloc(n * size, flags);
+	return kzalloc(n * size, flags);
 }
 
 #define kmem_cache_shrink(d) (0)
@@ -249,8 +173,6 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 #define kmem_ptr_validate(a, b) (0)
 #define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
 #define kmalloc_node(s, f, n) kmalloc(s, f)
-#define kzalloc(s, f) __kzalloc(s, f)
-#define ____kmalloc kmalloc
 
 #endif /* CONFIG_SLOB */
 
@@ -260,6 +182,7 @@ extern kmem_cache_t *names_cachep;
 extern kmem_cache_t *files_cachep;
 extern kmem_cache_t *filp_cachep;
 extern kmem_cache_t *fs_cachep;
+extern kmem_cache_t *signal_cachep;
 extern kmem_cache_t *sighand_cachep;
 extern kmem_cache_t *bio_cachep;
 
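Usage note (not part of the patch): below is a minimal sketch of how the interfaces declared in this header are typically called, written against the prototypes as they stand after this change (extern kzalloc(), the INT_MAX overflow guard in kcalloc(), and the six-argument kmem_cache_create() with separate constructor/destructor pointers). The module name, demo_item structure, and cache name are hypothetical and exist only for illustration.

/*
 * Hypothetical example module exercising kmalloc/kzalloc/kcalloc and a
 * dedicated kmem_cache, as declared by the patched include/linux/slab.h.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_item {			/* hypothetical object type */
	int id;
	char name[32];
};

static kmem_cache_t *demo_cachep;	/* hypothetical dedicated cache */

static int __init demo_init(void)
{
	struct demo_item *one, *many, *obj;

	/* kzalloc: one zeroed allocation; GFP_KERNEL may sleep */
	one = kzalloc(sizeof(*one), GFP_KERNEL);
	if (!one)
		return -ENOMEM;

	/* kcalloc: zeroed array; returns NULL if n * size would overflow */
	many = kcalloc(16, sizeof(*many), GFP_KERNEL);
	if (!many)
		goto out_one;

	/* six-argument kmem_cache_create(): no constructor or destructor */
	demo_cachep = kmem_cache_create("demo_item", sizeof(struct demo_item),
					0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!demo_cachep)
		goto out_many;

	/* allocate and immediately release one object from the cache */
	obj = kmem_cache_alloc(demo_cachep, GFP_KERNEL);
	if (obj)
		kmem_cache_free(demo_cachep, obj);

	kfree(many);
	kfree(one);
	return 0;

out_many:
	kfree(many);
out_one:
	kfree(one);
	return -ENOMEM;
}

static void __exit demo_exit(void)
{
	if (demo_cachep)
		kmem_cache_destroy(demo_cachep);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");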