#define ARCH_KMALLOC_MINALIGN 0
#endif
+#ifndef ARCH_KMALLOC_FLAGS
+#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
+#endif
+
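The #ifndef guard means an architecture can pre-define ARCH_KMALLOC_FLAGS in its own
headers and this default never fires; SLAB_HWCACHE_ALIGN is only the fallback. A minimal
sketch of such an override, assuming a hypothetical arch header (name and value are
illustrative only):

    /* hypothetical arch header: trade alignment for packing density */
    #define ARCH_KMALLOC_FLAGS 0
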
/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
* cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
* cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
*/
-static inline int obj_dbghead(kmem_cache_t *cachep)
+static int obj_dbghead(kmem_cache_t *cachep)
{
return cachep->dbghead;
}
-static inline int obj_reallen(kmem_cache_t *cachep)
+static int obj_reallen(kmem_cache_t *cachep)
{
return cachep->reallen;
}
BUG_ON(!(cachep->flags & SLAB_STORE_USER));
return (void**)(objp+cachep->objsize-BYTES_PER_WORD);
}
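All three DEBUG accessors are plain offset arithmetic against the tail of the object,
per the layout comment above: the second redzone word sits at objsize - 2*BYTES_PER_WORD
and the stored caller at objsize - BYTES_PER_WORD. A sketch of the equivalent arithmetic
(illustrative restatement only, assuming both SLAB_RED_ZONE and SLAB_STORE_USER are set):

    /* illustrative: mirrors dbg_redzone2()/dbg_userword() above */
    unsigned long *redzone2 = (unsigned long *)
            (objp + cachep->objsize - 2*BYTES_PER_WORD);
    void **caller = (void **)(objp + cachep->objsize - BYTES_PER_WORD);
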
+
#else
-static inline int obj_dbghead(kmem_cache_t *cachep)
-{
- return 0;
-}
-static inline int obj_reallen(kmem_cache_t *cachep)
-{
- return cachep->objsize;
-}
-static inline unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
-{
- BUG();
- return 0;
-}
-static inline unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
-{
- BUG();
- return 0;
-}
-static inline void **dbg_userword(kmem_cache_t *cachep, void *objp)
-{
- BUG();
- return 0;
-}
+
+#define obj_dbghead(x) 0
+#define obj_reallen(cachep) ((cachep)->objsize)
+#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;})
+#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;})
+#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
+
#endif
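The !DEBUG stubs above rely on GCC statement expressions: ({ stmt; expr; }) executes the
statements and evaluates to the final expression, so dbg_redzone1() and friends keep
working in expression position even though they are now macros. A reduced illustration of
the idiom (standalone example, not from this file):

    /* a statement expression yields its last expression's value */
    #define stub_ptr() ({ BUG(); (void **)NULL; })

    void **uw = stub_ptr();    /* legal wherever a call was legal */
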
/*
static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
- { 0, }
+ { NULL, }
#undef CACHE
};
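cache_names is generated with the x-macro pattern: linux/kmalloc_sizes.h is nothing but a
list of CACHE(n) lines, and each includer defines CACHE() to stamp out whatever per-size
entry it needs. Roughly (sizes shown are illustrative, not the full list):

    /* linux/kmalloc_sizes.h, in essence: */
    CACHE(32)
    CACHE(64)
    /* ... */

    /* so the #include above expands to entries like: */
    { .name = "size-32", .name_dma = "size-32(DMA)" },
    { .name = "size-64", .name_dma = "size-64(DMA)" },
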
* allow tighter packing of the smaller caches. */
sizes->cs_cachep = kmem_cache_create(names->name,
sizes->cs_size, ARCH_KMALLOC_MINALIGN,
- SLAB_PANIC, NULL, NULL);
+ (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
/* Inc off-slab bufctl limit until the ceiling is hit. */
if (!(OFF_SLAB(sizes->cs_cachep))) {
sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
sizes->cs_size, ARCH_KMALLOC_MINALIGN,
- (SLAB_CACHE_DMA | SLAB_PANIC), NULL, NULL);
+ (ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC),
+ NULL, NULL);
sizes++;
names++;
/*
* Interface to system's page release.
*/
-static inline void kmem_freepages(kmem_cache_t *cachep, void *addr)
+static void kmem_freepages(kmem_cache_t *cachep, void *addr)
{
unsigned long i = (1<<cachep->gfporder);
struct page *page = virt_to_page(addr);
* %SLAB_NO_REAP - Don't automatically reap this cache when we're under
* memory pressure.
*
- * %SLAB_HWCACHE_ALIGN - This flag has no effect and will be removed soon.
- *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
*/
kmem_cache_t *
kmem_cache_create (const char *name, size_t size, size_t align,
in_interrupt() ||
(size < BYTES_PER_WORD) ||
(size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
- (dtor && !ctor) ||
- (align < 0)) {
+ (dtor && !ctor)) {
printk(KERN_ERR "%s: Early error in slab %s\n",
__FUNCTION__, name);
BUG();
}
EXPORT_SYMBOL(kmem_cache_create);
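With SLAB_HWCACHE_ALIGN meaningful again, callers that want cacheline-aligned objects
request it at creation time. A minimal sketch against the six-argument signature above
(the cache name, struct and error handling are illustrative):

    static kmem_cache_t *foo_cachep;

    foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
                                   0, SLAB_HWCACHE_ALIGN, NULL, NULL);
    if (!foo_cachep)
            return -ENOMEM;    /* no SLAB_PANIC, so check the result */
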
-static inline void check_irq_off(void)
-{
#if DEBUG
+static void check_irq_off(void)
+{
BUG_ON(!irqs_disabled());
-#endif
}
-static inline void check_irq_on(void)
+static void check_irq_on(void)
{
-#if DEBUG
BUG_ON(irqs_disabled());
-#endif
}
-static inline void check_spinlock_acquired(kmem_cache_t *cachep)
+static void check_spinlock_acquired(kmem_cache_t *cachep)
{
#ifdef CONFIG_SMP
check_irq_off();
BUG_ON(spin_trylock(&cachep->spinlock));
#endif
}
+#else
+#define check_irq_off() do { } while(0)
+#define check_irq_on() do { } while(0)
+#define check_spinlock_acquired(x) do { } while(0)
+#endif
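The do { } while(0) form is the standard way to give a no-op macro statement semantics:
unlike an empty expansion, it refuses to compile without its trailing semicolon, so call
sites parse identically whether DEBUG is on or off. Illustration (standalone):

    #define noop_check() do { } while(0)

    if (cond)
            noop_check();    /* same shape as the DEBUG function call */
    else
            other();
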
/*
* Waits for all CPUs to execute func().
EXPORT_SYMBOL(kmem_cache_destroy);
/* Get the memory for a slab management obj. */
-static inline struct slab* alloc_slabmgmt (kmem_cache_t *cachep,
+static struct slab* alloc_slabmgmt (kmem_cache_t *cachep,
void *objp, int colour_off, int local_flags)
{
struct slab *slabp;
return 0;
}
+#if DEBUG
+
/*
* Perform extra freeing checks:
* - detect bad pointers.
* - POISON/RED_ZONE checking
* - destructor calls, for caches with POISON+dtor
*/
-static inline void kfree_debugcheck(const void *objp)
+static void kfree_debugcheck(const void *objp)
{
-#if DEBUG
struct page *page;
if (!virt_addr_valid(objp)) {
printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", (unsigned long)objp);
BUG();
}
-#endif
}
-static inline void *cache_free_debugcheck (kmem_cache_t * cachep, void * objp, void *caller)
+static void *cache_free_debugcheck (kmem_cache_t * cachep, void * objp, void *caller)
{
-#if DEBUG
struct page *page;
unsigned int objnr;
struct slab *slabp;
poison_obj(cachep, objp, POISON_FREE);
#endif
}
-#endif
return objp;
}
-static inline void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
+static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
{
-#if DEBUG
int i;
int entries = 0;
printk("\n");
BUG();
}
-#endif
}
+#else
+#define kfree_debugcheck(x) do { } while(0)
+#define cache_free_debugcheck(x,objp,z) (objp)
+#define check_slabp(x,y) do { } while(0)
+#endif
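Note the value-preserving stub: cache_free_debugcheck() collapses to its middle argument,
so the check costs nothing in !DEBUG builds. A sketch of the shape of such a call site
(illustrative; it follows the pattern used elsewhere in slab.c):

    objp = cache_free_debugcheck(cachep, objp,
                                 __builtin_return_address(0));
    /* !DEBUG: the line above reduces to objp = objp; */
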
static void* cache_alloc_refill(kmem_cache_t* cachep, int flags)
{
#endif
}
-static inline void *
+#if DEBUG
+static void *
cache_alloc_debugcheck_after(kmem_cache_t *cachep,
unsigned long flags, void *objp, void *caller)
{
-#if DEBUG
if (!objp)
return objp;
if (cachep->flags & SLAB_POISON) {
cachep->ctor(objp, cachep, ctor_flags);
}
-#endif
return objp;
}
+#else
+#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
+#endif
static inline void * __cache_alloc (kmem_cache_t *cachep, int flags)
* If we cannot acquire the cache chain semaphore then just give up - we'll
* try again next timer interrupt.
*/
-static inline void cache_reap (void)
+static void cache_reap (void)
{
struct list_head *walk;
unsigned long num_slabs;
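Per the comment above cache_reap(), the reaper backs off rather than blocking: it
trylocks the cache chain semaphore and, on contention, simply returns until the next
timer tick. A sketch of that entry check, assuming the semaphore is named
cache_chain_sem as the comment suggests (illustrative):

    if (down_trylock(&cache_chain_sem))
            return;    /* contended: retry on the next timer interrupt */
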
const char *name;
char *error = NULL;
- mm_segment_t old_fs;
- char tmp;
check_irq_on();
spin_lock_irq(&cachep->spinlock);
error = "free_objects accounting error";
name = cachep->name;
-
- /*
- * Check to see if `name' resides inside a module which has been
- * unloaded (someone forgot to destroy their cache)
- */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- if (__get_user(tmp, name))
- name = "broken";
- set_fs(old_fs);
-
if (error)
printk(KERN_ERR "slab: cache %s error: %s\n", name, error);