#endif
#ifndef ARCH_KMALLOC_MINALIGN
+/*
+ * Enforce a minimum alignment for the kmalloc caches.
+ * Usually, the kmalloc caches are cache_line_size() aligned, except when
+ * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
+ * Note that this flag disables some debug features.
+ */
#define ARCH_KMALLOC_MINALIGN 0
#endif
+#ifndef ARCH_SLAB_MINALIGN
+/*
+ * Enforce a minimum alignment for all caches.
+ * Intended for archs that get misalignment faults even for BYTES_PER_WORD
+ * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
+ * If possible: Do not enable this for CONFIG_DEBUG_SLAB builds, as it
+ * disables some debug features.
+ */
+#define ARCH_SLAB_MINALIGN 0
+#endif
+
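+/*
+ * Example (hypothetical arch override, normally set in <asm/cache.h>):
+ *	#define ARCH_KMALLOC_MINALIGN	64
+ * would guarantee that kmalloc() buffers are safe for 64-byte DMA bursts.
+ */
+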
#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
- { 0, }
+ CACHE(ULONG_MAX)
#undef CACHE
};
-
EXPORT_SYMBOL(malloc_sizes);
/* Must match cache_sizes above. Out of line to keep cache footprint low. */
static void enable_cpucache (kmem_cache_t *cachep);
static void cache_reap (void *unused);
-static inline void ** ac_entry(struct array_cache *ac)
+static inline void **ac_entry(struct array_cache *ac)
{
return (void**)(ac+1);
}
return cachep->array[smp_processor_id()];
}
-static kmem_cache_t * kmem_find_general_cachep (size_t size, int gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size, int gfpflags)
{
struct cache_sizes *csizep = malloc_sizes;
- /* This function could be moved to the header file, and
- * made inline so consumers can quickly determine what
- * cache pointer they require.
+#if DEBUG
+ /* This happens if someone tries to call
+ * kmem_cache_create(), or __kmalloc(), before
+ * the generic caches are initialized.
+ */
+ BUG_ON(csizep->cs_cachep == NULL);
+#endif
+ while (size > csizep->cs_size)
+ csizep++;
+
+ /*
+	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
+	 * has cs_{dma,}cachep==NULL. Thus no special case is
+	 * required for large kmalloc calls.
*/
- for ( ; csizep->cs_size; csizep++) {
- if (size > csizep->cs_size)
- continue;
- break;
- }
- return (gfpflags & GFP_DMA) ? csizep->cs_dmacachep : csizep->cs_cachep;
+ if (unlikely(gfpflags & GFP_DMA))
+ return csizep->cs_dmacachep;
+ return csizep->cs_cachep;
+}
+
+kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
+{
+ return __find_general_cachep(size, gfpflags);
}
+EXPORT_SYMBOL(kmem_find_general_cachep);
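+/*
+ * Illustrative use (not part of this patch): callers resolve a size class
+ * once and then allocate from it directly, e.g.
+ *	kmem_cache_t *c = kmem_find_general_cachep(len, GFP_KERNEL);
+ *	void *obj = c ? kmem_cache_alloc(c, GFP_KERNEL) : NULL;
+ */
+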
/* Calculate the number of objects, wastage, and bytes left over for a given slab size. */
-static void cache_estimate (unsigned long gfporder, size_t size, size_t align,
+static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
int flags, size_t *left_over, unsigned int *num)
{
int i;
}
}
-static struct array_cache *alloc_arraycache(int cpu, int entries, int batchcount)
+static struct array_cache *alloc_arraycache(int cpu, int entries,
+ int batchcount)
{
int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
struct array_cache *nc = NULL;
- if (cpu != -1) {
- nc = kmem_cache_alloc_node(kmem_find_general_cachep(memsize,
- GFP_KERNEL), cpu_to_node(cpu));
- }
- if (!nc)
+ if (cpu == -1)
nc = kmalloc(memsize, GFP_KERNEL);
+ else
+ nc = kmalloc_node(memsize, GFP_KERNEL, cpu_to_node(cpu));
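+	/* An array cache for a specific cpu is placed on that cpu's home
+	 * node, keeping the alloc/free fast path node-local; cpu == -1
+	 * (no target cpu) falls back to a plain kmalloc.
+	 */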
+
if (nc) {
nc->avail = 0;
nc->limit = entries;
}
static int __devinit cpuup_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+ unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
kmem_cache_t* cachep;
sizes = malloc_sizes;
names = cache_names;
- while (sizes->cs_size) {
+ while (sizes->cs_size != ULONG_MAX) {
/* For performance, all the general caches are L1 aligned.
* This should be particularly beneficial on SMP boxes, as it
* eliminates "false sharing".
* did not request dmaable memory, we might get it, but that
* would be relatively rare and ignorable.
*/
-static void *kmem_getpages(kmem_cache_t *cachep, int flags, int nodeid)
+static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
{
struct page *page;
void *addr;
flags |= cachep->gfpflags;
if (likely(nodeid == -1)) {
- addr = (void*)__get_free_pages(flags, cachep->gfporder);
- if (!addr)
- return NULL;
- page = virt_to_page(addr);
+ page = alloc_pages(flags, cachep->gfporder);
} else {
page = alloc_pages_node(nodeid, flags, cachep->gfporder);
- if (!page)
- return NULL;
- addr = page_address(page);
}
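+	/* Both branches now yield a struct page, so failure is checked and
+	 * the virtual address derived in one place below.
+	 */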
+ if (!page)
+ return NULL;
+ addr = page_address(page);
i = (1 << cachep->gfporder);
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
#if DEBUG
#ifdef CONFIG_DEBUG_PAGEALLOC
-static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, unsigned long caller)
+static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
+ unsigned long caller)
{
int size = obj_reallen(cachep);
static void dump_line(char *data, int offset, int limit)
{
int i;
+	unsigned char total = 0;
+
printk(KERN_ERR "%03x:", offset);
for (i=0;i<limit;i++) {
+		if (data[offset+i] != POISON_FREE)
+			total += data[offset+i];
 		printk(" %02x", (unsigned char)data[offset+i]);
}
printk("\n");
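+	/* total sums only the bytes that differ from POISON_FREE; the
+	 * constants below are assumed to be the sums left behind when a
+	 * single bit of the poison pattern has flipped.
+	 */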
+ switch (total) {
+ case 0x36:
+ case 0x6a:
+ case 0x6f:
+ case 0x81:
+ case 0xac:
+ case 0xd3:
+ case 0xd5:
+ case 0xea:
+		printk(KERN_ERR "Single bit error detected. Possibly bad RAM. Run memtest86.\n");
+ return;
+ }
}
#endif
printk(KERN_ERR "Slab corruption: (%s) start=%p, len=%d\n",
print_tainted(), realobj, size);
print_objinfo(cachep, objp, 0);
+ dump_stack();
}
/* Hexdump the affected line */
i = (i/16)*16;
unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
void (*dtor)(void*, kmem_cache_t *, unsigned long))
{
- size_t left_over, slab_size;
+ size_t left_over, slab_size, ralign;
kmem_cache_t *cachep = NULL;
/*
if (flags & ~CREATE_MASK)
BUG();
- if (align) {
- /* combinations of forced alignment and advanced debugging is
- * not yet implemented.
+ /* Check that size is in terms of words. This is needed to avoid
+ * unaligned accesses for some archs when redzoning is used, and makes
+ * sure any on-slab bufctl's are also correctly aligned.
+ */
+ if (size & (BYTES_PER_WORD-1)) {
+ size += (BYTES_PER_WORD-1);
+ size &= ~(BYTES_PER_WORD-1);
+ }
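+	/* e.g. with BYTES_PER_WORD == 4, a 13-byte request is rounded up
+	 * to 16 bytes.
+	 */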
+
+	/* Calculate the final buffer alignment: */
+ /* 1) arch recommendation: can be overridden for debug */
+ if (flags & SLAB_HWCACHE_ALIGN) {
+ /* Default alignment: as specified by the arch code.
+ * Except if an object is really small, then squeeze multiple
+ * objects into one cacheline.
*/
- flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+ ralign = cache_line_size();
+ while (size <= ralign/2)
+ ralign /= 2;
} else {
- if (flags & SLAB_HWCACHE_ALIGN) {
- /* Default alignment: as specified by the arch code.
- * Except if an object is really small, then squeeze multiple
- * into one cacheline.
- */
- align = cache_line_size();
- while (size <= align/2)
- align /= 2;
- } else {
- align = BYTES_PER_WORD;
- }
- }
+ ralign = BYTES_PER_WORD;
+ }
+ /* 2) arch mandated alignment: disables debug if necessary */
+ if (ralign < ARCH_SLAB_MINALIGN) {
+ ralign = ARCH_SLAB_MINALIGN;
+ if (ralign > BYTES_PER_WORD)
+ flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+ }
+ /* 3) caller mandated alignment: disables debug if necessary */
+ if (ralign < align) {
+ ralign = align;
+ if (ralign > BYTES_PER_WORD)
+ flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+ }
+ /* 4) Store it. Note that the debug code below can reduce
+ * the alignment to BYTES_PER_WORD.
+ */
+ align = ralign;
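+	/* Worked example (illustrative): a 20-byte object with
+	 * SLAB_HWCACHE_ALIGN and a 64-byte cache line yields ralign
+	 * 64 -> 32 (20 <= 32, but 20 > 16), so two objects share each
+	 * cache line.
+	 */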
/* Get cache's description obj. */
cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
goto opps;
memset(cachep, 0, sizeof(kmem_cache_t));
- /* Check that size is in terms of words. This is needed to avoid
- * unaligned accesses for some archs when redzoning is used, and makes
- * sure any on-slab bufctl's are also correctly aligned.
- */
- if (size & (BYTES_PER_WORD-1)) {
- size += (BYTES_PER_WORD-1);
- size &= ~(BYTES_PER_WORD-1);
- }
-
#if DEBUG
cachep->reallen = size;
return __cache_shrink(cachep);
}
-
EXPORT_SYMBOL(kmem_cache_shrink);
/**
 * The caller must guarantee that no one will allocate memory from the cache
* during the kmem_cache_destroy().
*/
-int kmem_cache_destroy (kmem_cache_t * cachep)
+int kmem_cache_destroy(kmem_cache_t * cachep)
{
int i;
}
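+	/* SLAB_DESTROY_BY_RCU readers may still be dereferencing objects
+	 * obtained under rcu_read_lock(); wait out a full grace period
+	 * before the cache is torn down.
+	 */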
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
- synchronize_kernel();
+ synchronize_rcu();
/* no cpu_online check required here since we clear the percpu
* array on cpu offline and set this to NULL.
return 0;
}
-
EXPORT_SYMBOL(kmem_cache_destroy);
/* Get the memory for a slab management obj. */
-static struct slab* alloc_slabmgmt (kmem_cache_t *cachep,
- void *objp, int colour_off, int local_flags)
+static struct slab *alloc_slabmgmt(kmem_cache_t *cachep,
+ void *objp, int colour_off, unsigned int __nocast local_flags)
{
struct slab *slabp;
return (kmem_bufctl_t *)(slabp+1);
}
-static void cache_init_objs (kmem_cache_t * cachep,
- struct slab * slabp, unsigned long ctor_flags)
+static void cache_init_objs(kmem_cache_t *cachep,
+ struct slab *slabp, unsigned long ctor_flags)
{
int i;
slabp->free = 0;
}
-static void kmem_flagcheck(kmem_cache_t *cachep, int flags)
+static void kmem_flagcheck(kmem_cache_t *cachep, unsigned int flags)
{
if (flags & SLAB_DMA) {
if (!(cachep->gfpflags & GFP_DMA))
* Grow (by 1) the number of slabs within a cache. This is called by
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
-static int cache_grow (kmem_cache_t * cachep, int flags, int nodeid)
+static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
{
struct slab *slabp;
void *objp;
size_t offset;
- int local_flags;
+ unsigned int local_flags;
unsigned long ctor_flags;
/* Be lazy and only check for valid flags here,
}
}
-static void *cache_free_debugcheck (kmem_cache_t * cachep, void * objp, void *caller)
+static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
+ void *caller)
{
struct page *page;
unsigned int objnr;
static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
{
- int i;
+ kmem_bufctl_t i;
int entries = 0;
check_spinlock_acquired(cachep);
/* Check slab's freelist to see if this obj is there. */
for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
entries++;
- if (entries > cachep->num || i < 0 || i >= cachep->num)
+ if (entries > cachep->num || i >= cachep->num)
goto bad;
}
if (entries != cachep->num - slabp->inuse) {
- int i;
bad:
printk(KERN_ERR "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
cachep->name, cachep->num, slabp, slabp->inuse);
#define check_slabp(x,y) do { } while(0)
#endif
-static void* cache_alloc_refill(kmem_cache_t* cachep, int flags)
+static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags)
{
int batchcount;
struct kmem_list3 *l3;
}
static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, int flags)
+cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
{
might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
#endif
-static inline void * __cache_alloc (kmem_cache_t *cachep, int flags)
+static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
{
unsigned long save_flags;
void* objp;
}
}
-static void cache_flusharray (kmem_cache_t* cachep, struct array_cache *ac)
+static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
{
int batchcount;
*
* Called with disabled ints.
*/
-static inline void __cache_free (kmem_cache_t *cachep, void* objp)
+static inline void __cache_free(kmem_cache_t *cachep, void *objp)
{
struct array_cache *ac = ac_data(cachep);
* Allocate an object from this cache. The flags are only relevant
* if the cache has no available objects.
*/
-void * kmem_cache_alloc (kmem_cache_t *cachep, int flags)
+void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
{
return __cache_alloc(cachep, flags);
}
-
EXPORT_SYMBOL(kmem_cache_alloc);
/**
* and can sleep. And it will allocate memory on the given node, which
* can improve the performance for cpu bound structures.
*/
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
{
int loop;
void *objp;
spin_unlock_irq(&cachep->spinlock);
local_irq_disable();
- if (!cache_grow(cachep, GFP_KERNEL, nodeid)) {
+ if (!cache_grow(cachep, flags, nodeid)) {
local_irq_enable();
return NULL;
}
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
+void *kmalloc_node(size_t size, int flags, int node)
+{
+ kmem_cache_t *cachep;
+
+ cachep = kmem_find_general_cachep(size, flags);
+ if (unlikely(cachep == NULL))
+ return NULL;
+ return kmem_cache_alloc_node(cachep, flags, node);
+}
+EXPORT_SYMBOL(kmalloc_node);
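+/*
+ * Illustrative use (hypothetical caller): place a structure on the node
+ * whose cpus will touch it most often, e.g.
+ *	struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL, nodeid);
+ */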
#endif
/**
* platforms. For example, on i386, it means that the memory must come
* from the first 16MB.
*/
-void * __kmalloc (size_t size, int flags)
+void *__kmalloc(size_t size, unsigned int __nocast flags)
{
- struct cache_sizes *csizep = malloc_sizes;
+ kmem_cache_t *cachep;
- for (; csizep->cs_size; csizep++) {
- if (size > csizep->cs_size)
- continue;
-#if DEBUG
- /* This happens if someone tries to call
- * kmem_cache_create(), or kmalloc(), before
- * the generic caches are initialized.
- */
- BUG_ON(csizep->cs_cachep == NULL);
-#endif
- return __cache_alloc(flags & GFP_DMA ?
- csizep->cs_dmacachep : csizep->cs_cachep, flags);
- }
- return NULL;
+	/* If you want to save a few bytes of .text space: replace
+ * __ with kmem_.
+ * Then kmalloc uses the uninlined functions instead of the inline
+ * functions.
+ */
+ cachep = __find_general_cachep(size, flags);
+ if (unlikely(cachep == NULL))
+ return NULL;
+ return __cache_alloc(cachep, flags);
}
-
EXPORT_SYMBOL(__kmalloc);
#ifdef CONFIG_SMP
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
- pdata->ptrs[i] = kmem_cache_alloc_node(
- kmem_find_general_cachep(size, GFP_KERNEL),
- cpu_to_node(i));
+ pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL,
+ cpu_to_node(i));
if (!pdata->ptrs[i])
goto unwind_oom;
kfree(pdata);
return NULL;
}
-
EXPORT_SYMBOL(__alloc_percpu);
#endif
* Free an object which was previously allocated from this
* cache.
*/
-void kmem_cache_free (kmem_cache_t *cachep, void *objp)
+void kmem_cache_free(kmem_cache_t *cachep, void *objp)
{
unsigned long flags;
__cache_free(cachep, objp);
local_irq_restore(flags);
}
-
EXPORT_SYMBOL(kmem_cache_free);
/**
* @size: element size.
* @flags: the type of memory to allocate.
*/
-void *kcalloc(size_t n, size_t size, int flags)
+void *kcalloc(size_t n, size_t size, unsigned int __nocast flags)
{
void *ret = NULL;
memset(ret, 0, n * size);
return ret;
}
-
EXPORT_SYMBOL(kcalloc);
/**
* Don't free memory not originally allocated by kmalloc()
* or you will run into trouble.
*/
-void kfree (const void *objp)
+void kfree(const void *objp)
{
kmem_cache_t *c;
unsigned long flags;
- if (!objp)
+ if (unlikely(!objp))
return;
local_irq_save(flags);
kfree_debugcheck(objp);
__cache_free(c, (void*)objp);
local_irq_restore(flags);
}
-
EXPORT_SYMBOL(kfree);
#ifdef CONFIG_SMP
}
kfree(p);
}
-
EXPORT_SYMBOL(free_percpu);
#endif
{
return obj_reallen(cachep);
}
-
EXPORT_SYMBOL(kmem_cache_size);
#if DEBUG
if (!(cachep->flags & SLAB_RED_ZONE))
return; /* no redzone data to check */
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/* Page alloc debugging is on for this cache. Mapping and unmapping
+	 * happen without any locking, thus parallel checks are impossible.
+	 */
+	if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+		return;
+#endif
+
for (i=0;i<cachep->num;i++) {
void *objp = slabp->s_mem + cachep->objsize * i;
unsigned long red1, red2;
}
-static int do_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount, int shared)
+static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
+ int shared)
{
struct ccupdate_struct new;
struct array_cache *new_shared;
}
-static void enable_cpucache (kmem_cache_t *cachep)
+static void enable_cpucache(kmem_cache_t *cachep)
{
int err;
int limit, shared;
next_unlock:
spin_unlock_irq(&searchp->spinlock);
next:
- ;
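+		/* The cache chain can be long: yield between caches so
+		 * cache_reap() does not hog the cpu.
+		 */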
+ cond_resched();
}
check_irq_on();
up(&cache_chain_sem);
seq_puts(m, "slabinfo - version: 2.1\n");
#endif
seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
- seq_puts(m, " : tunables <batchcount> <limit> <sharedfactor>");
+ seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
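+	/* Keep this column order in sync with what a write to /proc/slabinfo
+	 * is parsed as: "name limit batchcount shared".
+	 */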
seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#if STATS
seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped>"