* The head array is strictly LIFO and should improve the cache hit rates.
* On SMP, it additionally reduces the spinlock operations.
*
- * The c_cpuarray may not be read with enabled local interrupts -
+ * The c_cpuarray may not be read with enabled local interrupts -
* it's changed with a smp_call_function().
*
* SMP synchronization:
* constructors and destructors are called without any locking.
- * Several members in kmem_cache_t and struct slab never change, they
+ * Several members in struct kmem_cache and struct slab never change, they
* are accessed without any locking.
* The per-cpu arrays are never accessed from the wrong cpu, no locking,
* and local interrupts are disabled so slab code is preempt-safe.
* Further notes from the original documentation:
*
* 11 April '97. Started multi-threading - markhe
- * The global cache-chain is protected by the semaphore 'cache_chain_sem'.
+ * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
* The sem is only needed when accessing/extending the cache-chain, which
* can never happen inside an interrupt (kmem_cache_create(),
* kmem_cache_shrink() and kmem_cache_reap()).
*
* At present, each engine can be growing a cache. This should be blocked.
*
+ * 15 March 2005. NUMA slab allocator.
+ * Shai Fultheim <shai@scalex86.org>.
+ * Shobhit Dayal <shobhit@calsoftinc.com>
+ * Alok N Kataria <alokk@calsoftinc.com>
+ * Christoph Lameter <christoph@lameter.com>
+ *
+ * Modified the slab allocator to be node aware on NUMA systems.
+ * Each node has its own list of partial, free and full slabs.
+ * All object allocations for a node occur from node specific slab lists.
*/
#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
+#include <linux/cpuset.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
+#include <linux/string.h>
+#include <linux/nodemask.h>
+#include <linux/mempolicy.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#define FORCED_DEBUG 0
#endif
-
/* Shouldn't this be in a header file somewhere? */
#define BYTES_PER_WORD sizeof(void *)
#if DEBUG
# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
SLAB_POISON | SLAB_HWCACHE_ALIGN | \
- SLAB_NO_REAP | SLAB_CACHE_DMA | \
+ SLAB_CACHE_DMA | \
SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
- SLAB_DESTROY_BY_RCU)
+ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#else
-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
+# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
- SLAB_DESTROY_BY_RCU)
+ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#endif
/*
* is less than 512 (PAGE_SIZE<<3), but greater than 256.
*/
+typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
-#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-2)
-
-/* Max number of objs-per-slab for caches which use off-slab slabs.
- * Needed to avoid a possible looping condition in cache_grow().
- */
-static unsigned long offslab_limit;
+#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
+#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
/*
* struct slab
* Slabs are chained into three list: fully used, partial, fully free slabs.
*/
struct slab {
- struct list_head list;
- unsigned long colouroff;
- void *s_mem; /* including colour offset */
- unsigned int inuse; /* num of objs active in slab */
- kmem_bufctl_t free;
+ struct list_head list;
+ unsigned long colouroff;
+ void *s_mem; /* including colour offset */
+ unsigned int inuse; /* num of objs active in slab */
+ kmem_bufctl_t free;
+ unsigned short nodeid;
};
/*
* We assume struct slab_rcu can overlay struct slab when destroying.
*/
struct slab_rcu {
- struct rcu_head head;
- kmem_cache_t *cachep;
- void *addr;
+ struct rcu_head head;
+ struct kmem_cache *cachep;
+ void *addr;
};
/*
* struct array_cache
*
- * Per cpu structures
* Purpose:
* - LIFO ordering, to hand out cache-warm objects from _alloc
* - reduce the number of linked list operations
unsigned int limit;
unsigned int batchcount;
unsigned int touched;
+ spinlock_t lock;
+ void *entry[0]; /*
+ * Must have this definition in here for the proper
+ * alignment of array_cache. Also simplifies accessing
+ * the entries.
+ * [0] is for gcc 2.95. It should really be [].
+ */
};
-/* bootstrap: The caches do not work without cpuarrays anymore,
- * but the cpuarrays are allocated from the generic caches...
+/*
+ * bootstrap: The caches do not work without cpuarrays anymore, but the
+ * cpuarrays are allocated from the generic caches...
*/
#define BOOT_CPUCACHE_ENTRIES 1
struct arraycache_init {
struct array_cache cache;
- void * entries[BOOT_CPUCACHE_ENTRIES];
+ void *entries[BOOT_CPUCACHE_ENTRIES];
};
/*
- * The slab lists of all objects.
- * Hopefully reduce the internal fragmentation
- * NUMA: The spinlock could be moved from the kmem_cache_t
- * into this structure, too. Figure out what causes
- * fewer cross-node spinlock operations.
+ * The slab lists for all objects.
*/
struct kmem_list3 {
- struct list_head slabs_partial; /* partial list first, better asm code */
- struct list_head slabs_full;
- struct list_head slabs_free;
- unsigned long free_objects;
- int free_touched;
- unsigned long next_reap;
- struct array_cache *shared;
+ struct list_head slabs_partial; /* partial list first, better asm code */
+ struct list_head slabs_full;
+ struct list_head slabs_free;
+ unsigned long free_objects;
+ unsigned int free_limit;
+ unsigned int colour_next; /* Per-node cache coloring */
+ spinlock_t list_lock;
+ struct array_cache *shared; /* shared per node */
+ struct array_cache **alien; /* on other nodes */
+ unsigned long next_reap; /* updated without locking */
+ int free_touched; /* updated without locking */
};
-#define LIST3_INIT(parent) \
- { \
- .slabs_full = LIST_HEAD_INIT(parent.slabs_full), \
- .slabs_partial = LIST_HEAD_INIT(parent.slabs_partial), \
- .slabs_free = LIST_HEAD_INIT(parent.slabs_free) \
- }
-#define list3_data(cachep) \
- (&(cachep)->lists)
+/*
+ * Need this for bootstrapping a per node allocator.
+ */
+#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
+struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
+#define CACHE_CACHE 0
+#define SIZE_AC 1
+#define SIZE_L3 (1 + MAX_NUMNODES)
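+
+/*
+ * Layout of initkmem_list3[] during bootstrap:
+ *   [CACHE_CACHE]      lists for cache_cache on the boot node
+ *   [SIZE_AC + node]   lists for the kmalloc cache that backs
+ *                      struct arraycache_init, one slot per node
+ *   [SIZE_L3 + node]   lists for the kmalloc cache that backs
+ *                      struct kmem_list3, one slot per node
+ * The static entries are replaced with kmalloc'ed copies as
+ * kmem_cache_init() progresses (see init_list()).
+ */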
+
+/*
+ * This function must be completely optimized away if a constant is passed to
+ * it. Mostly the same as what is in linux/slab.h except it returns an index.
+ */
+static __always_inline int index_of(const size_t size)
+{
+ extern void __bad_size(void);
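+	/*
+	 * __bad_size() is deliberately left undefined: any index_of() call
+	 * that the compiler cannot resolve to one of the CACHE() sizes at
+	 * compile time keeps a reference to it and fails at link time.
+	 */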
+
+ if (__builtin_constant_p(size)) {
+ int i = 0;
-/* NUMA: per-node */
-#define list3_data_ptr(cachep, ptr) \
- list3_data(cachep)
+#define CACHE(x) \
+	if (size <= x) \
+ return i; \
+ else \
+ i++;
+#include "linux/kmalloc_sizes.h"
+#undef CACHE
+ __bad_size();
+ } else
+ __bad_size();
+ return 0;
+}
+
+#define INDEX_AC index_of(sizeof(struct arraycache_init))
+#define INDEX_L3 index_of(sizeof(struct kmem_list3))
+
+static void kmem_list3_init(struct kmem_list3 *parent)
+{
+ INIT_LIST_HEAD(&parent->slabs_full);
+ INIT_LIST_HEAD(&parent->slabs_partial);
+ INIT_LIST_HEAD(&parent->slabs_free);
+ parent->shared = NULL;
+ parent->alien = NULL;
+ parent->colour_next = 0;
+ spin_lock_init(&parent->list_lock);
+ parent->free_objects = 0;
+ parent->free_touched = 0;
+}
+
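+/*
+ * MAKE_LIST/MAKE_ALL_LISTS are used by init_list() when a static bootstrap
+ * kmem_list3 is replaced by a kmalloc'ed copy: after the memcpy the list
+ * heads in the copy still point into the old structure, so re-initialise
+ * them and splice the old list contents across.
+ */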
+#define MAKE_LIST(cachep, listp, slab, nodeid) \
+ do { \
+ INIT_LIST_HEAD(listp); \
+ list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
+ } while (0)
+
+#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
+ do { \
+ MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
+ MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
+ MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
+ } while (0)
/*
- * kmem_cache_t
+ * struct kmem_cache
*
* manages a cache.
*/
-
-struct kmem_cache_s {
+
+struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
- struct array_cache *array[NR_CPUS];
- unsigned int batchcount;
- unsigned int limit;
-/* 2) touched by every alloc & free from the backend */
- struct kmem_list3 lists;
- /* NUMA: kmem_3list_t *nodelists[MAX_NUMNODES] */
- unsigned int objsize;
- unsigned int flags; /* constant flags */
- unsigned int num; /* # of objs per slab */
- unsigned int free_limit; /* upper limit of objects in the lists */
- spinlock_t spinlock;
-
-/* 3) cache_grow/shrink */
+ struct array_cache *array[NR_CPUS];
+/* 2) Cache tunables. Protected by cache_chain_mutex */
+ unsigned int batchcount;
+ unsigned int limit;
+ unsigned int shared;
+
+ unsigned int buffer_size;
+/* 3) touched by every alloc & free from the backend */
+ struct kmem_list3 *nodelists[MAX_NUMNODES];
+
+ unsigned int flags; /* constant flags */
+ unsigned int num; /* # of objs per slab */
+
+/* 4) cache_grow/shrink */
/* order of pgs per slab (2^n) */
- unsigned int gfporder;
+ unsigned int gfporder;
/* force GFP flags, e.g. GFP_DMA */
- unsigned int gfpflags;
+ gfp_t gfpflags;
- size_t colour; /* cache colouring range */
- unsigned int colour_off; /* colour offset */
- unsigned int colour_next; /* cache colouring */
- kmem_cache_t *slabp_cache;
- unsigned int slab_size;
- unsigned int dflags; /* dynamic flags */
+ size_t colour; /* cache colouring range */
+ unsigned int colour_off; /* colour offset */
+ struct kmem_cache *slabp_cache;
+ unsigned int slab_size;
+ unsigned int dflags; /* dynamic flags */
/* constructor func */
- void (*ctor)(void *, kmem_cache_t *, unsigned long);
+ void (*ctor) (void *, struct kmem_cache *, unsigned long);
/* de-constructor func */
- void (*dtor)(void *, kmem_cache_t *, unsigned long);
+ void (*dtor) (void *, struct kmem_cache *, unsigned long);
-/* 4) cache creation/removal */
- const char *name;
- struct list_head next;
+/* 5) cache creation/removal */
+ const char *name;
+ struct list_head next;
-/* 5) statistics */
+/* 6) statistics */
#if STATS
- unsigned long num_active;
- unsigned long num_allocations;
- unsigned long high_mark;
- unsigned long grown;
- unsigned long reaped;
- unsigned long errors;
- unsigned long max_freeable;
- unsigned long node_allocs;
- atomic_t allochit;
- atomic_t allocmiss;
- atomic_t freehit;
- atomic_t freemiss;
+ unsigned long num_active;
+ unsigned long num_allocations;
+ unsigned long high_mark;
+ unsigned long grown;
+ unsigned long reaped;
+ unsigned long errors;
+ unsigned long max_freeable;
+ unsigned long node_allocs;
+ unsigned long node_frees;
+ unsigned long node_overflow;
+ atomic_t allochit;
+ atomic_t allocmiss;
+ atomic_t freehit;
+ atomic_t freemiss;
#endif
#if DEBUG
- int dbghead;
- int reallen;
+ /*
+ * If debugging is enabled, then the allocator can add additional
+ * fields and/or padding to every object. buffer_size contains the total
+ * object size including these internal fields, the following two
+ * variables contain the offset to the user object and its size.
+ */
+ int obj_offset;
+ int obj_size;
#endif
};
#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
#define BATCHREFILL_LIMIT 16
-/* Optimization question: fewer reaps means less
- * probability for unnessary cpucache drain/refill cycles.
+/*
+ * Optimization question: fewer reaps mean a lower probability of unnecessary
+ * cpucache drain/refill cycles.
*
- * OTHO the cpuarrays can contain lots of objects,
+ * OTOH the cpuarrays can contain lots of objects,
* which could lock up otherwise freeable slabs.
*/
#define REAPTIMEOUT_CPUC (2*HZ)
#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
#define STATS_INC_GROWN(x) ((x)->grown++)
#define STATS_INC_REAPED(x) ((x)->reaped++)
-#define STATS_SET_HIGH(x) do { if ((x)->num_active > (x)->high_mark) \
- (x)->high_mark = (x)->num_active; \
- } while (0)
+#define STATS_SET_HIGH(x) \
+ do { \
+ if ((x)->num_active > (x)->high_mark) \
+ (x)->high_mark = (x)->num_active; \
+ } while (0)
#define STATS_INC_ERR(x) ((x)->errors++)
#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
-#define STATS_SET_FREEABLE(x, i) \
- do { if ((x)->max_freeable < i) \
- (x)->max_freeable = i; \
- } while (0)
-
+#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
+#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
+#define STATS_SET_FREEABLE(x, i) \
+ do { \
+ if ((x)->max_freeable < i) \
+ (x)->max_freeable = i; \
+ } while (0)
#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
#define STATS_SET_HIGH(x) do { } while (0)
#define STATS_INC_ERR(x) do { } while (0)
#define STATS_INC_NODEALLOCS(x) do { } while (0)
-#define STATS_SET_FREEABLE(x, i) \
- do { } while (0)
-
+#define STATS_INC_NODEFREES(x) do { } while (0)
+#define STATS_INC_ACOVERFLOW(x) do { } while (0)
+#define STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x) do { } while (0)
#define STATS_INC_ALLOCMISS(x) do { } while (0)
#define STATS_INC_FREEHIT(x) do { } while (0)
#endif
#if DEBUG
-/* Magic nums for obj red zoning.
+/*
+ * Magic nums for obj red zoning.
* Placed in the first word before and the first word after an obj.
*/
#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
#define POISON_FREE 0x6b /* for use-after-free poisoning */
#define POISON_END 0xa5 /* end-byte of poisoning */
-/* memory layout of objects:
+/*
+ * memory layout of objects:
* 0 : objp
- * 0 .. cachep->dbghead - BYTES_PER_WORD - 1: padding. This ensures that
+ * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
* the end of an object is aligned with the end of the real
* allocation. Catches writes behind the end of the allocation.
- * cachep->dbghead - BYTES_PER_WORD .. cachep->dbghead - 1:
+ * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
* redzone word.
- * cachep->dbghead: The real object.
- * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
- * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
+ * cachep->obj_offset: The real object.
+ * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
+ * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
+ * [BYTES_PER_WORD long]
*/
-static int obj_dbghead(kmem_cache_t *cachep)
+static int obj_offset(struct kmem_cache *cachep)
{
- return cachep->dbghead;
+ return cachep->obj_offset;
}
-static int obj_reallen(kmem_cache_t *cachep)
+static int obj_size(struct kmem_cache *cachep)
{
- return cachep->reallen;
+ return cachep->obj_size;
}
-static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
- return (unsigned long*) (objp+obj_dbghead(cachep)-BYTES_PER_WORD);
+ return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
}
-static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
if (cachep->flags & SLAB_STORE_USER)
- return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD);
- return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD);
+ return (unsigned long *)(objp + cachep->buffer_size -
+ 2 * BYTES_PER_WORD);
+ return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
}
-static void **dbg_userword(kmem_cache_t *cachep, void *objp)
+static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_STORE_USER));
- return (void**)(objp+cachep->objsize-BYTES_PER_WORD);
+ return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}
#else
-#define obj_dbghead(x) 0
-#define obj_reallen(cachep) (cachep->objsize)
+#define obj_offset(x) 0
+#define obj_size(cachep) (cachep->buffer_size)
#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;})
#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;})
#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
#endif
/*
- * Maximum size of an obj (in 2^order pages)
- * and absolute limit for the gfp order.
+ * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
+ * order.
*/
#if defined(CONFIG_LARGE_ALLOCS)
#define MAX_OBJ_ORDER 13 /* up to 32Mb */
#define BREAK_GFP_ORDER_LO 0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
-/* Macros for storing/retrieving the cachep and or slab from the
- * global 'mem_map'. These are used to find the slab an obj belongs to.
- * With kfree(), these are used to find the cache which an obj belongs to.
+/*
+ * Functions for storing/retrieving the cachep and/or slab from the page
+ * allocator. These are used to find the slab an obj belongs to. With kfree(),
+ * these are used to find the cache which an obj belongs to.
*/
-#define SET_PAGE_CACHE(pg,x) ((pg)->lru.next = (struct list_head *)(x))
-#define GET_PAGE_CACHE(pg) ((kmem_cache_t *)(pg)->lru.next)
-#define SET_PAGE_SLAB(pg,x) ((pg)->lru.prev = (struct list_head *)(x))
-#define GET_PAGE_SLAB(pg) ((struct slab *)(pg)->lru.prev)
+static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
+{
+ page->lru.next = (struct list_head *)cache;
+}
+
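+/*
+ * If the object sits in a compound (higher order) page, the lookups below
+ * redirect to the head page via page_private() before reading the cache
+ * and slab pointers out of page->lru.
+ */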
+static inline struct kmem_cache *page_get_cache(struct page *page)
+{
+ if (unlikely(PageCompound(page)))
+ page = (struct page *)page_private(page);
+ return (struct kmem_cache *)page->lru.next;
+}
+
+static inline void page_set_slab(struct page *page, struct slab *slab)
+{
+ page->lru.prev = (struct list_head *)slab;
+}
-/* These are the default caches for kmalloc. Custom caches can have other sizes. */
+static inline struct slab *page_get_slab(struct page *page)
+{
+ if (unlikely(PageCompound(page)))
+ page = (struct page *)page_private(page);
+ return (struct slab *)page->lru.prev;
+}
+
+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+ struct page *page = virt_to_page(obj);
+ return page_get_cache(page);
+}
+
+static inline struct slab *virt_to_slab(const void *obj)
+{
+ struct page *page = virt_to_page(obj);
+ return page_get_slab(page);
+}
+
+static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
+ unsigned int idx)
+{
+ return slab->s_mem + cache->buffer_size * idx;
+}
+
+static inline unsigned int obj_to_index(struct kmem_cache *cache,
+ struct slab *slab, void *obj)
+{
+ return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
+}
+
+/*
+ * These are the default caches for kmalloc. Custom caches can have other sizes.
+ */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
- { 0, }
+ CACHE(ULONG_MAX)
#undef CACHE
};
-
EXPORT_SYMBOL(malloc_sizes);
/* Must match cache_sizes above. Out of line to keep cache footprint low. */
static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
- { NULL, }
+ {NULL,}
#undef CACHE
};
static struct arraycache_init initarray_cache __initdata =
- { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
+ { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
- { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
+ { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
/* internal cache of cache description objs */
-static kmem_cache_t cache_cache = {
- .lists = LIST3_INIT(cache_cache.lists),
- .batchcount = 1,
- .limit = BOOT_CPUCACHE_ENTRIES,
- .objsize = sizeof(kmem_cache_t),
- .flags = SLAB_NO_REAP,
- .spinlock = SPIN_LOCK_UNLOCKED,
- .name = "kmem_cache",
+static struct kmem_cache cache_cache = {
+ .batchcount = 1,
+ .limit = BOOT_CPUCACHE_ENTRIES,
+ .shared = 1,
+ .buffer_size = sizeof(struct kmem_cache),
+ .name = "kmem_cache",
#if DEBUG
- .reallen = sizeof(kmem_cache_t),
+ .obj_size = sizeof(struct kmem_cache),
#endif
};
/* Guard access to the cache-chain. */
-static struct semaphore cache_chain_sem;
+static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;
/*
- * vm_enough_memory() looks at this to determine how many
- * slab-allocated pages are possibly freeable under pressure
+ * vm_enough_memory() looks at this to determine how many slab-allocated pages
+ * are possibly freeable under pressure
*
* SLAB_RECLAIM_ACCOUNT turns this on per-slab
*/
atomic_t slab_reclaim_pages;
-EXPORT_SYMBOL(slab_reclaim_pages);
/*
* chicken and egg problem: delay the per-cpu array allocation
*/
static enum {
NONE,
- PARTIAL,
+ PARTIAL_AC,
+ PARTIAL_L3,
FULL
} g_cpucache_up;
-static DEFINE_PER_CPU(struct work_struct, reap_work);
-
-static void free_block(kmem_cache_t* cachep, void** objpp, int len);
-static void enable_cpucache (kmem_cache_t *cachep);
-static void cache_reap (void *unused);
-
-static inline void ** ac_entry(struct array_cache *ac)
+/*
+ * used by boot code to determine if it can use the slab based allocator
+ */
+int slab_is_available(void)
{
- return (void**)(ac+1);
+ return g_cpucache_up == FULL;
}
-static inline struct array_cache *ac_data(kmem_cache_t *cachep)
+static DEFINE_PER_CPU(struct work_struct, reap_work);
+
+static void free_block(struct kmem_cache *cachep, void **objpp, int len,
+ int node);
+static void enable_cpucache(struct kmem_cache *cachep);
+static void cache_reap(void *unused);
+static int __node_shrink(struct kmem_cache *cachep, int node);
+
+static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
return cachep->array[smp_processor_id()];
}
-static kmem_cache_t * kmem_find_general_cachep (size_t size, int gfpflags)
+static inline struct kmem_cache *__find_general_cachep(size_t size,
+ gfp_t gfpflags)
{
struct cache_sizes *csizep = malloc_sizes;
- /* This function could be moved to the header file, and
- * made inline so consumers can quickly determine what
- * cache pointer they require.
+#if DEBUG
+ /* This happens if someone tries to call
+ * kmem_cache_create(), or __kmalloc(), before
+ * the generic caches are initialized.
*/
- for ( ; csizep->cs_size; csizep++) {
- if (size > csizep->cs_size)
- continue;
- break;
- }
- return (gfpflags & GFP_DMA) ? csizep->cs_dmacachep : csizep->cs_cachep;
+ BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
+#endif
+ while (size > csizep->cs_size)
+ csizep++;
+
+ /*
+ * Really subtle: The last entry with cs->cs_size==ULONG_MAX
+ * has cs_{dma,}cachep==NULL. Thus no special case
+ * for large kmalloc calls required.
+ */
+ if (unlikely(gfpflags & GFP_DMA))
+ return csizep->cs_dmacachep;
+ return csizep->cs_cachep;
}
-/* Cal the num objs, wastage, and bytes left over for a given slab size. */
-static void cache_estimate (unsigned long gfporder, size_t size, size_t align,
- int flags, size_t *left_over, unsigned int *num)
+struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
- int i;
- size_t wastage = PAGE_SIZE<<gfporder;
- size_t extra = 0;
- size_t base = 0;
+ return __find_general_cachep(size, gfpflags);
+}
+EXPORT_SYMBOL(kmem_find_general_cachep);
- if (!(flags & CFLGS_OFF_SLAB)) {
- base = sizeof(struct slab);
- extra = sizeof(kmem_bufctl_t);
- }
- i = 0;
- while (i*size + ALIGN(base+i*extra, align) <= wastage)
- i++;
- if (i > 0)
- i--;
+static size_t slab_mgmt_size(size_t nr_objs, size_t align)
+{
+ return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+}
+
+/*
+ * Calculate the number of objects and left-over bytes for a given buffer size.
+ */
+static void cache_estimate(unsigned long gfporder, size_t buffer_size,
+ size_t align, int flags, size_t *left_over,
+ unsigned int *num)
+{
+ int nr_objs;
+ size_t mgmt_size;
+ size_t slab_size = PAGE_SIZE << gfporder;
+
+ /*
+ * The slab management structure can be either off the slab or
+ * on it. For the latter case, the memory allocated for a
+ * slab is used for:
+ *
+ * - The struct slab
+ * - One kmem_bufctl_t for each object
+ * - Padding to respect alignment of @align
+ * - @buffer_size bytes for each object
+ *
+ * If the slab management structure is off the slab, then the
+ * alignment will already be calculated into the size. Because
+ * the slabs are all pages aligned, the objects will be at the
+ * correct alignment when allocated.
+ */
+ if (flags & CFLGS_OFF_SLAB) {
+ mgmt_size = 0;
+ nr_objs = slab_size / buffer_size;
- if (i > SLAB_LIMIT)
- i = SLAB_LIMIT;
+ if (nr_objs > SLAB_LIMIT)
+ nr_objs = SLAB_LIMIT;
+ } else {
+ /*
+ * Ignore padding for the initial guess. The padding
+ * is at most @align-1 bytes, and @buffer_size is at
+ * least @align. In the worst case, this result will
+ * be one greater than the number of objects that fit
+ * into the memory allocation when taking the padding
+ * into account.
+ */
+ nr_objs = (slab_size - sizeof(struct slab)) /
+ (buffer_size + sizeof(kmem_bufctl_t));
+
+ /*
+ * This calculated number will be either the right
+ * amount, or one greater than what we want.
+ */
+ if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
+ > slab_size)
+ nr_objs--;
- *num = i;
- wastage -= i*size;
- wastage -= ALIGN(base+i*extra, align);
- *left_over = wastage;
+ if (nr_objs > SLAB_LIMIT)
+ nr_objs = SLAB_LIMIT;
+
+ mgmt_size = slab_mgmt_size(nr_objs, align);
+ }
+ *num = nr_objs;
+ *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
-static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg)
+static void __slab_error(const char *function, struct kmem_cache *cachep,
+ char *msg)
{
printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
- function, cachep->name, msg);
+ function, cachep->name, msg);
dump_stack();
}
+#ifdef CONFIG_NUMA
+/*
+ * Special reaping functions for NUMA systems called from cache_reap().
+ * These take care of doing round robin flushing of alien caches (containing
+ * objects freed on different nodes from which they were allocated) and the
+ * flushing of remote pcps by calling drain_node_pages.
+ */
+static DEFINE_PER_CPU(unsigned long, reap_node);
+
+static void init_reap_node(int cpu)
+{
+ int node;
+
+ node = next_node(cpu_to_node(cpu), node_online_map);
+ if (node == MAX_NUMNODES)
+ node = first_node(node_online_map);
+
+ __get_cpu_var(reap_node) = node;
+}
+
+static void next_reap_node(void)
+{
+ int node = __get_cpu_var(reap_node);
+
+ /*
+ * Also drain per cpu pages on remote zones
+ */
+ if (node != numa_node_id())
+ drain_node_pages(node);
+
+ node = next_node(node, node_online_map);
+ if (unlikely(node >= MAX_NUMNODES))
+ node = first_node(node_online_map);
+ __get_cpu_var(reap_node) = node;
+}
+
+#else
+#define init_reap_node(cpu) do { } while (0)
+#define next_reap_node(void) do { } while (0)
+#endif
+
/*
* Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
* via the workqueue/eventd.
* at that time.
*/
if (keventd_up() && reap_work->func == NULL) {
+ init_reap_node(cpu);
INIT_WORK(reap_work, cache_reap, NULL);
schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
}
}
-static struct array_cache *alloc_arraycache(int cpu, int entries, int batchcount)
+static struct array_cache *alloc_arraycache(int node, int entries,
+ int batchcount)
{
- int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
+ int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
struct array_cache *nc = NULL;
- if (cpu != -1) {
- nc = kmem_cache_alloc_node(kmem_find_general_cachep(memsize,
- GFP_KERNEL), cpu_to_node(cpu));
- }
- if (!nc)
- nc = kmalloc(memsize, GFP_KERNEL);
+ nc = kmalloc_node(memsize, GFP_KERNEL, node);
if (nc) {
nc->avail = 0;
nc->limit = entries;
nc->batchcount = batchcount;
nc->touched = 0;
+ spin_lock_init(&nc->lock);
}
return nc;
}
-static int __devinit cpuup_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+/*
+ * Transfer objects in one arraycache to another.
+ * Locking must be handled by the caller.
+ *
+ * Return the number of entries transferred.
+ */
+static int transfer_objects(struct array_cache *to,
+ struct array_cache *from, unsigned int max)
+{
+ /* Figure out how many entries to transfer */
+ int nr = min(min(from->avail, max), to->limit - to->avail);
+
+ if (!nr)
+ return 0;
+
+	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
+			sizeof(void *) * nr);
+
+ from->avail -= nr;
+ to->avail += nr;
+ to->touched = 1;
+ return nr;
+}
+
+#ifdef CONFIG_NUMA
+static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
+static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
+
+static struct array_cache **alloc_alien_cache(int node, int limit)
+{
+ struct array_cache **ac_ptr;
+ int memsize = sizeof(void *) * MAX_NUMNODES;
+ int i;
+
+ if (limit > 1)
+ limit = 12;
+ ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
+ if (ac_ptr) {
+ for_each_node(i) {
+ if (i == node || !node_online(i)) {
+ ac_ptr[i] = NULL;
+ continue;
+ }
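+			/*
+			 * 0xbaadf00d is a poison batchcount: alien arrays
+			 * are drained wholesale by __drain_alien_cache(),
+			 * never in batches.
+			 */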
+ ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
+ if (!ac_ptr[i]) {
+				for (i--; i >= 0; i--)
+ kfree(ac_ptr[i]);
+ kfree(ac_ptr);
+ return NULL;
+ }
+ }
+ }
+ return ac_ptr;
+}
+
+static void free_alien_cache(struct array_cache **ac_ptr)
+{
+ int i;
+
+ if (!ac_ptr)
+ return;
+ for_each_node(i)
+ kfree(ac_ptr[i]);
+ kfree(ac_ptr);
+}
+
+static void __drain_alien_cache(struct kmem_cache *cachep,
+ struct array_cache *ac, int node)
+{
+ struct kmem_list3 *rl3 = cachep->nodelists[node];
+
+ if (ac->avail) {
+ spin_lock(&rl3->list_lock);
+ /*
+		 * Stuff objects into the remote node's shared array first.
+ * That way we could avoid the overhead of putting the objects
+ * into the free lists and getting them back later.
+ */
+ if (rl3->shared)
+ transfer_objects(rl3->shared, ac, ac->limit);
+
+ free_block(cachep, ac->entry, ac->avail, node);
+ ac->avail = 0;
+ spin_unlock(&rl3->list_lock);
+ }
+}
+
+/*
+ * Called from cache_reap() to regularly drain alien caches round robin.
+ */
+static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
+{
+ int node = __get_cpu_var(reap_node);
+
+ if (l3->alien) {
+ struct array_cache *ac = l3->alien[node];
+
+ if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
+ __drain_alien_cache(cachep, ac, node);
+ spin_unlock_irq(&ac->lock);
+ }
+ }
+}
+
+static void drain_alien_cache(struct kmem_cache *cachep,
+ struct array_cache **alien)
+{
+ int i = 0;
+ struct array_cache *ac;
+ unsigned long flags;
+
+ for_each_online_node(i) {
+ ac = alien[i];
+ if (ac) {
+ spin_lock_irqsave(&ac->lock, flags);
+ __drain_alien_cache(cachep, ac, i);
+ spin_unlock_irqrestore(&ac->lock, flags);
+ }
+ }
+}
+#else
+
+#define drain_alien_cache(cachep, alien) do { } while (0)
+#define reap_alien(cachep, l3) do { } while (0)
+
+static inline struct array_cache **alloc_alien_cache(int node, int limit)
+{
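+	/*
+	 * Return a non-NULL poison pointer so the !alien error check in the
+	 * caller does not trigger; it is never dereferenced, and
+	 * free_alien_cache() is a no-op in the !NUMA case.
+	 */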
+ return (struct array_cache **) 0x01020304ul;
+}
+
+static inline void free_alien_cache(struct array_cache **ac_ptr)
+{
+}
+
+#endif
+
+static int cpuup_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
- kmem_cache_t* cachep;
+ struct kmem_cache *cachep;
+ struct kmem_list3 *l3 = NULL;
+ int node = cpu_to_node(cpu);
+ int memsize = sizeof(struct kmem_list3);
switch (action) {
case CPU_UP_PREPARE:
- down(&cache_chain_sem);
+ mutex_lock(&cache_chain_mutex);
+ /*
+		 * We need to do this right at the beginning since the
+		 * alloc_arraycache() calls below are going to use this list.
+		 * kmalloc_node() allows us to add the slab to the right
+		 * kmem_list3 and not to this cpu's kmem_list3.
+ */
+
+ list_for_each_entry(cachep, &cache_chain, next) {
+ /*
+			 * Set up the kmem_list3 for this cache and node before
+			 * we can begin anything. Make sure some other cpu on
+			 * this node has not already allocated it.
+ */
+ if (!cachep->nodelists[node]) {
+ l3 = kmalloc_node(memsize, GFP_KERNEL, node);
+ if (!l3)
+ goto bad;
+ kmem_list3_init(l3);
+ l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
+ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+
+ /*
+ * The l3s don't come and go as CPUs come and
+ * go. cache_chain_mutex is sufficient
+ * protection here.
+ */
+ cachep->nodelists[node] = l3;
+ }
+
+ spin_lock_irq(&cachep->nodelists[node]->list_lock);
+ cachep->nodelists[node]->free_limit =
+ (1 + nr_cpus_node(node)) *
+ cachep->batchcount + cachep->num;
+ spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+ }
+
+ /*
+ * Now we can go ahead with allocating the shared arrays and
+ * array caches
+ */
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
+ struct array_cache *shared;
+ struct array_cache **alien;
- nc = alloc_arraycache(cpu, cachep->limit, cachep->batchcount);
+ nc = alloc_arraycache(node, cachep->limit,
+ cachep->batchcount);
if (!nc)
goto bad;
+ shared = alloc_arraycache(node,
+ cachep->shared * cachep->batchcount,
+ 0xbaadf00d);
+ if (!shared)
+ goto bad;
- spin_lock_irq(&cachep->spinlock);
+ alien = alloc_alien_cache(node, cachep->limit);
+ if (!alien)
+ goto bad;
cachep->array[cpu] = nc;
- cachep->free_limit = (1+num_online_cpus())*cachep->batchcount
- + cachep->num;
- spin_unlock_irq(&cachep->spinlock);
-
+ l3 = cachep->nodelists[node];
+ BUG_ON(!l3);
+
+ spin_lock_irq(&l3->list_lock);
+ if (!l3->shared) {
+ /*
+ * We are serialised from CPU_DEAD or
+ * CPU_UP_CANCELLED by the cpucontrol lock
+ */
+ l3->shared = shared;
+ shared = NULL;
+ }
+#ifdef CONFIG_NUMA
+ if (!l3->alien) {
+ l3->alien = alien;
+ alien = NULL;
+ }
+#endif
+ spin_unlock_irq(&l3->list_lock);
+ kfree(shared);
+ free_alien_cache(alien);
}
- up(&cache_chain_sem);
+ mutex_unlock(&cache_chain_mutex);
break;
case CPU_ONLINE:
start_cpu_timer(cpu);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
+ /*
+ * Even if all the cpus of a node are down, we don't free the
+		 * kmem_list3 of any cache. This is to avoid a race between
+		 * cpu_down and a kmalloc allocation from another cpu for
+ * memory from the node of the cpu going down. The list3
+ * structure is usually allocated from kmem_cache_create() and
+ * gets destroyed at kmem_cache_destroy().
+ */
/* fall thru */
case CPU_UP_CANCELED:
- down(&cache_chain_sem);
-
+ mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
+ struct array_cache *shared;
+ struct array_cache **alien;
+ cpumask_t mask;
- spin_lock_irq(&cachep->spinlock);
+ mask = node_to_cpumask(node);
/* cpu is dead; no one can alloc from it. */
nc = cachep->array[cpu];
cachep->array[cpu] = NULL;
- cachep->free_limit -= cachep->batchcount;
- free_block(cachep, ac_entry(nc), nc->avail);
- spin_unlock_irq(&cachep->spinlock);
+ l3 = cachep->nodelists[node];
+
+ if (!l3)
+ goto free_array_cache;
+
+ spin_lock_irq(&l3->list_lock);
+
+ /* Free limit for this kmem_list3 */
+ l3->free_limit -= cachep->batchcount;
+ if (nc)
+ free_block(cachep, nc->entry, nc->avail, node);
+
+ if (!cpus_empty(mask)) {
+ spin_unlock_irq(&l3->list_lock);
+ goto free_array_cache;
+ }
+
+ shared = l3->shared;
+ if (shared) {
+ free_block(cachep, l3->shared->entry,
+ l3->shared->avail, node);
+ l3->shared = NULL;
+ }
+
+ alien = l3->alien;
+ l3->alien = NULL;
+
+ spin_unlock_irq(&l3->list_lock);
+
+ kfree(shared);
+ if (alien) {
+ drain_alien_cache(cachep, alien);
+ free_alien_cache(alien);
+ }
+free_array_cache:
kfree(nc);
}
- up(&cache_chain_sem);
+ /*
+		 * In the previous loop, all the objects were freed to
+		 * the respective cache's slabs; now we can go ahead and
+ * shrink each nodelist to its limit.
+ */
+ list_for_each_entry(cachep, &cache_chain, next) {
+ l3 = cachep->nodelists[node];
+ if (!l3)
+ continue;
+ spin_lock_irq(&l3->list_lock);
+ /* free slabs belonging to this node */
+ __node_shrink(cachep, node);
+ spin_unlock_irq(&l3->list_lock);
+ }
+ mutex_unlock(&cache_chain_mutex);
break;
#endif
}
return NOTIFY_OK;
bad:
- up(&cache_chain_sem);
+ mutex_unlock(&cache_chain_mutex);
return NOTIFY_BAD;
}
static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
-/* Initialisation.
- * Called after the gfp() functions have been enabled, and before smp_init().
+/*
+ * swap the static kmem_list3 with kmalloced memory
+ */
+static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
+ int nodeid)
+{
+ struct kmem_list3 *ptr;
+
+ BUG_ON(cachep->nodelists[nodeid] != list);
+ ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
+ BUG_ON(!ptr);
+
+ local_irq_disable();
+ memcpy(ptr, list, sizeof(struct kmem_list3));
+ MAKE_ALL_LISTS(cachep, ptr, nodeid);
+ cachep->nodelists[nodeid] = ptr;
+ local_irq_enable();
+}
+
+/*
+ * Initialisation. Called after the page allocator has been initialised and
+ * before smp_init().
*/
void __init kmem_cache_init(void)
{
size_t left_over;
struct cache_sizes *sizes;
struct cache_names *names;
+ int i;
+ int order;
+
+ for (i = 0; i < NUM_INIT_LISTS; i++) {
+ kmem_list3_init(&initkmem_list3[i]);
+ if (i < MAX_NUMNODES)
+ cache_cache.nodelists[i] = NULL;
+ }
/*
* Fragmentation resistance on low memory - only use bigger
if (num_physpages > (32 << 20) >> PAGE_SHIFT)
slab_break_gfp_order = BREAK_GFP_ORDER_HI;
-
/* Bootstrap is tricky, because several objects are allocated
* from caches that do not exist yet:
- * 1) initialize the cache_cache cache: it contains the kmem_cache_t
- * structures of all caches, except cache_cache itself: cache_cache
- * is statically allocated.
- * Initially an __init data area is used for the head array, it's
- * replaced with a kmalloc allocated array at the end of the bootstrap.
+ * 1) initialize the cache_cache cache: it contains the struct
+ * kmem_cache structures of all caches, except cache_cache itself:
+ * cache_cache is statically allocated.
+ * Initially an __init data area is used for the head array and the
+ * kmem_list3 structures, it's replaced with a kmalloc allocated
+ * array at the end of the bootstrap.
* 2) Create the first kmalloc cache.
- * The kmem_cache_t for the new cache is allocated normally. An __init
- * data area is used for the head array.
- * 3) Create the remaining kmalloc caches, with minimally sized head arrays.
+ * The struct kmem_cache for the new cache is allocated normally.
+ * An __init data area is used for the head array.
+ * 3) Create the remaining kmalloc caches, with minimally sized
+ * head arrays.
* 4) Replace the __init data head arrays for cache_cache and the first
* kmalloc cache with kmalloc allocated arrays.
- * 5) Resize the head arrays of the kmalloc caches to their final sizes.
+ * 5) Replace the __init data for kmem_list3 for cache_cache and
+ * the other cache's with kmalloc allocated memory.
+ * 6) Resize the head arrays of the kmalloc caches to their final sizes.
*/
/* 1) create the cache_cache */
- init_MUTEX(&cache_chain_sem);
INIT_LIST_HEAD(&cache_chain);
list_add(&cache_cache.next, &cache_chain);
cache_cache.colour_off = cache_line_size();
cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
+ cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
- cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size());
+ cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
+ cache_line_size());
- cache_estimate(0, cache_cache.objsize, cache_line_size(), 0,
- &left_over, &cache_cache.num);
- if (!cache_cache.num)
- BUG();
-
- cache_cache.colour = left_over/cache_cache.colour_off;
- cache_cache.colour_next = 0;
- cache_cache.slab_size = ALIGN(cache_cache.num*sizeof(kmem_bufctl_t) +
- sizeof(struct slab), cache_line_size());
+ for (order = 0; order < MAX_ORDER; order++) {
+ cache_estimate(order, cache_cache.buffer_size,
+ cache_line_size(), 0, &left_over, &cache_cache.num);
+ if (cache_cache.num)
+ break;
+ }
+ BUG_ON(!cache_cache.num);
+ cache_cache.gfporder = order;
+ cache_cache.colour = left_over / cache_cache.colour_off;
+ cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+ sizeof(struct slab), cache_line_size());
/* 2+3) create the kmalloc caches */
sizes = malloc_sizes;
names = cache_names;
- while (sizes->cs_size) {
- /* For performance, all the general caches are L1 aligned.
+ /*
+ * Initialize the caches that provide memory for the array cache and the
+ * kmem_list3 structures first. Without this, further allocations will
+ * bug.
+ */
+
+ sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
+ sizes[INDEX_AC].cs_size,
+ ARCH_KMALLOC_MINALIGN,
+ ARCH_KMALLOC_FLAGS|SLAB_PANIC,
+ NULL, NULL);
+
+ if (INDEX_AC != INDEX_L3) {
+ sizes[INDEX_L3].cs_cachep =
+ kmem_cache_create(names[INDEX_L3].name,
+ sizes[INDEX_L3].cs_size,
+ ARCH_KMALLOC_MINALIGN,
+ ARCH_KMALLOC_FLAGS|SLAB_PANIC,
+ NULL, NULL);
+ }
+
+ while (sizes->cs_size != ULONG_MAX) {
+ /*
+ * For performance, all the general caches are L1 aligned.
* This should be particularly beneficial on SMP boxes, as it
* eliminates "false sharing".
* Note for systems short on memory removing the alignment will
- * allow tighter packing of the smaller caches. */
- sizes->cs_cachep = kmem_cache_create(names->name,
- sizes->cs_size, ARCH_KMALLOC_MINALIGN,
- (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
-
- /* Inc off-slab bufctl limit until the ceiling is hit. */
- if (!(OFF_SLAB(sizes->cs_cachep))) {
- offslab_limit = sizes->cs_size-sizeof(struct slab);
- offslab_limit /= sizeof(kmem_bufctl_t);
+ * allow tighter packing of the smaller caches.
+ */
+ if (!sizes->cs_cachep) {
+ sizes->cs_cachep = kmem_cache_create(names->name,
+ sizes->cs_size,
+ ARCH_KMALLOC_MINALIGN,
+ ARCH_KMALLOC_FLAGS|SLAB_PANIC,
+ NULL, NULL);
}
sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
- sizes->cs_size, ARCH_KMALLOC_MINALIGN,
- (ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC),
- NULL, NULL);
-
+ sizes->cs_size,
+ ARCH_KMALLOC_MINALIGN,
+ ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
+ SLAB_PANIC,
+ NULL, NULL);
sizes++;
names++;
}
/* 4) Replace the bootstrap head arrays */
{
- void * ptr;
-
+ void *ptr;
+
ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+
local_irq_disable();
- BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache);
- memcpy(ptr, ac_data(&cache_cache), sizeof(struct arraycache_init));
+ BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
+ memcpy(ptr, cpu_cache_get(&cache_cache),
+ sizeof(struct arraycache_init));
cache_cache.array[smp_processor_id()] = ptr;
local_irq_enable();
-
+
ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+
local_irq_disable();
- BUG_ON(ac_data(malloc_sizes[0].cs_cachep) != &initarray_generic.cache);
- memcpy(ptr, ac_data(malloc_sizes[0].cs_cachep),
- sizeof(struct arraycache_init));
- malloc_sizes[0].cs_cachep->array[smp_processor_id()] = ptr;
+ BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
+ != &initarray_generic.cache);
+ memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
+ sizeof(struct arraycache_init));
+ malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
+ ptr;
local_irq_enable();
}
-
- /* 5) resize the head arrays to their final sizes */
+ /* 5) Replace the bootstrap kmem_list3's */
{
- kmem_cache_t *cachep;
- down(&cache_chain_sem);
- list_for_each_entry(cachep, &cache_chain, next)
- enable_cpucache(cachep);
- up(&cache_chain_sem);
+ int node;
+ /* Replace the static kmem_list3 structures for the boot cpu */
+ init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
+ numa_node_id());
+
+ for_each_online_node(node) {
+ init_list(malloc_sizes[INDEX_AC].cs_cachep,
+ &initkmem_list3[SIZE_AC + node], node);
+
+ if (INDEX_AC != INDEX_L3) {
+ init_list(malloc_sizes[INDEX_L3].cs_cachep,
+ &initkmem_list3[SIZE_L3 + node],
+ node);
+ }
+ }
+ }
+
+ /* 6) resize the head arrays to their final sizes */
+ {
+ struct kmem_cache *cachep;
+ mutex_lock(&cache_chain_mutex);
+ list_for_each_entry(cachep, &cache_chain, next)
+ enable_cpucache(cachep);
+ mutex_unlock(&cache_chain_mutex);
}
/* Done! */
g_cpucache_up = FULL;
- /* Register a cpu startup notifier callback
- * that initializes ac_data for all new cpus
+ /*
+ * Register a cpu startup notifier callback that initializes
+ * cpu_cache_get for all new cpus
*/
register_cpu_notifier(&cpucache_notifier);
-
- /* The reap timers are started later, with a module init call:
- * That part of the kernel is not yet operational.
+ /*
+ * The reap timers are started later, with a module init call: That part
+ * of the kernel is not yet operational.
*/
}
{
int cpu;
- /*
- * Register the timers that return unneeded
- * pages to gfp.
+ /*
+ * Register the timers that return unneeded pages to the page allocator
*/
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- if (cpu_online(cpu))
- start_cpu_timer(cpu);
- }
-
+ for_each_online_cpu(cpu)
+ start_cpu_timer(cpu);
return 0;
}
-
__initcall(cpucache_init);
/*
* did not request dmaable memory, we might get it, but that
* would be relatively rare and ignorable.
*/
-static void *kmem_getpages(kmem_cache_t *cachep, int flags, int nodeid)
+static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
struct page *page;
void *addr;
int i;
flags |= cachep->gfpflags;
- if (likely(nodeid == -1)) {
- page = alloc_pages(flags, cachep->gfporder);
- } else {
- page = alloc_pages_node(nodeid, flags, cachep->gfporder);
- }
+#ifndef CONFIG_MMU
+	/* nommu uses slabs for process anonymous memory allocations, so
+	 * requires __GFP_COMP to properly refcount higher order allocations.
+ */
+ page = alloc_pages_node(nodeid, (flags | __GFP_COMP), cachep->gfporder);
+#else
+ page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+#endif
if (!page)
return NULL;
addr = page_address(page);
atomic_add(i, &slab_reclaim_pages);
add_page_state(nr_slab, i);
while (i--) {
- SetPageSlab(page);
+ __SetPageSlab(page);
page++;
}
return addr;
/*
* Interface to system's page release.
*/
-static void kmem_freepages(kmem_cache_t *cachep, void *addr)
+static void kmem_freepages(struct kmem_cache *cachep, void *addr)
{
- unsigned long i = (1<<cachep->gfporder);
+ unsigned long i = (1 << cachep->gfporder);
struct page *page = virt_to_page(addr);
const unsigned long nr_freed = i;
while (i--) {
- if (!TestClearPageSlab(page))
- BUG();
+ BUG_ON(!PageSlab(page));
+ __ClearPageSlab(page);
page++;
}
sub_page_state(nr_slab, nr_freed);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_freed;
free_pages((unsigned long)addr, cachep->gfporder);
- if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- atomic_sub(1<<cachep->gfporder, &slab_reclaim_pages);
+ if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+ atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages);
}
static void kmem_rcu_free(struct rcu_head *head)
{
- struct slab_rcu *slab_rcu = (struct slab_rcu *) head;
- kmem_cache_t *cachep = slab_rcu->cachep;
+ struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
+ struct kmem_cache *cachep = slab_rcu->cachep;
kmem_freepages(cachep, slab_rcu->addr);
if (OFF_SLAB(cachep))
#if DEBUG
#ifdef CONFIG_DEBUG_PAGEALLOC
-static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, unsigned long caller)
+static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
+ unsigned long caller)
{
- int size = obj_reallen(cachep);
+ int size = obj_size(cachep);
- addr = (unsigned long *)&((char*)addr)[obj_dbghead(cachep)];
+ addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
- if (size < 5*sizeof(unsigned long))
+ if (size < 5 * sizeof(unsigned long))
return;
- *addr++=0x12345678;
- *addr++=caller;
- *addr++=smp_processor_id();
- size -= 3*sizeof(unsigned long);
+ *addr++ = 0x12345678;
+ *addr++ = caller;
+ *addr++ = smp_processor_id();
+ size -= 3 * sizeof(unsigned long);
{
unsigned long *sptr = &caller;
unsigned long svalue;
while (!kstack_end(sptr)) {
svalue = *sptr++;
if (kernel_text_address(svalue)) {
- *addr++=svalue;
+ *addr++ = svalue;
size -= sizeof(unsigned long);
if (size <= sizeof(unsigned long))
break;
}
}
- *addr++=0x87654321;
+ *addr++ = 0x87654321;
}
#endif
-static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
+static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
- int size = obj_reallen(cachep);
- addr = &((char*)addr)[obj_dbghead(cachep)];
+ int size = obj_size(cachep);
+ addr = &((char *)addr)[obj_offset(cachep)];
memset(addr, val, size);
- *(unsigned char *)(addr+size-1) = POISON_END;
+ *(unsigned char *)(addr + size - 1) = POISON_END;
}
static void dump_line(char *data, int offset, int limit)
{
int i;
+	unsigned char total = 0, bad_count = 0;
printk(KERN_ERR "%03x:", offset);
- for (i=0;i<limit;i++) {
- printk(" %02x", (unsigned char)data[offset+i]);
+ for (i = 0; i < limit; i++) {
+ if (data[offset+i] != POISON_FREE) {
+ total += data[offset+i];
+ ++bad_count;
+ }
+ printk(" %02x", (unsigned char)data[offset + i]);
}
printk("\n");
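+	/*
+	 * If exactly one byte deviates from the poison pattern and differs
+	 * from POISON_FREE by a single bit, the corruption is more likely a
+	 * RAM error than a software overrun.
+	 */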
+ if (bad_count == 1) {
+ switch (total) {
+ case POISON_FREE ^ 0x01:
+ case POISON_FREE ^ 0x02:
+ case POISON_FREE ^ 0x04:
+ case POISON_FREE ^ 0x08:
+ case POISON_FREE ^ 0x10:
+ case POISON_FREE ^ 0x20:
+ case POISON_FREE ^ 0x40:
+ case POISON_FREE ^ 0x80:
+			printk(KERN_ERR "Single bit error detected. Possibly bad RAM.\n");
+#ifdef CONFIG_X86
+			printk(KERN_ERR "Run memtest86 or other memory test tool.\n");
+#endif
+ return;
+ }
+ }
}
#endif
#if DEBUG
-static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
+static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
int i, size;
char *realobj;
if (cachep->flags & SLAB_STORE_USER) {
printk(KERN_ERR "Last user: [<%p>]",
- *dbg_userword(cachep, objp));
+ *dbg_userword(cachep, objp));
print_symbol("(%s)",
(unsigned long)*dbg_userword(cachep, objp));
printk("\n");
}
- realobj = (char*)objp+obj_dbghead(cachep);
- size = obj_reallen(cachep);
- for (i=0; i<size && lines;i+=16, lines--) {
+ realobj = (char *)objp + obj_offset(cachep);
+ size = obj_size(cachep);
+ for (i = 0; i < size && lines; i += 16, lines--) {
int limit;
limit = 16;
- if (i+limit > size)
- limit = size-i;
+ if (i + limit > size)
+ limit = size - i;
dump_line(realobj, i, limit);
}
}
-static void check_poison_obj(kmem_cache_t *cachep, void *objp)
+static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
char *realobj;
int size, i;
int lines = 0;
- realobj = (char*)objp+obj_dbghead(cachep);
- size = obj_reallen(cachep);
+ realobj = (char *)objp + obj_offset(cachep);
+ size = obj_size(cachep);
- for (i=0;i<size;i++) {
+ for (i = 0; i < size; i++) {
char exp = POISON_FREE;
- if (i == size-1)
+ if (i == size - 1)
exp = POISON_END;
if (realobj[i] != exp) {
int limit;
/* Mismatch ! */
/* Print header */
if (lines == 0) {
- printk(KERN_ERR "Slab corruption: start=%p, len=%d\n",
- realobj, size);
+ printk(KERN_ERR
+ "Slab corruption: (%s) start=%p, len=%d\n",
+ print_tainted(), realobj, size);
print_objinfo(cachep, objp, 0);
+ dump_stack();
}
/* Hexdump the affected line */
- i = (i/16)*16;
+ i = (i / 16) * 16;
limit = 16;
- if (i+limit > size)
- limit = size-i;
+ if (i + limit > size)
+ limit = size - i;
dump_line(realobj, i, limit);
i += 16;
lines++;
/* Print some data about the neighboring objects, if they
* exist:
*/
- struct slab *slabp = GET_PAGE_SLAB(virt_to_page(objp));
- int objnr;
+ struct slab *slabp = virt_to_slab(objp);
+ unsigned int objnr;
- objnr = (objp-slabp->s_mem)/cachep->objsize;
+ objnr = obj_to_index(cachep, slabp, objp);
if (objnr) {
- objp = slabp->s_mem+(objnr-1)*cachep->objsize;
- realobj = (char*)objp+obj_dbghead(cachep);
+ objp = index_to_obj(cachep, slabp, objnr - 1);
+ realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
- realobj, size);
+ realobj, size);
print_objinfo(cachep, objp, 2);
}
- if (objnr+1 < cachep->num) {
- objp = slabp->s_mem+(objnr+1)*cachep->objsize;
- realobj = (char*)objp+obj_dbghead(cachep);
+ if (objnr + 1 < cachep->num) {
+ objp = index_to_obj(cachep, slabp, objnr + 1);
+ realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Next obj: start=%p, len=%d\n",
- realobj, size);
+ realobj, size);
print_objinfo(cachep, objp, 2);
}
}
}
#endif
-/* Destroy all the objs in a slab, and release the mem back to the system.
- * Before calling the slab must have been unlinked from the cache.
- * The cache-lock is not held/needed.
+#if DEBUG
+/**
+ * slab_destroy_objs - destroy a slab and its objects
+ * @cachep: cache pointer being destroyed
+ * @slabp: slab pointer being destroyed
+ *
+ * Call the registered destructor for each object in a slab that is being
+ * destroyed.
*/
-static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
{
- void *addr = slabp->s_mem - slabp->colouroff;
-
-#if DEBUG
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = slabp->s_mem + cachep->objsize * i;
+ void *objp = index_to_obj(cachep, slabp, i);
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->objsize%PAGE_SIZE)==0 && OFF_SLAB(cachep))
- kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE,1);
+ if (cachep->buffer_size % PAGE_SIZE == 0 &&
+ OFF_SLAB(cachep))
+ kernel_map_pages(virt_to_page(objp),
+ cachep->buffer_size / PAGE_SIZE, 1);
else
check_poison_obj(cachep, objp);
#else
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
slab_error(cachep, "start of a freed object "
- "was overwritten");
+ "was overwritten");
if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
slab_error(cachep, "end of a freed object "
- "was overwritten");
+ "was overwritten");
}
if (cachep->dtor && !(cachep->flags & SLAB_POISON))
- (cachep->dtor)(objp+obj_dbghead(cachep), cachep, 0);
+ (cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
}
+}
#else
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+{
if (cachep->dtor) {
int i;
for (i = 0; i < cachep->num; i++) {
- void* objp = slabp->s_mem+cachep->objsize*i;
- (cachep->dtor)(objp, cachep, 0);
+ void *objp = index_to_obj(cachep, slabp, i);
+ (cachep->dtor) (objp, cachep, 0);
}
}
+}
#endif
+/**
+ * slab_destroy - destroy and release all objects in a slab
+ * @cachep: cache pointer being destroyed
+ * @slabp: slab pointer being destroyed
+ *
+ * Destroy all the objs in a slab, and release the mem back to the system.
+ * Before calling the slab must have been unlinked from the cache. The
+ * cache-lock is not held/needed.
+ */
+static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
+{
+ void *addr = slabp->s_mem - slabp->colouroff;
+
+ slab_destroy_objs(cachep, slabp);
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
struct slab_rcu *slab_rcu;
- slab_rcu = (struct slab_rcu *) slabp;
+ slab_rcu = (struct slab_rcu *)slabp;
slab_rcu->cachep = cachep;
slab_rcu->addr = addr;
call_rcu(&slab_rcu->head, kmem_rcu_free);
}
}
+/*
+ * For setting up all the kmem_list3s for cache whose buffer_size is same as
+ * size of kmem_list3.
+ */
+static void set_up_list3s(struct kmem_cache *cachep, int index)
+{
+ int node;
+
+ for_each_online_node(node) {
+ cachep->nodelists[node] = &initkmem_list3[index + node];
+ cachep->nodelists[node]->next_reap = jiffies +
+ REAPTIMEOUT_LIST3 +
+ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+ }
+}
+
+/**
+ * calculate_slab_order - calculate size (page order) of slabs
+ * @cachep: pointer to the cache that is being created
+ * @size: size of objects to be created in this cache.
+ * @align: required alignment for the objects.
+ * @flags: slab allocation flags
+ *
+ * Also calculates the number of objects per slab.
+ *
+ * This could be made much more intelligent. For now, try to avoid using
+ * high order pages for slabs. When the gfp() functions are more friendly
+ * towards high-order requests, this should be changed.
+ */
+static size_t calculate_slab_order(struct kmem_cache *cachep,
+ size_t size, size_t align, unsigned long flags)
+{
+ unsigned long offslab_limit;
+ size_t left_over = 0;
+ int gfporder;
+
+ for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
+ unsigned int num;
+ size_t remainder;
+
+ cache_estimate(gfporder, size, align, flags, &remainder, &num);
+ if (!num)
+ continue;
+
+ if (flags & CFLGS_OFF_SLAB) {
+ /*
+ * Max number of objs-per-slab for caches which
+ * use off-slab slabs. Needed to avoid a possible
+ * looping condition in cache_grow().
+ */
+ offslab_limit = size - sizeof(struct slab);
+ offslab_limit /= sizeof(kmem_bufctl_t);
+
+ if (num > offslab_limit)
+ break;
+ }
+
+ /* Found something acceptable - save it away */
+ cachep->num = num;
+ cachep->gfporder = gfporder;
+ left_over = remainder;
+
+ /*
+ * A VFS-reclaimable slab tends to have most allocations
+ * as GFP_NOFS and we really don't want to have to be allocating
+ * higher-order pages when we are unable to shrink dcache.
+ */
+ if (flags & SLAB_RECLAIM_ACCOUNT)
+ break;
+
+ /*
+ * Large number of objects is good, but very large slabs are
+ * currently bad for the gfp()s.
+ */
+ if (gfporder >= slab_break_gfp_order)
+ break;
+
+ /*
+ * Acceptable internal fragmentation?
+ */
+ if (left_over * 8 <= (PAGE_SIZE << gfporder))
+ break;
+ }
+ return left_over;
+}
+
+static void setup_cpu_cache(struct kmem_cache *cachep)
+{
+ if (g_cpucache_up == FULL) {
+ enable_cpucache(cachep);
+ return;
+ }
+ if (g_cpucache_up == NONE) {
+ /*
+ * Note: the first kmem_cache_create must create the cache
+ * that's used by kmalloc(24), otherwise the creation of
+ * further caches will BUG().
+ */
+ cachep->array[smp_processor_id()] = &initarray_generic.cache;
+
+ /*
+ * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
+ * the first cache, then we need to set up all its list3s,
+ * otherwise the creation of further caches will BUG().
+ */
+ set_up_list3s(cachep, SIZE_AC);
+ if (INDEX_AC == INDEX_L3)
+ g_cpucache_up = PARTIAL_L3;
+ else
+ g_cpucache_up = PARTIAL_AC;
+ } else {
+ cachep->array[smp_processor_id()] =
+ kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+
+ if (g_cpucache_up == PARTIAL_AC) {
+ set_up_list3s(cachep, SIZE_L3);
+ g_cpucache_up = PARTIAL_L3;
+ } else {
+ int node;
+ for_each_online_node(node) {
+ cachep->nodelists[node] =
+ kmalloc_node(sizeof(struct kmem_list3),
+ GFP_KERNEL, node);
+ BUG_ON(!cachep->nodelists[node]);
+ kmem_list3_init(cachep->nodelists[node]);
+ }
+ }
+ }
+ cachep->nodelists[numa_node_id()]->next_reap =
+ jiffies + REAPTIMEOUT_LIST3 +
+ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+
+ cpu_cache_get(cachep)->avail = 0;
+ cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
+ cpu_cache_get(cachep)->batchcount = 1;
+ cpu_cache_get(cachep)->touched = 0;
+ cachep->batchcount = 1;
+ cachep->limit = BOOT_CPUCACHE_ENTRIES;
+}
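+/*
+ * Example of the bootstrap ordering handled above (a summary of the code,
+ * not new behaviour): cache_cache is set up statically, then the first
+ * kmem_cache_create() builds the kmalloc cache backing
+ * struct arraycache_init (NONE -> PARTIAL_AC), then the cache backing
+ * struct kmem_list3 (PARTIAL_AC -> PARTIAL_L3); g_cpucache_up becomes
+ * FULL elsewhere, once the general caches are initialized and
+ * enable_cpucache() can be used directly.
+ */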
+
/**
* kmem_cache_create - Create a cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
* and the @dtor is run before the pages are handed back.
*
* @name must be valid until the cache is destroyed. This implies that
- * the module calling this has to destroy the cache before getting
- * unloaded.
- *
+ * the module calling this has to destroy the cache before getting unloaded.
+ *
* The flags are
*
* %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
* %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
* for buffer overruns.
*
- * %SLAB_NO_REAP - Don't automatically reap this cache when we're under
- * memory pressure.
- *
* %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
*/
-kmem_cache_t *
+struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
- void (*dtor)(void*, kmem_cache_t *, unsigned long))
+ unsigned long flags,
+ void (*ctor)(void*, struct kmem_cache *, unsigned long),
+ void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
size_t left_over, slab_size, ralign;
- kmem_cache_t *cachep = NULL;
+ struct kmem_cache *cachep = NULL;
+ struct list_head *p;
/*
* Sanity checks... these are all serious usage bugs.
*/
- if ((!name) ||
- in_interrupt() ||
- (size < BYTES_PER_WORD) ||
- (size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
- (dtor && !ctor)) {
- printk(KERN_ERR "%s: Early error in slab %s\n",
- __FUNCTION__, name);
- BUG();
+ if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
+ (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
+ printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
+ name);
+ BUG();
+ }
+
+ /*
+ * Prevent CPUs from coming and going.
+ * lock_cpu_hotplug() nests outside cache_chain_mutex
+ */
+ lock_cpu_hotplug();
+
+ mutex_lock(&cache_chain_mutex);
+
+ list_for_each(p, &cache_chain) {
+ struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
+ mm_segment_t old_fs = get_fs();
+ char tmp;
+ int res;
+
+ /*
+ * This happens when the module gets unloaded and doesn't
+ * destroy its slab cache and no-one else reuses the vmalloc
+ * area of the module. Print a warning.
+ */
+ set_fs(KERNEL_DS);
+ res = __get_user(tmp, pc->name);
+ set_fs(old_fs);
+ if (res) {
+ printk("SLAB: cache with size %d has lost its name\n",
+ pc->buffer_size);
+ continue;
+ }
+
+ if (!strcmp(pc->name, name)) {
+ printk("kmem_cache_create: duplicate cache %s\n", name);
+ dump_stack();
+ goto oops;
}
+ }
#if DEBUG
WARN_ON(strchr(name, ' ')); /* It confuses parsers */
if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
/* No constructor, but initial state check requested */
printk(KERN_ERR "%s: No con, but init state check "
- "requested - %s\n", __FUNCTION__, name);
+ "requested - %s\n", __FUNCTION__, name);
flags &= ~SLAB_DEBUG_INITIAL;
}
-
#if FORCED_DEBUG
/*
* Enable redzoning and last user accounting, except for caches with
* above the next power of two: caches with object sizes just above a
* power of two have a significant amount of internal fragmentation.
*/
- if ((size < 4096 || fls(size-1) == fls(size-1+3*BYTES_PER_WORD)))
- flags |= SLAB_RED_ZONE|SLAB_STORE_USER;
+ if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
+ flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
if (!(flags & SLAB_DESTROY_BY_RCU))
flags |= SLAB_POISON;
#endif
BUG_ON(dtor);
/*
- * Always checks flags, a caller might be expecting debug
- * support which isn't available.
+ * Always checks flags, a caller might be expecting debug support which
+ * isn't available.
*/
- if (flags & ~CREATE_MASK)
- BUG();
+ BUG_ON(flags & ~CREATE_MASK);
- /* Check that size is in terms of words. This is needed to avoid
+ /*
+ * Check that size is in terms of words. This is needed to avoid
* unaligned accesses for some archs when redzoning is used, and makes
* sure any on-slab bufctl's are also correctly aligned.
*/
- if (size & (BYTES_PER_WORD-1)) {
- size += (BYTES_PER_WORD-1);
- size &= ~(BYTES_PER_WORD-1);
+ if (size & (BYTES_PER_WORD - 1)) {
+ size += (BYTES_PER_WORD - 1);
+ size &= ~(BYTES_PER_WORD - 1);
}
- /* calculate out the final buffer alignment: */
+ /* calculate the final buffer alignment: */
+
/* 1) arch recommendation: can be overridden for debug */
if (flags & SLAB_HWCACHE_ALIGN) {
- /* Default alignment: as specified by the arch code.
- * Except if an object is really small, then squeeze multiple
- * objects into one cacheline.
+ /*
+ * Default alignment: as specified by the arch code. Except if
+ * an object is really small, then squeeze multiple objects into
+ * one cacheline.
*/
ralign = cache_line_size();
- while (size <= ralign/2)
+ while (size <= ralign / 2)
ralign /= 2;
} else {
ralign = BYTES_PER_WORD;
if (ralign < ARCH_SLAB_MINALIGN) {
ralign = ARCH_SLAB_MINALIGN;
if (ralign > BYTES_PER_WORD)
- flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+ flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
}
/* 3) caller mandated alignment: disables debug if necessary */
if (ralign < align) {
ralign = align;
if (ralign > BYTES_PER_WORD)
- flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+ flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
}
- /* 4) Store it. Note that the debug code below can reduce
+ /*
+ * 4) Store it. Note that the debug code below can reduce
* the alignment to BYTES_PER_WORD.
*/
align = ralign;
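+	/*
+	 * Example with hypothetical numbers: under SLAB_HWCACHE_ALIGN with a
+	 * 128-byte cache line and roughly 20-byte objects, ralign above is
+	 * halved 128 -> 64 -> 32, so four objects share each cache line while
+	 * no object straddles a line boundary.
+	 */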
/* Get cache's description obj. */
- cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
+ cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL);
if (!cachep)
- goto opps;
- memset(cachep, 0, sizeof(kmem_cache_t));
+ goto oops;
#if DEBUG
- cachep->reallen = size;
+ cachep->obj_size = size;
if (flags & SLAB_RED_ZONE) {
/* redzoning only works with word aligned caches */
align = BYTES_PER_WORD;
/* add space for red zone words */
- cachep->dbghead += BYTES_PER_WORD;
- size += 2*BYTES_PER_WORD;
+ cachep->obj_offset += BYTES_PER_WORD;
+ size += 2 * BYTES_PER_WORD;
}
if (flags & SLAB_STORE_USER) {
/* user store requires word alignment and
size += BYTES_PER_WORD;
}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
- if (size > 128 && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
- cachep->dbghead += PAGE_SIZE - size;
+ if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
+ && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
+ cachep->obj_offset += PAGE_SIZE - size;
size = PAGE_SIZE;
}
#endif
#endif
/* Determine if the slab management is 'on' or 'off' slab. */
- if (size >= (PAGE_SIZE>>3))
+ if (size >= (PAGE_SIZE >> 3))
/*
* Size is large, assume best to place the slab management obj
* off-slab (should allow better packing of objs).
size = ALIGN(size, align);
- if ((flags & SLAB_RECLAIM_ACCOUNT) && size <= PAGE_SIZE) {
- /*
- * A VFS-reclaimable slab tends to have most allocations
- * as GFP_NOFS and we really don't want to have to be allocating
- * higher-order pages when we are unable to shrink dcache.
- */
- cachep->gfporder = 0;
- cache_estimate(cachep->gfporder, size, align, flags,
- &left_over, &cachep->num);
- } else {
- /*
- * Calculate size (in pages) of slabs, and the num of objs per
- * slab. This could be made much more intelligent. For now,
- * try to avoid using high page-orders for slabs. When the
- * gfp() funcs are more friendly towards high-order requests,
- * this should be changed.
- */
- do {
- unsigned int break_flag = 0;
-cal_wastage:
- cache_estimate(cachep->gfporder, size, align, flags,
- &left_over, &cachep->num);
- if (break_flag)
- break;
- if (cachep->gfporder >= MAX_GFP_ORDER)
- break;
- if (!cachep->num)
- goto next;
- if (flags & CFLGS_OFF_SLAB &&
- cachep->num > offslab_limit) {
- /* This num of objs will cause problems. */
- cachep->gfporder--;
- break_flag++;
- goto cal_wastage;
- }
-
- /*
- * Large num of objs is good, but v. large slabs are
- * currently bad for the gfp()s.
- */
- if (cachep->gfporder >= slab_break_gfp_order)
- break;
-
- if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
- break; /* Acceptable internal fragmentation. */
-next:
- cachep->gfporder++;
- } while (1);
- }
+ left_over = calculate_slab_order(cachep, size, align, flags);
if (!cachep->num) {
printk("kmem_cache_create: couldn't create cache %s.\n", name);
kmem_cache_free(&cache_cache, cachep);
cachep = NULL;
- goto opps;
+ goto oops;
}
- slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t)
- + sizeof(struct slab), align);
+ slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
+ + sizeof(struct slab), align);
/*
* If the slab has been placed off-slab, and we have enough space then
if (flags & CFLGS_OFF_SLAB) {
/* really off slab. No need for manual alignment */
- slab_size = cachep->num*sizeof(kmem_bufctl_t)+sizeof(struct slab);
+ slab_size =
+ cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
}
cachep->colour_off = cache_line_size();
/* Offset must be a multiple of the alignment. */
if (cachep->colour_off < align)
cachep->colour_off = align;
- cachep->colour = left_over/cachep->colour_off;
+ cachep->colour = left_over / cachep->colour_off;
cachep->slab_size = slab_size;
cachep->flags = flags;
cachep->gfpflags = 0;
if (flags & SLAB_CACHE_DMA)
cachep->gfpflags |= GFP_DMA;
- spin_lock_init(&cachep->spinlock);
- cachep->objsize = size;
- /* NUMA */
- INIT_LIST_HEAD(&cachep->lists.slabs_full);
- INIT_LIST_HEAD(&cachep->lists.slabs_partial);
- INIT_LIST_HEAD(&cachep->lists.slabs_free);
+ cachep->buffer_size = size;
if (flags & CFLGS_OFF_SLAB)
- cachep->slabp_cache = kmem_find_general_cachep(slab_size,0);
+ cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
cachep->ctor = ctor;
cachep->dtor = dtor;
cachep->name = name;
- /* Don't let CPUs to come and go */
- lock_cpu_hotplug();
-
- if (g_cpucache_up == FULL) {
- enable_cpucache(cachep);
- } else {
- if (g_cpucache_up == NONE) {
- /* Note: the first kmem_cache_create must create
- * the cache that's used by kmalloc(24), otherwise
- * the creation of further caches will BUG().
- */
- cachep->array[smp_processor_id()] = &initarray_generic.cache;
- g_cpucache_up = PARTIAL;
- } else {
- cachep->array[smp_processor_id()] = kmalloc(sizeof(struct arraycache_init),GFP_KERNEL);
- }
- BUG_ON(!ac_data(cachep));
- ac_data(cachep)->avail = 0;
- ac_data(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
- ac_data(cachep)->batchcount = 1;
- ac_data(cachep)->touched = 0;
- cachep->batchcount = 1;
- cachep->limit = BOOT_CPUCACHE_ENTRIES;
- cachep->free_limit = (1+num_online_cpus())*cachep->batchcount
- + cachep->num;
- }
-
- cachep->lists.next_reap = jiffies + REAPTIMEOUT_LIST3 +
- ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
-
- /* Need the semaphore to access the chain. */
- down(&cache_chain_sem);
- {
- struct list_head *p;
- mm_segment_t old_fs;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- list_for_each(p, &cache_chain) {
- kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
- char tmp;
- /* This happens when the module gets unloaded and doesn't
- destroy its slab cache and noone else reuses the vmalloc
- area of the module. Print a warning. */
- if (__get_user(tmp,pc->name)) {
- printk("SLAB: cache with size %d has lost its name\n",
- pc->objsize);
- continue;
- }
- if (!strcmp(pc->name,name)) {
- printk("kmem_cache_create: duplicate cache %s\n",name);
- up(&cache_chain_sem);
- unlock_cpu_hotplug();
- BUG();
- }
- }
- set_fs(old_fs);
- }
+ setup_cpu_cache(cachep);
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
- up(&cache_chain_sem);
- unlock_cpu_hotplug();
-opps:
+oops:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
- name);
+ name);
+ mutex_unlock(&cache_chain_mutex);
+ unlock_cpu_hotplug();
return cachep;
}
EXPORT_SYMBOL(kmem_cache_create);
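+/*
+ * Usage sketch (illustrative only; 'struct foo' and 'foo_cache' are
+ * hypothetical names, not part of this file):
+ *
+ *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
+ *				      SLAB_HWCACHE_ALIGN, NULL, NULL);
+ *	if (!foo_cache)
+ *		return -ENOMEM;
+ */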
BUG_ON(irqs_disabled());
}
-static void check_spinlock_acquired(kmem_cache_t *cachep)
+static void check_spinlock_acquired(struct kmem_cache *cachep)
+{
+#ifdef CONFIG_SMP
+ check_irq_off();
+ assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
+#endif
+}
+
+static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
{
#ifdef CONFIG_SMP
check_irq_off();
- BUG_ON(spin_trylock(&cachep->spinlock));
+ assert_spin_locked(&cachep->nodelists[node]->list_lock);
#endif
}
+
#else
#define check_irq_off() do { } while(0)
#define check_irq_on() do { } while(0)
#define check_spinlock_acquired(x) do { } while(0)
+#define check_spinlock_acquired_node(x, y) do { } while(0)
#endif
-/*
- * Waits for all CPUs to execute func().
- */
-static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
-{
- check_irq_on();
- preempt_disable();
-
- local_irq_disable();
- func(arg);
- local_irq_enable();
-
- if (smp_call_function(func, arg, 1, 1))
- BUG();
-
- preempt_enable();
-}
-
-static void drain_array_locked(kmem_cache_t* cachep,
- struct array_cache *ac, int force);
+static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+ struct array_cache *ac,
+ int force, int node);
static void do_drain(void *arg)
{
- kmem_cache_t *cachep = (kmem_cache_t*)arg;
+ struct kmem_cache *cachep = arg;
struct array_cache *ac;
+ int node = numa_node_id();
check_irq_off();
- ac = ac_data(cachep);
- spin_lock(&cachep->spinlock);
- free_block(cachep, &ac_entry(ac)[0], ac->avail);
- spin_unlock(&cachep->spinlock);
+ ac = cpu_cache_get(cachep);
+ spin_lock(&cachep->nodelists[node]->list_lock);
+ free_block(cachep, ac->entry, ac->avail, node);
+ spin_unlock(&cachep->nodelists[node]->list_lock);
ac->avail = 0;
}
-static void drain_cpu_caches(kmem_cache_t *cachep)
+static void drain_cpu_caches(struct kmem_cache *cachep)
{
- smp_call_function_all_cpus(do_drain, cachep);
+ struct kmem_list3 *l3;
+ int node;
+
+ on_each_cpu(do_drain, cachep, 1, 1);
check_irq_on();
- spin_lock_irq(&cachep->spinlock);
- if (cachep->lists.shared)
- drain_array_locked(cachep, cachep->lists.shared, 1);
- spin_unlock_irq(&cachep->spinlock);
-}
+ for_each_online_node(node) {
+ l3 = cachep->nodelists[node];
+ if (l3 && l3->alien)
+ drain_alien_cache(cachep, l3->alien);
+ }
+ for_each_online_node(node) {
+ l3 = cachep->nodelists[node];
+ if (l3)
+ drain_array(cachep, l3, l3->shared, 1, node);
+ }
+}
-/* NUMA shrink all list3s */
-static int __cache_shrink(kmem_cache_t *cachep)
+static int __node_shrink(struct kmem_cache *cachep, int node)
{
struct slab *slabp;
+ struct kmem_list3 *l3 = cachep->nodelists[node];
int ret;
- drain_cpu_caches(cachep);
-
- check_irq_on();
- spin_lock_irq(&cachep->spinlock);
-
- for(;;) {
+ for (;;) {
struct list_head *p;
- p = cachep->lists.slabs_free.prev;
- if (p == &cachep->lists.slabs_free)
+ p = l3->slabs_free.prev;
+ if (p == &l3->slabs_free)
break;
- slabp = list_entry(cachep->lists.slabs_free.prev, struct slab, list);
+ slabp = list_entry(l3->slabs_free.prev, struct slab, list);
#if DEBUG
- if (slabp->inuse)
- BUG();
+ BUG_ON(slabp->inuse);
#endif
list_del(&slabp->list);
- cachep->lists.free_objects -= cachep->num;
- spin_unlock_irq(&cachep->spinlock);
+ l3->free_objects -= cachep->num;
+ spin_unlock_irq(&l3->list_lock);
slab_destroy(cachep, slabp);
- spin_lock_irq(&cachep->spinlock);
+ spin_lock_irq(&l3->list_lock);
}
- ret = !list_empty(&cachep->lists.slabs_full) ||
- !list_empty(&cachep->lists.slabs_partial);
- spin_unlock_irq(&cachep->spinlock);
+ ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
return ret;
}
+static int __cache_shrink(struct kmem_cache *cachep)
+{
+ int ret = 0, i = 0;
+ struct kmem_list3 *l3;
+
+ drain_cpu_caches(cachep);
+
+ check_irq_on();
+ for_each_online_node(i) {
+ l3 = cachep->nodelists[i];
+ if (l3) {
+ spin_lock_irq(&l3->list_lock);
+ ret += __node_shrink(cachep, i);
+ spin_unlock_irq(&l3->list_lock);
+ }
+ }
+ return (ret ? 1 : 0);
+}
+
/**
* kmem_cache_shrink - Shrink a cache.
* @cachep: The cache to shrink.
* Releases as many slabs as possible for a cache.
* To help debugging, a zero exit status indicates all slabs were released.
*/
-int kmem_cache_shrink(kmem_cache_t *cachep)
+int kmem_cache_shrink(struct kmem_cache *cachep)
{
- if (!cachep || in_interrupt())
- BUG();
+ BUG_ON(!cachep || in_interrupt());
return __cache_shrink(cachep);
}
-
EXPORT_SYMBOL(kmem_cache_shrink);
/**
* kmem_cache_destroy - delete a cache
* @cachep: the cache to destroy
*
- * Remove a kmem_cache_t object from the slab cache.
+ * Remove a struct kmem_cache object from the slab cache.
* Returns 0 on success.
*
* It is expected this function will be called by a module when it is
* The caller must guarantee that no one will allocate memory from the cache
* during the kmem_cache_destroy().
*/
-int kmem_cache_destroy (kmem_cache_t * cachep)
+int kmem_cache_destroy(struct kmem_cache *cachep)
{
int i;
+ struct kmem_list3 *l3;
- if (!cachep || in_interrupt())
- BUG();
+ BUG_ON(!cachep || in_interrupt());
/* Don't let CPUs come and go */
lock_cpu_hotplug();
/* Find the cache in the chain of caches. */
- down(&cache_chain_sem);
+ mutex_lock(&cache_chain_mutex);
/*
* the chain is never empty, cache_cache is never destroyed
*/
list_del(&cachep->next);
- up(&cache_chain_sem);
+ mutex_unlock(&cache_chain_mutex);
if (__cache_shrink(cachep)) {
slab_error(cachep, "Can't free all objects");
- down(&cache_chain_sem);
- list_add(&cachep->next,&cache_chain);
- up(&cache_chain_sem);
+ mutex_lock(&cache_chain_mutex);
+ list_add(&cachep->next, &cache_chain);
+ mutex_unlock(&cache_chain_mutex);
unlock_cpu_hotplug();
return 1;
}
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
- synchronize_kernel();
+ synchronize_rcu();
- /* no cpu_online check required here since we clear the percpu
- * array on cpu offline and set this to NULL.
- */
- for (i = 0; i < NR_CPUS; i++)
- kfree(cachep->array[i]);
+ for_each_online_cpu(i)
+ kfree(cachep->array[i]);
/* NUMA: free the list3 structures */
- kfree(cachep->lists.shared);
- cachep->lists.shared = NULL;
+ for_each_online_node(i) {
+ l3 = cachep->nodelists[i];
+ if (l3) {
+ kfree(l3->shared);
+ free_alien_cache(l3->alien);
+ kfree(l3);
+ }
+ }
kmem_cache_free(&cache_cache, cachep);
-
unlock_cpu_hotplug();
-
return 0;
}
-
EXPORT_SYMBOL(kmem_cache_destroy);
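+/*
+ * Usage sketch (illustrative only; 'foo_cache' is hypothetical): a module
+ * must destroy the caches it created before unloading, e.g. from its exit
+ * path:
+ *
+ *	static void __exit foo_exit(void)
+ *	{
+ *		if (kmem_cache_destroy(foo_cache))
+ *			printk(KERN_ERR "foo: cache still has objects\n");
+ *	}
+ */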
/* Get the memory for a slab management obj. */
-static struct slab* alloc_slabmgmt (kmem_cache_t *cachep,
- void *objp, int colour_off, int local_flags)
+static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
+ int colour_off, gfp_t local_flags,
+ int nodeid)
{
struct slab *slabp;
-
+
if (OFF_SLAB(cachep)) {
/* Slab management obj is off-slab. */
- slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
+ slabp = kmem_cache_alloc_node(cachep->slabp_cache,
+ local_flags, nodeid);
if (!slabp)
return NULL;
} else {
- slabp = objp+colour_off;
+ slabp = objp + colour_off;
colour_off += cachep->slab_size;
}
slabp->inuse = 0;
slabp->colouroff = colour_off;
- slabp->s_mem = objp+colour_off;
-
+ slabp->s_mem = objp + colour_off;
+ slabp->nodeid = nodeid;
return slabp;
}
static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
{
- return (kmem_bufctl_t *)(slabp+1);
+ return (kmem_bufctl_t *) (slabp + 1);
}
-static void cache_init_objs (kmem_cache_t * cachep,
- struct slab * slabp, unsigned long ctor_flags)
+static void cache_init_objs(struct kmem_cache *cachep,
+ struct slab *slabp, unsigned long ctor_flags)
{
int i;
for (i = 0; i < cachep->num; i++) {
- void* objp = slabp->s_mem+cachep->objsize*i;
+ void *objp = index_to_obj(cachep, slabp, i);
#if DEBUG
/* need to poison the objs? */
if (cachep->flags & SLAB_POISON)
*dbg_redzone2(cachep, objp) = RED_INACTIVE;
}
/*
- * Constructors are not allowed to allocate memory from
- * the same cache which they are a constructor for.
- * Otherwise, deadlock. They must also be threaded.
+ * Constructors are not allowed to allocate memory from the same
+ * cache which they are a constructor for. Otherwise, deadlock.
+ * They must also be threaded.
*/
if (cachep->ctor && !(cachep->flags & SLAB_POISON))
- cachep->ctor(objp+obj_dbghead(cachep), cachep, ctor_flags);
+ cachep->ctor(objp + obj_offset(cachep), cachep,
+ ctor_flags);
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
slab_error(cachep, "constructor overwrote the"
- " end of an object");
+ " end of an object");
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
slab_error(cachep, "constructor overwrote the"
- " start of an object");
+ " start of an object");
}
- if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
- kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0);
+ if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
+ OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
+ kernel_map_pages(virt_to_page(objp),
+ cachep->buffer_size / PAGE_SIZE, 0);
#else
if (cachep->ctor)
cachep->ctor(objp, cachep, ctor_flags);
#endif
- slab_bufctl(slabp)[i] = i+1;
+ slab_bufctl(slabp)[i] = i + 1;
}
- slab_bufctl(slabp)[i-1] = BUFCTL_END;
+ slab_bufctl(slabp)[i - 1] = BUFCTL_END;
slabp->free = 0;
}
-static void kmem_flagcheck(kmem_cache_t *cachep, int flags)
+static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
{
- if (flags & SLAB_DMA) {
- if (!(cachep->gfpflags & GFP_DMA))
- BUG();
- } else {
- if (cachep->gfpflags & GFP_DMA)
- BUG();
+ if (flags & SLAB_DMA)
+ BUG_ON(!(cachep->gfpflags & GFP_DMA));
+ else
+ BUG_ON(cachep->gfpflags & GFP_DMA);
+}
+
+static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
+ int nodeid)
+{
+ void *objp = index_to_obj(cachep, slabp, slabp->free);
+ kmem_bufctl_t next;
+
+ slabp->inuse++;
+ next = slab_bufctl(slabp)[slabp->free];
+#if DEBUG
+ slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
+ WARN_ON(slabp->nodeid != nodeid);
+#endif
+ slabp->free = next;
+
+ return objp;
+}
+
+static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
+ void *objp, int nodeid)
+{
+ unsigned int objnr = obj_to_index(cachep, slabp, objp);
+
+#if DEBUG
+ /* Verify that the slab belongs to the intended node */
+ WARN_ON(slabp->nodeid != nodeid);
+
+ if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
+ printk(KERN_ERR "slab: double free detected in cache "
+ "'%s', objp %p\n", cachep->name, objp);
+ BUG();
}
+#endif
+ slab_bufctl(slabp)[objnr] = slabp->free;
+ slabp->free = objnr;
+ slabp->inuse--;
}
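+/*
+ * Worked example of the bufctl free list used by slab_get_obj() and
+ * slab_put_obj() above: after cache_init_objs() a fresh slab has
+ * slabp->free == 0 and slab_bufctl(slabp) == { 1, 2, ..., num-1,
+ * BUFCTL_END }.  slab_get_obj() pops the head (free = bufctl[free]);
+ * slab_put_obj() pushes the freed index back (bufctl[objnr] = free;
+ * free = objnr), so the array forms a singly linked list of free indices.
+ */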
-static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
+static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
+ void *objp)
{
int i;
struct page *page;
/* Nasty!!!!!! I hope this is OK. */
- i = 1 << cachep->gfporder;
page = virt_to_page(objp);
+
+ i = 1;
+ if (likely(!PageCompound(page)))
+ i <<= cachep->gfporder;
do {
- SET_PAGE_CACHE(page, cachep);
- SET_PAGE_SLAB(page, slabp);
+ page_set_cache(page, cachep);
+ page_set_slab(page, slabp);
page++;
} while (--i);
}
* Grow (by 1) the number of slabs within a cache. This is called by
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
-static int cache_grow (kmem_cache_t * cachep, int flags, int nodeid)
+static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
- struct slab *slabp;
- void *objp;
- size_t offset;
- int local_flags;
- unsigned long ctor_flags;
+ struct slab *slabp;
+ void *objp;
+ size_t offset;
+ gfp_t local_flags;
+ unsigned long ctor_flags;
+ struct kmem_list3 *l3;
- /* Be lazy and only check for valid flags here,
- * keeping it out of the critical path in kmem_cache_alloc().
+ /*
+ * Be lazy and only check for valid flags here, keeping it out of the
+ * critical path in kmem_cache_alloc().
*/
- if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
- BUG();
+ BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
if (flags & SLAB_NO_GROW)
return 0;
*/
ctor_flags |= SLAB_CTOR_ATOMIC;
- /* About to mess with non-constant members - lock. */
+ /* Take the l3 list lock to change the colour_next on this node */
check_irq_off();
- spin_lock(&cachep->spinlock);
+ l3 = cachep->nodelists[nodeid];
+ spin_lock(&l3->list_lock);
/* Get colour for the slab, and calculate the next value. */
- offset = cachep->colour_next;
- cachep->colour_next++;
- if (cachep->colour_next >= cachep->colour)
- cachep->colour_next = 0;
- offset *= cachep->colour_off;
+ offset = l3->colour_next;
+ l3->colour_next++;
+ if (l3->colour_next >= cachep->colour)
+ l3->colour_next = 0;
+ spin_unlock(&l3->list_lock);
- spin_unlock(&cachep->spinlock);
+ offset *= cachep->colour_off;
if (local_flags & __GFP_WAIT)
local_irq_enable();
*/
kmem_flagcheck(cachep, flags);
-
- /* Get mem for the objs. */
- if (!(objp = kmem_getpages(cachep, flags, nodeid)))
+ /*
+ * Get mem for the objs. Attempt to allocate a physical page from
+ * 'nodeid'.
+ */
+ objp = kmem_getpages(cachep, flags, nodeid);
+ if (!objp)
goto failed;
/* Get slab management. */
- if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags)))
+ slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
+ if (!slabp)
goto opps1;
+ slabp->nodeid = nodeid;
set_slab_attr(cachep, slabp, objp);
cache_init_objs(cachep, slabp, ctor_flags);
if (local_flags & __GFP_WAIT)
local_irq_disable();
check_irq_off();
- spin_lock(&cachep->spinlock);
+ spin_lock(&l3->list_lock);
/* Make slab active. */
- list_add_tail(&slabp->list, &(list3_data(cachep)->slabs_free));
+ list_add_tail(&slabp->list, &(l3->slabs_free));
STATS_INC_GROWN(cachep);
- list3_data(cachep)->free_objects += cachep->num;
- spin_unlock(&cachep->spinlock);
+ l3->free_objects += cachep->num;
+ spin_unlock(&l3->list_lock);
return 1;
opps1:
kmem_freepages(cachep, objp);
if (!virt_addr_valid(objp)) {
printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
- (unsigned long)objp);
- BUG();
+ (unsigned long)objp);
+ BUG();
}
page = virt_to_page(objp);
if (!PageSlab(page)) {
- printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", (unsigned long)objp);
+ printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n",
+ (unsigned long)objp);
BUG();
}
}
-static void *cache_free_debugcheck (kmem_cache_t * cachep, void * objp, void *caller)
+static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
+ void *caller)
{
struct page *page;
unsigned int objnr;
struct slab *slabp;
- objp -= obj_dbghead(cachep);
+ objp -= obj_offset(cachep);
kfree_debugcheck(objp);
page = virt_to_page(objp);
- if (GET_PAGE_CACHE(page) != cachep) {
- printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n",
- GET_PAGE_CACHE(page),cachep);
+ if (page_get_cache(page) != cachep) {
+ printk(KERN_ERR "mismatch in kmem_cache_free: expected "
+ "cache %p, got %p\n",
+ page_get_cache(page), cachep);
printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
- printk(KERN_ERR "%p is %s.\n", GET_PAGE_CACHE(page), GET_PAGE_CACHE(page)->name);
+ printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
+ page_get_cache(page)->name);
WARN_ON(1);
}
- slabp = GET_PAGE_SLAB(page);
+ slabp = page_get_slab(page);
if (cachep->flags & SLAB_RED_ZONE) {
- if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
+ if (*dbg_redzone1(cachep, objp) != RED_ACTIVE ||
+ *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
slab_error(cachep, "double free, or memory outside"
" object was overwritten");
- printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
- objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
+ printk(KERN_ERR "%p: redzone 1:0x%lx, "
+ "redzone 2:0x%lx.\n",
+ objp, *dbg_redzone1(cachep, objp),
+ *dbg_redzone2(cachep, objp));
}
*dbg_redzone1(cachep, objp) = RED_INACTIVE;
*dbg_redzone2(cachep, objp) = RED_INACTIVE;
if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = caller;
- objnr = (objp-slabp->s_mem)/cachep->objsize;
+ objnr = obj_to_index(cachep, slabp, objp);
BUG_ON(objnr >= cachep->num);
- BUG_ON(objp != slabp->s_mem + objnr*cachep->objsize);
+ BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
if (cachep->flags & SLAB_DEBUG_INITIAL) {
- /* Need to call the slab's constructor so the
- * caller can perform a verify of its state (debugging).
- * Called without the cache-lock held.
+ /*
+		 * Need to call the slab's constructor so the caller can
+		 * verify its state (debugging).  Called without the
+		 * cache-lock held.
*/
- cachep->ctor(objp+obj_dbghead(cachep),
- cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY);
+ cachep->ctor(objp + obj_offset(cachep),
+ cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
}
if (cachep->flags & SLAB_POISON && cachep->dtor) {
/* we want to cache poison the object,
* call the destruction callback
*/
- cachep->dtor(objp+obj_dbghead(cachep), cachep, 0);
+ cachep->dtor(objp + obj_offset(cachep), cachep, 0);
}
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+ slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
+#endif
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
+ if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
store_stackinfo(cachep, objp, (unsigned long)caller);
- kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0);
+ kernel_map_pages(virt_to_page(objp),
+ cachep->buffer_size / PAGE_SIZE, 0);
} else {
poison_obj(cachep, objp, POISON_FREE);
}
return objp;
}
-static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
+static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
{
- int i;
+ kmem_bufctl_t i;
int entries = 0;
-
- check_spinlock_acquired(cachep);
+
/* Check slab's freelist to see if this obj is there. */
for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
entries++;
- if (entries > cachep->num || i < 0 || i >= cachep->num)
+ if (entries > cachep->num || i >= cachep->num)
goto bad;
}
if (entries != cachep->num - slabp->inuse) {
- int i;
bad:
- printk(KERN_ERR "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
- cachep->name, cachep->num, slabp, slabp->inuse);
- for (i=0;i<sizeof(slabp)+cachep->num*sizeof(kmem_bufctl_t);i++) {
- if ((i%16)==0)
+ printk(KERN_ERR "slab: Internal list corruption detected in "
+ "cache '%s'(%d), slabp %p(%d). Hexdump:\n",
+ cachep->name, cachep->num, slabp, slabp->inuse);
+ for (i = 0;
+ i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
+ i++) {
+ if (i % 16 == 0)
printk("\n%03x:", i);
- printk(" %02x", ((unsigned char*)slabp)[i]);
+ printk(" %02x", ((unsigned char *)slabp)[i]);
}
printk("\n");
BUG();
#define check_slabp(x,y) do { } while(0)
#endif
-static void* cache_alloc_refill(kmem_cache_t* cachep, int flags)
+static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
{
int batchcount;
struct kmem_list3 *l3;
struct array_cache *ac;
check_irq_off();
- ac = ac_data(cachep);
+ ac = cpu_cache_get(cachep);
retry:
batchcount = ac->batchcount;
if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
- /* if there was little recent activity on this
- * cache, then perform only a partial refill.
- * Otherwise we could generate refill bouncing.
+ /*
+ * If there was little recent activity on this cache, then
+ * perform only a partial refill. Otherwise we could generate
+ * refill bouncing.
*/
batchcount = BATCHREFILL_LIMIT;
}
- l3 = list3_data(cachep);
+ l3 = cachep->nodelists[numa_node_id()];
+
+ BUG_ON(ac->avail > 0 || !l3);
+ spin_lock(&l3->list_lock);
+
+ /* See if we can refill from the shared array */
+ if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
+ goto alloc_done;
- BUG_ON(ac->avail > 0);
- spin_lock(&cachep->spinlock);
- if (l3->shared) {
- struct array_cache *shared_array = l3->shared;
- if (shared_array->avail) {
- if (batchcount > shared_array->avail)
- batchcount = shared_array->avail;
- shared_array->avail -= batchcount;
- ac->avail = batchcount;
- memcpy(ac_entry(ac), &ac_entry(shared_array)[shared_array->avail],
- sizeof(void*)*batchcount);
- shared_array->touched = 1;
- goto alloc_done;
- }
- }
while (batchcount > 0) {
struct list_head *entry;
struct slab *slabp;
check_slabp(cachep, slabp);
check_spinlock_acquired(cachep);
while (slabp->inuse < cachep->num && batchcount--) {
- kmem_bufctl_t next;
STATS_INC_ALLOCED(cachep);
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);
- /* get obj pointer */
- ac_entry(ac)[ac->avail++] = slabp->s_mem + slabp->free*cachep->objsize;
-
- slabp->inuse++;
- next = slab_bufctl(slabp)[slabp->free];
-#if DEBUG
- slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
-#endif
- slabp->free = next;
+ ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
+ numa_node_id());
}
check_slabp(cachep, slabp);
must_grow:
l3->free_objects -= ac->avail;
alloc_done:
- spin_unlock(&cachep->spinlock);
+ spin_unlock(&l3->list_lock);
if (unlikely(!ac->avail)) {
int x;
- x = cache_grow(cachep, flags, -1);
-
- // cache_grow can reenable interrupts, then ac could change.
- ac = ac_data(cachep);
- if (!x && ac->avail == 0) // no objects in sight? abort
+ x = cache_grow(cachep, flags, numa_node_id());
+
+ /* cache_grow can reenable interrupts, then ac could change. */
+ ac = cpu_cache_get(cachep);
+ if (!x && ac->avail == 0) /* no objects in sight? abort */
return NULL;
- if (!ac->avail) // objects refilled by interrupt?
+ if (!ac->avail) /* objects refilled by interrupt? */
goto retry;
}
ac->touched = 1;
- return ac_entry(ac)[--ac->avail];
+ return ac->entry[--ac->avail];
}
-static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, int flags)
+static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
+ gfp_t flags)
{
might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
}
#if DEBUG
-static void *
-cache_alloc_debugcheck_after(kmem_cache_t *cachep,
- unsigned long flags, void *objp, void *caller)
+static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
+ gfp_t flags, void *objp, void *caller)
{
- if (!objp)
+ if (!objp)
return objp;
- if (cachep->flags & SLAB_POISON) {
+ if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
- kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 1);
+ if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+ kernel_map_pages(virt_to_page(objp),
+ cachep->buffer_size / PAGE_SIZE, 1);
else
check_poison_obj(cachep, objp);
#else
*dbg_userword(cachep, objp) = caller;
if (cachep->flags & SLAB_RED_ZONE) {
- if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
+ if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
+ *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
slab_error(cachep, "double free, or memory outside"
" object was overwritten");
- printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
- objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
+ printk(KERN_ERR
+ "%p: redzone 1:0x%lx, redzone 2:0x%lx\n",
+ objp, *dbg_redzone1(cachep, objp),
+ *dbg_redzone2(cachep, objp));
}
*dbg_redzone1(cachep, objp) = RED_ACTIVE;
*dbg_redzone2(cachep, objp) = RED_ACTIVE;
}
- objp += obj_dbghead(cachep);
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+ {
+ struct slab *slabp;
+ unsigned objnr;
+
+ slabp = page_get_slab(virt_to_page(objp));
+ objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+ slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
+ }
+#endif
+ objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON) {
- unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
+ unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
if (!(flags & __GFP_WAIT))
ctor_flags |= SLAB_CTOR_ATOMIC;
cachep->ctor(objp, cachep, ctor_flags);
- }
+ }
return objp;
}
#else
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif
-
-static inline void * __cache_alloc (kmem_cache_t *cachep, int flags)
+static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
- unsigned long save_flags;
- void* objp;
+ void *objp;
struct array_cache *ac;
- cache_alloc_debugcheck_before(cachep, flags);
+#ifdef CONFIG_NUMA
+ if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
+ objp = alternate_node_alloc(cachep, flags);
+ if (objp != NULL)
+ return objp;
+ }
+#endif
- local_irq_save(save_flags);
- ac = ac_data(cachep);
+ check_irq_off();
+ ac = cpu_cache_get(cachep);
if (likely(ac->avail)) {
STATS_INC_ALLOCHIT(cachep);
ac->touched = 1;
- objp = ac_entry(ac)[--ac->avail];
+ objp = ac->entry[--ac->avail];
} else {
STATS_INC_ALLOCMISS(cachep);
objp = cache_alloc_refill(cachep, flags);
}
+ return objp;
+}
+
+static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
+ gfp_t flags, void *caller)
+{
+ unsigned long save_flags;
+ void *objp;
+
+ cache_alloc_debugcheck_before(cachep, flags);
+
+ local_irq_save(save_flags);
+ objp = ____cache_alloc(cachep, flags);
local_irq_restore(save_flags);
- objp = cache_alloc_debugcheck_after(cachep, flags, objp, __builtin_return_address(0));
+ objp = cache_alloc_debugcheck_after(cachep, flags, objp,
+ caller);
+ prefetchw(objp);
return objp;
}
-/*
- * NUMA: different approach needed if the spinlock is moved into
- * the l3 structure
+#ifdef CONFIG_NUMA
+/*
+ * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
+ *
+ * If we are in_interrupt, then process context, including cpusets and
+ * mempolicy, may not apply and should not be used for allocation policy.
*/
+static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+ int nid_alloc, nid_here;
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
+ if (in_interrupt())
+ return NULL;
+ nid_alloc = nid_here = numa_node_id();
+ if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
+ nid_alloc = cpuset_mem_spread_node();
+ else if (current->mempolicy)
+ nid_alloc = slab_node(current->mempolicy);
+ if (nid_alloc != nid_here)
+ return __cache_alloc_node(cachep, flags, nid_alloc);
+ return NULL;
+}
+
+/*
+ * An interface to allocate an object from a specific node (nodeid),
+ * growing the cache on that node if required.
+ */
+static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+ int nodeid)
{
- int i;
+ struct list_head *entry;
+ struct slab *slabp;
+ struct kmem_list3 *l3;
+ void *obj;
+ int x;
+
+ l3 = cachep->nodelists[nodeid];
+ BUG_ON(!l3);
+
+retry:
+ check_irq_off();
+ spin_lock(&l3->list_lock);
+ entry = l3->slabs_partial.next;
+ if (entry == &l3->slabs_partial) {
+ l3->free_touched = 1;
+ entry = l3->slabs_free.next;
+ if (entry == &l3->slabs_free)
+ goto must_grow;
+ }
+
+ slabp = list_entry(entry, struct slab, list);
+ check_spinlock_acquired_node(cachep, nodeid);
+ check_slabp(cachep, slabp);
+
+ STATS_INC_NODEALLOCS(cachep);
+ STATS_INC_ACTIVE(cachep);
+ STATS_SET_HIGH(cachep);
+
+ BUG_ON(slabp->inuse == cachep->num);
+
+ obj = slab_get_obj(cachep, slabp, nodeid);
+ check_slabp(cachep, slabp);
+ l3->free_objects--;
+ /* move slabp to correct slabp list: */
+ list_del(&slabp->list);
+
+ if (slabp->free == BUFCTL_END)
+ list_add(&slabp->list, &l3->slabs_full);
+ else
+ list_add(&slabp->list, &l3->slabs_partial);
+
+ spin_unlock(&l3->list_lock);
+ goto done;
+
+must_grow:
+ spin_unlock(&l3->list_lock);
+ x = cache_grow(cachep, flags, nodeid);
+
+ if (!x)
+ return NULL;
- check_spinlock_acquired(cachep);
+ goto retry;
+done:
+ return obj;
+}
+#endif
- /* NUMA: move add into loop */
- cachep->lists.free_objects += nr_objects;
+/*
+ * Caller must hold the correct kmem_list3 list_lock for 'node'.
+ */
+static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
+ int node)
+{
+ int i;
+ struct kmem_list3 *l3;
for (i = 0; i < nr_objects; i++) {
void *objp = objpp[i];
struct slab *slabp;
- unsigned int objnr;
- slabp = GET_PAGE_SLAB(virt_to_page(objp));
+ slabp = virt_to_slab(objp);
+ l3 = cachep->nodelists[node];
list_del(&slabp->list);
- objnr = (objp - slabp->s_mem) / cachep->objsize;
+ check_spinlock_acquired_node(cachep, node);
check_slabp(cachep, slabp);
-#if DEBUG
- if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
- printk(KERN_ERR "slab: double free detected in cache '%s', objp %p.\n",
- cachep->name, objp);
- BUG();
- }
-#endif
- slab_bufctl(slabp)[objnr] = slabp->free;
- slabp->free = objnr;
+ slab_put_obj(cachep, slabp, objp, node);
STATS_DEC_ACTIVE(cachep);
- slabp->inuse--;
+ l3->free_objects++;
check_slabp(cachep, slabp);
/* fixup slab chains */
if (slabp->inuse == 0) {
- if (cachep->lists.free_objects > cachep->free_limit) {
- cachep->lists.free_objects -= cachep->num;
+ if (l3->free_objects > l3->free_limit) {
+ l3->free_objects -= cachep->num;
slab_destroy(cachep, slabp);
} else {
- list_add(&slabp->list,
- &list3_data_ptr(cachep, objp)->slabs_free);
+ list_add(&slabp->list, &l3->slabs_free);
}
} else {
/* Unconditionally move a slab to the end of the
* partial list on free - maximum time for the
* other objects to be freed, too.
*/
- list_add_tail(&slabp->list,
- &list3_data_ptr(cachep, objp)->slabs_partial);
+ list_add_tail(&slabp->list, &l3->slabs_partial);
}
}
}
-static void cache_flusharray (kmem_cache_t* cachep, struct array_cache *ac)
+static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
int batchcount;
+ struct kmem_list3 *l3;
+ int node = numa_node_id();
batchcount = ac->batchcount;
#if DEBUG
BUG_ON(!batchcount || batchcount > ac->avail);
#endif
check_irq_off();
- spin_lock(&cachep->spinlock);
- if (cachep->lists.shared) {
- struct array_cache *shared_array = cachep->lists.shared;
- int max = shared_array->limit-shared_array->avail;
+ l3 = cachep->nodelists[node];
+ spin_lock(&l3->list_lock);
+ if (l3->shared) {
+ struct array_cache *shared_array = l3->shared;
+ int max = shared_array->limit - shared_array->avail;
if (max) {
if (batchcount > max)
batchcount = max;
- memcpy(&ac_entry(shared_array)[shared_array->avail],
- &ac_entry(ac)[0],
- sizeof(void*)*batchcount);
+ memcpy(&(shared_array->entry[shared_array->avail]),
+ ac->entry, sizeof(void *) * batchcount);
shared_array->avail += batchcount;
goto free_done;
}
}
- free_block(cachep, &ac_entry(ac)[0], batchcount);
+ free_block(cachep, ac->entry, batchcount, node);
free_done:
#if STATS
{
int i = 0;
struct list_head *p;
- p = list3_data(cachep)->slabs_free.next;
- while (p != &(list3_data(cachep)->slabs_free)) {
+ p = l3->slabs_free.next;
+ while (p != &(l3->slabs_free)) {
struct slab *slabp;
slabp = list_entry(p, struct slab, list);
STATS_SET_FREEABLE(cachep, i);
}
#endif
- spin_unlock(&cachep->spinlock);
+ spin_unlock(&l3->list_lock);
ac->avail -= batchcount;
- memmove(&ac_entry(ac)[0], &ac_entry(ac)[batchcount],
- sizeof(void*)*ac->avail);
+ memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
}
/*
- * __cache_free
- * Release an obj back to its cache. If the obj has a constructed
- * state, it must be in this state _before_ it is released.
- *
- * Called with disabled ints.
+ * Release an obj back to its cache. If the obj has a constructed state, it
+ * must be in this state _before_ it is released.  Called with interrupts
+ * disabled.
*/
-static inline void __cache_free (kmem_cache_t *cachep, void* objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp)
{
- struct array_cache *ac = ac_data(cachep);
+ struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+	/* Make sure we are not freeing an object from another
+	 * node to the array cache on this cpu.
+ */
+#ifdef CONFIG_NUMA
+ {
+ struct slab *slabp;
+ slabp = virt_to_slab(objp);
+ if (unlikely(slabp->nodeid != numa_node_id())) {
+ struct array_cache *alien = NULL;
+ int nodeid = slabp->nodeid;
+ struct kmem_list3 *l3;
+
+ l3 = cachep->nodelists[numa_node_id()];
+ STATS_INC_NODEFREES(cachep);
+ if (l3->alien && l3->alien[nodeid]) {
+ alien = l3->alien[nodeid];
+ spin_lock(&alien->lock);
+ if (unlikely(alien->avail == alien->limit)) {
+ STATS_INC_ACOVERFLOW(cachep);
+ __drain_alien_cache(cachep,
+ alien, nodeid);
+ }
+ alien->entry[alien->avail++] = objp;
+ spin_unlock(&alien->lock);
+ } else {
+ spin_lock(&(cachep->nodelists[nodeid])->
+ list_lock);
+ free_block(cachep, &objp, 1, nodeid);
+ spin_unlock(&(cachep->nodelists[nodeid])->
+ list_lock);
+ }
+ return;
+ }
+ }
+#endif
if (likely(ac->avail < ac->limit)) {
STATS_INC_FREEHIT(cachep);
- ac_entry(ac)[ac->avail++] = objp;
+ ac->entry[ac->avail++] = objp;
return;
} else {
STATS_INC_FREEMISS(cachep);
cache_flusharray(cachep, ac);
- ac_entry(ac)[ac->avail++] = objp;
+ ac->entry[ac->avail++] = objp;
}
}
* Allocate an object from this cache. The flags are only relevant
* if the cache has no available objects.
*/
-void * kmem_cache_alloc (kmem_cache_t *cachep, int flags)
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
- return __cache_alloc(cachep, flags);
+ return __cache_alloc(cachep, flags, __builtin_return_address(0));
}
-
EXPORT_SYMBOL(kmem_cache_alloc);
+/**
+ * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
+ * @cache: The cache to allocate from.
+ * @flags: See kmalloc().
+ *
+ * Allocate an object from this cache and set the allocated memory to zero.
+ * The flags are only relevant if the cache has no available objects.
+ */
+void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
+{
+ void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
+ if (ret)
+ memset(ret, 0, obj_size(cache));
+ return ret;
+}
+EXPORT_SYMBOL(kmem_cache_zalloc);
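+/*
+ * Usage sketch (illustrative only; 'foo_cache' and 'struct foo' are
+ * hypothetical):
+ *
+ *	struct foo *f = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
+ *
+ * For a cache created with object size sizeof(struct foo) this is
+ * equivalent to kmem_cache_alloc() followed by memset(f, 0, sizeof(*f)).
+ */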
+
/**
* kmem_ptr_validate - check if an untrusted pointer might
* be a slab entry.
*
* Currently only used for dentry validation.
*/
-int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
+int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
{
- unsigned long addr = (unsigned long) ptr;
+ unsigned long addr = (unsigned long)ptr;
unsigned long min_addr = PAGE_OFFSET;
- unsigned long align_mask = BYTES_PER_WORD-1;
- unsigned long size = cachep->objsize;
+ unsigned long align_mask = BYTES_PER_WORD - 1;
+ unsigned long size = cachep->buffer_size;
struct page *page;
if (unlikely(addr < min_addr))
page = virt_to_page(ptr);
if (unlikely(!PageSlab(page)))
goto out;
- if (unlikely(GET_PAGE_CACHE(page) != cachep))
+ if (unlikely(page_get_cache(page) != cachep))
goto out;
return 1;
out:
* Identical to kmem_cache_alloc, except that this function is slow
* and can sleep. And it will allocate memory on the given node, which
* can improve the performance for cpu bound structures.
+ * New and improved: it will now make sure that the object gets
+ * put on the correct node list so that there is no false sharing.
*/
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
- int loop;
- void *objp;
- struct slab *slabp;
- kmem_bufctl_t next;
-
- for (loop = 0;;loop++) {
- struct list_head *q;
-
- objp = NULL;
- check_irq_on();
- spin_lock_irq(&cachep->spinlock);
- /* walk through all partial and empty slab and find one
- * from the right node */
- list_for_each(q,&cachep->lists.slabs_partial) {
- slabp = list_entry(q, struct slab, list);
-
- if (page_to_nid(virt_to_page(slabp->s_mem)) == nodeid ||
- loop > 2)
- goto got_slabp;
- }
- list_for_each(q, &cachep->lists.slabs_free) {
- slabp = list_entry(q, struct slab, list);
-
- if (page_to_nid(virt_to_page(slabp->s_mem)) == nodeid ||
- loop > 2)
- goto got_slabp;
- }
- spin_unlock_irq(&cachep->spinlock);
-
- local_irq_disable();
- if (!cache_grow(cachep, GFP_KERNEL, nodeid)) {
- local_irq_enable();
- return NULL;
- }
- local_irq_enable();
- }
-got_slabp:
- /* found one: allocate object */
- check_slabp(cachep, slabp);
- check_spinlock_acquired(cachep);
-
- STATS_INC_ALLOCED(cachep);
- STATS_INC_ACTIVE(cachep);
- STATS_SET_HIGH(cachep);
- STATS_INC_NODEALLOCS(cachep);
-
- objp = slabp->s_mem + slabp->free*cachep->objsize;
+ unsigned long save_flags;
+ void *ptr;
- slabp->inuse++;
- next = slab_bufctl(slabp)[slabp->free];
-#if DEBUG
- slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
-#endif
- slabp->free = next;
- check_slabp(cachep, slabp);
+ cache_alloc_debugcheck_before(cachep, flags);
+ local_irq_save(save_flags);
- /* move slabp to correct slabp list: */
- list_del(&slabp->list);
- if (slabp->free == BUFCTL_END)
- list_add(&slabp->list, &cachep->lists.slabs_full);
+ if (nodeid == -1 || nodeid == numa_node_id() ||
+ !cachep->nodelists[nodeid])
+ ptr = ____cache_alloc(cachep, flags);
else
- list_add(&slabp->list, &cachep->lists.slabs_partial);
+ ptr = __cache_alloc_node(cachep, flags, nodeid);
+ local_irq_restore(save_flags);
- list3_data(cachep)->free_objects--;
- spin_unlock_irq(&cachep->spinlock);
+ ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
+ __builtin_return_address(0));
- objp = cache_alloc_debugcheck_after(cachep, GFP_KERNEL, objp,
- __builtin_return_address(0));
- return objp;
+ return ptr;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
+void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ struct kmem_cache *cachep;
+
+ cachep = kmem_find_general_cachep(size, flags);
+ if (unlikely(cachep == NULL))
+ return NULL;
+ return kmem_cache_alloc_node(cachep, flags, node);
+}
+EXPORT_SYMBOL(kmalloc_node);
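+/*
+ * Usage sketch (illustrative only; 'struct foo' is hypothetical):
+ *
+ *	struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL, node);
+ *
+ * This routes through kmem_cache_alloc_node(), which falls back to the
+ * normal local path when 'node' is -1, is the executing cpu's node, or
+ * has no nodelist set up yet.
+ */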
#endif
/**
* kmalloc - allocate memory
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate.
+ * @caller: caller's return address, used for debug tracking of the allocation
*
* kmalloc is the normal method of allocating memory
* in the kernel.
* platforms. For example, on i386, it means that the memory must come
* from the first 16MB.
*/
-void * __kmalloc (size_t size, int flags)
+static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
+ void *caller)
{
- struct cache_sizes *csizep = malloc_sizes;
+ struct kmem_cache *cachep;
- for (; csizep->cs_size; csizep++) {
- if (size > csizep->cs_size)
- continue;
-#if DEBUG
- /* This happens if someone tries to call
- * kmem_cache_create(), or kmalloc(), before
- * the generic caches are initialized.
- */
- BUG_ON(csizep->cs_cachep == NULL);
-#endif
- return __cache_alloc(flags & GFP_DMA ?
- csizep->cs_dmacachep : csizep->cs_cachep, flags);
- }
- return NULL;
+	/* If you want to save a few bytes of .text space, replace
+	 * __ with kmem_: kmalloc will then use the uninlined functions
+	 * instead of the inline functions.
+ */
+ cachep = __find_general_cachep(size, flags);
+ if (unlikely(cachep == NULL))
+ return NULL;
+ return __cache_alloc(cachep, flags, caller);
}
+
+void *__kmalloc(size_t size, gfp_t flags)
+{
+#ifndef CONFIG_DEBUG_SLAB
+ return __do_kmalloc(size, flags, NULL);
+#else
+ return __do_kmalloc(size, flags, __builtin_return_address(0));
+#endif
+}
EXPORT_SYMBOL(__kmalloc);
+#ifdef CONFIG_DEBUG_SLAB
+void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+{
+ return __do_kmalloc(size, flags, caller);
+}
+EXPORT_SYMBOL(__kmalloc_track_caller);
+#endif
+
#ifdef CONFIG_SMP
/**
* __alloc_percpu - allocate one copy of the object for every present
* Objects should be dereferenced using the per_cpu_ptr macro only.
*
* @size: how many bytes of memory are required.
- * @align: the alignment, which can't be greater than SMP_CACHE_BYTES.
*/
-void *__alloc_percpu(size_t size, size_t align)
+void *__alloc_percpu(size_t size)
{
int i;
- struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
+ struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_possible(i))
- continue;
- pdata->ptrs[i] = kmem_cache_alloc_node(
- kmem_find_general_cachep(size, GFP_KERNEL),
- cpu_to_node(i));
+ /*
+	 * Cannot use for_each_online_cpu() since a cpu may come online
+	 * later and we would have no way of fixing up the array that we
+	 * have already allocated by then.
+ */
+ for_each_possible_cpu(i) {
+ int node = cpu_to_node(i);
+
+ if (node_online(node))
+ pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL, node);
+ else
+ pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
if (!pdata->ptrs[i])
goto unwind_oom;
}
/* Catch derefs w/o wrappers */
- return (void *) (~(unsigned long) pdata);
+ return (void *)(~(unsigned long)pdata);
unwind_oom:
while (--i >= 0) {
kfree(pdata);
return NULL;
}
-
EXPORT_SYMBOL(__alloc_percpu);
#endif
* Free an object which was previously allocated from this
* cache.
*/
-void kmem_cache_free (kmem_cache_t *cachep, void *objp)
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
unsigned long flags;
__cache_free(cachep, objp);
local_irq_restore(flags);
}
-
EXPORT_SYMBOL(kmem_cache_free);
-/**
- * kcalloc - allocate memory for an array. The memory is set to zero.
- * @n: number of elements.
- * @size: element size.
- * @flags: the type of memory to allocate.
- */
-void *kcalloc(size_t n, size_t size, int flags)
-{
- void *ret = NULL;
-
- if (n != 0 && size > INT_MAX / n)
- return ret;
-
- ret = kmalloc(n * size, flags);
- if (ret)
- memset(ret, 0, n * size);
- return ret;
-}
-
-EXPORT_SYMBOL(kcalloc);
-
/**
* kfree - free previously allocated memory
* @objp: pointer returned by kmalloc.
*
+ * If @objp is NULL, no operation is performed.
+ *
* Don't free memory not originally allocated by kmalloc()
* or you will run into trouble.
*/
-void kfree (const void *objp)
+void kfree(const void *objp)
{
- kmem_cache_t *c;
+ struct kmem_cache *c;
unsigned long flags;
- if (!objp)
+ if (unlikely(!objp))
return;
local_irq_save(flags);
kfree_debugcheck(objp);
- c = GET_PAGE_CACHE(virt_to_page(objp));
- __cache_free(c, (void*)objp);
+ c = virt_to_cache(objp);
+ mutex_debug_check_no_locks_freed(objp, obj_size(c));
+ __cache_free(c, (void *)objp);
local_irq_restore(flags);
}
-
EXPORT_SYMBOL(kfree);
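+/*
+ * Usage sketch: because kfree(NULL) is a no-op, error paths may free
+ * unconditionally (illustrative only):
+ *
+ *	buf = kmalloc(len, GFP_KERNEL);
+ *	...
+ *	kfree(buf);	safe even if the allocation failed and buf is NULL
+ */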
#ifdef CONFIG_SMP
* Don't free memory not originally allocated by alloc_percpu()
* The complemented objp is to check for that.
*/
-void
-free_percpu(const void *objp)
+void free_percpu(const void *objp)
{
int i;
- struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);
+ struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp);
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_possible(i))
- continue;
- kfree(p->ptrs[i]);
- }
+ /*
+	 * We allocate for all possible cpus, so we cannot iterate over
+	 * online cpus only here.
+ */
+ for_each_possible_cpu(i)
+ kfree(p->ptrs[i]);
kfree(p);
}
-
EXPORT_SYMBOL(free_percpu);
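+/*
+ * Usage sketch, assuming the alloc_percpu()/per_cpu_ptr() wrappers from
+ * <linux/percpu.h> ('struct foo_stats' is a hypothetical type):
+ *
+ *	struct foo_stats *stats = alloc_percpu(struct foo_stats);
+ *	...
+ *	per_cpu_ptr(stats, smp_processor_id())->count++;
+ *	...
+ *	free_percpu(stats);
+ */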
#endif
-unsigned int kmem_cache_size(kmem_cache_t *cachep)
+unsigned int kmem_cache_size(struct kmem_cache *cachep)
{
- return obj_reallen(cachep);
+ return obj_size(cachep);
}
-
EXPORT_SYMBOL(kmem_cache_size);
+const char *kmem_cache_name(struct kmem_cache *cachep)
+{
+ return cachep->name;
+}
+EXPORT_SYMBOL_GPL(kmem_cache_name);
+
+/*
+ * This initializes kmem_list3 or resizes various caches for all nodes.
+ */
+static int alloc_kmemlist(struct kmem_cache *cachep)
+{
+ int node;
+ struct kmem_list3 *l3;
+ struct array_cache *new_shared;
+ struct array_cache **new_alien;
+
+ for_each_online_node(node) {
+
+ new_alien = alloc_alien_cache(node, cachep->limit);
+ if (!new_alien)
+ goto fail;
+
+ new_shared = alloc_arraycache(node,
+ cachep->shared*cachep->batchcount,
+ 0xbaadf00d);
+ if (!new_shared) {
+ free_alien_cache(new_alien);
+ goto fail;
+ }
+
+ l3 = cachep->nodelists[node];
+ if (l3) {
+ struct array_cache *shared = l3->shared;
+
+ spin_lock_irq(&l3->list_lock);
+
+ if (shared)
+ free_block(cachep, shared->entry,
+ shared->avail, node);
+
+ l3->shared = new_shared;
+ if (!l3->alien) {
+ l3->alien = new_alien;
+ new_alien = NULL;
+ }
+ l3->free_limit = (1 + nr_cpus_node(node)) *
+ cachep->batchcount + cachep->num;
+ spin_unlock_irq(&l3->list_lock);
+ kfree(shared);
+ free_alien_cache(new_alien);
+ continue;
+ }
+ l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
+ if (!l3) {
+ free_alien_cache(new_alien);
+ kfree(new_shared);
+ goto fail;
+ }
+
+ kmem_list3_init(l3);
+ l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
+ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+ l3->shared = new_shared;
+ l3->alien = new_alien;
+ l3->free_limit = (1 + nr_cpus_node(node)) *
+ cachep->batchcount + cachep->num;
+ cachep->nodelists[node] = l3;
+ }
+ return 0;
+
+fail:
+ if (!cachep->next.next) {
+ /* Cache is not active yet. Roll back what we did */
+ node--;
+ while (node >= 0) {
+ if (cachep->nodelists[node]) {
+ l3 = cachep->nodelists[node];
+
+ kfree(l3->shared);
+ free_alien_cache(l3->alien);
+ kfree(l3);
+ cachep->nodelists[node] = NULL;
+ }
+ node--;
+ }
+ }
+ return -ENOMEM;
+}
+
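/*
 * A sketch of the per-node limit used twice in alloc_kmemlist() above; the
 * helper name is hypothetical. It allows one batch per cpu on the node, one
 * extra batch of slack, plus a full slab's worth of objects before freeing
 * back to the page allocator starts.
 */
static inline int example_node_free_limit(struct kmem_cache *cachep, int node)
{
	return (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
}
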
struct ccupdate_struct {
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
struct array_cache *new[NR_CPUS];
};
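
/*
 * do_ccupdate_local() runs on every cpu via on_each_cpu() from
 * do_tune_cpucache(): with local interrupts disabled it swaps this cpu's
 * array_cache pointer with the replacement prepared in the ccupdate_struct
 * and hands the old array back for draining and freeing by the caller.
 */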
static void do_ccupdate_local(void *info)
{
- struct ccupdate_struct *new = (struct ccupdate_struct *)info;
+ struct ccupdate_struct *new = info;
struct array_cache *old;
check_irq_off();
- old = ac_data(new->cachep);
-
+ old = cpu_cache_get(new->cachep);
+
new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
new->new[smp_processor_id()] = old;
}
-
-static int do_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount, int shared)
+/* Always called with the cache_chain_mutex held */
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+ int batchcount, int shared)
{
struct ccupdate_struct new;
- struct array_cache *new_shared;
- int i;
-
- memset(&new.new,0,sizeof(new.new));
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_online(i)) {
- new.new[i] = alloc_arraycache(i, limit, batchcount);
- if (!new.new[i]) {
- for (i--; i >= 0; i--) kfree(new.new[i]);
- return -ENOMEM;
- }
- } else {
- new.new[i] = NULL;
+ int i, err;
+
+ memset(&new.new, 0, sizeof(new.new));
+ for_each_online_cpu(i) {
+ new.new[i] = alloc_arraycache(cpu_to_node(i), limit,
+ batchcount);
+ if (!new.new[i]) {
+ for (i--; i >= 0; i--)
+ kfree(new.new[i]);
+ return -ENOMEM;
}
}
new.cachep = cachep;
- smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
-
+ on_each_cpu(do_ccupdate_local, (void *)&new, 1, 1);
+
check_irq_on();
- spin_lock_irq(&cachep->spinlock);
cachep->batchcount = batchcount;
cachep->limit = limit;
- cachep->free_limit = (1+num_online_cpus())*cachep->batchcount + cachep->num;
- spin_unlock_irq(&cachep->spinlock);
+ cachep->shared = shared;
- for (i = 0; i < NR_CPUS; i++) {
+ for_each_online_cpu(i) {
struct array_cache *ccold = new.new[i];
if (!ccold)
continue;
- spin_lock_irq(&cachep->spinlock);
- free_block(cachep, ac_entry(ccold), ccold->avail);
- spin_unlock_irq(&cachep->spinlock);
+ spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
+ free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
+ spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
kfree(ccold);
}
- new_shared = alloc_arraycache(-1, batchcount*shared, 0xbaadf00d);
- if (new_shared) {
- struct array_cache *old;
- spin_lock_irq(&cachep->spinlock);
- old = cachep->lists.shared;
- cachep->lists.shared = new_shared;
- if (old)
- free_block(cachep, ac_entry(old), old->avail);
- spin_unlock_irq(&cachep->spinlock);
- kfree(old);
+ err = alloc_kmemlist(cachep);
+ if (err) {
+ printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n",
+ cachep->name, -err);
+ BUG();
}
-
return 0;
}
-
-static void enable_cpucache (kmem_cache_t *cachep)
+/* Called with cache_chain_mutex held always */
+static void enable_cpucache(struct kmem_cache *cachep)
{
int err;
int limit, shared;
- /* The head array serves three purposes:
+ /*
+ * The head array serves three purposes:
* - create a LIFO ordering, i.e. return objects that are cache-warm
* - reduce the number of spinlock operations.
- * - reduce the number of linked list operations on the slab and
+ * - reduce the number of linked list operations on the slab and
* bufctl chains: array operations are cheaper.
* The numbers are guessed, we should auto-tune as described by
* Bonwick.
*/
- if (cachep->objsize > 131072)
+ if (cachep->buffer_size > 131072)
limit = 1;
- else if (cachep->objsize > PAGE_SIZE)
+ else if (cachep->buffer_size > PAGE_SIZE)
limit = 8;
- else if (cachep->objsize > 1024)
+ else if (cachep->buffer_size > 1024)
limit = 24;
- else if (cachep->objsize > 256)
+ else if (cachep->buffer_size > 256)
limit = 54;
else
limit = 120;
- /* Cpu bound tasks (e.g. network routing) can exhibit cpu bound
+ /*
+ * CPU bound tasks (e.g. network routing) can exhibit cpu bound
* allocation behaviour: Most allocs on one cpu, most free operations
* on another cpu. For these cases, an efficient object passing between
* cpus is necessary. This is provided by a shared array. The array
	 * replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus disabled by default.
	 */
shared = 0;
#ifdef CONFIG_SMP
- if (cachep->objsize <= PAGE_SIZE)
+ if (cachep->buffer_size <= PAGE_SIZE)
shared = 8;
#endif
#if DEBUG
- /* With debugging enabled, large batchcount lead to excessively
- * long periods with disabled local interrupts. Limit the
- * batchcount
+ /*
+	 * With debugging enabled, a large batchcount leads to excessively long
+	 * periods with local interrupts disabled. Limit the batchcount.
*/
if (limit > 32)
limit = 32;
#endif
- err = do_tune_cpucache(cachep, limit, (limit+1)/2, shared);
+ err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
if (err)
printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
- cachep->name, -err);
+ cachep->name, -err);
}
-static void drain_array_locked(kmem_cache_t *cachep,
- struct array_cache *ac, int force)
+/*
+ * Drain an array if it contains any elements, taking the l3 lock only if
+ * necessary. Note that the l3 listlock also protects the array_cache
+ * if drain_array() is used on the shared array.
+ */
+void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+ struct array_cache *ac, int force, int node)
{
int tofree;
- check_spinlock_acquired(cachep);
+ if (!ac || !ac->avail)
+ return;
if (ac->touched && !force) {
ac->touched = 0;
- } else if (ac->avail) {
- tofree = force ? ac->avail : (ac->limit+4)/5;
- if (tofree > ac->avail) {
- tofree = (ac->avail+1)/2;
+ } else {
+ spin_lock_irq(&l3->list_lock);
+ if (ac->avail) {
+ tofree = force ? ac->avail : (ac->limit + 4) / 5;
+ if (tofree > ac->avail)
+ tofree = (ac->avail + 1) / 2;
+ free_block(cachep, ac->entry, tofree, node);
+ ac->avail -= tofree;
+ memmove(ac->entry, &(ac->entry[tofree]),
+ sizeof(void *) * ac->avail);
}
- free_block(cachep, ac_entry(ac), tofree);
- ac->avail -= tofree;
- memmove(&ac_entry(ac)[0], &ac_entry(ac)[tofree],
- sizeof(void*)*ac->avail);
+ spin_unlock_irq(&l3->list_lock);
}
}
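
/*
 * Sizing note for drain_array() above: unless @force is set it frees about
 * a fifth of the array's limit, (ac->limit + 4) / 5, falling back to roughly
 * half of ac->avail when fewer objects than that are cached; the memmove()
 * then keeps the surviving entries packed at the front so the array stays
 * LIFO.
 */
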
/**
* cache_reap - Reclaim memory from caches.
+ * @unused: unused parameter
*
* Called from workqueue/eventd every few seconds.
* Purpose:
* - clear the per-cpu caches for this CPU.
* - return freeable pages to the main free memory pool.
*
- * If we cannot acquire the cache chain semaphore then just give up - we'll
- * try again on the next iteration.
+ * If we cannot acquire the cache chain mutex then just give up - we'll try
+ * again on the next iteration.
*/
static void cache_reap(void *unused)
{
struct list_head *walk;
+ struct kmem_list3 *l3;
+ int node = numa_node_id();
- if (down_trylock(&cache_chain_sem)) {
+ if (!mutex_trylock(&cache_chain_mutex)) {
/* Give up. Setup the next iteration. */
- schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC + smp_processor_id());
+ schedule_delayed_work(&__get_cpu_var(reap_work),
+ REAPTIMEOUT_CPUC);
return;
}
list_for_each(walk, &cache_chain) {
- kmem_cache_t *searchp;
- struct list_head* p;
+ struct kmem_cache *searchp;
+ struct list_head *p;
int tofree;
struct slab *slabp;
- searchp = list_entry(walk, kmem_cache_t, next);
-
- if (searchp->flags & SLAB_NO_REAP)
- goto next;
-
+ searchp = list_entry(walk, struct kmem_cache, next);
check_irq_on();
- spin_lock_irq(&searchp->spinlock);
+ /*
+ * We only take the l3 lock if absolutely necessary and we
+ * have established with reasonable certainty that
+ * we can do some work if the lock was obtained.
+ */
+ l3 = searchp->nodelists[node];
- drain_array_locked(searchp, ac_data(searchp), 0);
+ reap_alien(searchp, l3);
- if(time_after(searchp->lists.next_reap, jiffies))
- goto next_unlock;
+ drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
+
+ /*
+ * These are racy checks but it does not matter
+ * if we skip one check or scan twice.
+ */
+ if (time_after(l3->next_reap, jiffies))
+ goto next;
- searchp->lists.next_reap = jiffies + REAPTIMEOUT_LIST3;
+ l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
- if (searchp->lists.shared)
- drain_array_locked(searchp, searchp->lists.shared, 0);
+ drain_array(searchp, l3, l3->shared, 0, node);
- if (searchp->lists.free_touched) {
- searchp->lists.free_touched = 0;
- goto next_unlock;
+ if (l3->free_touched) {
+ l3->free_touched = 0;
+ goto next;
}
- tofree = (searchp->free_limit+5*searchp->num-1)/(5*searchp->num);
+ tofree = (l3->free_limit + 5 * searchp->num - 1) /
+ (5 * searchp->num);
do {
- p = list3_data(searchp)->slabs_free.next;
- if (p == &(list3_data(searchp)->slabs_free))
+ /*
+ * Do not lock if there are no free blocks.
+ */
+ if (list_empty(&l3->slabs_free))
+ break;
+
+ spin_lock_irq(&l3->list_lock);
+ p = l3->slabs_free.next;
+ if (p == &(l3->slabs_free)) {
+ spin_unlock_irq(&l3->list_lock);
break;
+ }
slabp = list_entry(p, struct slab, list);
BUG_ON(slabp->inuse);
list_del(&slabp->list);
STATS_INC_REAPED(searchp);
- /* Safe to drop the lock. The slab is no longer
- * linked to the cache.
- * searchp cannot disappear, we hold
+ /*
+ * Safe to drop the lock. The slab is no longer linked
+ * to the cache. searchp cannot disappear, we hold
			 * cache_chain_mutex.
*/
- searchp->lists.free_objects -= searchp->num;
- spin_unlock_irq(&searchp->spinlock);
+ l3->free_objects -= searchp->num;
+ spin_unlock_irq(&l3->list_lock);
slab_destroy(searchp, slabp);
- spin_lock_irq(&searchp->spinlock);
- } while(--tofree > 0);
-next_unlock:
- spin_unlock_irq(&searchp->spinlock);
+ } while (--tofree > 0);
next:
cond_resched();
}
check_irq_on();
- up(&cache_chain_sem);
- /* Setup the next iteration */
- schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC + smp_processor_id());
+ mutex_unlock(&cache_chain_mutex);
+ next_reap_node();
+ /* Set up the next iteration */
+ schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
}
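
/*
 * Sizing note for cache_reap() above: tofree rounds up to
 * (l3->free_limit + 5 * num - 1) / (5 * num) slabs, so a single pass
 * destroys at most about a fifth of a node's free-object allowance and
 * leaves the rest for later invocations of the reap timer.
 */
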
#ifdef CONFIG_PROC_FS
-static void *s_start(struct seq_file *m, loff_t *pos)
+static void print_slabinfo_header(struct seq_file *m)
{
- loff_t n = *pos;
- struct list_head *p;
-
- down(&cache_chain_sem);
- if (!n) {
- /*
- * Output format version, so at least we can change it
- * without _too_ many complaints.
- */
+ /*
+ * Output format version, so at least we can change it
+ * without _too_ many complaints.
+ */
#if STATS
- seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
+ seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
- seq_puts(m, "slabinfo - version: 2.1\n");
+ seq_puts(m, "slabinfo - version: 2.1\n");
#endif
- seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
- seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
- seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
+ seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
+ "<objperslab> <pagesperslab>");
+ seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
+ seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#if STATS
- seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped>"
- " <error> <maxfreeable> <freelimit> <nodeallocs>");
- seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
+ seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
+ "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
+ seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
- seq_putc(m, '\n');
- }
+ seq_putc(m, '\n');
+}
+
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t n = *pos;
+ struct list_head *p;
+
+ mutex_lock(&cache_chain_mutex);
+ if (!n)
+ print_slabinfo_header(m);
p = cache_chain.next;
while (n--) {
p = p->next;
if (p == &cache_chain)
return NULL;
}
- return list_entry(p, kmem_cache_t, next);
+ return list_entry(p, struct kmem_cache, next);
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
- kmem_cache_t *cachep = p;
+ struct kmem_cache *cachep = p;
++*pos;
- return cachep->next.next == &cache_chain ? NULL
- : list_entry(cachep->next.next, kmem_cache_t, next);
+ return cachep->next.next == &cache_chain ?
+ NULL : list_entry(cachep->next.next, struct kmem_cache, next);
}
static void s_stop(struct seq_file *m, void *p)
{
- up(&cache_chain_sem);
+ mutex_unlock(&cache_chain_mutex);
}
static int s_show(struct seq_file *m, void *p)
{
- kmem_cache_t *cachep = p;
+ struct kmem_cache *cachep = p;
struct list_head *q;
- struct slab *slabp;
- unsigned long active_objs;
- unsigned long num_objs;
- unsigned long active_slabs = 0;
- unsigned long num_slabs;
- const char *name;
+ struct slab *slabp;
+ unsigned long active_objs;
+ unsigned long num_objs;
+ unsigned long active_slabs = 0;
+ unsigned long num_slabs, free_objects = 0, shared_avail = 0;
+ const char *name;
char *error = NULL;
+ int node;
+ struct kmem_list3 *l3;
- check_irq_on();
- spin_lock_irq(&cachep->spinlock);
active_objs = 0;
num_slabs = 0;
- list_for_each(q,&cachep->lists.slabs_full) {
- slabp = list_entry(q, struct slab, list);
- if (slabp->inuse != cachep->num && !error)
- error = "slabs_full accounting error";
- active_objs += cachep->num;
- active_slabs++;
- }
- list_for_each(q,&cachep->lists.slabs_partial) {
- slabp = list_entry(q, struct slab, list);
- if (slabp->inuse == cachep->num && !error)
- error = "slabs_partial inuse accounting error";
- if (!slabp->inuse && !error)
- error = "slabs_partial/inuse accounting error";
- active_objs += slabp->inuse;
- active_slabs++;
- }
- list_for_each(q,&cachep->lists.slabs_free) {
- slabp = list_entry(q, struct slab, list);
- if (slabp->inuse && !error)
- error = "slabs_free/inuse accounting error";
- num_slabs++;
- }
- num_slabs+=active_slabs;
- num_objs = num_slabs*cachep->num;
- if (num_objs - active_objs != cachep->lists.free_objects && !error)
+ for_each_online_node(node) {
+ l3 = cachep->nodelists[node];
+ if (!l3)
+ continue;
+
+ check_irq_on();
+ spin_lock_irq(&l3->list_lock);
+
+ list_for_each(q, &l3->slabs_full) {
+ slabp = list_entry(q, struct slab, list);
+ if (slabp->inuse != cachep->num && !error)
+ error = "slabs_full accounting error";
+ active_objs += cachep->num;
+ active_slabs++;
+ }
+ list_for_each(q, &l3->slabs_partial) {
+ slabp = list_entry(q, struct slab, list);
+ if (slabp->inuse == cachep->num && !error)
+ error = "slabs_partial inuse accounting error";
+ if (!slabp->inuse && !error)
+ error = "slabs_partial/inuse accounting error";
+ active_objs += slabp->inuse;
+ active_slabs++;
+ }
+ list_for_each(q, &l3->slabs_free) {
+ slabp = list_entry(q, struct slab, list);
+ if (slabp->inuse && !error)
+ error = "slabs_free/inuse accounting error";
+ num_slabs++;
+ }
+ free_objects += l3->free_objects;
+ if (l3->shared)
+ shared_avail += l3->shared->avail;
+
+ spin_unlock_irq(&l3->list_lock);
+ }
+ num_slabs += active_slabs;
+ num_objs = num_slabs * cachep->num;
+ if (num_objs - active_objs != free_objects && !error)
error = "free_objects accounting error";
- name = cachep->name;
+ name = cachep->name;
if (error)
printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
- name, active_objs, num_objs, cachep->objsize,
- cachep->num, (1<<cachep->gfporder));
+ name, active_objs, num_objs, cachep->buffer_size,
+ cachep->num, (1 << cachep->gfporder));
seq_printf(m, " : tunables %4u %4u %4u",
- cachep->limit, cachep->batchcount,
- cachep->lists.shared->limit/cachep->batchcount);
- seq_printf(m, " : slabdata %6lu %6lu %6u",
- active_slabs, num_slabs, cachep->lists.shared->avail);
+ cachep->limit, cachep->batchcount, cachep->shared);
+ seq_printf(m, " : slabdata %6lu %6lu %6lu",
+ active_slabs, num_slabs, shared_avail);
#if STATS
- { /* list3 stats */
+ { /* list3 stats */
unsigned long high = cachep->high_mark;
unsigned long allocs = cachep->num_allocations;
unsigned long grown = cachep->grown;
unsigned long reaped = cachep->reaped;
unsigned long errors = cachep->errors;
unsigned long max_freeable = cachep->max_freeable;
- unsigned long free_limit = cachep->free_limit;
unsigned long node_allocs = cachep->node_allocs;
+ unsigned long node_frees = cachep->node_frees;
+ unsigned long overflows = cachep->node_overflow;
- seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu",
- allocs, high, grown, reaped, errors,
- max_freeable, free_limit, node_allocs);
+		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
+			   "%4lu %4lu %4lu %4lu %4lu",
+			   allocs, high, grown,
+			   reaped, errors, max_freeable, node_allocs,
+			   node_frees, overflows);
}
/* cpu stats */
{
unsigned long freemiss = atomic_read(&cachep->freemiss);
seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
- allochit, allocmiss, freehit, freemiss);
+ allochit, allocmiss, freehit, freemiss);
}
#endif
seq_putc(m, '\n');
- spin_unlock_irq(&cachep->spinlock);
return 0;
}
/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
struct seq_operations slabinfo_op = {
- .start = s_start,
- .next = s_next,
- .stop = s_stop,
- .show = s_show,
+ .start = s_start,
+ .next = s_next,
+ .stop = s_stop,
+ .show = s_show,
};
#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
* @ppos: unused
*/
-ssize_t slabinfo_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
+ssize_t slabinfo_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
{
- char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
+ char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
int limit, batchcount, shared, res;
struct list_head *p;
-
+
if (count > MAX_SLABINFO_WRITE)
return -EINVAL;
if (copy_from_user(&kbuf, buffer, count))
return -EFAULT;
- kbuf[MAX_SLABINFO_WRITE] = '\0';
+ kbuf[MAX_SLABINFO_WRITE] = '\0';
tmp = strchr(kbuf, ' ');
if (!tmp)
return -EINVAL;
/* Find the cache in the chain of caches. */
- down(&cache_chain_sem);
+ mutex_lock(&cache_chain_mutex);
res = -EINVAL;
- list_for_each(p,&cache_chain) {
- kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
+ list_for_each(p, &cache_chain) {
+ struct kmem_cache *cachep;
+ cachep = list_entry(p, struct kmem_cache, next);
if (!strcmp(cachep->name, kbuf)) {
- if (limit < 1 ||
- batchcount < 1 ||
- batchcount > limit ||
- shared < 0) {
- res = -EINVAL;
+ if (limit < 1 || batchcount < 1 ||
+ batchcount > limit || shared < 0) {
+ res = 0;
} else {
- res = do_tune_cpucache(cachep, limit, batchcount, shared);
+ res = do_tune_cpucache(cachep, limit,
+ batchcount, shared);
}
break;
}
}
- up(&cache_chain_sem);
+ mutex_unlock(&cache_chain_mutex);
if (res >= 0)
res = count;
return res;
}
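
/*
 * Usage sketch (the cache name and numbers below are only an example):
 * tunables are written to /proc/slabinfo as
 * "cache-name limit batchcount shared", e.g.
 *
 *	echo "dentry_cache 120 60 8" > /proc/slabinfo
 *
 * which retunes the named cache through do_tune_cpucache() while holding
 * cache_chain_mutex.
 */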
+
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+
+static void *leaks_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t n = *pos;
+ struct list_head *p;
+
+ mutex_lock(&cache_chain_mutex);
+ p = cache_chain.next;
+ while (n--) {
+ p = p->next;
+ if (p == &cache_chain)
+ return NULL;
+ }
+ return list_entry(p, struct kmem_cache, next);
+}
+
+static inline int add_caller(unsigned long *n, unsigned long v)
+{
+ unsigned long *p;
+ int l;
+ if (!v)
+ return 1;
+ l = n[1];
+ p = n + 2;
+ while (l) {
+ int i = l/2;
+ unsigned long *q = p + 2 * i;
+ if (*q == v) {
+ q[1]++;
+ return 1;
+ }
+ if (*q > v) {
+ l = i;
+ } else {
+ p = q + 2;
+ l -= i + 1;
+ }
+ }
+ if (++n[1] == n[0])
+ return 0;
+ memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
+ p[0] = v;
+ p[1] = 1;
+ return 1;
+}
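
/*
 * Layout note for add_caller() above: m->private is a table of
 * (caller address, hit count) pairs kept sorted by address. n[0] is the
 * capacity in pairs, n[1] the number of pairs in use, and the pairs start
 * at n[2]. Known callers are found by binary search and their count is
 * bumped; unknown callers are inserted in place, and 0 is returned once the
 * table fills up so that leaks_show() can reallocate a larger one and retry.
 */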
+
+static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
+{
+ void *p;
+ int i;
+ if (n[0] == n[1])
+ return;
+ for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
+ if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
+ continue;
+ if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
+ return;
+ }
+}
+
+static void show_symbol(struct seq_file *m, unsigned long address)
+{
+#ifdef CONFIG_KALLSYMS
+ char *modname;
+ const char *name;
+ unsigned long offset, size;
+ char namebuf[KSYM_NAME_LEN+1];
+
+ name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
+
+ if (name) {
+ seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
+ if (modname)
+ seq_printf(m, " [%s]", modname);
+ return;
+ }
#endif
+ seq_printf(m, "%p", (void *)address);
+}
-unsigned int ksize(const void *objp)
+static int leaks_show(struct seq_file *m, void *p)
{
- kmem_cache_t *c;
- unsigned long flags;
- unsigned int size = 0;
+ struct kmem_cache *cachep = p;
+ struct list_head *q;
+ struct slab *slabp;
+ struct kmem_list3 *l3;
+ const char *name;
+ unsigned long *n = m->private;
+ int node;
+ int i;
+
+ if (!(cachep->flags & SLAB_STORE_USER))
+ return 0;
+ if (!(cachep->flags & SLAB_RED_ZONE))
+ return 0;
+
+ /* OK, we can do it */
+
+ n[1] = 0;
+
+ for_each_online_node(node) {
+ l3 = cachep->nodelists[node];
+ if (!l3)
+ continue;
+
+ check_irq_on();
+ spin_lock_irq(&l3->list_lock);
- if (likely(objp != NULL)) {
- local_irq_save(flags);
- c = GET_PAGE_CACHE(virt_to_page(objp));
- size = kmem_cache_size(c);
- local_irq_restore(flags);
+ list_for_each(q, &l3->slabs_full) {
+ slabp = list_entry(q, struct slab, list);
+ handle_slab(n, cachep, slabp);
+ }
+ list_for_each(q, &l3->slabs_partial) {
+ slabp = list_entry(q, struct slab, list);
+ handle_slab(n, cachep, slabp);
+ }
+ spin_unlock_irq(&l3->list_lock);
+ }
+ name = cachep->name;
+ if (n[0] == n[1]) {
+ /* Increase the buffer size */
+ mutex_unlock(&cache_chain_mutex);
+ m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
+ if (!m->private) {
+ /* Too bad, we are really out */
+ m->private = n;
+ mutex_lock(&cache_chain_mutex);
+ return -ENOMEM;
+ }
+ *(unsigned long *)m->private = n[0] * 2;
+ kfree(n);
+ mutex_lock(&cache_chain_mutex);
+ /* Now make sure this entry will be retried */
+ m->count = m->size;
+ return 0;
+ }
+ for (i = 0; i < n[1]; i++) {
+		seq_printf(m, "%s: %lu ", name, n[2 * i + 3]);
+		show_symbol(m, n[2 * i + 2]);
+ seq_putc(m, '\n');
}
+ return 0;
+}
+
+struct seq_operations slabstats_op = {
+ .start = leaks_start,
+ .next = s_next,
+ .stop = s_stop,
+ .show = leaks_show,
+};
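
/*
 * slabstats_op drives the CONFIG_DEBUG_SLAB_LEAK output, registered outside
 * this file as a procfs entry (conventionally /proc/slab_allocators). Each
 * line pairs a cache name with an allocation-site count and symbol, built
 * from the caller addresses stored by SLAB_STORE_USER.
 */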
+#endif
+#endif
+
+/**
+ * ksize - get the actual amount of memory allocated for a given object
+ * @objp: Pointer to the object
+ *
+ * kmalloc may internally round up allocations and return more memory
+ * than requested. ksize() can be used to determine the actual amount of
+ * memory allocated. The caller may use this additional memory, even though
+ * a smaller amount of memory was initially specified with the kmalloc call.
+ * The caller must guarantee that objp points to a valid object previously
+ * allocated with either kmalloc() or kmem_cache_alloc(). The object
+ * must not be freed during the duration of the call.
+ */
+unsigned int ksize(const void *objp)
+{
+ if (unlikely(objp == NULL))
+ return 0;
- return size;
+ return obj_size(virt_to_cache(objp));
}
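
/*
 * A minimal usage sketch: the helper below is hypothetical and not part of
 * slab.c. Because kmalloc() rounds sizes up to the next kmalloc cache,
 * ksize() may report more usable space than was requested, and a caller can
 * grow into that slack without reallocating.
 */
static inline void *example_grow_inplace(void *buf, size_t newlen)
{
	if (buf && ksize(buf) >= newlen)
		return buf;	/* the existing object is already big enough */
	return NULL;		/* caller has to kmalloc() a larger buffer */
}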