* The head array is strictly LIFO and should improve the cache hit rates.
* On SMP, it additionally reduces the spinlock operations.
*
- * The c_cpuarray may not be read with enabled local interrupts -
+ * The c_cpuarray may not be read with enabled local interrupts -
* it's changed with a smp_call_function().
*
* SMP synchronization:
* All object allocations for a node occur from node specific slab lists.
*/
+#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
-#include <linux/cpuset.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/nodemask.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
-#include <linux/rtmutex.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#if DEBUG
# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
SLAB_POISON | SLAB_HWCACHE_ALIGN | \
- SLAB_CACHE_DMA | \
+ SLAB_NO_REAP | SLAB_CACHE_DMA | \
SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
- SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
+ SLAB_DESTROY_BY_RCU)
#else
-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
+# define CREATE_MASK (SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
- SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
+ SLAB_DESTROY_BY_RCU)
#endif
/*
typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
-#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
-#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
+#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-2)
+
+/* Max number of objs-per-slab for caches which use off-slab slabs.
+ * Needed to avoid a possible looping condition in cache_grow().
+ */
+static unsigned long offslab_limit;
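/*
 * A rough sketch of where that ceiling comes from (see the loop in
 * kmem_cache_init() below): for a general cache of object size cs_size
 * that keeps its slab management on-slab, roughly
 * (cs_size - sizeof(struct slab)) / sizeof(kmem_bufctl_t) bufctl
 * entries fit, and offslab_limit is raised to the largest such value.
 */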
/*
* struct slab
unsigned int batchcount;
unsigned int touched;
spinlock_t lock;
- void *entry[0]; /*
- * Must have this definition in here for the proper
- * alignment of array_cache. Also simplifies accessing
- * the entries.
- * [0] is for gcc 2.95. It should really be [].
- */
+ void *entry[0]; /*
+ * Must have this definition in here for the proper
+ * alignment of array_cache. Also simplifies accessing
+ * the entries.
+ * [0] is for gcc 2.95. It should really be [].
+ */
};
-/*
- * bootstrap: The caches do not work without cpuarrays anymore, but the
- * cpuarrays are allocated from the generic caches...
+/* bootstrap: The caches do not work without cpuarrays anymore,
+ * but the cpuarrays are allocated from the generic caches...
*/
#define BOOT_CPUCACHE_ENTRIES 1
struct arraycache_init {
struct list_head slabs_full;
struct list_head slabs_free;
unsigned long free_objects;
+ unsigned long next_reap;
+ int free_touched;
unsigned int free_limit;
unsigned int colour_next; /* Per-node cache coloring */
spinlock_t list_lock;
struct array_cache *shared; /* shared per node */
struct array_cache **alien; /* on other nodes */
- unsigned long next_reap; /* updated without locking */
- int free_touched; /* updated without locking */
};
/*
#define SIZE_AC 1
#define SIZE_L3 (1 + MAX_NUMNODES)
-static int drain_freelist(struct kmem_cache *cache,
- struct kmem_list3 *l3, int tofree);
-static void free_block(struct kmem_cache *cachep, void **objpp, int len,
- int node);
-static void enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
-
/*
- * This function must be completely optimized away if a constant is passed to
- * it. Mostly the same as what is in linux/slab.h except it returns an index.
+ * This function must be completely optimized away if
+ * a constant is passed to it. Mostly the same as
+ * what is in linux/slab.h except it returns an
+ * index.
*/
static __always_inline int index_of(const size_t size)
{
return 0;
}
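/*
 * A sketch of how index_of() resolves (assuming the usual CACHE()
 * entries from <linux/kmalloc_sizes.h>, e.g. with 4K pages): the
 * constant size is compared against each general cache size in turn
 * and the position of the first fit is returned, roughly
 *
 *	if (size <= 32) return 0;
 *	if (size <= 64) return 1;
 *	...
 *
 * so INDEX_AC and INDEX_L3 below end up as compile-time constants.
 */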
-static int slab_early_init = 1;
-
#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))
parent->free_touched = 0;
}
-#define MAKE_LIST(cachep, listp, slab, nodeid) \
- do { \
- INIT_LIST_HEAD(listp); \
- list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
+#define MAKE_LIST(cachep, listp, slab, nodeid) \
+ do { \
+ INIT_LIST_HEAD(listp); \
+ list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
} while (0)
-#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
- do { \
+#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
+ do { \
MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
unsigned int batchcount;
unsigned int limit;
unsigned int shared;
-
unsigned int buffer_size;
-/* 3) touched by every alloc & free from the backend */
+/* 2) touched by every alloc & free from the backend */
struct kmem_list3 *nodelists[MAX_NUMNODES];
+ unsigned int flags; /* constant flags */
+ unsigned int num; /* # of objs per slab */
+ spinlock_t spinlock;
- unsigned int flags; /* constant flags */
- unsigned int num; /* # of objs per slab */
-
-/* 4) cache_grow/shrink */
+/* 3) cache_grow/shrink */
/* order of pgs per slab (2^n) */
unsigned int gfporder;
/* force GFP flags, e.g. GFP_DMA */
gfp_t gfpflags;
- size_t colour; /* cache colouring range */
+ size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
struct kmem_cache *slabp_cache;
unsigned int slab_size;
- unsigned int dflags; /* dynamic flags */
+ unsigned int dflags; /* dynamic flags */
/* constructor func */
void (*ctor) (void *, struct kmem_cache *, unsigned long);
/* de-constructor func */
void (*dtor) (void *, struct kmem_cache *, unsigned long);
-/* 5) cache creation/removal */
+/* 4) cache creation/removal */
const char *name;
struct list_head next;
-/* 6) statistics */
+/* 5) statistics */
#if STATS
unsigned long num_active;
unsigned long num_allocations;
unsigned long max_freeable;
unsigned long node_allocs;
unsigned long node_frees;
- unsigned long node_overflow;
atomic_t allochit;
atomic_t allocmiss;
atomic_t freehit;
#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
#define BATCHREFILL_LIMIT 16
-/*
- * Optimization question: fewer reaps means less probability for unnessary
- * cpucache drain/refill cycles.
+/* Optimization question: fewer reaps means less
+ * probability for unnecessary cpucache drain/refill cycles.
*
* OTOH the cpuarrays can contain lots of objects,
* which could lock up otherwise freeable slabs.
#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
#define STATS_INC_GROWN(x) ((x)->grown++)
-#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
-#define STATS_SET_HIGH(x) \
- do { \
- if ((x)->num_active > (x)->high_mark) \
- (x)->high_mark = (x)->num_active; \
- } while (0)
+#define STATS_INC_REAPED(x) ((x)->reaped++)
+#define STATS_SET_HIGH(x) do { if ((x)->num_active > (x)->high_mark) \
+ (x)->high_mark = (x)->num_active; \
+ } while (0)
#define STATS_INC_ERR(x) ((x)->errors++)
#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
-#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
-#define STATS_SET_FREEABLE(x, i) \
- do { \
- if ((x)->max_freeable < i) \
- (x)->max_freeable = i; \
- } while (0)
+#define STATS_SET_FREEABLE(x, i) \
+ do { if ((x)->max_freeable < i) \
+ (x)->max_freeable = i; \
+ } while (0)
+
#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
#define STATS_DEC_ACTIVE(x) do { } while (0)
#define STATS_INC_ALLOCED(x) do { } while (0)
#define STATS_INC_GROWN(x) do { } while (0)
-#define STATS_ADD_REAPED(x,y) do { } while (0)
+#define STATS_INC_REAPED(x) do { } while (0)
#define STATS_SET_HIGH(x) do { } while (0)
#define STATS_INC_ERR(x) do { } while (0)
#define STATS_INC_NODEALLOCS(x) do { } while (0)
#define STATS_INC_NODEFREES(x) do { } while (0)
-#define STATS_INC_ACOVERFLOW(x) do { } while (0)
-#define STATS_SET_FREEABLE(x, i) do { } while (0)
+#define STATS_SET_FREEABLE(x, i) \
+ do { } while (0)
+
#define STATS_INC_ALLOCHIT(x) do { } while (0)
#define STATS_INC_ALLOCMISS(x) do { } while (0)
#define STATS_INC_FREEHIT(x) do { } while (0)
#endif
#if DEBUG
+/* Magic nums for obj red zoning.
+ * Placed in the first word before and the first word after an obj.
+ */
+#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
+#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */
-/*
- * memory layout of objects:
+/* ...and for poisoning */
+#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
+#define POISON_FREE 0x6b /* for use-after-free poisoning */
+#define POISON_END 0xa5 /* end-byte of poisoning */
+
+/* memory layout of objects:
* 0 : objp
* 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
* the end of an object is aligned with the end of the real
* redzone word.
* cachep->obj_offset: The real object.
* cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
- * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
- * [BYTES_PER_WORD long]
+ * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
*/
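/*
 * A worked example of the layout above (a sketch, assuming 32-bit
 * BYTES_PER_WORD == 4 with red zoning and user storage enabled): a
 * 100-byte object gets obj_offset == 4 and buffer_size == 112, i.e.
 * redzone word at offset 0, the object at 4..103, the second redzone
 * word at 104 and the last caller address at 108.
 */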
static int obj_offset(struct kmem_cache *cachep)
{
#endif
/*
- * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
- * order.
+ * Maximum size of an obj (in 2^order pages)
+ * and absolute limit for the gfp order.
*/
#if defined(CONFIG_LARGE_ALLOCS)
#define MAX_OBJ_ORDER 13 /* up to 32Mb */
#define BREAK_GFP_ORDER_LO 0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
-/*
- * Functions for storing/retrieving the cachep and or slab from the page
- * allocator. These are used to find the slab an obj belongs to. With kfree(),
- * these are used to find the cache which an obj belongs to.
+/* Functions for storing/retrieving the cachep and or slab from the
+ * global 'mem_map'. These are used to find the slab an obj belongs to.
+ * With kfree(), these are used to find the cache which an obj belongs to.
*/
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
static inline struct kmem_cache *page_get_cache(struct page *page)
{
- if (unlikely(PageCompound(page)))
- page = (struct page *)page_private(page);
- BUG_ON(!PageSlab(page));
return (struct kmem_cache *)page->lru.next;
}
static inline struct slab *page_get_slab(struct page *page)
{
- if (unlikely(PageCompound(page)))
- page = (struct page *)page_private(page);
- BUG_ON(!PageSlab(page));
return (struct slab *)page->lru.prev;
}
return page_get_slab(page);
}
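/*
 * Typical use on the free path (a sketch, for a pointer that really
 * was handed out by this allocator):
 *
 *	struct page *page = virt_to_page(objp);
 *	struct kmem_cache *cachep = page_get_cache(page);
 *	struct slab *slabp = page_get_slab(page);
 */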
-static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
- unsigned int idx)
-{
- return slab->s_mem + cache->buffer_size * idx;
-}
-
-static inline unsigned int obj_to_index(struct kmem_cache *cache,
- struct slab *slab, void *obj)
-{
- return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
-}
-
-/*
- * These are the default caches for kmalloc. Custom caches can have other sizes.
- */
+/* These are the default caches for kmalloc. Custom caches can have other sizes. */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
.limit = BOOT_CPUCACHE_ENTRIES,
.shared = 1,
.buffer_size = sizeof(struct kmem_cache),
+ .flags = SLAB_NO_REAP,
+ .spinlock = SPIN_LOCK_UNLOCKED,
.name = "kmem_cache",
#if DEBUG
.obj_size = sizeof(struct kmem_cache),
#endif
};
-#define BAD_ALIEN_MAGIC 0x01020304ul
-
-#ifdef CONFIG_LOCKDEP
-
-/*
- * Slab sometimes uses the kmalloc slabs to store the slab headers
- * for other slabs "off slab".
- * The locking for this is tricky in that it nests within the locks
- * of all other slabs in a few places; to deal with this special
- * locking we put on-slab caches into a separate lock-class.
- *
- * We set lock class for alien array caches which are up during init.
- * The lock annotation will be lost if all cpus of a node goes down and
- * then comes back up during hotplug
- */
-static struct lock_class_key on_slab_l3_key;
-static struct lock_class_key on_slab_alc_key;
-
-static inline void init_lock_keys(void)
-
-{
- int q;
- struct cache_sizes *s = malloc_sizes;
-
- while (s->cs_size != ULONG_MAX) {
- for_each_node(q) {
- struct array_cache **alc;
- int r;
- struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
- if (!l3 || OFF_SLAB(s->cs_cachep))
- continue;
- lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
- alc = l3->alien;
- /*
- * FIXME: This check for BAD_ALIEN_MAGIC
- * should go away when common slab code is taught to
- * work even without alien caches.
- * Currently, non NUMA code returns BAD_ALIEN_MAGIC
- * for alloc_alien_cache,
- */
- if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
- continue;
- for_each_node(r) {
- if (alc[r])
- lockdep_set_class(&alc[r]->lock,
- &on_slab_alc_key);
- }
- }
- s++;
- }
-}
-#else
-static inline void init_lock_keys(void)
-{
-}
-#endif
-
/* Guard access to the cache-chain. */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;
/*
- * vm_enough_memory() looks at this to determine how many slab-allocated pages
- * are possibly freeable under pressure
+ * vm_enough_memory() looks at this to determine how many
+ * slab-allocated pages are possibly freeable under pressure
*
* SLAB_RECLAIM_ACCOUNT turns this on per-slab
*/
FULL
} g_cpucache_up;
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
- return g_cpucache_up == FULL;
-}
-
static DEFINE_PER_CPU(struct work_struct, reap_work);
+static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node);
+static void enable_cpucache(struct kmem_cache *cachep);
+static void cache_reap(void *unused);
+static int __node_shrink(struct kmem_cache *cachep, int node);
+
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
return cachep->array[smp_processor_id()];
}
-static inline struct kmem_cache *__find_general_cachep(size_t size,
- gfp_t gfpflags)
+static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags)
{
struct cache_sizes *csizep = malloc_sizes;
return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}
-/*
- * Calculate the number of objects and left-over bytes for a given buffer size.
- */
+/* Calculate the number of objects and left-over bytes for a given
+ buffer size. */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
size_t align, int flags, size_t *left_over,
unsigned int *num)
#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
-static void __slab_error(const char *function, struct kmem_cache *cachep,
- char *msg)
+static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg)
{
printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
function, cachep->name, msg);
node = next_node(cpu_to_node(cpu), node_online_map);
if (node == MAX_NUMNODES)
- node = first_node(node_online_map);
+ node = 0;
__get_cpu_var(reap_node) = node;
}
return nc;
}
-/*
- * Transfer objects in one arraycache to another.
- * Locking must be handled by the caller.
- *
- * Return the number of entries transferred.
- */
-static int transfer_objects(struct array_cache *to,
- struct array_cache *from, unsigned int max)
-{
- /* Figure out how many entries to transfer */
- int nr = min(min(from->avail, max), to->limit - to->avail);
-
- if (!nr)
- return 0;
-
- memcpy(to->entry + to->avail, from->entry + from->avail -nr,
- sizeof(void *) *nr);
-
- from->avail -= nr;
- to->avail += nr;
- to->touched = 1;
- return nr;
-}
-
#ifdef CONFIG_NUMA
static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
-static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
static struct array_cache **alloc_alien_cache(int node, int limit)
{
if (!ac_ptr)
return;
+
for_each_node(i)
kfree(ac_ptr[i]);
+
kfree(ac_ptr);
}
if (ac->avail) {
spin_lock(&rl3->list_lock);
- /*
- * Stuff objects into the remote nodes shared array first.
- * That way we could avoid the overhead of putting the objects
- * into the free lists and getting them back later.
- */
- if (rl3->shared)
- transfer_objects(rl3->shared, ac, ac->limit);
-
free_block(cachep, ac->entry, ac->avail, node);
ac->avail = 0;
spin_unlock(&rl3->list_lock);
if (l3->alien) {
struct array_cache *ac = l3->alien[node];
-
- if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
+ if (ac && ac->avail) {
+ spin_lock_irq(&ac->lock);
__drain_alien_cache(cachep, ac, node);
spin_unlock_irq(&ac->lock);
}
}
}
-static void drain_alien_cache(struct kmem_cache *cachep,
- struct array_cache **alien)
+static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien)
{
int i = 0;
struct array_cache *ac;
}
}
}
-
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
-{
- struct slab *slabp = virt_to_slab(objp);
- int nodeid = slabp->nodeid;
- struct kmem_list3 *l3;
- struct array_cache *alien = NULL;
-
- /*
- * Make sure we are not freeing a object from another node to the array
- * cache on this cpu.
- */
- if (likely(slabp->nodeid == numa_node_id()))
- return 0;
-
- l3 = cachep->nodelists[numa_node_id()];
- STATS_INC_NODEFREES(cachep);
- if (l3->alien && l3->alien[nodeid]) {
- alien = l3->alien[nodeid];
- spin_lock(&alien->lock);
- if (unlikely(alien->avail == alien->limit)) {
- STATS_INC_ACOVERFLOW(cachep);
- __drain_alien_cache(cachep, alien, nodeid);
- }
- alien->entry[alien->avail++] = objp;
- spin_unlock(&alien->lock);
- } else {
- spin_lock(&(cachep->nodelists[nodeid])->list_lock);
- free_block(cachep, &objp, 1, nodeid);
- spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
- }
- return 1;
-}
-
#else
#define drain_alien_cache(cachep, alien) do { } while (0)
static inline struct array_cache **alloc_alien_cache(int node, int limit)
{
- return (struct array_cache **)BAD_ALIEN_MAGIC;
+ return (struct array_cache **) 0x01020304ul;
}
static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
-{
- return 0;
-}
-
#endif
-static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+static int __devinit cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
switch (action) {
case CPU_UP_PREPARE:
mutex_lock(&cache_chain_mutex);
- /*
- * We need to do this right in the beginning since
+ /* we need to do this right in the beginning since
* alloc_arraycache's are going to use this list.
* kmalloc_node allows us to add the slab to the right
* kmem_list3 and not this cpu's kmem_list3
*/
list_for_each_entry(cachep, &cache_chain, next) {
- /*
- * Set up the size64 kmemlist for cpu before we can
+ /* setup the size64 kmemlist for cpu before we can
* begin anything. Make sure some other cpu on this
* node has not already allocated this
*/
if (!cachep->nodelists[node]) {
- l3 = kmalloc_node(memsize, GFP_KERNEL, node);
- if (!l3)
+ if (!(l3 = kmalloc_node(memsize,
+ GFP_KERNEL, node)))
goto bad;
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
spin_lock_irq(&cachep->nodelists[node]->list_lock);
cachep->nodelists[node]->free_limit =
- (1 + nr_cpus_node(node)) *
- cachep->batchcount + cachep->num;
+ (1 + nr_cpus_node(node)) *
+ cachep->batchcount + cachep->num;
spin_unlock_irq(&cachep->nodelists[node]->list_lock);
}
- /*
- * Now we can go ahead with allocating the shared arrays and
- * array caches
- */
+	/* Now we can go ahead with allocating the shared arrays
+	   & array caches */
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
struct array_cache *shared;
if (!alien)
goto bad;
cachep->array[cpu] = nc;
+
l3 = cachep->nodelists[node];
BUG_ON(!l3);
}
#endif
spin_unlock_irq(&l3->list_lock);
+
kfree(shared);
free_alien_cache(alien);
}
/* fall thru */
case CPU_UP_CANCELED:
mutex_lock(&cache_chain_mutex);
+
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
struct array_cache *shared;
l3 = cachep->nodelists[node];
if (!l3)
continue;
- drain_freelist(cachep, l3, l3->free_objects);
+ spin_lock_irq(&l3->list_lock);
+ /* free slabs belonging to this node */
+ __node_shrink(cachep, node);
+ spin_unlock_irq(&l3->list_lock);
}
mutex_unlock(&cache_chain_mutex);
break;
#endif
}
return NOTIFY_OK;
-bad:
+ bad:
mutex_unlock(&cache_chain_mutex);
return NOTIFY_BAD;
}
-static struct notifier_block __cpuinitdata cpucache_notifier = {
- &cpuup_callback, NULL, 0
-};
+static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
/*
* swap the static kmem_list3 with kmalloced memory
*/
-static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
- int nodeid)
+static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid)
{
struct kmem_list3 *ptr;
local_irq_disable();
memcpy(ptr, list, sizeof(struct kmem_list3));
- /*
- * Do not assume that spinlocks can be initialized via memcpy:
- */
- spin_lock_init(&ptr->list_lock);
-
MAKE_ALL_LISTS(cachep, ptr, nodeid);
cachep->nodelists[nodeid] = ptr;
local_irq_enable();
}
-/*
- * Initialisation. Called after the page allocator have been initialised and
- * before smp_init().
+/* Initialisation.
+ * Called after the gfp() functions have been enabled, and before smp_init().
*/
void __init kmem_cache_init(void)
{
/* Bootstrap is tricky, because several objects are allocated
* from caches that do not exist yet:
- * 1) initialize the cache_cache cache: it contains the struct
- * kmem_cache structures of all caches, except cache_cache itself:
- * cache_cache is statically allocated.
+ * 1) initialize the cache_cache cache: it contains the struct kmem_cache
+ * structures of all caches, except cache_cache itself: cache_cache
+ * is statically allocated.
* Initially an __init data area is used for the head array and the
* kmem_list3 structures, it's replaced with a kmalloc allocated
* array at the end of the bootstrap.
cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
- cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
- cache_line_size());
+ cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
for (order = 0; order < MAX_ORDER; order++) {
cache_estimate(order, cache_cache.buffer_size,
if (cache_cache.num)
break;
}
- BUG_ON(!cache_cache.num);
+ if (!cache_cache.num)
+ BUG();
cache_cache.gfporder = order;
cache_cache.colour = left_over / cache_cache.colour_off;
cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
sizes = malloc_sizes;
names = cache_names;
- /*
- * Initialize the caches that provide memory for the array cache and the
- * kmem_list3 structures first. Without this, further allocations will
- * bug.
+ /* Initialize the caches that provide memory for the array cache
+ * and the kmem_list3 structures first.
+ * Without this, further allocations will bug
*/
sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
- sizes[INDEX_AC].cs_size,
- ARCH_KMALLOC_MINALIGN,
- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
- NULL, NULL);
+ sizes[INDEX_AC].cs_size,
+ ARCH_KMALLOC_MINALIGN,
+ (ARCH_KMALLOC_FLAGS |
+ SLAB_PANIC), NULL, NULL);
- if (INDEX_AC != INDEX_L3) {
+ if (INDEX_AC != INDEX_L3)
sizes[INDEX_L3].cs_cachep =
- kmem_cache_create(names[INDEX_L3].name,
- sizes[INDEX_L3].cs_size,
- ARCH_KMALLOC_MINALIGN,
- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
- NULL, NULL);
- }
-
- slab_early_init = 0;
+ kmem_cache_create(names[INDEX_L3].name,
+ sizes[INDEX_L3].cs_size,
+ ARCH_KMALLOC_MINALIGN,
+ (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL,
+ NULL);
while (sizes->cs_size != ULONG_MAX) {
/*
* Note for systems short on memory removing the alignment will
* allow tighter packing of the smaller caches.
*/
- if (!sizes->cs_cachep) {
+ if (!sizes->cs_cachep)
sizes->cs_cachep = kmem_cache_create(names->name,
- sizes->cs_size,
- ARCH_KMALLOC_MINALIGN,
- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
- NULL, NULL);
+ sizes->cs_size,
+ ARCH_KMALLOC_MINALIGN,
+ (ARCH_KMALLOC_FLAGS
+ | SLAB_PANIC),
+ NULL, NULL);
+
+ /* Inc off-slab bufctl limit until the ceiling is hit. */
+ if (!(OFF_SLAB(sizes->cs_cachep))) {
+ offslab_limit = sizes->cs_size - sizeof(struct slab);
+ offslab_limit /= sizeof(kmem_bufctl_t);
}
sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
- sizes->cs_size,
- ARCH_KMALLOC_MINALIGN,
- ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
- SLAB_PANIC,
- NULL, NULL);
+ sizes->cs_size,
+ ARCH_KMALLOC_MINALIGN,
+ (ARCH_KMALLOC_FLAGS |
+ SLAB_CACHE_DMA |
+ SLAB_PANIC), NULL,
+ NULL);
+
sizes++;
names++;
}
/* 4) Replace the bootstrap head arrays */
{
- struct array_cache *ptr;
+ void *ptr;
ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
memcpy(ptr, cpu_cache_get(&cache_cache),
sizeof(struct arraycache_init));
- /*
- * Do not assume that spinlocks can be initialized via memcpy:
- */
- spin_lock_init(&ptr->lock);
-
cache_cache.array[smp_processor_id()] = ptr;
local_irq_enable();
!= &initarray_generic.cache);
memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
sizeof(struct arraycache_init));
- /*
- * Do not assume that spinlocks can be initialized via memcpy:
- */
- spin_lock_init(&ptr->lock);
-
malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
ptr;
local_irq_enable();
struct kmem_cache *cachep;
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next)
- enable_cpucache(cachep);
+ enable_cpucache(cachep);
mutex_unlock(&cache_chain_mutex);
}
- /* Annotate slab for lockdep -- annotate the malloc caches */
- init_lock_keys();
-
-
/* Done! */
g_cpucache_up = FULL;
- /*
- * Register a cpu startup notifier callback that initializes
- * cpu_cache_get for all new cpus
+ /* Register a cpu startup notifier callback
+ * that initializes cpu_cache_get for all new cpus
*/
register_cpu_notifier(&cpucache_notifier);
- /*
- * The reap timers are started later, with a module init call: That part
- * of the kernel is not yet operational.
+ /* The reap timers are started later, with a module init call:
+ * That part of the kernel is not yet operational.
*/
}
{
int cpu;
- /*
- * Register the timers that return unneeded pages to the page allocator
+ /*
+ * Register the timers that return unneeded
+ * pages to gfp.
*/
for_each_online_cpu(cpu)
- start_cpu_timer(cpu);
+ start_cpu_timer(cpu);
+
return 0;
}
+
__initcall(cpucache_init);
/*
static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
struct page *page;
- int nr_pages;
+ void *addr;
int i;
-#ifndef CONFIG_MMU
- /*
- * Nommu uses slab's for process anonymous memory allocations, and thus
- * requires __GFP_COMP to properly refcount higher order allocations
- */
- flags |= __GFP_COMP;
-#endif
flags |= cachep->gfpflags;
-
page = alloc_pages_node(nodeid, flags, cachep->gfporder);
if (!page)
return NULL;
+ addr = page_address(page);
- nr_pages = (1 << cachep->gfporder);
+ i = (1 << cachep->gfporder);
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- atomic_add(nr_pages, &slab_reclaim_pages);
- add_zone_page_state(page_zone(page), NR_SLAB, nr_pages);
- for (i = 0; i < nr_pages; i++)
- __SetPageSlab(page + i);
- return page_address(page);
+ atomic_add(i, &slab_reclaim_pages);
+ add_page_state(nr_slab, i);
+ while (i--) {
+ SetPageSlab(page);
+ page++;
+ }
+ return addr;
}
/*
struct page *page = virt_to_page(addr);
const unsigned long nr_freed = i;
- sub_zone_page_state(page_zone(page), NR_SLAB, nr_freed);
while (i--) {
- BUG_ON(!PageSlab(page));
- __ClearPageSlab(page);
+ if (!TestClearPageSlab(page))
+ BUG();
page++;
}
+ sub_page_state(nr_slab, nr_freed);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_freed;
free_pages((unsigned long)addr, cachep->gfporder);
static void dump_line(char *data, int offset, int limit)
{
int i;
- unsigned char total = 0, bad_count = 0, errors;
printk(KERN_ERR "%03x:", offset);
for (i = 0; i < limit; i++) {
- if (data[offset + i] != POISON_FREE) {
- total += data[offset + i];
- bad_count++;
- }
printk(" %02x", (unsigned char)data[offset + i]);
}
printk("\n");
-
- if (bad_count == 1) {
- errors = total ^ POISON_FREE;
- if (errors && !(errors & (errors-1))) {
- printk(KERN_ERR "Single bit error detected. Probably bad RAM.\n");
-#ifdef CONFIG_X86
- printk(KERN_ERR "Run memtest86+ or a similar memory test tool.\n");
-#else
- printk(KERN_ERR "Run a memory test tool.\n");
-#endif
- }
- }
}
#endif
if (cachep->flags & SLAB_RED_ZONE) {
printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
- *dbg_redzone1(cachep, objp),
- *dbg_redzone2(cachep, objp));
+ *dbg_redzone1(cachep, objp),
+ *dbg_redzone2(cachep, objp));
}
if (cachep->flags & SLAB_STORE_USER) {
printk(KERN_ERR "Last user: [<%p>]",
- *dbg_userword(cachep, objp));
+ *dbg_userword(cachep, objp));
print_symbol("(%s)",
- (unsigned long)*dbg_userword(cachep, objp));
+ (unsigned long)*dbg_userword(cachep, objp));
printk("\n");
}
realobj = (char *)objp + obj_offset(cachep);
/* Print header */
if (lines == 0) {
printk(KERN_ERR
- "Slab corruption: (%s) start=%p, len=%d\n",
- print_tainted(), realobj, size);
+ "Slab corruption: start=%p, len=%d\n",
+ realobj, size);
print_objinfo(cachep, objp, 0);
- dump_stack();
}
/* Hexdump the affected line */
i = (i / 16) * 16;
* exist:
*/
struct slab *slabp = virt_to_slab(objp);
- unsigned int objnr;
+ int objnr;
- objnr = obj_to_index(cachep, slabp, objp);
+ objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
if (objnr) {
- objp = index_to_obj(cachep, slabp, objnr - 1);
+ objp = slabp->s_mem + (objnr - 1) * cachep->buffer_size;
realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 2);
}
if (objnr + 1 < cachep->num) {
- objp = index_to_obj(cachep, slabp, objnr + 1);
+ objp = slabp->s_mem + (objnr + 1) * cachep->buffer_size;
realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Next obj: start=%p, len=%d\n",
realobj, size);
#if DEBUG
/**
- * slab_destroy_objs - destroy a slab and its objects
- * @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
- *
- * Call the registered destructor for each object in a slab that is being
- * destroyed.
+ * slab_destroy_objs - call the registered destructor for each object in
+ * a slab that is to be destroyed.
*/
static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
{
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = index_to_obj(cachep, slabp, i);
+ void *objp = slabp->s_mem + cachep->buffer_size * i;
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if (cachep->buffer_size % PAGE_SIZE == 0 &&
- OFF_SLAB(cachep))
+ if ((cachep->buffer_size % PAGE_SIZE) == 0
+ && OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp),
- cachep->buffer_size / PAGE_SIZE, 1);
+ cachep->buffer_size / PAGE_SIZE,
+ 1);
else
check_poison_obj(cachep, objp);
#else
if (cachep->dtor) {
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = index_to_obj(cachep, slabp, i);
+ void *objp = slabp->s_mem + cachep->buffer_size * i;
(cachep->dtor) (objp, cachep, 0);
}
}
#endif
/**
- * slab_destroy - destroy and release all objects in a slab
- * @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
- *
* Destroy all the objs in a slab, and release the mem back to the system.
- * Before calling the slab must have been unlinked from the cache. The
- * cache-lock is not held/needed.
+ * Before calling the slab must have been unlinked from the cache.
+ * The cache-lock is not held/needed.
*/
static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
{
}
}
-/*
- * For setting up all the kmem_list3s for cache whose buffer_size is same as
- * size of kmem_list3.
- */
+/* For setting up all the kmem_list3s for a cache whose buffer_size is the same
+ as the size of kmem_list3. */
static void set_up_list3s(struct kmem_cache *cachep, int index)
{
int node;
* high order pages for slabs. When the gfp() functions are more friendly
* towards high-order requests, this should be changed.
*/
-static size_t calculate_slab_order(struct kmem_cache *cachep,
+static inline size_t calculate_slab_order(struct kmem_cache *cachep,
size_t size, size_t align, unsigned long flags)
{
- unsigned long offslab_limit;
size_t left_over = 0;
int gfporder;
- for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
+ for (gfporder = 0 ; gfporder <= MAX_GFP_ORDER; gfporder++) {
unsigned int num;
size_t remainder;
if (!num)
continue;
- if (flags & CFLGS_OFF_SLAB) {
- /*
- * Max number of objs-per-slab for caches which
- * use off-slab slabs. Needed to avoid a possible
- * looping condition in cache_grow().
- */
- offslab_limit = size - sizeof(struct slab);
- offslab_limit /= sizeof(kmem_bufctl_t);
-
- if (num > offslab_limit)
- break;
- }
+ /* More than offslab_limit objects will cause problems */
+ if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
+ break;
/* Found something acceptable - save it away */
cachep->num = num;
/*
* Acceptable internal fragmentation?
*/
- if (left_over * 8 <= (PAGE_SIZE << gfporder))
+ if ((left_over * 8) <= (PAGE_SIZE << gfporder))
break;
}
return left_over;
}
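/*
 * A worked example of the fragmentation check above (a sketch that
 * ignores slab management overhead and the other early exits): with
 * 1500-byte objects on 4K pages, order 0 holds 2 objects and wastes
 * ~1096 bytes, and 1096 * 8 > 4096, so the search continues; order 1
 * holds 5 objects with ~692 bytes left over, which is acceptable.
 */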
-static void setup_cpu_cache(struct kmem_cache *cachep)
-{
- if (g_cpucache_up == FULL) {
- enable_cpucache(cachep);
- return;
- }
- if (g_cpucache_up == NONE) {
- /*
- * Note: the first kmem_cache_create must create the cache
- * that's used by kmalloc(24), otherwise the creation of
- * further caches will BUG().
- */
- cachep->array[smp_processor_id()] = &initarray_generic.cache;
-
- /*
- * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
- * the first cache, then we need to set up all its list3s,
- * otherwise the creation of further caches will BUG().
- */
- set_up_list3s(cachep, SIZE_AC);
- if (INDEX_AC == INDEX_L3)
- g_cpucache_up = PARTIAL_L3;
- else
- g_cpucache_up = PARTIAL_AC;
- } else {
- cachep->array[smp_processor_id()] =
- kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
-
- if (g_cpucache_up == PARTIAL_AC) {
- set_up_list3s(cachep, SIZE_L3);
- g_cpucache_up = PARTIAL_L3;
- } else {
- int node;
- for_each_online_node(node) {
- cachep->nodelists[node] =
- kmalloc_node(sizeof(struct kmem_list3),
- GFP_KERNEL, node);
- BUG_ON(!cachep->nodelists[node]);
- kmem_list3_init(cachep->nodelists[node]);
- }
- }
- }
- cachep->nodelists[numa_node_id()]->next_reap =
- jiffies + REAPTIMEOUT_LIST3 +
- ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
-
- cpu_cache_get(cachep)->avail = 0;
- cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
- cpu_cache_get(cachep)->batchcount = 1;
- cpu_cache_get(cachep)->touched = 0;
- cachep->batchcount = 1;
- cachep->limit = BOOT_CPUCACHE_ENTRIES;
-}
-
/**
* kmem_cache_create - Create a cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
* and the @dtor is run before the pages are handed back.
*
* @name must be valid until the cache is destroyed. This implies that
- * the module calling this has to destroy the cache before getting unloaded.
- *
+ * the module calling this has to destroy the cache before getting
+ * unloaded.
+ *
* The flags are
*
* %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
* %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
* for buffer overruns.
*
+ * %SLAB_NO_REAP - Don't automatically reap this cache when we're under
+ * memory pressure.
+ *
* %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
*/
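/*
 * A minimal usage sketch (with a hypothetical struct foo and no
 * constructor or destructor):
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL, NULL);
 *	if (!foo_cachep)
 *		panic("cannot create foo cache");
 */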
struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
- unsigned long flags,
- void (*ctor)(void*, struct kmem_cache *, unsigned long),
+ unsigned long flags, void (*ctor)(void*, struct kmem_cache *, unsigned long),
void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
size_t left_over, slab_size, ralign;
- struct kmem_cache *cachep = NULL, *pc;
+ struct kmem_cache *cachep = NULL;
+ struct list_head *p;
/*
* Sanity checks... these are all serious usage bugs.
*/
- if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
+ if ((!name) ||
+ in_interrupt() ||
+ (size < BYTES_PER_WORD) ||
(size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
- printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
- name);
+ printk(KERN_ERR "%s: Early error in slab %s\n",
+ __FUNCTION__, name);
BUG();
}
mutex_lock(&cache_chain_mutex);
- list_for_each_entry(pc, &cache_chain, next) {
+ list_for_each(p, &cache_chain) {
+ struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
mm_segment_t old_fs = get_fs();
char tmp;
int res;
* above the next power of two: caches with object sizes just above a
* power of two have a significant amount of internal fragmentation.
*/
- if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
+ if ((size < 4096
+ || fls(size - 1) == fls(size - 1 + 3 * BYTES_PER_WORD)))
flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
if (!(flags & SLAB_DESTROY_BY_RCU))
flags |= SLAB_POISON;
BUG_ON(dtor);
/*
- * Always checks flags, a caller might be expecting debug support which
- * isn't available.
+ * Always checks flags, a caller might be expecting debug
+ * support which isn't available.
*/
- BUG_ON(flags & ~CREATE_MASK);
+ if (flags & ~CREATE_MASK)
+ BUG();
- /*
- * Check that size is in terms of words. This is needed to avoid
+ /* Check that size is in terms of words. This is needed to avoid
* unaligned accesses for some archs when redzoning is used, and makes
* sure any on-slab bufctl's are also correctly aligned.
*/
size &= ~(BYTES_PER_WORD - 1);
}
- /* calculate the final buffer alignment: */
-
+ /* calculate out the final buffer alignment: */
/* 1) arch recommendation: can be overridden for debug */
if (flags & SLAB_HWCACHE_ALIGN) {
- /*
- * Default alignment: as specified by the arch code. Except if
- * an object is really small, then squeeze multiple objects into
- * one cacheline.
+ /* Default alignment: as specified by the arch code.
+ * Except if an object is really small, then squeeze multiple
+ * objects into one cacheline.
*/
ralign = cache_line_size();
while (size <= ralign / 2)
if (ralign > BYTES_PER_WORD)
flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
}
- /*
- * 4) Store it. Note that the debug code below can reduce
+ /* 4) Store it. Note that the debug code below can reduce
* the alignment to BYTES_PER_WORD.
*/
align = ralign;
/* Get cache's description obj. */
- cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL);
+ cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
if (!cachep)
goto oops;
+ memset(cachep, 0, sizeof(struct kmem_cache));
#if DEBUG
cachep->obj_size = size;
#endif
#endif
- /*
- * Determine if the slab management is 'on' or 'off' slab.
- * (bootstrapping cannot cope with offslab caches so don't do
- * it too early on.)
- */
- if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
+ /* Determine if the slab management is 'on' or 'off' slab. */
+ if (size >= (PAGE_SIZE >> 3))
/*
* Size is large, assume best to place the slab management obj
* off-slab (should allow better packing of objs).
cachep->gfpflags = 0;
if (flags & SLAB_CACHE_DMA)
cachep->gfpflags |= GFP_DMA;
+ spin_lock_init(&cachep->spinlock);
cachep->buffer_size = size;
if (flags & CFLGS_OFF_SLAB)
cachep->name = name;
- setup_cpu_cache(cachep);
+ if (g_cpucache_up == FULL) {
+ enable_cpucache(cachep);
+ } else {
+ if (g_cpucache_up == NONE) {
+ /* Note: the first kmem_cache_create must create
+ * the cache that's used by kmalloc(24), otherwise
+ * the creation of further caches will BUG().
+ */
+ cachep->array[smp_processor_id()] =
+ &initarray_generic.cache;
+
+ /* If the cache that's used by
+ * kmalloc(sizeof(kmem_list3)) is the first cache,
+ * then we need to set up all its list3s, otherwise
+ * the creation of further caches will BUG().
+ */
+ set_up_list3s(cachep, SIZE_AC);
+ if (INDEX_AC == INDEX_L3)
+ g_cpucache_up = PARTIAL_L3;
+ else
+ g_cpucache_up = PARTIAL_AC;
+ } else {
+ cachep->array[smp_processor_id()] =
+ kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+
+ if (g_cpucache_up == PARTIAL_AC) {
+ set_up_list3s(cachep, SIZE_L3);
+ g_cpucache_up = PARTIAL_L3;
+ } else {
+ int node;
+ for_each_online_node(node) {
+
+ cachep->nodelists[node] =
+ kmalloc_node(sizeof
+ (struct kmem_list3),
+ GFP_KERNEL, node);
+ BUG_ON(!cachep->nodelists[node]);
+ kmem_list3_init(cachep->
+ nodelists[node]);
+ }
+ }
+ }
+ cachep->nodelists[numa_node_id()]->next_reap =
+ jiffies + REAPTIMEOUT_LIST3 +
+ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+
+ BUG_ON(!cpu_cache_get(cachep));
+ cpu_cache_get(cachep)->avail = 0;
+ cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
+ cpu_cache_get(cachep)->batchcount = 1;
+ cpu_cache_get(cachep)->touched = 0;
+ cachep->batchcount = 1;
+ cachep->limit = BOOT_CPUCACHE_ENTRIES;
+ }
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
-oops:
+ oops:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
name);
#define check_spinlock_acquired_node(x, y) do { } while(0)
#endif
-static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
- struct array_cache *ac,
- int force, int node);
+/*
+ * Waits for all CPUs to execute func().
+ */
+static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
+{
+ check_irq_on();
+ preempt_disable();
+
+ local_irq_disable();
+ func(arg);
+ local_irq_enable();
+
+ if (smp_call_function(func, arg, 1, 1))
+ BUG();
+
+ preempt_enable();
+}
+
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
+ int force, int node);
static void do_drain(void *arg)
{
- struct kmem_cache *cachep = arg;
+ struct kmem_cache *cachep = (struct kmem_cache *) arg;
struct array_cache *ac;
int node = numa_node_id();
struct kmem_list3 *l3;
int node;
- on_each_cpu(do_drain, cachep, 1, 1);
+ smp_call_function_all_cpus(do_drain, cachep);
check_irq_on();
for_each_online_node(node) {
l3 = cachep->nodelists[node];
- if (l3 && l3->alien)
- drain_alien_cache(cachep, l3->alien);
- }
-
- for_each_online_node(node) {
- l3 = cachep->nodelists[node];
- if (l3)
- drain_array(cachep, l3, l3->shared, 1, node);
+ if (l3) {
+ spin_lock_irq(&l3->list_lock);
+ drain_array_locked(cachep, l3->shared, 1, node);
+ spin_unlock_irq(&l3->list_lock);
+ if (l3->alien)
+ drain_alien_cache(cachep, l3->alien);
+ }
}
}
-/*
- * Remove slabs from the list of free slabs.
- * Specify the number of slabs to drain in tofree.
- *
- * Returns the actual number of slabs released.
- */
-static int drain_freelist(struct kmem_cache *cache,
- struct kmem_list3 *l3, int tofree)
+static int __node_shrink(struct kmem_cache *cachep, int node)
{
- struct list_head *p;
- int nr_freed;
struct slab *slabp;
+ struct kmem_list3 *l3 = cachep->nodelists[node];
+ int ret;
- nr_freed = 0;
- while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
+ for (;;) {
+ struct list_head *p;
- spin_lock_irq(&l3->list_lock);
p = l3->slabs_free.prev;
- if (p == &l3->slabs_free) {
- spin_unlock_irq(&l3->list_lock);
- goto out;
- }
+ if (p == &l3->slabs_free)
+ break;
- slabp = list_entry(p, struct slab, list);
+ slabp = list_entry(l3->slabs_free.prev, struct slab, list);
#if DEBUG
- BUG_ON(slabp->inuse);
+ if (slabp->inuse)
+ BUG();
#endif
list_del(&slabp->list);
- /*
- * Safe to drop the lock. The slab is no longer linked
- * to the cache.
- */
- l3->free_objects -= cache->num;
+
+ l3->free_objects -= cachep->num;
spin_unlock_irq(&l3->list_lock);
- slab_destroy(cache, slabp);
- nr_freed++;
+ slab_destroy(cachep, slabp);
+ spin_lock_irq(&l3->list_lock);
}
-out:
- return nr_freed;
+ ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
+ return ret;
}
static int __cache_shrink(struct kmem_cache *cachep)
check_irq_on();
for_each_online_node(i) {
l3 = cachep->nodelists[i];
- if (!l3)
- continue;
-
- drain_freelist(cachep, l3, l3->free_objects);
-
- ret += !list_empty(&l3->slabs_full) ||
- !list_empty(&l3->slabs_partial);
+ if (l3) {
+ spin_lock_irq(&l3->list_lock);
+ ret += __node_shrink(cachep, i);
+ spin_unlock_irq(&l3->list_lock);
+ }
}
return (ret ? 1 : 0);
}
*/
int kmem_cache_shrink(struct kmem_cache *cachep)
{
- BUG_ON(!cachep || in_interrupt());
+ if (!cachep || in_interrupt())
+ BUG();
return __cache_shrink(cachep);
}
int i;
struct kmem_list3 *l3;
- BUG_ON(!cachep || in_interrupt());
+ if (!cachep || in_interrupt())
+ BUG();
/* Don't let CPUs to come and go */
lock_cpu_hotplug();
/* NUMA: free the list3 structures */
for_each_online_node(i) {
- l3 = cachep->nodelists[i];
- if (l3) {
+ if ((l3 = cachep->nodelists[i])) {
kfree(l3->shared);
free_alien_cache(l3->alien);
kfree(l3);
}
}
kmem_cache_free(&cache_cache, cachep);
+
unlock_cpu_hotplug();
+
return 0;
}
EXPORT_SYMBOL(kmem_cache_destroy);
/* Get the memory for a slab management obj. */
static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
- int colour_off, gfp_t local_flags,
- int nodeid)
+ int colour_off, gfp_t local_flags)
{
struct slab *slabp;
if (OFF_SLAB(cachep)) {
/* Slab management obj is off-slab. */
- slabp = kmem_cache_alloc_node(cachep->slabp_cache,
- local_flags, nodeid);
+ slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
if (!slabp)
return NULL;
} else {
slabp->inuse = 0;
slabp->colouroff = colour_off;
slabp->s_mem = objp + colour_off;
- slabp->nodeid = nodeid;
+
return slabp;
}
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = index_to_obj(cachep, slabp, i);
+ void *objp = slabp->s_mem + cachep->buffer_size * i;
#if DEBUG
/* need to poison the objs? */
if (cachep->flags & SLAB_POISON)
*dbg_redzone2(cachep, objp) = RED_INACTIVE;
}
/*
- * Constructors are not allowed to allocate memory from the same
- * cache which they are a constructor for. Otherwise, deadlock.
- * They must also be threaded.
+ * Constructors are not allowed to allocate memory from
+ * the same cache which they are a constructor for.
+ * Otherwise, deadlock. They must also be threaded.
*/
if (cachep->ctor && !(cachep->flags & SLAB_POISON))
cachep->ctor(objp + obj_offset(cachep), cachep,
slab_error(cachep, "constructor overwrote the"
" start of an object");
}
- if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
- OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
+ if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
+ && cachep->flags & SLAB_POISON)
kernel_map_pages(virt_to_page(objp),
cachep->buffer_size / PAGE_SIZE, 0);
#else
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
{
- if (flags & SLAB_DMA)
- BUG_ON(!(cachep->gfpflags & GFP_DMA));
- else
- BUG_ON(cachep->gfpflags & GFP_DMA);
+ if (flags & SLAB_DMA) {
+ if (!(cachep->gfpflags & GFP_DMA))
+ BUG();
+ } else {
+ if (cachep->gfpflags & GFP_DMA)
+ BUG();
+ }
}
-static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
- int nodeid)
+static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid)
{
- void *objp = index_to_obj(cachep, slabp, slabp->free);
+ void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size);
kmem_bufctl_t next;
slabp->inuse++;
return objp;
}
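/*
 * The per-slab free list is threaded through the bufctl array, so the
 * pop in slab_get_obj() and the push in slab_put_obj() below are
 * roughly (a sketch):
 *
 *	pop:	next = slab_bufctl(slabp)[slabp->free];
 *		slabp->free = next;
 *
 *	push:	slab_bufctl(slabp)[objnr] = slabp->free;
 *		slabp->free = objnr;
 */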
-static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
- void *objp, int nodeid)
+static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp,
+ int nodeid)
{
- unsigned int objnr = obj_to_index(cachep, slabp, objp);
+ unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size;
#if DEBUG
/* Verify that the slab belongs to the intended node */
WARN_ON(slabp->nodeid != nodeid);
- if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
+ if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
printk(KERN_ERR "slab: double free detected in cache "
- "'%s', objp %p\n", cachep->name, objp);
+ "'%s', objp %p\n", cachep->name, objp);
BUG();
}
#endif
slabp->inuse--;
}
-/*
- * Map pages beginning at addr to the given cache and slab. This is required
- * for the slab allocator to be able to lookup the cache and slab of a
- * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
- */
-static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
- void *addr)
+static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, void *objp)
{
- int nr_pages;
+ int i;
struct page *page;
- page = virt_to_page(addr);
-
- nr_pages = 1;
- if (likely(!PageCompound(page)))
- nr_pages <<= cache->gfporder;
-
+ /* Nasty!!!!!! I hope this is OK. */
+ i = 1 << cachep->gfporder;
+ page = virt_to_page(objp);
do {
- page_set_cache(page, cache);
- page_set_slab(page, slab);
+ page_set_cache(page, cachep);
+ page_set_slab(page, slabp);
page++;
- } while (--nr_pages);
+ } while (--i);
}
/*
unsigned long ctor_flags;
struct kmem_list3 *l3;
- /*
- * Be lazy and only check for valid flags here, keeping it out of the
- * critical path in kmem_cache_alloc().
+ /* Be lazy and only check for valid flags here,
+ * keeping it out of the critical path in kmem_cache_alloc().
*/
- BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
+ if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
+ BUG();
if (flags & SLAB_NO_GROW)
return 0;
*/
kmem_flagcheck(cachep, flags);
- /*
- * Get mem for the objs. Attempt to allocate a physical page from
- * 'nodeid'.
+ /* Get mem for the objs.
+ * Attempt to allocate a physical page from 'nodeid',
*/
- objp = kmem_getpages(cachep, flags, nodeid);
- if (!objp)
+ if (!(objp = kmem_getpages(cachep, flags, nodeid)))
goto failed;
/* Get slab management. */
- slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
- if (!slabp)
+ if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags)))
goto opps1;
slabp->nodeid = nodeid;
- slab_map_pages(cachep, slabp, objp);
+ set_slab_attr(cachep, slabp, objp);
cache_init_objs(cachep, slabp, ctor_flags);
l3->free_objects += cachep->num;
spin_unlock(&l3->list_lock);
return 1;
-opps1:
+ opps1:
kmem_freepages(cachep, objp);
-failed:
+ failed:
if (local_flags & __GFP_WAIT)
local_irq_disable();
return 0;
}
}
-static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
-{
- unsigned long redzone1, redzone2;
-
- redzone1 = *dbg_redzone1(cache, obj);
- redzone2 = *dbg_redzone2(cache, obj);
-
- /*
- * Redzone is ok.
- */
- if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
- return;
-
- if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
- slab_error(cache, "double free detected");
- else
- slab_error(cache, "memory outside object was overwritten");
-
- printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n",
- obj, redzone1, redzone2);
-}
-
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
void *caller)
{
kfree_debugcheck(objp);
page = virt_to_page(objp);
+ if (page_get_cache(page) != cachep) {
+ printk(KERN_ERR
+ "mismatch in kmem_cache_free: expected cache %p, got %p\n",
+ page_get_cache(page), cachep);
+ printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
+ printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
+ page_get_cache(page)->name);
+ WARN_ON(1);
+ }
slabp = page_get_slab(page);
if (cachep->flags & SLAB_RED_ZONE) {
- verify_redzone_free(cachep, objp);
+ if (*dbg_redzone1(cachep, objp) != RED_ACTIVE
+ || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
+ slab_error(cachep,
+ "double free, or memory outside"
+ " object was overwritten");
+ printk(KERN_ERR
+ "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
+ objp, *dbg_redzone1(cachep, objp),
+ *dbg_redzone2(cachep, objp));
+ }
*dbg_redzone1(cachep, objp) = RED_INACTIVE;
*dbg_redzone2(cachep, objp) = RED_INACTIVE;
}
if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = caller;
- objnr = obj_to_index(cachep, slabp, objp);
+ objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
BUG_ON(objnr >= cachep->num);
- BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
+ BUG_ON(objp != slabp->s_mem + objnr * cachep->buffer_size);
if (cachep->flags & SLAB_DEBUG_INITIAL) {
- /*
- * Need to call the slab's constructor so the caller can
- * perform a verify of its state (debugging). Called without
- * the cache-lock held.
+ /* Need to call the slab's constructor so the
+ * caller can perform a verify of its state (debugging).
+ * Called without the cache-lock held.
*/
cachep->ctor(objp + obj_offset(cachep),
cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
*/
cachep->dtor(objp + obj_offset(cachep), cachep, 0);
}
-#ifdef CONFIG_DEBUG_SLAB_LEAK
- slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
-#endif
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
+ if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
store_stackinfo(cachep, objp, (unsigned long)caller);
kernel_map_pages(virt_to_page(objp),
cachep->buffer_size / PAGE_SIZE, 0);
goto bad;
}
if (entries != cachep->num - slabp->inuse) {
-bad:
- printk(KERN_ERR "slab: Internal list corruption detected in "
- "cache '%s'(%d), slabp %p(%d). Hexdump:\n",
- cachep->name, cachep->num, slabp, slabp->inuse);
+ bad:
+ printk(KERN_ERR
+ "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
+ cachep->name, cachep->num, slabp, slabp->inuse);
for (i = 0;
i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
i++) {
- if (i % 16 == 0)
+ if ((i % 16) == 0)
printk("\n%03x:", i);
printk(" %02x", ((unsigned char *)slabp)[i]);
}
check_irq_off();
ac = cpu_cache_get(cachep);
-retry:
+ retry:
batchcount = ac->batchcount;
if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
- /*
- * If there was little recent activity on this cache, then
- * perform only a partial refill. Otherwise we could generate
- * refill bouncing.
+ /* if there was little recent activity on this
+ * cache, then perform only a partial refill.
+ * Otherwise we could generate refill bouncing.
*/
batchcount = BATCHREFILL_LIMIT;
}
BUG_ON(ac->avail > 0 || !l3);
spin_lock(&l3->list_lock);
- /* See if we can refill from the shared array */
- if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
- goto alloc_done;
-
+ if (l3->shared) {
+ struct array_cache *shared_array = l3->shared;
+ if (shared_array->avail) {
+ if (batchcount > shared_array->avail)
+ batchcount = shared_array->avail;
+ shared_array->avail -= batchcount;
+ ac->avail = batchcount;
+ memcpy(ac->entry,
+ &(shared_array->entry[shared_array->avail]),
+ sizeof(void *) * batchcount);
+ shared_array->touched = 1;
+ goto alloc_done;
+ }
+ }
while (batchcount > 0) {
struct list_head *entry;
struct slab *slabp;
list_add(&slabp->list, &l3->slabs_partial);
}
-must_grow:
+ must_grow:
l3->free_objects -= ac->avail;
-alloc_done:
+ alloc_done:
spin_unlock(&l3->list_lock);
if (unlikely(!ac->avail)) {
int x;
x = cache_grow(cachep, flags, numa_node_id());
- /* cache_grow can reenable interrupts, then ac could change. */
+ // cache_grow can reenable interrupts, then ac could change.
ac = cpu_cache_get(cachep);
- if (!x && ac->avail == 0) /* no objects in sight? abort */
+ if (!x && ac->avail == 0) // no objects in sight? abort
return NULL;
- if (!ac->avail) /* objects refilled by interrupt? */
+ if (!ac->avail) // objects refilled by interrupt?
goto retry;
}
ac->touched = 1;
return ac->entry[--ac->avail];
}
-static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
- gfp_t flags)
+static inline void
+cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags)
{
might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
}
#if DEBUG
-static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
- gfp_t flags, void *objp, void *caller)
+static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags,
+ void *objp, void *caller)
{
if (!objp)
return objp;
*dbg_userword(cachep, objp) = caller;
if (cachep->flags & SLAB_RED_ZONE) {
- if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
- *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
- slab_error(cachep, "double free, or memory outside"
- " object was overwritten");
+ if (*dbg_redzone1(cachep, objp) != RED_INACTIVE
+ || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
+ slab_error(cachep,
+ "double free, or memory outside"
+ " object was overwritten");
printk(KERN_ERR
- "%p: redzone 1:0x%lx, redzone 2:0x%lx\n",
- objp, *dbg_redzone1(cachep, objp),
- *dbg_redzone2(cachep, objp));
+ "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
+ objp, *dbg_redzone1(cachep, objp),
+ *dbg_redzone2(cachep, objp));
}
*dbg_redzone1(cachep, objp) = RED_ACTIVE;
*dbg_redzone2(cachep, objp) = RED_ACTIVE;
}
-#ifdef CONFIG_DEBUG_SLAB_LEAK
- {
- struct slab *slabp;
- unsigned objnr;
-
- slabp = page_get_slab(virt_to_page(objp));
- objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
- slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
- }
-#endif
objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON) {
unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
struct array_cache *ac;
#ifdef CONFIG_NUMA
- if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
- objp = alternate_node_alloc(cachep, flags);
- if (objp != NULL)
- return objp;
+ if (unlikely(current->mempolicy && !in_interrupt())) {
+ int nid = slab_node(current->mempolicy);
+
+ if (nid != numa_node_id())
+ return __cache_alloc_node(cachep, flags, nid);
}
#endif
return objp;
}
-static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
- gfp_t flags, void *caller)
+static __always_inline void *
+__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
{
unsigned long save_flags;
void *objp;
}
#ifdef CONFIG_NUMA
-/*
- * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
- *
- * If we are in_interrupt, then process context, including cpusets and
- * mempolicy, may not apply and should not be used for allocation policy.
- */
-static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
-{
- int nid_alloc, nid_here;
-
- if (in_interrupt())
- return NULL;
- nid_alloc = nid_here = numa_node_id();
- if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
- nid_alloc = cpuset_mem_spread_node();
- else if (current->mempolicy)
- nid_alloc = slab_node(current->mempolicy);
- if (nid_alloc != nid_here)
- return __cache_alloc_node(cachep, flags, nid_alloc);
- return NULL;
-}
-
/*
* An interface to enable slab creation on nodeid
*/
-static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
- int nodeid)
+static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
struct list_head *entry;
struct slab *slabp;
l3 = cachep->nodelists[nodeid];
BUG_ON(!l3);
-retry:
+ retry:
check_irq_off();
spin_lock(&l3->list_lock);
entry = l3->slabs_partial.next;
/* move slabp to correct slabp list: */
list_del(&slabp->list);
- if (slabp->free == BUFCTL_END)
+ if (slabp->free == BUFCTL_END) {
list_add(&slabp->list, &l3->slabs_full);
- else
+ } else {
list_add(&slabp->list, &l3->slabs_partial);
+ }
spin_unlock(&l3->list_lock);
goto done;
-must_grow:
+ must_grow:
spin_unlock(&l3->list_lock);
x = cache_grow(cachep, flags, nodeid);
return NULL;
goto retry;
-done:
+ done:
return obj;
}
#endif
}
free_block(cachep, ac->entry, batchcount, node);
-free_done:
+ free_done:
#if STATS
{
int i = 0;
#endif
spin_unlock(&l3->list_lock);
ac->avail -= batchcount;
- memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
+ memmove(ac->entry, &(ac->entry[batchcount]),
+ sizeof(void *) * ac->avail);
}
/*
- * Release an obj back to its cache. If the obj has a constructed state, it must
- * be in this state _before_ it is released. Called with disabled ints.
+ * __cache_free
+ * Release an obj back to its cache. If the obj has a constructed
+ * state, it must be in this state _before_ it is released.
+ *
+ * Called with disabled ints.
*/
static inline void __cache_free(struct kmem_cache *cachep, void *objp)
{
check_irq_off();
objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
- if (cache_free_alien(cachep, objp))
- return;
-
+ /* Make sure we are not freeing an object from another
+ * node to the array cache on this CPU.
+ */
+#ifdef CONFIG_NUMA
+ {
+ struct slab *slabp;
+ slabp = virt_to_slab(objp);
+ if (unlikely(slabp->nodeid != numa_node_id())) {
+ struct array_cache *alien = NULL;
+ int nodeid = slabp->nodeid;
+ struct kmem_list3 *l3 =
+ cachep->nodelists[numa_node_id()];
+
+ STATS_INC_NODEFREES(cachep);
+ if (l3->alien && l3->alien[nodeid]) {
+ alien = l3->alien[nodeid];
+ spin_lock(&alien->lock);
+ if (unlikely(alien->avail == alien->limit))
+ __drain_alien_cache(cachep,
+ alien, nodeid);
+ alien->entry[alien->avail++] = objp;
+ spin_unlock(&alien->lock);
+ } else {
+ spin_lock(&(cachep->nodelists[nodeid])->
+ list_lock);
+ free_block(cachep, &objp, 1, nodeid);
+ spin_unlock(&(cachep->nodelists[nodeid])->
+ list_lock);
+ }
+ return;
+ }
+ }
+#endif
if (likely(ac->avail < ac->limit)) {
STATS_INC_FREEHIT(cachep);
ac->entry[ac->avail++] = objp;
}
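Stripped of the locking and the alien-cache draining, the NUMA branch added to __cache_free() above makes a three-way routing decision. A user-space sketch of just that decision, with invented names, not part of the patch:

#include <stdio.h>

enum free_target {
	LOCAL_ARRAY,	/* object belongs to this node: per-CPU array fast path */
	ALIEN_ARRAY,	/* remote object, alien cache present: batch for a later drain */
	REMOTE_LISTS	/* remote object, no alien cache: free straight to its node */
};

static enum free_target classify_free(int obj_node, int this_node,
				      int have_alien_cache)
{
	if (obj_node == this_node)
		return LOCAL_ARRAY;
	return have_alien_cache ? ALIEN_ARRAY : REMOTE_LISTS;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_free(0, 0, 1),	/* LOCAL_ARRAY */
	       classify_free(1, 0, 1),	/* ALIEN_ARRAY */
	       classify_free(1, 0, 0));	/* REMOTE_LISTS */
	return 0;
}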
EXPORT_SYMBOL(kmem_cache_alloc);
-/**
- * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
- * @cache: The cache to allocate from.
- * @flags: See kmalloc().
- *
- * Allocate an object from this cache and set the allocated memory to zero.
- * The flags are only relevant if the cache has no available objects.
- */
-void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
-{
- void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
- if (ret)
- memset(ret, 0, obj_size(cache));
- return ret;
-}
-EXPORT_SYMBOL(kmem_cache_zalloc);
-
/**
* kmem_ptr_validate - check if an untrusted pointer might
* be a slab entry.
if (unlikely(page_get_cache(page) != cachep))
goto out;
return 1;
-out:
+ out:
return 0;
}
local_irq_save(save_flags);
if (nodeid == -1 || nodeid == numa_node_id() ||
- !cachep->nodelists[nodeid])
+ !cachep->nodelists[nodeid])
ptr = ____cache_alloc(cachep, flags);
else
ptr = __cache_alloc_node(cachep, flags, nodeid);
#endif
/**
- * __do_kmalloc - allocate memory
+ * kmalloc - allocate memory
* @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @caller: function caller for debug tracking of the caller
+ * @flags: the type of memory to allocate.
+ *
+ * kmalloc is the normal method of allocating memory
+ * in the kernel.
+ *
+ * The @flags argument may be one of:
+ *
+ * %GFP_USER - Allocate memory on behalf of user. May sleep.
+ *
+ * %GFP_KERNEL - Allocate normal kernel RAM. May sleep.
+ *
+ * %GFP_ATOMIC - Allocation will not sleep. Use inside interrupt handlers.
+ *
+ * Additionally, the %GFP_DMA flag may be set to indicate the memory
+ * must be suitable for DMA. This can mean different things on different
+ * platforms. For example, on i386, it means that the memory must come
+ * from the first 16MB.
*/
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
void *caller)
return __cache_alloc(cachep, flags, caller);
}
+#ifndef CONFIG_DEBUG_SLAB
void *__kmalloc(size_t size, gfp_t flags)
{
-#ifndef CONFIG_DEBUG_SLAB
return __do_kmalloc(size, flags, NULL);
-#else
- return __do_kmalloc(size, flags, __builtin_return_address(0));
-#endif
}
EXPORT_SYMBOL(__kmalloc);
-#ifdef CONFIG_DEBUG_SLAB
+#else
+
void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
{
return __do_kmalloc(size, flags, caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);
+
#endif
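The kmalloc() kernel-doc restored above lists the common GFP flags. Purely as a usage illustration, and assuming a hypothetical foo_dev driver rather than anything in this patch, the typical pattern is:

#include <linux/slab.h>
#include <linux/string.h>

struct foo_dev {
	int irq;
	char name[16];
};

/* Process context that is allowed to sleep: GFP_KERNEL is the usual choice. */
static struct foo_dev *foo_alloc(void)
{
	struct foo_dev *dev = kmalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	memset(dev, 0, sizeof(*dev));
	return dev;
}

/* Interrupt context must not sleep: use GFP_ATOMIC instead. */
static void *foo_alloc_in_irq(size_t size)
{
	return kmalloc(size, GFP_ATOMIC);
}

static void foo_free(struct foo_dev *dev)
{
	kfree(dev);	/* kfree(NULL) is a no-op */
}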
#ifdef CONFIG_SMP
* and we have no way of figuring out how to fix the array
* that we have allocated then....
*/
- for_each_possible_cpu(i) {
+ for_each_cpu(i) {
int node = cpu_to_node(i);
if (node_online(node))
/* Catch derefs w/o wrappers */
return (void *)(~(unsigned long)pdata);
-unwind_oom:
+ unwind_oom:
while (--i >= 0) {
if (!cpu_possible(i))
continue;
{
unsigned long flags;
- BUG_ON(virt_to_cache(objp) != cachep);
-
local_irq_save(flags);
__cache_free(cachep, objp);
local_irq_restore(flags);
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
- debug_check_no_locks_freed(objp, obj_size(c));
+ mutex_debug_check_no_locks_freed(objp, obj_size(c));
__cache_free(c, (void *)objp);
local_irq_restore(flags);
}
/*
* We allocate for all cpus so we cannot use for online cpu here.
*/
- for_each_possible_cpu(i)
+ for_each_cpu(i)
kfree(p->ptrs[i]);
kfree(p);
}
EXPORT_SYMBOL_GPL(kmem_cache_name);
/*
- * This initializes kmem_list3 or resizes varioius caches for all nodes.
+ * This initializes kmem_list3 for all nodes.
*/
static int alloc_kmemlist(struct kmem_cache *cachep)
{
int node;
struct kmem_list3 *l3;
- struct array_cache *new_shared;
- struct array_cache **new_alien;
+ int err = 0;
for_each_online_node(node) {
-
- new_alien = alloc_alien_cache(node, cachep->limit);
- if (!new_alien)
+ struct array_cache *nc = NULL, *new;
+ struct array_cache **new_alien = NULL;
+#ifdef CONFIG_NUMA
+ if (!(new_alien = alloc_alien_cache(node, cachep->limit)))
goto fail;
-
- new_shared = alloc_arraycache(node,
- cachep->shared*cachep->batchcount,
- 0xbaadf00d);
- if (!new_shared) {
- free_alien_cache(new_alien);
+#endif
+ if (!(new = alloc_arraycache(node, (cachep->shared *
+ cachep->batchcount),
+ 0xbaadf00d)))
goto fail;
- }
-
- l3 = cachep->nodelists[node];
- if (l3) {
- struct array_cache *shared = l3->shared;
+ if ((l3 = cachep->nodelists[node])) {
spin_lock_irq(&l3->list_lock);
- if (shared)
- free_block(cachep, shared->entry,
- shared->avail, node);
+ if ((nc = cachep->nodelists[node]->shared))
+ free_block(cachep, nc->entry, nc->avail, node);
- l3->shared = new_shared;
- if (!l3->alien) {
+ l3->shared = new;
+ if (!cachep->nodelists[node]->alien) {
l3->alien = new_alien;
new_alien = NULL;
}
l3->free_limit = (1 + nr_cpus_node(node)) *
- cachep->batchcount + cachep->num;
+ cachep->batchcount + cachep->num;
spin_unlock_irq(&l3->list_lock);
- kfree(shared);
+ kfree(nc);
free_alien_cache(new_alien);
continue;
}
- l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
- if (!l3) {
- free_alien_cache(new_alien);
- kfree(new_shared);
+ if (!(l3 = kmalloc_node(sizeof(struct kmem_list3),
+ GFP_KERNEL, node)))
goto fail;
- }
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
- ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
- l3->shared = new_shared;
+ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+ l3->shared = new;
l3->alien = new_alien;
l3->free_limit = (1 + nr_cpus_node(node)) *
- cachep->batchcount + cachep->num;
+ cachep->batchcount + cachep->num;
cachep->nodelists[node] = l3;
}
- return 0;
-
-fail:
- if (!cachep->next.next) {
- /* Cache is not active yet. Roll back what we did */
- node--;
- while (node >= 0) {
- if (cachep->nodelists[node]) {
- l3 = cachep->nodelists[node];
-
- kfree(l3->shared);
- free_alien_cache(l3->alien);
- kfree(l3);
- cachep->nodelists[node] = NULL;
- }
- node--;
- }
- }
- return -ENOMEM;
+ return err;
+ fail:
+ err = -ENOMEM;
+ return err;
}
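alloc_kmemlist() sizes each node's free_limit as (1 + nr_cpus_node(node)) * batchcount + num. A small sketch of that arithmetic, with invented names; reading the extra batch as headroom for the node's shared array is an interpretation, not something the patch states:

#include <assert.h>

/* A node starts trimming completely free slabs once it holds more spare
 * objects than this: one batch per CPU on the node, one further batch
 * (plausibly for the shared array), plus one slab's worth of objects. */
static unsigned long node_free_limit(unsigned int cpus_on_node,
				     unsigned int batchcount,
				     unsigned int objs_per_slab)
{
	return (1 + cpus_on_node) * batchcount + objs_per_slab;
}

int main(void)
{
	/* 4 CPUs on the node, batches of 16, 32 objects per slab */
	assert(node_free_limit(4, 16, 32) == (4 + 1) * 16 + 32);
	return 0;
}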
struct ccupdate_struct {
static void do_ccupdate_local(void *info)
{
- struct ccupdate_struct *new = info;
+ struct ccupdate_struct *new = (struct ccupdate_struct *)info;
struct array_cache *old;
check_irq_off();
new->new[smp_processor_id()] = old;
}
-/* Always called with the cache_chain_mutex held */
-static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
- int batchcount, int shared)
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount,
+ int shared)
{
struct ccupdate_struct new;
int i, err;
memset(&new.new, 0, sizeof(new.new));
for_each_online_cpu(i) {
- new.new[i] = alloc_arraycache(cpu_to_node(i), limit,
- batchcount);
+ new.new[i] =
+ alloc_arraycache(cpu_to_node(i), limit, batchcount);
if (!new.new[i]) {
for (i--; i >= 0; i--)
kfree(new.new[i]);
}
new.cachep = cachep;
- on_each_cpu(do_ccupdate_local, (void *)&new, 1, 1);
+ smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
check_irq_on();
+ spin_lock(&cachep->spinlock);
cachep->batchcount = batchcount;
cachep->limit = limit;
cachep->shared = shared;
+ spin_unlock(&cachep->spinlock);
for_each_online_cpu(i) {
struct array_cache *ccold = new.new[i];
return 0;
}
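do_tune_cpucache() above stages the resize: every replacement array is allocated before anything is swapped, the swap itself happens per CPU via a cross-CPU call, and only then are the old arrays torn down. A user-space sketch of that ordering, with invented names and without the object draining:

#include <stdlib.h>

#define NCPU 4

static int *percpu[NCPU];	/* stand-in for the cache's per-CPU arrays */

static int retune(int new_limit)
{
	int *replacement[NCPU];
	int i;

	/* Phase 1: allocate every replacement first, so failure can be
	 * unwound without leaving any CPU with a half-updated array. */
	for (i = 0; i < NCPU; i++) {
		replacement[i] = calloc(new_limit, sizeof(int));
		if (!replacement[i]) {
			while (--i >= 0)
				free(replacement[i]);
			return -1;
		}
	}

	/* Phase 2: swap the pointers; the kernel does this with a
	 * cross-CPU call so each CPU exchanges its own entry.
	 * Phase 3: the old arrays are now unreachable and can be freed
	 * (the kernel first returns their cached objects to the node). */
	for (i = 0; i < NCPU; i++) {
		int *old = percpu[i];

		percpu[i] = replacement[i];
		free(old);
	}
	return 0;
}

int main(void)
{
	return retune(128) ? 1 : 0;
}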
-/* Called with cache_chain_mutex held always */
static void enable_cpucache(struct kmem_cache *cachep)
{
int err;
int limit, shared;
- /*
- * The head array serves three purposes:
+ /* The head array serves three purposes:
* - create a LIFO ordering, i.e. return objects that are cache-warm
* - reduce the number of spinlock operations.
- * - reduce the number of linked list operations on the slab and
+ * - reduce the number of linked list operations on the slab and
* bufctl chains: array operations are cheaper.
* The numbers are guessed, we should auto-tune as described by
* Bonwick.
else
limit = 120;
- /*
- * CPU bound tasks (e.g. network routing) can exhibit cpu bound
+ /* CPU bound tasks (e.g. network routing) can exhibit cpu bound
* allocation behaviour: Most allocs on one cpu, most free operations
* on another cpu. For these cases, an efficient object passing between
* cpus is necessary. This is provided by a shared array. The array
#endif
#if DEBUG
- /*
- * With debugging enabled, large batchcount lead to excessively long
- * periods with disabled local interrupts. Limit the batchcount
+ /* With debugging enabled, a large batchcount leads to excessively
+ * long periods with disabled local interrupts. Limit the
+ * batchcount.
*/
if (limit > 32)
limit = 32;
cachep->name, -err);
}
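enable_cpucache() turns the guessed numbers mentioned in the comment above into a size-based limit. The sketch below only illustrates the shape of that heuristic; apart from the 120-object case visible in this hunk, the thresholds are invented for illustration:

#include <stddef.h>
#include <stdio.h>

/* Larger objects tie up more memory per cached pointer, so their per-CPU
 * head array is kept short; tiny objects get a deep LIFO stack.
 * Threshold values are illustrative, not the kernel's exact table. */
static unsigned int pick_head_array_limit(size_t object_size)
{
	if (object_size > 128 * 1024)
		return 1;
	if (object_size > 4096)
		return 8;
	if (object_size > 1024)
		return 24;
	if (object_size > 256)
		return 54;
	return 120;
}

int main(void)
{
	printf("%u %u\n",
	       pick_head_array_limit(32),	/* small object: deep stack */
	       pick_head_array_limit(1 << 20));	/* huge object: almost none */
	return 0;
}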
-/*
- * Drain an array if it contains any elements taking the l3 lock only if
- * necessary. Note that the l3 listlock also protects the array_cache
- * if drain_array() is used on the shared array.
- */
-void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
- struct array_cache *ac, int force, int node)
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
+ int force, int node)
{
int tofree;
- if (!ac || !ac->avail)
- return;
+ check_spinlock_acquired_node(cachep, node);
if (ac->touched && !force) {
ac->touched = 0;
- } else {
- spin_lock_irq(&l3->list_lock);
- if (ac->avail) {
- tofree = force ? ac->avail : (ac->limit + 4) / 5;
- if (tofree > ac->avail)
- tofree = (ac->avail + 1) / 2;
- free_block(cachep, ac->entry, tofree, node);
- ac->avail -= tofree;
- memmove(ac->entry, &(ac->entry[tofree]),
- sizeof(void *) * ac->avail);
+ } else if (ac->avail) {
+ tofree = force ? ac->avail : (ac->limit + 4) / 5;
+ if (tofree > ac->avail) {
+ tofree = (ac->avail + 1) / 2;
}
- spin_unlock_irq(&l3->list_lock);
+ free_block(cachep, ac->entry, tofree, node);
+ ac->avail -= tofree;
+ memmove(ac->entry, &(ac->entry[tofree]),
+ sizeof(void *) * ac->avail);
}
}
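The amount drained by drain_array_locked() above is roughly a fifth of the configured limit, capped at about half of what is actually cached (or everything when forced). The same arithmetic as a user-space sketch with invented names:

#include <assert.h>

static unsigned int objects_to_drain(unsigned int avail, unsigned int limit,
				     int force)
{
	unsigned int tofree;

	if (!avail)
		return 0;
	/* Normally trim roughly a fifth of the configured limit... */
	tofree = force ? avail : (limit + 4) / 5;
	/* ...but never more than about half of what is actually cached. */
	if (tofree > avail)
		tofree = (avail + 1) / 2;
	return tofree;
}

int main(void)
{
	assert(objects_to_drain(120, 120, 0) == 24);	/* (120 + 4) / 5 */
	assert(objects_to_drain(3, 120, 0) == 2);	/* capped at (3 + 1) / 2 */
	assert(objects_to_drain(7, 120, 1) == 7);	/* forced: drain it all */
	return 0;
}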
* - clear the per-cpu caches for this CPU.
* - return freeable pages to the main free memory pool.
*
- * If we cannot acquire the cache chain mutex then just give up - we'll try
- * again on the next iteration.
+ * If we cannot acquire the cache chain mutex then just give up - we'll
+ * try again on the next iteration.
*/
static void cache_reap(void *unused)
{
- struct kmem_cache *searchp;
+ struct list_head *walk;
struct kmem_list3 *l3;
- int node = numa_node_id();
if (!mutex_trylock(&cache_chain_mutex)) {
/* Give up. Setup the next iteration. */
return;
}
- list_for_each_entry(searchp, &cache_chain, next) {
- check_irq_on();
+ list_for_each(walk, &cache_chain) {
+ struct kmem_cache *searchp;
+ struct list_head *p;
+ int tofree;
+ struct slab *slabp;
- /*
- * We only take the l3 lock if absolutely necessary and we
- * have established with reasonable certainty that
- * we can do some work if the lock was obtained.
- */
- l3 = searchp->nodelists[node];
+ searchp = list_entry(walk, struct kmem_cache, next);
+ if (searchp->flags & SLAB_NO_REAP)
+ goto next;
+
+ check_irq_on();
+
+ l3 = searchp->nodelists[numa_node_id()];
reap_alien(searchp, l3);
+ spin_lock_irq(&l3->list_lock);
- drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
+ drain_array_locked(searchp, cpu_cache_get(searchp), 0,
+ numa_node_id());
- /*
- * These are racy checks but it does not matter
- * if we skip one check or scan twice.
- */
if (time_after(l3->next_reap, jiffies))
- goto next;
+ goto next_unlock;
l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
- drain_array(searchp, l3, l3->shared, 0, node);
+ if (l3->shared)
+ drain_array_locked(searchp, l3->shared, 0,
+ numa_node_id());
- if (l3->free_touched)
+ if (l3->free_touched) {
l3->free_touched = 0;
- else {
- int freed;
-
- freed = drain_freelist(searchp, l3, (l3->free_limit +
- 5 * searchp->num - 1) / (5 * searchp->num));
- STATS_ADD_REAPED(searchp, freed);
+ goto next_unlock;
}
-next:
+
+ tofree = (l3->free_limit + 5 * searchp->num - 1) /
+ (5 * searchp->num);
+ do {
+ p = l3->slabs_free.next;
+ if (p == &(l3->slabs_free))
+ break;
+
+ slabp = list_entry(p, struct slab, list);
+ BUG_ON(slabp->inuse);
+ list_del(&slabp->list);
+ STATS_INC_REAPED(searchp);
+
+ /* Safe to drop the lock: the slab is no longer
+ * linked to the cache, and searchp cannot
+ * disappear while we hold cache_chain_mutex.
+ */
+ l3->free_objects -= searchp->num;
+ spin_unlock_irq(&l3->list_lock);
+ slab_destroy(searchp, slabp);
+ spin_lock_irq(&l3->list_lock);
+ } while (--tofree > 0);
+ next_unlock:
+ spin_unlock_irq(&l3->list_lock);
+ next:
cond_resched();
}
check_irq_on();
mutex_unlock(&cache_chain_mutex);
next_reap_node();
- refresh_cpu_vm_stats(smp_processor_id());
- /* Set up the next iteration */
+ /* Set up the next iteration */
schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
}
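The tofree computation in cache_reap() above is a ceiling division: free roughly a fifth of the node's free_limit per pass, expressed in whole slabs and never less than one. Spelled out as a user-space sketch with invented names:

#include <assert.h>

/* Number of free slabs to destroy per reap pass: about a fifth of the
 * node's free_limit, converted from objects to whole slabs, rounded up so
 * at least one slab is reaped whenever the free list is non-empty. */
static unsigned long slabs_to_reap(unsigned long free_limit,
				   unsigned long objs_per_slab)
{
	return (free_limit + 5 * objs_per_slab - 1) / (5 * objs_per_slab);
}

int main(void)
{
	assert(slabs_to_reap(120, 8) == 3);	/* ceil(120 / 40) */
	assert(slabs_to_reap(121, 8) == 4);	/* ceil(121 / 40) */
	assert(slabs_to_reap(1, 8) == 1);	/* always reap at least one */
	return 0;
}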
seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#if STATS
seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
- "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
+ "<error> <maxfreeable> <nodeallocs> <remotefrees>");
seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
seq_putc(m, '\n');
{
struct kmem_cache *cachep = p;
++*pos;
- return cachep->next.next == &cache_chain ?
- NULL : list_entry(cachep->next.next, struct kmem_cache, next);
+ return cachep->next.next == &cache_chain ? NULL
+ : list_entry(cachep->next.next, struct kmem_cache, next);
}
static void s_stop(struct seq_file *m, void *p)
static int s_show(struct seq_file *m, void *p)
{
struct kmem_cache *cachep = p;
+ struct list_head *q;
struct slab *slabp;
unsigned long active_objs;
unsigned long num_objs;
int node;
struct kmem_list3 *l3;
+ spin_lock(&cachep->spinlock);
active_objs = 0;
num_slabs = 0;
for_each_online_node(node) {
check_irq_on();
spin_lock_irq(&l3->list_lock);
- list_for_each_entry(slabp, &l3->slabs_full, list) {
+ list_for_each(q, &l3->slabs_full) {
+ slabp = list_entry(q, struct slab, list);
if (slabp->inuse != cachep->num && !error)
error = "slabs_full accounting error";
active_objs += cachep->num;
active_slabs++;
}
- list_for_each_entry(slabp, &l3->slabs_partial, list) {
+ list_for_each(q, &l3->slabs_partial) {
+ slabp = list_entry(q, struct slab, list);
if (slabp->inuse == cachep->num && !error)
error = "slabs_partial inuse accounting error";
if (!slabp->inuse && !error)
active_objs += slabp->inuse;
active_slabs++;
}
- list_for_each_entry(slabp, &l3->slabs_free, list) {
+ list_for_each(q, &l3->slabs_free) {
+ slabp = list_entry(q, struct slab, list);
if (slabp->inuse && !error)
error = "slabs_free/inuse accounting error";
num_slabs++;
unsigned long max_freeable = cachep->max_freeable;
unsigned long node_allocs = cachep->node_allocs;
unsigned long node_frees = cachep->node_frees;
- unsigned long overflows = cachep->node_overflow;
seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
- %4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
- reaped, errors, max_freeable, node_allocs,
- node_frees, overflows);
+ %4lu %4lu %4lu %4lu", allocs, high, grown, reaped,
+ errors, max_freeable, node_allocs, node_frees);
}
/* cpu stats */
{
}
#endif
seq_putc(m, '\n');
+ spin_unlock(&cachep->spinlock);
return 0;
}
{
char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
int limit, batchcount, shared, res;
- struct kmem_cache *cachep;
+ struct list_head *p;
if (count > MAX_SLABINFO_WRITE)
return -EINVAL;
/* Find the cache in the chain of caches. */
mutex_lock(&cache_chain_mutex);
res = -EINVAL;
- list_for_each_entry(cachep, &cache_chain, next) {
+ list_for_each(p, &cache_chain) {
+ struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+ next);
+
if (!strcmp(cachep->name, kbuf)) {
- if (limit < 1 || batchcount < 1 ||
- batchcount > limit || shared < 0) {
+ if (limit < 1 ||
+ batchcount < 1 ||
+ batchcount > limit || shared < 0) {
res = 0;
} else {
res = do_tune_cpucache(cachep, limit,
res = count;
return res;
}
-
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-
-static void *leaks_start(struct seq_file *m, loff_t *pos)
-{
- loff_t n = *pos;
- struct list_head *p;
-
- mutex_lock(&cache_chain_mutex);
- p = cache_chain.next;
- while (n--) {
- p = p->next;
- if (p == &cache_chain)
- return NULL;
- }
- return list_entry(p, struct kmem_cache, next);
-}
-
-static inline int add_caller(unsigned long *n, unsigned long v)
-{
- unsigned long *p;
- int l;
- if (!v)
- return 1;
- l = n[1];
- p = n + 2;
- while (l) {
- int i = l/2;
- unsigned long *q = p + 2 * i;
- if (*q == v) {
- q[1]++;
- return 1;
- }
- if (*q > v) {
- l = i;
- } else {
- p = q + 2;
- l -= i + 1;
- }
- }
- if (++n[1] == n[0])
- return 0;
- memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
- p[0] = v;
- p[1] = 1;
- return 1;
-}
-
-static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
-{
- void *p;
- int i;
- if (n[0] == n[1])
- return;
- for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
- if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
- continue;
- if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
- return;
- }
-}
-
-static void show_symbol(struct seq_file *m, unsigned long address)
-{
-#ifdef CONFIG_KALLSYMS
- char *modname;
- const char *name;
- unsigned long offset, size;
- char namebuf[KSYM_NAME_LEN+1];
-
- name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
-
- if (name) {
- seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
- if (modname)
- seq_printf(m, " [%s]", modname);
- return;
- }
-#endif
- seq_printf(m, "%p", (void *)address);
-}
-
-static int leaks_show(struct seq_file *m, void *p)
-{
- struct kmem_cache *cachep = p;
- struct slab *slabp;
- struct kmem_list3 *l3;
- const char *name;
- unsigned long *n = m->private;
- int node;
- int i;
-
- if (!(cachep->flags & SLAB_STORE_USER))
- return 0;
- if (!(cachep->flags & SLAB_RED_ZONE))
- return 0;
-
- /* OK, we can do it */
-
- n[1] = 0;
-
- for_each_online_node(node) {
- l3 = cachep->nodelists[node];
- if (!l3)
- continue;
-
- check_irq_on();
- spin_lock_irq(&l3->list_lock);
-
- list_for_each_entry(slabp, &l3->slabs_full, list)
- handle_slab(n, cachep, slabp);
- list_for_each_entry(slabp, &l3->slabs_partial, list)
- handle_slab(n, cachep, slabp);
- spin_unlock_irq(&l3->list_lock);
- }
- name = cachep->name;
- if (n[0] == n[1]) {
- /* Increase the buffer size */
- mutex_unlock(&cache_chain_mutex);
- m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
- if (!m->private) {
- /* Too bad, we are really out */
- m->private = n;
- mutex_lock(&cache_chain_mutex);
- return -ENOMEM;
- }
- *(unsigned long *)m->private = n[0] * 2;
- kfree(n);
- mutex_lock(&cache_chain_mutex);
- /* Now make sure this entry will be retried */
- m->count = m->size;
- return 0;
- }
- for (i = 0; i < n[1]; i++) {
- seq_printf(m, "%s: %lu ", name, n[2*i+3]);
- show_symbol(m, n[2*i+2]);
- seq_putc(m, '\n');
- }
- return 0;
-}
-
-struct seq_operations slabstats_op = {
- .start = leaks_start,
- .next = s_next,
- .stop = s_stop,
- .show = leaks_show,
-};
-#endif
#endif
/**