* All object allocations for a node occur from node specific slab lists.
*/
+#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/nodemask.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
-#include <linux/rtmutex.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#define SIZE_AC 1
#define SIZE_L3 (1 + MAX_NUMNODES)
-static int drain_freelist(struct kmem_cache *cache,
- struct kmem_list3 *l3, int tofree);
-static void free_block(struct kmem_cache *cachep, void **objpp, int len,
- int node);
-static void enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
-
/*
* This function must be completely optimized away if a constant is passed to
* it. Mostly the same as what is in linux/slab.h except it returns an index.
return 0;
}
-static int slab_early_init = 1;
-
#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))
#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
#define STATS_INC_GROWN(x) ((x)->grown++)
-#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
+#define STATS_INC_REAPED(x) ((x)->reaped++)
#define STATS_SET_HIGH(x) \
do { \
if ((x)->num_active > (x)->high_mark) \
#define STATS_DEC_ACTIVE(x) do { } while (0)
#define STATS_INC_ALLOCED(x) do { } while (0)
#define STATS_INC_GROWN(x) do { } while (0)
-#define STATS_ADD_REAPED(x,y) do { } while (0)
+#define STATS_INC_REAPED(x) do { } while (0)
#define STATS_SET_HIGH(x) do { } while (0)
#define STATS_INC_ERR(x) do { } while (0)
#define STATS_INC_NODEALLOCS(x) do { } while (0)
#endif
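
A side note on the no-op variants above: when statistics are compiled out, each macro must still expand to exactly one C statement, otherwise an unbraced `if (...) STATS_INC_GROWN(x); else ...` would stop parsing. That is why the empty form is `do { } while (0)` rather than nothing. A standalone illustration (the `cache_stats` type and `COLLECT_STATS` switch are hypothetical, not from this file):

struct cache_stats { unsigned long grown; };

#ifdef COLLECT_STATS
#define STATS_INC_GROWN(x) ((x)->grown++)
#else
#define STATS_INC_GROWN(x) do { } while (0)
#endif

/* Legal with either definition, because the macro plus its trailing
 * semicolon always forms a single statement before the else. */
static void note_grow(int grew, struct cache_stats *stats)
{
	if (grew)
		STATS_INC_GROWN(stats);
	else
		stats->grown = 0;
}
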
#if DEBUG
+/*
+ * Magic nums for obj red zoning.
+ * Placed in the first word before and the first word after an obj.
+ */
+#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
+#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */
+
+/* ...and for poisoning */
+#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
+#define POISON_FREE 0x6b /* for use-after-free poisoning */
+#define POISON_END 0xa5 /* end-byte of poisoning */
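
These values are deliberately distinctive bit patterns so a corrupted word is recognizable in an oops: red-zone words bracket each object to catch writes past either end, while poison bytes fill the body of a free object to catch use-after-free, with POISON_END marking the last byte. A minimal userspace sketch of a poison check, assuming the layout just described (the helper names are illustrative, not the allocator's own):

#include <stdio.h>
#include <string.h>

#define POISON_FREE 0x6b
#define POISON_END  0xa5

/* A freed object should be POISON_FREE everywhere except its last
 * byte, which holds POISON_END; any other byte means someone wrote
 * to the object after it was freed. */
static int poison_intact(const unsigned char *obj, size_t size)
{
	size_t i;

	for (i = 0; i < size - 1; i++)
		if (obj[i] != POISON_FREE)
			return 0;
	return obj[size - 1] == POISON_END;
}

int main(void)
{
	unsigned char obj[16];

	memset(obj, POISON_FREE, sizeof(obj));
	obj[sizeof(obj) - 1] = POISON_END;
	printf("freshly poisoned: %d\n", poison_intact(obj, sizeof(obj)));

	obj[3] = 0;	/* simulate a use-after-free scribble */
	printf("after scribble:   %d\n", poison_intact(obj, sizeof(obj)));
	return 0;
}
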
/*
* memory layout of objects:
{
if (unlikely(PageCompound(page)))
page = (struct page *)page_private(page);
- BUG_ON(!PageSlab(page));
return (struct kmem_cache *)page->lru.next;
}
{
if (unlikely(PageCompound(page)))
page = (struct page *)page_private(page);
- BUG_ON(!PageSlab(page));
return (struct slab *)page->lru.prev;
}
#endif
};
-#define BAD_ALIEN_MAGIC 0x01020304ul
-
-#ifdef CONFIG_LOCKDEP
-
-/*
- * Slab sometimes uses the kmalloc slabs to store the slab headers
- * for other slabs "off slab".
- * The locking for this is tricky in that it nests within the locks
- * of all other slabs in a few places; to deal with this special
- * locking we put on-slab caches into a separate lock-class.
- *
- * We set lock class for alien array caches which are up during init.
- * The lock annotation will be lost if all cpus of a node goes down and
- * then comes back up during hotplug
- */
-static struct lock_class_key on_slab_l3_key;
-static struct lock_class_key on_slab_alc_key;
-
-static inline void init_lock_keys(void)
-
-{
- int q;
- struct cache_sizes *s = malloc_sizes;
-
- while (s->cs_size != ULONG_MAX) {
- for_each_node(q) {
- struct array_cache **alc;
- int r;
- struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
- if (!l3 || OFF_SLAB(s->cs_cachep))
- continue;
- lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
- alc = l3->alien;
- /*
- * FIXME: This check for BAD_ALIEN_MAGIC
- * should go away when common slab code is taught to
- * work even without alien caches.
- * Currently, non NUMA code returns BAD_ALIEN_MAGIC
- * for alloc_alien_cache,
- */
- if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
- continue;
- for_each_node(r) {
- if (alc[r])
- lockdep_set_class(&alc[r]->lock,
- &on_slab_alc_key);
- }
- }
- s++;
- }
-}
-#else
-static inline void init_lock_keys(void)
-{
-}
-#endif
-
/* Guard access to the cache-chain. */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;
static DEFINE_PER_CPU(struct work_struct, reap_work);
+static void free_block(struct kmem_cache *cachep, void **objpp, int len,
+ int node);
+static void enable_cpucache(struct kmem_cache *cachep);
+static void cache_reap(void *unused);
+static int __node_shrink(struct kmem_cache *cachep, int node);
+
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
return cachep->array[smp_processor_id()];
}
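
cpu_cache_get() is the heart of the fast path: each CPU owns a private array of recently freed object pointers, so most allocations and frees are a lockless push or pop on CPU-local data. A rough userspace model of that LIFO behaviour (types and names are illustrative only, not the kernel's):

#include <stddef.h>

/* Model of struct array_cache: a bounded per-CPU LIFO of pointers. */
struct ac_model {
	unsigned int avail;		/* entries currently cached */
	unsigned int limit;		/* flush threshold */
	void *entry[64];
};

/* Fast free: stash locally when there is room; no lock is needed
 * because only the owning CPU ever touches its own array. */
static int ac_free(struct ac_model *ac, void *objp)
{
	if (ac->avail < ac->limit) {
		ac->entry[ac->avail++] = objp;
		return 1;		/* FREEHIT */
	}
	return 0;			/* FREEMISS: flush a batch first */
}

/* Fast alloc: pop the most recently freed, cache-hot object. */
static void *ac_alloc(struct ac_model *ac)
{
	return ac->avail ? ac->entry[--ac->avail] : NULL;
}
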
}
}
-
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
-{
- struct slab *slabp = virt_to_slab(objp);
- int nodeid = slabp->nodeid;
- struct kmem_list3 *l3;
- struct array_cache *alien = NULL;
-
- /*
- * Make sure we are not freeing a object from another node to the array
- * cache on this cpu.
- */
- if (likely(slabp->nodeid == numa_node_id()))
- return 0;
-
- l3 = cachep->nodelists[numa_node_id()];
- STATS_INC_NODEFREES(cachep);
- if (l3->alien && l3->alien[nodeid]) {
- alien = l3->alien[nodeid];
- spin_lock(&alien->lock);
- if (unlikely(alien->avail == alien->limit)) {
- STATS_INC_ACOVERFLOW(cachep);
- __drain_alien_cache(cachep, alien, nodeid);
- }
- alien->entry[alien->avail++] = objp;
- spin_unlock(&alien->lock);
- } else {
- spin_lock(&(cachep->nodelists[nodeid])->list_lock);
- free_block(cachep, &objp, 1, nodeid);
- spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
- }
- return 1;
-}
-
#else
#define drain_alien_cache(cachep, alien) do { } while (0)
static inline struct array_cache **alloc_alien_cache(int node, int limit)
{
- return (struct array_cache **)BAD_ALIEN_MAGIC;
+ return (struct array_cache **) 0x01020304ul;
}
static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
-{
- return 0;
-}
-
#endif
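
The !CONFIG_NUMA stub returns the distinctive non-NULL constant 0x01020304ul (the value the deleted BAD_ALIEN_MAGIC macro named) instead of allocating anything: callers only test the result for NULL-ness, and if the sentinel were ever dereferenced the fault address would be instantly recognizable. A hedged sketch of how such a sentinel can be told apart from a real table (names are illustrative):

/* Illustrative only: the sentinel a non-NUMA build hands back in
 * place of a real alien-cache table. */
#define ALIEN_SENTINEL ((void *)0x01020304ul)

static inline int is_real_alien_table(void *alc)
{
	/* NULL means allocation failed; the sentinel means "this build
	 * has no alien caches at all"; anything else is a live table. */
	return alc != NULL && alc != ALIEN_SENTINEL;
}
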
-static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
l3 = cachep->nodelists[node];
if (!l3)
continue;
- drain_freelist(cachep, l3, l3->free_objects);
+ spin_lock_irq(&l3->list_lock);
+ /* free slabs belonging to this node */
+ __node_shrink(cachep, node);
+ spin_unlock_irq(&l3->list_lock);
}
mutex_unlock(&cache_chain_mutex);
break;
return NOTIFY_BAD;
}
-static struct notifier_block __cpuinitdata cpucache_notifier = {
- &cpuup_callback, NULL, 0
-};
+static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
/*
* swap the static kmem_list3 with kmalloced memory
local_irq_disable();
memcpy(ptr, list, sizeof(struct kmem_list3));
- /*
- * Do not assume that spinlocks can be initialized via memcpy:
- */
- spin_lock_init(&ptr->list_lock);
-
MAKE_ALL_LISTS(cachep, ptr, nodeid);
cachep->nodelists[nodeid] = ptr;
local_irq_enable();
NULL, NULL);
}
- slab_early_init = 0;
-
while (sizes->cs_size != ULONG_MAX) {
/*
* For performance, all the general caches are L1 aligned.
}
/* 4) Replace the bootstrap head arrays */
{
- struct array_cache *ptr;
+ void *ptr;
ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
memcpy(ptr, cpu_cache_get(&cache_cache),
sizeof(struct arraycache_init));
- /*
- * Do not assume that spinlocks can be initialized via memcpy:
- */
- spin_lock_init(&ptr->lock);
-
cache_cache.array[smp_processor_id()] = ptr;
local_irq_enable();
!= &initarray_generic.cache);
memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
sizeof(struct arraycache_init));
- /*
- * Do not assume that spinlocks can be initialized via memcpy:
- */
- spin_lock_init(&ptr->lock);
-
malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
ptr;
local_irq_enable();
mutex_unlock(&cache_chain_mutex);
}
- /* Annotate slab for lockdep -- annotate the malloc caches */
- init_lock_keys();
-
-
/* Done! */
g_cpucache_up = FULL;
static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
struct page *page;
- int nr_pages;
+ void *addr;
int i;
+ flags |= cachep->gfpflags;
#ifndef CONFIG_MMU
- /*
- * Nommu uses slab's for process anonymous memory allocations, and thus
- * requires __GFP_COMP to properly refcount higher order allocations
+ /* nommu uses slabs for process anonymous memory allocations, and so
+ * requires __GFP_COMP to properly refcount higher order allocations
*/
- flags |= __GFP_COMP;
-#endif
- flags |= cachep->gfpflags;
-
+ page = alloc_pages_node(nodeid, (flags | __GFP_COMP), cachep->gfporder);
+#else
page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+#endif
if (!page)
return NULL;
+ addr = page_address(page);
- nr_pages = (1 << cachep->gfporder);
+ i = (1 << cachep->gfporder);
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- atomic_add(nr_pages, &slab_reclaim_pages);
- add_zone_page_state(page_zone(page), NR_SLAB, nr_pages);
- for (i = 0; i < nr_pages; i++)
- __SetPageSlab(page + i);
- return page_address(page);
+ atomic_add(i, &slab_reclaim_pages);
+ add_page_state(nr_slab, i);
+ while (i--) {
+ __SetPageSlab(page);
+ page++;
+ }
+ return addr;
}
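
Everything in kmem_getpages() is driven by cachep->gfporder: the allocation is one block of 2^gfporder contiguous pages, and the loop then marks each constituent struct page with PageSlab. The page arithmetic is a plain shift; a standalone check of the numbers (assuming 4 KiB pages for the example):

#include <stdio.h>

int main(void)
{
	unsigned int order;

	/* An order-N slab spans (1 << N) pages, so the tagging loop in
	 * kmem_getpages() runs (1 << cachep->gfporder) times. */
	for (order = 0; order <= 3; order++)
		printf("order %u: %2d pages, %6d bytes\n",
		       order, 1 << order, (1 << order) * 4096);
	return 0;
}
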
/*
struct page *page = virt_to_page(addr);
const unsigned long nr_freed = i;
- sub_zone_page_state(page_zone(page), NR_SLAB, nr_freed);
while (i--) {
BUG_ON(!PageSlab(page));
__ClearPageSlab(page);
page++;
}
+ sub_page_state(nr_slab, nr_freed);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_freed;
free_pages((unsigned long)addr, cachep->gfporder);
static void dump_line(char *data, int offset, int limit)
{
int i;
- unsigned char total = 0, bad_count = 0, errors;
+ unsigned char total=0, bad_count=0;
printk(KERN_ERR "%03x:", offset);
for (i = 0; i < limit; i++) {
- if (data[offset + i] != POISON_FREE) {
- total += data[offset + i];
- bad_count++;
+ if (data[offset+i] != POISON_FREE) {
+ total += data[offset+i];
+ ++bad_count;
}
printk(" %02x", (unsigned char)data[offset + i]);
}
printk("\n");
-
if (bad_count == 1) {
- errors = total ^ POISON_FREE;
- if (errors && !(errors & (errors-1))) {
- printk(KERN_ERR "Single bit error detected. Probably bad RAM.\n");
+ switch (total) {
+ case POISON_FREE ^ 0x01:
+ case POISON_FREE ^ 0x02:
+ case POISON_FREE ^ 0x04:
+ case POISON_FREE ^ 0x08:
+ case POISON_FREE ^ 0x10:
+ case POISON_FREE ^ 0x20:
+ case POISON_FREE ^ 0x40:
+ case POISON_FREE ^ 0x80:
+ printk (KERN_ERR "Single bit error detected. Possibly bad RAM.\n");
#ifdef CONFIG_X86
- printk(KERN_ERR "Run memtest86+ or a similar memory test tool.\n");
-#else
- printk(KERN_ERR "Run a memory test tool.\n");
+ printk (KERN_ERR "Run memtest86 or other memory test tool.\n");
#endif
+ return;
}
}
}
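
Both versions of the check detect the same condition: with exactly one bad byte, total holds that byte's value, and it indicates a single-bit error iff total ^ POISON_FREE has exactly one bit set. The removed XOR form and the restored eight-case switch are therefore equivalent; a small standalone self-check (illustrative only):

#include <stdio.h>

#define POISON_FREE 0x6b

/* XOR form: one bit set iff errors is a nonzero power of two. */
static int single_bit_xor(unsigned char total)
{
	unsigned char errors = total ^ POISON_FREE;

	return errors && !(errors & (errors - 1));
}

/* Switch form: enumerate the eight one-bit flips of POISON_FREE. */
static int single_bit_switch(unsigned char total)
{
	switch (total) {
	case POISON_FREE ^ 0x01: case POISON_FREE ^ 0x02:
	case POISON_FREE ^ 0x04: case POISON_FREE ^ 0x08:
	case POISON_FREE ^ 0x10: case POISON_FREE ^ 0x20:
	case POISON_FREE ^ 0x40: case POISON_FREE ^ 0x80:
		return 1;
	}
	return 0;
}

int main(void)
{
	int v, mismatches = 0;

	for (v = 0; v < 256; v++)
		if (single_bit_xor((unsigned char)v) !=
		    single_bit_switch((unsigned char)v))
			mismatches++;
	printf("mismatches: %d\n", mismatches);	/* prints 0 */
	return 0;
}
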
void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
size_t left_over, slab_size, ralign;
- struct kmem_cache *cachep = NULL, *pc;
+ struct kmem_cache *cachep = NULL;
+ struct list_head *p;
/*
* Sanity checks... these are all serious usage bugs.
mutex_lock(&cache_chain_mutex);
- list_for_each_entry(pc, &cache_chain, next) {
+ list_for_each(p, &cache_chain) {
+ struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
mm_segment_t old_fs = get_fs();
char tmp;
int res;
#endif
#endif
- /*
- * Determine if the slab management is 'on' or 'off' slab.
- * (bootstrapping cannot cope with offslab caches so don't do
- * it too early on.)
- */
- if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
+ /* Determine if the slab management is 'on' or 'off' slab. */
+ if (size >= (PAGE_SIZE >> 3))
/*
* Size is large, assume best to place the slab management obj
* off-slab (should allow better packing of objs).
}
}
-/*
- * Remove slabs from the list of free slabs.
- * Specify the number of slabs to drain in tofree.
- *
- * Returns the actual number of slabs released.
- */
-static int drain_freelist(struct kmem_cache *cache,
- struct kmem_list3 *l3, int tofree)
+static int __node_shrink(struct kmem_cache *cachep, int node)
{
- struct list_head *p;
- int nr_freed;
struct slab *slabp;
+ struct kmem_list3 *l3 = cachep->nodelists[node];
+ int ret;
- nr_freed = 0;
- while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
+ for (;;) {
+ struct list_head *p;
- spin_lock_irq(&l3->list_lock);
p = l3->slabs_free.prev;
- if (p == &l3->slabs_free) {
- spin_unlock_irq(&l3->list_lock);
- goto out;
- }
+ if (p == &l3->slabs_free)
+ break;
- slabp = list_entry(p, struct slab, list);
+ slabp = list_entry(l3->slabs_free.prev, struct slab, list);
#if DEBUG
BUG_ON(slabp->inuse);
#endif
list_del(&slabp->list);
- /*
- * Safe to drop the lock. The slab is no longer linked
- * to the cache.
- */
- l3->free_objects -= cache->num;
+
+ l3->free_objects -= cachep->num;
spin_unlock_irq(&l3->list_lock);
- slab_destroy(cache, slabp);
- nr_freed++;
+ slab_destroy(cachep, slabp);
+ spin_lock_irq(&l3->list_lock);
}
-out:
- return nr_freed;
+ ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
+ return ret;
}
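
Note the locking rhythm __node_shrink() restores: the caller holds l3->list_lock, each free slab is unlinked under that lock, and the lock is dropped around slab_destroy() and retaken before the next iteration. Once unlinked, the slab is unreachable, so destroying it needs no lock. A generic userspace sketch of the same unlink-then-unlock pattern (a pthread mutex standing in for the spinlock; all names illustrative):

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *free_list;

/* Stand-in for slab_destroy(): heavy teardown done without the lock. */
static void destroy(struct node *n)
{
	free(n);
}

static void drain(void)
{
	pthread_mutex_lock(&list_lock);
	while (free_list) {
		struct node *n = free_list;

		free_list = n->next;	/* unlink while locked */
		pthread_mutex_unlock(&list_lock);
		destroy(n);		/* heavy work, unlocked */
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->next = free_list;
		free_list = n;
	}
	drain();
	return 0;
}
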
static int __cache_shrink(struct kmem_cache *cachep)
check_irq_on();
for_each_online_node(i) {
l3 = cachep->nodelists[i];
- if (!l3)
- continue;
-
- drain_freelist(cachep, l3, l3->free_objects);
-
- ret += !list_empty(&l3->slabs_full) ||
- !list_empty(&l3->slabs_partial);
+ if (l3) {
+ spin_lock_irq(&l3->list_lock);
+ ret += __node_shrink(cachep, i);
+ spin_unlock_irq(&l3->list_lock);
+ }
}
return (ret ? 1 : 0);
}
slabp->inuse--;
}
-/*
- * Map pages beginning at addr to the given cache and slab. This is required
- * for the slab allocator to be able to lookup the cache and slab of a
- * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
- */
-static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
- void *addr)
+static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
+ void *objp)
{
- int nr_pages;
+ int i;
struct page *page;
- page = virt_to_page(addr);
+ /* Nasty: stash the owning cache and slab in each page's lru pointers. */
+ page = virt_to_page(objp);
- nr_pages = 1;
+ i = 1;
if (likely(!PageCompound(page)))
- nr_pages <<= cache->gfporder;
-
+ i <<= cachep->gfporder;
do {
- page_set_cache(page, cache);
- page_set_slab(page, slab);
+ page_set_cache(page, cachep);
+ page_set_slab(page, slabp);
page++;
- } while (--nr_pages);
+ } while (--i);
}
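
set_slab_attr() exploits the fact that while a page belongs to the slab allocator its lru list links are unused, so the owning cache and slab pointers are parked there; virt_to_page() on any object address then leads straight back to them (see page_get_cache()/page_get_slab() earlier, which read page->lru.next and page->lru.prev). A toy model of the trick (illustrative struct, not the kernel's struct page):

/* Toy stand-in for struct page: two spare pointers playing the role
 * of page->lru.next and page->lru.prev. */
struct page_model {
	void *lru_next;		/* owning kmem_cache while a slab page */
	void *lru_prev;		/* owning slab while a slab page */
};

static void page_set_owner(struct page_model *pg, void *cache, void *slab)
{
	pg->lru_next = cache;	/* what page_get_cache() reads back */
	pg->lru_prev = slab;	/* what page_get_slab() reads back */
}

static void *page_get_owner_cache(struct page_model *pg)
{
	return pg->lru_next;
}
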
/*
goto opps1;
slabp->nodeid = nodeid;
- slab_map_pages(cachep, slabp, objp);
+ set_slab_attr(cachep, slabp, objp);
cache_init_objs(cachep, slabp, ctor_flags);
}
}
-static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
-{
- unsigned long redzone1, redzone2;
-
- redzone1 = *dbg_redzone1(cache, obj);
- redzone2 = *dbg_redzone2(cache, obj);
-
- /*
- * Redzone is ok.
- */
- if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
- return;
-
- if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
- slab_error(cache, "double free detected");
- else
- slab_error(cache, "memory outside object was overwritten");
-
- printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n",
- obj, redzone1, redzone2);
-}
-
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
void *caller)
{
kfree_debugcheck(objp);
page = virt_to_page(objp);
+ if (page_get_cache(page) != cachep) {
+ printk(KERN_ERR "mismatch in kmem_cache_free: expected "
+ "cache %p, got %p\n",
+ page_get_cache(page), cachep);
+ printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
+ printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
+ page_get_cache(page)->name);
+ WARN_ON(1);
+ }
slabp = page_get_slab(page);
if (cachep->flags & SLAB_RED_ZONE) {
- verify_redzone_free(cachep, objp);
+ if (*dbg_redzone1(cachep, objp) != RED_ACTIVE ||
+ *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
+ slab_error(cachep, "double free, or memory outside"
+ " object was overwritten");
+ printk(KERN_ERR "%p: redzone 1:0x%lx, "
+ "redzone 2:0x%lx.\n",
+ objp, *dbg_redzone1(cachep, objp),
+ *dbg_redzone2(cachep, objp));
+ }
*dbg_redzone1(cachep, objp) = RED_INACTIVE;
*dbg_redzone2(cachep, objp) = RED_INACTIVE;
}
check_irq_off();
objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
- if (cache_free_alien(cachep, objp))
- return;
-
+ /* Make sure we are not freeing an object from another
+ * node to the array cache on this cpu.
+ */
+#ifdef CONFIG_NUMA
+ {
+ struct slab *slabp;
+ slabp = virt_to_slab(objp);
+ if (unlikely(slabp->nodeid != numa_node_id())) {
+ struct array_cache *alien = NULL;
+ int nodeid = slabp->nodeid;
+ struct kmem_list3 *l3;
+
+ l3 = cachep->nodelists[numa_node_id()];
+ STATS_INC_NODEFREES(cachep);
+ if (l3->alien && l3->alien[nodeid]) {
+ alien = l3->alien[nodeid];
+ spin_lock(&alien->lock);
+ if (unlikely(alien->avail == alien->limit)) {
+ STATS_INC_ACOVERFLOW(cachep);
+ __drain_alien_cache(cachep,
+ alien, nodeid);
+ }
+ alien->entry[alien->avail++] = objp;
+ spin_unlock(&alien->lock);
+ } else {
+ spin_lock(&(cachep->nodelists[nodeid])->
+ list_lock);
+ free_block(cachep, &objp, 1, nodeid);
+ spin_unlock(&(cachep->nodelists[nodeid])->
+ list_lock);
+ }
+ return;
+ }
+ }
+#endif
if (likely(ac->avail < ac->limit)) {
STATS_INC_FREEHIT(cachep);
ac->entry[ac->avail++] = objp;
EXPORT_SYMBOL(kmem_cache_alloc);
/**
- * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
+ * kmem_cache_alloc - Allocate an object. The memory is set to zero.
* @cache: The cache to allocate from.
* @flags: See kmalloc().
*
#endif
/**
- * __do_kmalloc - allocate memory
+ * kmalloc - allocate memory
* @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
+ * @flags: the type of memory to allocate.
* @caller: function caller for debug tracking of the caller
+ *
+ * kmalloc is the normal method of allocating memory
+ * in the kernel.
+ *
+ * The @flags argument may be one of:
+ *
+ * %GFP_USER - Allocate memory on behalf of user. May sleep.
+ *
+ * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
+ *
+ * %GFP_ATOMIC - Allocation will not sleep. Use inside interrupt handlers.
+ *
+ * Additionally, the %GFP_DMA flag may be set to indicate the memory
+ * must be suitable for DMA. This can mean different things on different
+ * platforms. For example, on i386, it means that the memory must come
+ * from the first 16MB.
*/
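
A typical call site pairs these flags with the mandatory failure check. A minimal fragment in the style of ordinary kernel code (the `foo` type and `make_foo()` helper are hypothetical; kmalloc/kfree are the real API):

#include <linux/slab.h>
#include <linux/errno.h>

struct foo { int bar; };

/* GFP_KERNEL may sleep, so this is process-context code; an
 * interrupt handler would have to pass GFP_ATOMIC instead. */
static int make_foo(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	f->bar = 0;
	kfree(f);
	return 0;
}
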
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
void *caller)
{
unsigned long flags;
- BUG_ON(virt_to_cache(objp) != cachep);
-
local_irq_save(flags);
__cache_free(cachep, objp);
local_irq_restore(flags);
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
- debug_check_no_locks_freed(objp, obj_size(c));
+ mutex_debug_check_no_locks_freed(objp, obj_size(c));
__cache_free(c, (void *)objp);
local_irq_restore(flags);
}
*/
static void cache_reap(void *unused)
{
- struct kmem_cache *searchp;
+ struct list_head *walk;
struct kmem_list3 *l3;
int node = numa_node_id();
return;
}
- list_for_each_entry(searchp, &cache_chain, next) {
+ list_for_each(walk, &cache_chain) {
+ struct kmem_cache *searchp;
+ struct list_head *p;
+ int tofree;
+ struct slab *slabp;
+
+ searchp = list_entry(walk, struct kmem_cache, next);
check_irq_on();
/*
drain_array(searchp, l3, l3->shared, 0, node);
- if (l3->free_touched)
+ if (l3->free_touched) {
l3->free_touched = 0;
- else {
- int freed;
-
- freed = drain_freelist(searchp, l3, (l3->free_limit +
- 5 * searchp->num - 1) / (5 * searchp->num));
- STATS_ADD_REAPED(searchp, freed);
+ goto next;
}
+
+ tofree = (l3->free_limit + 5 * searchp->num - 1) /
+ (5 * searchp->num);
+ do {
+ /*
+ * Do not lock if there are no free blocks.
+ */
+ if (list_empty(&l3->slabs_free))
+ break;
+
+ spin_lock_irq(&l3->list_lock);
+ p = l3->slabs_free.next;
+ if (p == &(l3->slabs_free)) {
+ spin_unlock_irq(&l3->list_lock);
+ break;
+ }
+
+ slabp = list_entry(p, struct slab, list);
+ BUG_ON(slabp->inuse);
+ list_del(&slabp->list);
+ STATS_INC_REAPED(searchp);
+
+ /*
+ * Safe to drop the lock. The slab is no longer linked
+ * to the cache. searchp cannot disappear, we hold
+ * cache_chain_mutex.
+ */
+ l3->free_objects -= searchp->num;
+ spin_unlock_irq(&l3->list_lock);
+ slab_destroy(searchp, slabp);
+ } while (--tofree > 0);
next:
cond_resched();
}
check_irq_on();
mutex_unlock(&cache_chain_mutex);
next_reap_node();
- refresh_cpu_vm_stats(smp_processor_id());
/* Set up the next iteration */
schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
}
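
cache_reap() keeps itself alive: the last thing each run does is queue the same per-CPU work again REAPTIMEOUT_CPUC ticks in the future, giving a periodic per-CPU scan with no dedicated thread. A sketch of that self-rearming shape in the 2.6-era workqueue style this file uses, where the handler takes a void * (names are illustrative; treat the fragment as an assumption-laden model, not this file's code):

#include <linux/workqueue.h>

static void my_reap(void *unused);
static DECLARE_WORK(my_reap_work, my_reap, NULL);

static void my_reap(void *unused)
{
	/* ... scan and trim caches, as cache_reap() does ... */

	/* Re-arm: run again one second from now, perpetually. */
	schedule_delayed_work(&my_reap_work, HZ);
}
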
static int s_show(struct seq_file *m, void *p)
{
struct kmem_cache *cachep = p;
+ struct list_head *q;
struct slab *slabp;
unsigned long active_objs;
unsigned long num_objs;
check_irq_on();
spin_lock_irq(&l3->list_lock);
- list_for_each_entry(slabp, &l3->slabs_full, list) {
+ list_for_each(q, &l3->slabs_full) {
+ slabp = list_entry(q, struct slab, list);
if (slabp->inuse != cachep->num && !error)
error = "slabs_full accounting error";
active_objs += cachep->num;
active_slabs++;
}
- list_for_each_entry(slabp, &l3->slabs_partial, list) {
+ list_for_each(q, &l3->slabs_partial) {
+ slabp = list_entry(q, struct slab, list);
if (slabp->inuse == cachep->num && !error)
error = "slabs_partial inuse accounting error";
if (!slabp->inuse && !error)
active_objs += slabp->inuse;
active_slabs++;
}
- list_for_each_entry(slabp, &l3->slabs_free, list) {
+ list_for_each(q, &l3->slabs_free) {
+ slabp = list_entry(q, struct slab, list);
if (slabp->inuse && !error)
error = "slabs_free/inuse accounting error";
num_slabs++;
{
char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
int limit, batchcount, shared, res;
- struct kmem_cache *cachep;
+ struct list_head *p;
if (count > MAX_SLABINFO_WRITE)
return -EINVAL;
/* Find the cache in the chain of caches. */
mutex_lock(&cache_chain_mutex);
res = -EINVAL;
- list_for_each_entry(cachep, &cache_chain, next) {
+ list_for_each(p, &cache_chain) {
+ struct kmem_cache *cachep;
+
+ cachep = list_entry(p, struct kmem_cache, next);
if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 || batchcount < 1 ||
batchcount > limit || shared < 0) {
static int leaks_show(struct seq_file *m, void *p)
{
struct kmem_cache *cachep = p;
+ struct list_head *q;
struct slab *slabp;
struct kmem_list3 *l3;
const char *name;
check_irq_on();
spin_lock_irq(&l3->list_lock);
- list_for_each_entry(slabp, &l3->slabs_full, list)
+ list_for_each(q, &l3->slabs_full) {
+ slabp = list_entry(q, struct slab, list);
handle_slab(n, cachep, slabp);
- list_for_each_entry(slabp, &l3->slabs_partial, list)
+ }
+ list_for_each(q, &l3->slabs_partial) {
+ slabp = list_entry(q, struct slab, list);
handle_slab(n, cachep, slabp);
+ }
spin_unlock_irq(&l3->list_lock);
}
name = cachep->name;