X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Fslab.c;h=840742641152b91052d5688553f761aaff829be2;hb=37b9e453a7750f55f473d51c91d184b703c2b883;hp=3b00d4499b3e50f633acf350dac1ea91e5f64d3b;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git diff --git a/mm/slab.c b/mm/slab.c index 3b00d4499..840742641 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -128,9 +128,28 @@ #endif #ifndef ARCH_KMALLOC_MINALIGN +/* + * Enforce a minimum alignment for the kmalloc caches. + * Usually, the kmalloc caches are cache_line_size() aligned, except when + * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned. + * Some archs want to perform DMA into kmalloc caches and need a guaranteed + * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that. + * Note that this flag disables some debug features. + */ #define ARCH_KMALLOC_MINALIGN 0 #endif +#ifndef ARCH_SLAB_MINALIGN +/* + * Enforce a minimum alignment for all caches. + * Intended for archs that get misalignment faults even for BYTES_PER_WORD + * aligned buffers. Includes ARCH_KMALLOC_MINALIGN. + * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables + * some debug features. + */ +#define ARCH_SLAB_MINALIGN 0 +#endif + #ifndef ARCH_KMALLOC_FLAGS #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN #endif @@ -327,6 +346,7 @@ struct kmem_cache_s { unsigned long reaped; unsigned long errors; unsigned long max_freeable; + unsigned long node_allocs; atomic_t allochit; atomic_t allocmiss; atomic_t freehit; @@ -361,6 +381,7 @@ struct kmem_cache_s { (x)->high_mark = (x)->num_active; \ } while (0) #define STATS_INC_ERR(x) ((x)->errors++) +#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) #define STATS_SET_FREEABLE(x, i) \ do { if ((x)->max_freeable < i) \ (x)->max_freeable = i; \ @@ -378,6 +399,7 @@ struct kmem_cache_s { #define STATS_INC_REAPED(x) do { } while (0) #define STATS_SET_HIGH(x) do { } while (0) #define STATS_INC_ERR(x) do { } while (0) +#define STATS_INC_NODEALLOCS(x) do { } while (0) #define STATS_SET_FREEABLE(x, i) \ do { } while (0) @@ -485,10 +507,9 @@ static int slab_break_gfp_order = BREAK_GFP_ORDER_LO; struct cache_sizes malloc_sizes[] = { #define CACHE(x) { .cs_size = (x) }, #include - { 0, } + CACHE(ULONG_MAX) #undef CACHE }; - EXPORT_SYMBOL(malloc_sizes); /* Must match cache_sizes above. Out of line to keep cache footprint low. */ @@ -506,7 +527,7 @@ static struct cache_names __initdata cache_names[] = { static struct arraycache_init initarray_cache __initdata = { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; -static struct arraycache_init initarray_generic __initdata = +static struct arraycache_init initarray_generic = { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; /* internal cache of cache description objs */ @@ -552,7 +573,7 @@ static void free_block(kmem_cache_t* cachep, void** objpp, int len); static void enable_cpucache (kmem_cache_t *cachep); static void cache_reap (void *unused); -static inline void ** ac_entry(struct array_cache *ac) +static inline void **ac_entry(struct array_cache *ac) { return (void**)(ac+1); } @@ -562,24 +583,38 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep) return cachep->array[smp_processor_id()]; } -static kmem_cache_t * kmem_find_general_cachep (size_t size, int gfpflags) +static inline kmem_cache_t *__find_general_cachep(size_t size, int gfpflags) { struct cache_sizes *csizep = malloc_sizes; - /* This function could be moved to the header file, and - * made inline so consumers can quickly determine what - * cache pointer they require. 
+#if DEBUG + /* This happens if someone tries to call + * kmem_cache_create(), or __kmalloc(), before + * the generic caches are initialized. + */ + BUG_ON(csizep->cs_cachep == NULL); +#endif + while (size > csizep->cs_size) + csizep++; + + /* + * Really subtile: The last entry with cs->cs_size==ULONG_MAX + * has cs_{dma,}cachep==NULL. Thus no special case + * for large kmalloc calls required. */ - for ( ; csizep->cs_size; csizep++) { - if (size > csizep->cs_size) - continue; - break; - } - return (gfpflags & GFP_DMA) ? csizep->cs_dmacachep : csizep->cs_cachep; + if (unlikely(gfpflags & GFP_DMA)) + return csizep->cs_dmacachep; + return csizep->cs_cachep; +} + +kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags) +{ + return __find_general_cachep(size, gfpflags); } +EXPORT_SYMBOL(kmem_find_general_cachep); /* Cal the num objs, wastage, and bytes left over for a given slab size. */ -static void cache_estimate (unsigned long gfporder, size_t size, size_t align, +static void cache_estimate(unsigned long gfporder, size_t size, size_t align, int flags, size_t *left_over, unsigned int *num) { int i; @@ -637,17 +672,17 @@ static void __devinit start_cpu_timer(int cpu) } } -static struct array_cache *alloc_arraycache(int cpu, int entries, int batchcount) +static struct array_cache *alloc_arraycache(int cpu, int entries, + int batchcount) { int memsize = sizeof(void*)*entries+sizeof(struct array_cache); struct array_cache *nc = NULL; - if (cpu != -1) { - nc = kmem_cache_alloc_node(kmem_find_general_cachep(memsize, - GFP_KERNEL), cpu_to_node(cpu)); - } - if (!nc) + if (cpu == -1) nc = kmalloc(memsize, GFP_KERNEL); + else + nc = kmalloc_node(memsize, GFP_KERNEL, cpu_to_node(cpu)); + if (nc) { nc->avail = 0; nc->limit = entries; @@ -658,8 +693,7 @@ static struct array_cache *alloc_arraycache(int cpu, int entries, int batchcount } static int __devinit cpuup_callback(struct notifier_block *nfb, - unsigned long action, - void *hcpu) + unsigned long action, void *hcpu) { long cpu = (long)hcpu; kmem_cache_t* cachep; @@ -772,7 +806,7 @@ void __init kmem_cache_init(void) sizes = malloc_sizes; names = cache_names; - while (sizes->cs_size) { + while (sizes->cs_size != ULONG_MAX) { /* For performance, all the general caches are L1 aligned. * This should be particularly beneficial on SMP boxes, as it * eliminates "false sharing". @@ -864,7 +898,7 @@ __initcall(cpucache_init); * did not request dmaable memory, we might get it, but that * would be relatively rare and ignorable. 
*/ -static void *kmem_getpages(kmem_cache_t *cachep, int flags, int nodeid) +static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) { struct page *page; void *addr; @@ -872,16 +906,13 @@ static void *kmem_getpages(kmem_cache_t *cachep, int flags, int nodeid) flags |= cachep->gfpflags; if (likely(nodeid == -1)) { - addr = (void*)__get_free_pages(flags, cachep->gfporder); - if (!addr) - return NULL; - page = virt_to_page(addr); + page = alloc_pages(flags, cachep->gfporder); } else { page = alloc_pages_node(nodeid, flags, cachep->gfporder); - if (!page) - return NULL; - addr = page_address(page); } + if (!page) + return NULL; + addr = page_address(page); i = (1 << cachep->gfporder); if (cachep->flags & SLAB_RECLAIM_ACCOUNT) @@ -929,7 +960,8 @@ static void kmem_rcu_free(struct rcu_head *head) #if DEBUG #ifdef CONFIG_DEBUG_PAGEALLOC -static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, unsigned long caller) +static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, + unsigned long caller) { int size = obj_reallen(cachep); @@ -1169,7 +1201,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long), void (*dtor)(void*, kmem_cache_t *, unsigned long)) { - size_t left_over, slab_size; + size_t left_over, slab_size, ralign; kmem_cache_t *cachep = NULL; /* @@ -1219,24 +1251,44 @@ kmem_cache_create (const char *name, size_t size, size_t align, if (flags & ~CREATE_MASK) BUG(); - if (align) { - /* combinations of forced alignment and advanced debugging is - * not yet implemented. + /* Check that size is in terms of words. This is needed to avoid + * unaligned accesses for some archs when redzoning is used, and makes + * sure any on-slab bufctl's are also correctly aligned. + */ + if (size & (BYTES_PER_WORD-1)) { + size += (BYTES_PER_WORD-1); + size &= ~(BYTES_PER_WORD-1); + } + + /* calculate out the final buffer alignment: */ + /* 1) arch recommendation: can be overridden for debug */ + if (flags & SLAB_HWCACHE_ALIGN) { + /* Default alignment: as specified by the arch code. + * Except if an object is really small, then squeeze multiple + * objects into one cacheline. */ - flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER); + ralign = cache_line_size(); + while (size <= ralign/2) + ralign /= 2; } else { - if (flags & SLAB_HWCACHE_ALIGN) { - /* Default alignment: as specified by the arch code. - * Except if an object is really small, then squeeze multiple - * into one cacheline. - */ - align = cache_line_size(); - while (size <= align/2) - align /= 2; - } else { - align = BYTES_PER_WORD; - } - } + ralign = BYTES_PER_WORD; + } + /* 2) arch mandated alignment: disables debug if necessary */ + if (ralign < ARCH_SLAB_MINALIGN) { + ralign = ARCH_SLAB_MINALIGN; + if (ralign > BYTES_PER_WORD) + flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER); + } + /* 3) caller mandated alignment: disables debug if necessary */ + if (ralign < align) { + ralign = align; + if (ralign > BYTES_PER_WORD) + flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER); + } + /* 4) Store it. Note that the debug code below can reduce + * the alignment to BYTES_PER_WORD. + */ + align = ralign; /* Get cache's description obj. */ cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL); @@ -1244,15 +1296,6 @@ kmem_cache_create (const char *name, size_t size, size_t align, goto opps; memset(cachep, 0, sizeof(kmem_cache_t)); - /* Check that size is in terms of words. 
This is needed to avoid - * unaligned accesses for some archs when redzoning is used, and makes - * sure any on-slab bufctl's are also correctly aligned. - */ - if (size & (BYTES_PER_WORD-1)) { - size += (BYTES_PER_WORD-1); - size &= ~(BYTES_PER_WORD-1); - } - #if DEBUG cachep->reallen = size; @@ -1576,7 +1619,6 @@ int kmem_cache_shrink(kmem_cache_t *cachep) return __cache_shrink(cachep); } - EXPORT_SYMBOL(kmem_cache_shrink); /** @@ -1596,7 +1638,7 @@ EXPORT_SYMBOL(kmem_cache_shrink); * The caller must guarantee that noone will allocate memory from the cache * during the kmem_cache_destroy(). */ -int kmem_cache_destroy (kmem_cache_t * cachep) +int kmem_cache_destroy(kmem_cache_t * cachep) { int i; @@ -1624,7 +1666,7 @@ int kmem_cache_destroy (kmem_cache_t * cachep) } if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) - synchronize_kernel(); + synchronize_rcu(); /* no cpu_online check required here since we clear the percpu * array on cpu offline and set this to NULL. @@ -1641,12 +1683,11 @@ int kmem_cache_destroy (kmem_cache_t * cachep) return 0; } - EXPORT_SYMBOL(kmem_cache_destroy); /* Get the memory for a slab management obj. */ -static struct slab* alloc_slabmgmt (kmem_cache_t *cachep, - void *objp, int colour_off, int local_flags) +static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, + void *objp, int colour_off, unsigned int __nocast local_flags) { struct slab *slabp; @@ -1671,8 +1712,8 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) return (kmem_bufctl_t *)(slabp+1); } -static void cache_init_objs (kmem_cache_t * cachep, - struct slab * slabp, unsigned long ctor_flags) +static void cache_init_objs(kmem_cache_t *cachep, + struct slab *slabp, unsigned long ctor_flags) { int i; @@ -1717,7 +1758,7 @@ static void cache_init_objs (kmem_cache_t * cachep, slabp->free = 0; } -static void kmem_flagcheck(kmem_cache_t *cachep, int flags) +static void kmem_flagcheck(kmem_cache_t *cachep, unsigned int flags) { if (flags & SLAB_DMA) { if (!(cachep->gfpflags & GFP_DMA)) @@ -1747,12 +1788,12 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp) * Grow (by 1) the number of slabs within a cache. This is called by * kmem_cache_alloc() when there are no active objs left in a cache. */ -static int cache_grow (kmem_cache_t * cachep, int flags) +static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) { struct slab *slabp; void *objp; size_t offset; - int local_flags; + unsigned int local_flags; unsigned long ctor_flags; /* Be lazy and only check for valid flags here, @@ -1798,7 +1839,7 @@ static int cache_grow (kmem_cache_t * cachep, int flags) /* Get mem for the objs. */ - if (!(objp = kmem_getpages(cachep, flags, -1))) + if (!(objp = kmem_getpages(cachep, flags, nodeid))) goto failed; /* Get slab management. */ @@ -1852,7 +1893,8 @@ static void kfree_debugcheck(const void *objp) } } -static void *cache_free_debugcheck (kmem_cache_t * cachep, void * objp, void *caller) +static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp, + void *caller) { struct page *page; unsigned int objnr; @@ -1920,18 +1962,17 @@ static void *cache_free_debugcheck (kmem_cache_t * cachep, void * objp, void *ca static void check_slabp(kmem_cache_t *cachep, struct slab *slabp) { - int i; + kmem_bufctl_t i; int entries = 0; check_spinlock_acquired(cachep); /* Check slab's freelist to see if this obj is there. 
*/ for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { entries++; - if (entries > cachep->num || i < 0 || i >= cachep->num) + if (entries > cachep->num || i >= cachep->num) goto bad; } if (entries != cachep->num - slabp->inuse) { - int i; bad: printk(KERN_ERR "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n", cachep->name, cachep->num, slabp, slabp->inuse); @@ -1950,7 +1991,7 @@ bad: #define check_slabp(x,y) do { } while(0) #endif -static void* cache_alloc_refill(kmem_cache_t* cachep, int flags) +static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags) { int batchcount; struct kmem_list3 *l3; @@ -2032,7 +2073,7 @@ alloc_done: if (unlikely(!ac->avail)) { int x; - x = cache_grow(cachep, flags); + x = cache_grow(cachep, flags, -1); // cache_grow can reenable interrupts, then ac could change. ac = ac_data(cachep); @@ -2047,7 +2088,7 @@ alloc_done: } static inline void -cache_alloc_debugcheck_before(kmem_cache_t *cachep, int flags) +cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags) { might_sleep_if(flags & __GFP_WAIT); #if DEBUG @@ -2102,7 +2143,7 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep, #endif -static inline void * __cache_alloc (kmem_cache_t *cachep, int flags) +static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags) { unsigned long save_flags; void* objp; @@ -2181,7 +2222,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects) } } -static void cache_flusharray (kmem_cache_t* cachep, struct array_cache *ac) +static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac) { int batchcount; @@ -2238,7 +2279,7 @@ free_done: * * Called with disabled ints. */ -static inline void __cache_free (kmem_cache_t *cachep, void* objp) +static inline void __cache_free(kmem_cache_t *cachep, void *objp) { struct array_cache *ac = ac_data(cachep); @@ -2264,11 +2305,10 @@ static inline void __cache_free (kmem_cache_t *cachep, void* objp) * Allocate an object from this cache. The flags are only relevant * if the cache has no available objects. */ -void * kmem_cache_alloc (kmem_cache_t *cachep, int flags) +void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags) { return __cache_alloc(cachep, flags); } - EXPORT_SYMBOL(kmem_cache_alloc); /** @@ -2313,6 +2353,7 @@ out: return 0; } +#ifdef CONFIG_NUMA /** * kmem_cache_alloc_node - Allocate an object on the specified node * @cachep: The cache to allocate from. @@ -2323,71 +2364,92 @@ out: * and can sleep. And it will allocate memory on the given node, which * can improve the performance for cpu bound structures. */ -void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid) +void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid) { - size_t offset; + int loop; void *objp; struct slab *slabp; kmem_bufctl_t next; - /* The main algorithms are not node aware, thus we have to cheat: - * We bypass all caches and allocate a new slab. - * The following code is a streamlined copy of cache_grow(). - */ + for (loop = 0;;loop++) { + struct list_head *q; - /* Get colour for the slab, and update the next value. 
*/ - spin_lock_irq(&cachep->spinlock); - offset = cachep->colour_next; - cachep->colour_next++; - if (cachep->colour_next >= cachep->colour) - cachep->colour_next = 0; - offset *= cachep->colour_off; - spin_unlock_irq(&cachep->spinlock); + objp = NULL; + check_irq_on(); + spin_lock_irq(&cachep->spinlock); + /* walk through all partial and empty slab and find one + * from the right node */ + list_for_each(q,&cachep->lists.slabs_partial) { + slabp = list_entry(q, struct slab, list); + + if (page_to_nid(virt_to_page(slabp->s_mem)) == nodeid || + loop > 2) + goto got_slabp; + } + list_for_each(q, &cachep->lists.slabs_free) { + slabp = list_entry(q, struct slab, list); - /* Get mem for the objs. */ - if (!(objp = kmem_getpages(cachep, GFP_KERNEL, nodeid))) - goto failed; + if (page_to_nid(virt_to_page(slabp->s_mem)) == nodeid || + loop > 2) + goto got_slabp; + } + spin_unlock_irq(&cachep->spinlock); - /* Get slab management. */ - if (!(slabp = alloc_slabmgmt(cachep, objp, offset, GFP_KERNEL))) - goto opps1; + local_irq_disable(); + if (!cache_grow(cachep, flags, nodeid)) { + local_irq_enable(); + return NULL; + } + local_irq_enable(); + } +got_slabp: + /* found one: allocate object */ + check_slabp(cachep, slabp); + check_spinlock_acquired(cachep); - set_slab_attr(cachep, slabp, objp); - cache_init_objs(cachep, slabp, SLAB_CTOR_CONSTRUCTOR); + STATS_INC_ALLOCED(cachep); + STATS_INC_ACTIVE(cachep); + STATS_SET_HIGH(cachep); + STATS_INC_NODEALLOCS(cachep); - /* The first object is ours: */ objp = slabp->s_mem + slabp->free*cachep->objsize; + slabp->inuse++; next = slab_bufctl(slabp)[slabp->free]; #if DEBUG slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; #endif slabp->free = next; - - /* add the remaining objects into the cache */ - spin_lock_irq(&cachep->spinlock); check_slabp(cachep, slabp); - STATS_INC_GROWN(cachep); - /* Make slab active. */ - if (slabp->free == BUFCTL_END) { - list_add_tail(&slabp->list, &(list3_data(cachep)->slabs_full)); - } else { - list_add_tail(&slabp->list, - &(list3_data(cachep)->slabs_partial)); - list3_data(cachep)->free_objects += cachep->num-1; - } + + /* move slabp to correct slabp list: */ + list_del(&slabp->list); + if (slabp->free == BUFCTL_END) + list_add(&slabp->list, &cachep->lists.slabs_full); + else + list_add(&slabp->list, &cachep->lists.slabs_partial); + + list3_data(cachep)->free_objects--; spin_unlock_irq(&cachep->spinlock); + objp = cache_alloc_debugcheck_after(cachep, GFP_KERNEL, objp, __builtin_return_address(0)); return objp; -opps1: - kmem_freepages(cachep, objp); -failed: - return NULL; - } EXPORT_SYMBOL(kmem_cache_alloc_node); +void *kmalloc_node(size_t size, int flags, int node) +{ + kmem_cache_t *cachep; + + cachep = kmem_find_general_cachep(size, flags); + if (unlikely(cachep == NULL)) + return NULL; + return kmem_cache_alloc_node(cachep, flags, node); +} +EXPORT_SYMBOL(kmalloc_node); +#endif + /** * kmalloc - allocate memory * @size: how many bytes of memory are required. @@ -2409,26 +2471,20 @@ EXPORT_SYMBOL(kmem_cache_alloc_node); * platforms. For example, on i386, it means that the memory must come * from the first 16MB. */ -void * __kmalloc (size_t size, int flags) +void *__kmalloc(size_t size, unsigned int __nocast flags) { - struct cache_sizes *csizep = malloc_sizes; + kmem_cache_t *cachep; - for (; csizep->cs_size; csizep++) { - if (size > csizep->cs_size) - continue; -#if DEBUG - /* This happens if someone tries to call - * kmem_cache_create(), or kmalloc(), before - * the generic caches are initialized. 
- */ - BUG_ON(csizep->cs_cachep == NULL); -#endif - return __cache_alloc(flags & GFP_DMA ? - csizep->cs_dmacachep : csizep->cs_cachep, flags); - } - return NULL; + /* If you want to save a few bytes .text space: replace + * __ with kmem_. + * Then kmalloc uses the uninlined functions instead of the inline + * functions. + */ + cachep = __find_general_cachep(size, flags); + if (unlikely(cachep == NULL)) + return NULL; + return __cache_alloc(cachep, flags); } - EXPORT_SYMBOL(__kmalloc); #ifdef CONFIG_SMP @@ -2451,9 +2507,8 @@ void *__alloc_percpu(size_t size, size_t align) for (i = 0; i < NR_CPUS; i++) { if (!cpu_possible(i)) continue; - pdata->ptrs[i] = kmem_cache_alloc_node( - kmem_find_general_cachep(size, GFP_KERNEL), - cpu_to_node(i)); + pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL, + cpu_to_node(i)); if (!pdata->ptrs[i]) goto unwind_oom; @@ -2472,7 +2527,6 @@ unwind_oom: kfree(pdata); return NULL; } - EXPORT_SYMBOL(__alloc_percpu); #endif @@ -2484,7 +2538,7 @@ EXPORT_SYMBOL(__alloc_percpu); * Free an object which was previously allocated from this * cache. */ -void kmem_cache_free (kmem_cache_t *cachep, void *objp) +void kmem_cache_free(kmem_cache_t *cachep, void *objp) { unsigned long flags; @@ -2492,7 +2546,6 @@ void kmem_cache_free (kmem_cache_t *cachep, void *objp) __cache_free(cachep, objp); local_irq_restore(flags); } - EXPORT_SYMBOL(kmem_cache_free); /** @@ -2501,7 +2554,7 @@ EXPORT_SYMBOL(kmem_cache_free); * @size: element size. * @flags: the type of memory to allocate. */ -void *kcalloc(size_t n, size_t size, int flags) +void *kcalloc(size_t n, size_t size, unsigned int __nocast flags) { void *ret = NULL; @@ -2513,7 +2566,6 @@ void *kcalloc(size_t n, size_t size, int flags) memset(ret, 0, n * size); return ret; } - EXPORT_SYMBOL(kcalloc); /** @@ -2523,12 +2575,12 @@ EXPORT_SYMBOL(kcalloc); * Don't free memory not originally allocated by kmalloc() * or you will run into trouble. */ -void kfree (const void *objp) +void kfree(const void *objp) { kmem_cache_t *c; unsigned long flags; - if (!objp) + if (unlikely(!objp)) return; local_irq_save(flags); kfree_debugcheck(objp); @@ -2536,7 +2588,6 @@ void kfree (const void *objp) __cache_free(c, (void*)objp); local_irq_restore(flags); } - EXPORT_SYMBOL(kfree); #ifdef CONFIG_SMP @@ -2558,8 +2609,8 @@ free_percpu(const void *objp) continue; kfree(p->ptrs[i]); } + kfree(p); } - EXPORT_SYMBOL(free_percpu); #endif @@ -2567,7 +2618,6 @@ unsigned int kmem_cache_size(kmem_cache_t *cachep) { return obj_reallen(cachep); } - EXPORT_SYMBOL(kmem_cache_size); struct ccupdate_struct { @@ -2588,7 +2638,8 @@ static void do_ccupdate_local(void *info) } -static int do_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount, int shared) +static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount, + int shared) { struct ccupdate_struct new; struct array_cache *new_shared; @@ -2643,7 +2694,7 @@ static int do_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount, in } -static void enable_cpucache (kmem_cache_t *cachep) +static void enable_cpucache(kmem_cache_t *cachep) { int err; int limit, shared; @@ -2790,7 +2841,7 @@ static void cache_reap(void *unused) next_unlock: spin_unlock_irq(&searchp->spinlock); next: - ; + cond_resched(); } check_irq_on(); up(&cache_chain_sem); @@ -2812,15 +2863,16 @@ static void *s_start(struct seq_file *m, loff_t *pos) * without _too_ many complaints. 
*/ #if STATS - seq_puts(m, "slabinfo - version: 2.0 (statistics)\n"); + seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); #else - seq_puts(m, "slabinfo - version: 2.0\n"); + seq_puts(m, "slabinfo - version: 2.1\n"); #endif seq_puts(m, "# name "); - seq_puts(m, " : tunables "); + seq_puts(m, " : tunables "); seq_puts(m, " : slabdata "); #if STATS - seq_puts(m, " : globalstat "); + seq_puts(m, " : globalstat " + " "); seq_puts(m, " : cpustat "); #endif seq_putc(m, '\n'); @@ -2911,10 +2963,11 @@ static int s_show(struct seq_file *m, void *p) unsigned long errors = cachep->errors; unsigned long max_freeable = cachep->max_freeable; unsigned long free_limit = cachep->free_limit; + unsigned long node_allocs = cachep->node_allocs; - seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu", + seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu", allocs, high, grown, reaped, errors, - max_freeable, free_limit); + max_freeable, free_limit, node_allocs); } /* cpu stats */ {
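
The user-visible additions in this patch are the node-aware allocators kmem_cache_alloc_node() and kmalloc_node(), plus the CACHE(ULONG_MAX) sentinel that now terminates malloc_sizes[]: the sentinel's cs_cachep/cs_dmacachep stay NULL, so __find_general_cachep() can stop on "size > csizep->cs_size" without a separate end-of-table test and oversized requests simply get a NULL cache back. Below is a minimal usage sketch, not part of the patch: it assumes a CONFIG_NUMA kernel of this vintage with the matching <linux/slab.h> declaration of kmalloc_node() from this series, and the names alloc_scratch(), free_scratch() and scratch[] are invented purely for illustration.

	/*
	 * Illustrative sketch only: allocate a per-node scratch buffer on
	 * every online node, the same way __alloc_percpu() above now uses
	 * kmalloc_node() for its per-cpu pointers.
	 */
	#include <linux/slab.h>
	#include <linux/gfp.h>
	#include <linux/nodemask.h>
	#include <linux/errno.h>

	static void *scratch[MAX_NUMNODES];	/* invented example state */

	static void free_scratch(void)
	{
		int node;

		for_each_online_node(node) {
			kfree(scratch[node]);	/* kfree(NULL) is a no-op */
			scratch[node] = NULL;
		}
	}

	static int alloc_scratch(size_t size)
	{
		int node;

		for_each_online_node(node) {
			/* backed by pages from 'node' whenever possible */
			scratch[node] = kmalloc_node(size, GFP_KERNEL, node);
			if (!scratch[node]) {
				free_scratch();
				return -ENOMEM;
			}
		}
		return 0;
	}

As the hunks above show, kmalloc_node() only resolves the size to a general cache via kmem_find_general_cachep() and hands off to kmem_cache_alloc_node(), which walks the partial and free slab lists for a slab whose pages sit on the requested node (page_to_nid(virt_to_page(slabp->s_mem)) == nodeid), grows the cache on that node via cache_grow(cachep, flags, nodeid) when none is found, and gives up on node locality after a couple of passes (loop > 2) rather than failing the allocation.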