+
+/**
+ * slab_destroy - destroy and release all objects in a slab
+ * @cachep: the cache the slab being destroyed belongs to
+ * @slabp: the slab being destroyed
+ *
+ * Destroy all the objects in a slab and release the memory back to the
+ * system. The slab must have been unlinked from the cache before this
+ * function is called; the cache lock is not held/needed.
+ */
+static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
+{
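+ /*
+ * s_mem points at the first object in the slab; backing up over the
+ * colour offset recovers the start of the underlying pages, which is
+ * what kmem_freepages() expects.
+ */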
+ void *addr = slabp->s_mem - slabp->colouroff;
+
+ slab_destroy_objs(cachep, slabp);
+ if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
+ struct slab_rcu *slab_rcu;
+
+ slab_rcu = (struct slab_rcu *)slabp;
+ slab_rcu->cachep = cachep;
+ slab_rcu->addr = addr;
+ call_rcu(&slab_rcu->head, kmem_rcu_free);
+ } else {
+ kmem_freepages(cachep, addr);
+ if (OFF_SLAB(cachep))
+ kmem_cache_free(cachep->slabp_cache, slabp);
+ }
+}
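+
+/*
+ * For context: the SLAB_DESTROY_BY_RCU branch above only stashes the
+ * cache and page address and defers the actual free until an RCU grace
+ * period has elapsed. A sketch of what the paired callback has to do
+ * with those stashed fields (the real kmem_rcu_free is defined earlier
+ * in slab.c; this is an illustration, not the verbatim implementation):
+ *
+ *     static void kmem_rcu_free(struct rcu_head *head)
+ *     {
+ *             struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
+ *             struct kmem_cache *cachep = slab_rcu->cachep;
+ *
+ *             kmem_freepages(cachep, slab_rcu->addr);
+ *             if (OFF_SLAB(cachep))
+ *                     kmem_cache_free(cachep->slabp_cache, slab_rcu);
+ *     }
+ */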
+
+/*
+ * Set up the kmem_list3s for a bootstrap cache, pointing them at the
+ * statically allocated initkmem_list3 entries starting at the given
+ * index. Used while the cache backing kmalloc(sizeof(struct kmem_list3))
+ * is not yet available.
+ */
+static void set_up_list3s(struct kmem_cache *cachep, int index)
+{
+ int node;
+
+ for_each_online_node(node) {
+ cachep->nodelists[node] = &initkmem_list3[index + node];
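+ /*
+ * Stagger the initial reap times: the cache pointer value gives
+ * each cache a different offset within REAPTIMEOUT_LIST3, so
+ * cache_reap() does not process every cache in the same jiffy.
+ */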
+ cachep->nodelists[node]->next_reap = jiffies +
+ REAPTIMEOUT_LIST3 +
+ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+ }
+}
+
+/**
+ * calculate_slab_order - calculate size (page order) of slabs
+ * @cachep: pointer to the cache that is being created
+ * @size: size of objects to be created in this cache
+ * @align: required alignment for the objects
+ * @flags: slab allocation flags
+ *
+ * Also calculates the number of objects per slab.
+ *
+ * This could be made much more intelligent. For now, try to avoid using
+ * high order pages for slabs. When the gfp() functions are more friendly
+ * towards high-order requests, this should be changed.
+ */
+static size_t calculate_slab_order(struct kmem_cache *cachep,
+ size_t size, size_t align, unsigned long flags)
+{
+ unsigned long offslab_limit;
+ size_t left_over = 0;
+ int gfporder;
+
+ for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
+ unsigned int num;
+ size_t remainder;
+
+ cache_estimate(gfporder, size, align, flags, &remainder, &num);
+ if (!num)
+ continue;
+
+ if (flags & CFLGS_OFF_SLAB) {
+ /*
+ * Max number of objs-per-slab for caches which
+ * use off-slab slabs. Needed to avoid a possible
+ * looping condition in cache_grow().
+ */
+ offslab_limit = size - sizeof(struct slab);
+ offslab_limit /= sizeof(kmem_bufctl_t);
+
+ if (num > offslab_limit)
+ break;
+ }
+
+ /* Found something acceptable - save it away */
+ cachep->num = num;
+ cachep->gfporder = gfporder;
+ left_over = remainder;
+
+ /*
+ * A VFS-reclaimable slab tends to have most allocations as GFP_NOFS,
+ * and we really don't want to have to allocate higher-order pages
+ * when we are unable to shrink the dcache.
+ */
+ if (flags & SLAB_RECLAIM_ACCOUNT)
+ break;
+
+ /*
+ * A large number of objects is good, but very large slabs are
+ * currently bad for the gfp() functions.
+ */
+ if (gfporder >= slab_break_gfp_order)
+ break;
+
+ /*
+ * Acceptable internal fragmentation? Stop once the wasted space
+ * is no more than 1/8th of the total slab size.
+ */
+ if (left_over * 8 <= (PAGE_SIZE << gfporder))
+ break;
+ }
+ return left_over;
+}
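+
+/*
+ * The order loop above relies entirely on cache_estimate() (defined
+ * earlier in this file) to report how many objects fit at a given
+ * order and how much space is left over. Roughly, for the on-slab
+ * case it finds the largest num satisfying
+ *
+ *     num * size + ALIGN(sizeof(struct slab) +
+ *                        num * sizeof(kmem_bufctl_t), align)
+ *             <= PAGE_SIZE << gfporder
+ *
+ * and returns the unused remainder as left_over. (A sketch of the
+ * contract, not the verbatim implementation.)
+ */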
+
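+/*
+ * Bootstrap note: while the allocator is still coming up, the per-cpu
+ * arrays and per-node lists cannot themselves be kmalloc'ed, so this
+ * helper walks g_cpucache_up through NONE -> PARTIAL_AC -> PARTIAL_L3
+ * -> FULL, substituting statically allocated structures until the
+ * cache needed for each allocation exists.
+ */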
+static void setup_cpu_cache(struct kmem_cache *cachep)
+{
+ if (g_cpucache_up == FULL) {
+ enable_cpucache(cachep);
+ return;
+ }
+ if (g_cpucache_up == NONE) {
+ /*
+ * Note: the first kmem_cache_create must create the cache
+ * that's used by kmalloc(24), otherwise the creation of
+ * further caches will BUG().
+ */
+ cachep->array[smp_processor_id()] = &initarray_generic.cache;
+
+ /*
+ * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
+ * the first cache, then we need to set up all its list3s,
+ * otherwise the creation of further caches will BUG().
+ */
+ set_up_list3s(cachep, SIZE_AC);
+ if (INDEX_AC == INDEX_L3)
+ g_cpucache_up = PARTIAL_L3;
+ else
+ g_cpucache_up = PARTIAL_AC;
+ } else {
+ cachep->array[smp_processor_id()] =
+ kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+
+ if (g_cpucache_up == PARTIAL_AC) {
+ set_up_list3s(cachep, SIZE_L3);
+ g_cpucache_up = PARTIAL_L3;
+ } else {
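+ /*
+ * g_cpucache_up is PARTIAL_L3 here: the kmem_list3 cache is up,
+ * so the per-node lists can be allocated directly.
+ */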
+ int node;
+ for_each_online_node(node) {
+ cachep->nodelists[node] =
+ kmalloc_node(sizeof(struct kmem_list3),
+ GFP_KERNEL, node);
+ BUG_ON(!cachep->nodelists[node]);
+ kmem_list3_init(cachep->nodelists[node]);
+ }
+ }
+ }
+ cachep->nodelists[numa_node_id()]->next_reap =
+ jiffies + REAPTIMEOUT_LIST3 +
+ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+
+ cpu_cache_get(cachep)->avail = 0;
+ cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
+ cpu_cache_get(cachep)->batchcount = 1;
+ cpu_cache_get(cachep)->touched = 0;
+ cachep->batchcount = 1;
+ cachep->limit = BOOT_CPUCACHE_ENTRIES;