upgrade to linux 2.6.10-1.12_FC2
index ebb8eaa..9852fae 100644
--- a/mm/slab.c
+++ b/mm/slab.c
  */
 
 #define BUFCTL_END     (((kmem_bufctl_t)(~0U))-0)
-#define BUFCTL_FREE    (((kmem_bufctl_t)(~0U))-1)
+#define BUFCTL_ALLOC   (((kmem_bufctl_t)(~0U))-1)
 #define        SLAB_LIMIT      (((kmem_bufctl_t)(~0U))-2)
 
 /* Max number of objs-per-slab for caches which use off-slab slabs.
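
The rename from BUFCTL_FREE to BUFCTL_ALLOC matches what the sentinel
actually marks: under DEBUG, the bufctl entry of an object that has just
been handed out is set to this value, so the free path can recognise a
double free (see the free_block() hunk further down). A minimal
user-space sketch of the bufctl free list, using hypothetical standalone
types rather than the kernel's kmem_bufctl_t and struct slab:

	typedef unsigned int bufctl_t;
	#define BUFCTL_END   (((bufctl_t)(~0U)) - 0)	/* end of free list */
	#define BUFCTL_ALLOC (((bufctl_t)(~0U)) - 1)	/* object is in use */

	struct mini_slab {
		bufctl_t free;		/* index of first free object */
		bufctl_t bufctl[8];	/* one entry per object */
	};

	static int mini_alloc(struct mini_slab *s)
	{
		bufctl_t obj = s->free;

		if (obj == BUFCTL_END)
			return -1;			/* slab is full */
		s->free = s->bufctl[obj];		/* pop the free list */
		s->bufctl[obj] = BUFCTL_ALLOC;		/* debug marker */
		return (int)obj;
	}

	static int mini_free(struct mini_slab *s, unsigned int obj)
	{
		if (s->bufctl[obj] != BUFCTL_ALLOC)
			return -1;			/* double free caught */
		s->bufctl[obj] = s->free;		/* push back on list */
		s->free = obj;
		return 0;
	}
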
@@ -327,6 +327,7 @@ struct kmem_cache_s {
        unsigned long           reaped;
        unsigned long           errors;
        unsigned long           max_freeable;
+       unsigned long           node_allocs;
        atomic_t                allochit;
        atomic_t                allocmiss;
        atomic_t                freehit;
@@ -335,6 +336,7 @@ struct kmem_cache_s {
 #if DEBUG
        int                     dbghead;
        int                     reallen;
+       unsigned long           redzonetest;
 #endif
 };
 
@@ -350,6 +352,7 @@ struct kmem_cache_s {
  */
 #define REAPTIMEOUT_CPUC       (2*HZ)
 #define REAPTIMEOUT_LIST3      (4*HZ)
+#define REDZONETIMEOUT         (300*HZ)
 
 #if STATS
 #define        STATS_INC_ACTIVE(x)     ((x)->num_active++)
@@ -361,6 +364,7 @@ struct kmem_cache_s {
                                        (x)->high_mark = (x)->num_active; \
                                } while (0)
 #define        STATS_INC_ERR(x)        ((x)->errors++)
+#define        STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
 #define        STATS_SET_FREEABLE(x, i) \
                                do { if ((x)->max_freeable < i) \
                                        (x)->max_freeable = i; \
@@ -378,6 +382,7 @@ struct kmem_cache_s {
 #define        STATS_INC_REAPED(x)     do { } while (0)
 #define        STATS_SET_HIGH(x)       do { } while (0)
 #define        STATS_INC_ERR(x)        do { } while (0)
+#define        STATS_INC_NODEALLOCS(x) do { } while (0)
 #define        STATS_SET_FREEABLE(x, i) \
                                do { } while (0)
 
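
Like its neighbours, STATS_INC_NODEALLOCS() gets a no-op twin for
!STATS builds, so the call site added in kmem_cache_alloc_node() below
stays unconditional. The do { } while (0) body keeps the expansion a
single statement that still demands its trailing semicolon, so both
configurations parse identically at every call site. A hypothetical
caller:

	/* Touches the counter only when STATS is 1, but parses the
	 * same way either way, even inside an if/else. */
	if (nodeid != -1)
		STATS_INC_NODEALLOCS(cachep);
	else
		STATS_INC_ERR(cachep);
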
@@ -1030,8 +1035,8 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
                        /* Mismatch ! */
                        /* Print header */
                        if (lines == 0) {
-                               printk(KERN_ERR "Slab corruption: start=%p, len=%d\n",
-                                               realobj, size);
+                               printk(KERN_ERR "Slab corruption: (%s) start=%p, len=%d\n",
+                                               print_tainted(), realobj, size);
                                print_objinfo(cachep, objp, 0);
                        }
                        /* Hexdump the affected line */
@@ -1414,7 +1419,11 @@ next:
        } 
 
        cachep->lists.next_reap = jiffies + REAPTIMEOUT_LIST3 +
-                                       ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+                                       ((unsigned long)cachep/L1_CACHE_BYTES)%REAPTIMEOUT_LIST3;
+#if DEBUG
+       cachep->redzonetest = jiffies + REDZONETIMEOUT +
+                                       ((unsigned long)cachep/L1_CACHE_BYTES)%REDZONETIMEOUT;
+#endif
 
        /* Need the semaphore to access the chain. */
        down(&cache_chain_sem);
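
Both reap deadlines (and the new redzonetest deadline) carry a
per-cache jitter so that all caches do not fire in the same tick. The
fix divides the descriptor address by L1_CACHE_BYTES before the modulo:
cache descriptors are cache-line aligned, so the low bits are always
zero and the old expression could only yield offsets that were
multiples of gcd(L1_CACHE_BYTES, timeout). A small user-space
demonstration, assuming illustrative values HZ = 1000 and
L1_CACHE_BYTES = 128:

	#include <stdio.h>

	#define HZ		1000	/* illustrative */
	#define L1_CACHE_BYTES	128	/* illustrative */
	#define TIMEOUT		(4 * HZ)	/* REAPTIMEOUT_LIST3 */

	static int old_hit[TIMEOUT], new_hit[TIMEOUT];

	int main(void)
	{
		int old_n = 0, new_n = 0;
		unsigned long p;

		/* Walk many cache-line-aligned addresses and count how
		 * many distinct timer offsets each formula can yield. */
		for (p = 0; p < (1UL << 20); p += L1_CACHE_BYTES) {
			if (!old_hit[p % TIMEOUT]++)
				old_n++;
			if (!new_hit[(p / L1_CACHE_BYTES) % TIMEOUT]++)
				new_n++;
		}
		printf("distinct offsets: old=%d new=%d of %d\n",
		       old_n, new_n, TIMEOUT);	/* old=125 new=4000 */
		return 0;
	}
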
@@ -1747,7 +1756,7 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
  * Grow (by 1) the number of slabs within a cache.  This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow (kmem_cache_t * cachep, int flags)
+static int cache_grow (kmem_cache_t * cachep, int flags, int nodeid)
 {
        struct slab     *slabp;
        void            *objp;
@@ -1798,7 +1807,7 @@ static int cache_grow (kmem_cache_t * cachep, int flags)
 
 
        /* Get mem for the objs. */
-       if (!(objp = kmem_getpages(cachep, flags, -1)))
+       if (!(objp = kmem_getpages(cachep, flags, nodeid)))
                goto failed;
 
        /* Get slab management. */
@@ -2011,7 +2020,7 @@ retry:
                        slabp->inuse++;
                        next = slab_bufctl(slabp)[slabp->free];
 #if DEBUG
-                       slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
+                       slab_bufctl(slabp)[slabp->free] = BUFCTL_ALLOC;
 #endif
                        slabp->free = next;
                }
@@ -2032,7 +2041,7 @@ alloc_done:
 
        if (unlikely(!ac->avail)) {
                int x;
-               x = cache_grow(cachep, flags);
+               x = cache_grow(cachep, flags, -1);
                
                // cache_grow can reenable interrupts, then ac could change.
                ac = ac_data(cachep);
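
cache_grow() now takes the target node and hands it straight to
kmem_getpages(); the regular refill path keeps the old behaviour by
passing -1 ("no preference"), while kmem_cache_alloc_node() below can
demand a specific node. A sketch of the convention, assuming the usual
page allocator entry points (not a copy of kmem_getpages()):

	/* nodeid == -1: allocate wherever the current policy likes;
	 * otherwise force the pages onto the given node. */
	static void *get_backing_pages(unsigned int gfpflags, int order,
				       int nodeid)
	{
		struct page *page;

		if (nodeid == -1)
			page = alloc_pages(gfpflags, order);
		else
			page = alloc_pages_node(nodeid, gfpflags, order);
		return page ? page_address(page) : NULL;
	}
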
@@ -2149,7 +2158,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
                objnr = (objp - slabp->s_mem) / cachep->objsize;
                check_slabp(cachep, slabp);
 #if DEBUG
-               if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
+               if (slab_bufctl(slabp)[objnr] != BUFCTL_ALLOC) {
                        printk(KERN_ERR "slab: double free detected in cache '%s', objp %p.\n",
                                                cachep->name, objp);
                        BUG();
@@ -2313,6 +2322,7 @@ out:
        return 0;
 }
 
+#ifdef CONFIG_NUMA
 /**
  * kmem_cache_alloc_node - Allocate an object on the specified node
  * @cachep: The cache to allocate from.
@@ -2325,69 +2335,80 @@ out:
  */
 void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
 {
-       size_t offset;
+       int loop;
        void *objp;
        struct slab *slabp;
        kmem_bufctl_t next;
 
-       /* The main algorithms are not node aware, thus we have to cheat:
-        * We bypass all caches and allocate a new slab.
-        * The following code is a streamlined copy of cache_grow().
-        */
+       for (loop = 0;;loop++) {
+               struct list_head *q;
 
-       /* Get colour for the slab, and update the next value. */
-       spin_lock_irq(&cachep->spinlock);
-       offset = cachep->colour_next;
-       cachep->colour_next++;
-       if (cachep->colour_next >= cachep->colour)
-               cachep->colour_next = 0;
-       offset *= cachep->colour_off;
-       spin_unlock_irq(&cachep->spinlock);
+               objp = NULL;
+               check_irq_on();
+               spin_lock_irq(&cachep->spinlock);
+               /* walk through all partial and empty slabs and find one
+                * from the right node */
+               list_for_each(q,&cachep->lists.slabs_partial) {
+                       slabp = list_entry(q, struct slab, list);
+
+                       if (page_to_nid(virt_to_page(slabp->s_mem)) == nodeid ||
+                                       loop > 2)
+                               goto got_slabp;
+               }
+               list_for_each(q, &cachep->lists.slabs_free) {
+                       slabp = list_entry(q, struct slab, list);
 
-       /* Get mem for the objs. */
-       if (!(objp = kmem_getpages(cachep, GFP_KERNEL, nodeid)))
-               goto failed;
+                       if (page_to_nid(virt_to_page(slabp->s_mem)) == nodeid ||
+                                       loop > 2)
+                               goto got_slabp;
+               }
+               spin_unlock_irq(&cachep->spinlock);
 
-       /* Get slab management. */
-       if (!(slabp = alloc_slabmgmt(cachep, objp, offset, GFP_KERNEL)))
-               goto opps1;
+               local_irq_disable();
+               if (!cache_grow(cachep, GFP_KERNEL, nodeid)) {
+                       local_irq_enable();
+                       return NULL;
+               }
+               local_irq_enable();
+       }
+got_slabp:
+       /* found one: allocate object */
+       check_slabp(cachep, slabp);
+       check_spinlock_acquired(cachep);
 
-       set_slab_attr(cachep, slabp, objp);
-       cache_init_objs(cachep, slabp, SLAB_CTOR_CONSTRUCTOR);
+       STATS_INC_ALLOCED(cachep);
+       STATS_INC_ACTIVE(cachep);
+       STATS_SET_HIGH(cachep);
+       STATS_INC_NODEALLOCS(cachep);
 
-       /* The first object is ours: */
        objp = slabp->s_mem + slabp->free*cachep->objsize;
+
        slabp->inuse++;
        next = slab_bufctl(slabp)[slabp->free];
 #if DEBUG
-       slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
+       slab_bufctl(slabp)[slabp->free] = BUFCTL_ALLOC;
 #endif
        slabp->free = next;
-
-       /* add the remaining objects into the cache */
-       spin_lock_irq(&cachep->spinlock);
        check_slabp(cachep, slabp);
-       STATS_INC_GROWN(cachep);
-       /* Make slab active. */
-       if (slabp->free == BUFCTL_END) {
-               list_add_tail(&slabp->list, &(list3_data(cachep)->slabs_full));
-       } else {
-               list_add_tail(&slabp->list,
-                               &(list3_data(cachep)->slabs_partial));
-               list3_data(cachep)->free_objects += cachep->num-1;
-       }
+
+       /* move slabp to correct slabp list: */
+       list_del(&slabp->list);
+       if (slabp->free == BUFCTL_END)
+               list_add(&slabp->list, &cachep->lists.slabs_full);
+       else
+               list_add(&slabp->list, &cachep->lists.slabs_partial);
+
+       list3_data(cachep)->free_objects--;
        spin_unlock_irq(&cachep->spinlock);
+
        objp = cache_alloc_debugcheck_after(cachep, GFP_KERNEL, objp,
                                        __builtin_return_address(0));
        return objp;
-opps1:
-       kmem_freepages(cachep, objp);
-failed:
-       return NULL;
-
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#endif
+
 /**
  * kmalloc - allocate memory
  * @size: how many bytes of memory are required.
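
The rewritten kmem_cache_alloc_node() above drops the old "cheat" of
growing a brand-new slab for every node-local allocation. It now scans
the partial and then the free slab lists for a slab whose backing pages
sit on the requested node, grows the cache on that node when nothing
suitable exists, and after two failed passes (loop > 2) accepts a slab
from any node rather than failing; the whole function is also now built
only on CONFIG_NUMA kernels. A hypothetical caller, under this era's
signature (no gfp flags argument; the allocation itself uses
GFP_KERNEL):

	/* my_cache and struct my_ctl are made up for the example;
	 * MAX_NUMNODES and for_each_online_node() are assumed from the
	 * nodemask machinery of this kernel generation. */
	static struct my_ctl *ctl[MAX_NUMNODES];

	static int __init my_ctl_init(void)
	{
		int node;

		for_each_online_node(node) {
			/* keep each node's control block node-local */
			ctl[node] = kmem_cache_alloc_node(my_cache, node);
			if (!ctl[node])
				return -ENOMEM;
		}
		return 0;
	}
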
@@ -2558,6 +2579,7 @@ free_percpu(const void *objp)
                        continue;
                kfree(p->ptrs[i]);
        }
+       kfree(p);
 }
 
 EXPORT_SYMBOL(free_percpu);
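
The added kfree(p) plugs a leak: __alloc_percpu() allocates both the
per-CPU objects and the struct percpu_data descriptor holding their
pointers, but free_percpu() previously released only the objects, so
every allocate/free cycle leaked one descriptor. The fix is transparent
to callers; a hypothetical user of this era's percpu interface:

	/* counters is made up for the example; alloc_percpu() and
	 * per_cpu_ptr() are the linux/percpu.h interfaces. */
	long *counters = alloc_percpu(long);

	if (!counters)
		return -ENOMEM;
	(*per_cpu_ptr(counters, smp_processor_id()))++;
	free_percpu(counters);	/* now also frees the descriptor */
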
@@ -2570,6 +2592,86 @@ unsigned int kmem_cache_size(kmem_cache_t *cachep)
 
 EXPORT_SYMBOL(kmem_cache_size);
 
+#if DEBUG
+static void check_slabuse(kmem_cache_t *cachep, struct slab *slabp)
+{
+       int i;
+
+       if (!(cachep->flags & SLAB_RED_ZONE))
+               return; /* no redzone data to check */
+
+       for (i=0;i<cachep->num;i++) {
+               void *objp = slabp->s_mem + cachep->objsize * i;
+               unsigned long red1, red2;
+
+               red1 = *dbg_redzone1(cachep, objp);
+               red2 = *dbg_redzone2(cachep, objp);
+
+               /* simplest case: marked as inactive */
+               if (red1 == RED_INACTIVE && red2 == RED_INACTIVE)
+                       continue;
+
+               /* tricky case: if the bufctl value is BUFCTL_ALLOC, then
+                * the object is either allocated or somewhere in a cpu
+                * cache. The cpu caches are lockless and there might be
+                * a concurrent alloc/free call, thus we must accept random
+                * combinations of RED_ACTIVE and _INACTIVE
+                */
+               if (slab_bufctl(slabp)[i] == BUFCTL_ALLOC &&
+                               (red1 == RED_INACTIVE || red1 == RED_ACTIVE) &&
+                               (red2 == RED_INACTIVE || red2 == RED_ACTIVE))
+                       continue;
+
+               printk(KERN_ERR "slab %s: redzone mismatch in slabp %p, objp %p, bufctl 0x%x\n",
+                               cachep->name, slabp, objp, slab_bufctl(slabp)[i]);
+               print_objinfo(cachep, objp, 2);
+       }
+}
+
+/*
+ * Perform a self test on all slabs from a cache
+ */
+static void check_redzone(kmem_cache_t *cachep)
+{
+       struct list_head *q;
+       struct slab *slabp;
+
+       check_spinlock_acquired(cachep);
+
+       list_for_each(q,&cachep->lists.slabs_full) {
+               slabp = list_entry(q, struct slab, list);
+
+               if (slabp->inuse != cachep->num) {
+                       printk(KERN_INFO "slab %s: wrong slabp found in full slab chain at %p (%d/%d).\n",
+                                       cachep->name, slabp, slabp->inuse, cachep->num);
+               }
+               check_slabp(cachep, slabp);
+               check_slabuse(cachep, slabp);
+       }
+       list_for_each(q,&cachep->lists.slabs_partial) {
+               slabp = list_entry(q, struct slab, list);
+
+               if (slabp->inuse == cachep->num || slabp->inuse == 0) {
+                       printk(KERN_INFO "slab %s: wrong slab found in partial chain at %p (%d/%d).\n",
+                                       cachep->name, slabp, slabp->inuse, cachep->num);
+               }
+               check_slabp(cachep, slabp);
+               check_slabuse(cachep, slabp);
+       }
+       list_for_each(q,&cachep->lists.slabs_free) {
+               slabp = list_entry(q, struct slab, list);
+
+               if (slabp->inuse != 0) {
+                       printk(KERN_INFO "slab %s: wrong slab found in free chain at %p (%d/%d).\n",
+                                       cachep->name, slabp, slabp->inuse, cachep->num);
+               }
+               check_slabp(cachep, slabp);
+               check_slabuse(cachep, slabp);
+       }
+}
+
+#endif
+
 struct ccupdate_struct {
        kmem_cache_t *cachep;
        struct array_cache *new[NR_CPUS];
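
check_redzone() gives DEBUG kernels a periodic self-test: cache_reap()
(next hunk) runs it roughly every REDZONETIMEOUT ticks, verifying both
that each slab sits on the list matching its inuse count and that every
object's two guard words are intact. Note that check_slabuse() must
accept mixed guard states for BUFCTL_ALLOC objects, since objects
parked in the lockless per-CPU arrays are still marked allocated in the
slab. A simplified user-space analogue of the per-object test
(illustrative magic values; the per-CPU-cache special case is left
out):

	#include <stdio.h>

	#define RED_INACTIVE 0x5A2CF071UL	/* illustrative guard magic */
	#define RED_ACTIVE   0x170FC2A5UL

	/* Return 1 if the guard words bracketing an object are
	 * consistent with its allocation state, 0 on corruption. */
	static int redzone_ok(unsigned long red1, unsigned long red2,
			      int allocated)
	{
		if (red1 == RED_INACTIVE && red2 == RED_INACTIVE)
			return !allocated;	/* free, guards intact */
		if (red1 == RED_ACTIVE && red2 == RED_ACTIVE)
			return allocated;	/* live, guards intact */
		return 0;			/* a guard was overwritten */
	}

	int main(void)
	{
		printf("%d\n", redzone_ok(RED_ACTIVE, RED_ACTIVE, 1));	/* 1 */
		printf("%d\n", redzone_ok(RED_ACTIVE, 0xdeadbeefUL, 1)); /* 0 */
		return 0;
	}
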
@@ -2753,6 +2855,12 @@ static void cache_reap(void *unused)
 
                drain_array_locked(searchp, ac_data(searchp), 0);
 
+#if DEBUG
+               if(time_before(searchp->redzonetest, jiffies)) {
+                       searchp->redzonetest = jiffies + REDZONETIMEOUT;
+                       check_redzone(searchp);
+               }
+#endif
                if(time_after(searchp->lists.next_reap, jiffies))
                        goto next_unlock;
 
@@ -2812,15 +2920,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
                 * without _too_ many complaints.
                 */
 #if STATS
-               seq_puts(m, "slabinfo - version: 2.0 (statistics)\n");
+               seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
 #else
-               seq_puts(m, "slabinfo - version: 2.0\n");
+               seq_puts(m, "slabinfo - version: 2.1\n");
 #endif
                seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
                seq_puts(m, " : tunables <batchcount> <limit> <sharedfactor>");
                seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
 #if STATS
-               seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <freelimit>");
+               seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped>"
+                               " <error> <maxfreeable> <freelimit> <nodeallocs>");
                seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
 #endif
                seq_putc(m, '\n');
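
Bumping the version string to 2.1 tells slabinfo parsers about the new
<nodeallocs> column in globalstat. Assembled from the seq_puts() calls
above, a STATS-enabled header now reads (the column line is a single
line, wrapped here for readability):

	slabinfo - version: 2.1 (statistics)
	# name            <active_objs> <num_objs> <objsize> <objperslab>
	    <pagesperslab> : tunables <batchcount> <limit> <sharedfactor>
	    : slabdata <active_slabs> <num_slabs> <sharedavail>
	    : globalstat <listallocs> <maxobjs> <grown> <reaped> <error>
	    <maxfreeable> <freelimit> <nodeallocs>
	    : cpustat <allochit> <allocmiss> <freehit> <freemiss>
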
@@ -2911,10 +3020,11 @@ static int s_show(struct seq_file *m, void *p)
                unsigned long errors = cachep->errors;
                unsigned long max_freeable = cachep->max_freeable;
                unsigned long free_limit = cachep->free_limit;
+               unsigned long node_allocs = cachep->node_allocs;
 
-               seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu",
+               seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu",
                                allocs, high, grown, reaped, errors, 
-                               max_freeable, free_limit);
+                               max_freeable, free_limit, node_allocs);
        }
        /* cpu stats */
        {