slub: enable tracking of full slabs
author Christoph Lameter <clameter@sgi.com>
Sun, 6 May 2007 21:49:42 +0000 (14:49 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Mon, 7 May 2007 19:12:54 +0000 (12:12 -0700)
If slab tracking is on, build a list of full slabs so that we can verify
the integrity of all slabs and also build lists of alloc/free callers.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/slub_def.h
mm/slub.c
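
The point of the new list is that debugging code can reach every slab a
cache owns, not just the partial ones. As a rough illustration of the
integrity pass the commit message alludes to, one could walk a node's
full list under list_lock; validate_full_slabs() below is an editor's
sketch with a hypothetical name, not part of this patch:

	/*
	 * Sketch only, not in this patch: walk one node's full list and
	 * sanity-check each slab. Assumes interrupts are disabled,
	 * matching the locking convention of add_full()/remove_full().
	 */
	static void validate_full_slabs(struct kmem_cache *s,
						struct kmem_cache_node *n)
	{
		struct page *page;

		spin_lock(&n->list_lock);
		list_for_each_entry(page, &n->full, lru) {
			/* A slab on the full list has no free objects. */
			if (page->freelist)
				printk(KERN_ERR "SLUB %s: slab %p on full "
					"list has free objects\n",
					s->name, page);
		}
		spin_unlock(&n->list_lock);
	}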

index f8e0c86..ea27065 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -16,6 +16,7 @@ struct kmem_cache_node {
        unsigned long nr_partial;
        atomic_long_t nr_slabs;
        struct list_head partial;
+       struct list_head full;
 };
 
 /*
index cfc5301..c4f40d3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -661,6 +661,38 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
        return search == NULL;
 }
 
+/*
+ * Tracking of fully allocated slabs for debugging
+ */
+static void add_full(struct kmem_cache *s, struct page *page)
+{
+       struct kmem_cache_node *n;
+
+       VM_BUG_ON(!irqs_disabled());
+
+       if (!(s->flags & SLAB_STORE_USER))
+               return;
+
+       n = get_node(s, page_to_nid(page));
+       spin_lock(&n->list_lock);
+       list_add(&page->lru, &n->full);
+       spin_unlock(&n->list_lock);
+}
+
+static void remove_full(struct kmem_cache *s, struct page *page)
+{
+       struct kmem_cache_node *n;
+
+       if (!(s->flags & SLAB_STORE_USER))
+               return;
+
+       n = get_node(s, page_to_nid(page));
+
+       spin_lock(&n->list_lock);
+       list_del(&page->lru);
+       spin_unlock(&n->list_lock);
+}
+
 static int alloc_object_checks(struct kmem_cache *s, struct page *page,
                                                        void *object)
 {
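
A note on the locking above: add_full() takes n->list_lock with a plain
spin_lock() and asserts via VM_BUG_ON(!irqs_disabled()) that its callers
already run with interrupts off (it is reached from putback_slab() under
the slab lock, as the next hunk shows). Code that cannot make that
guarantee would need the irq-saving variant, along these lines;
count_full() is a hypothetical helper, not part of the patch:

	/* Sketch only: count full slabs where irqs may still be on. */
	static unsigned long count_full(struct kmem_cache_node *n)
	{
		unsigned long flags;
		unsigned long count = 0;
		struct page *page;

		spin_lock_irqsave(&n->list_lock, flags);
		list_for_each_entry(page, &n->full, lru)
			count++;
		spin_unlock_irqrestore(&n->list_lock, flags);

		return count;
	}
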
@@ -1090,6 +1122,8 @@ static void putback_slab(struct kmem_cache *s, struct page *page)
        if (page->inuse) {
                if (page->freelist)
                        add_partial(s, page);
+               else if (PageError(page))
+                       add_full(s, page);
                slab_unlock(page);
        } else {
                slab_unlock(page);
@@ -1302,7 +1336,7 @@ out_unlock:
 slab_empty:
        if (prior)
                /*
-                * Partially used slab that is on the partial list.
+                * Slab on the partial list.
                 */
                remove_partial(s, page);
 
@@ -1314,6 +1348,8 @@ slab_empty:
 debug:
        if (!free_object_checks(s, page, x))
                goto out_unlock;
+       if (!PageActive(page) && !page->freelist)
+               remove_full(s, page);
        if (s->flags & SLAB_STORE_USER)
                set_track(s, x, TRACK_FREE, addr);
        goto checks_ok;
@@ -1466,6 +1502,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
        atomic_long_set(&n->nr_slabs, 0);
        spin_lock_init(&n->list_lock);
        INIT_LIST_HEAD(&n->partial);
+       INIT_LIST_HEAD(&n->full);
 }
 
 #ifdef CONFIG_NUMA
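
Since add_full() only tracks slabs when SLAB_STORE_USER is set, every
object on the full list carries the alloc/free track records written by
set_track(). That is what makes the second goal of the commit message
possible: a debug pass can visit the full slabs and collect the stored
caller addresses. The sketch below assumes a get_track() helper that
mirrors the object layout set_track() writes, and uses s->objects and
s->size as the per-slab object count and object stride; none of it is
part of this patch:

	/*
	 * Sketch only, not in this patch: report the allocation caller
	 * of every object in one fully allocated slab. The caller is
	 * assumed to hold n->list_lock.
	 */
	static void print_alloc_callers(struct kmem_cache *s,
						struct page *page)
	{
		void *addr = page_address(page);
		int i;

		for (i = 0; i < s->objects; i++) {
			void *object = addr + i * s->size;
			/* get_track() assumed to mirror set_track(). */
			struct track *t = get_track(s, object, TRACK_ALLOC);

			if (t->addr)
				printk(KERN_INFO "%s: %p allocated from %p\n",
						s->name, object, t->addr);
		}
	}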