slab: NUMA kmem_cache diet
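This patch shrinks struct kmem_cache on NUMA builds. The nodelists[]
array moves to the end of the structure, and kmem_cache_init() sizes
each cache object as

	offsetof(struct kmem_cache, nodelists) +
		nr_node_ids * sizeof(struct kmem_list3 *)

so only nr_node_ids slots are allocated instead of MAX_NUMNODES. The
[MAX_NUMNODES] declaration stays only so the statically defined
cache_cache reserves the worst case; its DEBUG obj_size initializer
moves into kmem_cache_init() for the same reason, since the real size
is now known only at runtime. The patch also allocates the per-node
shared array cache only when cachep->shared is non-zero, and replaces
the CONFIG_SMP #ifdef in enable_cpucache() with a runtime
num_possible_cpus() > 1 check.

A minimal user-space sketch of the trailing-array sizing trick, with a
made-up struct cache standing in for kmem_cache (the MAX_NUMNODES and
nr_node_ids values here are illustrative, not the kernel's):

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_NUMNODES 64		/* compile-time worst case */

	struct cache {
		unsigned int flags;
		/*
		 * Declared with the worst-case bound so a static
		 * instance (like cache_cache) is always big enough;
		 * heap instances are trimmed to the live node count.
		 */
		void *nodelists[MAX_NUMNODES];
	};

	int main(void)
	{
		int nr_node_ids = 2;	/* only two possible nodes */
		size_t size = offsetof(struct cache, nodelists) +
			      nr_node_ids * sizeof(void *);
		struct cache *c = calloc(1, size);

		if (!c)
			return 1;
		printf("static %zu bytes, trimmed %zu bytes\n",
		       sizeof(struct cache), size);
		free(c);
		return 0;
	}

Nothing may be added after nodelists[] in such a layout: for a trimmed
instance, any later field would sit past the end of the allocation,
hence the "Do not add fields after nodelists[]" comment in the diff.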
index 21b2aef..1f2627c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -389,7 +389,6 @@ struct kmem_cache {
        unsigned int buffer_size;
        u32 reciprocal_buffer_size;
 /* 3) touched by every alloc & free from the backend */
-       struct kmem_list3 *nodelists[MAX_NUMNODES];
 
        unsigned int flags;             /* constant flags */
        unsigned int num;               /* # of objs per slab */
@@ -444,6 +443,17 @@ struct kmem_cache {
        int obj_offset;
        int obj_size;
 #endif
+       /*
+        * We put nodelists[] at the end of kmem_cache, because we want to size
+        * this array to nr_node_ids slots instead of MAX_NUMNODES
+        * (see kmem_cache_init()).
+        * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
+        * is statically defined, so we reserve the max number of nodes.
+        */
+       struct kmem_list3 *nodelists[MAX_NUMNODES];
+       /*
+        * Do not add fields after nodelists[]
+        */
 };
 
 #define CFLGS_OFF_SLAB         (0x80000000UL)
@@ -678,9 +688,6 @@ static struct kmem_cache cache_cache = {
        .shared = 1,
        .buffer_size = sizeof(struct kmem_cache),
        .name = "kmem_cache",
-#if DEBUG
-       .obj_size = sizeof(struct kmem_cache),
-#endif
 };
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
@@ -1223,19 +1230,20 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
                 */
                list_for_each_entry(cachep, &cache_chain, next) {
                        struct array_cache *nc;
-                       struct array_cache *shared;
+                       struct array_cache *shared = NULL;
                        struct array_cache **alien = NULL;
 
                        nc = alloc_arraycache(node, cachep->limit,
                                                cachep->batchcount);
                        if (!nc)
                                goto bad;
-                       shared = alloc_arraycache(node,
+                       if (cachep->shared) {
+                               shared = alloc_arraycache(node,
                                        cachep->shared * cachep->batchcount,
                                        0xbaadf00d);
-                       if (!shared)
-                               goto bad;
-
+                               if (!shared)
+                                       goto bad;
+                       }
                        if (use_alien_caches) {
                                 alien = alloc_alien_cache(node, cachep->limit);
                                 if (!alien)
@@ -1317,8 +1325,8 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 
                        shared = l3->shared;
                        if (shared) {
-                               free_block(cachep, l3->shared->entry,
-                                          l3->shared->avail, node);
+                               free_block(cachep, shared->entry,
+                                          shared->avail, node);
                                l3->shared = NULL;
                        }
 
@@ -1439,6 +1447,15 @@ void __init kmem_cache_init(void)
        cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
        cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
 
+       /*
+        * struct kmem_cache size depends on nr_node_ids, which
+        * can be less than MAX_NUMNODES.
+        */
+       cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
+                                nr_node_ids * sizeof(struct kmem_list3 *);
+#if DEBUG
+       cache_cache.obj_size = cache_cache.buffer_size;
+#endif
        cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
                                        cache_line_size());
        cache_cache.reciprocal_buffer_size =
@@ -3870,12 +3887,15 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
                                 goto fail;
                 }
 
-               new_shared = alloc_arraycache(node,
+               new_shared = NULL;
+               if (cachep->shared) {
+                       new_shared = alloc_arraycache(node,
                                cachep->shared*cachep->batchcount,
                                        0xbaadf00d);
-               if (!new_shared) {
-                       free_alien_cache(new_alien);
-                       goto fail;
+                       if (!new_shared) {
+                               free_alien_cache(new_alien);
+                               goto fail;
+                       }
                }
 
                l3 = cachep->nodelists[node];
@@ -4033,10 +4053,8 @@ static int enable_cpucache(struct kmem_cache *cachep)
         * to a larger limit. Thus disabled by default.
         */
        shared = 0;
-#ifdef CONFIG_SMP
-       if (cachep->buffer_size <= PAGE_SIZE)
+       if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
                shared = 8;
-#endif
 
 #if DEBUG
        /*