From 8ef8286689c6b5bc76212437b85bdd2ba749ee44 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Tue, 20 Feb 2007 13:57:52 -0800
Subject: [PATCH] [PATCH] slab: reduce size of alien cache to cover only possible nodes

The alien cache is a per cpu per node array allocated for every slab on the
system.  Currently we size this array for all nodes that the kernel does
support.  For IA64 this is 1024 nodes.  So we allocate an array with 1024
objects even if we only boot a system with 4 nodes.

This patch uses "nr_node_ids" to determine the number of possible nodes
supported by a hardware configuration and only allocates an alien cache
sized for possible nodes.

The initialization of nr_node_ids occurred too late relative to the
bootstrap of the slab allocator and so I moved the setup_nr_node_ids() into
free_area_init_nodes().

Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 2 +-
 mm/slab.c       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f66538b3c3..41737395bb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2964,6 +2964,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 					early_node_map[i].end_pfn);
 
 	/* Initialise every node */
+	setup_nr_node_ids();
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
 		free_area_init_node(nid, pgdat, NULL,
@@ -3189,7 +3190,6 @@ static int __init init_per_zone_pages_min(void)
 		min_free_kbytes = 65536;
 	setup_per_zone_pages_min();
 	setup_per_zone_lowmem_reserve();
-	setup_nr_node_ids();
 	return 0;
 }
 module_init(init_per_zone_pages_min)
diff --git a/mm/slab.c b/mm/slab.c
index 70784b848b..8fdaffa717 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1042,7 +1042,7 @@ static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 static struct array_cache **alloc_alien_cache(int node, int limit)
 {
 	struct array_cache **ac_ptr;
-	int memsize = sizeof(void *) * MAX_NUMNODES;
+	int memsize = sizeof(void *) * nr_node_ids;
 	int i;
 
 	if (limit > 1)
-- 
2.20.1
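
For illustration, a minimal userspace sketch of the sizing difference the mm/slab.c hunk makes.  This is not kernel code: MAX_NUMNODES and nr_node_ids are given the values from the IA64 example in the commit message (1024 built-in nodes, 4 nodes actually booted), and the struct array_cache layout is left opaque because only the size of the pointer array matters here.

/*
 * Simplified userspace sketch of the alien-cache pointer-array sizing.
 * Values are assumptions taken from the commit message, not read from
 * a running kernel.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_NUMNODES 1024          /* nodes the kernel is built to support */
static int nr_node_ids = 4;        /* possible nodes on this machine (assumed) */

/* Stand-in for struct array_cache; the real layout does not matter here. */
struct array_cache;

/* Before the patch: one pointer slot per buildable node. */
static struct array_cache **alloc_alien_cache_old(void)
{
	return calloc(MAX_NUMNODES, sizeof(struct array_cache *));
}

/* After the patch: one pointer slot per possible node on this machine. */
static struct array_cache **alloc_alien_cache_new(void)
{
	return calloc(nr_node_ids, sizeof(struct array_cache *));
}

int main(void)
{
	printf("old array size per cpu per cache: %zu bytes\n",
	       MAX_NUMNODES * sizeof(void *));
	printf("new array size per cpu per cache: %zu bytes\n",
	       nr_node_ids * sizeof(void *));
	free(alloc_alien_cache_old());
	free(alloc_alien_cache_new());
	return 0;
}

On a 64-bit machine with these assumed values, the pointer array shrinks from 8192 bytes to 32 bytes per cpu per node per cache, which is why nr_node_ids must be set up before the slab allocator bootstraps, as the page_alloc.c hunks arrange.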