X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=mm%2Fsparse.c;h=e03b39f3540f79adf384c476f185e7a9da91e4ac;hb=3e733f071e16bdad13a75eedb102e8941b09927e;hp=ac26eb0d73cddeaf53b20fb7a7d31f954b8bc117;hpb=6bfe5c9d6f4dcaa998f67e691359cf7b1c4b443d;p=powerpc.git

diff --git a/mm/sparse.c b/mm/sparse.c
index ac26eb0d73..e03b39f354 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(page_to_nid);
 #endif
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
-static struct mem_section *sparse_index_alloc(int nid)
+static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
 {
 	struct mem_section *section = NULL;
 	unsigned long array_size = SECTIONS_PER_ROOT *
@@ -61,7 +61,7 @@ static struct mem_section *sparse_index_alloc(int nid)
 	return section;
 }
 
-static int sparse_index_init(unsigned long section_nr, int nid)
+static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 {
 	static DEFINE_SPINLOCK(index_init_lock);
 	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
@@ -138,7 +138,7 @@ static inline int sparse_early_nid(struct mem_section *section)
 }
 
 /* Record a memory area against a node. */
-void memory_present(int nid, unsigned long start, unsigned long end)
+void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
 
@@ -197,7 +197,7 @@ struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pn
 	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
 }
 
-static int sparse_init_one_section(struct mem_section *ms,
+static int __meminit sparse_init_one_section(struct mem_section *ms,
 		unsigned long pnum, struct page *mem_map)
 {
 	if (!valid_section(ms))
@@ -209,7 +209,13 @@ static int sparse_init_one_section(struct mem_section *ms,
 	return 1;
 }
 
-static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
+__attribute__((weak))
+void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
+{
+	return NULL;
+}
+
+static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 {
 	struct page *map;
 	struct mem_section *ms = __nr_to_section(pnum);
@@ -219,6 +225,11 @@ static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
 	if (map)
 		return map;
 
+	map = alloc_bootmem_high_node(NODE_DATA(nid),
+		       sizeof(struct page) * PAGES_PER_SECTION);
+	if (map)
+		return map;
+
 	map = alloc_bootmem_node(NODE_DATA(nid),
 			sizeof(struct page) * PAGES_PER_SECTION);
 	if (map)
@@ -229,6 +240,27 @@ static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
 	return NULL;
 }
 
+/*
+ * Allocate the accumulated non-linear sections, allocate a mem_map
+ * for each and record the physical to section mapping.
+ */
+void __init sparse_init(void)
+{
+	unsigned long pnum;
+	struct page *map;
+
+	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+		if (!valid_section_nr(pnum))
+			continue;
+
+		map = sparse_early_mem_map_alloc(pnum);
+		if (!map)
+			continue;
+		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
+	}
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
 {
 	struct page *page, *ret;
@@ -268,26 +300,6 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 		get_order(sizeof(struct page) * nr_pages));
 }
 
-/*
- * Allocate the accumulated non-linear sections, allocate a mem_map
- * for each and record the physical to section mapping.
- */
-void sparse_init(void)
-{
-	unsigned long pnum;
-	struct page *map;
-
-	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
-		if (!valid_section_nr(pnum))
-			continue;
-
-		map = sparse_early_mem_map_alloc(pnum);
-		if (!map)
-			continue;
-		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
-	}
-}
-
 /*
  * returns the number of sections whose mem_maps were properly
  * set. If this is <=0, then that means that the passed-in
@@ -327,3 +339,4 @@ out:
 	__kfree_section_memmap(memmap, nr_pages);
 	return ret;
 }
+#endif
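
One notable change above is the __attribute__((weak)) stub for alloc_bootmem_high_node(): when no architecture supplies a strong definition, the stub links in, returns NULL, and sparse_early_mem_map_alloc() simply falls through to alloc_bootmem_node(). A minimal user-space sketch of that weak-default-plus-fallback pattern follows; the names arch_alloc_high() and alloc_with_fallback() are invented for the illustration and are not kernel APIs.

/*
 * Sketch of the weak-default pattern, user space only. A strong
 * definition of arch_alloc_high() in another object file would
 * override this stub at link time; without one, the stub returns
 * NULL and the caller takes the generic fallback path, mirroring
 * how sparse_early_mem_map_alloc() falls back to alloc_bootmem_node().
 */
#include <stdio.h>
#include <stdlib.h>

/* Weak default: overridden by any strong definition of the same symbol. */
__attribute__((weak)) void *arch_alloc_high(size_t size)
{
	(void)size;
	return NULL;		/* no specialised allocator available */
}

static void *alloc_with_fallback(size_t size)
{
	void *p = arch_alloc_high(size);	/* try the override first */

	if (p)
		return p;
	return malloc(size);			/* generic fallback path */
}

int main(void)
{
	void *p = alloc_with_fallback(64);

	printf("allocated %p (weak stub returned NULL, fallback used)\n", p);
	free(p);
	return 0;
}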