X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=mm%2Fsparse.c;h=6f3fff907bc25719926d0e91f4b0ea70335b4447;hb=36f021b579d195cdc5fa6f3e2bab198b4bf70643;hp=b3c82ba300124ea28d1cb952f337e387b1c4b9eb;hpb=752c58a471c108d64da1676b2925dfbd83eb177e;p=powerpc.git

diff --git a/mm/sparse.c b/mm/sparse.c
index b3c82ba300..6f3fff907b 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -24,8 +24,27 @@ struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
 #endif
 EXPORT_SYMBOL(mem_section);
 
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+/*
+ * If we did not store the node number in the page then we have to
+ * do a lookup in the section_to_node_table in order to find which
+ * node the page belongs to.
+ */
+#if MAX_NUMNODES <= 256
+static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
+#else
+static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
+#endif
+
+int page_to_nid(struct page *page)
+{
+	return section_to_node_table[page_to_section(page)];
+}
+EXPORT_SYMBOL(page_to_nid);
+#endif
+
 #ifdef CONFIG_SPARSEMEM_EXTREME
-static struct mem_section *sparse_index_alloc(int nid)
+static struct mem_section noinline *sparse_index_alloc(int nid)
 {
 	struct mem_section *section = NULL;
 	unsigned long array_size = SECTIONS_PER_ROOT *
@@ -42,13 +61,17 @@ static struct mem_section *sparse_index_alloc(int nid)
 	return section;
 }
 
-static int sparse_index_init(unsigned long section_nr, int nid)
+static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 {
 	static DEFINE_SPINLOCK(index_init_lock);
 	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
 	struct mem_section *section;
 	int ret = 0;
 
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+	section_to_node_table[section_nr] = nid;
+#endif
+
 	if (mem_section[root])
 		return -EEXIST;
 
@@ -115,7 +138,7 @@ static inline int sparse_early_nid(struct mem_section *section)
 }
 
 /* Record a memory area against a node. */
-void memory_present(int nid, unsigned long start, unsigned long end)
+void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
 
@@ -174,7 +197,7 @@ struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pn
 	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
 }
 
-static int sparse_init_one_section(struct mem_section *ms,
+static int __meminit sparse_init_one_section(struct mem_section *ms,
 		unsigned long pnum, struct page *mem_map)
 {
 	if (!valid_section(ms))
@@ -186,7 +209,7 @@ static int sparse_init_one_section(struct mem_section *ms,
 	return 1;
 }
 
-static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
+static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 {
 	struct page *map;
 	struct mem_section *ms = __nr_to_section(pnum);
@@ -249,7 +272,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
  * Allocate the accumulated non-linear sections, allocate a mem_map
  * for each and record the physical to section mapping.
  */
-void sparse_init(void)
+void __init sparse_init(void)
 {
 	unsigned long pnum;
 	struct page *map;
@@ -265,6 +288,7 @@ void sparse_init(void)
 	}
 }
 
+#ifdef CONFIG_MEMORY_HOTPLUG
 /*
  * returns the number of sections whose mem_maps were properly
  * set. If this is <=0, then that means that the passed-in
@@ -304,3 +328,4 @@ out:
 	__kfree_section_memmap(memmap, nr_pages);
 	return ret;
 }
+#endif
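
For readers following the hunks above outside the kernel tree, here is a minimal standalone sketch of the mechanism the first hunk introduces: when the node id is not packed into page->flags (NODE_NOT_IN_PAGE_FLAGS), page_to_nid() resolves it through a per-section table that sparse_index_init() fills in as each section is registered. Everything below is an illustrative model, not kernel code: the mock struct page, the PAGE_SHIFT/SECTION_SHIFT values, the table size, and the pfn-based page_to_section() are assumptions chosen only so the sketch compiles and runs.

/* Standalone model of the section-indexed node lookup added by this patch. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      12                      /* assume 4 KB pages */
#define SECTION_SHIFT   27                      /* assume 128 MB sections */
#define NR_MEM_SECTIONS 1024                    /* illustrative size only */

/* Mock page: note that no node id is stored in it. */
struct page {
	unsigned long pfn;
};

/* One node id per memory section; u8 mirrors the MAX_NUMNODES <= 256 case. */
static uint8_t section_to_node_table[NR_MEM_SECTIONS];

/* Simplified: derive the section from the pfn (the kernel reads page->flags). */
static unsigned long page_to_section(const struct page *page)
{
	return page->pfn >> (SECTION_SHIFT - PAGE_SHIFT);
}

/* The lookup the patch adds: section index -> node id. */
static int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}

int main(void)
{
	/* What sparse_index_init() does per section: remember its node. */
	section_to_node_table[0] = 0;
	section_to_node_table[1] = 1;

	/* A page whose pfn falls inside section 1. */
	struct page p = { .pfn = (1UL << (SECTION_SHIFT - PAGE_SHIFT)) + 5 };
	printf("pfn %lu -> node %d\n", p.pfn, page_to_nid(&p));
	return 0;
}

The u8/u16 split in the patch is just a space trade-off: one byte per section is enough while MAX_NUMNODES <= 256, otherwise the table widens to u16.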