[PATCH] Get rid of zone_table[]
[powerpc.git] / mm / page_alloc.c
index c5caac2..23bc5bc 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/stop_machine.h>
 #include <linux/sort.h>
 #include <linux/pfn.h>
+#include <linux/backing-dev.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -82,13 +83,6 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
 
 EXPORT_SYMBOL(totalram_pages);
 
-/*
- * Used by page_zone() to look up the address of the struct zone whose
- * id is encoded in the upper bits of page->flags
- */
-struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
-EXPORT_SYMBOL(zone_table);
-
 static char *zone_names[MAX_NR_ZONES] = {
         "DMA",
 #ifdef CONFIG_ZONE_DMA32
@@ -485,7 +479,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order)
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
-       __free_one_page(page, zone ,order);
+       __free_one_page(page, zone, order);
        spin_unlock(&zone->lock);
 }
 
@@ -495,15 +489,13 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        int i;
        int reserved = 0;
 
-       if (!PageHighMem(page))
-               debug_check_no_locks_freed(page_address(page),
-                                          PAGE_SIZE<<order);
-
        for (i = 0 ; i < (1 << order) ; ++i)
                reserved += free_pages_check(page + i);
        if (reserved)
                return;
 
+       if (!PageHighMem(page))
+               debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);
 
@@ -787,6 +779,8 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
        if (free_pages_check(page))
                return;
 
+       if (!PageHighMem(page))
+               debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
        arch_free_page(page, 0);
        kernel_map_pages(page, 1, 0);
 
@@ -852,7 +846,7 @@ again:
                pcp = &zone_pcp(zone, cpu)->pcp[cold];
                local_irq_save(flags);
                if (!pcp->count) {
-                       pcp->count += rmqueue_bulk(zone, 0,
+                       pcp->count = rmqueue_bulk(zone, 0,
                                                pcp->batch, &pcp->list);
                        if (unlikely(!pcp->count))
                                goto failed;
@@ -925,7 +919,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 }
 
 /*
- * get_page_from_freeliest goes through the zonelist trying to allocate
+ * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
  */
 static struct page *
@@ -947,8 +941,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
                        zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
                                break;
                if ((alloc_flags & ALLOC_CPUSET) &&
-                               !cpuset_zone_allowed(zone, gfp_mask))
-                       continue;
+                       !cpuset_zone_allowed(zone, gfp_mask))
+                               continue;
 
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
                        unsigned long mark;
@@ -958,17 +952,18 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
                                mark = zone->pages_low;
                        else
                                mark = zone->pages_high;
-                       if (!zone_watermark_ok(zone , order, mark,
-                                   classzone_idx, alloc_flags))
+                       if (!zone_watermark_ok(zone, order, mark,
+                                   classzone_idx, alloc_flags)) {
                                if (!zone_reclaim_mode ||
                                    !zone_reclaim(zone, gfp_mask, order))
                                        continue;
+                       }
                }
 
                page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
-               if (page) {
+               if (page)
                        break;
-               }
+
        } while (*(++z) != NULL);
        return page;
 }
@@ -1004,9 +999,8 @@ restart:
        if (page)
                goto got_pg;
 
-       do {
+       for (z = zonelist->zones; *z; z++)
                wakeup_kswapd(*z, order);
-       } while (*(++z));
 
        /*
         * OK, we're below the kswapd watermark and have kicked background
@@ -1050,7 +1044,7 @@ nofail_alloc:
                        if (page)
                                goto got_pg;
                        if (gfp_mask & __GFP_NOFAIL) {
-                               blk_congestion_wait(WRITE, HZ/50);
+                               congestion_wait(WRITE, HZ/50);
                                goto nofail_alloc;
                        }
                }
@@ -1113,7 +1107,7 @@ rebalance:
                        do_retry = 1;
        }
        if (do_retry) {
-               blk_congestion_wait(WRITE, HZ/50);
+               congestion_wait(WRITE, HZ/50);
                goto rebalance;
        }
 
@@ -1688,6 +1682,8 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                if (!early_pfn_valid(pfn))
                        continue;
+               if (!early_pfn_in_nid(pfn, nid))
+                       continue;
                page = pfn_to_page(pfn);
                set_page_links(page, zone, nid, pfn);
                init_page_count(page);
@@ -1712,20 +1708,6 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
        }
 }
 
-#define ZONETABLE_INDEX(x, zone_nr)    ((x << ZONES_SHIFT) | zone_nr)
-void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
-               unsigned long pfn, unsigned long size)
-{
-       unsigned long snum = pfn_to_section_nr(pfn);
-       unsigned long end = pfn_to_section_nr(pfn + size);
-
-       if (FLAGS_HAS_NODE)
-               zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
-       else
-               for (; snum <= end; snum++)
-                       zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
-}
-
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
        memmap_init_zone((size), (nid), (zone), (start_pfn))
@@ -2258,7 +2240,7 @@ unsigned long __init __absent_pages_in_range(int nid,
 
        /* Account for ranges past physical memory on this node */
        if (range_end_pfn > prev_end_pfn)
-               hole_pages = range_end_pfn -
+               hole_pages += range_end_pfn -
                                max(range_start_pfn, prev_end_pfn);
 
        return hole_pages;
@@ -2404,7 +2386,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                zone->zone_pgdat = pgdat;
                zone->free_pages = 0;
 
-               zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
+               zone->prev_priority = DEF_PRIORITY;
 
                zone_pcp_init(zone);
                INIT_LIST_HEAD(&zone->active_list);
@@ -2418,7 +2400,6 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                if (!size)
                        continue;
 
-               zonetable_add(zone, nid, j, zone_start_pfn, size);
                ret = init_currently_empty_zone(zone, zone_start_pfn, size);
                BUG_ON(ret);
                zone_start_pfn += size;
@@ -2609,6 +2590,9 @@ unsigned long __init find_min_pfn_for_node(unsigned long nid)
 {
        int i;
 
+       /* Regions in the early_node_map can be in any order */
+       sort_node_map();
+
        /* Assuming a sorted map, the first range found has the starting pfn */
        for_each_active_range_index_in_nid(i, nid)
                return early_node_map[i].start_pfn;
@@ -2677,9 +2661,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
                        max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
        }
 
-       /* Regions in the early_node_map can be in any order */
-       sort_node_map();
-
        /* Print out the zone ranges */
        printk("Zone PFN ranges:\n");
        for (i = 0; i < MAX_NR_ZONES; i++)
@@ -3119,3 +3100,19 @@ unsigned long page_to_pfn(struct page *page)
 EXPORT_SYMBOL(pfn_to_page);
 EXPORT_SYMBOL(page_to_pfn);
 #endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
+
+#if MAX_NUMNODES > 1
+/*
+ * Find the highest possible node id.
+ */
+int highest_possible_node_id(void)
+{
+       unsigned int node;
+       unsigned int highest = 0;
+
+       for_each_node_mask(node, node_possible_map)
+               highest = node;
+       return highest;
+}
+EXPORT_SYMBOL(highest_possible_node_id);
+#endif