[PATCH] cpuset: rework cpuset_zone_allowed api

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5d123b3..8c1a116 100644
@@ -40,6 +40,7 @@
 #include <linux/sort.h>
 #include <linux/pfn.h>
 #include <linux/backing-dev.h>
+#include <linux/fault-inject.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -83,7 +84,7 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
 
 EXPORT_SYMBOL(totalram_pages);
 
-static char *zone_names[MAX_NR_ZONES] = {
+static char * const zone_names[MAX_NR_ZONES] = {
         "DMA",
 #ifdef CONFIG_ZONE_DMA32
         "DMA32",
@@ -230,7 +231,7 @@ static void prep_compound_page(struct page *page, unsigned long order)
        int i;
        int nr_pages = 1 << order;
 
-       page[1].lru.next = (void *)free_compound_page;  /* set dtor */
+       set_compound_page_dtor(page, free_compound_page);
        page[1].lru.prev = (void *)order;
        for (i = 0; i < nr_pages; i++) {
                struct page *p = page + i;
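The open-coded store of the destructor into page[1].lru.next is replaced by a named helper. Given the line it replaces, the helper presumably amounts to this sketch (modulo the exact spelling in that era's mm.h):

	typedef void compound_page_dtor(struct page *);

	static inline void set_compound_page_dtor(struct page *page,
						  compound_page_dtor *dtor)
	{
		page[1].lru.next = (void *)dtor;	/* same slot as before */
	}

The behaviour is unchanged; the point is that readers of prep_compound_page() no longer need to know which field of page[1] doubles as the destructor slot.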
@@ -701,7 +702,6 @@ void drain_node_pages(int nodeid)
 }
 #endif
 
-#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
 static void __drain_pages(unsigned int cpu)
 {
        unsigned long flags;
@@ -723,7 +723,6 @@ static void __drain_pages(unsigned int cpu)
                }
        }
 }
-#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_PM
 
@@ -894,6 +893,91 @@ failed:
 #define ALLOC_HIGH             0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET           0x40 /* check for correct cpuset */
 
+#ifdef CONFIG_FAIL_PAGE_ALLOC
+
+static struct fail_page_alloc_attr {
+       struct fault_attr attr;
+
+       u32 ignore_gfp_highmem;
+       u32 ignore_gfp_wait;
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+       struct dentry *ignore_gfp_highmem_file;
+       struct dentry *ignore_gfp_wait_file;
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+} fail_page_alloc = {
+       .attr = FAULT_ATTR_INITIALIZER,
+       .ignore_gfp_wait = 1,
+       .ignore_gfp_highmem = 1,
+};
+
+static int __init setup_fail_page_alloc(char *str)
+{
+       return setup_fault_attr(&fail_page_alloc.attr, str);
+}
+__setup("fail_page_alloc=", setup_fail_page_alloc);
+
+static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+{
+       if (gfp_mask & __GFP_NOFAIL)
+               return 0;
+       if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
+               return 0;
+       if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
+               return 0;
+
+       return should_fail(&fail_page_alloc.attr, 1 << order);
+}
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init fail_page_alloc_debugfs(void)
+{
+       mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+       struct dentry *dir;
+       int err;
+
+       err = init_fault_attr_dentries(&fail_page_alloc.attr,
+                                      "fail_page_alloc");
+       if (err)
+               return err;
+       dir = fail_page_alloc.attr.dentries.dir;
+
+       fail_page_alloc.ignore_gfp_wait_file =
+               debugfs_create_bool("ignore-gfp-wait", mode, dir,
+                                     &fail_page_alloc.ignore_gfp_wait);
+
+       fail_page_alloc.ignore_gfp_highmem_file =
+               debugfs_create_bool("ignore-gfp-highmem", mode, dir,
+                                     &fail_page_alloc.ignore_gfp_highmem);
+
+       if (!fail_page_alloc.ignore_gfp_wait_file ||
+                       !fail_page_alloc.ignore_gfp_highmem_file) {
+               err = -ENOMEM;
+               debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
+               debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
+               cleanup_fault_attr_dentries(&fail_page_alloc.attr);
+       }
+
+       return err;
+}
+
+late_initcall(fail_page_alloc_debugfs);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+#else /* CONFIG_FAIL_PAGE_ALLOC */
+
+static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+{
+       return 0;
+}
+
+#endif /* CONFIG_FAIL_PAGE_ALLOC */
+
 /*
  * Return 1 if free pages are above 'mark'. This takes into account the order
  * of the allocation.
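The fail_page_alloc instance above follows the generic fault-injection pattern: a struct fault_attr configured at boot (here via the fail_page_alloc= parameter) or through the debugfs files created in fail_page_alloc_debugfs(), consulted by a should_fail() call on each allocation. A subsystem wanting the same behaviour could hook in along these lines (fail_foo, foo_alloc and real_foo_alloc are hypothetical names; this is a sketch only):

	#include <linux/fault-inject.h>
	#include <linux/init.h>

	static struct fault_attr fail_foo = FAULT_ATTR_INITIALIZER;

	static int __init setup_fail_foo(char *str)
	{
		return setup_fault_attr(&fail_foo, str);
	}
	__setup("fail_foo=", setup_fail_foo);

	static void *foo_alloc(size_t size)
	{
		if (should_fail(&fail_foo, size))
			return NULL;			/* injected failure */
		return real_foo_alloc(size);		/* hypothetical backend */
	}

The boot string parsed by setup_fault_attr() encodes, as documented for these fault attributes, interval, probability, space and times. The filters in should_fail_alloc_page() keep the injector away from allocations that must not fail (__GFP_NOFAIL) and, by default, restrict injection to atomic lowmem requests (the ignore_gfp_wait and ignore_gfp_highmem knobs). Note also where the check lands in __alloc_pages() below: before the restart label, so even the very first attempt can be failed.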
@@ -1078,7 +1162,7 @@ zonelist_scan:
                        zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
                                break;
                if ((alloc_flags & ALLOC_CPUSET) &&
-                       !cpuset_zone_allowed(zone, gfp_mask))
+                       !cpuset_zone_allowed_softwall(zone, gfp_mask))
                                goto try_next_zone;
 
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
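This hunk is the one the patch title announces: the single cpuset_zone_allowed() is reworked into an explicit softwall/hardwall pair, and the zonelist scan opts for the softwall check, which can let blocking (GFP_KERNEL-style) allocations fall back beyond the task's own cpuset; the hardwall variant is for callers that must stay strictly confined. With CONFIG_CPUSETS disabled, both presumably collapse to trivial stubs, roughly:

	/* a sketch of the !CONFIG_CPUSETS stubs; the real header may differ */
	static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp)
	{
		return 1;	/* no cpusets configured: every zone is allowed */
	}

	static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp)
	{
		return 1;
	}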
@@ -1138,6 +1222,9 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
 
        might_sleep_if(wait);
 
+       if (should_fail_alloc_page(gfp_mask, order))
+               return NULL;
+
 restart:
        z = zonelist->zones;  /* the list of zones suitable for gfp_mask */
 
@@ -1151,6 +1238,17 @@ restart:
        if (page)
                goto got_pg;
 
+       /*
+        * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
+        * __GFP_NOWARN set) should not cause reclaim, since the subsystem
+        * using GFP_THISNODE (e.g. slab) may choose to trigger reclaim
+        * using a larger set of nodes once it has established that the
+        * allowed per-node queues are empty and that the nodes are
+        * overallocated.
+        */
+       if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+               goto nopage;
+
        for (z = zonelist->zones; *z; z++)
                wakeup_kswapd(*z, order);
 
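The new check compares against the full mask rather than testing a single bit because GFP_THISNODE is a composite; in the gfp.h of this era it is defined roughly as:

	#ifdef CONFIG_NUMA
	#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NORETRY | __GFP_NOWARN)
	#else
	#define GFP_THISNODE	((__force gfp_t)0)
	#endif

So `(gfp_mask & GFP_THISNODE) == GFP_THISNODE` is true only when all three flags are set. The NUMA_BUILD guard matters on !NUMA kernels: there the mask is 0, the comparison would match every allocation, and without the guard every request would jump straight to nopage; with it, the compiler drops the branch entirely.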
@@ -2896,7 +2994,6 @@ void __init free_area_init(unsigned long *zones_size)
                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int page_alloc_cpu_notify(struct notifier_block *self,
                                 unsigned long action, void *hcpu)
 {
@@ -2911,7 +3008,6 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
        }
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init page_alloc_init(void)
 {
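Dropping the CONFIG_HOTPLUG_CPU guard here (and the CONFIG_PM || CONFIG_HOTPLUG_CPU guard around __drain_pages() earlier in the patch) works because registration compiles away when hotplug is off; hotcpu_notifier() in that configuration is, to the best of my reading of the era's cpu.h, a no-op that still references its argument, roughly:

	#ifdef CONFIG_HOTPLUG_CPU
	#define hotcpu_notifier(fn, pri) {				\
		static struct notifier_block fn##_nb =			\
			{ .notifier_call = fn, .priority = pri };	\
		register_cpu_notifier(&fn##_nb);			\
	}
	#else
	/* (void)(fn) keeps the now-unconditional callback referenced,
	 * avoiding a defined-but-unused warning */
	#define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
	#endif

That reference is what lets page_alloc_cpu_notify() and __drain_pages() be compiled unconditionally without new warnings.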
@@ -3215,7 +3311,7 @@ void *__init alloc_large_system_hash(const char *tablename,
        /* allow the kernel cmdline to have a say */
        if (!numentries) {
                /* round applicable memory size up to nearest megabyte */
-               numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
+               numentries = nr_kernel_pages;
                numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
                numentries >>= 20 - PAGE_SHIFT;
                numentries <<= 20 - PAGE_SHIFT;
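With the HASH_HIGHMEM special case gone, the default table size is always derived from nr_kernel_pages. The three lines after the assignment are a round-up idiom: bias by one less than the step, truncate the low bits by shifting down, then scale back up. With 4 KiB pages (PAGE_SHIFT == 12) the step is 1 << 8, i.e. the page count is rounded up to a whole megabyte's worth of pages. The same idiom in isolation (round_up_shift is an illustrative name):

	/* round n up to a multiple of (1 << shift); here shift = 20 - PAGE_SHIFT */
	static unsigned long round_up_shift(unsigned long n, unsigned int shift)
	{
		n += (1UL << shift) - 1;	/* bias so truncation rounds up */
		n >>= shift;			/* drop the low bits */
		return n << shift;		/* scale back */
	}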
@@ -3237,7 +3333,7 @@ void *__init alloc_large_system_hash(const char *tablename,
        if (numentries > max)
                numentries = max;
 
-       log2qty = long_log2(numentries);
+       log2qty = ilog2(numentries);
 
        do {
                size = bucketsize << log2qty;
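long_log2() is retired in favour of the generic ilog2() from <linux/log2.h>, which constant-folds when its argument is a compile-time constant and otherwise computes the floor of log2 from the position of the highest set bit, in the spirit of this sketch (my_ilog2 is an illustrative name, not the real implementation):

	#include <linux/bitops.h>

	/* floor(log2(n)) for n > 0, roughly the non-constant path */
	static inline unsigned int my_ilog2(unsigned long n)
	{
		return fls_long(n) - 1;	/* fls_long(): 1-based index of top bit */
	}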
@@ -3259,7 +3355,7 @@ void *__init alloc_large_system_hash(const char *tablename,
        printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
               tablename,
               (1U << log2qty),
-              long_log2(size) - PAGE_SHIFT,
+              ilog2(size) - PAGE_SHIFT,
               size);
 
        if (_hash_shift)