[PATCH] Swap Migration V5: LRU operations
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 64f9570..261a56e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -63,20 +63,14 @@ struct scan_control {
 
        unsigned long nr_mapped;        /* From page_state */
 
-       /* How many pages shrink_cache() should reclaim */
-       int nr_to_reclaim;
-
        /* Ask shrink_caches, or shrink_zone to scan at this priority */
        unsigned int priority;
 
        /* This context's GFP mask */
-       unsigned int gfp_mask;
+       gfp_t gfp_mask;
 
        int may_writepage;
 
-       /* Can pages be swapped as part of reclaim? */
-       int may_swap;
-
        /* This context's SWAP_CLUSTER_MAX. If freeing memory for
         * suspend, we effectively ignore SWAP_CLUSTER_MAX.
         * In this context, it doesn't matter that we scan the
@@ -186,8 +180,7 @@ EXPORT_SYMBOL(remove_shrinker);
  *
  * Returns the number of slab objects which we shrunk.
  */
-static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
-                       unsigned long lru_pages)
+int shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages)
 {
        struct shrinker *shrinker;
        int ret = 0;
@@ -201,13 +194,25 @@ static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
                unsigned long total_scan;
+               unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
 
                delta = (4 * scanned) / shrinker->seeks;
-               delta *= (*shrinker->shrinker)(0, gfp_mask);
+               delta *= max_pass;
                do_div(delta, lru_pages + 1);
                shrinker->nr += delta;
-               if (shrinker->nr < 0)
-                       shrinker->nr = LONG_MAX;        /* It wrapped! */
+               if (shrinker->nr < 0) {
+                       printk(KERN_ERR "%s: nr=%ld\n",
+                                       __FUNCTION__, shrinker->nr);
+                       shrinker->nr = max_pass;
+               }
+
+               /*
+                * Avoid the risk of looping forever due to an overly large
+                * nr value: never try to free more than twice the estimated
+                * number of freeable entries.
+                */
+               if (shrinker->nr > max_pass * 2)
+                       shrinker->nr = max_pass * 2;
 
                total_scan = shrinker->nr;
                shrinker->nr = 0;
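
The shrinker arithmetic above scans each slab cache in proportion to the pressure applied to the LRU: delta = (4 * scanned / seeks) * max_pass / (lru_pages + 1), accumulated into shrinker->nr and now capped at twice the cache's reported size. A stand-alone illustration of that estimate with made-up sample numbers (plain userspace C, not part of the patch):

#include <stdio.h>

/* Stand-alone illustration of the shrink_slab() scan estimate above;
 * made-up numbers, not kernel code.
 */
static unsigned long slab_scan_target(unsigned long scanned,	/* LRU pages scanned */
				      unsigned long lru_pages,	/* LRU pages total */
				      unsigned long max_pass,	/* freeable slab objects */
				      int seeks)		/* shrinker->seeks */
{
	unsigned long long delta;

	delta = (4ULL * scanned) / seeks;	/* reclaim pressure, scaled by seek cost */
	delta *= max_pass;			/* proportional to the cache's size ...  */
	delta /= lru_pages + 1;			/* ... relative to the size of the LRU   */

	if (delta > 2 * max_pass)		/* the new cap: at most twice the cache  */
		delta = 2 * max_pass;
	return (unsigned long)delta;
}

int main(void)
{
	/* 1000 of 100000 LRU pages scanned, against a 50000-object cache: */
	printf("%lu\n", slab_scan_target(1000, 100000, 50000, 2));	/* prints 999 */
	return 0;
}
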
@@ -355,7 +360,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
                res = mapping->a_ops->writepage(page, &wbc);
                if (res < 0)
                        handle_write_error(mapping, page, res);
-               if (res == WRITEPAGE_ACTIVATE) {
+               if (res == AOP_WRITEPAGE_ACTIVATE) {
                        ClearPageReclaim(page);
                        return PAGE_ACTIVATE;
                }
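
For context on the rename: AOP_WRITEPAGE_ACTIVATE is the value a ->writepage implementation returns when it declines to write the page during reclaim and asks for it to be reactivated instead. A hedged sketch of a hypothetical filesystem following that convention (the myfs_* names are invented; only the return-value handling above is from the patch):

/* Hypothetical ->writepage sketch; myfs_* names are invented. */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	if (!myfs_can_write_now(page)) {
		/*
		 * Decline: mark the page dirty again and return with it
		 * still locked.  pageout() above turns this into
		 * PAGE_ACTIVATE, and shrink_list() then reactivates and
		 * unlocks the page.
		 */
		set_page_dirty(page);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	set_page_writeback(page);
	unlock_page(page);
	/* ... submit the actual I/O; end_page_writeback() on completion ... */
	return 0;
}
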
@@ -407,7 +412,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                if (PageWriteback(page))
                        goto keep_locked;
 
-               referenced = page_referenced(page, 1, sc->priority <= 0);
+               referenced = page_referenced(page, 1);
                /* In active use or really unfreeable?  Activate it. */
                if (referenced && page_mapping_inuse(page))
                        goto activate_locked;
@@ -417,7 +422,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                 * Anonymous process memory has backing store?
                 * Try to allocate it some swap space here.
                 */
-               if (PageAnon(page) && !PageSwapCache(page) && sc->may_swap) {
+               if (PageAnon(page) && !PageSwapCache(page)) {
                        if (!add_to_swap(page))
                                goto activate_locked;
                }
@@ -519,7 +524,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 
 #ifdef CONFIG_SWAP
                if (PageSwapCache(page)) {
-                       swp_entry_t swap = { .val = page->private };
+                       swp_entry_t swap = { .val = page_private(page) };
                        __delete_from_swap_cache(page);
                        write_unlock_irq(&mapping->tree_lock);
                        swap_free(swap);
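
page_private() (and its set_page_private() counterpart) is the accessor this tree now uses instead of touching page->private directly. Assuming it is still a plain field access at this point in the series, its shape is roughly:

/* Assumed shape of the accessors, shown only as an approximation;
 * the real definitions live in linux/mm.h, not in this patch.
 */
#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))
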
@@ -588,20 +593,18 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
                page = lru_to_page(src);
                prefetchw_prev_lru_page(page, src, flags);
 
-               if (!TestClearPageLRU(page))
-                       BUG();
-               list_del(&page->lru);
-               if (get_page_testone(page)) {
-                       /*
-                        * It is being freed elsewhere
-                        */
-                       __put_page(page);
-                       SetPageLRU(page);
-                       list_add(&page->lru, src);
-                       continue;
-               } else {
-                       list_add(&page->lru, dst);
+               switch (__isolate_lru_page(page)) {
+               case 1:
+                       /* Successfully isolated the page */
+                       list_move(&page->lru, dst);
                        nr_taken++;
+                       break;
+               case -ENOENT:
+                       /* Not possible to isolate */
+                       list_move(&page->lru, src);
+                       break;
+               default:
+                       BUG();
                }
        }
 
@@ -609,6 +612,48 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
        return nr_taken;
 }
 
+static void lru_add_drain_per_cpu(void *dummy)
+{
+       lru_add_drain();
+}
+
+/*
+ * Isolate one page from the LRU lists, leaving it off-list with an
+ * elevated reference count held for the caller. Do the necessary
+ * pagevec draining if the page is not on the LRU lists yet.
+ *
+ * Result:
+ *  0 = page not on any LRU list
+ *  1 = page removed from its LRU list; the caller holds the reference.
+ * -ENOENT = page is being freed elsewhere.
+ */
+int isolate_lru_page(struct page *page)
+{
+       int rc = 0;
+       struct zone *zone = page_zone(page);
+
+redo:
+       spin_lock_irq(&zone->lru_lock);
+       rc = __isolate_lru_page(page);
+       if (rc == 1) {
+               if (PageActive(page))
+                       del_page_from_active_list(zone, page);
+               else
+                       del_page_from_inactive_list(zone, page);
+       }
+       spin_unlock_irq(&zone->lru_lock);
+       if (rc == 0) {
+               /*
+                * Maybe this page is still sitting in a per-cpu pagevec,
+                * waiting for a cpu to drain it onto the lru lists?
+                */
+               rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+               if (rc == 0 && PageLRU(page))
+                       goto redo;
+       }
+       return rc;
+}
+
 /*
  * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
  */
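
The switch in isolate_lru_pages() and the rc handling in isolate_lru_page() both rely on the return convention of __isolate_lru_page(), which is introduced elsewhere in this series. A sketch of that contract, reconstructed from the open-coded sequence the hunk above removes (an approximation, not the helper's actual body):

/* Approximate contract of __isolate_lru_page(); reconstructed from the
 * removed open-coded TestClearPageLRU/get_page_testone sequence above.
 */
static inline int __isolate_lru_page(struct page *page)
{
	if (TestClearPageLRU(page)) {
		if (get_page_testone(page)) {
			/* It is being freed elsewhere: undo and report it. */
			__put_page(page);
			SetPageLRU(page);
			return -ENOENT;
		}
		return 1;	/* isolated, extra reference held for the caller */
	}
	return 0;		/* page is not on any LRU list */
}
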
@@ -639,17 +684,17 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
                        goto done;
 
                max_scan -= nr_scan;
-               if (current_is_kswapd())
-                       mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
-               else
-                       mod_page_state_zone(zone, pgscan_direct, nr_scan);
                nr_freed = shrink_list(&page_list, sc);
-               if (current_is_kswapd())
-                       mod_page_state(kswapd_steal, nr_freed);
-               mod_page_state_zone(zone, pgsteal, nr_freed);
-               sc->nr_to_reclaim -= nr_freed;
 
-               spin_lock_irq(&zone->lru_lock);
+               local_irq_disable();
+               if (current_is_kswapd()) {
+                       __mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
+                       __mod_page_state(kswapd_steal, nr_freed);
+               } else
+                       __mod_page_state_zone(zone, pgscan_direct, nr_scan);
+               __mod_page_state_zone(zone, pgsteal, nr_freed);
+
+               spin_lock(&zone->lru_lock);
                /*
                 * Put back any unfreeable pages.
                 */
@@ -674,6 +719,40 @@ done:
        pagevec_release(&pvec);
 }
 
+static inline void move_to_lru(struct page *page)
+{
+       list_del(&page->lru);
+       if (PageActive(page)) {
+               /*
+                * lru_cache_add_active checks that
+                * the PG_active bit is off.
+                */
+               ClearPageActive(page);
+               lru_cache_add_active(page);
+       } else {
+               lru_cache_add(page);
+       }
+       put_page(page);
+}
+
+/*
+ * Put previously isolated pages back onto the appropriate LRU lists.
+ *
+ * Returns the number of pages put back.
+ */
+int putback_lru_pages(struct list_head *l)
+{
+       struct page *page;
+       struct page *page2;
+       int count = 0;
+
+       list_for_each_entry_safe(page, page2, l, lru) {
+               move_to_lru(page);
+               count++;
+       }
+       return count;
+}
+
 /*
  * This moves pages from the active list to the inactive list.
  *
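
Taken together, isolate_lru_page() and putback_lru_pages() let a caller pull individual pages off the LRU (with a reference held and page->lru freed up for a private list), work on them, and hand the survivors back. A minimal usage sketch for a hypothetical caller (only the two functions above come from this patch; the rest is invented):

/* Hypothetical caller; everything except isolate_lru_page() and
 * putback_lru_pages() is invented for illustration.
 */
static void my_try_collect(struct page *page, struct list_head *pagelist)
{
	/*
	 * On success the page is off its LRU list with an elevated
	 * reference count, and page->lru is free for our private list.
	 */
	if (isolate_lru_page(page) == 1)
		list_add_tail(&page->lru, pagelist);
}

static void my_finish(struct list_head *pagelist)
{
	/* Hand any pages we did not consume back to the LRU. */
	putback_lru_pages(pagelist);
}
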
@@ -754,7 +833,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
                if (page_mapped(page)) {
                        if (!reclaim_mapped ||
                            (total_swap_pages == 0 && PageAnon(page)) ||
-                           page_referenced(page, 0, sc->priority <= 0)) {
+                           page_referenced(page, 0)) {
                                list_add(&page->lru, &l_active);
                                continue;
                        }
@@ -811,11 +890,13 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
                }
        }
        zone->nr_active += pgmoved;
-       spin_unlock_irq(&zone->lru_lock);
-       pagevec_release(&pvec);
+       spin_unlock(&zone->lru_lock);
 
-       mod_page_state_zone(zone, pgrefill, pgscanned);
-       mod_page_state(pgdeactivate, pgdeactivate);
+       __mod_page_state_zone(zone, pgrefill, pgscanned);
+       __mod_page_state(pgdeactivate, pgdeactivate);
+       local_irq_enable();
+
+       pagevec_release(&pvec);
 }
 
 /*
@@ -847,8 +928,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
        else
                nr_inactive = 0;
 
-       sc->nr_to_reclaim = sc->swap_cluster_max;
-
        while (nr_active || nr_inactive) {
                if (nr_active) {
                        sc->nr_to_scan = min(nr_active,
@@ -862,8 +941,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
                                        (unsigned long)sc->swap_cluster_max);
                        nr_inactive -= sc->nr_to_scan;
                        shrink_cache(zone, sc);
-                       if (sc->nr_to_reclaim <= 0)
-                               break;
                }
        }
 
@@ -896,7 +973,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];
 
-               if (zone->present_pages == 0)
+               if (!populated_zone(zone))
                        continue;
 
                if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
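
populated_zone() replaces the open-coded zone->present_pages checks here and in the hunks below; it is defined outside this file as a simple predicate over present_pages, roughly:

/* Assumed shape of the helper, shown as an approximation; the real
 * definition lives in linux/mmzone.h.
 */
static inline int populated_zone(struct zone *zone)
{
	return zone->present_pages != 0;
}
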
@@ -926,7 +1003,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
+int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 {
        int priority;
        int ret = 0;
@@ -938,7 +1015,6 @@ int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
 
        sc.gfp_mask = gfp_mask;
        sc.may_writepage = 0;
-       sc.may_swap = 1;
 
        inc_page_state(allocstall);
 
@@ -958,6 +1034,8 @@ int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
                sc.nr_reclaimed = 0;
                sc.priority = priority;
                sc.swap_cluster_max = SWAP_CLUSTER_MAX;
+               if (!priority)
+                       disable_swap_token();
                shrink_caches(zones, &sc);
                shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
                if (reclaim_state) {
@@ -1039,7 +1117,6 @@ loop_again:
        total_reclaimed = 0;
        sc.gfp_mask = GFP_KERNEL;
        sc.may_writepage = 0;
-       sc.may_swap = 1;
        sc.nr_mapped = read_page_state(nr_mapped);
 
        inc_page_state(pageoutrun);
@@ -1054,6 +1131,10 @@ loop_again:
                int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
                unsigned long lru_pages = 0;
 
+               /* The swap token gets in the way of swapout... */
+               if (!priority)
+                       disable_swap_token();
+
                all_zones_ok = 1;
 
                if (nr_pages == 0) {
@@ -1064,7 +1145,7 @@ loop_again:
                        for (i = pgdat->nr_zones - 1; i >= 0; i--) {
                                struct zone *zone = pgdat->node_zones + i;
 
-                               if (zone->present_pages == 0)
+                               if (!populated_zone(zone))
                                        continue;
 
                                if (zone->all_unreclaimable &&
@@ -1072,7 +1153,7 @@ loop_again:
                                        continue;
 
                                if (!zone_watermark_ok(zone, order,
-                                               zone->pages_high, 0, 0, 0)) {
+                                               zone->pages_high, 0, 0)) {
                                        end_zone = i;
                                        goto scan;
                                }
@@ -1101,7 +1182,7 @@ scan:
                        struct zone *zone = pgdat->node_zones + i;
                        int nr_slab;
 
-                       if (zone->present_pages == 0)
+                       if (!populated_zone(zone))
                                continue;
 
                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
@@ -1109,7 +1190,7 @@ scan:
 
                        if (nr_pages == 0) {    /* Not software suspend */
                                if (!zone_watermark_ok(zone, order,
-                                               zone->pages_high, end_zone, 0, 0))
+                                               zone->pages_high, end_zone, 0))
                                        all_zones_ok = 0;
                        }
                        zone->temp_priority = priority;
@@ -1253,11 +1334,11 @@ void wakeup_kswapd(struct zone *zone, int order)
 {
        pg_data_t *pgdat;
 
-       if (zone->present_pages == 0)
+       if (!populated_zone(zone))
                return;
 
        pgdat = zone->zone_pgdat;
-       if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0, 0))
+       if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
                return;
        if (pgdat->kswapd_max_order < order)
                pgdat->kswapd_max_order = order;
@@ -1333,75 +1414,3 @@ static int __init kswapd_init(void)
 }
 
 module_init(kswapd_init)
-
-
-/*
- * Try to free up some pages from this zone through reclaim.
- */
-int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
-{
-       struct scan_control sc;
-       int nr_pages = 1 << order;
-       int total_reclaimed = 0;
-
-       /* The reclaim may sleep, so don't do it if sleep isn't allowed */
-       if (!(gfp_mask & __GFP_WAIT))
-               return 0;
-       if (zone->all_unreclaimable)
-               return 0;
-
-       sc.gfp_mask = gfp_mask;
-       sc.may_writepage = 0;
-       sc.may_swap = 0;
-       sc.nr_mapped = read_page_state(nr_mapped);
-       sc.nr_scanned = 0;
-       sc.nr_reclaimed = 0;
-       /* scan at the highest priority */
-       sc.priority = 0;
-
-       if (nr_pages > SWAP_CLUSTER_MAX)
-               sc.swap_cluster_max = nr_pages;
-       else
-               sc.swap_cluster_max = SWAP_CLUSTER_MAX;
-
-       /* Don't reclaim the zone if there are other reclaimers active */
-       if (atomic_read(&zone->reclaim_in_progress) > 0)
-               goto out;
-
-       shrink_zone(zone, &sc);
-       total_reclaimed = sc.nr_reclaimed;
-
- out:
-       return total_reclaimed;
-}
-
-asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
-                                    unsigned int state)
-{
-       struct zone *z;
-       int i;
-
-       if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
-
-       if (node >= MAX_NUMNODES || !node_online(node))
-               return -EINVAL;
-
-       /* This will break if we ever add more zones */
-       if (!(zone & (1<<ZONE_DMA|1<<ZONE_NORMAL|1<<ZONE_HIGHMEM)))
-               return -EINVAL;
-
-       for (i = 0; i < MAX_NR_ZONES; i++) {
-               if (!(zone & 1<<i))
-                       continue;
-
-               z = &NODE_DATA(node)->node_zones[i];
-
-               if (state)
-                       z->reclaim_pages = 1;
-               else
-                       z->reclaim_pages = 0;
-       }
-
-       return 0;
-}