diff --git a/mm/swap.c b/mm/swap.c
index 91b7e2026f..2ed7be3979 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,13 +34,32 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
+/*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs. But it gets used by networking.
+ */
+static void fastcall __page_cache_release(struct page *page)
+{
+	if (PageLRU(page)) {
+		unsigned long flags;
+		struct zone *zone = page_zone(page);
+
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		VM_BUG_ON(!PageLRU(page));
+		__ClearPageLRU(page);
+		del_page_from_lru(zone, page);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+	free_hot_page(page);
+}
+
 static void put_compound_page(struct page *page)
 {
 	page = (struct page *)page_private(page);
 	if (put_page_testzero(page)) {
-		void (*dtor)(struct page *page);
+		compound_page_dtor *dtor;
 
-		dtor = (void (*)(struct page *))page[1].lru.next;
+		dtor = get_compound_page_dtor(page);
 		(*dtor)(page);
 	}
 }
@@ -54,6 +73,26 @@ void put_page(struct page *page)
 }
 EXPORT_SYMBOL(put_page);
 
+/**
+ * put_pages_list(): release a list of pages
+ *
+ * Release a list of pages which are strung together on page.lru. Currently
+ * used by read_cache_pages() and related error recovery code.
+ *
+ * @pages: list of pages threaded on page->lru
+ */
+void put_pages_list(struct list_head *pages)
+{
+	while (!list_empty(pages)) {
+		struct page *victim;
+
+		victim = list_entry(pages->prev, struct page, lru);
+		list_del(&victim->lru);
+		page_cache_release(victim);
+	}
+}
+EXPORT_SYMBOL(put_pages_list);
+
 /*
  * Writeback is about to end against a page which has been marked for immediate
  * reclaim. If it still appears to be reclaimable, move it to the tail of the
@@ -86,9 +125,8 @@ int rotate_reclaimable_page(struct page *page)
 	zone = page_zone(page);
 	spin_lock_irqsave(&zone->lru_lock, flags);
 	if (PageLRU(page) && !PageActive(page)) {
-		list_del(&page->lru);
-		list_add_tail(&page->lru, &zone->inactive_list);
-		inc_page_state(pgrotated);
+		list_move_tail(&page->lru, &zone->inactive_list);
+		__count_vm_event(PGROTATED);
 	}
 	if (!test_clear_page_writeback(page))
 		BUG();
@@ -108,7 +146,7 @@ void fastcall activate_page(struct page *page)
 		del_page_from_inactive_list(zone, page);
 		SetPageActive(page);
 		add_page_to_active_list(zone, page);
-		inc_page_state(pgactivate);
+		__count_vm_event(PGACTIVATE);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
@@ -178,7 +216,7 @@ void lru_add_drain(void)
 }
 
 #ifdef CONFIG_NUMA
-static void lru_add_drain_per_cpu(void *dummy)
+static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
 }
@@ -188,7 +226,7 @@ static void lru_add_drain_per_cpu(void *dummy)
  */
 int lru_add_drain_all(void)
 {
-	return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+	return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
 
 #else
@@ -203,26 +241,6 @@ int lru_add_drain_all(void)
 }
 #endif
 
-/*
- * This path almost never happens for VM activity - pages are normally
- * freed via pagevecs. But it gets used by networking.
- */ -void fastcall __page_cache_release(struct page *page) -{ - if (PageLRU(page)) { - unsigned long flags; - struct zone *zone = page_zone(page); - - spin_lock_irqsave(&zone->lru_lock, flags); - BUG_ON(!PageLRU(page)); - __ClearPageLRU(page); - del_page_from_lru(zone, page); - spin_unlock_irqrestore(&zone->lru_lock, flags); - } - free_hot_page(page); -} -EXPORT_SYMBOL(__page_cache_release); - /* * Batched page_cache_release(). Decrement the reference count on all the * passed pages. If it fell to zero then remove the page from the LRU and @@ -265,7 +283,7 @@ void release_pages(struct page **pages, int nr, int cold) zone = pagezone; spin_lock_irq(&zone->lru_lock); } - BUG_ON(!PageLRU(page)); + VM_BUG_ON(!PageLRU(page)); __ClearPageLRU(page); del_page_from_lru(zone, page); } @@ -318,7 +336,7 @@ void __pagevec_release_nonlru(struct pagevec *pvec) for (i = 0; i < pagevec_count(pvec); i++) { struct page *page = pvec->pages[i]; - BUG_ON(PageLRU(page)); + VM_BUG_ON(PageLRU(page)); if (put_page_testzero(page)) pagevec_add(&pages_to_free, page); } @@ -345,7 +363,7 @@ void __pagevec_lru_add(struct pagevec *pvec) zone = pagezone; spin_lock_irq(&zone->lru_lock); } - BUG_ON(PageLRU(page)); + VM_BUG_ON(PageLRU(page)); SetPageLRU(page); add_page_to_inactive_list(zone, page); } @@ -372,9 +390,9 @@ void __pagevec_lru_add_active(struct pagevec *pvec) zone = pagezone; spin_lock_irq(&zone->lru_lock); } - BUG_ON(PageLRU(page)); + VM_BUG_ON(PageLRU(page)); SetPageLRU(page); - BUG_ON(PageActive(page)); + VM_BUG_ON(PageActive(page)); SetPageActive(page); add_page_to_active_list(zone, page); } @@ -480,48 +498,6 @@ static int cpu_swap_callback(struct notifier_block *nfb, #endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_SMP */ -#ifdef CONFIG_SMP -void percpu_counter_mod(struct percpu_counter *fbc, long amount) -{ - long count; - long *pcount; - int cpu = get_cpu(); - - pcount = per_cpu_ptr(fbc->counters, cpu); - count = *pcount + amount; - if (count >= FBC_BATCH || count <= -FBC_BATCH) { - spin_lock(&fbc->lock); - fbc->count += count; - *pcount = 0; - spin_unlock(&fbc->lock); - } else { - *pcount = count; - } - put_cpu(); -} -EXPORT_SYMBOL(percpu_counter_mod); - -/* - * Add up all the per-cpu counts, return the result. This is a more accurate - * but much slower version of percpu_counter_read_positive() - */ -long percpu_counter_sum(struct percpu_counter *fbc) -{ - long ret; - int cpu; - - spin_lock(&fbc->lock); - ret = fbc->count; - for_each_cpu(cpu) { - long *pcount = per_cpu_ptr(fbc->counters, cpu); - ret += *pcount; - } - spin_unlock(&fbc->lock); - return ret < 0 ? 0 : ret; -} -EXPORT_SYMBOL(percpu_counter_sum); -#endif - /* * Perform any setup for the swap system */ @@ -538,5 +514,7 @@ void __init swap_setup(void) * Right now other parts of the system means that we * _really_ don't want to cluster much more */ +#ifdef CONFIG_HOTPLUG_CPU hotcpu_notifier(cpu_swap_callback, 0); +#endif }
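
Below is an illustrative sketch, not part of the patch above: it shows how a
read_cache_pages()-style caller could use the put_pages_list() helper this
diff introduces. Pages arrive strung together on page->lru; each is unlinked
and consumed in turn, and on a filler error everything still queued is
released in one call. The function name example_read_pages, the filler
callback, and the -EIO error path are hypothetical; add_to_page_cache(),
page_cache_release() and the page->lru threading are the real APIs of this
kernel era.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int example_read_pages(struct address_space *mapping,
			      struct list_head *pages,
			      int (*filler)(void *, struct page *),
			      void *data)
{
	while (!list_empty(pages)) {
		/* Pages are threaded on ->lru; take them from the tail. */
		struct page *page = list_entry(pages->prev, struct page, lru);

		list_del(&page->lru);
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			/* Already cached: drop this reference and move on. */
			page_cache_release(page);
			continue;
		}
		if (filler(data, page)) {
			/* On error, release every page still on the list. */
			put_pages_list(pages);
			return -EIO;
		}
	}
	return 0;
}

Consuming from pages->prev mirrors put_pages_list() itself: since readahead
builds these lists with list_add(), walking from the tail visits pages in
ascending index order.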