mm: migrate: make buffer_migrate_page_norefs() actually succeed
[linux] / mm / migrate.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/pagevec.h>
25 #include <linux/ksm.h>
26 #include <linux/rmap.h>
27 #include <linux/topology.h>
28 #include <linux/cpu.h>
29 #include <linux/cpuset.h>
30 #include <linux/writeback.h>
31 #include <linux/mempolicy.h>
32 #include <linux/vmalloc.h>
33 #include <linux/security.h>
34 #include <linux/backing-dev.h>
35 #include <linux/compaction.h>
36 #include <linux/syscalls.h>
37 #include <linux/compat.h>
38 #include <linux/hugetlb.h>
39 #include <linux/hugetlb_cgroup.h>
40 #include <linux/gfp.h>
41 #include <linux/pfn_t.h>
42 #include <linux/memremap.h>
43 #include <linux/userfaultfd_k.h>
44 #include <linux/balloon_compaction.h>
45 #include <linux/mmu_notifier.h>
46 #include <linux/page_idle.h>
47 #include <linux/page_owner.h>
48 #include <linux/sched/mm.h>
49 #include <linux/ptrace.h>
50
51 #include <asm/tlbflush.h>
52
53 #define CREATE_TRACE_POINTS
54 #include <trace/events/migrate.h>
55
56 #include "internal.h"
57
58 /*
59  * migrate_prep() needs to be called before we start compiling a list of pages
60  * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
61  * undesirable, use migrate_prep_local()
62  */
63 int migrate_prep(void)
64 {
65         /*
66          * Clear the LRU lists so pages can be isolated.
67          * Note that pages may be moved off the LRU after we have
68          * drained them. Those pages will fail to migrate like other
69          * pages that may be busy.
70          */
71         lru_add_drain_all();
72
73         return 0;
74 }
75
76 /* Do the necessary work of migrate_prep but not if it involves other CPUs */
77 int migrate_prep_local(void)
78 {
79         lru_add_drain();
80
81         return 0;
82 }
83
84 int isolate_movable_page(struct page *page, isolate_mode_t mode)
85 {
86         struct address_space *mapping;
87
88         /*
89          * Avoid burning cycles with pages that are yet under __free_pages(),
90          * or just got freed under us.
91          *
92          * In case we 'win' a race for a movable page being freed under us and
93          * raise its refcount preventing __free_pages() from doing its job
94          * the put_page() at the end of this block will take care of
95          * release this page, thus avoiding a nasty leakage.
96          */
97         if (unlikely(!get_page_unless_zero(page)))
98                 goto out;
99
100         /*
101          * Check PageMovable before holding a PG_lock because page's owner
102          * assumes anybody doesn't touch PG_lock of newly allocated page
103          * so unconditionally grapping the lock ruins page's owner side.
104          */
105         if (unlikely(!__PageMovable(page)))
106                 goto out_putpage;
107         /*
108          * As movable pages are not isolated from LRU lists, concurrent
109          * compaction threads can race against page migration functions
110          * as well as race against the releasing a page.
111          *
112          * In order to avoid having an already isolated movable page
113          * being (wrongly) re-isolated while it is under migration,
114          * or to avoid attempting to isolate pages being released,
115          * lets be sure we have the page lock
116          * before proceeding with the movable page isolation steps.
117          */
118         if (unlikely(!trylock_page(page)))
119                 goto out_putpage;
120
121         if (!PageMovable(page) || PageIsolated(page))
122                 goto out_no_isolated;
123
124         mapping = page_mapping(page);
125         VM_BUG_ON_PAGE(!mapping, page);
126
127         if (!mapping->a_ops->isolate_page(page, mode))
128                 goto out_no_isolated;
129
130         /* Driver shouldn't use PG_isolated bit of page->flags */
131         WARN_ON_ONCE(PageIsolated(page));
132         __SetPageIsolated(page);
133         unlock_page(page);
134
135         return 0;
136
137 out_no_isolated:
138         unlock_page(page);
139 out_putpage:
140         put_page(page);
141 out:
142         return -EBUSY;
143 }
144
145 /* It should be called on page which is PG_movable */
146 void putback_movable_page(struct page *page)
147 {
148         struct address_space *mapping;
149
150         VM_BUG_ON_PAGE(!PageLocked(page), page);
151         VM_BUG_ON_PAGE(!PageMovable(page), page);
152         VM_BUG_ON_PAGE(!PageIsolated(page), page);
153
154         mapping = page_mapping(page);
155         mapping->a_ops->putback_page(page);
156         __ClearPageIsolated(page);
157 }
158
159 /*
160  * Put previously isolated pages back onto the appropriate lists
161  * from where they were once taken off for compaction/migration.
162  *
163  * This function shall be used whenever the isolated pageset has been
164  * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
165  * and isolate_huge_page().
166  */
167 void putback_movable_pages(struct list_head *l)
168 {
169         struct page *page;
170         struct page *page2;
171
172         list_for_each_entry_safe(page, page2, l, lru) {
173                 if (unlikely(PageHuge(page))) {
174                         putback_active_hugepage(page);
175                         continue;
176                 }
177                 list_del(&page->lru);
178                 /*
179                  * We isolated non-lru movable page so here we can use
180                  * __PageMovable because LRU page's mapping cannot have
181                  * PAGE_MAPPING_MOVABLE.
182                  */
183                 if (unlikely(__PageMovable(page))) {
184                         VM_BUG_ON_PAGE(!PageIsolated(page), page);
185                         lock_page(page);
186                         if (PageMovable(page))
187                                 putback_movable_page(page);
188                         else
189                                 __ClearPageIsolated(page);
190                         unlock_page(page);
191                         put_page(page);
192                 } else {
193                         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
194                                         page_is_file_cache(page), -hpage_nr_pages(page));
195                         putback_lru_page(page);
196                 }
197         }
198 }
199
200 /*
201  * Restore a potential migration pte to a working pte entry
202  */
203 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
204                                  unsigned long addr, void *old)
205 {
206         struct page_vma_mapped_walk pvmw = {
207                 .page = old,
208                 .vma = vma,
209                 .address = addr,
210                 .flags = PVMW_SYNC | PVMW_MIGRATION,
211         };
212         struct page *new;
213         pte_t pte;
214         swp_entry_t entry;
215
216         VM_BUG_ON_PAGE(PageTail(page), page);
217         while (page_vma_mapped_walk(&pvmw)) {
218                 if (PageKsm(page))
219                         new = page;
220                 else
221                         new = page - pvmw.page->index +
222                                 linear_page_index(vma, pvmw.address);
223
224 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
225                 /* PMD-mapped THP migration entry */
226                 if (!pvmw.pte) {
227                         VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
228                         remove_migration_pmd(&pvmw, new);
229                         continue;
230                 }
231 #endif
232
233                 get_page(new);
234                 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
235                 if (pte_swp_soft_dirty(*pvmw.pte))
236                         pte = pte_mksoft_dirty(pte);
237
238                 /*
239                  * Recheck VMA as permissions can change since migration started
240                  */
241                 entry = pte_to_swp_entry(*pvmw.pte);
242                 if (is_write_migration_entry(entry))
243                         pte = maybe_mkwrite(pte, vma);
244
245                 if (unlikely(is_zone_device_page(new))) {
246                         if (is_device_private_page(new)) {
247                                 entry = make_device_private_entry(new, pte_write(pte));
248                                 pte = swp_entry_to_pte(entry);
249                         } else if (is_device_public_page(new)) {
250                                 pte = pte_mkdevmap(pte);
251                                 flush_dcache_page(new);
252                         }
253                 } else
254                         flush_dcache_page(new);
255
256 #ifdef CONFIG_HUGETLB_PAGE
257                 if (PageHuge(new)) {
258                         pte = pte_mkhuge(pte);
259                         pte = arch_make_huge_pte(pte, vma, new, 0);
260                         set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
261                         if (PageAnon(new))
262                                 hugepage_add_anon_rmap(new, vma, pvmw.address);
263                         else
264                                 page_dup_rmap(new, true);
265                 } else
266 #endif
267                 {
268                         set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
269
270                         if (PageAnon(new))
271                                 page_add_anon_rmap(new, vma, pvmw.address, false);
272                         else
273                                 page_add_file_rmap(new, false);
274                 }
275                 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
276                         mlock_vma_page(new);
277
278                 if (PageTransHuge(page) && PageMlocked(page))
279                         clear_page_mlock(page);
280
281                 /* No need to invalidate - it was non-present before */
282                 update_mmu_cache(vma, pvmw.address, pvmw.pte);
283         }
284
285         return true;
286 }
287
288 /*
289  * Get rid of all migration entries and replace them by
290  * references to the indicated page.
291  */
292 void remove_migration_ptes(struct page *old, struct page *new, bool locked)
293 {
294         struct rmap_walk_control rwc = {
295                 .rmap_one = remove_migration_pte,
296                 .arg = old,
297         };
298
299         if (locked)
300                 rmap_walk_locked(new, &rwc);
301         else
302                 rmap_walk(new, &rwc);
303 }
304
305 /*
306  * Something used the pte of a page under migration. We need to
307  * get to the page and wait until migration is finished.
308  * When we return from this function the fault will be retried.
309  */
310 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
311                                 spinlock_t *ptl)
312 {
313         pte_t pte;
314         swp_entry_t entry;
315         struct page *page;
316
317         spin_lock(ptl);
318         pte = *ptep;
319         if (!is_swap_pte(pte))
320                 goto out;
321
322         entry = pte_to_swp_entry(pte);
323         if (!is_migration_entry(entry))
324                 goto out;
325
326         page = migration_entry_to_page(entry);
327
328         /*
329          * Once page cache replacement of page migration started, page_count
330          * is zero; but we must not call put_and_wait_on_page_locked() without
331          * a ref. Use get_page_unless_zero(), and just fault again if it fails.
332          */
333         if (!get_page_unless_zero(page))
334                 goto out;
335         pte_unmap_unlock(ptep, ptl);
336         put_and_wait_on_page_locked(page);
337         return;
338 out:
339         pte_unmap_unlock(ptep, ptl);
340 }
341
342 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
343                                 unsigned long address)
344 {
345         spinlock_t *ptl = pte_lockptr(mm, pmd);
346         pte_t *ptep = pte_offset_map(pmd, address);
347         __migration_entry_wait(mm, ptep, ptl);
348 }
349
350 void migration_entry_wait_huge(struct vm_area_struct *vma,
351                 struct mm_struct *mm, pte_t *pte)
352 {
353         spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
354         __migration_entry_wait(mm, pte, ptl);
355 }
356
357 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
358 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
359 {
360         spinlock_t *ptl;
361         struct page *page;
362
363         ptl = pmd_lock(mm, pmd);
364         if (!is_pmd_migration_entry(*pmd))
365                 goto unlock;
366         page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
367         if (!get_page_unless_zero(page))
368                 goto unlock;
369         spin_unlock(ptl);
370         put_and_wait_on_page_locked(page);
371         return;
372 unlock:
373         spin_unlock(ptl);
374 }
375 #endif
376
377 static int expected_page_refs(struct page *page)
378 {
379         int expected_count = 1;
380
381         /*
382          * Device public or private pages have an extra refcount as they are
383          * ZONE_DEVICE pages.
384          */
385         expected_count += is_device_private_page(page);
386         expected_count += is_device_public_page(page);
387         if (page_mapping(page))
388                 expected_count += hpage_nr_pages(page) + page_has_private(page);
389
390         return expected_count;
391 }
392
393 /*
394  * Replace the page in the mapping.
395  *
396  * The number of remaining references must be:
397  * 1 for anonymous pages without a mapping
398  * 2 for pages with a mapping
399  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
400  */
401 int migrate_page_move_mapping(struct address_space *mapping,
402                 struct page *newpage, struct page *page, enum migrate_mode mode,
403                 int extra_count)
404 {
405         XA_STATE(xas, &mapping->i_pages, page_index(page));
406         struct zone *oldzone, *newzone;
407         int dirty;
408         int expected_count = expected_page_refs(page) + extra_count;
409
410         if (!mapping) {
411                 /* Anonymous page without mapping */
412                 if (page_count(page) != expected_count)
413                         return -EAGAIN;
414
415                 /* No turning back from here */
416                 newpage->index = page->index;
417                 newpage->mapping = page->mapping;
418                 if (PageSwapBacked(page))
419                         __SetPageSwapBacked(newpage);
420
421                 return MIGRATEPAGE_SUCCESS;
422         }
423
424         oldzone = page_zone(page);
425         newzone = page_zone(newpage);
426
427         xas_lock_irq(&xas);
428         if (page_count(page) != expected_count || xas_load(&xas) != page) {
429                 xas_unlock_irq(&xas);
430                 return -EAGAIN;
431         }
432
433         if (!page_ref_freeze(page, expected_count)) {
434                 xas_unlock_irq(&xas);
435                 return -EAGAIN;
436         }
437
438         /*
439          * Now we know that no one else is looking at the page:
440          * no turning back from here.
441          */
442         newpage->index = page->index;
443         newpage->mapping = page->mapping;
444         page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
445         if (PageSwapBacked(page)) {
446                 __SetPageSwapBacked(newpage);
447                 if (PageSwapCache(page)) {
448                         SetPageSwapCache(newpage);
449                         set_page_private(newpage, page_private(page));
450                 }
451         } else {
452                 VM_BUG_ON_PAGE(PageSwapCache(page), page);
453         }
454
455         /* Move dirty while page refs frozen and newpage not yet exposed */
456         dirty = PageDirty(page);
457         if (dirty) {
458                 ClearPageDirty(page);
459                 SetPageDirty(newpage);
460         }
461
462         xas_store(&xas, newpage);
463         if (PageTransHuge(page)) {
464                 int i;
465
466                 for (i = 1; i < HPAGE_PMD_NR; i++) {
467                         xas_next(&xas);
468                         xas_store(&xas, newpage + i);
469                 }
470         }
471
472         /*
473          * Drop cache reference from old page by unfreezing
474          * to one less reference.
475          * We know this isn't the last reference.
476          */
477         page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
478
479         xas_unlock(&xas);
480         /* Leave irq disabled to prevent preemption while updating stats */
481
482         /*
483          * If moved to a different zone then also account
484          * the page for that zone. Other VM counters will be
485          * taken care of when we establish references to the
486          * new page and drop references to the old page.
487          *
488          * Note that anonymous pages are accounted for
489          * via NR_FILE_PAGES and NR_ANON_MAPPED if they
490          * are mapped to swap space.
491          */
492         if (newzone != oldzone) {
493                 __dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
494                 __inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
495                 if (PageSwapBacked(page) && !PageSwapCache(page)) {
496                         __dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
497                         __inc_node_state(newzone->zone_pgdat, NR_SHMEM);
498                 }
499                 if (dirty && mapping_cap_account_dirty(mapping)) {
500                         __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
501                         __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
502                         __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
503                         __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
504                 }
505         }
506         local_irq_enable();
507
508         return MIGRATEPAGE_SUCCESS;
509 }
510 EXPORT_SYMBOL(migrate_page_move_mapping);
511
512 /*
513  * The expected number of remaining references is the same as that
514  * of migrate_page_move_mapping().
515  */
516 int migrate_huge_page_move_mapping(struct address_space *mapping,
517                                    struct page *newpage, struct page *page)
518 {
519         XA_STATE(xas, &mapping->i_pages, page_index(page));
520         int expected_count;
521
522         xas_lock_irq(&xas);
523         expected_count = 2 + page_has_private(page);
524         if (page_count(page) != expected_count || xas_load(&xas) != page) {
525                 xas_unlock_irq(&xas);
526                 return -EAGAIN;
527         }
528
529         if (!page_ref_freeze(page, expected_count)) {
530                 xas_unlock_irq(&xas);
531                 return -EAGAIN;
532         }
533
534         newpage->index = page->index;
535         newpage->mapping = page->mapping;
536
537         get_page(newpage);
538
539         xas_store(&xas, newpage);
540
541         page_ref_unfreeze(page, expected_count - 1);
542
543         xas_unlock_irq(&xas);
544
545         return MIGRATEPAGE_SUCCESS;
546 }
547
548 /*
549  * Gigantic pages are so large that we do not guarantee that page++ pointer
550  * arithmetic will work across the entire page.  We need something more
551  * specialized.
552  */
553 static void __copy_gigantic_page(struct page *dst, struct page *src,
554                                 int nr_pages)
555 {
556         int i;
557         struct page *dst_base = dst;
558         struct page *src_base = src;
559
560         for (i = 0; i < nr_pages; ) {
561                 cond_resched();
562                 copy_highpage(dst, src);
563
564                 i++;
565                 dst = mem_map_next(dst, dst_base, i);
566                 src = mem_map_next(src, src_base, i);
567         }
568 }
569
570 static void copy_huge_page(struct page *dst, struct page *src)
571 {
572         int i;
573         int nr_pages;
574
575         if (PageHuge(src)) {
576                 /* hugetlbfs page */
577                 struct hstate *h = page_hstate(src);
578                 nr_pages = pages_per_huge_page(h);
579
580                 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
581                         __copy_gigantic_page(dst, src, nr_pages);
582                         return;
583                 }
584         } else {
585                 /* thp page */
586                 BUG_ON(!PageTransHuge(src));
587                 nr_pages = hpage_nr_pages(src);
588         }
589
590         for (i = 0; i < nr_pages; i++) {
591                 cond_resched();
592                 copy_highpage(dst + i, src + i);
593         }
594 }
595
596 /*
597  * Copy the page to its new location
598  */
599 void migrate_page_states(struct page *newpage, struct page *page)
600 {
601         int cpupid;
602
603         if (PageError(page))
604                 SetPageError(newpage);
605         if (PageReferenced(page))
606                 SetPageReferenced(newpage);
607         if (PageUptodate(page))
608                 SetPageUptodate(newpage);
609         if (TestClearPageActive(page)) {
610                 VM_BUG_ON_PAGE(PageUnevictable(page), page);
611                 SetPageActive(newpage);
612         } else if (TestClearPageUnevictable(page))
613                 SetPageUnevictable(newpage);
614         if (PageWorkingset(page))
615                 SetPageWorkingset(newpage);
616         if (PageChecked(page))
617                 SetPageChecked(newpage);
618         if (PageMappedToDisk(page))
619                 SetPageMappedToDisk(newpage);
620
621         /* Move dirty on pages not done by migrate_page_move_mapping() */
622         if (PageDirty(page))
623                 SetPageDirty(newpage);
624
625         if (page_is_young(page))
626                 set_page_young(newpage);
627         if (page_is_idle(page))
628                 set_page_idle(newpage);
629
630         /*
631          * Copy NUMA information to the new page, to prevent over-eager
632          * future migrations of this same page.
633          */
634         cpupid = page_cpupid_xchg_last(page, -1);
635         page_cpupid_xchg_last(newpage, cpupid);
636
637         ksm_migrate_page(newpage, page);
638         /*
639          * Please do not reorder this without considering how mm/ksm.c's
640          * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
641          */
642         if (PageSwapCache(page))
643                 ClearPageSwapCache(page);
644         ClearPagePrivate(page);
645         set_page_private(page, 0);
646
647         /*
648          * If any waiters have accumulated on the new page then
649          * wake them up.
650          */
651         if (PageWriteback(newpage))
652                 end_page_writeback(newpage);
653
654         copy_page_owner(page, newpage);
655
656         mem_cgroup_migrate(page, newpage);
657 }
658 EXPORT_SYMBOL(migrate_page_states);
659
660 void migrate_page_copy(struct page *newpage, struct page *page)
661 {
662         if (PageHuge(page) || PageTransHuge(page))
663                 copy_huge_page(newpage, page);
664         else
665                 copy_highpage(newpage, page);
666
667         migrate_page_states(newpage, page);
668 }
669 EXPORT_SYMBOL(migrate_page_copy);
670
671 /************************************************************
672  *                    Migration functions
673  ***********************************************************/
674
675 /*
676  * Common logic to directly migrate a single LRU page suitable for
677  * pages that do not use PagePrivate/PagePrivate2.
678  *
679  * Pages are locked upon entry and exit.
680  */
681 int migrate_page(struct address_space *mapping,
682                 struct page *newpage, struct page *page,
683                 enum migrate_mode mode)
684 {
685         int rc;
686
687         BUG_ON(PageWriteback(page));    /* Writeback must be complete */
688
689         rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
690
691         if (rc != MIGRATEPAGE_SUCCESS)
692                 return rc;
693
694         if (mode != MIGRATE_SYNC_NO_COPY)
695                 migrate_page_copy(newpage, page);
696         else
697                 migrate_page_states(newpage, page);
698         return MIGRATEPAGE_SUCCESS;
699 }
700 EXPORT_SYMBOL(migrate_page);
701
702 #ifdef CONFIG_BLOCK
703 /* Returns true if all buffers are successfully locked */
704 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
705                                                         enum migrate_mode mode)
706 {
707         struct buffer_head *bh = head;
708
709         /* Simple case, sync compaction */
710         if (mode != MIGRATE_ASYNC) {
711                 do {
712                         lock_buffer(bh);
713                         bh = bh->b_this_page;
714
715                 } while (bh != head);
716
717                 return true;
718         }
719
720         /* async case, we cannot block on lock_buffer so use trylock_buffer */
721         do {
722                 if (!trylock_buffer(bh)) {
723                         /*
724                          * We failed to lock the buffer and cannot stall in
725                          * async migration. Release the taken locks
726                          */
727                         struct buffer_head *failed_bh = bh;
728                         bh = head;
729                         while (bh != failed_bh) {
730                                 unlock_buffer(bh);
731                                 bh = bh->b_this_page;
732                         }
733                         return false;
734                 }
735
736                 bh = bh->b_this_page;
737         } while (bh != head);
738         return true;
739 }
740
741 static int __buffer_migrate_page(struct address_space *mapping,
742                 struct page *newpage, struct page *page, enum migrate_mode mode,
743                 bool check_refs)
744 {
745         struct buffer_head *bh, *head;
746         int rc;
747         int expected_count;
748
749         if (!page_has_buffers(page))
750                 return migrate_page(mapping, newpage, page, mode);
751
752         /* Check whether page does not have extra refs before we do more work */
753         expected_count = expected_page_refs(page);
754         if (page_count(page) != expected_count)
755                 return -EAGAIN;
756
757         head = page_buffers(page);
758         if (!buffer_migrate_lock_buffers(head, mode))
759                 return -EAGAIN;
760
761         if (check_refs) {
762                 bool busy;
763                 bool invalidated = false;
764
765 recheck_buffers:
766                 busy = false;
767                 spin_lock(&mapping->private_lock);
768                 bh = head;
769                 do {
770                         if (atomic_read(&bh->b_count)) {
771                                 busy = true;
772                                 break;
773                         }
774                         bh = bh->b_this_page;
775                 } while (bh != head);
776                 spin_unlock(&mapping->private_lock);
777                 if (busy) {
778                         if (invalidated) {
779                                 rc = -EAGAIN;
780                                 goto unlock_buffers;
781                         }
782                         invalidate_bh_lrus();
783                         invalidated = true;
784                         goto recheck_buffers;
785                 }
786         }
787
788         rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
789         if (rc != MIGRATEPAGE_SUCCESS)
790                 goto unlock_buffers;
791
792         ClearPagePrivate(page);
793         set_page_private(newpage, page_private(page));
794         set_page_private(page, 0);
795         put_page(page);
796         get_page(newpage);
797
798         bh = head;
799         do {
800                 set_bh_page(bh, newpage, bh_offset(bh));
801                 bh = bh->b_this_page;
802
803         } while (bh != head);
804
805         SetPagePrivate(newpage);
806
807         if (mode != MIGRATE_SYNC_NO_COPY)
808                 migrate_page_copy(newpage, page);
809         else
810                 migrate_page_states(newpage, page);
811
812         rc = MIGRATEPAGE_SUCCESS;
813 unlock_buffers:
814         bh = head;
815         do {
816                 unlock_buffer(bh);
817                 bh = bh->b_this_page;
818
819         } while (bh != head);
820
821         return rc;
822 }
823
824 /*
825  * Migration function for pages with buffers. This function can only be used
826  * if the underlying filesystem guarantees that no other references to "page"
827  * exist. For example attached buffer heads are accessed only under page lock.
828  */
829 int buffer_migrate_page(struct address_space *mapping,
830                 struct page *newpage, struct page *page, enum migrate_mode mode)
831 {
832         return __buffer_migrate_page(mapping, newpage, page, mode, false);
833 }
834 EXPORT_SYMBOL(buffer_migrate_page);
835
836 /*
837  * Same as above except that this variant is more careful and checks that there
838  * are also no buffer head references. This function is the right one for
839  * mappings where buffer heads are directly looked up and referenced (such as
840  * block device mappings).
841  */
842 int buffer_migrate_page_norefs(struct address_space *mapping,
843                 struct page *newpage, struct page *page, enum migrate_mode mode)
844 {
845         return __buffer_migrate_page(mapping, newpage, page, mode, true);
846 }
847 #endif
848
849 /*
850  * Writeback a page to clean the dirty state
851  */
852 static int writeout(struct address_space *mapping, struct page *page)
853 {
854         struct writeback_control wbc = {
855                 .sync_mode = WB_SYNC_NONE,
856                 .nr_to_write = 1,
857                 .range_start = 0,
858                 .range_end = LLONG_MAX,
859                 .for_reclaim = 1
860         };
861         int rc;
862
863         if (!mapping->a_ops->writepage)
864                 /* No write method for the address space */
865                 return -EINVAL;
866
867         if (!clear_page_dirty_for_io(page))
868                 /* Someone else already triggered a write */
869                 return -EAGAIN;
870
871         /*
872          * A dirty page may imply that the underlying filesystem has
873          * the page on some queue. So the page must be clean for
874          * migration. Writeout may mean we loose the lock and the
875          * page state is no longer what we checked for earlier.
876          * At this point we know that the migration attempt cannot
877          * be successful.
878          */
879         remove_migration_ptes(page, page, false);
880
881         rc = mapping->a_ops->writepage(page, &wbc);
882
883         if (rc != AOP_WRITEPAGE_ACTIVATE)
884                 /* unlocked. Relock */
885                 lock_page(page);
886
887         return (rc < 0) ? -EIO : -EAGAIN;
888 }
889
890 /*
891  * Default handling if a filesystem does not provide a migration function.
892  */
893 static int fallback_migrate_page(struct address_space *mapping,
894         struct page *newpage, struct page *page, enum migrate_mode mode)
895 {
896         if (PageDirty(page)) {
897                 /* Only writeback pages in full synchronous migration */
898                 switch (mode) {
899                 case MIGRATE_SYNC:
900                 case MIGRATE_SYNC_NO_COPY:
901                         break;
902                 default:
903                         return -EBUSY;
904                 }
905                 return writeout(mapping, page);
906         }
907
908         /*
909          * Buffers may be managed in a filesystem specific way.
910          * We must have no buffers or drop them.
911          */
912         if (page_has_private(page) &&
913             !try_to_release_page(page, GFP_KERNEL))
914                 return -EAGAIN;
915
916         return migrate_page(mapping, newpage, page, mode);
917 }
918
919 /*
920  * Move a page to a newly allocated page
921  * The page is locked and all ptes have been successfully removed.
922  *
923  * The new page will have replaced the old page if this function
924  * is successful.
925  *
926  * Return value:
927  *   < 0 - error code
928  *  MIGRATEPAGE_SUCCESS - success
929  */
930 static int move_to_new_page(struct page *newpage, struct page *page,
931                                 enum migrate_mode mode)
932 {
933         struct address_space *mapping;
934         int rc = -EAGAIN;
935         bool is_lru = !__PageMovable(page);
936
937         VM_BUG_ON_PAGE(!PageLocked(page), page);
938         VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
939
940         mapping = page_mapping(page);
941
942         if (likely(is_lru)) {
943                 if (!mapping)
944                         rc = migrate_page(mapping, newpage, page, mode);
945                 else if (mapping->a_ops->migratepage)
946                         /*
947                          * Most pages have a mapping and most filesystems
948                          * provide a migratepage callback. Anonymous pages
949                          * are part of swap space which also has its own
950                          * migratepage callback. This is the most common path
951                          * for page migration.
952                          */
953                         rc = mapping->a_ops->migratepage(mapping, newpage,
954                                                         page, mode);
955                 else
956                         rc = fallback_migrate_page(mapping, newpage,
957                                                         page, mode);
958         } else {
959                 /*
960                  * In case of non-lru page, it could be released after
961                  * isolation step. In that case, we shouldn't try migration.
962                  */
963                 VM_BUG_ON_PAGE(!PageIsolated(page), page);
964                 if (!PageMovable(page)) {
965                         rc = MIGRATEPAGE_SUCCESS;
966                         __ClearPageIsolated(page);
967                         goto out;
968                 }
969
970                 rc = mapping->a_ops->migratepage(mapping, newpage,
971                                                 page, mode);
972                 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
973                         !PageIsolated(page));
974         }
975
976         /*
977          * When successful, old pagecache page->mapping must be cleared before
978          * page is freed; but stats require that PageAnon be left as PageAnon.
979          */
980         if (rc == MIGRATEPAGE_SUCCESS) {
981                 if (__PageMovable(page)) {
982                         VM_BUG_ON_PAGE(!PageIsolated(page), page);
983
984                         /*
985                          * We clear PG_movable under page_lock so any compactor
986                          * cannot try to migrate this page.
987                          */
988                         __ClearPageIsolated(page);
989                 }
990
991                 /*
992                  * Anonymous and movable page->mapping will be cleard by
993                  * free_pages_prepare so don't reset it here for keeping
994                  * the type to work PageAnon, for example.
995                  */
996                 if (!PageMappingFlags(page))
997                         page->mapping = NULL;
998         }
999 out:
1000         return rc;
1001 }
1002
1003 static int __unmap_and_move(struct page *page, struct page *newpage,
1004                                 int force, enum migrate_mode mode)
1005 {
1006         int rc = -EAGAIN;
1007         int page_was_mapped = 0;
1008         struct anon_vma *anon_vma = NULL;
1009         bool is_lru = !__PageMovable(page);
1010
1011         if (!trylock_page(page)) {
1012                 if (!force || mode == MIGRATE_ASYNC)
1013                         goto out;
1014
1015                 /*
1016                  * It's not safe for direct compaction to call lock_page.
1017                  * For example, during page readahead pages are added locked
1018                  * to the LRU. Later, when the IO completes the pages are
1019                  * marked uptodate and unlocked. However, the queueing
1020                  * could be merging multiple pages for one bio (e.g.
1021                  * mpage_readpages). If an allocation happens for the
1022                  * second or third page, the process can end up locking
1023                  * the same page twice and deadlocking. Rather than
1024                  * trying to be clever about what pages can be locked,
1025                  * avoid the use of lock_page for direct compaction
1026                  * altogether.
1027                  */
1028                 if (current->flags & PF_MEMALLOC)
1029                         goto out;
1030
1031                 lock_page(page);
1032         }
1033
1034         if (PageWriteback(page)) {
1035                 /*
1036                  * Only in the case of a full synchronous migration is it
1037                  * necessary to wait for PageWriteback. In the async case,
1038                  * the retry loop is too short and in the sync-light case,
1039                  * the overhead of stalling is too much
1040                  */
1041                 switch (mode) {
1042                 case MIGRATE_SYNC:
1043                 case MIGRATE_SYNC_NO_COPY:
1044                         break;
1045                 default:
1046                         rc = -EBUSY;
1047                         goto out_unlock;
1048                 }
1049                 if (!force)
1050                         goto out_unlock;
1051                 wait_on_page_writeback(page);
1052         }
1053
1054         /*
1055          * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
1056          * we cannot notice that anon_vma is freed while we migrates a page.
1057          * This get_anon_vma() delays freeing anon_vma pointer until the end
1058          * of migration. File cache pages are no problem because of page_lock()
1059          * File Caches may use write_page() or lock_page() in migration, then,
1060          * just care Anon page here.
1061          *
1062          * Only page_get_anon_vma() understands the subtleties of
1063          * getting a hold on an anon_vma from outside one of its mms.
1064          * But if we cannot get anon_vma, then we won't need it anyway,
1065          * because that implies that the anon page is no longer mapped
1066          * (and cannot be remapped so long as we hold the page lock).
1067          */
1068         if (PageAnon(page) && !PageKsm(page))
1069                 anon_vma = page_get_anon_vma(page);
1070
1071         /*
1072          * Block others from accessing the new page when we get around to
1073          * establishing additional references. We are usually the only one
1074          * holding a reference to newpage at this point. We used to have a BUG
1075          * here if trylock_page(newpage) fails, but would like to allow for
1076          * cases where there might be a race with the previous use of newpage.
1077          * This is much like races on refcount of oldpage: just don't BUG().
1078          */
1079         if (unlikely(!trylock_page(newpage)))
1080                 goto out_unlock;
1081
1082         if (unlikely(!is_lru)) {
1083                 rc = move_to_new_page(newpage, page, mode);
1084                 goto out_unlock_both;
1085         }
1086
1087         /*
1088          * Corner case handling:
1089          * 1. When a new swap-cache page is read into, it is added to the LRU
1090          * and treated as swapcache but it has no rmap yet.
1091          * Calling try_to_unmap() against a page->mapping==NULL page will
1092          * trigger a BUG.  So handle it here.
1093          * 2. An orphaned page (see truncate_complete_page) might have
1094          * fs-private metadata. The page can be picked up due to memory
1095          * offlining.  Everywhere else except page reclaim, the page is
1096          * invisible to the vm, so the page can not be migrated.  So try to
1097          * free the metadata, so the page can be freed.
1098          */
1099         if (!page->mapping) {
1100                 VM_BUG_ON_PAGE(PageAnon(page), page);
1101                 if (page_has_private(page)) {
1102                         try_to_free_buffers(page);
1103                         goto out_unlock_both;
1104                 }
1105         } else if (page_mapped(page)) {
1106                 /* Establish migration ptes */
1107                 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1108                                 page);
1109                 try_to_unmap(page,
1110                         TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1111                 page_was_mapped = 1;
1112         }
1113
1114         if (!page_mapped(page))
1115                 rc = move_to_new_page(newpage, page, mode);
1116
1117         if (page_was_mapped)
1118                 remove_migration_ptes(page,
1119                         rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1120
1121 out_unlock_both:
1122         unlock_page(newpage);
1123 out_unlock:
1124         /* Drop an anon_vma reference if we took one */
1125         if (anon_vma)
1126                 put_anon_vma(anon_vma);
1127         unlock_page(page);
1128 out:
1129         /*
1130          * If migration is successful, decrease refcount of the newpage
1131          * which will not free the page because new page owner increased
1132          * refcounter. As well, if it is LRU page, add the page to LRU
1133          * list in here.
1134          */
1135         if (rc == MIGRATEPAGE_SUCCESS) {
1136                 if (unlikely(__PageMovable(newpage)))
1137                         put_page(newpage);
1138                 else
1139                         putback_lru_page(newpage);
1140         }
1141
1142         return rc;
1143 }
1144
1145 /*
1146  * gcc 4.7 and 4.8 on arm get an ICEs when inlining unmap_and_move().  Work
1147  * around it.
1148  */
1149 #if defined(CONFIG_ARM) && \
1150         defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700
1151 #define ICE_noinline noinline
1152 #else
1153 #define ICE_noinline
1154 #endif
1155
1156 /*
1157  * Obtain the lock on page, remove all ptes and migrate the page
1158  * to the newly allocated page in newpage.
1159  */
1160 static ICE_noinline int unmap_and_move(new_page_t get_new_page,
1161                                    free_page_t put_new_page,
1162                                    unsigned long private, struct page *page,
1163                                    int force, enum migrate_mode mode,
1164                                    enum migrate_reason reason)
1165 {
1166         int rc = MIGRATEPAGE_SUCCESS;
1167         struct page *newpage;
1168
1169         if (!thp_migration_supported() && PageTransHuge(page))
1170                 return -ENOMEM;
1171
1172         newpage = get_new_page(page, private);
1173         if (!newpage)
1174                 return -ENOMEM;
1175
1176         if (page_count(page) == 1) {
1177                 /* page was freed from under us. So we are done. */
1178                 ClearPageActive(page);
1179                 ClearPageUnevictable(page);
1180                 if (unlikely(__PageMovable(page))) {
1181                         lock_page(page);
1182                         if (!PageMovable(page))
1183                                 __ClearPageIsolated(page);
1184                         unlock_page(page);
1185                 }
1186                 if (put_new_page)
1187                         put_new_page(newpage, private);
1188                 else
1189                         put_page(newpage);
1190                 goto out;
1191         }
1192
1193         rc = __unmap_and_move(page, newpage, force, mode);
1194         if (rc == MIGRATEPAGE_SUCCESS)
1195                 set_page_owner_migrate_reason(newpage, reason);
1196
1197 out:
1198         if (rc != -EAGAIN) {
1199                 /*
1200                  * A page that has been migrated has all references
1201                  * removed and will be freed. A page that has not been
1202                  * migrated will have kepts its references and be
1203                  * restored.
1204                  */
1205                 list_del(&page->lru);
1206
1207                 /*
1208                  * Compaction can migrate also non-LRU pages which are
1209                  * not accounted to NR_ISOLATED_*. They can be recognized
1210                  * as __PageMovable
1211                  */
1212                 if (likely(!__PageMovable(page)))
1213                         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1214                                         page_is_file_cache(page), -hpage_nr_pages(page));
1215         }
1216
1217         /*
1218          * If migration is successful, releases reference grabbed during
1219          * isolation. Otherwise, restore the page to right list unless
1220          * we want to retry.
1221          */
1222         if (rc == MIGRATEPAGE_SUCCESS) {
1223                 put_page(page);
1224                 if (reason == MR_MEMORY_FAILURE) {
1225                         /*
1226                          * Set PG_HWPoison on just freed page
1227                          * intentionally. Although it's rather weird,
1228                          * it's how HWPoison flag works at the moment.
1229                          */
1230                         if (set_hwpoison_free_buddy_page(page))
1231                                 num_poisoned_pages_inc();
1232                 }
1233         } else {
1234                 if (rc != -EAGAIN) {
1235                         if (likely(!__PageMovable(page))) {
1236                                 putback_lru_page(page);
1237                                 goto put_new;
1238                         }
1239
1240                         lock_page(page);
1241                         if (PageMovable(page))
1242                                 putback_movable_page(page);
1243                         else
1244                                 __ClearPageIsolated(page);
1245                         unlock_page(page);
1246                         put_page(page);
1247                 }
1248 put_new:
1249                 if (put_new_page)
1250                         put_new_page(newpage, private);
1251                 else
1252                         put_page(newpage);
1253         }
1254
1255         return rc;
1256 }
1257
1258 /*
1259  * Counterpart of unmap_and_move_page() for hugepage migration.
1260  *
1261  * This function doesn't wait the completion of hugepage I/O
1262  * because there is no race between I/O and migration for hugepage.
1263  * Note that currently hugepage I/O occurs only in direct I/O
1264  * where no lock is held and PG_writeback is irrelevant,
1265  * and writeback status of all subpages are counted in the reference
1266  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1267  * under direct I/O, the reference of the head page is 512 and a bit more.)
1268  * This means that when we try to migrate hugepage whose subpages are
1269  * doing direct I/O, some references remain after try_to_unmap() and
1270  * hugepage migration fails without data corruption.
1271  *
1272  * There is also no race when direct I/O is issued on the page under migration,
1273  * because then pte is replaced with migration swap entry and direct I/O code
1274  * will wait in the page fault for migration to complete.
1275  */
1276 static int unmap_and_move_huge_page(new_page_t get_new_page,
1277                                 free_page_t put_new_page, unsigned long private,
1278                                 struct page *hpage, int force,
1279                                 enum migrate_mode mode, int reason)
1280 {
1281         int rc = -EAGAIN;
1282         int page_was_mapped = 0;
1283         struct page *new_hpage;
1284         struct anon_vma *anon_vma = NULL;
1285
1286         /*
1287          * Movability of hugepages depends on architectures and hugepage size.
1288          * This check is necessary because some callers of hugepage migration
1289          * like soft offline and memory hotremove don't walk through page
1290          * tables or check whether the hugepage is pmd-based or not before
1291          * kicking migration.
1292          */
1293         if (!hugepage_migration_supported(page_hstate(hpage))) {
1294                 putback_active_hugepage(hpage);
1295                 return -ENOSYS;
1296         }
1297
1298         new_hpage = get_new_page(hpage, private);
1299         if (!new_hpage)
1300                 return -ENOMEM;
1301
1302         if (!trylock_page(hpage)) {
1303                 if (!force)
1304                         goto out;
1305                 switch (mode) {
1306                 case MIGRATE_SYNC:
1307                 case MIGRATE_SYNC_NO_COPY:
1308                         break;
1309                 default:
1310                         goto out;
1311                 }
1312                 lock_page(hpage);
1313         }
1314
1315         if (PageAnon(hpage))
1316                 anon_vma = page_get_anon_vma(hpage);
1317
1318         if (unlikely(!trylock_page(new_hpage)))
1319                 goto put_anon;
1320
1321         if (page_mapped(hpage)) {
1322                 try_to_unmap(hpage,
1323                         TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1324                 page_was_mapped = 1;
1325         }
1326
1327         if (!page_mapped(hpage))
1328                 rc = move_to_new_page(new_hpage, hpage, mode);
1329
1330         if (page_was_mapped)
1331                 remove_migration_ptes(hpage,
1332                         rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
1333
1334         unlock_page(new_hpage);
1335
1336 put_anon:
1337         if (anon_vma)
1338                 put_anon_vma(anon_vma);
1339
1340         if (rc == MIGRATEPAGE_SUCCESS) {
1341                 move_hugetlb_state(hpage, new_hpage, reason);
1342                 put_new_page = NULL;
1343         }
1344
1345         unlock_page(hpage);
1346 out:
1347         if (rc != -EAGAIN)
1348                 putback_active_hugepage(hpage);
1349
1350         /*
1351          * If migration was not successful and there's a freeing callback, use
1352          * it.  Otherwise, put_page() will drop the reference grabbed during
1353          * isolation.
1354          */
1355         if (put_new_page)
1356                 put_new_page(new_hpage, private);
1357         else
1358                 putback_active_hugepage(new_hpage);
1359
1360         return rc;
1361 }
1362
1363 /*
1364  * migrate_pages - migrate the pages specified in a list, to the free pages
1365  *                 supplied as the target for the page migration
1366  *
1367  * @from:               The list of pages to be migrated.
1368  * @get_new_page:       The function used to allocate free pages to be used
1369  *                      as the target of the page migration.
1370  * @put_new_page:       The function used to free target pages if migration
1371  *                      fails, or NULL if no special handling is necessary.
1372  * @private:            Private data to be passed on to get_new_page()
1373  * @mode:               The migration mode that specifies the constraints for
1374  *                      page migration, if any.
1375  * @reason:             The reason for page migration.
1376  *
1377  * The function returns after 10 attempts or if no pages are movable any more
1378  * because the list has become empty or no retryable pages exist any more.
1379  * The caller should call putback_movable_pages() to return pages to the LRU
1380  * or free list only if ret != 0.
1381  *
1382  * Returns the number of pages that were not migrated, or an error code.
1383  */
1384 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1385                 free_page_t put_new_page, unsigned long private,
1386                 enum migrate_mode mode, int reason)
1387 {
1388         int retry = 1;
1389         int nr_failed = 0;
1390         int nr_succeeded = 0;
1391         int pass = 0;
1392         struct page *page;
1393         struct page *page2;
1394         int swapwrite = current->flags & PF_SWAPWRITE;
1395         int rc;
1396
1397         if (!swapwrite)
1398                 current->flags |= PF_SWAPWRITE;
1399
1400         for(pass = 0; pass < 10 && retry; pass++) {
1401                 retry = 0;
1402
1403                 list_for_each_entry_safe(page, page2, from, lru) {
1404 retry:
1405                         cond_resched();
1406
1407                         if (PageHuge(page))
1408                                 rc = unmap_and_move_huge_page(get_new_page,
1409                                                 put_new_page, private, page,
1410                                                 pass > 2, mode, reason);
1411                         else
1412                                 rc = unmap_and_move(get_new_page, put_new_page,
1413                                                 private, page, pass > 2, mode,
1414                                                 reason);
1415
1416                         switch(rc) {
1417                         case -ENOMEM:
1418                                 /*
1419                                  * THP migration might be unsupported or the
1420                                  * allocation could've failed so we should
1421                                  * retry on the same page with the THP split
1422                                  * to base pages.
1423                                  *
1424                                  * Head page is retried immediately and tail
1425                                  * pages are added to the tail of the list so
1426                                  * we encounter them after the rest of the list
1427                                  * is processed.
1428                                  */
1429                                 if (PageTransHuge(page) && !PageHuge(page)) {
1430                                         lock_page(page);
1431                                         rc = split_huge_page_to_list(page, from);
1432                                         unlock_page(page);
1433                                         if (!rc) {
1434                                                 list_safe_reset_next(page, page2, lru);
1435                                                 goto retry;
1436                                         }
1437                                 }
1438                                 nr_failed++;
1439                                 goto out;
1440                         case -EAGAIN:
1441                                 retry++;
1442                                 break;
1443                         case MIGRATEPAGE_SUCCESS:
1444                                 nr_succeeded++;
1445                                 break;
1446                         default:
1447                                 /*
1448                                  * Permanent failure (-EBUSY, -ENOSYS, etc.):
1449                                  * unlike -EAGAIN case, the failed page is
1450                                  * removed from migration page list and not
1451                                  * retried in the next outer loop.
1452                                  */
1453                                 nr_failed++;
1454                                 break;
1455                         }
1456                 }
1457         }
1458         nr_failed += retry;
1459         rc = nr_failed;
1460 out:
1461         if (nr_succeeded)
1462                 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1463         if (nr_failed)
1464                 count_vm_events(PGMIGRATE_FAIL, nr_failed);
1465         trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1466
1467         if (!swapwrite)
1468                 current->flags &= ~PF_SWAPWRITE;
1469
1470         return rc;
1471 }
1472
1473 #ifdef CONFIG_NUMA
1474
1475 static int store_status(int __user *status, int start, int value, int nr)
1476 {
1477         while (nr-- > 0) {
1478                 if (put_user(value, status + start))
1479                         return -EFAULT;
1480                 start++;
1481         }
1482
1483         return 0;
1484 }
1485
1486 static int do_move_pages_to_node(struct mm_struct *mm,
1487                 struct list_head *pagelist, int node)
1488 {
1489         int err;
1490
1491         if (list_empty(pagelist))
1492                 return 0;
1493
1494         err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
1495                         MIGRATE_SYNC, MR_SYSCALL);
1496         if (err)
1497                 putback_movable_pages(pagelist);
1498         return err;
1499 }
1500
1501 /*
1502  * Resolves the given address to a struct page, isolates it from the LRU and
1503  * puts it to the given pagelist.
1504  * Returns -errno if the page cannot be found/isolated or 0 when it has been
1505  * queued or the page doesn't need to be migrated because it is already on
1506  * the target node
1507  */
1508 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1509                 int node, struct list_head *pagelist, bool migrate_all)
1510 {
1511         struct vm_area_struct *vma;
1512         struct page *page;
1513         unsigned int follflags;
1514         int err;
1515
1516         down_read(&mm->mmap_sem);
1517         err = -EFAULT;
1518         vma = find_vma(mm, addr);
1519         if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1520                 goto out;
1521
1522         /* FOLL_DUMP to ignore special (like zero) pages */
1523         follflags = FOLL_GET | FOLL_DUMP;
1524         page = follow_page(vma, addr, follflags);
1525
1526         err = PTR_ERR(page);
1527         if (IS_ERR(page))
1528                 goto out;
1529
1530         err = -ENOENT;
1531         if (!page)
1532                 goto out;
1533
1534         err = 0;
1535         if (page_to_nid(page) == node)
1536                 goto out_putpage;
1537
1538         err = -EACCES;
1539         if (page_mapcount(page) > 1 && !migrate_all)
1540                 goto out_putpage;
1541
1542         if (PageHuge(page)) {
1543                 if (PageHead(page)) {
1544                         isolate_huge_page(page, pagelist);
1545                         err = 0;
1546                 }
1547         } else {
1548                 struct page *head;
1549
1550                 head = compound_head(page);
1551                 err = isolate_lru_page(head);
1552                 if (err)
1553                         goto out_putpage;
1554
1555                 err = 0;
1556                 list_add_tail(&head->lru, pagelist);
1557                 mod_node_page_state(page_pgdat(head),
1558                         NR_ISOLATED_ANON + page_is_file_cache(head),
1559                         hpage_nr_pages(head));
1560         }
1561 out_putpage:
1562         /*
1563          * Either remove the duplicate refcount from
1564          * isolate_lru_page() or drop the page ref if it was
1565          * not isolated.
1566          */
1567         put_page(page);
1568 out:
1569         up_read(&mm->mmap_sem);
1570         return err;
1571 }
1572
1573 /*
1574  * Migrate an array of page address onto an array of nodes and fill
1575  * the corresponding array of status.
1576  */
1577 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1578                          unsigned long nr_pages,
1579                          const void __user * __user *pages,
1580                          const int __user *nodes,
1581                          int __user *status, int flags)
1582 {
1583         int current_node = NUMA_NO_NODE;
1584         LIST_HEAD(pagelist);
1585         int start, i;
1586         int err = 0, err1;
1587
1588         migrate_prep();
1589
1590         for (i = start = 0; i < nr_pages; i++) {
1591                 const void __user *p;
1592                 unsigned long addr;
1593                 int node;
1594
1595                 err = -EFAULT;
1596                 if (get_user(p, pages + i))
1597                         goto out_flush;
1598                 if (get_user(node, nodes + i))
1599                         goto out_flush;
1600                 addr = (unsigned long)p;
1601
1602                 err = -ENODEV;
1603                 if (node < 0 || node >= MAX_NUMNODES)
1604                         goto out_flush;
1605                 if (!node_state(node, N_MEMORY))
1606                         goto out_flush;
1607
1608                 err = -EACCES;
1609                 if (!node_isset(node, task_nodes))
1610                         goto out_flush;
1611
1612                 if (current_node == NUMA_NO_NODE) {
1613                         current_node = node;
1614                         start = i;
1615                 } else if (node != current_node) {
1616                         err = do_move_pages_to_node(mm, &pagelist, current_node);
1617                         if (err)
1618                                 goto out;
1619                         err = store_status(status, start, current_node, i - start);
1620                         if (err)
1621                                 goto out;
1622                         start = i;
1623                         current_node = node;
1624                 }
1625
1626                 /*
1627                  * Errors in the page lookup or isolation are not fatal and we simply
1628                  * report them via status
1629                  */
1630                 err = add_page_for_migration(mm, addr, current_node,
1631                                 &pagelist, flags & MPOL_MF_MOVE_ALL);
1632                 if (!err)
1633                         continue;
1634
1635                 err = store_status(status, i, err, 1);
1636                 if (err)
1637                         goto out_flush;
1638
1639                 err = do_move_pages_to_node(mm, &pagelist, current_node);
1640                 if (err)
1641                         goto out;
1642                 if (i > start) {
1643                         err = store_status(status, start, current_node, i - start);
1644                         if (err)
1645                                 goto out;
1646                 }
1647                 current_node = NUMA_NO_NODE;
1648         }
1649 out_flush:
1650         if (list_empty(&pagelist))
1651                 return err;
1652
1653         /* Make sure we do not overwrite the existing error */
1654         err1 = do_move_pages_to_node(mm, &pagelist, current_node);
1655         if (!err1)
1656                 err1 = store_status(status, start, current_node, i - start);
1657         if (!err)
1658                 err = err1;
1659 out:
1660         return err;
1661 }
1662
1663 /*
1664  * Determine the nodes of an array of pages and store it in an array of status.
1665  */
1666 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1667                                 const void __user **pages, int *status)
1668 {
1669         unsigned long i;
1670
1671         down_read(&mm->mmap_sem);
1672
1673         for (i = 0; i < nr_pages; i++) {
1674                 unsigned long addr = (unsigned long)(*pages);
1675                 struct vm_area_struct *vma;
1676                 struct page *page;
1677                 int err = -EFAULT;
1678
1679                 vma = find_vma(mm, addr);
1680                 if (!vma || addr < vma->vm_start)
1681                         goto set_status;
1682
1683                 /* FOLL_DUMP to ignore special (like zero) pages */
1684                 page = follow_page(vma, addr, FOLL_DUMP);
1685
1686                 err = PTR_ERR(page);
1687                 if (IS_ERR(page))
1688                         goto set_status;
1689
1690                 err = page ? page_to_nid(page) : -ENOENT;
1691 set_status:
1692                 *status = err;
1693
1694                 pages++;
1695                 status++;
1696         }
1697
1698         up_read(&mm->mmap_sem);
1699 }
1700
1701 /*
1702  * Determine the nodes of a user array of pages and store it in
1703  * a user array of status.
1704  */
1705 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1706                          const void __user * __user *pages,
1707                          int __user *status)
1708 {
1709 #define DO_PAGES_STAT_CHUNK_NR 16
1710         const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1711         int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1712
1713         while (nr_pages) {
1714                 unsigned long chunk_nr;
1715
1716                 chunk_nr = nr_pages;
1717                 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1718                         chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1719
1720                 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1721                         break;
1722
1723                 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1724
1725                 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1726                         break;
1727
1728                 pages += chunk_nr;
1729                 status += chunk_nr;
1730                 nr_pages -= chunk_nr;
1731         }
1732         return nr_pages ? -EFAULT : 0;
1733 }
1734
1735 /*
1736  * Move a list of pages in the address space of the currently executing
1737  * process.
1738  */
1739 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1740                              const void __user * __user *pages,
1741                              const int __user *nodes,
1742                              int __user *status, int flags)
1743 {
1744         struct task_struct *task;
1745         struct mm_struct *mm;
1746         int err;
1747         nodemask_t task_nodes;
1748
1749         /* Check flags */
1750         if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1751                 return -EINVAL;
1752
1753         if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1754                 return -EPERM;
1755
1756         /* Find the mm_struct */
1757         rcu_read_lock();
1758         task = pid ? find_task_by_vpid(pid) : current;
1759         if (!task) {
1760                 rcu_read_unlock();
1761                 return -ESRCH;
1762         }
1763         get_task_struct(task);
1764
1765         /*
1766          * Check if this process has the right to modify the specified
1767          * process. Use the regular "ptrace_may_access()" checks.
1768          */
1769         if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1770                 rcu_read_unlock();
1771                 err = -EPERM;
1772                 goto out;
1773         }
1774         rcu_read_unlock();
1775
1776         err = security_task_movememory(task);
1777         if (err)
1778                 goto out;
1779
1780         task_nodes = cpuset_mems_allowed(task);
1781         mm = get_task_mm(task);
1782         put_task_struct(task);
1783
1784         if (!mm)
1785                 return -EINVAL;
1786
1787         if (nodes)
1788                 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1789                                     nodes, status, flags);
1790         else
1791                 err = do_pages_stat(mm, nr_pages, pages, status);
1792
1793         mmput(mm);
1794         return err;
1795
1796 out:
1797         put_task_struct(task);
1798         return err;
1799 }
1800
1801 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1802                 const void __user * __user *, pages,
1803                 const int __user *, nodes,
1804                 int __user *, status, int, flags)
1805 {
1806         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1807 }
1808
1809 #ifdef CONFIG_COMPAT
1810 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
1811                        compat_uptr_t __user *, pages32,
1812                        const int __user *, nodes,
1813                        int __user *, status,
1814                        int, flags)
1815 {
1816         const void __user * __user *pages;
1817         int i;
1818
1819         pages = compat_alloc_user_space(nr_pages * sizeof(void *));
1820         for (i = 0; i < nr_pages; i++) {
1821                 compat_uptr_t p;
1822
1823                 if (get_user(p, pages32 + i) ||
1824                         put_user(compat_ptr(p), pages + i))
1825                         return -EFAULT;
1826         }
1827         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1828 }
1829 #endif /* CONFIG_COMPAT */
1830
1831 #ifdef CONFIG_NUMA_BALANCING
1832 /*
1833  * Returns true if this is a safe migration target node for misplaced NUMA
1834  * pages. Currently it only checks the watermarks which crude
1835  */
1836 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
1837                                    unsigned long nr_migrate_pages)
1838 {
1839         int z;
1840
1841         for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1842                 struct zone *zone = pgdat->node_zones + z;
1843
1844                 if (!populated_zone(zone))
1845                         continue;
1846
1847                 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
1848                 if (!zone_watermark_ok(zone, 0,
1849                                        high_wmark_pages(zone) +
1850                                        nr_migrate_pages,
1851                                        0, 0))
1852                         continue;
1853                 return true;
1854         }
1855         return false;
1856 }
1857
1858 static struct page *alloc_misplaced_dst_page(struct page *page,
1859                                            unsigned long data)
1860 {
1861         int nid = (int) data;
1862         struct page *newpage;
1863
1864         newpage = __alloc_pages_node(nid,
1865                                          (GFP_HIGHUSER_MOVABLE |
1866                                           __GFP_THISNODE | __GFP_NOMEMALLOC |
1867                                           __GFP_NORETRY | __GFP_NOWARN) &
1868                                          ~__GFP_RECLAIM, 0);
1869
1870         return newpage;
1871 }
1872
1873 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
1874 {
1875         int page_lru;
1876
1877         VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
1878
1879         /* Avoid migrating to a node that is nearly full */
1880         if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
1881                 return 0;
1882
1883         if (isolate_lru_page(page))
1884                 return 0;
1885
1886         /*
1887          * migrate_misplaced_transhuge_page() skips page migration's usual
1888          * check on page_count(), so we must do it here, now that the page
1889          * has been isolated: a GUP pin, or any other pin, prevents migration.
1890          * The expected page count is 3: 1 for page's mapcount and 1 for the
1891          * caller's pin and 1 for the reference taken by isolate_lru_page().
1892          */
1893         if (PageTransHuge(page) && page_count(page) != 3) {
1894                 putback_lru_page(page);
1895                 return 0;
1896         }
1897
1898         page_lru = page_is_file_cache(page);
1899         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
1900                                 hpage_nr_pages(page));
1901
1902         /*
1903          * Isolating the page has taken another reference, so the
1904          * caller's reference can be safely dropped without the page
1905          * disappearing underneath us during migration.
1906          */
1907         put_page(page);
1908         return 1;
1909 }
1910
1911 bool pmd_trans_migrating(pmd_t pmd)
1912 {
1913         struct page *page = pmd_page(pmd);
1914         return PageLocked(page);
1915 }
1916
1917 /*
1918  * Attempt to migrate a misplaced page to the specified destination
1919  * node. Caller is expected to have an elevated reference count on
1920  * the page that will be dropped by this function before returning.
1921  */
1922 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1923                            int node)
1924 {
1925         pg_data_t *pgdat = NODE_DATA(node);
1926         int isolated;
1927         int nr_remaining;
1928         LIST_HEAD(migratepages);
1929
1930         /*
1931          * Don't migrate file pages that are mapped in multiple processes
1932          * with execute permissions as they are probably shared libraries.
1933          */
1934         if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1935             (vma->vm_flags & VM_EXEC))
1936                 goto out;
1937
1938         /*
1939          * Also do not migrate dirty pages as not all filesystems can move
1940          * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
1941          */
1942         if (page_is_file_cache(page) && PageDirty(page))
1943                 goto out;
1944
1945         isolated = numamigrate_isolate_page(pgdat, page);
1946         if (!isolated)
1947                 goto out;
1948
1949         list_add(&page->lru, &migratepages);
1950         nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
1951                                      NULL, node, MIGRATE_ASYNC,
1952                                      MR_NUMA_MISPLACED);
1953         if (nr_remaining) {
1954                 if (!list_empty(&migratepages)) {
1955                         list_del(&page->lru);
1956                         dec_node_page_state(page, NR_ISOLATED_ANON +
1957                                         page_is_file_cache(page));
1958                         putback_lru_page(page);
1959                 }
1960                 isolated = 0;
1961         } else
1962                 count_vm_numa_event(NUMA_PAGE_MIGRATE);
1963         BUG_ON(!list_empty(&migratepages));
1964         return isolated;
1965
1966 out:
1967         put_page(page);
1968         return 0;
1969 }
1970 #endif /* CONFIG_NUMA_BALANCING */
1971
1972 #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1973 /*
1974  * Migrates a THP to a given target node. page must be locked and is unlocked
1975  * before returning.
1976  */
1977 int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1978                                 struct vm_area_struct *vma,
1979                                 pmd_t *pmd, pmd_t entry,
1980                                 unsigned long address,
1981                                 struct page *page, int node)
1982 {
1983         spinlock_t *ptl;
1984         pg_data_t *pgdat = NODE_DATA(node);
1985         int isolated = 0;
1986         struct page *new_page = NULL;
1987         int page_lru = page_is_file_cache(page);
1988         unsigned long start = address & HPAGE_PMD_MASK;
1989
1990         new_page = alloc_pages_node(node,
1991                 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
1992                 HPAGE_PMD_ORDER);
1993         if (!new_page)
1994                 goto out_fail;
1995         prep_transhuge_page(new_page);
1996
1997         isolated = numamigrate_isolate_page(pgdat, page);
1998         if (!isolated) {
1999                 put_page(new_page);
2000                 goto out_fail;
2001         }
2002
2003         /* Prepare a page as a migration target */
2004         __SetPageLocked(new_page);
2005         if (PageSwapBacked(page))
2006                 __SetPageSwapBacked(new_page);
2007
2008         /* anon mapping, we can simply copy page->mapping to the new page: */
2009         new_page->mapping = page->mapping;
2010         new_page->index = page->index;
2011         /* flush the cache before copying using the kernel virtual address */
2012         flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
2013         migrate_page_copy(new_page, page);
2014         WARN_ON(PageLRU(new_page));
2015
2016         /* Recheck the target PMD */
2017         ptl = pmd_lock(mm, pmd);
2018         if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
2019                 spin_unlock(ptl);
2020
2021                 /* Reverse changes made by migrate_page_copy() */
2022                 if (TestClearPageActive(new_page))
2023                         SetPageActive(page);
2024                 if (TestClearPageUnevictable(new_page))
2025                         SetPageUnevictable(page);
2026
2027                 unlock_page(new_page);
2028                 put_page(new_page);             /* Free it */
2029
2030                 /* Retake the callers reference and putback on LRU */
2031                 get_page(page);
2032                 putback_lru_page(page);
2033                 mod_node_page_state(page_pgdat(page),
2034                          NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
2035
2036                 goto out_unlock;
2037         }
2038
2039         entry = mk_huge_pmd(new_page, vma->vm_page_prot);
2040         entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2041
2042         /*
2043          * Overwrite the old entry under pagetable lock and establish
2044          * the new PTE. Any parallel GUP will either observe the old
2045          * page blocking on the page lock, block on the page table
2046          * lock or observe the new page. The SetPageUptodate on the
2047          * new page and page_add_new_anon_rmap guarantee the copy is
2048          * visible before the pagetable update.
2049          */
2050         page_add_anon_rmap(new_page, vma, start, true);
2051         /*
2052          * At this point the pmd is numa/protnone (i.e. non present) and the TLB
2053          * has already been flushed globally.  So no TLB can be currently
2054          * caching this non present pmd mapping.  There's no need to clear the
2055          * pmd before doing set_pmd_at(), nor to flush the TLB after
2056          * set_pmd_at().  Clearing the pmd here would introduce a race
2057          * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
2058          * mmap_sem for reading.  If the pmd is set to NULL at any given time,
2059          * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
2060          * pmd.
2061          */
2062         set_pmd_at(mm, start, pmd, entry);
2063         update_mmu_cache_pmd(vma, address, &entry);
2064
2065         page_ref_unfreeze(page, 2);
2066         mlock_migrate_page(new_page, page);
2067         page_remove_rmap(page, true);
2068         set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
2069
2070         spin_unlock(ptl);
2071
2072         /* Take an "isolate" reference and put new page on the LRU. */
2073         get_page(new_page);
2074         putback_lru_page(new_page);
2075
2076         unlock_page(new_page);
2077         unlock_page(page);
2078         put_page(page);                 /* Drop the rmap reference */
2079         put_page(page);                 /* Drop the LRU isolation reference */
2080
2081         count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2082         count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2083
2084         mod_node_page_state(page_pgdat(page),
2085                         NR_ISOLATED_ANON + page_lru,
2086                         -HPAGE_PMD_NR);
2087         return isolated;
2088
2089 out_fail:
2090         count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
2091         ptl = pmd_lock(mm, pmd);
2092         if (pmd_same(*pmd, entry)) {
2093                 entry = pmd_modify(entry, vma->vm_page_prot);
2094                 set_pmd_at(mm, start, pmd, entry);
2095                 update_mmu_cache_pmd(vma, address, &entry);
2096         }
2097         spin_unlock(ptl);
2098
2099 out_unlock:
2100         unlock_page(page);
2101         put_page(page);
2102         return 0;
2103 }
2104 #endif /* CONFIG_NUMA_BALANCING */
2105
2106 #endif /* CONFIG_NUMA */
2107
2108 #if defined(CONFIG_MIGRATE_VMA_HELPER)
2109 struct migrate_vma {
2110         struct vm_area_struct   *vma;
2111         unsigned long           *dst;
2112         unsigned long           *src;
2113         unsigned long           cpages;
2114         unsigned long           npages;
2115         unsigned long           start;
2116         unsigned long           end;
2117 };
2118
2119 static int migrate_vma_collect_hole(unsigned long start,
2120                                     unsigned long end,
2121                                     struct mm_walk *walk)
2122 {
2123         struct migrate_vma *migrate = walk->private;
2124         unsigned long addr;
2125
2126         for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
2127                 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2128                 migrate->dst[migrate->npages] = 0;
2129                 migrate->npages++;
2130                 migrate->cpages++;
2131         }
2132
2133         return 0;
2134 }
2135
2136 static int migrate_vma_collect_skip(unsigned long start,
2137                                     unsigned long end,
2138                                     struct mm_walk *walk)
2139 {
2140         struct migrate_vma *migrate = walk->private;
2141         unsigned long addr;
2142
2143         for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
2144                 migrate->dst[migrate->npages] = 0;
2145                 migrate->src[migrate->npages++] = 0;
2146         }
2147
2148         return 0;
2149 }
2150
2151 static int migrate_vma_collect_pmd(pmd_t *pmdp,
2152                                    unsigned long start,
2153                                    unsigned long end,
2154                                    struct mm_walk *walk)
2155 {
2156         struct migrate_vma *migrate = walk->private;
2157         struct vm_area_struct *vma = walk->vma;
2158         struct mm_struct *mm = vma->vm_mm;
2159         unsigned long addr = start, unmapped = 0;
2160         spinlock_t *ptl;
2161         pte_t *ptep;
2162
2163 again:
2164         if (pmd_none(*pmdp))
2165                 return migrate_vma_collect_hole(start, end, walk);
2166
2167         if (pmd_trans_huge(*pmdp)) {
2168                 struct page *page;
2169
2170                 ptl = pmd_lock(mm, pmdp);
2171                 if (unlikely(!pmd_trans_huge(*pmdp))) {
2172                         spin_unlock(ptl);
2173                         goto again;
2174                 }
2175
2176                 page = pmd_page(*pmdp);
2177                 if (is_huge_zero_page(page)) {
2178                         spin_unlock(ptl);
2179                         split_huge_pmd(vma, pmdp, addr);
2180                         if (pmd_trans_unstable(pmdp))
2181                                 return migrate_vma_collect_skip(start, end,
2182                                                                 walk);
2183                 } else {
2184                         int ret;
2185
2186                         get_page(page);
2187                         spin_unlock(ptl);
2188                         if (unlikely(!trylock_page(page)))
2189                                 return migrate_vma_collect_skip(start, end,
2190                                                                 walk);
2191                         ret = split_huge_page(page);
2192                         unlock_page(page);
2193                         put_page(page);
2194                         if (ret)
2195                                 return migrate_vma_collect_skip(start, end,
2196                                                                 walk);
2197                         if (pmd_none(*pmdp))
2198                                 return migrate_vma_collect_hole(start, end,
2199                                                                 walk);
2200                 }
2201         }
2202
2203         if (unlikely(pmd_bad(*pmdp)))
2204                 return migrate_vma_collect_skip(start, end, walk);
2205
2206         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2207         arch_enter_lazy_mmu_mode();
2208
2209         for (; addr < end; addr += PAGE_SIZE, ptep++) {
2210                 unsigned long mpfn, pfn;
2211                 struct page *page;
2212                 swp_entry_t entry;
2213                 pte_t pte;
2214
2215                 pte = *ptep;
2216                 pfn = pte_pfn(pte);
2217
2218                 if (pte_none(pte)) {
2219                         mpfn = MIGRATE_PFN_MIGRATE;
2220                         migrate->cpages++;
2221                         pfn = 0;
2222                         goto next;
2223                 }
2224
2225                 if (!pte_present(pte)) {
2226                         mpfn = pfn = 0;
2227
2228                         /*
2229                          * Only care about unaddressable device page special
2230                          * page table entry. Other special swap entries are not
2231                          * migratable, and we ignore regular swapped page.
2232                          */
2233                         entry = pte_to_swp_entry(pte);
2234                         if (!is_device_private_entry(entry))
2235                                 goto next;
2236
2237                         page = device_private_entry_to_page(entry);
2238                         mpfn = migrate_pfn(page_to_pfn(page))|
2239                                 MIGRATE_PFN_DEVICE | MIGRATE_PFN_MIGRATE;
2240                         if (is_write_device_private_entry(entry))
2241                                 mpfn |= MIGRATE_PFN_WRITE;
2242                 } else {
2243                         if (is_zero_pfn(pfn)) {
2244                                 mpfn = MIGRATE_PFN_MIGRATE;
2245                                 migrate->cpages++;
2246                                 pfn = 0;
2247                                 goto next;
2248                         }
2249                         page = _vm_normal_page(migrate->vma, addr, pte, true);
2250                         mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2251                         mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2252                 }
2253
2254                 /* FIXME support THP */
2255                 if (!page || !page->mapping || PageTransCompound(page)) {
2256                         mpfn = pfn = 0;
2257                         goto next;
2258                 }
2259                 pfn = page_to_pfn(page);
2260
2261                 /*
2262                  * By getting a reference on the page we pin it and that blocks
2263                  * any kind of migration. Side effect is that it "freezes" the
2264                  * pte.
2265                  *
2266                  * We drop this reference after isolating the page from the lru
2267                  * for non device page (device page are not on the lru and thus
2268                  * can't be dropped from it).
2269                  */
2270                 get_page(page);
2271                 migrate->cpages++;
2272
2273                 /*
2274                  * Optimize for the common case where page is only mapped once
2275                  * in one process. If we can lock the page, then we can safely
2276                  * set up a special migration page table entry now.
2277                  */
2278                 if (trylock_page(page)) {
2279                         pte_t swp_pte;
2280
2281                         mpfn |= MIGRATE_PFN_LOCKED;
2282                         ptep_get_and_clear(mm, addr, ptep);
2283
2284                         /* Setup special migration page table entry */
2285                         entry = make_migration_entry(page, mpfn &
2286                                                      MIGRATE_PFN_WRITE);
2287                         swp_pte = swp_entry_to_pte(entry);
2288                         if (pte_soft_dirty(pte))
2289                                 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2290                         set_pte_at(mm, addr, ptep, swp_pte);
2291
2292                         /*
2293                          * This is like regular unmap: we remove the rmap and
2294                          * drop page refcount. Page won't be freed, as we took
2295                          * a reference just above.
2296                          */
2297                         page_remove_rmap(page, false);
2298                         put_page(page);
2299
2300                         if (pte_present(pte))
2301                                 unmapped++;
2302                 }
2303
2304 next:
2305                 migrate->dst[migrate->npages] = 0;
2306                 migrate->src[migrate->npages++] = mpfn;
2307         }
2308         arch_leave_lazy_mmu_mode();
2309         pte_unmap_unlock(ptep - 1, ptl);
2310
2311         /* Only flush the TLB if we actually modified any entries */
2312         if (unmapped)
2313                 flush_tlb_range(walk->vma, start, end);
2314
2315         return 0;
2316 }
2317
2318 /*
2319  * migrate_vma_collect() - collect pages over a range of virtual addresses
2320  * @migrate: migrate struct containing all migration information
2321  *
2322  * This will walk the CPU page table. For each virtual address backed by a
2323  * valid page, it updates the src array and takes a reference on the page, in
2324  * order to pin the page until we lock it and unmap it.
2325  */
2326 static void migrate_vma_collect(struct migrate_vma *migrate)
2327 {
2328         struct mmu_notifier_range range;
2329         struct mm_walk mm_walk;
2330
2331         mm_walk.pmd_entry = migrate_vma_collect_pmd;
2332         mm_walk.pte_entry = NULL;
2333         mm_walk.pte_hole = migrate_vma_collect_hole;
2334         mm_walk.hugetlb_entry = NULL;
2335         mm_walk.test_walk = NULL;
2336         mm_walk.vma = migrate->vma;
2337         mm_walk.mm = migrate->vma->vm_mm;
2338         mm_walk.private = migrate;
2339
2340         mmu_notifier_range_init(&range, mm_walk.mm, migrate->start,
2341                                 migrate->end);
2342         mmu_notifier_invalidate_range_start(&range);
2343         walk_page_range(migrate->start, migrate->end, &mm_walk);
2344         mmu_notifier_invalidate_range_end(&range);
2345
2346         migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2347 }
2348
2349 /*
2350  * migrate_vma_check_page() - check if page is pinned or not
2351  * @page: struct page to check
2352  *
2353  * Pinned pages cannot be migrated. This is the same test as in
2354  * migrate_page_move_mapping(), except that here we allow migration of a
2355  * ZONE_DEVICE page.
2356  */
2357 static bool migrate_vma_check_page(struct page *page)
2358 {
2359         /*
2360          * One extra ref because caller holds an extra reference, either from
2361          * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2362          * a device page.
2363          */
2364         int extra = 1;
2365
2366         /*
2367          * FIXME support THP (transparent huge page), it is bit more complex to
2368          * check them than regular pages, because they can be mapped with a pmd
2369          * or with a pte (split pte mapping).
2370          */
2371         if (PageCompound(page))
2372                 return false;
2373
2374         /* Page from ZONE_DEVICE have one extra reference */
2375         if (is_zone_device_page(page)) {
2376                 /*
2377                  * Private page can never be pin as they have no valid pte and
2378                  * GUP will fail for those. Yet if there is a pending migration
2379                  * a thread might try to wait on the pte migration entry and
2380                  * will bump the page reference count. Sadly there is no way to
2381                  * differentiate a regular pin from migration wait. Hence to
2382                  * avoid 2 racing thread trying to migrate back to CPU to enter
2383                  * infinite loop (one stoping migration because the other is
2384                  * waiting on pte migration entry). We always return true here.
2385                  *
2386                  * FIXME proper solution is to rework migration_entry_wait() so
2387                  * it does not need to take a reference on page.
2388                  */
2389                 if (is_device_private_page(page))
2390                         return true;
2391
2392                 /*
2393                  * Only allow device public page to be migrated and account for
2394                  * the extra reference count imply by ZONE_DEVICE pages.
2395                  */
2396                 if (!is_device_public_page(page))
2397                         return false;
2398                 extra++;
2399         }
2400
2401         /* For file back page */
2402         if (page_mapping(page))
2403                 extra += 1 + page_has_private(page);
2404
2405         if ((page_count(page) - extra) > page_mapcount(page))
2406                 return false;
2407
2408         return true;
2409 }
2410
2411 /*
2412  * migrate_vma_prepare() - lock pages and isolate them from the lru
2413  * @migrate: migrate struct containing all migration information
2414  *
2415  * This locks pages that have been collected by migrate_vma_collect(). Once each
2416  * page is locked it is isolated from the lru (for non-device pages). Finally,
2417  * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
2418  * migrated by concurrent kernel threads.
2419  */
2420 static void migrate_vma_prepare(struct migrate_vma *migrate)
2421 {
2422         const unsigned long npages = migrate->npages;
2423         const unsigned long start = migrate->start;
2424         unsigned long addr, i, restore = 0;
2425         bool allow_drain = true;
2426
2427         lru_add_drain();
2428
2429         for (i = 0; (i < npages) && migrate->cpages; i++) {
2430                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2431                 bool remap = true;
2432
2433                 if (!page)
2434                         continue;
2435
2436                 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
2437                         /*
2438                          * Because we are migrating several pages there can be
2439                          * a deadlock between 2 concurrent migration where each
2440                          * are waiting on each other page lock.
2441                          *
2442                          * Make migrate_vma() a best effort thing and backoff
2443                          * for any page we can not lock right away.
2444                          */
2445                         if (!trylock_page(page)) {
2446                                 migrate->src[i] = 0;
2447                                 migrate->cpages--;
2448                                 put_page(page);
2449                                 continue;
2450                         }
2451                         remap = false;
2452                         migrate->src[i] |= MIGRATE_PFN_LOCKED;
2453                 }
2454
2455                 /* ZONE_DEVICE pages are not on LRU */
2456                 if (!is_zone_device_page(page)) {
2457                         if (!PageLRU(page) && allow_drain) {
2458                                 /* Drain CPU's pagevec */
2459                                 lru_add_drain_all();
2460                                 allow_drain = false;
2461                         }
2462
2463                         if (isolate_lru_page(page)) {
2464                                 if (remap) {
2465                                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2466                                         migrate->cpages--;
2467                                         restore++;
2468                                 } else {
2469                                         migrate->src[i] = 0;
2470                                         unlock_page(page);
2471                                         migrate->cpages--;
2472                                         put_page(page);
2473                                 }
2474                                 continue;
2475                         }
2476
2477                         /* Drop the reference we took in collect */
2478                         put_page(page);
2479                 }
2480
2481                 if (!migrate_vma_check_page(page)) {
2482                         if (remap) {
2483                                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2484                                 migrate->cpages--;
2485                                 restore++;
2486
2487                                 if (!is_zone_device_page(page)) {
2488                                         get_page(page);
2489                                         putback_lru_page(page);
2490                                 }
2491                         } else {
2492                                 migrate->src[i] = 0;
2493                                 unlock_page(page);
2494                                 migrate->cpages--;
2495
2496                                 if (!is_zone_device_page(page))
2497                                         putback_lru_page(page);
2498                                 else
2499                                         put_page(page);
2500                         }
2501                 }
2502         }
2503
2504         for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
2505                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2506
2507                 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2508                         continue;
2509
2510                 remove_migration_pte(page, migrate->vma, addr, page);
2511
2512                 migrate->src[i] = 0;
2513                 unlock_page(page);
2514                 put_page(page);
2515                 restore--;
2516         }
2517 }
2518
2519 /*
2520  * migrate_vma_unmap() - replace page mapping with special migration pte entry
2521  * @migrate: migrate struct containing all migration information
2522  *
2523  * Replace page mapping (CPU page table pte) with a special migration pte entry
2524  * and check again if it has been pinned. Pinned pages are restored because we
2525  * cannot migrate them.
2526  *
2527  * This is the last step before we call the device driver callback to allocate
2528  * destination memory and copy contents of original page over to new page.
2529  */
2530 static void migrate_vma_unmap(struct migrate_vma *migrate)
2531 {
2532         int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
2533         const unsigned long npages = migrate->npages;
2534         const unsigned long start = migrate->start;
2535         unsigned long addr, i, restore = 0;
2536
2537         for (i = 0; i < npages; i++) {
2538                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2539
2540                 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2541                         continue;
2542
2543                 if (page_mapped(page)) {
2544                         try_to_unmap(page, flags);
2545                         if (page_mapped(page))
2546                                 goto restore;
2547                 }
2548
2549                 if (migrate_vma_check_page(page))
2550                         continue;
2551
2552 restore:
2553                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2554                 migrate->cpages--;
2555                 restore++;
2556         }
2557
2558         for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
2559                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2560
2561                 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2562                         continue;
2563
2564                 remove_migration_ptes(page, page, false);
2565
2566                 migrate->src[i] = 0;
2567                 unlock_page(page);
2568                 restore--;
2569
2570                 if (is_zone_device_page(page))
2571                         put_page(page);
2572                 else
2573                         putback_lru_page(page);
2574         }
2575 }
2576
2577 static void migrate_vma_insert_page(struct migrate_vma *migrate,
2578                                     unsigned long addr,
2579                                     struct page *page,
2580                                     unsigned long *src,
2581                                     unsigned long *dst)
2582 {
2583         struct vm_area_struct *vma = migrate->vma;
2584         struct mm_struct *mm = vma->vm_mm;
2585         struct mem_cgroup *memcg;
2586         bool flush = false;
2587         spinlock_t *ptl;
2588         pte_t entry;
2589         pgd_t *pgdp;
2590         p4d_t *p4dp;
2591         pud_t *pudp;
2592         pmd_t *pmdp;
2593         pte_t *ptep;
2594
2595         /* Only allow populating anonymous memory */
2596         if (!vma_is_anonymous(vma))
2597                 goto abort;
2598
2599         pgdp = pgd_offset(mm, addr);
2600         p4dp = p4d_alloc(mm, pgdp, addr);
2601         if (!p4dp)
2602                 goto abort;
2603         pudp = pud_alloc(mm, p4dp, addr);
2604         if (!pudp)
2605                 goto abort;
2606         pmdp = pmd_alloc(mm, pudp, addr);
2607         if (!pmdp)
2608                 goto abort;
2609
2610         if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2611                 goto abort;
2612
2613         /*
2614          * Use pte_alloc() instead of pte_alloc_map().  We can't run
2615          * pte_offset_map() on pmds where a huge pmd might be created
2616          * from a different thread.
2617          *
2618          * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
2619          * parallel threads are excluded by other means.
2620          *
2621          * Here we only have down_read(mmap_sem).
2622          */
2623         if (pte_alloc(mm, pmdp))
2624                 goto abort;
2625
2626         /* See the comment in pte_alloc_one_map() */
2627         if (unlikely(pmd_trans_unstable(pmdp)))
2628                 goto abort;
2629
2630         if (unlikely(anon_vma_prepare(vma)))
2631                 goto abort;
2632         if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
2633                 goto abort;
2634
2635         /*
2636          * The memory barrier inside __SetPageUptodate makes sure that
2637          * preceding stores to the page contents become visible before
2638          * the set_pte_at() write.
2639          */
2640         __SetPageUptodate(page);
2641
2642         if (is_zone_device_page(page)) {
2643                 if (is_device_private_page(page)) {
2644                         swp_entry_t swp_entry;
2645
2646                         swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
2647                         entry = swp_entry_to_pte(swp_entry);
2648                 } else if (is_device_public_page(page)) {
2649                         entry = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
2650                         if (vma->vm_flags & VM_WRITE)
2651                                 entry = pte_mkwrite(pte_mkdirty(entry));
2652                         entry = pte_mkdevmap(entry);
2653                 }
2654         } else {
2655                 entry = mk_pte(page, vma->vm_page_prot);
2656                 if (vma->vm_flags & VM_WRITE)
2657                         entry = pte_mkwrite(pte_mkdirty(entry));
2658         }
2659
2660         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2661
2662         if (pte_present(*ptep)) {
2663                 unsigned long pfn = pte_pfn(*ptep);
2664
2665                 if (!is_zero_pfn(pfn)) {
2666                         pte_unmap_unlock(ptep, ptl);
2667                         mem_cgroup_cancel_charge(page, memcg, false);
2668                         goto abort;
2669                 }
2670                 flush = true;
2671         } else if (!pte_none(*ptep)) {
2672                 pte_unmap_unlock(ptep, ptl);
2673                 mem_cgroup_cancel_charge(page, memcg, false);
2674                 goto abort;
2675         }
2676
2677         /*
2678          * Check for usefaultfd but do not deliver the fault. Instead,
2679          * just back off.
2680          */
2681         if (userfaultfd_missing(vma)) {
2682                 pte_unmap_unlock(ptep, ptl);
2683                 mem_cgroup_cancel_charge(page, memcg, false);
2684                 goto abort;
2685         }
2686
2687         inc_mm_counter(mm, MM_ANONPAGES);
2688         page_add_new_anon_rmap(page, vma, addr, false);
2689         mem_cgroup_commit_charge(page, memcg, false, false);
2690         if (!is_zone_device_page(page))
2691                 lru_cache_add_active_or_unevictable(page, vma);
2692         get_page(page);
2693
2694         if (flush) {
2695                 flush_cache_page(vma, addr, pte_pfn(*ptep));
2696                 ptep_clear_flush_notify(vma, addr, ptep);
2697                 set_pte_at_notify(mm, addr, ptep, entry);
2698                 update_mmu_cache(vma, addr, ptep);
2699         } else {
2700                 /* No need to invalidate - it was non-present before */
2701                 set_pte_at(mm, addr, ptep, entry);
2702                 update_mmu_cache(vma, addr, ptep);
2703         }
2704
2705         pte_unmap_unlock(ptep, ptl);
2706         *src = MIGRATE_PFN_MIGRATE;
2707         return;
2708
2709 abort:
2710         *src &= ~MIGRATE_PFN_MIGRATE;
2711 }
2712
2713 /*
2714  * migrate_vma_pages() - migrate meta-data from src page to dst page
2715  * @migrate: migrate struct containing all migration information
2716  *
2717  * This migrates struct page meta-data from source struct page to destination
2718  * struct page. This effectively finishes the migration from source page to the
2719  * destination page.
2720  */
2721 static void migrate_vma_pages(struct migrate_vma *migrate)
2722 {
2723         const unsigned long npages = migrate->npages;
2724         const unsigned long start = migrate->start;
2725         struct mmu_notifier_range range;
2726         unsigned long addr, i;
2727         bool notified = false;
2728
2729         for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
2730                 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2731                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2732                 struct address_space *mapping;
2733                 int r;
2734
2735                 if (!newpage) {
2736                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2737                         continue;
2738                 }
2739
2740                 if (!page) {
2741                         if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) {
2742                                 continue;
2743                         }
2744                         if (!notified) {
2745                                 notified = true;
2746
2747                                 mmu_notifier_range_init(&range,
2748                                                         migrate->vma->vm_mm,
2749                                                         addr, migrate->end);
2750                                 mmu_notifier_invalidate_range_start(&range);
2751                         }
2752                         migrate_vma_insert_page(migrate, addr, newpage,
2753                                                 &migrate->src[i],
2754                                                 &migrate->dst[i]);
2755                         continue;
2756                 }
2757
2758                 mapping = page_mapping(page);
2759
2760                 if (is_zone_device_page(newpage)) {
2761                         if (is_device_private_page(newpage)) {
2762                                 /*
2763                                  * For now only support private anonymous when
2764                                  * migrating to un-addressable device memory.
2765                                  */
2766                                 if (mapping) {
2767                                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2768                                         continue;
2769                                 }
2770                         } else if (!is_device_public_page(newpage)) {
2771                                 /*
2772                                  * Other types of ZONE_DEVICE page are not
2773                                  * supported.
2774                                  */
2775                                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2776                                 continue;
2777                         }
2778                 }
2779
2780                 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
2781                 if (r != MIGRATEPAGE_SUCCESS)
2782                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2783         }
2784
2785         /*
2786          * No need to double call mmu_notifier->invalidate_range() callback as
2787          * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
2788          * did already call it.
2789          */
2790         if (notified)
2791                 mmu_notifier_invalidate_range_only_end(&range);
2792 }
2793
2794 /*
2795  * migrate_vma_finalize() - restore CPU page table entry
2796  * @migrate: migrate struct containing all migration information
2797  *
2798  * This replaces the special migration pte entry with either a mapping to the
2799  * new page if migration was successful for that page, or to the original page
2800  * otherwise.
2801  *
2802  * This also unlocks the pages and puts them back on the lru, or drops the extra
2803  * refcount, for device pages.
2804  */
2805 static void migrate_vma_finalize(struct migrate_vma *migrate)
2806 {
2807         const unsigned long npages = migrate->npages;
2808         unsigned long i;
2809
2810         for (i = 0; i < npages; i++) {
2811                 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2812                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2813
2814                 if (!page) {
2815                         if (newpage) {
2816                                 unlock_page(newpage);
2817                                 put_page(newpage);
2818                         }
2819                         continue;
2820                 }
2821
2822                 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
2823                         if (newpage) {
2824                                 unlock_page(newpage);
2825                                 put_page(newpage);
2826                         }
2827                         newpage = page;
2828                 }
2829
2830                 remove_migration_ptes(page, newpage, false);
2831                 unlock_page(page);
2832                 migrate->cpages--;
2833
2834                 if (is_zone_device_page(page))
2835                         put_page(page);
2836                 else
2837                         putback_lru_page(page);
2838
2839                 if (newpage != page) {
2840                         unlock_page(newpage);
2841                         if (is_zone_device_page(newpage))
2842                                 put_page(newpage);
2843                         else
2844                                 putback_lru_page(newpage);
2845                 }
2846         }
2847 }
2848
2849 /*
2850  * migrate_vma() - migrate a range of memory inside vma
2851  *
2852  * @ops: migration callback for allocating destination memory and copying
2853  * @vma: virtual memory area containing the range to be migrated
2854  * @start: start address of the range to migrate (inclusive)
2855  * @end: end address of the range to migrate (exclusive)
2856  * @src: array of hmm_pfn_t containing source pfns
2857  * @dst: array of hmm_pfn_t containing destination pfns
2858  * @private: pointer passed back to each of the callback
2859  * Returns: 0 on success, error code otherwise
2860  *
2861  * This function tries to migrate a range of memory virtual address range, using
2862  * callbacks to allocate and copy memory from source to destination. First it
2863  * collects all the pages backing each virtual address in the range, saving this
2864  * inside the src array. Then it locks those pages and unmaps them. Once the pages
2865  * are locked and unmapped, it checks whether each page is pinned or not. Pages
2866  * that aren't pinned have the MIGRATE_PFN_MIGRATE flag set (by this function)
2867  * in the corresponding src array entry. It then restores any pages that are
2868  * pinned, by remapping and unlocking those pages.
2869  *
2870  * At this point it calls the alloc_and_copy() callback. For documentation on
2871  * what is expected from that callback, see struct migrate_vma_ops comments in
2872  * include/linux/migrate.h
2873  *
2874  * After the alloc_and_copy() callback, this function goes over each entry in
2875  * the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
2876  * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
2877  * then the function tries to migrate struct page information from the source
2878  * struct page to the destination struct page. If it fails to migrate the struct
2879  * page information, then it clears the MIGRATE_PFN_MIGRATE flag in the src
2880  * array.
2881  *
2882  * At this point all successfully migrated pages have an entry in the src
2883  * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2884  * array entry with MIGRATE_PFN_VALID flag set.
2885  *
2886  * It then calls the finalize_and_map() callback. See comments for "struct
2887  * migrate_vma_ops", in include/linux/migrate.h for details about
2888  * finalize_and_map() behavior.
2889  *
2890  * After the finalize_and_map() callback, for successfully migrated pages, this
2891  * function updates the CPU page table to point to new pages, otherwise it
2892  * restores the CPU page table to point to the original source pages.
2893  *
2894  * Function returns 0 after the above steps, even if no pages were migrated
2895  * (The function only returns an error if any of the arguments are invalid.)
2896  *
2897  * Both src and dst array must be big enough for (end - start) >> PAGE_SHIFT
2898  * unsigned long entries.
2899  */
2900 int migrate_vma(const struct migrate_vma_ops *ops,
2901                 struct vm_area_struct *vma,
2902                 unsigned long start,
2903                 unsigned long end,
2904                 unsigned long *src,
2905                 unsigned long *dst,
2906                 void *private)
2907 {
2908         struct migrate_vma migrate;
2909
2910         /* Sanity check the arguments */
2911         start &= PAGE_MASK;
2912         end &= PAGE_MASK;
2913         if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
2914                         vma_is_dax(vma))
2915                 return -EINVAL;
2916         if (start < vma->vm_start || start >= vma->vm_end)
2917                 return -EINVAL;
2918         if (end <= vma->vm_start || end > vma->vm_end)
2919                 return -EINVAL;
2920         if (!ops || !src || !dst || start >= end)
2921                 return -EINVAL;
2922
2923         memset(src, 0, sizeof(*src) * ((end - start) >> PAGE_SHIFT));
2924         migrate.src = src;
2925         migrate.dst = dst;
2926         migrate.start = start;
2927         migrate.npages = 0;
2928         migrate.cpages = 0;
2929         migrate.end = end;
2930         migrate.vma = vma;
2931
2932         /* Collect, and try to unmap source pages */
2933         migrate_vma_collect(&migrate);
2934         if (!migrate.cpages)
2935                 return 0;
2936
2937         /* Lock and isolate page */
2938         migrate_vma_prepare(&migrate);
2939         if (!migrate.cpages)
2940                 return 0;
2941
2942         /* Unmap pages */
2943         migrate_vma_unmap(&migrate);
2944         if (!migrate.cpages)
2945                 return 0;
2946
2947         /*
2948          * At this point pages are locked and unmapped, and thus they have
2949          * stable content and can safely be copied to destination memory that
2950          * is allocated by the callback.
2951          *
2952          * Note that migration can fail in migrate_vma_struct_page() for each
2953          * individual page.
2954          */
2955         ops->alloc_and_copy(vma, src, dst, start, end, private);
2956
2957         /* This does the real migration of struct page */
2958         migrate_vma_pages(&migrate);
2959
2960         ops->finalize_and_map(vma, src, dst, start, end, private);
2961
2962         /* Unlock and remap pages */
2963         migrate_vma_finalize(&migrate);
2964
2965         return 0;
2966 }
2967 EXPORT_SYMBOL(migrate_vma);
2968 #endif /* defined(MIGRATE_VMA_HELPER) */