xref: /openbmc/linux/mm/migrate.c (revision d6e0cbb1)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15 
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/pagevec.h>
25 #include <linux/ksm.h>
26 #include <linux/rmap.h>
27 #include <linux/topology.h>
28 #include <linux/cpu.h>
29 #include <linux/cpuset.h>
30 #include <linux/writeback.h>
31 #include <linux/mempolicy.h>
32 #include <linux/vmalloc.h>
33 #include <linux/security.h>
34 #include <linux/backing-dev.h>
35 #include <linux/compaction.h>
36 #include <linux/syscalls.h>
37 #include <linux/compat.h>
38 #include <linux/hugetlb.h>
39 #include <linux/hugetlb_cgroup.h>
40 #include <linux/gfp.h>
41 #include <linux/pfn_t.h>
42 #include <linux/memremap.h>
43 #include <linux/userfaultfd_k.h>
44 #include <linux/balloon_compaction.h>
45 #include <linux/mmu_notifier.h>
46 #include <linux/page_idle.h>
47 #include <linux/page_owner.h>
48 #include <linux/sched/mm.h>
49 #include <linux/ptrace.h>
50 
51 #include <asm/tlbflush.h>
52 
53 #define CREATE_TRACE_POINTS
54 #include <trace/events/migrate.h>
55 
56 #include "internal.h"
57 
58 /*
59  * migrate_prep() needs to be called before we start compiling a list of pages
60  * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
61  * undesirable, use migrate_prep_local().
62  */
63 int migrate_prep(void)
64 {
65 	/*
66 	 * Clear the LRU lists so pages can be isolated.
67 	 * Note that pages may be moved off the LRU after we have
68 	 * drained them. Those pages will fail to migrate like other
69 	 * pages that may be busy.
70 	 */
71 	lru_add_drain_all();
72 
73 	return 0;
74 }
75 
76 /* Do the necessary work of migrate_prep but not if it involves other CPUs */
77 int migrate_prep_local(void)
78 {
79 	lru_add_drain();
80 
81 	return 0;
82 }
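/*
 * Illustrative sketch, not part of this file: a typical caller drains the
 * per-cpu LRU caches, isolates pages onto a private list and hands them to
 * migrate_pages(), putting back whatever could not be moved.  Error handling
 * and NR_ISOLATED_* accounting are omitted; compare do_move_pages_to_node()
 * and add_page_for_migration() later in this file.
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(page))
 *		list_add_tail(&page->lru, &pagelist);
 *	if (migrate_pages(&pagelist, alloc_new_node_page, NULL, nid,
 *			MIGRATE_SYNC, MR_SYSCALL))
 *		putback_movable_pages(&pagelist);
 */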
83 
84 int isolate_movable_page(struct page *page, isolate_mode_t mode)
85 {
86 	struct address_space *mapping;
87 
88 	/*
89 	 * Avoid burning cycles with pages that are yet under __free_pages(),
90 	 * or just got freed under us.
91 	 *
92 	 * In case we 'win' a race for a movable page being freed under us and
93 	 * raise its refcount, preventing __free_pages() from doing its job,
94 	 * the put_page() at the end of this block will take care of
95 	 * releasing this page, thus avoiding a nasty leak.
96 	 */
97 	if (unlikely(!get_page_unless_zero(page)))
98 		goto out;
99 
100 	/*
101 	 * Check PageMovable before holding a PG_lock because page's owner
102 	 * assumes that nobody touches the PG_lock of a newly allocated page,
103 	 * so unconditionally grabbing the lock would break that assumption.
104 	 */
105 	if (unlikely(!__PageMovable(page)))
106 		goto out_putpage;
107 	/*
108 	 * As movable pages are not isolated from LRU lists, concurrent
109 	 * compaction threads can race against page migration functions
110 	 * as well as race against the release of a page.
111 	 *
112 	 * In order to avoid having an already isolated movable page
113 	 * being (wrongly) re-isolated while it is under migration,
114 	 * or to avoid attempting to isolate pages being released,
115 	 * let's be sure we have the page lock
116 	 * before proceeding with the movable page isolation steps.
117 	 */
118 	if (unlikely(!trylock_page(page)))
119 		goto out_putpage;
120 
121 	if (!PageMovable(page) || PageIsolated(page))
122 		goto out_no_isolated;
123 
124 	mapping = page_mapping(page);
125 	VM_BUG_ON_PAGE(!mapping, page);
126 
127 	if (!mapping->a_ops->isolate_page(page, mode))
128 		goto out_no_isolated;
129 
130 	/* Driver shouldn't use PG_isolated bit of page->flags */
131 	WARN_ON_ONCE(PageIsolated(page));
132 	__SetPageIsolated(page);
133 	unlock_page(page);
134 
135 	return 0;
136 
137 out_no_isolated:
138 	unlock_page(page);
139 out_putpage:
140 	put_page(page);
141 out:
142 	return -EBUSY;
143 }
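/*
 * Illustrative sketch, not part of this file: a driver that wants its pages
 * handled by this non-LRU movable path supplies isolate/migrate/putback
 * callbacks in its address_space_operations and marks each page with
 * __SetPageMovable().  The "demo_*" names below are hypothetical.
 *
 *	static const struct address_space_operations demo_aops = {
 *		.isolate_page	= demo_isolate_page,
 *		.migratepage	= demo_migratepage,
 *		.putback_page	= demo_putback_page,
 *	};
 *
 *	__SetPageMovable(page, mapping);
 *
 * where mapping->a_ops points at demo_aops.
 */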
144 
145 /* It should be called on a page which is PG_movable */
146 void putback_movable_page(struct page *page)
147 {
148 	struct address_space *mapping;
149 
150 	VM_BUG_ON_PAGE(!PageLocked(page), page);
151 	VM_BUG_ON_PAGE(!PageMovable(page), page);
152 	VM_BUG_ON_PAGE(!PageIsolated(page), page);
153 
154 	mapping = page_mapping(page);
155 	mapping->a_ops->putback_page(page);
156 	__ClearPageIsolated(page);
157 }
158 
159 /*
160  * Put previously isolated pages back onto the appropriate lists
161  * from where they were once taken off for compaction/migration.
162  *
163  * This function shall be used whenever the isolated pageset has been
164  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
165  * and isolate_huge_page().
166  */
167 void putback_movable_pages(struct list_head *l)
168 {
169 	struct page *page;
170 	struct page *page2;
171 
172 	list_for_each_entry_safe(page, page2, l, lru) {
173 		if (unlikely(PageHuge(page))) {
174 			putback_active_hugepage(page);
175 			continue;
176 		}
177 		list_del(&page->lru);
178 		/*
179 		 * We isolated non-lru movable page so here we can use
180 		 * __PageMovable because LRU page's mapping cannot have
181 		 * PAGE_MAPPING_MOVABLE.
182 		 */
183 		if (unlikely(__PageMovable(page))) {
184 			VM_BUG_ON_PAGE(!PageIsolated(page), page);
185 			lock_page(page);
186 			if (PageMovable(page))
187 				putback_movable_page(page);
188 			else
189 				__ClearPageIsolated(page);
190 			unlock_page(page);
191 			put_page(page);
192 		} else {
193 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
194 					page_is_file_cache(page), -hpage_nr_pages(page));
195 			putback_lru_page(page);
196 		}
197 	}
198 }
199 
200 /*
201  * Restore a potential migration pte to a working pte entry
202  */
203 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
204 				 unsigned long addr, void *old)
205 {
206 	struct page_vma_mapped_walk pvmw = {
207 		.page = old,
208 		.vma = vma,
209 		.address = addr,
210 		.flags = PVMW_SYNC | PVMW_MIGRATION,
211 	};
212 	struct page *new;
213 	pte_t pte;
214 	swp_entry_t entry;
215 
216 	VM_BUG_ON_PAGE(PageTail(page), page);
217 	while (page_vma_mapped_walk(&pvmw)) {
218 		if (PageKsm(page))
219 			new = page;
220 		else
221 			new = page - pvmw.page->index +
222 				linear_page_index(vma, pvmw.address);
223 
224 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
225 		/* PMD-mapped THP migration entry */
226 		if (!pvmw.pte) {
227 			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
228 			remove_migration_pmd(&pvmw, new);
229 			continue;
230 		}
231 #endif
232 
233 		get_page(new);
234 		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
235 		if (pte_swp_soft_dirty(*pvmw.pte))
236 			pte = pte_mksoft_dirty(pte);
237 
238 		/*
239 		 * Recheck VMA as permissions can change since migration started
240 		 */
241 		entry = pte_to_swp_entry(*pvmw.pte);
242 		if (is_write_migration_entry(entry))
243 			pte = maybe_mkwrite(pte, vma);
244 
245 		if (unlikely(is_zone_device_page(new))) {
246 			if (is_device_private_page(new)) {
247 				entry = make_device_private_entry(new, pte_write(pte));
248 				pte = swp_entry_to_pte(entry);
249 			}
250 		}
251 
252 #ifdef CONFIG_HUGETLB_PAGE
253 		if (PageHuge(new)) {
254 			pte = pte_mkhuge(pte);
255 			pte = arch_make_huge_pte(pte, vma, new, 0);
256 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
257 			if (PageAnon(new))
258 				hugepage_add_anon_rmap(new, vma, pvmw.address);
259 			else
260 				page_dup_rmap(new, true);
261 		} else
262 #endif
263 		{
264 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
265 
266 			if (PageAnon(new))
267 				page_add_anon_rmap(new, vma, pvmw.address, false);
268 			else
269 				page_add_file_rmap(new, false);
270 		}
271 		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
272 			mlock_vma_page(new);
273 
274 		if (PageTransHuge(page) && PageMlocked(page))
275 			clear_page_mlock(page);
276 
277 		/* No need to invalidate - it was non-present before */
278 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
279 	}
280 
281 	return true;
282 }
283 
284 /*
285  * Get rid of all migration entries and replace them by
286  * references to the indicated page.
287  */
288 void remove_migration_ptes(struct page *old, struct page *new, bool locked)
289 {
290 	struct rmap_walk_control rwc = {
291 		.rmap_one = remove_migration_pte,
292 		.arg = old,
293 	};
294 
295 	if (locked)
296 		rmap_walk_locked(new, &rwc);
297 	else
298 		rmap_walk(new, &rwc);
299 }
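/*
 * For reference (taken from __unmap_and_move() later in this file): the
 * migration entries removed here are installed by the unmap side roughly as
 *
 *	try_to_unmap(page,
 *		TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 *	...
 *	remove_migration_ptes(page,
 *		rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
 */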
300 
301 /*
302  * Something used the pte of a page under migration. We need to
303  * get to the page and wait until migration is finished.
304  * When we return from this function the fault will be retried.
305  */
306 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
307 				spinlock_t *ptl)
308 {
309 	pte_t pte;
310 	swp_entry_t entry;
311 	struct page *page;
312 
313 	spin_lock(ptl);
314 	pte = *ptep;
315 	if (!is_swap_pte(pte))
316 		goto out;
317 
318 	entry = pte_to_swp_entry(pte);
319 	if (!is_migration_entry(entry))
320 		goto out;
321 
322 	page = migration_entry_to_page(entry);
323 
324 	/*
325 	 * Once the page cache replacement step of migration has started, page_count
326 	 * is zero; but we must not call put_and_wait_on_page_locked() without
327 	 * a ref. Use get_page_unless_zero(), and just fault again if it fails.
328 	 */
329 	if (!get_page_unless_zero(page))
330 		goto out;
331 	pte_unmap_unlock(ptep, ptl);
332 	put_and_wait_on_page_locked(page);
333 	return;
334 out:
335 	pte_unmap_unlock(ptep, ptl);
336 }
337 
338 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
339 				unsigned long address)
340 {
341 	spinlock_t *ptl = pte_lockptr(mm, pmd);
342 	pte_t *ptep = pte_offset_map(pmd, address);
343 	__migration_entry_wait(mm, ptep, ptl);
344 }
345 
346 void migration_entry_wait_huge(struct vm_area_struct *vma,
347 		struct mm_struct *mm, pte_t *pte)
348 {
349 	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
350 	__migration_entry_wait(mm, pte, ptl);
351 }
352 
353 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
354 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
355 {
356 	spinlock_t *ptl;
357 	struct page *page;
358 
359 	ptl = pmd_lock(mm, pmd);
360 	if (!is_pmd_migration_entry(*pmd))
361 		goto unlock;
362 	page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
363 	if (!get_page_unless_zero(page))
364 		goto unlock;
365 	spin_unlock(ptl);
366 	put_and_wait_on_page_locked(page);
367 	return;
368 unlock:
369 	spin_unlock(ptl);
370 }
371 #endif
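/*
 * Illustrative sketch, condensed from the fault path in mm/memory.c and not
 * part of this file: a fault on a migration entry just waits for the
 * migration to finish and the fault is then retried.
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry)) {
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 *		goto out;
 *	}
 */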
372 
373 static int expected_page_refs(struct address_space *mapping, struct page *page)
374 {
375 	int expected_count = 1;
376 
377 	/*
378 	 * Device public or private pages have an extra refcount as they are
379 	 * ZONE_DEVICE pages.
380 	 */
381 	expected_count += is_device_private_page(page);
382 	if (mapping)
383 		expected_count += hpage_nr_pages(page) + page_has_private(page);
384 
385 	return expected_count;
386 }
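/*
 * Worked example (illustrative): for an order-0 page-cache page with buffer
 * heads attached, expected_page_refs() yields 1 (caller) + 1 (page cache)
 * + 1 (PagePrivate) = 3, matching the comment above
 * migrate_page_move_mapping() below.
 */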
387 
388 /*
389  * Replace the page in the mapping.
390  *
391  * The number of remaining references must be:
392  * 1 for anonymous pages without a mapping
393  * 2 for pages with a mapping
394  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
395  */
396 int migrate_page_move_mapping(struct address_space *mapping,
397 		struct page *newpage, struct page *page, int extra_count)
398 {
399 	XA_STATE(xas, &mapping->i_pages, page_index(page));
400 	struct zone *oldzone, *newzone;
401 	int dirty;
402 	int expected_count = expected_page_refs(mapping, page) + extra_count;
403 
404 	if (!mapping) {
405 		/* Anonymous page without mapping */
406 		if (page_count(page) != expected_count)
407 			return -EAGAIN;
408 
409 		/* No turning back from here */
410 		newpage->index = page->index;
411 		newpage->mapping = page->mapping;
412 		if (PageSwapBacked(page))
413 			__SetPageSwapBacked(newpage);
414 
415 		return MIGRATEPAGE_SUCCESS;
416 	}
417 
418 	oldzone = page_zone(page);
419 	newzone = page_zone(newpage);
420 
421 	xas_lock_irq(&xas);
422 	if (page_count(page) != expected_count || xas_load(&xas) != page) {
423 		xas_unlock_irq(&xas);
424 		return -EAGAIN;
425 	}
426 
427 	if (!page_ref_freeze(page, expected_count)) {
428 		xas_unlock_irq(&xas);
429 		return -EAGAIN;
430 	}
431 
432 	/*
433 	 * Now we know that no one else is looking at the page:
434 	 * no turning back from here.
435 	 */
436 	newpage->index = page->index;
437 	newpage->mapping = page->mapping;
438 	page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
439 	if (PageSwapBacked(page)) {
440 		__SetPageSwapBacked(newpage);
441 		if (PageSwapCache(page)) {
442 			SetPageSwapCache(newpage);
443 			set_page_private(newpage, page_private(page));
444 		}
445 	} else {
446 		VM_BUG_ON_PAGE(PageSwapCache(page), page);
447 	}
448 
449 	/* Move dirty while page refs frozen and newpage not yet exposed */
450 	dirty = PageDirty(page);
451 	if (dirty) {
452 		ClearPageDirty(page);
453 		SetPageDirty(newpage);
454 	}
455 
456 	xas_store(&xas, newpage);
457 	if (PageTransHuge(page)) {
458 		int i;
459 
460 		for (i = 1; i < HPAGE_PMD_NR; i++) {
461 			xas_next(&xas);
462 			xas_store(&xas, newpage + i);
463 		}
464 	}
465 
466 	/*
467 	 * Drop cache reference from old page by unfreezing
468 	 * to one less reference.
469 	 * We know this isn't the last reference.
470 	 */
471 	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
472 
473 	xas_unlock(&xas);
474 	/* Leave irq disabled to prevent preemption while updating stats */
475 
476 	/*
477 	 * If moved to a different zone then also account
478 	 * the page for that zone. Other VM counters will be
479 	 * taken care of when we establish references to the
480 	 * new page and drop references to the old page.
481 	 *
482 	 * Note that anonymous pages are accounted for
483 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
484 	 * are mapped to swap space.
485 	 */
486 	if (newzone != oldzone) {
487 		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
488 		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
489 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
490 			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
491 			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
492 		}
493 		if (dirty && mapping_cap_account_dirty(mapping)) {
494 			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
495 			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
496 			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
497 			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
498 		}
499 	}
500 	local_irq_enable();
501 
502 	return MIGRATEPAGE_SUCCESS;
503 }
504 EXPORT_SYMBOL(migrate_page_move_mapping);
505 
506 /*
507  * The expected number of remaining references is the same as that
508  * of migrate_page_move_mapping().
509  */
510 int migrate_huge_page_move_mapping(struct address_space *mapping,
511 				   struct page *newpage, struct page *page)
512 {
513 	XA_STATE(xas, &mapping->i_pages, page_index(page));
514 	int expected_count;
515 
516 	xas_lock_irq(&xas);
517 	expected_count = 2 + page_has_private(page);
518 	if (page_count(page) != expected_count || xas_load(&xas) != page) {
519 		xas_unlock_irq(&xas);
520 		return -EAGAIN;
521 	}
522 
523 	if (!page_ref_freeze(page, expected_count)) {
524 		xas_unlock_irq(&xas);
525 		return -EAGAIN;
526 	}
527 
528 	newpage->index = page->index;
529 	newpage->mapping = page->mapping;
530 
531 	get_page(newpage);
532 
533 	xas_store(&xas, newpage);
534 
535 	page_ref_unfreeze(page, expected_count - 1);
536 
537 	xas_unlock_irq(&xas);
538 
539 	return MIGRATEPAGE_SUCCESS;
540 }
541 
542 /*
543  * Gigantic pages are so large that we do not guarantee that page++ pointer
544  * arithmetic will work across the entire page.  We need something more
545  * specialized.
546  */
547 static void __copy_gigantic_page(struct page *dst, struct page *src,
548 				int nr_pages)
549 {
550 	int i;
551 	struct page *dst_base = dst;
552 	struct page *src_base = src;
553 
554 	for (i = 0; i < nr_pages; ) {
555 		cond_resched();
556 		copy_highpage(dst, src);
557 
558 		i++;
559 		dst = mem_map_next(dst, dst_base, i);
560 		src = mem_map_next(src, src_base, i);
561 	}
562 }
563 
564 static void copy_huge_page(struct page *dst, struct page *src)
565 {
566 	int i;
567 	int nr_pages;
568 
569 	if (PageHuge(src)) {
570 		/* hugetlbfs page */
571 		struct hstate *h = page_hstate(src);
572 		nr_pages = pages_per_huge_page(h);
573 
574 		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
575 			__copy_gigantic_page(dst, src, nr_pages);
576 			return;
577 		}
578 	} else {
579 		/* thp page */
580 		BUG_ON(!PageTransHuge(src));
581 		nr_pages = hpage_nr_pages(src);
582 	}
583 
584 	for (i = 0; i < nr_pages; i++) {
585 		cond_resched();
586 		copy_highpage(dst + i, src + i);
587 	}
588 }
589 
590 /*
591  * Copy the page to its new location
592  */
593 void migrate_page_states(struct page *newpage, struct page *page)
594 {
595 	int cpupid;
596 
597 	if (PageError(page))
598 		SetPageError(newpage);
599 	if (PageReferenced(page))
600 		SetPageReferenced(newpage);
601 	if (PageUptodate(page))
602 		SetPageUptodate(newpage);
603 	if (TestClearPageActive(page)) {
604 		VM_BUG_ON_PAGE(PageUnevictable(page), page);
605 		SetPageActive(newpage);
606 	} else if (TestClearPageUnevictable(page))
607 		SetPageUnevictable(newpage);
608 	if (PageWorkingset(page))
609 		SetPageWorkingset(newpage);
610 	if (PageChecked(page))
611 		SetPageChecked(newpage);
612 	if (PageMappedToDisk(page))
613 		SetPageMappedToDisk(newpage);
614 
615 	/* Move the dirty bit on pages not handled by migrate_page_move_mapping() */
616 	if (PageDirty(page))
617 		SetPageDirty(newpage);
618 
619 	if (page_is_young(page))
620 		set_page_young(newpage);
621 	if (page_is_idle(page))
622 		set_page_idle(newpage);
623 
624 	/*
625 	 * Copy NUMA information to the new page, to prevent over-eager
626 	 * future migrations of this same page.
627 	 */
628 	cpupid = page_cpupid_xchg_last(page, -1);
629 	page_cpupid_xchg_last(newpage, cpupid);
630 
631 	ksm_migrate_page(newpage, page);
632 	/*
633 	 * Please do not reorder this without considering how mm/ksm.c's
634 	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
635 	 */
636 	if (PageSwapCache(page))
637 		ClearPageSwapCache(page);
638 	ClearPagePrivate(page);
639 	set_page_private(page, 0);
640 
641 	/*
642 	 * If any waiters have accumulated on the new page then
643 	 * wake them up.
644 	 */
645 	if (PageWriteback(newpage))
646 		end_page_writeback(newpage);
647 
648 	copy_page_owner(page, newpage);
649 
650 	mem_cgroup_migrate(page, newpage);
651 }
652 EXPORT_SYMBOL(migrate_page_states);
653 
654 void migrate_page_copy(struct page *newpage, struct page *page)
655 {
656 	if (PageHuge(page) || PageTransHuge(page))
657 		copy_huge_page(newpage, page);
658 	else
659 		copy_highpage(newpage, page);
660 
661 	migrate_page_states(newpage, page);
662 }
663 EXPORT_SYMBOL(migrate_page_copy);
664 
665 /************************************************************
666  *                    Migration functions
667  ***********************************************************/
668 
669 /*
670  * Common logic to directly migrate a single LRU page suitable for
671  * pages that do not use PagePrivate/PagePrivate2.
672  *
673  * Pages are locked upon entry and exit.
674  */
675 int migrate_page(struct address_space *mapping,
676 		struct page *newpage, struct page *page,
677 		enum migrate_mode mode)
678 {
679 	int rc;
680 
681 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
682 
683 	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
684 
685 	if (rc != MIGRATEPAGE_SUCCESS)
686 		return rc;
687 
688 	if (mode != MIGRATE_SYNC_NO_COPY)
689 		migrate_page_copy(newpage, page);
690 	else
691 		migrate_page_states(newpage, page);
692 	return MIGRATEPAGE_SUCCESS;
693 }
694 EXPORT_SYMBOL(migrate_page);
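/*
 * Illustrative sketch, not part of this file: filesystems whose pages carry
 * no fs-private data typically point their migratepage hook straight at
 * migrate_page(); shmem and the swap address space do exactly this.  The
 * aops name below is hypothetical.
 *
 *	static const struct address_space_operations demo_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 */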
695 
696 #ifdef CONFIG_BLOCK
697 /* Returns true if all buffers are successfully locked */
698 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
699 							enum migrate_mode mode)
700 {
701 	struct buffer_head *bh = head;
702 
703 	/* Simple case, sync compaction */
704 	if (mode != MIGRATE_ASYNC) {
705 		do {
706 			lock_buffer(bh);
707 			bh = bh->b_this_page;
708 
709 		} while (bh != head);
710 
711 		return true;
712 	}
713 
714 	/* async case, we cannot block on lock_buffer so use trylock_buffer */
715 	do {
716 		if (!trylock_buffer(bh)) {
717 			/*
718 			 * We failed to lock the buffer and cannot stall in
719 			 * async migration. Release the taken locks
720 			 */
721 			struct buffer_head *failed_bh = bh;
722 			bh = head;
723 			while (bh != failed_bh) {
724 				unlock_buffer(bh);
725 				bh = bh->b_this_page;
726 			}
727 			return false;
728 		}
729 
730 		bh = bh->b_this_page;
731 	} while (bh != head);
732 	return true;
733 }
734 
735 static int __buffer_migrate_page(struct address_space *mapping,
736 		struct page *newpage, struct page *page, enum migrate_mode mode,
737 		bool check_refs)
738 {
739 	struct buffer_head *bh, *head;
740 	int rc;
741 	int expected_count;
742 
743 	if (!page_has_buffers(page))
744 		return migrate_page(mapping, newpage, page, mode);
745 
746 	/* Check whether page does not have extra refs before we do more work */
747 	expected_count = expected_page_refs(mapping, page);
748 	if (page_count(page) != expected_count)
749 		return -EAGAIN;
750 
751 	head = page_buffers(page);
752 	if (!buffer_migrate_lock_buffers(head, mode))
753 		return -EAGAIN;
754 
755 	if (check_refs) {
756 		bool busy;
757 		bool invalidated = false;
758 
759 recheck_buffers:
760 		busy = false;
761 		spin_lock(&mapping->private_lock);
762 		bh = head;
763 		do {
764 			if (atomic_read(&bh->b_count)) {
765 				busy = true;
766 				break;
767 			}
768 			bh = bh->b_this_page;
769 		} while (bh != head);
770 		if (busy) {
771 			if (invalidated) {
772 				rc = -EAGAIN;
773 				goto unlock_buffers;
774 			}
775 			spin_unlock(&mapping->private_lock);
776 			invalidate_bh_lrus();
777 			invalidated = true;
778 			goto recheck_buffers;
779 		}
780 	}
781 
782 	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
783 	if (rc != MIGRATEPAGE_SUCCESS)
784 		goto unlock_buffers;
785 
786 	ClearPagePrivate(page);
787 	set_page_private(newpage, page_private(page));
788 	set_page_private(page, 0);
789 	put_page(page);
790 	get_page(newpage);
791 
792 	bh = head;
793 	do {
794 		set_bh_page(bh, newpage, bh_offset(bh));
795 		bh = bh->b_this_page;
796 
797 	} while (bh != head);
798 
799 	SetPagePrivate(newpage);
800 
801 	if (mode != MIGRATE_SYNC_NO_COPY)
802 		migrate_page_copy(newpage, page);
803 	else
804 		migrate_page_states(newpage, page);
805 
806 	rc = MIGRATEPAGE_SUCCESS;
807 unlock_buffers:
808 	if (check_refs)
809 		spin_unlock(&mapping->private_lock);
810 	bh = head;
811 	do {
812 		unlock_buffer(bh);
813 		bh = bh->b_this_page;
814 
815 	} while (bh != head);
816 
817 	return rc;
818 }
819 
820 /*
821  * Migration function for pages with buffers. This function can only be used
822  * if the underlying filesystem guarantees that no other references to "page"
823  * exist. For example attached buffer heads are accessed only under page lock.
824  */
825 int buffer_migrate_page(struct address_space *mapping,
826 		struct page *newpage, struct page *page, enum migrate_mode mode)
827 {
828 	return __buffer_migrate_page(mapping, newpage, page, mode, false);
829 }
830 EXPORT_SYMBOL(buffer_migrate_page);
831 
832 /*
833  * Same as above except that this variant is more careful and checks that there
834  * are also no buffer head references. This function is the right one for
835  * mappings where buffer heads are directly looked up and referenced (such as
836  * block device mappings).
837  */
838 int buffer_migrate_page_norefs(struct address_space *mapping,
839 		struct page *newpage, struct page *page, enum migrate_mode mode)
840 {
841 	return __buffer_migrate_page(mapping, newpage, page, mode, true);
842 }
843 #endif
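/*
 * Illustrative note, not part of this file: block-backed mappings pick one of
 * the two variants above for their migratepage hook.  Filesystems such as
 * ext2 use buffer_migrate_page(), while the block device mapping in
 * fs/block_dev.c uses buffer_migrate_page_norefs(), because its buffer heads
 * can be looked up and pinned directly, e.g.:
 *
 *	static const struct address_space_operations def_blk_aops = {
 *		...
 *		.migratepage	= buffer_migrate_page_norefs,
 *	};
 */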
844 
845 /*
846  * Writeback a page to clean the dirty state
847  */
848 static int writeout(struct address_space *mapping, struct page *page)
849 {
850 	struct writeback_control wbc = {
851 		.sync_mode = WB_SYNC_NONE,
852 		.nr_to_write = 1,
853 		.range_start = 0,
854 		.range_end = LLONG_MAX,
855 		.for_reclaim = 1
856 	};
857 	int rc;
858 
859 	if (!mapping->a_ops->writepage)
860 		/* No write method for the address space */
861 		return -EINVAL;
862 
863 	if (!clear_page_dirty_for_io(page))
864 		/* Someone else already triggered a write */
865 		return -EAGAIN;
866 
867 	/*
868 	 * A dirty page may imply that the underlying filesystem has
869 	 * the page on some queue. So the page must be clean for
870 	 * migration. Writeout may mean we lose the lock and the
871 	 * page state is no longer what we checked for earlier.
872 	 * At this point we know that the migration attempt cannot
873 	 * be successful.
874 	 */
875 	remove_migration_ptes(page, page, false);
876 
877 	rc = mapping->a_ops->writepage(page, &wbc);
878 
879 	if (rc != AOP_WRITEPAGE_ACTIVATE)
880 		/* unlocked. Relock */
881 		lock_page(page);
882 
883 	return (rc < 0) ? -EIO : -EAGAIN;
884 }
885 
886 /*
887  * Default handling if a filesystem does not provide a migration function.
888  */
889 static int fallback_migrate_page(struct address_space *mapping,
890 	struct page *newpage, struct page *page, enum migrate_mode mode)
891 {
892 	if (PageDirty(page)) {
893 		/* Only writeback pages in full synchronous migration */
894 		switch (mode) {
895 		case MIGRATE_SYNC:
896 		case MIGRATE_SYNC_NO_COPY:
897 			break;
898 		default:
899 			return -EBUSY;
900 		}
901 		return writeout(mapping, page);
902 	}
903 
904 	/*
905 	 * Buffers may be managed in a filesystem specific way.
906 	 * We must have no buffers or drop them.
907 	 */
908 	if (page_has_private(page) &&
909 	    !try_to_release_page(page, GFP_KERNEL))
910 		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
911 
912 	return migrate_page(mapping, newpage, page, mode);
913 }
914 
915 /*
916  * Move a page to a newly allocated page
917  * The page is locked and all ptes have been successfully removed.
918  *
919  * The new page will have replaced the old page if this function
920  * is successful.
921  *
922  * Return value:
923  *   < 0 - error code
924  *  MIGRATEPAGE_SUCCESS - success
925  */
926 static int move_to_new_page(struct page *newpage, struct page *page,
927 				enum migrate_mode mode)
928 {
929 	struct address_space *mapping;
930 	int rc = -EAGAIN;
931 	bool is_lru = !__PageMovable(page);
932 
933 	VM_BUG_ON_PAGE(!PageLocked(page), page);
934 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
935 
936 	mapping = page_mapping(page);
937 
938 	if (likely(is_lru)) {
939 		if (!mapping)
940 			rc = migrate_page(mapping, newpage, page, mode);
941 		else if (mapping->a_ops->migratepage)
942 			/*
943 			 * Most pages have a mapping and most filesystems
944 			 * provide a migratepage callback. Anonymous pages
945 			 * are part of swap space which also has its own
946 			 * migratepage callback. This is the most common path
947 			 * for page migration.
948 			 */
949 			rc = mapping->a_ops->migratepage(mapping, newpage,
950 							page, mode);
951 		else
952 			rc = fallback_migrate_page(mapping, newpage,
953 							page, mode);
954 	} else {
955 		/*
956 		 * In case of a non-LRU page, it could have been released after the
957 		 * isolation step. In that case, we shouldn't try migration.
958 		 */
959 		VM_BUG_ON_PAGE(!PageIsolated(page), page);
960 		if (!PageMovable(page)) {
961 			rc = MIGRATEPAGE_SUCCESS;
962 			__ClearPageIsolated(page);
963 			goto out;
964 		}
965 
966 		rc = mapping->a_ops->migratepage(mapping, newpage,
967 						page, mode);
968 		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
969 			!PageIsolated(page));
970 	}
971 
972 	/*
973 	 * When successful, old pagecache page->mapping must be cleared before
974 	 * page is freed; but stats require that PageAnon be left as PageAnon.
975 	 */
976 	if (rc == MIGRATEPAGE_SUCCESS) {
977 		if (__PageMovable(page)) {
978 			VM_BUG_ON_PAGE(!PageIsolated(page), page);
979 
980 			/*
981 			 * We clear PG_movable under page_lock so any compactor
982 			 * cannot try to migrate this page.
983 			 */
984 			__ClearPageIsolated(page);
985 		}
986 
987 		/*
988 		 * Anonymous and movable page->mapping will be cleared by
989 		 * free_pages_prepare(), so don't reset it here; keeping it lets
990 		 * checks such as PageAnon() still work.
991 		 */
992 		if (!PageMappingFlags(page))
993 			page->mapping = NULL;
994 
995 		if (likely(!is_zone_device_page(newpage)))
996 			flush_dcache_page(newpage);
997 
998 	}
999 out:
1000 	return rc;
1001 }
1002 
1003 static int __unmap_and_move(struct page *page, struct page *newpage,
1004 				int force, enum migrate_mode mode)
1005 {
1006 	int rc = -EAGAIN;
1007 	int page_was_mapped = 0;
1008 	struct anon_vma *anon_vma = NULL;
1009 	bool is_lru = !__PageMovable(page);
1010 
1011 	if (!trylock_page(page)) {
1012 		if (!force || mode == MIGRATE_ASYNC)
1013 			goto out;
1014 
1015 		/*
1016 		 * It's not safe for direct compaction to call lock_page.
1017 		 * For example, during page readahead pages are added locked
1018 		 * to the LRU. Later, when the IO completes the pages are
1019 		 * marked uptodate and unlocked. However, the queueing
1020 		 * could be merging multiple pages for one bio (e.g.
1021 		 * mpage_readpages). If an allocation happens for the
1022 		 * second or third page, the process can end up locking
1023 		 * the same page twice and deadlocking. Rather than
1024 		 * trying to be clever about what pages can be locked,
1025 		 * avoid the use of lock_page for direct compaction
1026 		 * altogether.
1027 		 */
1028 		if (current->flags & PF_MEMALLOC)
1029 			goto out;
1030 
1031 		lock_page(page);
1032 	}
1033 
1034 	if (PageWriteback(page)) {
1035 		/*
1036 		 * Only in the case of a full synchronous migration is it
1037 		 * necessary to wait for PageWriteback. In the async case,
1038 		 * the retry loop is too short and in the sync-light case,
1039 		 * the overhead of stalling is too much
1040 		 */
1041 		switch (mode) {
1042 		case MIGRATE_SYNC:
1043 		case MIGRATE_SYNC_NO_COPY:
1044 			break;
1045 		default:
1046 			rc = -EBUSY;
1047 			goto out_unlock;
1048 		}
1049 		if (!force)
1050 			goto out_unlock;
1051 		wait_on_page_writeback(page);
1052 	}
1053 
1054 	/*
1055 	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
1056 	 * we cannot notice that anon_vma is freed while we migrate a page.
1057 	 * This get_anon_vma() delays freeing the anon_vma pointer until the end
1058 	 * of migration. File cache pages are no problem because they are
1059 	 * protected by page_lock() (migration may use write_page() or
1060 	 * lock_page() on them), so only anonymous pages need care here.
1061 	 *
1062 	 * Only page_get_anon_vma() understands the subtleties of
1063 	 * getting a hold on an anon_vma from outside one of its mms.
1064 	 * But if we cannot get anon_vma, then we won't need it anyway,
1065 	 * because that implies that the anon page is no longer mapped
1066 	 * (and cannot be remapped so long as we hold the page lock).
1067 	 */
1068 	if (PageAnon(page) && !PageKsm(page))
1069 		anon_vma = page_get_anon_vma(page);
1070 
1071 	/*
1072 	 * Block others from accessing the new page when we get around to
1073 	 * establishing additional references. We are usually the only one
1074 	 * holding a reference to newpage at this point. We used to have a BUG
1075 	 * here if trylock_page(newpage) fails, but would like to allow for
1076 	 * cases where there might be a race with the previous use of newpage.
1077 	 * This is much like races on refcount of oldpage: just don't BUG().
1078 	 */
1079 	if (unlikely(!trylock_page(newpage)))
1080 		goto out_unlock;
1081 
1082 	if (unlikely(!is_lru)) {
1083 		rc = move_to_new_page(newpage, page, mode);
1084 		goto out_unlock_both;
1085 	}
1086 
1087 	/*
1088 	 * Corner case handling:
1089 	 * 1. When a new swap-cache page is read in, it is added to the LRU
1090 	 * and treated as swapcache but it has no rmap yet.
1091 	 * Calling try_to_unmap() against a page->mapping==NULL page will
1092 	 * trigger a BUG.  So handle it here.
1093 	 * 2. An orphaned page (see truncate_complete_page) might have
1094 	 * fs-private metadata. The page can be picked up due to memory
1095 	 * offlining.  Everywhere else except page reclaim, the page is
1096 	 * invisible to the vm, so the page can not be migrated.  So try to
1097 	 * free the metadata, so the page can be freed.
1098 	 */
1099 	if (!page->mapping) {
1100 		VM_BUG_ON_PAGE(PageAnon(page), page);
1101 		if (page_has_private(page)) {
1102 			try_to_free_buffers(page);
1103 			goto out_unlock_both;
1104 		}
1105 	} else if (page_mapped(page)) {
1106 		/* Establish migration ptes */
1107 		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1108 				page);
1109 		try_to_unmap(page,
1110 			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1111 		page_was_mapped = 1;
1112 	}
1113 
1114 	if (!page_mapped(page))
1115 		rc = move_to_new_page(newpage, page, mode);
1116 
1117 	if (page_was_mapped)
1118 		remove_migration_ptes(page,
1119 			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1120 
1121 out_unlock_both:
1122 	unlock_page(newpage);
1123 out_unlock:
1124 	/* Drop an anon_vma reference if we took one */
1125 	if (anon_vma)
1126 		put_anon_vma(anon_vma);
1127 	unlock_page(page);
1128 out:
1129 	/*
1130 	 * If migration is successful, decrease refcount of the newpage
1131 	 * which will not free the page because the new page owner increased
1132 	 * the refcount. As well, if it is an LRU page, add the page to the LRU
1133 	 * list in here. Use the old state of the isolated source page to
1134 	 * determine if we migrated a LRU page. newpage was already unlocked
1135 	 * and possibly modified by its owner - don't rely on the page
1136 	 * state.
1137 	 */
1138 	if (rc == MIGRATEPAGE_SUCCESS) {
1139 		if (unlikely(!is_lru))
1140 			put_page(newpage);
1141 		else
1142 			putback_lru_page(newpage);
1143 	}
1144 
1145 	return rc;
1146 }
1147 
1148 /*
1149  * gcc 4.7 and 4.8 on arm get ICEs when inlining unmap_and_move().  Work
1150  * around it.
1151  */
1152 #if defined(CONFIG_ARM) && \
1153 	defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700
1154 #define ICE_noinline noinline
1155 #else
1156 #define ICE_noinline
1157 #endif
1158 
1159 /*
1160  * Obtain the lock on page, remove all ptes and migrate the page
1161  * to the newly allocated page in newpage.
1162  */
1163 static ICE_noinline int unmap_and_move(new_page_t get_new_page,
1164 				   free_page_t put_new_page,
1165 				   unsigned long private, struct page *page,
1166 				   int force, enum migrate_mode mode,
1167 				   enum migrate_reason reason)
1168 {
1169 	int rc = MIGRATEPAGE_SUCCESS;
1170 	struct page *newpage;
1171 
1172 	if (!thp_migration_supported() && PageTransHuge(page))
1173 		return -ENOMEM;
1174 
1175 	newpage = get_new_page(page, private);
1176 	if (!newpage)
1177 		return -ENOMEM;
1178 
1179 	if (page_count(page) == 1) {
1180 		/* page was freed from under us. So we are done. */
1181 		ClearPageActive(page);
1182 		ClearPageUnevictable(page);
1183 		if (unlikely(__PageMovable(page))) {
1184 			lock_page(page);
1185 			if (!PageMovable(page))
1186 				__ClearPageIsolated(page);
1187 			unlock_page(page);
1188 		}
1189 		if (put_new_page)
1190 			put_new_page(newpage, private);
1191 		else
1192 			put_page(newpage);
1193 		goto out;
1194 	}
1195 
1196 	rc = __unmap_and_move(page, newpage, force, mode);
1197 	if (rc == MIGRATEPAGE_SUCCESS)
1198 		set_page_owner_migrate_reason(newpage, reason);
1199 
1200 out:
1201 	if (rc != -EAGAIN) {
1202 		/*
1203 		 * A page that has been migrated has all references
1204 		 * removed and will be freed. A page that has not been
1205 		 * migrated will have kept its references and be
1206 		 * restored.
1207 		 */
1208 		list_del(&page->lru);
1209 
1210 		/*
1211 		 * Compaction can migrate also non-LRU pages which are
1212 		 * not accounted to NR_ISOLATED_*. They can be recognized
1213 		 * as __PageMovable
1214 		 */
1215 		if (likely(!__PageMovable(page)))
1216 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1217 					page_is_file_cache(page), -hpage_nr_pages(page));
1218 	}
1219 
1220 	/*
1221 	 * If migration is successful, releases reference grabbed during
1222 	 * isolation. Otherwise, restore the page to the right list unless
1223 	 * we want to retry.
1224 	 */
1225 	if (rc == MIGRATEPAGE_SUCCESS) {
1226 		put_page(page);
1227 		if (reason == MR_MEMORY_FAILURE) {
1228 			/*
1229 			 * Set PG_HWPoison on just freed page
1230 			 * intentionally. Although it's rather weird,
1231 			 * it's how HWPoison flag works at the moment.
1232 			 */
1233 			if (set_hwpoison_free_buddy_page(page))
1234 				num_poisoned_pages_inc();
1235 		}
1236 	} else {
1237 		if (rc != -EAGAIN) {
1238 			if (likely(!__PageMovable(page))) {
1239 				putback_lru_page(page);
1240 				goto put_new;
1241 			}
1242 
1243 			lock_page(page);
1244 			if (PageMovable(page))
1245 				putback_movable_page(page);
1246 			else
1247 				__ClearPageIsolated(page);
1248 			unlock_page(page);
1249 			put_page(page);
1250 		}
1251 put_new:
1252 		if (put_new_page)
1253 			put_new_page(newpage, private);
1254 		else
1255 			put_page(newpage);
1256 	}
1257 
1258 	return rc;
1259 }
1260 
1261 /*
1262  * Counterpart of unmap_and_move() for hugepage migration.
1263  *
1264  * This function doesn't wait for the completion of hugepage I/O
1265  * because there is no race between I/O and migration for hugepage.
1266  * Note that currently hugepage I/O occurs only in direct I/O
1267  * where no lock is held and PG_writeback is irrelevant,
1268  * and the writeback status of all subpages is counted in the reference
1269  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1270  * under direct I/O, the reference of the head page is 512 and a bit more.)
1271  * This means that when we try to migrate a hugepage whose subpages are
1272  * doing direct I/O, some references remain after try_to_unmap() and
1273  * hugepage migration fails without data corruption.
1274  *
1275  * There is also no race when direct I/O is issued on the page under migration,
1276  * because then pte is replaced with migration swap entry and direct I/O code
1277  * will wait in the page fault for migration to complete.
1278  */
1279 static int unmap_and_move_huge_page(new_page_t get_new_page,
1280 				free_page_t put_new_page, unsigned long private,
1281 				struct page *hpage, int force,
1282 				enum migrate_mode mode, int reason)
1283 {
1284 	int rc = -EAGAIN;
1285 	int page_was_mapped = 0;
1286 	struct page *new_hpage;
1287 	struct anon_vma *anon_vma = NULL;
1288 
1289 	/*
1290 	 * Migratability of hugepages depends on architectures and their size.
1291 	 * This check is necessary because some callers of hugepage migration
1292 	 * like soft offline and memory hotremove don't walk through page
1293 	 * tables or check whether the hugepage is pmd-based or not before
1294 	 * kicking migration.
1295 	 */
1296 	if (!hugepage_migration_supported(page_hstate(hpage))) {
1297 		putback_active_hugepage(hpage);
1298 		return -ENOSYS;
1299 	}
1300 
1301 	new_hpage = get_new_page(hpage, private);
1302 	if (!new_hpage)
1303 		return -ENOMEM;
1304 
1305 	if (!trylock_page(hpage)) {
1306 		if (!force)
1307 			goto out;
1308 		switch (mode) {
1309 		case MIGRATE_SYNC:
1310 		case MIGRATE_SYNC_NO_COPY:
1311 			break;
1312 		default:
1313 			goto out;
1314 		}
1315 		lock_page(hpage);
1316 	}
1317 
1318 	/*
1319 	 * Check for pages which are in the process of being freed.  Without
1320 	 * page_mapping() set, hugetlbfs specific move page routine will not
1321 	 * be called and we could leak usage counts for subpools.
1322 	 */
1323 	if (page_private(hpage) && !page_mapping(hpage)) {
1324 		rc = -EBUSY;
1325 		goto out_unlock;
1326 	}
1327 
1328 	if (PageAnon(hpage))
1329 		anon_vma = page_get_anon_vma(hpage);
1330 
1331 	if (unlikely(!trylock_page(new_hpage)))
1332 		goto put_anon;
1333 
1334 	if (page_mapped(hpage)) {
1335 		try_to_unmap(hpage,
1336 			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1337 		page_was_mapped = 1;
1338 	}
1339 
1340 	if (!page_mapped(hpage))
1341 		rc = move_to_new_page(new_hpage, hpage, mode);
1342 
1343 	if (page_was_mapped)
1344 		remove_migration_ptes(hpage,
1345 			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
1346 
1347 	unlock_page(new_hpage);
1348 
1349 put_anon:
1350 	if (anon_vma)
1351 		put_anon_vma(anon_vma);
1352 
1353 	if (rc == MIGRATEPAGE_SUCCESS) {
1354 		move_hugetlb_state(hpage, new_hpage, reason);
1355 		put_new_page = NULL;
1356 	}
1357 
1358 out_unlock:
1359 	unlock_page(hpage);
1360 out:
1361 	if (rc != -EAGAIN)
1362 		putback_active_hugepage(hpage);
1363 
1364 	/*
1365 	 * If migration was not successful and there's a freeing callback, use
1366 	 * it.  Otherwise, put_page() will drop the reference grabbed during
1367 	 * isolation.
1368 	 */
1369 	if (put_new_page)
1370 		put_new_page(new_hpage, private);
1371 	else
1372 		putback_active_hugepage(new_hpage);
1373 
1374 	return rc;
1375 }
1376 
1377 /*
1378  * migrate_pages - migrate the pages specified in a list, to the free pages
1379  *		   supplied as the target for the page migration
1380  *
1381  * @from:		The list of pages to be migrated.
1382  * @get_new_page:	The function used to allocate free pages to be used
1383  *			as the target of the page migration.
1384  * @put_new_page:	The function used to free target pages if migration
1385  *			fails, or NULL if no special handling is necessary.
1386  * @private:		Private data to be passed on to get_new_page()
1387  * @mode:		The migration mode that specifies the constraints for
1388  *			page migration, if any.
1389  * @reason:		The reason for page migration.
1390  *
1391  * The function returns after 10 attempts or if no pages are movable any more
1392  * because the list has become empty or no retryable pages remain.
1393  * The caller should call putback_movable_pages() to return pages to the LRU
1394  * or free list only if ret != 0.
1395  *
1396  * Returns the number of pages that were not migrated, or an error code.
1397  */
1398 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1399 		free_page_t put_new_page, unsigned long private,
1400 		enum migrate_mode mode, int reason)
1401 {
1402 	int retry = 1;
1403 	int nr_failed = 0;
1404 	int nr_succeeded = 0;
1405 	int pass = 0;
1406 	struct page *page;
1407 	struct page *page2;
1408 	int swapwrite = current->flags & PF_SWAPWRITE;
1409 	int rc;
1410 
1411 	if (!swapwrite)
1412 		current->flags |= PF_SWAPWRITE;
1413 
1414 	for(pass = 0; pass < 10 && retry; pass++) {
1415 		retry = 0;
1416 
1417 		list_for_each_entry_safe(page, page2, from, lru) {
1418 retry:
1419 			cond_resched();
1420 
1421 			if (PageHuge(page))
1422 				rc = unmap_and_move_huge_page(get_new_page,
1423 						put_new_page, private, page,
1424 						pass > 2, mode, reason);
1425 			else
1426 				rc = unmap_and_move(get_new_page, put_new_page,
1427 						private, page, pass > 2, mode,
1428 						reason);
1429 
1430 			switch(rc) {
1431 			case -ENOMEM:
1432 				/*
1433 				 * THP migration might be unsupported or the
1434 				 * allocation could've failed so we should
1435 				 * retry on the same page with the THP split
1436 				 * to base pages.
1437 				 *
1438 				 * Head page is retried immediately and tail
1439 				 * pages are added to the tail of the list so
1440 				 * we encounter them after the rest of the list
1441 				 * is processed.
1442 				 */
1443 				if (PageTransHuge(page) && !PageHuge(page)) {
1444 					lock_page(page);
1445 					rc = split_huge_page_to_list(page, from);
1446 					unlock_page(page);
1447 					if (!rc) {
1448 						list_safe_reset_next(page, page2, lru);
1449 						goto retry;
1450 					}
1451 				}
1452 				nr_failed++;
1453 				goto out;
1454 			case -EAGAIN:
1455 				retry++;
1456 				break;
1457 			case MIGRATEPAGE_SUCCESS:
1458 				nr_succeeded++;
1459 				break;
1460 			default:
1461 				/*
1462 				 * Permanent failure (-EBUSY, -ENOSYS, etc.):
1463 				 * unlike -EAGAIN case, the failed page is
1464 				 * removed from migration page list and not
1465 				 * retried in the next outer loop.
1466 				 */
1467 				nr_failed++;
1468 				break;
1469 			}
1470 		}
1471 	}
1472 	nr_failed += retry;
1473 	rc = nr_failed;
1474 out:
1475 	if (nr_succeeded)
1476 		count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1477 	if (nr_failed)
1478 		count_vm_events(PGMIGRATE_FAIL, nr_failed);
1479 	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1480 
1481 	if (!swapwrite)
1482 		current->flags &= ~PF_SWAPWRITE;
1483 
1484 	return rc;
1485 }
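/*
 * Illustrative sketch, not part of this file: a minimal get_new_page
 * callback for migrate_pages().  The name is hypothetical and "private" is
 * assumed to carry a node id; compare alloc_new_node_page() in
 * mm/mempolicy.c, which also handles hugetlb and THP targets.
 *
 *	static struct page *demo_new_page(struct page *page, unsigned long private)
 *	{
 *		return __alloc_pages_node((int)private, GFP_HIGHUSER_MOVABLE, 0);
 *	}
 */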
1486 
1487 #ifdef CONFIG_NUMA
1488 
1489 static int store_status(int __user *status, int start, int value, int nr)
1490 {
1491 	while (nr-- > 0) {
1492 		if (put_user(value, status + start))
1493 			return -EFAULT;
1494 		start++;
1495 	}
1496 
1497 	return 0;
1498 }
1499 
1500 static int do_move_pages_to_node(struct mm_struct *mm,
1501 		struct list_head *pagelist, int node)
1502 {
1503 	int err;
1504 
1505 	if (list_empty(pagelist))
1506 		return 0;
1507 
1508 	err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
1509 			MIGRATE_SYNC, MR_SYSCALL);
1510 	if (err)
1511 		putback_movable_pages(pagelist);
1512 	return err;
1513 }
1514 
1515 /*
1516  * Resolves the given address to a struct page, isolates it from the LRU and
1517  * puts it on the given pagelist.
1518  * Returns -errno if the page cannot be found/isolated or 0 when it has been
1519  * queued or the page doesn't need to be migrated because it is already on
1520  * the target node.
1521  */
1522 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1523 		int node, struct list_head *pagelist, bool migrate_all)
1524 {
1525 	struct vm_area_struct *vma;
1526 	struct page *page;
1527 	unsigned int follflags;
1528 	int err;
1529 
1530 	down_read(&mm->mmap_sem);
1531 	err = -EFAULT;
1532 	vma = find_vma(mm, addr);
1533 	if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1534 		goto out;
1535 
1536 	/* FOLL_DUMP to ignore special (like zero) pages */
1537 	follflags = FOLL_GET | FOLL_DUMP;
1538 	page = follow_page(vma, addr, follflags);
1539 
1540 	err = PTR_ERR(page);
1541 	if (IS_ERR(page))
1542 		goto out;
1543 
1544 	err = -ENOENT;
1545 	if (!page)
1546 		goto out;
1547 
1548 	err = 0;
1549 	if (page_to_nid(page) == node)
1550 		goto out_putpage;
1551 
1552 	err = -EACCES;
1553 	if (page_mapcount(page) > 1 && !migrate_all)
1554 		goto out_putpage;
1555 
1556 	if (PageHuge(page)) {
1557 		if (PageHead(page)) {
1558 			isolate_huge_page(page, pagelist);
1559 			err = 0;
1560 		}
1561 	} else {
1562 		struct page *head;
1563 
1564 		head = compound_head(page);
1565 		err = isolate_lru_page(head);
1566 		if (err)
1567 			goto out_putpage;
1568 
1569 		err = 0;
1570 		list_add_tail(&head->lru, pagelist);
1571 		mod_node_page_state(page_pgdat(head),
1572 			NR_ISOLATED_ANON + page_is_file_cache(head),
1573 			hpage_nr_pages(head));
1574 	}
1575 out_putpage:
1576 	/*
1577 	 * Either remove the duplicate refcount from
1578 	 * isolate_lru_page() or drop the page ref if it was
1579 	 * not isolated.
1580 	 */
1581 	put_page(page);
1582 out:
1583 	up_read(&mm->mmap_sem);
1584 	return err;
1585 }
1586 
1587 /*
1588  * Migrate an array of page addresses onto an array of nodes and fill
1589  * in the corresponding array of status values.
1590  */
1591 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1592 			 unsigned long nr_pages,
1593 			 const void __user * __user *pages,
1594 			 const int __user *nodes,
1595 			 int __user *status, int flags)
1596 {
1597 	int current_node = NUMA_NO_NODE;
1598 	LIST_HEAD(pagelist);
1599 	int start, i;
1600 	int err = 0, err1;
1601 
1602 	migrate_prep();
1603 
1604 	for (i = start = 0; i < nr_pages; i++) {
1605 		const void __user *p;
1606 		unsigned long addr;
1607 		int node;
1608 
1609 		err = -EFAULT;
1610 		if (get_user(p, pages + i))
1611 			goto out_flush;
1612 		if (get_user(node, nodes + i))
1613 			goto out_flush;
1614 		addr = (unsigned long)p;
1615 
1616 		err = -ENODEV;
1617 		if (node < 0 || node >= MAX_NUMNODES)
1618 			goto out_flush;
1619 		if (!node_state(node, N_MEMORY))
1620 			goto out_flush;
1621 
1622 		err = -EACCES;
1623 		if (!node_isset(node, task_nodes))
1624 			goto out_flush;
1625 
1626 		if (current_node == NUMA_NO_NODE) {
1627 			current_node = node;
1628 			start = i;
1629 		} else if (node != current_node) {
1630 			err = do_move_pages_to_node(mm, &pagelist, current_node);
1631 			if (err)
1632 				goto out;
1633 			err = store_status(status, start, current_node, i - start);
1634 			if (err)
1635 				goto out;
1636 			start = i;
1637 			current_node = node;
1638 		}
1639 
1640 		/*
1641 		 * Errors in the page lookup or isolation are not fatal and we simply
1642 		 * report them via status
1643 		 */
1644 		err = add_page_for_migration(mm, addr, current_node,
1645 				&pagelist, flags & MPOL_MF_MOVE_ALL);
1646 		if (!err)
1647 			continue;
1648 
1649 		err = store_status(status, i, err, 1);
1650 		if (err)
1651 			goto out_flush;
1652 
1653 		err = do_move_pages_to_node(mm, &pagelist, current_node);
1654 		if (err)
1655 			goto out;
1656 		if (i > start) {
1657 			err = store_status(status, start, current_node, i - start);
1658 			if (err)
1659 				goto out;
1660 		}
1661 		current_node = NUMA_NO_NODE;
1662 	}
1663 out_flush:
1664 	if (list_empty(&pagelist))
1665 		return err;
1666 
1667 	/* Make sure we do not overwrite the existing error */
1668 	err1 = do_move_pages_to_node(mm, &pagelist, current_node);
1669 	if (!err1)
1670 		err1 = store_status(status, start, current_node, i - start);
1671 	if (!err)
1672 		err = err1;
1673 out:
1674 	return err;
1675 }
1676 
1677 /*
1678  * Determine the nodes of an array of pages and store them in an array of status values.
1679  */
1680 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1681 				const void __user **pages, int *status)
1682 {
1683 	unsigned long i;
1684 
1685 	down_read(&mm->mmap_sem);
1686 
1687 	for (i = 0; i < nr_pages; i++) {
1688 		unsigned long addr = (unsigned long)(*pages);
1689 		struct vm_area_struct *vma;
1690 		struct page *page;
1691 		int err = -EFAULT;
1692 
1693 		vma = find_vma(mm, addr);
1694 		if (!vma || addr < vma->vm_start)
1695 			goto set_status;
1696 
1697 		/* FOLL_DUMP to ignore special (like zero) pages */
1698 		page = follow_page(vma, addr, FOLL_DUMP);
1699 
1700 		err = PTR_ERR(page);
1701 		if (IS_ERR(page))
1702 			goto set_status;
1703 
1704 		err = page ? page_to_nid(page) : -ENOENT;
1705 set_status:
1706 		*status = err;
1707 
1708 		pages++;
1709 		status++;
1710 	}
1711 
1712 	up_read(&mm->mmap_sem);
1713 }
1714 
1715 /*
1716  * Determine the nodes of a user array of pages and store them in
1717  * a user array of status values.
1718  */
1719 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1720 			 const void __user * __user *pages,
1721 			 int __user *status)
1722 {
1723 #define DO_PAGES_STAT_CHUNK_NR 16
1724 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1725 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1726 
1727 	while (nr_pages) {
1728 		unsigned long chunk_nr;
1729 
1730 		chunk_nr = nr_pages;
1731 		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1732 			chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1733 
1734 		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1735 			break;
1736 
1737 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1738 
1739 		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1740 			break;
1741 
1742 		pages += chunk_nr;
1743 		status += chunk_nr;
1744 		nr_pages -= chunk_nr;
1745 	}
1746 	return nr_pages ? -EFAULT : 0;
1747 }
1748 
1749 /*
1750  * Move a list of pages in the address space of the currently executing
1751  * process.
1752  */
1753 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1754 			     const void __user * __user *pages,
1755 			     const int __user *nodes,
1756 			     int __user *status, int flags)
1757 {
1758 	struct task_struct *task;
1759 	struct mm_struct *mm;
1760 	int err;
1761 	nodemask_t task_nodes;
1762 
1763 	/* Check flags */
1764 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1765 		return -EINVAL;
1766 
1767 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1768 		return -EPERM;
1769 
1770 	/* Find the mm_struct */
1771 	rcu_read_lock();
1772 	task = pid ? find_task_by_vpid(pid) : current;
1773 	if (!task) {
1774 		rcu_read_unlock();
1775 		return -ESRCH;
1776 	}
1777 	get_task_struct(task);
1778 
1779 	/*
1780 	 * Check if this process has the right to modify the specified
1781 	 * process. Use the regular "ptrace_may_access()" checks.
1782 	 */
1783 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1784 		rcu_read_unlock();
1785 		err = -EPERM;
1786 		goto out;
1787 	}
1788 	rcu_read_unlock();
1789 
1790  	err = security_task_movememory(task);
1791  	if (err)
1792 		goto out;
1793 
1794 	task_nodes = cpuset_mems_allowed(task);
1795 	mm = get_task_mm(task);
1796 	put_task_struct(task);
1797 
1798 	if (!mm)
1799 		return -EINVAL;
1800 
1801 	if (nodes)
1802 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
1803 				    nodes, status, flags);
1804 	else
1805 		err = do_pages_stat(mm, nr_pages, pages, status);
1806 
1807 	mmput(mm);
1808 	return err;
1809 
1810 out:
1811 	put_task_struct(task);
1812 	return err;
1813 }
1814 
1815 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1816 		const void __user * __user *, pages,
1817 		const int __user *, nodes,
1818 		int __user *, status, int, flags)
1819 {
1820 	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1821 }
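/*
 * Illustrative sketch (userspace, not part of this file): the syscall is
 * normally reached through the numa(3) library wrapper.  "buf" is a
 * hypothetical pointer into the caller's address space.
 *
 *	void *pages[1] = { buf };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE))
 *		perror("move_pages");
 *
 * On success status[0] holds the node the page now resides on; on per-page
 * failure it holds a negative errno as filled in by store_status() above.
 */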
1822 
1823 #ifdef CONFIG_COMPAT
1824 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
1825 		       compat_uptr_t __user *, pages32,
1826 		       const int __user *, nodes,
1827 		       int __user *, status,
1828 		       int, flags)
1829 {
1830 	const void __user * __user *pages;
1831 	int i;
1832 
1833 	pages = compat_alloc_user_space(nr_pages * sizeof(void *));
1834 	for (i = 0; i < nr_pages; i++) {
1835 		compat_uptr_t p;
1836 
1837 		if (get_user(p, pages32 + i) ||
1838 			put_user(compat_ptr(p), pages + i))
1839 			return -EFAULT;
1840 	}
1841 	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1842 }
1843 #endif /* CONFIG_COMPAT */
1844 
1845 #ifdef CONFIG_NUMA_BALANCING
1846 /*
1847  * Returns true if this is a safe migration target node for misplaced NUMA
1848  * pages. Currently it only checks the watermarks, which is crude.
1849  */
1850 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
1851 				   unsigned long nr_migrate_pages)
1852 {
1853 	int z;
1854 
1855 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1856 		struct zone *zone = pgdat->node_zones + z;
1857 
1858 		if (!populated_zone(zone))
1859 			continue;
1860 
1861 		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
1862 		if (!zone_watermark_ok(zone, 0,
1863 				       high_wmark_pages(zone) +
1864 				       nr_migrate_pages,
1865 				       0, 0))
1866 			continue;
1867 		return true;
1868 	}
1869 	return false;
1870 }
1871 
1872 static struct page *alloc_misplaced_dst_page(struct page *page,
1873 					   unsigned long data)
1874 {
1875 	int nid = (int) data;
1876 	struct page *newpage;
1877 
1878 	newpage = __alloc_pages_node(nid,
1879 					 (GFP_HIGHUSER_MOVABLE |
1880 					  __GFP_THISNODE | __GFP_NOMEMALLOC |
1881 					  __GFP_NORETRY | __GFP_NOWARN) &
1882 					 ~__GFP_RECLAIM, 0);
1883 
1884 	return newpage;
1885 }
1886 
1887 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
1888 {
1889 	int page_lru;
1890 
1891 	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
1892 
1893 	/* Avoid migrating to a node that is nearly full */
1894 	if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
1895 		return 0;
1896 
1897 	if (isolate_lru_page(page))
1898 		return 0;
1899 
1900 	/*
1901 	 * migrate_misplaced_transhuge_page() skips page migration's usual
1902 	 * check on page_count(), so we must do it here, now that the page
1903 	 * has been isolated: a GUP pin, or any other pin, prevents migration.
1904 	 * The expected page count is 3: one for the page's mapcount, one for
1905 	 * the caller's pin, and one for the reference taken by isolate_lru_page().
1906 	 */
1907 	if (PageTransHuge(page) && page_count(page) != 3) {
1908 		putback_lru_page(page);
1909 		return 0;
1910 	}
1911 
1912 	page_lru = page_is_file_cache(page);
1913 	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
1914 				hpage_nr_pages(page));
1915 
1916 	/*
1917 	 * Isolating the page has taken another reference, so the
1918 	 * caller's reference can be safely dropped without the page
1919 	 * disappearing underneath us during migration.
1920 	 */
1921 	put_page(page);
1922 	return 1;
1923 }
1924 
1925 bool pmd_trans_migrating(pmd_t pmd)
1926 {
1927 	struct page *page = pmd_page(pmd);
1928 	return PageLocked(page);
1929 }
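
/*
 * Sketch of a typical caller (illustrative only; see the THP NUMA fault path
 * in mm/huge_memory.c for the real logic): a hinting fault that finds the
 * huge page mid-migration backs off and waits for the migration to finish
 * instead of racing it:
 *
 *	if (unlikely(pmd_trans_migrating(*pmd))) {
 *		struct page *page = pmd_page(*pmd);
 *
 *		spin_unlock(ptl);
 *		wait_on_page_locked(page);
 *		goto out;
 *	}
 */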
1930 
1931 /*
1932  * Attempt to migrate a misplaced page to the specified destination
1933  * node. Caller is expected to have an elevated reference count on
1934  * the page that will be dropped by this function before returning.
1935  */
1936 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1937 			   int node)
1938 {
1939 	pg_data_t *pgdat = NODE_DATA(node);
1940 	int isolated;
1941 	int nr_remaining;
1942 	LIST_HEAD(migratepages);
1943 
1944 	/*
1945 	 * Don't migrate file pages that are mapped in multiple processes
1946 	 * with execute permissions as they are probably shared libraries.
1947 	 */
1948 	if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1949 	    (vma->vm_flags & VM_EXEC))
1950 		goto out;
1951 
1952 	/*
1953 	 * Also do not migrate dirty pages, as not all filesystems can move
1954 	 * dirty pages in MIGRATE_ASYNC mode, which would be a waste of cycles.
1955 	 */
1956 	if (page_is_file_cache(page) && PageDirty(page))
1957 		goto out;
1958 
1959 	isolated = numamigrate_isolate_page(pgdat, page);
1960 	if (!isolated)
1961 		goto out;
1962 
1963 	list_add(&page->lru, &migratepages);
1964 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
1965 				     NULL, node, MIGRATE_ASYNC,
1966 				     MR_NUMA_MISPLACED);
1967 	if (nr_remaining) {
1968 		if (!list_empty(&migratepages)) {
1969 			list_del(&page->lru);
1970 			dec_node_page_state(page, NR_ISOLATED_ANON +
1971 					page_is_file_cache(page));
1972 			putback_lru_page(page);
1973 		}
1974 		isolated = 0;
1975 	} else
1976 		count_vm_numa_event(NUMA_PAGE_MIGRATE);
1977 	BUG_ON(!list_empty(&migratepages));
1978 	return isolated;
1979 
1980 out:
1981 	put_page(page);
1982 	return 0;
1983 }
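
/*
 * Illustrative caller sketch (details elided; the real user is the NUMA
 * hinting fault path, e.g. do_numa_page()): once the fault code has picked a
 * better node it hands the page over and checks whether the move happened:
 *
 *	migrated = migrate_misplaced_page(page, vma, target_nid);
 *	if (migrated)
 *		page_nid = target_nid;
 *
 * The page reference is consumed either way, as noted above.
 */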
1984 #endif /* CONFIG_NUMA_BALANCING */
1985 
1986 #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1987 /*
1988  * Migrates a THP to a given target node. page must be locked and is unlocked
1989  * before returning.
1990  */
1991 int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1992 				struct vm_area_struct *vma,
1993 				pmd_t *pmd, pmd_t entry,
1994 				unsigned long address,
1995 				struct page *page, int node)
1996 {
1997 	spinlock_t *ptl;
1998 	pg_data_t *pgdat = NODE_DATA(node);
1999 	int isolated = 0;
2000 	struct page *new_page = NULL;
2001 	int page_lru = page_is_file_cache(page);
2002 	unsigned long start = address & HPAGE_PMD_MASK;
2003 
2004 	new_page = alloc_pages_node(node,
2005 		(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
2006 		HPAGE_PMD_ORDER);
2007 	if (!new_page)
2008 		goto out_fail;
2009 	prep_transhuge_page(new_page);
2010 
2011 	isolated = numamigrate_isolate_page(pgdat, page);
2012 	if (!isolated) {
2013 		put_page(new_page);
2014 		goto out_fail;
2015 	}
2016 
2017 	/* Prepare a page as a migration target */
2018 	__SetPageLocked(new_page);
2019 	if (PageSwapBacked(page))
2020 		__SetPageSwapBacked(new_page);
2021 
2022 	/* anon mapping, we can simply copy page->mapping to the new page: */
2023 	new_page->mapping = page->mapping;
2024 	new_page->index = page->index;
2025 	/* flush the cache before copying using the kernel virtual address */
2026 	flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
2027 	migrate_page_copy(new_page, page);
2028 	WARN_ON(PageLRU(new_page));
2029 
2030 	/* Recheck the target PMD */
2031 	ptl = pmd_lock(mm, pmd);
2032 	if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
2033 		spin_unlock(ptl);
2034 
2035 		/* Reverse changes made by migrate_page_copy() */
2036 		if (TestClearPageActive(new_page))
2037 			SetPageActive(page);
2038 		if (TestClearPageUnevictable(new_page))
2039 			SetPageUnevictable(page);
2040 
2041 		unlock_page(new_page);
2042 		put_page(new_page);		/* Free it */
2043 
2044 		/* Retake the caller's reference and put the page back on the LRU */
2045 		get_page(page);
2046 		putback_lru_page(page);
2047 		mod_node_page_state(page_pgdat(page),
2048 			 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
2049 
2050 		goto out_unlock;
2051 	}
2052 
2053 	entry = mk_huge_pmd(new_page, vma->vm_page_prot);
2054 	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2055 
2056 	/*
2057 	 * Overwrite the old entry under pagetable lock and establish
2058 	 * the new PTE. Any parallel GUP will either observe the old
2059 	 * page blocking on the page lock, block on the page table
2060 	 * lock or observe the new page. The SetPageUptodate on the
2061 	 * new page and page_add_new_anon_rmap guarantee the copy is
2062 	 * visible before the pagetable update.
2063 	 */
2064 	page_add_anon_rmap(new_page, vma, start, true);
2065 	/*
2066 	 * At this point the pmd is numa/protnone (i.e. non present) and the TLB
2067 	 * has already been flushed globally.  So no TLB can be currently
2068 	 * caching this non present pmd mapping.  There's no need to clear the
2069 	 * pmd before doing set_pmd_at(), nor to flush the TLB after
2070 	 * set_pmd_at().  Clearing the pmd here would introduce a race
2071 	 * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
2072 	 * mmap_sem for reading.  If the pmd is set to NULL at any given time,
2073 	 * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
2074 	 * pmd.
2075 	 */
2076 	set_pmd_at(mm, start, pmd, entry);
2077 	update_mmu_cache_pmd(vma, address, &entry);
2078 
2079 	page_ref_unfreeze(page, 2);
2080 	mlock_migrate_page(new_page, page);
2081 	page_remove_rmap(page, true);
2082 	set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
2083 
2084 	spin_unlock(ptl);
2085 
2086 	/* Take an "isolate" reference and put new page on the LRU. */
2087 	get_page(new_page);
2088 	putback_lru_page(new_page);
2089 
2090 	unlock_page(new_page);
2091 	unlock_page(page);
2092 	put_page(page);			/* Drop the rmap reference */
2093 	put_page(page);			/* Drop the LRU isolation reference */
2094 
2095 	count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2096 	count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2097 
2098 	mod_node_page_state(page_pgdat(page),
2099 			NR_ISOLATED_ANON + page_lru,
2100 			-HPAGE_PMD_NR);
2101 	return isolated;
2102 
2103 out_fail:
2104 	count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
2105 	ptl = pmd_lock(mm, pmd);
2106 	if (pmd_same(*pmd, entry)) {
2107 		entry = pmd_modify(entry, vma->vm_page_prot);
2108 		set_pmd_at(mm, start, pmd, entry);
2109 		update_mmu_cache_pmd(vma, address, &entry);
2110 	}
2111 	spin_unlock(ptl);
2112 
2113 out_unlock:
2114 	unlock_page(page);
2115 	put_page(page);
2116 	return 0;
2117 }
2118 #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
2119 
2120 #endif /* CONFIG_NUMA */
2121 
2122 #if defined(CONFIG_MIGRATE_VMA_HELPER)
2123 struct migrate_vma {
2124 	struct vm_area_struct	*vma;
2125 	unsigned long		*dst;
2126 	unsigned long		*src;
2127 	unsigned long		cpages;
2128 	unsigned long		npages;
2129 	unsigned long		start;
2130 	unsigned long		end;
2131 };
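
/*
 * Rough field relationships, for orientation: after migrate_vma_collect()
 * below has walked the range [start, end),
 *
 *	npages == (end - start) >> PAGE_SHIFT;
 *	cpages <= npages;	entries actually collected for migration
 *
 * and src[i]/dst[i] hold migrate_pfn()-encoded pfns plus MIGRATE_PFN_* flags
 * for the i-th page of the range. src[] is filled from the CPU page table,
 * dst[] by the driver's alloc_and_copy() callback.
 */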
2132 
2133 static int migrate_vma_collect_hole(unsigned long start,
2134 				    unsigned long end,
2135 				    struct mm_walk *walk)
2136 {
2137 	struct migrate_vma *migrate = walk->private;
2138 	unsigned long addr;
2139 
2140 	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
2141 		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2142 		migrate->dst[migrate->npages] = 0;
2143 		migrate->npages++;
2144 		migrate->cpages++;
2145 	}
2146 
2147 	return 0;
2148 }
2149 
2150 static int migrate_vma_collect_skip(unsigned long start,
2151 				    unsigned long end,
2152 				    struct mm_walk *walk)
2153 {
2154 	struct migrate_vma *migrate = walk->private;
2155 	unsigned long addr;
2156 
2157 	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
2158 		migrate->dst[migrate->npages] = 0;
2159 		migrate->src[migrate->npages++] = 0;
2160 	}
2161 
2162 	return 0;
2163 }
2164 
2165 static int migrate_vma_collect_pmd(pmd_t *pmdp,
2166 				   unsigned long start,
2167 				   unsigned long end,
2168 				   struct mm_walk *walk)
2169 {
2170 	struct migrate_vma *migrate = walk->private;
2171 	struct vm_area_struct *vma = walk->vma;
2172 	struct mm_struct *mm = vma->vm_mm;
2173 	unsigned long addr = start, unmapped = 0;
2174 	spinlock_t *ptl;
2175 	pte_t *ptep;
2176 
2177 again:
2178 	if (pmd_none(*pmdp))
2179 		return migrate_vma_collect_hole(start, end, walk);
2180 
2181 	if (pmd_trans_huge(*pmdp)) {
2182 		struct page *page;
2183 
2184 		ptl = pmd_lock(mm, pmdp);
2185 		if (unlikely(!pmd_trans_huge(*pmdp))) {
2186 			spin_unlock(ptl);
2187 			goto again;
2188 		}
2189 
2190 		page = pmd_page(*pmdp);
2191 		if (is_huge_zero_page(page)) {
2192 			spin_unlock(ptl);
2193 			split_huge_pmd(vma, pmdp, addr);
2194 			if (pmd_trans_unstable(pmdp))
2195 				return migrate_vma_collect_skip(start, end,
2196 								walk);
2197 		} else {
2198 			int ret;
2199 
2200 			get_page(page);
2201 			spin_unlock(ptl);
2202 			if (unlikely(!trylock_page(page)))
2203 				return migrate_vma_collect_skip(start, end,
2204 								walk);
2205 			ret = split_huge_page(page);
2206 			unlock_page(page);
2207 			put_page(page);
2208 			if (ret)
2209 				return migrate_vma_collect_skip(start, end,
2210 								walk);
2211 			if (pmd_none(*pmdp))
2212 				return migrate_vma_collect_hole(start, end,
2213 								walk);
2214 		}
2215 	}
2216 
2217 	if (unlikely(pmd_bad(*pmdp)))
2218 		return migrate_vma_collect_skip(start, end, walk);
2219 
2220 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2221 	arch_enter_lazy_mmu_mode();
2222 
2223 	for (; addr < end; addr += PAGE_SIZE, ptep++) {
2224 		unsigned long mpfn, pfn;
2225 		struct page *page;
2226 		swp_entry_t entry;
2227 		pte_t pte;
2228 
2229 		pte = *ptep;
2230 		pfn = pte_pfn(pte);
2231 
2232 		if (pte_none(pte)) {
2233 			mpfn = MIGRATE_PFN_MIGRATE;
2234 			migrate->cpages++;
2235 			pfn = 0;
2236 			goto next;
2237 		}
2238 
2239 		if (!pte_present(pte)) {
2240 			mpfn = pfn = 0;
2241 
2242 			/*
2243 			 * Only care about unaddressable device page special
2244 			 * Only care about the special page table entries of
2245 			 * unaddressable device pages. Other special swap entries
2246 			 * are not migratable, and regular swapped pages are ignored.
2247 			entry = pte_to_swp_entry(pte);
2248 			if (!is_device_private_entry(entry))
2249 				goto next;
2250 
2251 			page = device_private_entry_to_page(entry);
2252 			mpfn = migrate_pfn(page_to_pfn(page))|
2253 				MIGRATE_PFN_DEVICE | MIGRATE_PFN_MIGRATE;
2254 			if (is_write_device_private_entry(entry))
2255 				mpfn |= MIGRATE_PFN_WRITE;
2256 		} else {
2257 			if (is_zero_pfn(pfn)) {
2258 				mpfn = MIGRATE_PFN_MIGRATE;
2259 				migrate->cpages++;
2260 				pfn = 0;
2261 				goto next;
2262 			}
2263 			page = vm_normal_page(migrate->vma, addr, pte);
2264 			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2265 			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2266 		}
2267 
2268 		/* FIXME support THP */
2269 		if (!page || !page->mapping || PageTransCompound(page)) {
2270 			mpfn = pfn = 0;
2271 			goto next;
2272 		}
2273 		pfn = page_to_pfn(page);
2274 
2275 		/*
2276 		 * By taking a reference on the page we pin it, and that blocks
2277 		 * any kind of migration. A side effect is that it "freezes" the
2278 		 * pte.
2279 		 *
2280 		 * We drop this reference after isolating the page from the lru
2281 		 * for non-device pages (device pages are not on the lru and thus
2282 		 * cannot be dropped from it).
2283 		 */
2284 		get_page(page);
2285 		migrate->cpages++;
2286 
2287 		/*
2288 		 * Optimize for the common case where page is only mapped once
2289 		 * in one process. If we can lock the page, then we can safely
2290 		 * set up a special migration page table entry now.
2291 		 */
2292 		if (trylock_page(page)) {
2293 			pte_t swp_pte;
2294 
2295 			mpfn |= MIGRATE_PFN_LOCKED;
2296 			ptep_get_and_clear(mm, addr, ptep);
2297 
2298 			/* Setup special migration page table entry */
2299 			entry = make_migration_entry(page, mpfn &
2300 						     MIGRATE_PFN_WRITE);
2301 			swp_pte = swp_entry_to_pte(entry);
2302 			if (pte_soft_dirty(pte))
2303 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2304 			set_pte_at(mm, addr, ptep, swp_pte);
2305 
2306 			/*
2307 			 * This is like regular unmap: we remove the rmap and
2308 			 * drop page refcount. Page won't be freed, as we took
2309 			 * a reference just above.
2310 			 */
2311 			page_remove_rmap(page, false);
2312 			put_page(page);
2313 
2314 			if (pte_present(pte))
2315 				unmapped++;
2316 		}
2317 
2318 next:
2319 		migrate->dst[migrate->npages] = 0;
2320 		migrate->src[migrate->npages++] = mpfn;
2321 	}
2322 	arch_leave_lazy_mmu_mode();
2323 	pte_unmap_unlock(ptep - 1, ptl);
2324 
2325 	/* Only flush the TLB if we actually modified any entries */
2326 	if (unmapped)
2327 		flush_tlb_range(walk->vma, start, end);
2328 
2329 	return 0;
2330 }
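
/*
 * Decoding sketch for the src[]/dst[] entries built above (the exact bit
 * layout lives with the MIGRATE_PFN_* definitions in include/linux/migrate.h):
 * the pfn sits in the upper bits and the low bits carry flags, so a consumer
 * typically does something like
 *
 *	struct page *page = migrate_pfn_to_page(migrate->src[i]);
 *	bool writable = migrate->src[i] & MIGRATE_PFN_WRITE;
 *	bool migratable = migrate->src[i] & MIGRATE_PFN_MIGRATE;
 *
 * A zero entry, or one without MIGRATE_PFN_VALID, decodes to a NULL page.
 */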
2331 
2332 /*
2333  * migrate_vma_collect() - collect pages over a range of virtual addresses
2334  * @migrate: migrate struct containing all migration information
2335  *
2336  * This will walk the CPU page table. For each virtual address backed by a
2337  * valid page, it updates the src array and takes a reference on the page, in
2338  * order to pin the page until we lock it and unmap it.
2339  */
2340 static void migrate_vma_collect(struct migrate_vma *migrate)
2341 {
2342 	struct mmu_notifier_range range;
2343 	struct mm_walk mm_walk = {
2344 		.pmd_entry = migrate_vma_collect_pmd,
2345 		.pte_hole = migrate_vma_collect_hole,
2346 		.vma = migrate->vma,
2347 		.mm = migrate->vma->vm_mm,
2348 		.private = migrate,
2349 	};
2350 
2351 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm_walk.mm,
2352 				migrate->start,
2353 				migrate->end);
2354 	mmu_notifier_invalidate_range_start(&range);
2355 	walk_page_range(migrate->start, migrate->end, &mm_walk);
2356 	mmu_notifier_invalidate_range_end(&range);
2357 
2358 	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2359 }
2360 
2361 /*
2362  * migrate_vma_check_page() - check if page is pinned or not
2363  * @page: struct page to check
2364  *
2365  * Pinned pages cannot be migrated. This is the same test as in
2366  * migrate_page_move_mapping(), except that here we allow migration of a
2367  * ZONE_DEVICE page.
2368  */
2369 static bool migrate_vma_check_page(struct page *page)
2370 {
2371 	/*
2372 	 * One extra ref because caller holds an extra reference, either from
2373 	 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2374 	 * a device page.
2375 	 */
2376 	int extra = 1;
2377 
2378 	/*
2379 	 * FIXME support THP (transparent huge page), it is bit more complex to
2380 	 * check them than regular pages, because they can be mapped with a pmd
2381 	 * or with a pte (split pte mapping).
2382 	 */
2383 	if (PageCompound(page))
2384 		return false;
2385 
2386 	/* Pages from ZONE_DEVICE have one extra reference */
2387 	if (is_zone_device_page(page)) {
2388 		/*
2389 		 * Private pages can never be pinned, as they have no valid pte
2390 		 * and GUP will fail for them. Yet if there is a pending migration,
2391 		 * a thread might try to wait on the pte migration entry and
2392 		 * bump the page reference count. Sadly there is no way to tell
2393 		 * a regular pin from a migration wait. Hence, to avoid two racing
2394 		 * threads (one stopping migration because the other is waiting on
2395 		 * the pte migration entry) entering an infinite loop while trying
2396 		 * to migrate back to the CPU, we always return true here.
2397 		 *
2398 		 * FIXME proper solution is to rework migration_entry_wait() so
2399 		 * it does not need to take a reference on page.
2400 		 */
2401 		return is_device_private_page(page);
2402 	}
2403 
2404 	/* For file-backed pages */
2405 	if (page_mapping(page))
2406 		extra += 1 + page_has_private(page);
2407 
2408 	if ((page_count(page) - extra) > page_mapcount(page))
2409 		return false;
2410 
2411 	return true;
2412 }
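
/*
 * Worked example of the check above (illustrative numbers): an anonymous
 * page mapped by a single pte, with the LRU isolation reference held by the
 * caller, typically sits at page_count == 2 and page_mapcount == 1 here, so
 * "count - extra > mapcount" is 2 - 1 > 1, i.e. false, and the page counts
 * as unpinned. Any additional reference, such as a GUP pin, tips the
 * comparison and the page is reported as pinned.
 */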
2413 
2414 /*
2415  * migrate_vma_prepare() - lock pages and isolate them from the lru
2416  * @migrate: migrate struct containing all migration information
2417  *
2418  * This locks pages that have been collected by migrate_vma_collect(). Once each
2419  * page is locked it is isolated from the lru (for non-device pages). Finally,
2420  * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
2421  * migrated by concurrent kernel threads.
2422  */
2423 static void migrate_vma_prepare(struct migrate_vma *migrate)
2424 {
2425 	const unsigned long npages = migrate->npages;
2426 	const unsigned long start = migrate->start;
2427 	unsigned long addr, i, restore = 0;
2428 	bool allow_drain = true;
2429 
2430 	lru_add_drain();
2431 
2432 	for (i = 0; (i < npages) && migrate->cpages; i++) {
2433 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2434 		bool remap = true;
2435 
2436 		if (!page)
2437 			continue;
2438 
2439 		if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
2440 			/*
2441 			 * Because we are migrating several pages there can be
2442 			 * a deadlock between two concurrent migrations where
2443 			 * each is waiting on the other's page lock.
2444 			 *
2445 			 * Make migrate_vma() a best-effort thing and back off
2446 			 * for any page we cannot lock right away.
2447 			 */
2448 			if (!trylock_page(page)) {
2449 				migrate->src[i] = 0;
2450 				migrate->cpages--;
2451 				put_page(page);
2452 				continue;
2453 			}
2454 			remap = false;
2455 			migrate->src[i] |= MIGRATE_PFN_LOCKED;
2456 		}
2457 
2458 		/* ZONE_DEVICE pages are not on LRU */
2459 		if (!is_zone_device_page(page)) {
2460 			if (!PageLRU(page) && allow_drain) {
2461 				/* Drain CPU's pagevec */
2462 				lru_add_drain_all();
2463 				allow_drain = false;
2464 			}
2465 
2466 			if (isolate_lru_page(page)) {
2467 				if (remap) {
2468 					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2469 					migrate->cpages--;
2470 					restore++;
2471 				} else {
2472 					migrate->src[i] = 0;
2473 					unlock_page(page);
2474 					migrate->cpages--;
2475 					put_page(page);
2476 				}
2477 				continue;
2478 			}
2479 
2480 			/* Drop the reference we took in collect */
2481 			put_page(page);
2482 		}
2483 
2484 		if (!migrate_vma_check_page(page)) {
2485 			if (remap) {
2486 				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2487 				migrate->cpages--;
2488 				restore++;
2489 
2490 				if (!is_zone_device_page(page)) {
2491 					get_page(page);
2492 					putback_lru_page(page);
2493 				}
2494 			} else {
2495 				migrate->src[i] = 0;
2496 				unlock_page(page);
2497 				migrate->cpages--;
2498 
2499 				if (!is_zone_device_page(page))
2500 					putback_lru_page(page);
2501 				else
2502 					put_page(page);
2503 			}
2504 		}
2505 	}
2506 
2507 	for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
2508 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2509 
2510 		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2511 			continue;
2512 
2513 		remove_migration_pte(page, migrate->vma, addr, page);
2514 
2515 		migrate->src[i] = 0;
2516 		unlock_page(page);
2517 		put_page(page);
2518 		restore--;
2519 	}
2520 }
2521 
2522 /*
2523  * migrate_vma_unmap() - replace page mapping with special migration pte entry
2524  * @migrate: migrate struct containing all migration information
2525  *
2526  * Replace page mapping (CPU page table pte) with a special migration pte entry
2527  * and check again if it has been pinned. Pinned pages are restored because we
2528  * cannot migrate them.
2529  *
2530  * This is the last step before we call the device driver callback to allocate
2531  * destination memory and copy contents of original page over to new page.
2532  */
2533 static void migrate_vma_unmap(struct migrate_vma *migrate)
2534 {
2535 	int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
2536 	const unsigned long npages = migrate->npages;
2537 	const unsigned long start = migrate->start;
2538 	unsigned long addr, i, restore = 0;
2539 
2540 	for (i = 0; i < npages; i++) {
2541 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2542 
2543 		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2544 			continue;
2545 
2546 		if (page_mapped(page)) {
2547 			try_to_unmap(page, flags);
2548 			if (page_mapped(page))
2549 				goto restore;
2550 		}
2551 
2552 		if (migrate_vma_check_page(page))
2553 			continue;
2554 
2555 restore:
2556 		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2557 		migrate->cpages--;
2558 		restore++;
2559 	}
2560 
2561 	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
2562 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2563 
2564 		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2565 			continue;
2566 
2567 		remove_migration_ptes(page, page, false);
2568 
2569 		migrate->src[i] = 0;
2570 		unlock_page(page);
2571 		restore--;
2572 
2573 		if (is_zone_device_page(page))
2574 			put_page(page);
2575 		else
2576 			putback_lru_page(page);
2577 	}
2578 }
2579 
2580 static void migrate_vma_insert_page(struct migrate_vma *migrate,
2581 				    unsigned long addr,
2582 				    struct page *page,
2583 				    unsigned long *src,
2584 				    unsigned long *dst)
2585 {
2586 	struct vm_area_struct *vma = migrate->vma;
2587 	struct mm_struct *mm = vma->vm_mm;
2588 	struct mem_cgroup *memcg;
2589 	bool flush = false;
2590 	spinlock_t *ptl;
2591 	pte_t entry;
2592 	pgd_t *pgdp;
2593 	p4d_t *p4dp;
2594 	pud_t *pudp;
2595 	pmd_t *pmdp;
2596 	pte_t *ptep;
2597 
2598 	/* Only allow populating anonymous memory */
2599 	if (!vma_is_anonymous(vma))
2600 		goto abort;
2601 
2602 	pgdp = pgd_offset(mm, addr);
2603 	p4dp = p4d_alloc(mm, pgdp, addr);
2604 	if (!p4dp)
2605 		goto abort;
2606 	pudp = pud_alloc(mm, p4dp, addr);
2607 	if (!pudp)
2608 		goto abort;
2609 	pmdp = pmd_alloc(mm, pudp, addr);
2610 	if (!pmdp)
2611 		goto abort;
2612 
2613 	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2614 		goto abort;
2615 
2616 	/*
2617 	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
2618 	 * pte_offset_map() on pmds where a huge pmd might be created
2619 	 * from a different thread.
2620 	 *
2621 	 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
2622 	 * parallel threads are excluded by other means.
2623 	 *
2624 	 * Here we only have down_read(mmap_sem).
2625 	 */
2626 	if (pte_alloc(mm, pmdp))
2627 		goto abort;
2628 
2629 	/* See the comment in pte_alloc_one_map() */
2630 	if (unlikely(pmd_trans_unstable(pmdp)))
2631 		goto abort;
2632 
2633 	if (unlikely(anon_vma_prepare(vma)))
2634 		goto abort;
2635 	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
2636 		goto abort;
2637 
2638 	/*
2639 	 * The memory barrier inside __SetPageUptodate makes sure that
2640 	 * preceding stores to the page contents become visible before
2641 	 * the set_pte_at() write.
2642 	 */
2643 	__SetPageUptodate(page);
2644 
2645 	if (is_zone_device_page(page)) {
2646 		if (is_device_private_page(page)) {
2647 			swp_entry_t swp_entry;
2648 
2649 			swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
2650 			entry = swp_entry_to_pte(swp_entry);
2651 		}
2652 	} else {
2653 		entry = mk_pte(page, vma->vm_page_prot);
2654 		if (vma->vm_flags & VM_WRITE)
2655 			entry = pte_mkwrite(pte_mkdirty(entry));
2656 	}
2657 
2658 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2659 
2660 	if (pte_present(*ptep)) {
2661 		unsigned long pfn = pte_pfn(*ptep);
2662 
2663 		if (!is_zero_pfn(pfn)) {
2664 			pte_unmap_unlock(ptep, ptl);
2665 			mem_cgroup_cancel_charge(page, memcg, false);
2666 			goto abort;
2667 		}
2668 		flush = true;
2669 	} else if (!pte_none(*ptep)) {
2670 		pte_unmap_unlock(ptep, ptl);
2671 		mem_cgroup_cancel_charge(page, memcg, false);
2672 		goto abort;
2673 	}
2674 
2675 	/*
2676 	 * Check for userfaultfd but do not deliver the fault. Instead,
2677 	 * just back off.
2678 	 */
2679 	if (userfaultfd_missing(vma)) {
2680 		pte_unmap_unlock(ptep, ptl);
2681 		mem_cgroup_cancel_charge(page, memcg, false);
2682 		goto abort;
2683 	}
2684 
2685 	inc_mm_counter(mm, MM_ANONPAGES);
2686 	page_add_new_anon_rmap(page, vma, addr, false);
2687 	mem_cgroup_commit_charge(page, memcg, false, false);
2688 	if (!is_zone_device_page(page))
2689 		lru_cache_add_active_or_unevictable(page, vma);
2690 	get_page(page);
2691 
2692 	if (flush) {
2693 		flush_cache_page(vma, addr, pte_pfn(*ptep));
2694 		ptep_clear_flush_notify(vma, addr, ptep);
2695 		set_pte_at_notify(mm, addr, ptep, entry);
2696 		update_mmu_cache(vma, addr, ptep);
2697 	} else {
2698 		/* No need to invalidate - it was non-present before */
2699 		set_pte_at(mm, addr, ptep, entry);
2700 		update_mmu_cache(vma, addr, ptep);
2701 	}
2702 
2703 	pte_unmap_unlock(ptep, ptl);
2704 	*src = MIGRATE_PFN_MIGRATE;
2705 	return;
2706 
2707 abort:
2708 	*src &= ~MIGRATE_PFN_MIGRATE;
2709 }
2710 
2711 /*
2712  * migrate_vma_pages() - migrate meta-data from src page to dst page
2713  * @migrate: migrate struct containing all migration information
2714  *
2715  * This migrates struct page meta-data from source struct page to destination
2716  * struct page. This effectively finishes the migration from source page to the
2717  * destination page.
2718  */
2719 static void migrate_vma_pages(struct migrate_vma *migrate)
2720 {
2721 	const unsigned long npages = migrate->npages;
2722 	const unsigned long start = migrate->start;
2723 	struct mmu_notifier_range range;
2724 	unsigned long addr, i;
2725 	bool notified = false;
2726 
2727 	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
2728 		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2729 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2730 		struct address_space *mapping;
2731 		int r;
2732 
2733 		if (!newpage) {
2734 			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2735 			continue;
2736 		}
2737 
2738 		if (!page) {
2739 			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) {
2740 				continue;
2741 			}
2742 			if (!notified) {
2743 				notified = true;
2744 
2745 				mmu_notifier_range_init(&range,
2746 							MMU_NOTIFY_CLEAR, 0,
2747 							NULL,
2748 							migrate->vma->vm_mm,
2749 							addr, migrate->end);
2750 				mmu_notifier_invalidate_range_start(&range);
2751 			}
2752 			migrate_vma_insert_page(migrate, addr, newpage,
2753 						&migrate->src[i],
2754 						&migrate->dst[i]);
2755 			continue;
2756 		}
2757 
2758 		mapping = page_mapping(page);
2759 
2760 		if (is_zone_device_page(newpage)) {
2761 			if (is_device_private_page(newpage)) {
2762 				/*
2763 				 * For now only support private anonymous memory
2764 				 * when migrating to un-addressable device memory.
2765 				 */
2766 				if (mapping) {
2767 					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2768 					continue;
2769 				}
2770 			} else {
2771 				/*
2772 				 * Other types of ZONE_DEVICE page are not
2773 				 * supported.
2774 				 */
2775 				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2776 				continue;
2777 			}
2778 		}
2779 
2780 		r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
2781 		if (r != MIGRATEPAGE_SUCCESS)
2782 			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2783 	}
2784 
2785 	/*
2786 	 * No need to call the mmu_notifier->invalidate_range() callback twice,
2787 	 * as the ptep_clear_flush_notify() above, inside migrate_vma_insert_page(),
2788 	 * already called it.
2789 	 */
2790 	if (notified)
2791 		mmu_notifier_invalidate_range_only_end(&range);
2792 }
2793 
2794 /*
2795  * migrate_vma_finalize() - restore CPU page table entry
2796  * @migrate: migrate struct containing all migration information
2797  *
2798  * This replaces the special migration pte entry with either a mapping to the
2799  * new page if migration was successful for that page, or to the original page
2800  * otherwise.
2801  *
2802  * This also unlocks the pages and puts them back on the lru, or drops the extra
2803  * refcount, for device pages.
2804  */
2805 static void migrate_vma_finalize(struct migrate_vma *migrate)
2806 {
2807 	const unsigned long npages = migrate->npages;
2808 	unsigned long i;
2809 
2810 	for (i = 0; i < npages; i++) {
2811 		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2812 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2813 
2814 		if (!page) {
2815 			if (newpage) {
2816 				unlock_page(newpage);
2817 				put_page(newpage);
2818 			}
2819 			continue;
2820 		}
2821 
2822 		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
2823 			if (newpage) {
2824 				unlock_page(newpage);
2825 				put_page(newpage);
2826 			}
2827 			newpage = page;
2828 		}
2829 
2830 		remove_migration_ptes(page, newpage, false);
2831 		unlock_page(page);
2832 		migrate->cpages--;
2833 
2834 		if (is_zone_device_page(page))
2835 			put_page(page);
2836 		else
2837 			putback_lru_page(page);
2838 
2839 		if (newpage != page) {
2840 			unlock_page(newpage);
2841 			if (is_zone_device_page(newpage))
2842 				put_page(newpage);
2843 			else
2844 				putback_lru_page(newpage);
2845 		}
2846 	}
2847 }
2848 
2849 /*
2850  * migrate_vma() - migrate a range of memory inside vma
2851  *
2852  * @ops: migration callback for allocating destination memory and copying
2853  * @vma: virtual memory area containing the range to be migrated
2854  * @start: start address of the range to migrate (inclusive)
2855  * @end: end address of the range to migrate (exclusive)
2856  * @src: array of hmm_pfn_t containing source pfns
2857  * @dst: array of hmm_pfn_t containing destination pfns
2858  * @private: pointer passed back to each of the callback
2859  * Returns: 0 on success, error code otherwise
2860  *
2861  * This function tries to migrate a virtual address range, using callbacks
2862  * to allocate and copy memory from source to destination. First it
2863  * collects all the pages backing each virtual address in the range, saving
2864  * them in the src array. Then it locks those pages and unmaps them. Once the pages
2865  * are locked and unmapped, it checks whether each page is pinned or not. Pages
2866  * that aren't pinned have the MIGRATE_PFN_MIGRATE flag set (by this function)
2867  * in the corresponding src array entry. It then restores any pages that are
2868  * pinned, by remapping and unlocking those pages.
2869  *
2870  * At this point it calls the alloc_and_copy() callback. For documentation on
2871  * what is expected from that callback, see struct migrate_vma_ops comments in
2872  * include/linux/migrate.h
2873  *
2874  * After the alloc_and_copy() callback, this function goes over each entry in
2875  * the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
2876  * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
2877  * then the function tries to migrate struct page information from the source
2878  * struct page to the destination struct page. If it fails to migrate the struct
2879  * page information, then it clears the MIGRATE_PFN_MIGRATE flag in the src
2880  * array.
2881  *
2882  * At this point all successfully migrated pages have an entry in the src
2883  * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2884  * array entry with MIGRATE_PFN_VALID flag set.
2885  *
2886  * It then calls the finalize_and_map() callback. See comments for "struct
2887  * migrate_vma_ops", in include/linux/migrate.h for details about
2888  * finalize_and_map() behavior.
2889  *
2890  * After the finalize_and_map() callback, for successfully migrated pages, this
2891  * function updates the CPU page table to point to new pages, otherwise it
2892  * restores the CPU page table to point to the original source pages.
2893  *
2894  * The function returns 0 after the above steps, even if no pages were migrated
2895  * (it only returns an error if any of the arguments are invalid).
2896  *
2897  * Both the src and dst arrays must be big enough for (end - start) >> PAGE_SHIFT
2898  * unsigned long entries.
2899  */
2900 int migrate_vma(const struct migrate_vma_ops *ops,
2901 		struct vm_area_struct *vma,
2902 		unsigned long start,
2903 		unsigned long end,
2904 		unsigned long *src,
2905 		unsigned long *dst,
2906 		void *private)
2907 {
2908 	struct migrate_vma migrate;
2909 
2910 	/* Sanity check the arguments */
2911 	start &= PAGE_MASK;
2912 	end &= PAGE_MASK;
2913 	if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
2914 			vma_is_dax(vma))
2915 		return -EINVAL;
2916 	if (start < vma->vm_start || start >= vma->vm_end)
2917 		return -EINVAL;
2918 	if (end <= vma->vm_start || end > vma->vm_end)
2919 		return -EINVAL;
2920 	if (!ops || !src || !dst || start >= end)
2921 		return -EINVAL;
2922 
2923 	memset(src, 0, sizeof(*src) * ((end - start) >> PAGE_SHIFT));
2924 	migrate.src = src;
2925 	migrate.dst = dst;
2926 	migrate.start = start;
2927 	migrate.npages = 0;
2928 	migrate.cpages = 0;
2929 	migrate.end = end;
2930 	migrate.vma = vma;
2931 
2932 	/* Collect, and try to unmap source pages */
2933 	migrate_vma_collect(&migrate);
2934 	if (!migrate.cpages)
2935 		return 0;
2936 
2937 	/* Lock and isolate page */
2938 	migrate_vma_prepare(&migrate);
2939 	if (!migrate.cpages)
2940 		return 0;
2941 
2942 	/* Unmap pages */
2943 	migrate_vma_unmap(&migrate);
2944 	if (!migrate.cpages)
2945 		return 0;
2946 
2947 	/*
2948 	 * At this point pages are locked and unmapped, and thus they have
2949 	 * stable content and can safely be copied to destination memory that
2950 	 * is allocated by the callback.
2951 	 *
2952 	 * Note that migration can still fail for each individual page, in
2953 	 * migrate_vma_pages() below.
2954 	 */
2955 	ops->alloc_and_copy(vma, src, dst, start, end, private);
2956 
2957 	/* This does the real migration of struct page */
2958 	migrate_vma_pages(&migrate);
2959 
2960 	ops->finalize_and_map(vma, src, dst, start, end, private);
2961 
2962 	/* Unlock and remap pages */
2963 	migrate_vma_finalize(&migrate);
2964 
2965 	return 0;
2966 }
2967 EXPORT_SYMBOL(migrate_vma);
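
/*
 * Driver-side usage sketch (illustrative; the callback signatures are defined
 * by struct migrate_vma_ops in include/linux/migrate.h): a device driver that
 * wants to pull NPAGES pages of a VMA into its own memory would do roughly
 *
 *	static const struct migrate_vma_ops my_ops = {
 *		.alloc_and_copy		= my_alloc_and_copy,
 *		.finalize_and_map	= my_finalize_and_map,
 *	};
 *
 *	unsigned long src[NPAGES], dst[NPAGES];
 *
 *	ret = migrate_vma(&my_ops, vma, start, start + NPAGES * PAGE_SIZE,
 *			  src, dst, my_private);
 *
 * where my_alloc_and_copy() fills dst[] with migrate_pfn()-encoded device
 * pages for every src[] entry that has MIGRATE_PFN_MIGRATE set, and
 * my_finalize_and_map() learns which entries actually migrated. All my_*
 * names here are hypothetical placeholders, not existing symbols.
 */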
2968 #endif /* defined(CONFIG_MIGRATE_VMA_HELPER) */
2969