xref: /openbmc/linux/mm/migrate.c (revision 47010c04)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15 
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/pagevec.h>
25 #include <linux/ksm.h>
26 #include <linux/rmap.h>
27 #include <linux/topology.h>
28 #include <linux/cpu.h>
29 #include <linux/cpuset.h>
30 #include <linux/writeback.h>
31 #include <linux/mempolicy.h>
32 #include <linux/vmalloc.h>
33 #include <linux/security.h>
34 #include <linux/backing-dev.h>
35 #include <linux/compaction.h>
36 #include <linux/syscalls.h>
37 #include <linux/compat.h>
38 #include <linux/hugetlb.h>
39 #include <linux/hugetlb_cgroup.h>
40 #include <linux/gfp.h>
41 #include <linux/pfn_t.h>
42 #include <linux/memremap.h>
43 #include <linux/userfaultfd_k.h>
44 #include <linux/balloon_compaction.h>
45 #include <linux/page_idle.h>
46 #include <linux/page_owner.h>
47 #include <linux/sched/mm.h>
48 #include <linux/ptrace.h>
49 #include <linux/oom.h>
50 #include <linux/memory.h>
51 #include <linux/random.h>
52 #include <linux/sched/sysctl.h>
53 
54 #include <asm/tlbflush.h>
55 
56 #include <trace/events/migrate.h>
57 
58 #include "internal.h"
59 
60 int isolate_movable_page(struct page *page, isolate_mode_t mode)
61 {
62 	struct address_space *mapping;
63 
64 	/*
65 	 * Avoid burning cycles with pages that are yet under __free_pages(),
66 	 * or just got freed under us.
67 	 *
68 	 * In case we 'win' a race for a movable page being freed under us and
69 	 * raise its refcount, preventing __free_pages() from doing its job,
70 	 * the put_page() at the end of this block will take care of
71 	 * releasing this page, thus avoiding a nasty leak.
72 	 */
73 	if (unlikely(!get_page_unless_zero(page)))
74 		goto out;
75 
76 	/*
77 	 * Check PageMovable before holding a PG_lock because the page's owner
78 	 * assumes nobody touches the PG_lock of a newly allocated page,
79 	 * so unconditionally grabbing the lock would break the owner's assumptions.
80 	 */
81 	if (unlikely(!__PageMovable(page)))
82 		goto out_putpage;
83 	/*
84 	 * As movable pages are not isolated from LRU lists, concurrent
85 	 * compaction threads can race against page migration functions
86 	 * as well as against the release of a page.
87 	 *
88 	 * In order to avoid having an already isolated movable page
89 	 * being (wrongly) re-isolated while it is under migration,
90 	 * or to avoid attempting to isolate pages being released,
91 	 * let's be sure we have the page lock
92 	 * before proceeding with the movable page isolation steps.
93 	 */
94 	if (unlikely(!trylock_page(page)))
95 		goto out_putpage;
96 
97 	if (!PageMovable(page) || PageIsolated(page))
98 		goto out_no_isolated;
99 
100 	mapping = page_mapping(page);
101 	VM_BUG_ON_PAGE(!mapping, page);
102 
103 	if (!mapping->a_ops->isolate_page(page, mode))
104 		goto out_no_isolated;
105 
106 	/* Driver shouldn't use PG_isolated bit of page->flags */
107 	WARN_ON_ONCE(PageIsolated(page));
108 	SetPageIsolated(page);
109 	unlock_page(page);
110 
111 	return 0;
112 
113 out_no_isolated:
114 	unlock_page(page);
115 out_putpage:
116 	put_page(page);
117 out:
118 	return -EBUSY;
119 }
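
/*
 * Example (illustrative sketch, hypothetical "my_" names): a driver opts its
 * pages into this non-LRU movable path by giving them a mapping whose
 * address_space_operations provide isolate_page/migratepage/putback_page and
 * by marking each page with __SetPageMovable():
 *
 *	static const struct address_space_operations my_movable_aops = {
 *		.isolate_page	= my_isolate_page,
 *		.migratepage	= my_migratepage,
 *		.putback_page	= my_putback_page,
 *	};
 *
 *	__SetPageMovable(page, mapping);	// mapping->a_ops == &my_movable_aops
 *	...
 *	__ClearPageMovable(page);		// before the driver frees the page
 */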
120 
121 static void putback_movable_page(struct page *page)
122 {
123 	struct address_space *mapping;
124 
125 	mapping = page_mapping(page);
126 	mapping->a_ops->putback_page(page);
127 	ClearPageIsolated(page);
128 }
129 
130 /*
131  * Put previously isolated pages back onto the appropriate lists
132  * from where they were once taken off for compaction/migration.
133  *
134  * This function shall be used whenever the isolated pageset has been
135  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
136  * and isolate_huge_page().
137  */
138 void putback_movable_pages(struct list_head *l)
139 {
140 	struct page *page;
141 	struct page *page2;
142 
143 	list_for_each_entry_safe(page, page2, l, lru) {
144 		if (unlikely(PageHuge(page))) {
145 			putback_active_hugepage(page);
146 			continue;
147 		}
148 		list_del(&page->lru);
149 		/*
150 		 * We isolated a non-LRU movable page, so here we can use
151 		 * __PageMovable because an LRU page's mapping cannot have
152 		 * PAGE_MAPPING_MOVABLE set.
153 		 */
154 		if (unlikely(__PageMovable(page))) {
155 			VM_BUG_ON_PAGE(!PageIsolated(page), page);
156 			lock_page(page);
157 			if (PageMovable(page))
158 				putback_movable_page(page);
159 			else
160 				ClearPageIsolated(page);
161 			unlock_page(page);
162 			put_page(page);
163 		} else {
164 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
165 					page_is_file_lru(page), -thp_nr_pages(page));
166 			putback_lru_page(page);
167 		}
168 	}
169 }
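
/*
 * Example (illustrative sketch): callers pair the isolation above with this
 * putback.  Pages that isolate_movable_page() accepted are collected on a
 * private list and, if migration of that list fails or is abandoned, handed
 * back here:
 *
 *	LIST_HEAD(pagelist);
 *
 *	if (!isolate_movable_page(page, ISOLATE_UNEVICTABLE))
 *		list_add_tail(&page->lru, &pagelist);
 *	...
 *	if (migrate_pages(...))		// see the full example near migrate_pages()
 *		putback_movable_pages(&pagelist);
 */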
170 
171 /*
172  * Restore a potential migration pte to a working pte entry
173  */
174 static bool remove_migration_pte(struct folio *folio,
175 		struct vm_area_struct *vma, unsigned long addr, void *old)
176 {
177 	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
178 
179 	while (page_vma_mapped_walk(&pvmw)) {
180 		pte_t pte;
181 		swp_entry_t entry;
182 		struct page *new;
183 		unsigned long idx = 0;
184 
185 		/* pgoff is invalid for ksm pages, but they are never large */
186 		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
187 			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
188 		new = folio_page(folio, idx);
189 
190 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
191 		/* PMD-mapped THP migration entry */
192 		if (!pvmw.pte) {
193 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
194 					!folio_test_pmd_mappable(folio), folio);
195 			remove_migration_pmd(&pvmw, new);
196 			continue;
197 		}
198 #endif
199 
200 		folio_get(folio);
201 		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
202 		if (pte_swp_soft_dirty(*pvmw.pte))
203 			pte = pte_mksoft_dirty(pte);
204 
205 		/*
206 		 * Recheck VMA as permissions can change since migration started
207 		 */
208 		entry = pte_to_swp_entry(*pvmw.pte);
209 		if (is_writable_migration_entry(entry))
210 			pte = maybe_mkwrite(pte, vma);
211 		else if (pte_swp_uffd_wp(*pvmw.pte))
212 			pte = pte_mkuffd_wp(pte);
213 
214 		if (unlikely(is_device_private_page(new))) {
215 			if (pte_write(pte))
216 				entry = make_writable_device_private_entry(
217 							page_to_pfn(new));
218 			else
219 				entry = make_readable_device_private_entry(
220 							page_to_pfn(new));
221 			pte = swp_entry_to_pte(entry);
222 			if (pte_swp_soft_dirty(*pvmw.pte))
223 				pte = pte_swp_mksoft_dirty(pte);
224 			if (pte_swp_uffd_wp(*pvmw.pte))
225 				pte = pte_swp_mkuffd_wp(pte);
226 		}
227 
228 #ifdef CONFIG_HUGETLB_PAGE
229 		if (folio_test_hugetlb(folio)) {
230 			unsigned int shift = huge_page_shift(hstate_vma(vma));
231 
232 			pte = pte_mkhuge(pte);
233 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
234 			if (folio_test_anon(folio))
235 				hugepage_add_anon_rmap(new, vma, pvmw.address);
236 			else
237 				page_dup_rmap(new, true);
238 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
239 		} else
240 #endif
241 		{
242 			if (folio_test_anon(folio))
243 				page_add_anon_rmap(new, vma, pvmw.address, false);
244 			else
245 				page_add_file_rmap(new, vma, false);
246 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
247 		}
248 		if (vma->vm_flags & VM_LOCKED)
249 			mlock_page_drain_local();
250 
251 		trace_remove_migration_pte(pvmw.address, pte_val(pte),
252 					   compound_order(new));
253 
254 		/* No need to invalidate - it was non-present before */
255 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
256 	}
257 
258 	return true;
259 }
260 
261 /*
262  * Get rid of all migration entries and replace them by
263  * references to the indicated page.
264  */
265 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
266 {
267 	struct rmap_walk_control rwc = {
268 		.rmap_one = remove_migration_pte,
269 		.arg = src,
270 	};
271 
272 	if (locked)
273 		rmap_walk_locked(dst, &rwc);
274 	else
275 		rmap_walk(dst, &rwc);
276 }
277 
278 /*
279  * Something used the pte of a page under migration. We need to
280  * get to the page and wait until migration is finished.
281  * When we return from this function the fault will be retried.
282  */
283 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
284 				spinlock_t *ptl)
285 {
286 	pte_t pte;
287 	swp_entry_t entry;
288 
289 	spin_lock(ptl);
290 	pte = *ptep;
291 	if (!is_swap_pte(pte))
292 		goto out;
293 
294 	entry = pte_to_swp_entry(pte);
295 	if (!is_migration_entry(entry))
296 		goto out;
297 
298 	migration_entry_wait_on_locked(entry, ptep, ptl);
299 	return;
300 out:
301 	pte_unmap_unlock(ptep, ptl);
302 }
303 
304 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
305 				unsigned long address)
306 {
307 	spinlock_t *ptl = pte_lockptr(mm, pmd);
308 	pte_t *ptep = pte_offset_map(pmd, address);
309 	__migration_entry_wait(mm, ptep, ptl);
310 }
311 
312 void migration_entry_wait_huge(struct vm_area_struct *vma,
313 		struct mm_struct *mm, pte_t *pte)
314 {
315 	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
316 	__migration_entry_wait(mm, pte, ptl);
317 }
318 
319 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
320 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
321 {
322 	spinlock_t *ptl;
323 
324 	ptl = pmd_lock(mm, pmd);
325 	if (!is_pmd_migration_entry(*pmd))
326 		goto unlock;
327 	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
328 	return;
329 unlock:
330 	spin_unlock(ptl);
331 }
332 #endif
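
/*
 * Example (hedged sketch of the caller side): when a faulting pte turns out
 * to be a migration entry, the fault path waits here and then retries the
 * fault, roughly:
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry)) {
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 *		return 0;	// the fault is retried once migration completes
 *	}
 */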
333 
334 static int expected_page_refs(struct address_space *mapping, struct page *page)
335 {
336 	int expected_count = 1;
337 
338 	if (mapping)
339 		expected_count += compound_nr(page) + page_has_private(page);
340 	return expected_count;
341 }
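
/*
 * Worked example: for an order-0 page-cache page with buffer heads attached,
 * this is 1 (the reference held by the migration caller) + compound_nr() == 1
 * (the page-cache reference) + 1 (page_has_private() for the buffers) == 3,
 * matching the "3 for pages with a mapping and PagePrivate" rule documented
 * below.
 */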
342 
343 /*
344  * Replace the page in the mapping.
345  *
346  * The number of remaining references must be:
347  * 1 for anonymous pages without a mapping
348  * 2 for pages with a mapping
349  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
350  */
351 int folio_migrate_mapping(struct address_space *mapping,
352 		struct folio *newfolio, struct folio *folio, int extra_count)
353 {
354 	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
355 	struct zone *oldzone, *newzone;
356 	int dirty;
357 	int expected_count = expected_page_refs(mapping, &folio->page) + extra_count;
358 	long nr = folio_nr_pages(folio);
359 
360 	if (!mapping) {
361 		/* Anonymous page without mapping */
362 		if (folio_ref_count(folio) != expected_count)
363 			return -EAGAIN;
364 
365 		/* No turning back from here */
366 		newfolio->index = folio->index;
367 		newfolio->mapping = folio->mapping;
368 		if (folio_test_swapbacked(folio))
369 			__folio_set_swapbacked(newfolio);
370 
371 		return MIGRATEPAGE_SUCCESS;
372 	}
373 
374 	oldzone = folio_zone(folio);
375 	newzone = folio_zone(newfolio);
376 
377 	xas_lock_irq(&xas);
378 	if (!folio_ref_freeze(folio, expected_count)) {
379 		xas_unlock_irq(&xas);
380 		return -EAGAIN;
381 	}
382 
383 	/*
384 	 * Now we know that no one else is looking at the folio:
385 	 * no turning back from here.
386 	 */
387 	newfolio->index = folio->index;
388 	newfolio->mapping = folio->mapping;
389 	folio_ref_add(newfolio, nr); /* add cache reference */
390 	if (folio_test_swapbacked(folio)) {
391 		__folio_set_swapbacked(newfolio);
392 		if (folio_test_swapcache(folio)) {
393 			folio_set_swapcache(newfolio);
394 			newfolio->private = folio_get_private(folio);
395 		}
396 	} else {
397 		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
398 	}
399 
400 	/* Move dirty while page refs frozen and newpage not yet exposed */
401 	dirty = folio_test_dirty(folio);
402 	if (dirty) {
403 		folio_clear_dirty(folio);
404 		folio_set_dirty(newfolio);
405 	}
406 
407 	xas_store(&xas, newfolio);
408 
409 	/*
410 	 * Drop cache reference from old page by unfreezing
411 	 * to one less reference.
412 	 * We know this isn't the last reference.
413 	 */
414 	folio_ref_unfreeze(folio, expected_count - nr);
415 
416 	xas_unlock(&xas);
417 	/* Leave irq disabled to prevent preemption while updating stats */
418 
419 	/*
420 	 * If moved to a different zone then also account
421 	 * the page for that zone. Other VM counters will be
422 	 * taken care of when we establish references to the
423 	 * new page and drop references to the old page.
424 	 *
425 	 * Note that anonymous pages are accounted for
426 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
427 	 * are mapped to swap space.
428 	 */
429 	if (newzone != oldzone) {
430 		struct lruvec *old_lruvec, *new_lruvec;
431 		struct mem_cgroup *memcg;
432 
433 		memcg = folio_memcg(folio);
434 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
435 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
436 
437 		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
438 		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
439 		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
440 			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
441 			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
442 		}
443 #ifdef CONFIG_SWAP
444 		if (folio_test_swapcache(folio)) {
445 			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
446 			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
447 		}
448 #endif
449 		if (dirty && mapping_can_writeback(mapping)) {
450 			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
451 			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
452 			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
453 			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
454 		}
455 	}
456 	local_irq_enable();
457 
458 	return MIGRATEPAGE_SUCCESS;
459 }
460 EXPORT_SYMBOL(folio_migrate_mapping);
461 
462 /*
463  * The expected number of remaining references is the same as that
464  * of folio_migrate_mapping().
465  */
466 int migrate_huge_page_move_mapping(struct address_space *mapping,
467 				   struct page *newpage, struct page *page)
468 {
469 	XA_STATE(xas, &mapping->i_pages, page_index(page));
470 	int expected_count;
471 
472 	xas_lock_irq(&xas);
473 	expected_count = 2 + page_has_private(page);
474 	if (!page_ref_freeze(page, expected_count)) {
475 		xas_unlock_irq(&xas);
476 		return -EAGAIN;
477 	}
478 
479 	newpage->index = page->index;
480 	newpage->mapping = page->mapping;
481 
482 	get_page(newpage);
483 
484 	xas_store(&xas, newpage);
485 
486 	page_ref_unfreeze(page, expected_count - 1);
487 
488 	xas_unlock_irq(&xas);
489 
490 	return MIGRATEPAGE_SUCCESS;
491 }
492 
493 /*
494  * Copy the flags and some other ancillary information
495  */
496 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
497 {
498 	int cpupid;
499 
500 	if (folio_test_error(folio))
501 		folio_set_error(newfolio);
502 	if (folio_test_referenced(folio))
503 		folio_set_referenced(newfolio);
504 	if (folio_test_uptodate(folio))
505 		folio_mark_uptodate(newfolio);
506 	if (folio_test_clear_active(folio)) {
507 		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
508 		folio_set_active(newfolio);
509 	} else if (folio_test_clear_unevictable(folio))
510 		folio_set_unevictable(newfolio);
511 	if (folio_test_workingset(folio))
512 		folio_set_workingset(newfolio);
513 	if (folio_test_checked(folio))
514 		folio_set_checked(newfolio);
515 	if (folio_test_mappedtodisk(folio))
516 		folio_set_mappedtodisk(newfolio);
517 
518 	/* Move dirty on pages not done by folio_migrate_mapping() */
519 	if (folio_test_dirty(folio))
520 		folio_set_dirty(newfolio);
521 
522 	if (folio_test_young(folio))
523 		folio_set_young(newfolio);
524 	if (folio_test_idle(folio))
525 		folio_set_idle(newfolio);
526 
527 	/*
528 	 * Copy NUMA information to the new page, to prevent over-eager
529 	 * future migrations of this same page.
530 	 */
531 	cpupid = page_cpupid_xchg_last(&folio->page, -1);
532 	page_cpupid_xchg_last(&newfolio->page, cpupid);
533 
534 	folio_migrate_ksm(newfolio, folio);
535 	/*
536 	 * Please do not reorder this without considering how mm/ksm.c's
537 	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
538 	 */
539 	if (folio_test_swapcache(folio))
540 		folio_clear_swapcache(folio);
541 	folio_clear_private(folio);
542 
543 	/* page->private contains hugetlb specific flags */
544 	if (!folio_test_hugetlb(folio))
545 		folio->private = NULL;
546 
547 	/*
548 	 * If any waiters have accumulated on the new page then
549 	 * wake them up.
550 	 */
551 	if (folio_test_writeback(newfolio))
552 		folio_end_writeback(newfolio);
553 
554 	/*
555 	 * PG_readahead shares the same bit with PG_reclaim.  The above
556 	 * folio_end_writeback() may clear PG_readahead mistakenly, so set the
557 	 * bit after that.
558 	 */
559 	if (folio_test_readahead(folio))
560 		folio_set_readahead(newfolio);
561 
562 	folio_copy_owner(newfolio, folio);
563 
564 	if (!folio_test_hugetlb(folio))
565 		mem_cgroup_migrate(folio, newfolio);
566 }
567 EXPORT_SYMBOL(folio_migrate_flags);
568 
569 void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
570 {
571 	folio_copy(newfolio, folio);
572 	folio_migrate_flags(newfolio, folio);
573 }
574 EXPORT_SYMBOL(folio_migrate_copy);
575 
576 /************************************************************
577  *                    Migration functions
578  ***********************************************************/
579 
580 /*
581  * Common logic to directly migrate a single LRU page suitable for
582  * pages that do not use PagePrivate/PagePrivate2.
583  *
584  * Pages are locked upon entry and exit.
585  */
586 int migrate_page(struct address_space *mapping,
587 		struct page *newpage, struct page *page,
588 		enum migrate_mode mode)
589 {
590 	struct folio *newfolio = page_folio(newpage);
591 	struct folio *folio = page_folio(page);
592 	int rc;
593 
594 	BUG_ON(folio_test_writeback(folio));	/* Writeback must be complete */
595 
596 	rc = folio_migrate_mapping(mapping, newfolio, folio, 0);
597 
598 	if (rc != MIGRATEPAGE_SUCCESS)
599 		return rc;
600 
601 	if (mode != MIGRATE_SYNC_NO_COPY)
602 		folio_migrate_copy(newfolio, folio);
603 	else
604 		folio_migrate_flags(newfolio, folio);
605 	return MIGRATEPAGE_SUCCESS;
606 }
607 EXPORT_SYMBOL(migrate_page);
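
/*
 * Example (illustrative sketch, hypothetical "my_fs_" names): a filesystem
 * whose pages carry no PagePrivate data typically wires this helper up by
 * pointing the migratepage callback of its address_space_operations at
 * migrate_page(); that callback is what move_to_new_page() below invokes:
 *
 *	static const struct address_space_operations my_fs_aops = {
 *		.readpage	= my_fs_readpage,
 *		.writepage	= my_fs_writepage,
 *		.migratepage	= migrate_page,
 *	};
 */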
608 
609 #ifdef CONFIG_BLOCK
610 /* Returns true if all buffers are successfully locked */
611 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
612 							enum migrate_mode mode)
613 {
614 	struct buffer_head *bh = head;
615 
616 	/* Simple case, sync compaction */
617 	if (mode != MIGRATE_ASYNC) {
618 		do {
619 			lock_buffer(bh);
620 			bh = bh->b_this_page;
621 
622 		} while (bh != head);
623 
624 		return true;
625 	}
626 
627 	/* async case, we cannot block on lock_buffer so use trylock_buffer */
628 	do {
629 		if (!trylock_buffer(bh)) {
630 			/*
631 			 * We failed to lock the buffer and cannot stall in
632 			 * async migration. Release the taken locks
633 			 */
634 			struct buffer_head *failed_bh = bh;
635 			bh = head;
636 			while (bh != failed_bh) {
637 				unlock_buffer(bh);
638 				bh = bh->b_this_page;
639 			}
640 			return false;
641 		}
642 
643 		bh = bh->b_this_page;
644 	} while (bh != head);
645 	return true;
646 }
647 
648 static int __buffer_migrate_page(struct address_space *mapping,
649 		struct page *newpage, struct page *page, enum migrate_mode mode,
650 		bool check_refs)
651 {
652 	struct buffer_head *bh, *head;
653 	int rc;
654 	int expected_count;
655 
656 	if (!page_has_buffers(page))
657 		return migrate_page(mapping, newpage, page, mode);
658 
659 	/* Check whether page does not have extra refs before we do more work */
660 	expected_count = expected_page_refs(mapping, page);
661 	if (page_count(page) != expected_count)
662 		return -EAGAIN;
663 
664 	head = page_buffers(page);
665 	if (!buffer_migrate_lock_buffers(head, mode))
666 		return -EAGAIN;
667 
668 	if (check_refs) {
669 		bool busy;
670 		bool invalidated = false;
671 
672 recheck_buffers:
673 		busy = false;
674 		spin_lock(&mapping->private_lock);
675 		bh = head;
676 		do {
677 			if (atomic_read(&bh->b_count)) {
678 				busy = true;
679 				break;
680 			}
681 			bh = bh->b_this_page;
682 		} while (bh != head);
683 		if (busy) {
684 			if (invalidated) {
685 				rc = -EAGAIN;
686 				goto unlock_buffers;
687 			}
688 			spin_unlock(&mapping->private_lock);
689 			invalidate_bh_lrus();
690 			invalidated = true;
691 			goto recheck_buffers;
692 		}
693 	}
694 
695 	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
696 	if (rc != MIGRATEPAGE_SUCCESS)
697 		goto unlock_buffers;
698 
699 	attach_page_private(newpage, detach_page_private(page));
700 
701 	bh = head;
702 	do {
703 		set_bh_page(bh, newpage, bh_offset(bh));
704 		bh = bh->b_this_page;
705 
706 	} while (bh != head);
707 
708 	if (mode != MIGRATE_SYNC_NO_COPY)
709 		migrate_page_copy(newpage, page);
710 	else
711 		migrate_page_states(newpage, page);
712 
713 	rc = MIGRATEPAGE_SUCCESS;
714 unlock_buffers:
715 	if (check_refs)
716 		spin_unlock(&mapping->private_lock);
717 	bh = head;
718 	do {
719 		unlock_buffer(bh);
720 		bh = bh->b_this_page;
721 
722 	} while (bh != head);
723 
724 	return rc;
725 }
726 
727 /*
728  * Migration function for pages with buffers. This function can only be used
729  * if the underlying filesystem guarantees that no other references to "page"
730  * exist. For example attached buffer heads are accessed only under page lock.
731  */
732 int buffer_migrate_page(struct address_space *mapping,
733 		struct page *newpage, struct page *page, enum migrate_mode mode)
734 {
735 	return __buffer_migrate_page(mapping, newpage, page, mode, false);
736 }
737 EXPORT_SYMBOL(buffer_migrate_page);
738 
739 /*
740  * Same as above except that this variant is more careful and checks that there
741  * are also no buffer head references. This function is the right one for
742  * mappings where buffer heads are directly looked up and referenced (such as
743  * block device mappings).
744  */
745 int buffer_migrate_page_norefs(struct address_space *mapping,
746 		struct page *newpage, struct page *page, enum migrate_mode mode)
747 {
748 	return __buffer_migrate_page(mapping, newpage, page, mode, true);
749 }
750 #endif
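
/*
 * Example (illustrative sketch, hypothetical "my_blkdev_" name): filesystems
 * whose pages carry buffer heads typically point .migratepage at
 * buffer_migrate_page(), while block device mappings, where buffer heads are
 * looked up and referenced directly, use the stricter variant:
 *
 *	static const struct address_space_operations my_blkdev_aops = {
 *		...
 *		.migratepage	= buffer_migrate_page_norefs,
 *	};
 */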
751 
752 /*
753  * Writeback a page to clean the dirty state
754  */
755 static int writeout(struct address_space *mapping, struct page *page)
756 {
757 	struct folio *folio = page_folio(page);
758 	struct writeback_control wbc = {
759 		.sync_mode = WB_SYNC_NONE,
760 		.nr_to_write = 1,
761 		.range_start = 0,
762 		.range_end = LLONG_MAX,
763 		.for_reclaim = 1
764 	};
765 	int rc;
766 
767 	if (!mapping->a_ops->writepage)
768 		/* No write method for the address space */
769 		return -EINVAL;
770 
771 	if (!clear_page_dirty_for_io(page))
772 		/* Someone else already triggered a write */
773 		return -EAGAIN;
774 
775 	/*
776 	 * A dirty page may imply that the underlying filesystem has
777 	 * the page on some queue. So the page must be clean for
778 	 * migration. Writeout may mean we lose the lock and the
779 	 * page state is no longer what we checked for earlier.
780 	 * At this point we know that the migration attempt cannot
781 	 * be successful.
782 	 */
783 	remove_migration_ptes(folio, folio, false);
784 
785 	rc = mapping->a_ops->writepage(page, &wbc);
786 
787 	if (rc != AOP_WRITEPAGE_ACTIVATE)
788 		/* unlocked. Relock */
789 		lock_page(page);
790 
791 	return (rc < 0) ? -EIO : -EAGAIN;
792 }
793 
794 /*
795  * Default handling if a filesystem does not provide a migration function.
796  */
797 static int fallback_migrate_page(struct address_space *mapping,
798 	struct page *newpage, struct page *page, enum migrate_mode mode)
799 {
800 	if (PageDirty(page)) {
801 		/* Only writeback pages in full synchronous migration */
802 		switch (mode) {
803 		case MIGRATE_SYNC:
804 		case MIGRATE_SYNC_NO_COPY:
805 			break;
806 		default:
807 			return -EBUSY;
808 		}
809 		return writeout(mapping, page);
810 	}
811 
812 	/*
813 	 * Buffers may be managed in a filesystem specific way.
814 	 * We must have no buffers or drop them.
815 	 */
816 	if (page_has_private(page) &&
817 	    !try_to_release_page(page, GFP_KERNEL))
818 		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
819 
820 	return migrate_page(mapping, newpage, page, mode);
821 }
822 
823 /*
824  * Move a page to a newly allocated page.
825  * The page is locked and all ptes have been successfully removed.
826  *
827  * The new page will have replaced the old page if this function
828  * is successful.
829  *
830  * Return value:
831  *   < 0 - error code
832  *  MIGRATEPAGE_SUCCESS - success
833  */
834 static int move_to_new_page(struct page *newpage, struct page *page,
835 				enum migrate_mode mode)
836 {
837 	struct address_space *mapping;
838 	int rc = -EAGAIN;
839 	bool is_lru = !__PageMovable(page);
840 
841 	VM_BUG_ON_PAGE(!PageLocked(page), page);
842 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
843 
844 	mapping = page_mapping(page);
845 
846 	if (likely(is_lru)) {
847 		if (!mapping)
848 			rc = migrate_page(mapping, newpage, page, mode);
849 		else if (mapping->a_ops->migratepage)
850 			/*
851 			 * Most pages have a mapping and most filesystems
852 			 * provide a migratepage callback. Anonymous pages
853 			 * are part of swap space which also has its own
854 			 * migratepage callback. This is the most common path
855 			 * for page migration.
856 			 */
857 			rc = mapping->a_ops->migratepage(mapping, newpage,
858 							page, mode);
859 		else
860 			rc = fallback_migrate_page(mapping, newpage,
861 							page, mode);
862 	} else {
863 		/*
864 		 * In the case of a non-LRU page, it could have been released after
865 		 * the isolation step. In that case, we shouldn't try migration.
866 		 */
867 		VM_BUG_ON_PAGE(!PageIsolated(page), page);
868 		if (!PageMovable(page)) {
869 			rc = MIGRATEPAGE_SUCCESS;
870 			ClearPageIsolated(page);
871 			goto out;
872 		}
873 
874 		rc = mapping->a_ops->migratepage(mapping, newpage,
875 						page, mode);
876 		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
877 			!PageIsolated(page));
878 	}
879 
880 	/*
881 	 * When successful, old pagecache page->mapping must be cleared before
882 	 * page is freed; but stats require that PageAnon be left as PageAnon.
883 	 */
884 	if (rc == MIGRATEPAGE_SUCCESS) {
885 		if (__PageMovable(page)) {
886 			VM_BUG_ON_PAGE(!PageIsolated(page), page);
887 
888 			/*
889 			 * We clear PG_movable under page_lock so that no compactor
890 			 * can try to migrate this page.
891 			 */
892 			ClearPageIsolated(page);
893 		}
894 
895 		/*
896 		 * Anonymous and movable page->mapping will be cleared by
897 		 * free_pages_prepare so don't reset it here; keeping the
898 		 * type lets checks such as PageAnon still work.
899 		 */
900 		if (!PageMappingFlags(page))
901 			page->mapping = NULL;
902 
903 		if (likely(!is_zone_device_page(newpage)))
904 			flush_dcache_folio(page_folio(newpage));
905 	}
906 out:
907 	return rc;
908 }
909 
910 static int __unmap_and_move(struct page *page, struct page *newpage,
911 				int force, enum migrate_mode mode)
912 {
913 	struct folio *folio = page_folio(page);
914 	struct folio *dst = page_folio(newpage);
915 	int rc = -EAGAIN;
916 	bool page_was_mapped = false;
917 	struct anon_vma *anon_vma = NULL;
918 	bool is_lru = !__PageMovable(page);
919 
920 	if (!trylock_page(page)) {
921 		if (!force || mode == MIGRATE_ASYNC)
922 			goto out;
923 
924 		/*
925 		 * It's not safe for direct compaction to call lock_page.
926 		 * For example, during page readahead pages are added locked
927 		 * to the LRU. Later, when the IO completes the pages are
928 		 * marked uptodate and unlocked. However, the queueing
929 		 * could be merging multiple pages for one bio (e.g.
930 		 * mpage_readahead). If an allocation happens for the
931 		 * second or third page, the process can end up locking
932 		 * the same page twice and deadlocking. Rather than
933 		 * trying to be clever about what pages can be locked,
934 		 * avoid the use of lock_page for direct compaction
935 		 * altogether.
936 		 */
937 		if (current->flags & PF_MEMALLOC)
938 			goto out;
939 
940 		lock_page(page);
941 	}
942 
943 	if (PageWriteback(page)) {
944 		/*
945 		 * Only in the case of a full synchronous migration is it
946 		 * necessary to wait for PageWriteback. In the async case,
947 		 * the retry loop is too short and in the sync-light case,
948 		 * the overhead of stalling is too much
949 		 */
950 		switch (mode) {
951 		case MIGRATE_SYNC:
952 		case MIGRATE_SYNC_NO_COPY:
953 			break;
954 		default:
955 			rc = -EBUSY;
956 			goto out_unlock;
957 		}
958 		if (!force)
959 			goto out_unlock;
960 		wait_on_page_writeback(page);
961 	}
962 
963 	/*
964 	 * By try_to_migrate(), page->mapcount goes down to 0 here. In this case,
965 	 * we cannot notice that the anon_vma is freed while we migrate a page.
966 	 * This get_anon_vma() delays freeing the anon_vma pointer until the end
967 	 * of migration. File cache pages are no problem because of page_lock():
968 	 * file caches may use writepage() or lock_page() during migration, so
969 	 * only anon pages need this care here.
970 	 *
971 	 * Only page_get_anon_vma() understands the subtleties of
972 	 * getting a hold on an anon_vma from outside one of its mms.
973 	 * But if we cannot get anon_vma, then we won't need it anyway,
974 	 * because that implies that the anon page is no longer mapped
975 	 * (and cannot be remapped so long as we hold the page lock).
976 	 */
977 	if (PageAnon(page) && !PageKsm(page))
978 		anon_vma = page_get_anon_vma(page);
979 
980 	/*
981 	 * Block others from accessing the new page when we get around to
982 	 * establishing additional references. We are usually the only one
983 	 * holding a reference to newpage at this point. We used to have a BUG
984 	 * here if trylock_page(newpage) fails, but would like to allow for
985 	 * cases where there might be a race with the previous use of newpage.
986 	 * This is much like races on refcount of oldpage: just don't BUG().
987 	 */
988 	if (unlikely(!trylock_page(newpage)))
989 		goto out_unlock;
990 
991 	if (unlikely(!is_lru)) {
992 		rc = move_to_new_page(newpage, page, mode);
993 		goto out_unlock_both;
994 	}
995 
996 	/*
997 	 * Corner case handling:
998 	 * 1. When a new swap-cache page is read in, it is added to the LRU
999 	 * and treated as swapcache but it has no rmap yet.
1000 	 * Calling try_to_unmap() against a page->mapping==NULL page will
1001 	 * trigger a BUG.  So handle it here.
1002 	 * 2. An orphaned page (see truncate_cleanup_page) might have
1003 	 * fs-private metadata. The page can be picked up due to memory
1004 	 * offlining.  Everywhere else except page reclaim, the page is
1005 	 * invisible to the vm, so the page can not be migrated.  So try to
1006 	 * free the metadata, so the page can be freed.
1007 	 */
1008 	if (!page->mapping) {
1009 		VM_BUG_ON_PAGE(PageAnon(page), page);
1010 		if (page_has_private(page)) {
1011 			try_to_free_buffers(page);
1012 			goto out_unlock_both;
1013 		}
1014 	} else if (page_mapped(page)) {
1015 		/* Establish migration ptes */
1016 		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1017 				page);
1018 		try_to_migrate(folio, 0);
1019 		page_was_mapped = true;
1020 	}
1021 
1022 	if (!page_mapped(page))
1023 		rc = move_to_new_page(newpage, page, mode);
1024 
1025 	/*
1026 	 * When successful, push newpage to LRU immediately: so that if it
1027 	 * turns out to be an mlocked page, remove_migration_ptes() will
1028 	 * automatically build up the correct newpage->mlock_count for it.
1029 	 *
1030 	 * We would like to do something similar for the old page, when
1031 	 * unsuccessful, and other cases when a page has been temporarily
1032 	 * isolated from the unevictable LRU: but this case is the easiest.
1033 	 */
1034 	if (rc == MIGRATEPAGE_SUCCESS) {
1035 		lru_cache_add(newpage);
1036 		if (page_was_mapped)
1037 			lru_add_drain();
1038 	}
1039 
1040 	if (page_was_mapped)
1041 		remove_migration_ptes(folio,
1042 			rc == MIGRATEPAGE_SUCCESS ? dst : folio, false);
1043 
1044 out_unlock_both:
1045 	unlock_page(newpage);
1046 out_unlock:
1047 	/* Drop an anon_vma reference if we took one */
1048 	if (anon_vma)
1049 		put_anon_vma(anon_vma);
1050 	unlock_page(page);
1051 out:
1052 	/*
1053 	 * If migration is successful, decrease the refcount of newpage;
1054 	 * this will not free the page because the new page owner has
1055 	 * raised the refcount.
1056 	 */
1057 	if (rc == MIGRATEPAGE_SUCCESS)
1058 		put_page(newpage);
1059 
1060 	return rc;
1061 }
1062 
1063 /*
1064  * Obtain the lock on page, remove all ptes and migrate the page
1065  * to the newly allocated page in newpage.
1066  */
1067 static int unmap_and_move(new_page_t get_new_page,
1068 				   free_page_t put_new_page,
1069 				   unsigned long private, struct page *page,
1070 				   int force, enum migrate_mode mode,
1071 				   enum migrate_reason reason,
1072 				   struct list_head *ret)
1073 {
1074 	int rc = MIGRATEPAGE_SUCCESS;
1075 	struct page *newpage = NULL;
1076 
1077 	if (!thp_migration_supported() && PageTransHuge(page))
1078 		return -ENOSYS;
1079 
1080 	if (page_count(page) == 1) {
1081 		/* page was freed from under us. So we are done. */
1082 		ClearPageActive(page);
1083 		ClearPageUnevictable(page);
1084 		if (unlikely(__PageMovable(page))) {
1085 			lock_page(page);
1086 			if (!PageMovable(page))
1087 				ClearPageIsolated(page);
1088 			unlock_page(page);
1089 		}
1090 		goto out;
1091 	}
1092 
1093 	newpage = get_new_page(page, private);
1094 	if (!newpage)
1095 		return -ENOMEM;
1096 
1097 	rc = __unmap_and_move(page, newpage, force, mode);
1098 	if (rc == MIGRATEPAGE_SUCCESS)
1099 		set_page_owner_migrate_reason(newpage, reason);
1100 
1101 out:
1102 	if (rc != -EAGAIN) {
1103 		/*
1104 		 * A page that has been migrated has all references
1105 		 * removed and will be freed. A page that has not been
1106 		 * migrated will have kept its references and be restored.
1107 		 */
1108 		list_del(&page->lru);
1109 	}
1110 
1111 	/*
1112 	 * If migration is successful, release the reference grabbed during
1113 	 * isolation. Otherwise, restore the page to the right list unless
1114 	 * we want to retry.
1115 	 */
1116 	if (rc == MIGRATEPAGE_SUCCESS) {
1117 		/*
1118 		 * Compaction can also migrate non-LRU pages which are
1119 		 * not accounted to NR_ISOLATED_*. They can be recognized
1120 		 * as __PageMovable.
1121 		 */
1122 		if (likely(!__PageMovable(page)))
1123 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1124 					page_is_file_lru(page), -thp_nr_pages(page));
1125 
1126 		if (reason != MR_MEMORY_FAILURE)
1127 			/*
1128 			 * We release the page in page_handle_poison.
1129 			 */
1130 			put_page(page);
1131 	} else {
1132 		if (rc != -EAGAIN)
1133 			list_add_tail(&page->lru, ret);
1134 
1135 		if (put_new_page)
1136 			put_new_page(newpage, private);
1137 		else
1138 			put_page(newpage);
1139 	}
1140 
1141 	return rc;
1142 }
1143 
1144 /*
1145  * Counterpart of unmap_and_move() for hugepage migration.
1146  *
1147  * This function doesn't wait for the completion of hugepage I/O
1148  * because there is no race between I/O and migration for hugepages.
1149  * Note that currently hugepage I/O occurs only in direct I/O
1150  * where no lock is held and PG_writeback is irrelevant,
1151  * and the writeback status of all subpages is counted in the reference
1152  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1153  * under direct I/O, the reference count of the head page is 512 and a bit more.)
1154  * This means that when we try to migrate a hugepage whose subpages are
1155  * doing direct I/O, some references remain after try_to_unmap() and
1156  * hugepage migration fails without data corruption.
1157  *
1158  * There is also no race when direct I/O is issued on a page under migration,
1159  * because then the pte is replaced with a migration swap entry and direct I/O
1160  * code will wait in the page fault for migration to complete.
1161  */
1162 static int unmap_and_move_huge_page(new_page_t get_new_page,
1163 				free_page_t put_new_page, unsigned long private,
1164 				struct page *hpage, int force,
1165 				enum migrate_mode mode, int reason,
1166 				struct list_head *ret)
1167 {
1168 	struct folio *dst, *src = page_folio(hpage);
1169 	int rc = -EAGAIN;
1170 	int page_was_mapped = 0;
1171 	struct page *new_hpage;
1172 	struct anon_vma *anon_vma = NULL;
1173 	struct address_space *mapping = NULL;
1174 
1175 	/*
1176 	 * Migratability of hugepages depends on the architecture and the hugepage size.
1177 	 * This check is necessary because some callers of hugepage migration
1178 	 * like soft offline and memory hotremove don't walk through page
1179 	 * tables or check whether the hugepage is pmd-based or not before
1180 	 * kicking migration.
1181 	 */
1182 	if (!hugepage_migration_supported(page_hstate(hpage))) {
1183 		list_move_tail(&hpage->lru, ret);
1184 		return -ENOSYS;
1185 	}
1186 
1187 	if (page_count(hpage) == 1) {
1188 		/* page was freed from under us. So we are done. */
1189 		putback_active_hugepage(hpage);
1190 		return MIGRATEPAGE_SUCCESS;
1191 	}
1192 
1193 	new_hpage = get_new_page(hpage, private);
1194 	if (!new_hpage)
1195 		return -ENOMEM;
1196 	dst = page_folio(new_hpage);
1197 
1198 	if (!trylock_page(hpage)) {
1199 		if (!force)
1200 			goto out;
1201 		switch (mode) {
1202 		case MIGRATE_SYNC:
1203 		case MIGRATE_SYNC_NO_COPY:
1204 			break;
1205 		default:
1206 			goto out;
1207 		}
1208 		lock_page(hpage);
1209 	}
1210 
1211 	/*
1212 	 * Check for pages which are in the process of being freed.  Without
1213 	 * page_mapping() set, hugetlbfs specific move page routine will not
1214 	 * be called and we could leak usage counts for subpools.
1215 	 */
1216 	if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
1217 		rc = -EBUSY;
1218 		goto out_unlock;
1219 	}
1220 
1221 	if (PageAnon(hpage))
1222 		anon_vma = page_get_anon_vma(hpage);
1223 
1224 	if (unlikely(!trylock_page(new_hpage)))
1225 		goto put_anon;
1226 
1227 	if (page_mapped(hpage)) {
1228 		enum ttu_flags ttu = 0;
1229 
1230 		if (!PageAnon(hpage)) {
1231 			/*
1232 			 * In shared mappings, try_to_unmap could potentially
1233 			 * call huge_pmd_unshare.  Because of this, take
1234 			 * semaphore in write mode here and set TTU_RMAP_LOCKED
1235 			 * to let lower levels know we have taken the lock.
1236 			 */
1237 			mapping = hugetlb_page_mapping_lock_write(hpage);
1238 			if (unlikely(!mapping))
1239 				goto unlock_put_anon;
1240 
1241 			ttu = TTU_RMAP_LOCKED;
1242 		}
1243 
1244 		try_to_migrate(src, ttu);
1245 		page_was_mapped = 1;
1246 
1247 		if (ttu & TTU_RMAP_LOCKED)
1248 			i_mmap_unlock_write(mapping);
1249 	}
1250 
1251 	if (!page_mapped(hpage))
1252 		rc = move_to_new_page(new_hpage, hpage, mode);
1253 
1254 	if (page_was_mapped)
1255 		remove_migration_ptes(src,
1256 			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1257 
1258 unlock_put_anon:
1259 	unlock_page(new_hpage);
1260 
1261 put_anon:
1262 	if (anon_vma)
1263 		put_anon_vma(anon_vma);
1264 
1265 	if (rc == MIGRATEPAGE_SUCCESS) {
1266 		move_hugetlb_state(hpage, new_hpage, reason);
1267 		put_new_page = NULL;
1268 	}
1269 
1270 out_unlock:
1271 	unlock_page(hpage);
1272 out:
1273 	if (rc == MIGRATEPAGE_SUCCESS)
1274 		putback_active_hugepage(hpage);
1275 	else if (rc != -EAGAIN)
1276 		list_move_tail(&hpage->lru, ret);
1277 
1278 	/*
1279 	 * If migration was not successful and there's a freeing callback, use
1280 	 * it.  Otherwise, put_page() will drop the reference grabbed during
1281 	 * isolation.
1282 	 */
1283 	if (put_new_page)
1284 		put_new_page(new_hpage, private);
1285 	else
1286 		putback_active_hugepage(new_hpage);
1287 
1288 	return rc;
1289 }
1290 
1291 static inline int try_split_thp(struct page *page, struct page **page2,
1292 				struct list_head *from)
1293 {
1294 	int rc = 0;
1295 
1296 	lock_page(page);
1297 	rc = split_huge_page_to_list(page, from);
1298 	unlock_page(page);
1299 	if (!rc)
1300 		list_safe_reset_next(page, *page2, lru);
1301 
1302 	return rc;
1303 }
1304 
1305 /*
1306  * migrate_pages - migrate the pages specified in a list, to the free pages
1307  *		   supplied as the target for the page migration
1308  *
1309  * @from:		The list of pages to be migrated.
1310  * @get_new_page:	The function used to allocate free pages to be used
1311  *			as the target of the page migration.
1312  * @put_new_page:	The function used to free target pages if migration
1313  *			fails, or NULL if no special handling is necessary.
1314  * @private:		Private data to be passed on to get_new_page()
1315  * @mode:		The migration mode that specifies the constraints for
1316  *			page migration, if any.
1317  * @reason:		The reason for page migration.
1318  * @ret_succeeded:	Set to the number of normal pages migrated successfully if
1319  *			the caller passes a non-NULL pointer.
1320  *
1321  * The function returns after 10 attempts or if no pages are movable any more
1322  * because the list has become empty or no retryable pages exist any more.
1323  * It is the caller's responsibility to call putback_movable_pages() to return
1324  * pages to the LRU or free list, but only if ret != 0.
1325  *
1326  * Returns the number of {normal pages, THPs, hugetlb pages} that were not
1327  * migrated, or an error code. The number of THP splits is counted as the number
1328  * of non-migrated THPs, no matter how many subpages of the THP were migrated successfully.
1329  */
1330 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1331 		free_page_t put_new_page, unsigned long private,
1332 		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1333 {
1334 	int retry = 1;
1335 	int thp_retry = 1;
1336 	int nr_failed = 0;
1337 	int nr_failed_pages = 0;
1338 	int nr_succeeded = 0;
1339 	int nr_thp_succeeded = 0;
1340 	int nr_thp_failed = 0;
1341 	int nr_thp_split = 0;
1342 	int pass = 0;
1343 	bool is_thp = false;
1344 	struct page *page;
1345 	struct page *page2;
1346 	int rc, nr_subpages;
1347 	LIST_HEAD(ret_pages);
1348 	LIST_HEAD(thp_split_pages);
1349 	bool nosplit = (reason == MR_NUMA_MISPLACED);
1350 	bool no_subpage_counting = false;
1351 
1352 	trace_mm_migrate_pages_start(mode, reason);
1353 
1354 thp_subpage_migration:
1355 	for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
1356 		retry = 0;
1357 		thp_retry = 0;
1358 
1359 		list_for_each_entry_safe(page, page2, from, lru) {
1360 retry:
1361 			/*
1362 			 * THP statistics are based on the source huge page.
1363 			 * Capture required information that might get lost
1364 			 * during migration.
1365 			 */
1366 			is_thp = PageTransHuge(page) && !PageHuge(page);
1367 			nr_subpages = compound_nr(page);
1368 			cond_resched();
1369 
1370 			if (PageHuge(page))
1371 				rc = unmap_and_move_huge_page(get_new_page,
1372 						put_new_page, private, page,
1373 						pass > 2, mode, reason,
1374 						&ret_pages);
1375 			else
1376 				rc = unmap_and_move(get_new_page, put_new_page,
1377 						private, page, pass > 2, mode,
1378 						reason, &ret_pages);
1379 			/*
1380 			 * The rules are:
1381 			 *	Success: non hugetlb page will be freed, hugetlb
1382 			 *		 page will be put back
1383 			 *	-EAGAIN: stay on the from list
1384 			 *	-ENOMEM: stay on the from list
1385 			 *	Other errno: put on ret_pages list then splice to
1386 			 *		     from list
1387 			 */
1388 			switch(rc) {
1389 			/*
1390 			 * THP migration might be unsupported or the
1391 			 * allocation could've failed so we should
1392 			 * retry on the same page with the THP split
1393 			 * to base pages.
1394 			 *
1395 			 * Head page is retried immediately and tail
1396 			 * pages are added to the tail of the list so
1397 			 * we encounter them after the rest of the list
1398 			 * is processed.
1399 			 */
1400 			case -ENOSYS:
1401 				/* THP migration is unsupported */
1402 				if (is_thp) {
1403 					nr_thp_failed++;
1404 					if (!try_split_thp(page, &page2, &thp_split_pages)) {
1405 						nr_thp_split++;
1406 						goto retry;
1407 					}
1408 				/* Hugetlb migration is unsupported */
1409 				} else if (!no_subpage_counting) {
1410 					nr_failed++;
1411 				}
1412 
1413 				nr_failed_pages += nr_subpages;
1414 				break;
1415 			case -ENOMEM:
1416 				/*
1417 				 * When memory is low, don't bother to try to migrate
1418 				 * other pages, just exit.
1419 				 * THP NUMA faulting doesn't split THP to retry.
1420 				 */
1421 				if (is_thp && !nosplit) {
1422 					nr_thp_failed++;
1423 					if (!try_split_thp(page, &page2, &thp_split_pages)) {
1424 						nr_thp_split++;
1425 						goto retry;
1426 					}
1427 				} else if (!no_subpage_counting) {
1428 					nr_failed++;
1429 				}
1430 
1431 				nr_failed_pages += nr_subpages;
1432 				/*
1433 				 * There might be some subpages of fail-to-migrate THPs
1434 				 * left on the thp_split_pages list. Move them back to the migration
1435 				 * list so that they can be put back to the right list by
1436 				 * the caller; otherwise the page refcount will be leaked.
1437 				 */
1438 				list_splice_init(&thp_split_pages, from);
1439 				nr_thp_failed += thp_retry;
1440 				goto out;
1441 			case -EAGAIN:
1442 				if (is_thp)
1443 					thp_retry++;
1444 				else
1445 					retry++;
1446 				break;
1447 			case MIGRATEPAGE_SUCCESS:
1448 				nr_succeeded += nr_subpages;
1449 				if (is_thp)
1450 					nr_thp_succeeded++;
1451 				break;
1452 			default:
1453 				/*
1454 				 * Permanent failure (-EBUSY, etc.):
1455 				 * unlike -EAGAIN case, the failed page is
1456 				 * removed from migration page list and not
1457 				 * retried in the next outer loop.
1458 				 */
1459 				if (is_thp)
1460 					nr_thp_failed++;
1461 				else if (!no_subpage_counting)
1462 					nr_failed++;
1463 
1464 				nr_failed_pages += nr_subpages;
1465 				break;
1466 			}
1467 		}
1468 	}
1469 	nr_failed += retry;
1470 	nr_thp_failed += thp_retry;
1471 	/*
1472 	 * Try to migrate the subpages of fail-to-migrate THPs. There is no nr_failed
1473 	 * counting in this round, since all subpages of a THP are counted
1474 	 * as one failure in the first round.
1475 	 */
1476 	if (!list_empty(&thp_split_pages)) {
1477 		/*
1478 		 * Move non-migrated pages (after 10 retries) to ret_pages
1479 		 * to avoid migrating them again.
1480 		 */
1481 		list_splice_init(from, &ret_pages);
1482 		list_splice_init(&thp_split_pages, from);
1483 		no_subpage_counting = true;
1484 		retry = 1;
1485 		goto thp_subpage_migration;
1486 	}
1487 
1488 	rc = nr_failed + nr_thp_failed;
1489 out:
1490 	/*
1491 	 * Put the permanently failed pages back on the migration list; they
1492 	 * will be put back to the right list by the caller.
1493 	 */
1494 	list_splice(&ret_pages, from);
1495 
1496 	count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1497 	count_vm_events(PGMIGRATE_FAIL, nr_failed_pages);
1498 	count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
1499 	count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
1500 	count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
1501 	trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded,
1502 			       nr_thp_failed, nr_thp_split, mode, reason);
1503 
1504 	if (ret_succeeded)
1505 		*ret_succeeded = nr_succeeded;
1506 
1507 	return rc;
1508 }
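
/*
 * Example (minimal usage sketch, modelled on do_move_pages_to_node() further
 * down in this file; "target_nid" and "pagelist" are assumed to be set up by
 * the caller): migrate an isolated list of pages to one node and put back
 * whatever could not be moved:
 *
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *	};
 *
 *	if (migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			  (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL))
 *		putback_movable_pages(&pagelist);
 */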
1509 
1510 struct page *alloc_migration_target(struct page *page, unsigned long private)
1511 {
1512 	struct folio *folio = page_folio(page);
1513 	struct migration_target_control *mtc;
1514 	gfp_t gfp_mask;
1515 	unsigned int order = 0;
1516 	struct folio *new_folio = NULL;
1517 	int nid;
1518 	int zidx;
1519 
1520 	mtc = (struct migration_target_control *)private;
1521 	gfp_mask = mtc->gfp_mask;
1522 	nid = mtc->nid;
1523 	if (nid == NUMA_NO_NODE)
1524 		nid = folio_nid(folio);
1525 
1526 	if (folio_test_hugetlb(folio)) {
1527 		struct hstate *h = page_hstate(&folio->page);
1528 
1529 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1530 		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
1531 	}
1532 
1533 	if (folio_test_large(folio)) {
1534 		/*
1535 		 * clear __GFP_RECLAIM to make the migration callback
1536 		 * consistent with regular THP allocations.
1537 		 */
1538 		gfp_mask &= ~__GFP_RECLAIM;
1539 		gfp_mask |= GFP_TRANSHUGE;
1540 		order = folio_order(folio);
1541 	}
1542 	zidx = zone_idx(folio_zone(folio));
1543 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
1544 		gfp_mask |= __GFP_HIGHMEM;
1545 
1546 	new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
1547 
1548 	return &new_folio->page;
1549 }
1550 
1551 #ifdef CONFIG_NUMA
1552 
1553 static int store_status(int __user *status, int start, int value, int nr)
1554 {
1555 	while (nr-- > 0) {
1556 		if (put_user(value, status + start))
1557 			return -EFAULT;
1558 		start++;
1559 	}
1560 
1561 	return 0;
1562 }
1563 
1564 static int do_move_pages_to_node(struct mm_struct *mm,
1565 		struct list_head *pagelist, int node)
1566 {
1567 	int err;
1568 	struct migration_target_control mtc = {
1569 		.nid = node,
1570 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1571 	};
1572 
1573 	err = migrate_pages(pagelist, alloc_migration_target, NULL,
1574 		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1575 	if (err)
1576 		putback_movable_pages(pagelist);
1577 	return err;
1578 }
1579 
1580 /*
1581  * Resolves the given address to a struct page, isolates it from the LRU and
1582  * puts it on the given pagelist.
1583  * Returns:
1584  *     errno - if the page cannot be found/isolated
1585  *     0 - when it doesn't have to be migrated because it is already on the
1586  *         target node
1587  *     1 - when it has been queued
1588  */
1589 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1590 		int node, struct list_head *pagelist, bool migrate_all)
1591 {
1592 	struct vm_area_struct *vma;
1593 	struct page *page;
1594 	int err;
1595 
1596 	mmap_read_lock(mm);
1597 	err = -EFAULT;
1598 	vma = vma_lookup(mm, addr);
1599 	if (!vma || !vma_migratable(vma))
1600 		goto out;
1601 
1602 	/* FOLL_DUMP to ignore special (like zero) pages */
1603 	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
1604 
1605 	err = PTR_ERR(page);
1606 	if (IS_ERR(page))
1607 		goto out;
1608 
1609 	err = -ENOENT;
1610 	if (!page)
1611 		goto out;
1612 
1613 	err = 0;
1614 	if (page_to_nid(page) == node)
1615 		goto out_putpage;
1616 
1617 	err = -EACCES;
1618 	if (page_mapcount(page) > 1 && !migrate_all)
1619 		goto out_putpage;
1620 
1621 	if (PageHuge(page)) {
1622 		if (PageHead(page)) {
1623 			isolate_huge_page(page, pagelist);
1624 			err = 1;
1625 		}
1626 	} else {
1627 		struct page *head;
1628 
1629 		head = compound_head(page);
1630 		err = isolate_lru_page(head);
1631 		if (err)
1632 			goto out_putpage;
1633 
1634 		err = 1;
1635 		list_add_tail(&head->lru, pagelist);
1636 		mod_node_page_state(page_pgdat(head),
1637 			NR_ISOLATED_ANON + page_is_file_lru(head),
1638 			thp_nr_pages(head));
1639 	}
1640 out_putpage:
1641 	/*
1642 	 * Either remove the duplicate refcount from
1643 	 * isolate_lru_page() or drop the page ref if it was
1644 	 * not isolated.
1645 	 */
1646 	put_page(page);
1647 out:
1648 	mmap_read_unlock(mm);
1649 	return err;
1650 }
1651 
1652 static int move_pages_and_store_status(struct mm_struct *mm, int node,
1653 		struct list_head *pagelist, int __user *status,
1654 		int start, int i, unsigned long nr_pages)
1655 {
1656 	int err;
1657 
1658 	if (list_empty(pagelist))
1659 		return 0;
1660 
1661 	err = do_move_pages_to_node(mm, pagelist, node);
1662 	if (err) {
1663 		/*
1664 		 * A positive err means the number of pages that
1665 		 * failed to migrate.  Since we are going to
1666 		 * abort and return the number of non-migrated
1667 		 * pages, we need to include the rest of the
1668 		 * nr_pages that have not been attempted as
1669 		 * well.
1670 		 */
1671 		if (err > 0)
1672 			err += nr_pages - i - 1;
1673 		return err;
1674 	}
1675 	return store_status(status, start, node, i - start);
1676 }
1677 
1678 /*
1679  * Migrate an array of page addresses to an array of nodes and fill
1680  * in the corresponding array of status values.
1681  */
1682 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1683 			 unsigned long nr_pages,
1684 			 const void __user * __user *pages,
1685 			 const int __user *nodes,
1686 			 int __user *status, int flags)
1687 {
1688 	int current_node = NUMA_NO_NODE;
1689 	LIST_HEAD(pagelist);
1690 	int start, i;
1691 	int err = 0, err1;
1692 
1693 	lru_cache_disable();
1694 
1695 	for (i = start = 0; i < nr_pages; i++) {
1696 		const void __user *p;
1697 		unsigned long addr;
1698 		int node;
1699 
1700 		err = -EFAULT;
1701 		if (get_user(p, pages + i))
1702 			goto out_flush;
1703 		if (get_user(node, nodes + i))
1704 			goto out_flush;
1705 		addr = (unsigned long)untagged_addr(p);
1706 
1707 		err = -ENODEV;
1708 		if (node < 0 || node >= MAX_NUMNODES)
1709 			goto out_flush;
1710 		if (!node_state(node, N_MEMORY))
1711 			goto out_flush;
1712 
1713 		err = -EACCES;
1714 		if (!node_isset(node, task_nodes))
1715 			goto out_flush;
1716 
1717 		if (current_node == NUMA_NO_NODE) {
1718 			current_node = node;
1719 			start = i;
1720 		} else if (node != current_node) {
1721 			err = move_pages_and_store_status(mm, current_node,
1722 					&pagelist, status, start, i, nr_pages);
1723 			if (err)
1724 				goto out;
1725 			start = i;
1726 			current_node = node;
1727 		}
1728 
1729 		/*
1730 		 * Errors in the page lookup or isolation are not fatal and we simply
1731 		 * report them via status
1732 		 */
1733 		err = add_page_for_migration(mm, addr, current_node,
1734 				&pagelist, flags & MPOL_MF_MOVE_ALL);
1735 
1736 		if (err > 0) {
1737 			/* The page is successfully queued for migration */
1738 			continue;
1739 		}
1740 
1741 		/*
1742 		 * The move_pages() man page does not have an -EEXIST choice, so
1743 		 * use -EFAULT instead.
1744 		 */
1745 		if (err == -EEXIST)
1746 			err = -EFAULT;
1747 
1748 		/*
1749 		 * If the page is already on the target node (!err), store the
1750 		 * node, otherwise, store the err.
1751 		 */
1752 		err = store_status(status, i, err ? : current_node, 1);
1753 		if (err)
1754 			goto out_flush;
1755 
1756 		err = move_pages_and_store_status(mm, current_node, &pagelist,
1757 				status, start, i, nr_pages);
1758 		if (err)
1759 			goto out;
1760 		current_node = NUMA_NO_NODE;
1761 	}
1762 out_flush:
1763 	/* Make sure we do not overwrite the existing error */
1764 	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
1765 				status, start, i, nr_pages);
1766 	if (err >= 0)
1767 		err = err1;
1768 out:
1769 	lru_cache_enable();
1770 	return err;
1771 }
1772 
1773 /*
1774  * Determine the nodes of an array of pages and store them in an array of status values.
1775  */
1776 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1777 				const void __user **pages, int *status)
1778 {
1779 	unsigned long i;
1780 
1781 	mmap_read_lock(mm);
1782 
1783 	for (i = 0; i < nr_pages; i++) {
1784 		unsigned long addr = (unsigned long)(*pages);
1785 		struct vm_area_struct *vma;
1786 		struct page *page;
1787 		int err = -EFAULT;
1788 
1789 		vma = vma_lookup(mm, addr);
1790 		if (!vma)
1791 			goto set_status;
1792 
1793 		/* FOLL_DUMP to ignore special (like zero) pages */
1794 		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
1795 
1796 		err = PTR_ERR(page);
1797 		if (IS_ERR(page))
1798 			goto set_status;
1799 
1800 		if (page) {
1801 			err = page_to_nid(page);
1802 			put_page(page);
1803 		} else {
1804 			err = -ENOENT;
1805 		}
1806 set_status:
1807 		*status = err;
1808 
1809 		pages++;
1810 		status++;
1811 	}
1812 
1813 	mmap_read_unlock(mm);
1814 }
1815 
1816 static int get_compat_pages_array(const void __user *chunk_pages[],
1817 				  const void __user * __user *pages,
1818 				  unsigned long chunk_nr)
1819 {
1820 	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
1821 	compat_uptr_t p;
1822 	int i;
1823 
1824 	for (i = 0; i < chunk_nr; i++) {
1825 		if (get_user(p, pages32 + i))
1826 			return -EFAULT;
1827 		chunk_pages[i] = compat_ptr(p);
1828 	}
1829 
1830 	return 0;
1831 }
1832 
1833 /*
1834  * Determine the nodes of a user array of pages and store them in
1835  * a user array of status values.
1836  */
1837 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1838 			 const void __user * __user *pages,
1839 			 int __user *status)
1840 {
1841 #define DO_PAGES_STAT_CHUNK_NR 16UL
1842 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1843 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1844 
1845 	while (nr_pages) {
1846 		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
1847 
1848 		if (in_compat_syscall()) {
1849 			if (get_compat_pages_array(chunk_pages, pages,
1850 						   chunk_nr))
1851 				break;
1852 		} else {
1853 			if (copy_from_user(chunk_pages, pages,
1854 				      chunk_nr * sizeof(*chunk_pages)))
1855 				break;
1856 		}
1857 
1858 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1859 
1860 		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1861 			break;
1862 
1863 		pages += chunk_nr;
1864 		status += chunk_nr;
1865 		nr_pages -= chunk_nr;
1866 	}
1867 	return nr_pages ? -EFAULT : 0;
1868 }
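/*
 * Illustrative sketch only: do_pages_stat() walks the user arrays in chunks
 * of DO_PAGES_STAT_CHUNK_NR entries.  A hypothetical call with nr_pages == 40
 * is therefore processed as three chunks of 16, 16 and 8 pages, each chunk
 * resolved by do_pages_stat_array() and copied back into the user's status[]
 * before the next chunk of pointers is read.
 */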
1869 
1870 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
1871 {
1872 	struct task_struct *task;
1873 	struct mm_struct *mm;
1874 
1875 	/*
1876 	 * There is no need to check if the current process has the right to
1877 	 * modify the specified process when they are the same.
1878 	 */
1879 	if (!pid) {
1880 		mmget(current->mm);
1881 		*mem_nodes = cpuset_mems_allowed(current);
1882 		return current->mm;
1883 	}
1884 
1885 	/* Find the mm_struct */
1886 	rcu_read_lock();
1887 	task = find_task_by_vpid(pid);
1888 	if (!task) {
1889 		rcu_read_unlock();
1890 		return ERR_PTR(-ESRCH);
1891 	}
1892 	get_task_struct(task);
1893 
1894 	/*
1895 	 * Check if this process has the right to modify the specified
1896 	 * process. Use the regular "ptrace_may_access()" checks.
1897 	 */
1898 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1899 		rcu_read_unlock();
1900 		mm = ERR_PTR(-EPERM);
1901 		goto out;
1902 	}
1903 	rcu_read_unlock();
1904 
1905 	mm = ERR_PTR(security_task_movememory(task));
1906 	if (IS_ERR(mm))
1907 		goto out;
1908 	*mem_nodes = cpuset_mems_allowed(task);
1909 	mm = get_task_mm(task);
1910 out:
1911 	put_task_struct(task);
1912 	if (!mm)
1913 		mm = ERR_PTR(-EINVAL);
1914 	return mm;
1915 }
1916 
1917 /*
1918  * Move a list of pages in the address space of the currently executing
1919  * process.
1920  */
1921 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1922 			     const void __user * __user *pages,
1923 			     const int __user *nodes,
1924 			     int __user *status, int flags)
1925 {
1926 	struct mm_struct *mm;
1927 	int err;
1928 	nodemask_t task_nodes;
1929 
1930 	/* Check flags */
1931 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1932 		return -EINVAL;
1933 
1934 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1935 		return -EPERM;
1936 
1937 	mm = find_mm_struct(pid, &task_nodes);
1938 	if (IS_ERR(mm))
1939 		return PTR_ERR(mm);
1940 
1941 	if (nodes)
1942 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
1943 				    nodes, status, flags);
1944 	else
1945 		err = do_pages_stat(mm, nr_pages, pages, status);
1946 
1947 	mmput(mm);
1948 	return err;
1949 }
1950 
1951 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1952 		const void __user * __user *, pages,
1953 		const int __user *, nodes,
1954 		int __user *, status, int, flags)
1955 {
1956 	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1957 }
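/*
 * Illustrative user-space sketch (not part of the kernel build), showing how
 * this syscall is typically reached through the move_pages(2) wrapper from
 * libnuma's <numaif.h>.  The addresses and node numbers are hypothetical.
 *
 *	#include <numaif.h>
 *
 *	void *pages[2] = { addr0, addr1 };
 *	int nodes[2] = { 1, 1 };	// desired target node per page
 *	int status[2];
 *
 *	// Move both pages of the calling process (pid 0) to node 1.
 *	long rc = move_pages(0, 2, pages, nodes, status, MPOL_MF_MOVE);
 *
 *	// With nodes == NULL the call only reports the node each page
 *	// currently resides on (the do_pages_stat() path above).
 *	rc = move_pages(0, 2, pages, NULL, status, 0);
 */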
1958 
1959 #ifdef CONFIG_NUMA_BALANCING
1960 /*
1961  * Returns true if this is a safe migration target node for misplaced NUMA
1962  * pages. Currently it only checks the watermarks, which is crude.
1963  */
1964 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
1965 				   unsigned long nr_migrate_pages)
1966 {
1967 	int z;
1968 
1969 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1970 		struct zone *zone = pgdat->node_zones + z;
1971 
1972 		if (!managed_zone(zone))
1973 			continue;
1974 
1975 		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
1976 		if (!zone_watermark_ok(zone, 0,
1977 				       high_wmark_pages(zone) +
1978 				       nr_migrate_pages,
1979 				       ZONE_MOVABLE, 0))
1980 			continue;
1981 		return true;
1982 	}
1983 	return false;
1984 }
1985 
1986 static struct page *alloc_misplaced_dst_page(struct page *page,
1987 					   unsigned long data)
1988 {
1989 	int nid = (int) data;
1990 	int order = compound_order(page);
1991 	gfp_t gfp = __GFP_THISNODE;
1992 	struct folio *new;
1993 
1994 	if (order > 0)
1995 		gfp |= GFP_TRANSHUGE_LIGHT;
1996 	else {
1997 		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
1998 			__GFP_NOWARN;
1999 		gfp &= ~__GFP_RECLAIM;
2000 	}
2001 	new = __folio_alloc_node(gfp, order, nid);
2002 
2003 	return &new->page;
2004 }
2005 
2006 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2007 {
2008 	int nr_pages = thp_nr_pages(page);
2009 	int order = compound_order(page);
2010 
2011 	VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
2012 
2013 	/* Do not migrate THP mapped by multiple processes */
2014 	if (PageTransHuge(page) && total_mapcount(page) > 1)
2015 		return 0;
2016 
2017 	/* Avoid migrating to a node that is nearly full */
2018 	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2019 		int z;
2020 
2021 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2022 			return 0;
2023 		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2024 			if (managed_zone(pgdat->node_zones + z))
2025 				break;
2026 		}
2027 		wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
2028 		return 0;
2029 	}
2030 
2031 	if (isolate_lru_page(page))
2032 		return 0;
2033 
2034 	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
2035 			    nr_pages);
2036 
2037 	/*
2038 	 * Isolating the page has taken another reference, so the
2039 	 * caller's reference can be safely dropped without the page
2040 	 * disappearing underneath us during migration.
2041 	 */
2042 	put_page(page);
2043 	return 1;
2044 }
2045 
2046 /*
2047  * Attempt to migrate a misplaced page to the specified destination
2048  * node. Caller is expected to have an elevated reference count on
2049  * the page that will be dropped by this function before returning.
2050  */
2051 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2052 			   int node)
2053 {
2054 	pg_data_t *pgdat = NODE_DATA(node);
2055 	int isolated;
2056 	int nr_remaining;
2057 	unsigned int nr_succeeded;
2058 	LIST_HEAD(migratepages);
2059 	int nr_pages = thp_nr_pages(page);
2060 
2061 	/*
2062 	 * Don't migrate file pages that are mapped in multiple processes
2063 	 * with execute permissions as they are probably shared libraries.
2064 	 */
2065 	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2066 	    (vma->vm_flags & VM_EXEC))
2067 		goto out;
2068 
2069 	/*
2070 	 * Also do not migrate dirty pages: not all filesystems can move dirty
2071 	 * pages in MIGRATE_ASYNC mode, so attempting it is a waste of cycles.
2072 	 */
2073 	if (page_is_file_lru(page) && PageDirty(page))
2074 		goto out;
2075 
2076 	isolated = numamigrate_isolate_page(pgdat, page);
2077 	if (!isolated)
2078 		goto out;
2079 
2080 	list_add(&page->lru, &migratepages);
2081 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
2082 				     NULL, node, MIGRATE_ASYNC,
2083 				     MR_NUMA_MISPLACED, &nr_succeeded);
2084 	if (nr_remaining) {
2085 		if (!list_empty(&migratepages)) {
2086 			list_del(&page->lru);
2087 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2088 					page_is_file_lru(page), -nr_pages);
2089 			putback_lru_page(page);
2090 		}
2091 		isolated = 0;
2092 	}
2093 	if (nr_succeeded) {
2094 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2095 		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2096 			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2097 					    nr_succeeded);
2098 	}
2099 	BUG_ON(!list_empty(&migratepages));
2100 	return isolated;
2101 
2102 out:
2103 	put_page(page);
2104 	return 0;
2105 }
2106 #endif /* CONFIG_NUMA_BALANCING */
2107 
2108 /*
2109  * node_demotion[] example:
2110  *
2111  * Consider a system with two sockets.  Each socket has
2112  * three classes of memory attached: fast, medium and slow.
2113  * Each memory class is placed in its own NUMA node.  The
2114  * CPUs are placed in the node with the "fast" memory.  The
2115  * 6 NUMA nodes (0-5) might be split among the sockets like
2116  * this:
2117  *
2118  *	Socket A: 0, 1, 2
2119  *	Socket B: 3, 4, 5
2120  *
2121  * When Node 0 fills up, its memory should be migrated to
2122  * Node 1.  When Node 1 fills up, it should be migrated to
2123  * Node 2.  The migration path starts on the nodes with the
2124  * processors (since allocations default to this node) and
2125  * fast memory, progress through medium and end with the
2126  * slow memory:
2127  *
2128  *	0 -> 1 -> 2 -> stop
2129  *	3 -> 4 -> 5 -> stop
2130  *
2131  * This is represented in the node_demotion[] like this:
2132  *
2133  *	{  nr=1, nodes[0]=1 }, // Node 0 migrates to 1
2134  *	{  nr=1, nodes[0]=2 }, // Node 1 migrates to 2
2135  *	{  nr=0, nodes[0]=-1 }, // Node 2 does not migrate
2136  *	{  nr=1, nodes[0]=4 }, // Node 3 migrates to 4
2137  *	{  nr=1, nodes[0]=5 }, // Node 4 migrates to 5
2138  *	{  nr=0, nodes[0]=-1 }, // Node 5 does not migrate
2139  *
2140  * Moreover, some systems may have multiple slow memory nodes.
2141  * Suppose a system has one socket with 3 memory nodes: node 0
2142  * is fast memory, while nodes 1 and 2 are both slow memory of
2143  * the same type and sit at the same distance from the fast
2144  * node. The migration path should then be:
2145  *
2146  *	0 -> 1/2 -> stop
2147  *
2148  * This is represented in the node_demotion[] like this:
2149  *	{ nr=2, {nodes[0]=1, nodes[1]=2} }, // Node 0 migrates to node 1 and node 2
2150  *	{ nr=0, nodes[0]=-1, }, // Node 1 does not migrate
2151  *	{ nr=0, nodes[0]=-1, }, // Node 2 does not migrate
2152  */
2153 
2154 /*
2155  * Writes to this array occur without locking.  Cycles are
2156  * not allowed: Node X demotes to Y which demotes to X...
2157  *
2158  * If multiple reads are performed, a single rcu_read_lock()
2159  * must be held over all reads to ensure that no cycles are
2160  * observed.
2161  */
2162 #define DEFAULT_DEMOTION_TARGET_NODES 15
2163 
2164 #if MAX_NUMNODES < DEFAULT_DEMOTION_TARGET_NODES
2165 #define DEMOTION_TARGET_NODES	(MAX_NUMNODES - 1)
2166 #else
2167 #define DEMOTION_TARGET_NODES	DEFAULT_DEMOTION_TARGET_NODES
2168 #endif
2169 
2170 struct demotion_nodes {
2171 	unsigned short nr;
2172 	short nodes[DEMOTION_TARGET_NODES];
2173 };
2174 
2175 static struct demotion_nodes *node_demotion __read_mostly;
2176 
2177 /**
2178  * next_demotion_node() - Get the next node in the demotion path
2179  * @node: The starting node to lookup the next node
2180  *
2181  * Return: node id for next memory node in the demotion path hierarchy
2182  * from @node; NUMA_NO_NODE if @node is terminal.  This does not keep
2183  * @node online or guarantee that it *continues* to be the next demotion
2184  * target.
2185  */
2186 int next_demotion_node(int node)
2187 {
2188 	struct demotion_nodes *nd;
2189 	unsigned short target_nr, index;
2190 	int target;
2191 
2192 	if (!node_demotion)
2193 		return NUMA_NO_NODE;
2194 
2195 	nd = &node_demotion[node];
2196 
2197 	/*
2198 	 * node_demotion[] is updated without excluding this
2199 	 * function from running.  RCU doesn't provide any
2200 	 * compiler barriers, so the READ_ONCE() is required
2201 	 * to avoid compiler reordering or read merging.
2202 	 *
2203 	 * Make sure to use RCU over entire code blocks if
2204 	 * node_demotion[] reads need to be consistent.
2205 	 */
2206 	rcu_read_lock();
2207 	target_nr = READ_ONCE(nd->nr);
2208 
2209 	switch (target_nr) {
2210 	case 0:
2211 		target = NUMA_NO_NODE;
2212 		goto out;
2213 	case 1:
2214 		index = 0;
2215 		break;
2216 	default:
2217 		/*
2218 		 * If there are multiple target nodes, just select one
2219 		 * target node randomly.
2220 		 *
2221 		 * Round-robin selection would also work, but it would need an
2222 		 * extra field in node_demotion[] to record the last selected
2223 		 * target node, and updating that field could cause cache
2224 		 * ping-pong.  Per-CPU state would avoid the caching issue but
2225 		 * is more complicated.  For now, picking a target node at
2226 		 * random is the simpler choice.
2228 		 */
2229 		index = get_random_int() % target_nr;
2230 		break;
2231 	}
2232 
2233 	target = READ_ONCE(nd->nodes[index]);
2234 
2235 out:
2236 	rcu_read_unlock();
2237 	return target;
2238 }
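/*
 * Illustrative sketch only: a caller that wants to walk the whole demotion
 * chain from a (hypothetical) starting node can simply iterate until the
 * path terminates:
 *
 *	int nid = start_nid;
 *
 *	while ((nid = next_demotion_node(nid)) != NUMA_NO_NODE)
 *		pr_debug("next demotion target: %d\n", nid);
 *
 * With the two-socket example above this prints 1 and then 2 when starting
 * from node 0.  Each call takes its own RCU read-side critical section, so
 * the chain seen by such a loop is not guaranteed to be self-consistent if
 * node_demotion[] changes concurrently.
 */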
2239 
2240 /* Disable reclaim-based migration. */
2241 static void __disable_all_migrate_targets(void)
2242 {
2243 	int node, i;
2244 
2245 	if (!node_demotion)
2246 		return;
2247 
2248 	for_each_online_node(node) {
2249 		node_demotion[node].nr = 0;
2250 		for (i = 0; i < DEMOTION_TARGET_NODES; i++)
2251 			node_demotion[node].nodes[i] = NUMA_NO_NODE;
2252 	}
2253 }
2254 
2255 static void disable_all_migrate_targets(void)
2256 {
2257 	__disable_all_migrate_targets();
2258 
2259 	/*
2260 	 * Ensure that the "disable" is visible across the system.
2261 	 * Readers will see either a combination of before+disable
2262 	 * state or disable+after.  They will never see before and
2263 	 * after state together.
2264 	 *
2265 	 * The before+after state together might have cycles and
2266 	 * could cause readers to do things like loop until this
2267 	 * function finishes.  This ensures they can only see a
2268 	 * single "bad" read and would, for instance, only loop
2269 	 * once.
2270 	 */
2271 	synchronize_rcu();
2272 }
2273 
2274 /*
2275  * Find an automatic demotion target for 'node'.
2276  * Failing here is OK.  It might just indicate
2277  * being at the end of a chain.
2278  */
2279 static int establish_migrate_target(int node, nodemask_t *used,
2280 				    int best_distance)
2281 {
2282 	int migration_target, index, val;
2283 	struct demotion_nodes *nd;
2284 
2285 	if (!node_demotion)
2286 		return NUMA_NO_NODE;
2287 
2288 	nd = &node_demotion[node];
2289 
2290 	migration_target = find_next_best_node(node, used);
2291 	if (migration_target == NUMA_NO_NODE)
2292 		return NUMA_NO_NODE;
2293 
2294 	/*
2295 	 * If a migration target has already been chosen for this node,
2296 	 * best_distance is the distance to that target.  Additional
2297 	 * targets are accepted only if they sit at the same best
2298 	 * distance from this node.
2299 	 */
2300 	if (best_distance != -1) {
2301 		val = node_distance(node, migration_target);
2302 		if (val > best_distance)
2303 			goto out_clear;
2304 	}
2305 
2306 	index = nd->nr;
2307 	if (WARN_ONCE(index >= DEMOTION_TARGET_NODES,
2308 		      "Exceeds maximum demotion target nodes\n"))
2309 		goto out_clear;
2310 
2311 	nd->nodes[index] = migration_target;
2312 	nd->nr++;
2313 
2314 	return migration_target;
2315 out_clear:
2316 	node_clear(migration_target, *used);
2317 	return NUMA_NO_NODE;
2318 }
2319 
2320 /*
2321  * When memory fills up on a node, memory contents can be
2322  * automatically migrated to another node instead of
2323  * discarded at reclaim.
2324  *
2325  * Establish a "migration path" which will start at nodes
2326  * with CPUs and will follow the priorities used to build the
2327  * page allocator zonelists.
2328  *
2329  * The difference here is that cycles must be avoided.  If
2330  * node0 migrates to node1, then neither node1, nor anything
2331  * node1 migrates to can migrate to node0. A node may also
2332  * demote to multiple target nodes if those targets all have
2333  * the same best distance to the source node.
2334  *
2335  * This function can run simultaneously with readers of
2336  * node_demotion[].  However, it can not run simultaneously
2337  * with itself.  Exclusion is provided by memory hotplug events
2338  * being single-threaded.
2339  */
2340 static void __set_migration_target_nodes(void)
2341 {
2342 	nodemask_t next_pass;
2343 	nodemask_t this_pass;
2344 	nodemask_t used_targets = NODE_MASK_NONE;
2345 	int node, best_distance;
2346 
2347 	/*
2348 	 * Avoid any oddities like cycles that could occur
2349 	 * from changes in the topology.  This will leave
2350 	 * a momentary gap when migration is disabled.
2351 	 */
2352 	disable_all_migrate_targets();
2353 
2354 	/*
2355 	 * Allocations go close to CPUs, first.  Assume that
2356 	 * the migration path starts at the nodes with CPUs.
2357 	 */
2358 	next_pass = node_states[N_CPU];
2359 again:
2360 	this_pass = next_pass;
2361 	next_pass = NODE_MASK_NONE;
2362 	/*
2363 	 * To avoid cycles in the migration "graph", ensure
2364 	 * that migration sources are not future targets by
2365 	 * setting them in 'used_targets'.  Do this only
2366 	 * once per pass so that multiple source nodes can
2367 	 * share a target node.
2368 	 *
2369 	 * 'used_targets' will become unavailable in future
2370 	 * passes.  This limits some opportunities for
2371 	 * multiple source nodes to share a destination.
2372 	 */
2373 	nodes_or(used_targets, used_targets, this_pass);
2374 
2375 	for_each_node_mask(node, this_pass) {
2376 		best_distance = -1;
2377 
2378 		/*
2379 		 * Try to set up the migration path for this node.  A node may
2380 		 * have multiple demotion targets, so loop until every target
2381 		 * at the best distance has been found.
2382 		 */
2383 		do {
2384 			int target_node =
2385 				establish_migrate_target(node, &used_targets,
2386 							 best_distance);
2387 
2388 			if (target_node == NUMA_NO_NODE)
2389 				break;
2390 
2391 			if (best_distance == -1)
2392 				best_distance = node_distance(node, target_node);
2393 
2394 			/*
2395 			 * Visit targets from this pass in the next pass.
2396 			 * Eventually, every node will have been part of
2397 			 * a pass, and will become set in 'used_targets'.
2398 			 */
2399 			node_set(target_node, next_pass);
2400 		} while (1);
2401 	}
2402 	/*
2403 	 * 'next_pass' contains nodes which became migration
2404 	 * targets in this pass.  Make additional passes until
2405 	 * no more migrations targets are available.
2406 	 */
2407 	if (!nodes_empty(next_pass))
2408 		goto again;
2409 }
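/*
 * Illustrative walk-through only, using the two-socket example from the
 * node_demotion[] comment above (sockets {0,1,2} and {3,4,5}, CPUs on
 * nodes 0 and 3):
 *
 *	pass 1: this_pass = {0,3}; node 0 gets target 1, node 3 gets
 *	        target 4, so next_pass = {1,4}
 *	pass 2: this_pass = {1,4}; node 1 gets target 2, node 4 gets
 *	        target 5, so next_pass = {2,5}
 *	pass 3: this_pass = {2,5}; every remaining node is already in
 *	        used_targets, no new targets are found, next_pass is
 *	        empty and the loop stops.
 */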
2410 
2411 /*
2412  * For callers that do not hold get_online_mems() already.
2413  */
2414 void set_migration_target_nodes(void)
2415 {
2416 	get_online_mems();
2417 	__set_migration_target_nodes();
2418 	put_online_mems();
2419 }
2420 
2421 /*
2422  * This leaves migrate-on-reclaim transiently disabled between
2423  * the MEM_GOING_OFFLINE and MEM_OFFLINE events.  This runs
2424  * whether reclaim-based migration is enabled or not, which
2425  * ensures that the user can turn reclaim-based migration on or off
2426  * at any time without needing to recalculate migration targets.
2427  *
2428  * These callbacks already hold get_online_mems().  That is why
2429  * __set_migration_target_nodes() can be used as opposed to
2430  * set_migration_target_nodes().
2431  */
2432 #ifdef CONFIG_MEMORY_HOTPLUG
2433 static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
2434 						 unsigned long action, void *_arg)
2435 {
2436 	struct memory_notify *arg = _arg;
2437 
2438 	/*
2439 	 * Only update the node migration order when a node is
2440 	 * changing status, like online->offline.  This avoids
2441 	 * the overhead of synchronize_rcu() in most cases.
2442 	 */
2443 	if (arg->status_change_nid < 0)
2444 		return notifier_from_errno(0);
2445 
2446 	switch (action) {
2447 	case MEM_GOING_OFFLINE:
2448 		/*
2449 		 * Make sure there are no transient states where
2450 		 * an offline node is a migration target.  This
2451 		 * will leave migration disabled until the offline
2452 		 * completes and the MEM_OFFLINE case below runs.
2453 		 */
2454 		disable_all_migrate_targets();
2455 		break;
2456 	case MEM_OFFLINE:
2457 	case MEM_ONLINE:
2458 		/*
2459 		 * Recalculate the target nodes once the node
2460 		 * reaches its final state (online or offline).
2461 		 */
2462 		__set_migration_target_nodes();
2463 		break;
2464 	case MEM_CANCEL_OFFLINE:
2465 		/*
2466 		 * MEM_GOING_OFFLINE disabled all the migration
2467 		 * targets.  Reenable them.
2468 		 */
2469 		__set_migration_target_nodes();
2470 		break;
2471 	case MEM_GOING_ONLINE:
2472 	case MEM_CANCEL_ONLINE:
2473 		break;
2474 	}
2475 
2476 	return notifier_from_errno(0);
2477 }
2478 #endif
2479 
2480 void __init migrate_on_reclaim_init(void)
2481 {
2482 	node_demotion = kcalloc(nr_node_ids,
2483 				sizeof(struct demotion_nodes),
2484 				GFP_KERNEL);
2485 	WARN_ON(!node_demotion);
2486 #ifdef CONFIG_MEMORY_HOTPLUG
2487 	hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
2488 #endif
2489 	/*
2490 	 * At this point, all NUMA nodes with memory/CPUs have their state
2491 	 * properly set, so we can build the demotion order now.
2492 	 * Hold the cpu_hotplug lock, as we could possibly see CPU hotplug
2493 	 * events during boot.
2494 	 */
2495 	cpus_read_lock();
2496 	set_migration_target_nodes();
2497 	cpus_read_unlock();
2498 }
2499 
2500 bool numa_demotion_enabled = false;
2501 
2502 #ifdef CONFIG_SYSFS
2503 static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
2504 					  struct kobj_attribute *attr, char *buf)
2505 {
2506 	return sysfs_emit(buf, "%s\n",
2507 			  numa_demotion_enabled ? "true" : "false");
2508 }
2509 
2510 static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
2511 					   struct kobj_attribute *attr,
2512 					   const char *buf, size_t count)
2513 {
2514 	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
2515 		numa_demotion_enabled = true;
2516 	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
2517 		numa_demotion_enabled = false;
2518 	else
2519 		return -EINVAL;
2520 
2521 	return count;
2522 }
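/*
 * With CONFIG_SYSFS this attribute is exposed as
 * /sys/kernel/mm/numa/demotion_enabled (the "numa" kobject created in
 * numa_init_sysfs() below lives under mm_kobj), so reclaim-based demotion
 * can be toggled from user space, e.g.:
 *
 *	# echo true > /sys/kernel/mm/numa/demotion_enabled
 *	# cat /sys/kernel/mm/numa/demotion_enabled
 *	true
 */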
2523 
2524 static struct kobj_attribute numa_demotion_enabled_attr =
2525 	__ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
2526 	       numa_demotion_enabled_store);
2527 
2528 static struct attribute *numa_attrs[] = {
2529 	&numa_demotion_enabled_attr.attr,
2530 	NULL,
2531 };
2532 
2533 static const struct attribute_group numa_attr_group = {
2534 	.attrs = numa_attrs,
2535 };
2536 
2537 static int __init numa_init_sysfs(void)
2538 {
2539 	int err;
2540 	struct kobject *numa_kobj;
2541 
2542 	numa_kobj = kobject_create_and_add("numa", mm_kobj);
2543 	if (!numa_kobj) {
2544 		pr_err("failed to create numa kobject\n");
2545 		return -ENOMEM;
2546 	}
2547 	err = sysfs_create_group(numa_kobj, &numa_attr_group);
2548 	if (err) {
2549 		pr_err("failed to register numa group\n");
2550 		goto delete_obj;
2551 	}
2552 	return 0;
2553 
2554 delete_obj:
2555 	kobject_put(numa_kobj);
2556 	return err;
2557 }
2558 subsys_initcall(numa_init_sysfs);
2559 #endif /* CONFIG_SYSFS */
2560 #endif /* CONFIG_NUMA */
2561