xref: /openbmc/linux/mm/migrate.c (revision 840d9a813c8eaa5c55d86525e374a97ca5023b53)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15 
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/ksm.h>
25 #include <linux/rmap.h>
26 #include <linux/topology.h>
27 #include <linux/cpu.h>
28 #include <linux/cpuset.h>
29 #include <linux/writeback.h>
30 #include <linux/mempolicy.h>
31 #include <linux/vmalloc.h>
32 #include <linux/security.h>
33 #include <linux/backing-dev.h>
34 #include <linux/compaction.h>
35 #include <linux/syscalls.h>
36 #include <linux/compat.h>
37 #include <linux/hugetlb.h>
38 #include <linux/hugetlb_cgroup.h>
39 #include <linux/gfp.h>
40 #include <linux/pfn_t.h>
41 #include <linux/memremap.h>
42 #include <linux/userfaultfd_k.h>
43 #include <linux/balloon_compaction.h>
44 #include <linux/page_idle.h>
45 #include <linux/page_owner.h>
46 #include <linux/sched/mm.h>
47 #include <linux/ptrace.h>
48 #include <linux/oom.h>
49 #include <linux/memory.h>
50 #include <linux/random.h>
51 #include <linux/sched/sysctl.h>
52 #include <linux/memory-tiers.h>
53 
54 #include <asm/tlbflush.h>
55 
56 #include <trace/events/migrate.h>
57 
58 #include "internal.h"
59 
isolate_movable_page(struct page * page,isolate_mode_t mode)60 bool isolate_movable_page(struct page *page, isolate_mode_t mode)
61 {
62 	struct folio *folio = folio_get_nontail_page(page);
63 	const struct movable_operations *mops;
64 
65 	/*
66 	 * Avoid burning cycles with pages that are yet under __free_pages(),
67 	 * or just got freed under us.
68 	 *
69 	 * In case we 'win' a race for a movable page being freed under us and
70 	 * raise its refcount preventing __free_pages() from doing its job
71 	 * the put_page() at the end of this block will take care of
72 	 * release this page, thus avoiding a nasty leakage.
73 	 */
74 	if (!folio)
75 		goto out;
76 
77 	if (unlikely(folio_test_slab(folio)))
78 		goto out_putfolio;
79 	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
80 	smp_rmb();
81 	/*
82 	 * Check movable flag before taking the page lock because
83 	 * we use non-atomic bitops on newly allocated page flags so
84 	 * unconditionally grabbing the lock ruins page's owner side.
85 	 */
86 	if (unlikely(!__folio_test_movable(folio)))
87 		goto out_putfolio;
88 	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
89 	smp_rmb();
90 	if (unlikely(folio_test_slab(folio)))
91 		goto out_putfolio;
92 
93 	/*
94 	 * As movable pages are not isolated from LRU lists, concurrent
95 	 * compaction threads can race against page migration functions
96 	 * as well as race against the releasing a page.
97 	 *
98 	 * In order to avoid having an already isolated movable page
99 	 * being (wrongly) re-isolated while it is under migration,
100 	 * or to avoid attempting to isolate pages being released,
101 	 * lets be sure we have the page lock
102 	 * before proceeding with the movable page isolation steps.
103 	 */
104 	if (unlikely(!folio_trylock(folio)))
105 		goto out_putfolio;
106 
107 	if (!folio_test_movable(folio) || folio_test_isolated(folio))
108 		goto out_no_isolated;
109 
110 	mops = folio_movable_ops(folio);
111 	VM_BUG_ON_FOLIO(!mops, folio);
112 
113 	if (!mops->isolate_page(&folio->page, mode))
114 		goto out_no_isolated;
115 
116 	/* Driver shouldn't use PG_isolated bit of page->flags */
117 	WARN_ON_ONCE(folio_test_isolated(folio));
118 	folio_set_isolated(folio);
119 	folio_unlock(folio);
120 
121 	return true;
122 
123 out_no_isolated:
124 	folio_unlock(folio);
125 out_putfolio:
126 	folio_put(folio);
127 out:
128 	return false;
129 }
130 
putback_movable_folio(struct folio * folio)131 static void putback_movable_folio(struct folio *folio)
132 {
133 	const struct movable_operations *mops = folio_movable_ops(folio);
134 
135 	mops->putback_page(&folio->page);
136 	folio_clear_isolated(folio);
137 }
138 
139 /*
140  * Put previously isolated pages back onto the appropriate lists
141  * from where they were once taken off for compaction/migration.
142  *
143  * This function shall be used whenever the isolated pageset has been
144  * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
145  * and isolate_hugetlb().
146  */
putback_movable_pages(struct list_head * l)147 void putback_movable_pages(struct list_head *l)
148 {
149 	struct folio *folio;
150 	struct folio *folio2;
151 
152 	list_for_each_entry_safe(folio, folio2, l, lru) {
153 		if (unlikely(folio_test_hugetlb(folio))) {
154 			folio_putback_active_hugetlb(folio);
155 			continue;
156 		}
157 		list_del(&folio->lru);
158 		/*
159 		 * We isolated non-lru movable folio so here we can use
160 		 * __PageMovable because LRU folio's mapping cannot have
161 		 * PAGE_MAPPING_MOVABLE.
162 		 */
163 		if (unlikely(__folio_test_movable(folio))) {
164 			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
165 			folio_lock(folio);
166 			if (folio_test_movable(folio))
167 				putback_movable_folio(folio);
168 			else
169 				folio_clear_isolated(folio);
170 			folio_unlock(folio);
171 			folio_put(folio);
172 		} else {
173 			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
174 					folio_is_file_lru(folio), -folio_nr_pages(folio));
175 			folio_putback_lru(folio);
176 		}
177 	}
178 }
179 
180 /*
181  * Restore a potential migration pte to a working pte entry
182  */
remove_migration_pte(struct folio * folio,struct vm_area_struct * vma,unsigned long addr,void * old)183 static bool remove_migration_pte(struct folio *folio,
184 		struct vm_area_struct *vma, unsigned long addr, void *old)
185 {
186 	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
187 
188 	while (page_vma_mapped_walk(&pvmw)) {
189 		rmap_t rmap_flags = RMAP_NONE;
190 		pte_t old_pte;
191 		pte_t pte;
192 		swp_entry_t entry;
193 		struct page *new;
194 		unsigned long idx = 0;
195 
196 		/* pgoff is invalid for ksm pages, but they are never large */
197 		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
198 			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
199 		new = folio_page(folio, idx);
200 
201 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
202 		/* PMD-mapped THP migration entry */
203 		if (!pvmw.pte) {
204 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
205 					!folio_test_pmd_mappable(folio), folio);
206 			remove_migration_pmd(&pvmw, new);
207 			continue;
208 		}
209 #endif
210 
211 		folio_get(folio);
212 		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
213 		old_pte = ptep_get(pvmw.pte);
214 		if (pte_swp_soft_dirty(old_pte))
215 			pte = pte_mksoft_dirty(pte);
216 
217 		entry = pte_to_swp_entry(old_pte);
218 		if (!is_migration_entry_young(entry))
219 			pte = pte_mkold(pte);
220 		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
221 			pte = pte_mkdirty(pte);
222 		if (is_writable_migration_entry(entry))
223 			pte = pte_mkwrite(pte, vma);
224 		else if (pte_swp_uffd_wp(old_pte))
225 			pte = pte_mkuffd_wp(pte);
226 
227 		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
228 			rmap_flags |= RMAP_EXCLUSIVE;
229 
230 		if (unlikely(is_device_private_page(new))) {
231 			if (pte_write(pte))
232 				entry = make_writable_device_private_entry(
233 							page_to_pfn(new));
234 			else
235 				entry = make_readable_device_private_entry(
236 							page_to_pfn(new));
237 			pte = swp_entry_to_pte(entry);
238 			if (pte_swp_soft_dirty(old_pte))
239 				pte = pte_swp_mksoft_dirty(pte);
240 			if (pte_swp_uffd_wp(old_pte))
241 				pte = pte_swp_mkuffd_wp(pte);
242 		}
243 
244 #ifdef CONFIG_HUGETLB_PAGE
245 		if (folio_test_hugetlb(folio)) {
246 			struct hstate *h = hstate_vma(vma);
247 			unsigned int shift = huge_page_shift(h);
248 			unsigned long psize = huge_page_size(h);
249 
250 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
251 			if (folio_test_anon(folio))
252 				hugepage_add_anon_rmap(new, vma, pvmw.address,
253 						       rmap_flags);
254 			else
255 				page_dup_file_rmap(new, true);
256 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
257 					psize);
258 		} else
259 #endif
260 		{
261 			if (folio_test_anon(folio))
262 				page_add_anon_rmap(new, vma, pvmw.address,
263 						   rmap_flags);
264 			else
265 				page_add_file_rmap(new, vma, false);
266 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
267 		}
268 		if (vma->vm_flags & VM_LOCKED)
269 			mlock_drain_local();
270 
271 		trace_remove_migration_pte(pvmw.address, pte_val(pte),
272 					   compound_order(new));
273 
274 		/* No need to invalidate - it was non-present before */
275 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
276 	}
277 
278 	return true;
279 }
280 
281 /*
282  * Get rid of all migration entries and replace them by
283  * references to the indicated page.
284  */
remove_migration_ptes(struct folio * src,struct folio * dst,bool locked)285 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
286 {
287 	struct rmap_walk_control rwc = {
288 		.rmap_one = remove_migration_pte,
289 		.arg = src,
290 	};
291 
292 	if (locked)
293 		rmap_walk_locked(dst, &rwc);
294 	else
295 		rmap_walk(dst, &rwc);
296 }
297 
298 /*
299  * Something used the pte of a page under migration. We need to
300  * get to the page and wait until migration is finished.
301  * When we return from this function the fault will be retried.
302  */
migration_entry_wait(struct mm_struct * mm,pmd_t * pmd,unsigned long address)303 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
304 			  unsigned long address)
305 {
306 	spinlock_t *ptl;
307 	pte_t *ptep;
308 	pte_t pte;
309 	swp_entry_t entry;
310 
311 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
312 	if (!ptep)
313 		return;
314 
315 	pte = ptep_get(ptep);
316 	pte_unmap(ptep);
317 
318 	if (!is_swap_pte(pte))
319 		goto out;
320 
321 	entry = pte_to_swp_entry(pte);
322 	if (!is_migration_entry(entry))
323 		goto out;
324 
325 	migration_entry_wait_on_locked(entry, ptl);
326 	return;
327 out:
328 	spin_unlock(ptl);
329 }
330 
331 #ifdef CONFIG_HUGETLB_PAGE
332 /*
333  * The vma read lock must be held upon entry. Holding that lock prevents either
334  * the pte or the ptl from being freed.
335  *
336  * This function will release the vma lock before returning.
337  */
migration_entry_wait_huge(struct vm_area_struct * vma,pte_t * ptep)338 void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
339 {
340 	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
341 	pte_t pte;
342 
343 	hugetlb_vma_assert_locked(vma);
344 	spin_lock(ptl);
345 	pte = huge_ptep_get(ptep);
346 
347 	if (unlikely(!is_hugetlb_entry_migration(pte))) {
348 		spin_unlock(ptl);
349 		hugetlb_vma_unlock_read(vma);
350 	} else {
351 		/*
352 		 * If migration entry existed, safe to release vma lock
353 		 * here because the pgtable page won't be freed without the
354 		 * pgtable lock released.  See comment right above pgtable
355 		 * lock release in migration_entry_wait_on_locked().
356 		 */
357 		hugetlb_vma_unlock_read(vma);
358 		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
359 	}
360 }
361 #endif
362 
363 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
pmd_migration_entry_wait(struct mm_struct * mm,pmd_t * pmd)364 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
365 {
366 	spinlock_t *ptl;
367 
368 	ptl = pmd_lock(mm, pmd);
369 	if (!is_pmd_migration_entry(*pmd))
370 		goto unlock;
371 	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
372 	return;
373 unlock:
374 	spin_unlock(ptl);
375 }
376 #endif
377 
folio_expected_refs(struct address_space * mapping,struct folio * folio)378 static int folio_expected_refs(struct address_space *mapping,
379 		struct folio *folio)
380 {
381 	int refs = 1;
382 	if (!mapping)
383 		return refs;
384 
385 	refs += folio_nr_pages(folio);
386 	if (folio_test_private(folio))
387 		refs++;
388 
389 	return refs;
390 }
391 
392 /*
393  * Replace the page in the mapping.
394  *
395  * The number of remaining references must be:
396  * 1 for anonymous pages without a mapping
397  * 2 for pages with a mapping
398  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
399  */
folio_migrate_mapping(struct address_space * mapping,struct folio * newfolio,struct folio * folio,int extra_count)400 int folio_migrate_mapping(struct address_space *mapping,
401 		struct folio *newfolio, struct folio *folio, int extra_count)
402 {
403 	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
404 	struct zone *oldzone, *newzone;
405 	int dirty;
406 	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
407 	long nr = folio_nr_pages(folio);
408 	long entries, i;
409 
410 	if (!mapping) {
411 		/* Anonymous page without mapping */
412 		if (folio_ref_count(folio) != expected_count)
413 			return -EAGAIN;
414 
415 		/* No turning back from here */
416 		newfolio->index = folio->index;
417 		newfolio->mapping = folio->mapping;
418 		if (folio_test_swapbacked(folio))
419 			__folio_set_swapbacked(newfolio);
420 
421 		return MIGRATEPAGE_SUCCESS;
422 	}
423 
424 	oldzone = folio_zone(folio);
425 	newzone = folio_zone(newfolio);
426 
427 	xas_lock_irq(&xas);
428 	if (!folio_ref_freeze(folio, expected_count)) {
429 		xas_unlock_irq(&xas);
430 		return -EAGAIN;
431 	}
432 
433 	/*
434 	 * Now we know that no one else is looking at the folio:
435 	 * no turning back from here.
436 	 */
437 	newfolio->index = folio->index;
438 	newfolio->mapping = folio->mapping;
439 	folio_ref_add(newfolio, nr); /* add cache reference */
440 	if (folio_test_swapbacked(folio))
441 		__folio_set_swapbacked(newfolio);
442 	if (folio_test_swapcache(folio)) {
443 		folio_set_swapcache(newfolio);
444 		newfolio->private = folio_get_private(folio);
445 		entries = nr;
446 	} else {
447 		entries = 1;
448 	}
449 
450 	/* Move dirty while page refs frozen and newpage not yet exposed */
451 	dirty = folio_test_dirty(folio);
452 	if (dirty) {
453 		folio_clear_dirty(folio);
454 		folio_set_dirty(newfolio);
455 	}
456 
457 	/* Swap cache still stores N entries instead of a high-order entry */
458 	for (i = 0; i < entries; i++) {
459 		xas_store(&xas, newfolio);
460 		xas_next(&xas);
461 	}
462 
463 	/*
464 	 * Drop cache reference from old page by unfreezing
465 	 * to one less reference.
466 	 * We know this isn't the last reference.
467 	 */
468 	folio_ref_unfreeze(folio, expected_count - nr);
469 
470 	xas_unlock(&xas);
471 	/* Leave irq disabled to prevent preemption while updating stats */
472 
473 	/*
474 	 * If moved to a different zone then also account
475 	 * the page for that zone. Other VM counters will be
476 	 * taken care of when we establish references to the
477 	 * new page and drop references to the old page.
478 	 *
479 	 * Note that anonymous pages are accounted for
480 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
481 	 * are mapped to swap space.
482 	 */
483 	if (newzone != oldzone) {
484 		struct lruvec *old_lruvec, *new_lruvec;
485 		struct mem_cgroup *memcg;
486 
487 		memcg = folio_memcg(folio);
488 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
489 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
490 
491 		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
492 		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
493 		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
494 			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
495 			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
496 
497 			if (folio_test_pmd_mappable(folio)) {
498 				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
499 				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
500 			}
501 		}
502 #ifdef CONFIG_SWAP
503 		if (folio_test_swapcache(folio)) {
504 			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
505 			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
506 		}
507 #endif
508 		if (dirty && mapping_can_writeback(mapping)) {
509 			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
510 			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
511 			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
512 			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
513 		}
514 	}
515 	local_irq_enable();
516 
517 	return MIGRATEPAGE_SUCCESS;
518 }
519 EXPORT_SYMBOL(folio_migrate_mapping);
520 
521 /*
522  * The expected number of remaining references is the same as that
523  * of folio_migrate_mapping().
524  */
migrate_huge_page_move_mapping(struct address_space * mapping,struct folio * dst,struct folio * src)525 int migrate_huge_page_move_mapping(struct address_space *mapping,
526 				   struct folio *dst, struct folio *src)
527 {
528 	XA_STATE(xas, &mapping->i_pages, folio_index(src));
529 	int expected_count;
530 
531 	xas_lock_irq(&xas);
532 	expected_count = 2 + folio_has_private(src);
533 	if (!folio_ref_freeze(src, expected_count)) {
534 		xas_unlock_irq(&xas);
535 		return -EAGAIN;
536 	}
537 
538 	dst->index = src->index;
539 	dst->mapping = src->mapping;
540 
541 	folio_get(dst);
542 
543 	xas_store(&xas, dst);
544 
545 	folio_ref_unfreeze(src, expected_count - 1);
546 
547 	xas_unlock_irq(&xas);
548 
549 	return MIGRATEPAGE_SUCCESS;
550 }
551 
552 /*
553  * Copy the flags and some other ancillary information
554  */
folio_migrate_flags(struct folio * newfolio,struct folio * folio)555 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
556 {
557 	int cpupid;
558 
559 	if (folio_test_error(folio))
560 		folio_set_error(newfolio);
561 	if (folio_test_referenced(folio))
562 		folio_set_referenced(newfolio);
563 	if (folio_test_uptodate(folio))
564 		folio_mark_uptodate(newfolio);
565 	if (folio_test_clear_active(folio)) {
566 		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
567 		folio_set_active(newfolio);
568 	} else if (folio_test_clear_unevictable(folio))
569 		folio_set_unevictable(newfolio);
570 	if (folio_test_workingset(folio))
571 		folio_set_workingset(newfolio);
572 	if (folio_test_checked(folio))
573 		folio_set_checked(newfolio);
574 	/*
575 	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
576 	 * migration entries. We can still have PG_anon_exclusive set on an
577 	 * effectively unmapped and unreferenced first sub-pages of an
578 	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
579 	 */
580 	if (folio_test_mappedtodisk(folio))
581 		folio_set_mappedtodisk(newfolio);
582 
583 	/* Move dirty on pages not done by folio_migrate_mapping() */
584 	if (folio_test_dirty(folio))
585 		folio_set_dirty(newfolio);
586 
587 	if (folio_test_young(folio))
588 		folio_set_young(newfolio);
589 	if (folio_test_idle(folio))
590 		folio_set_idle(newfolio);
591 
592 	/*
593 	 * Copy NUMA information to the new page, to prevent over-eager
594 	 * future migrations of this same page.
595 	 */
596 	cpupid = page_cpupid_xchg_last(&folio->page, -1);
597 	/*
598 	 * For memory tiering mode, when migrate between slow and fast
599 	 * memory node, reset cpupid, because that is used to record
600 	 * page access time in slow memory node.
601 	 */
602 	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
603 		bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
604 		bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
605 
606 		if (f_toptier != t_toptier)
607 			cpupid = -1;
608 	}
609 	page_cpupid_xchg_last(&newfolio->page, cpupid);
610 
611 	folio_migrate_ksm(newfolio, folio);
612 	/*
613 	 * Please do not reorder this without considering how mm/ksm.c's
614 	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
615 	 */
616 	if (folio_test_swapcache(folio))
617 		folio_clear_swapcache(folio);
618 	folio_clear_private(folio);
619 
620 	/* page->private contains hugetlb specific flags */
621 	if (!folio_test_hugetlb(folio))
622 		folio->private = NULL;
623 
624 	/*
625 	 * If any waiters have accumulated on the new page then
626 	 * wake them up.
627 	 */
628 	if (folio_test_writeback(newfolio))
629 		folio_end_writeback(newfolio);
630 
631 	/*
632 	 * PG_readahead shares the same bit with PG_reclaim.  The above
633 	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
634 	 * bit after that.
635 	 */
636 	if (folio_test_readahead(folio))
637 		folio_set_readahead(newfolio);
638 
639 	folio_copy_owner(newfolio, folio);
640 
641 	if (!folio_test_hugetlb(folio))
642 		mem_cgroup_migrate(folio, newfolio);
643 }
644 EXPORT_SYMBOL(folio_migrate_flags);
645 
folio_migrate_copy(struct folio * newfolio,struct folio * folio)646 void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
647 {
648 	folio_copy(newfolio, folio);
649 	folio_migrate_flags(newfolio, folio);
650 }
651 EXPORT_SYMBOL(folio_migrate_copy);
652 
653 /************************************************************
654  *                    Migration functions
655  ***********************************************************/
656 
migrate_folio_extra(struct address_space * mapping,struct folio * dst,struct folio * src,enum migrate_mode mode,int extra_count)657 int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
658 		struct folio *src, enum migrate_mode mode, int extra_count)
659 {
660 	int rc;
661 
662 	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */
663 
664 	rc = folio_migrate_mapping(mapping, dst, src, extra_count);
665 
666 	if (rc != MIGRATEPAGE_SUCCESS)
667 		return rc;
668 
669 	if (mode != MIGRATE_SYNC_NO_COPY)
670 		folio_migrate_copy(dst, src);
671 	else
672 		folio_migrate_flags(dst, src);
673 	return MIGRATEPAGE_SUCCESS;
674 }
675 
676 /**
677  * migrate_folio() - Simple folio migration.
678  * @mapping: The address_space containing the folio.
679  * @dst: The folio to migrate the data to.
680  * @src: The folio containing the current data.
681  * @mode: How to migrate the page.
682  *
683  * Common logic to directly migrate a single LRU folio suitable for
684  * folios that do not use PagePrivate/PagePrivate2.
685  *
686  * Folios are locked upon entry and exit.
687  */
migrate_folio(struct address_space * mapping,struct folio * dst,struct folio * src,enum migrate_mode mode)688 int migrate_folio(struct address_space *mapping, struct folio *dst,
689 		struct folio *src, enum migrate_mode mode)
690 {
691 	return migrate_folio_extra(mapping, dst, src, mode, 0);
692 }
693 EXPORT_SYMBOL(migrate_folio);
694 
695 #ifdef CONFIG_BUFFER_HEAD
696 /* Returns true if all buffers are successfully locked */
buffer_migrate_lock_buffers(struct buffer_head * head,enum migrate_mode mode)697 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
698 							enum migrate_mode mode)
699 {
700 	struct buffer_head *bh = head;
701 	struct buffer_head *failed_bh;
702 
703 	do {
704 		if (!trylock_buffer(bh)) {
705 			if (mode == MIGRATE_ASYNC)
706 				goto unlock;
707 			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
708 				goto unlock;
709 			lock_buffer(bh);
710 		}
711 
712 		bh = bh->b_this_page;
713 	} while (bh != head);
714 
715 	return true;
716 
717 unlock:
718 	/* We failed to lock the buffer and cannot stall. */
719 	failed_bh = bh;
720 	bh = head;
721 	while (bh != failed_bh) {
722 		unlock_buffer(bh);
723 		bh = bh->b_this_page;
724 	}
725 
726 	return false;
727 }
728 
__buffer_migrate_folio(struct address_space * mapping,struct folio * dst,struct folio * src,enum migrate_mode mode,bool check_refs)729 static int __buffer_migrate_folio(struct address_space *mapping,
730 		struct folio *dst, struct folio *src, enum migrate_mode mode,
731 		bool check_refs)
732 {
733 	struct buffer_head *bh, *head;
734 	int rc;
735 	int expected_count;
736 
737 	head = folio_buffers(src);
738 	if (!head)
739 		return migrate_folio(mapping, dst, src, mode);
740 
741 	/* Check whether page does not have extra refs before we do more work */
742 	expected_count = folio_expected_refs(mapping, src);
743 	if (folio_ref_count(src) != expected_count)
744 		return -EAGAIN;
745 
746 	if (!buffer_migrate_lock_buffers(head, mode))
747 		return -EAGAIN;
748 
749 	if (check_refs) {
750 		bool busy;
751 		bool invalidated = false;
752 
753 recheck_buffers:
754 		busy = false;
755 		spin_lock(&mapping->private_lock);
756 		bh = head;
757 		do {
758 			if (atomic_read(&bh->b_count)) {
759 				busy = true;
760 				break;
761 			}
762 			bh = bh->b_this_page;
763 		} while (bh != head);
764 		if (busy) {
765 			if (invalidated) {
766 				rc = -EAGAIN;
767 				goto unlock_buffers;
768 			}
769 			spin_unlock(&mapping->private_lock);
770 			invalidate_bh_lrus();
771 			invalidated = true;
772 			goto recheck_buffers;
773 		}
774 	}
775 
776 	rc = folio_migrate_mapping(mapping, dst, src, 0);
777 	if (rc != MIGRATEPAGE_SUCCESS)
778 		goto unlock_buffers;
779 
780 	folio_attach_private(dst, folio_detach_private(src));
781 
782 	bh = head;
783 	do {
784 		folio_set_bh(bh, dst, bh_offset(bh));
785 		bh = bh->b_this_page;
786 	} while (bh != head);
787 
788 	if (mode != MIGRATE_SYNC_NO_COPY)
789 		folio_migrate_copy(dst, src);
790 	else
791 		folio_migrate_flags(dst, src);
792 
793 	rc = MIGRATEPAGE_SUCCESS;
794 unlock_buffers:
795 	if (check_refs)
796 		spin_unlock(&mapping->private_lock);
797 	bh = head;
798 	do {
799 		unlock_buffer(bh);
800 		bh = bh->b_this_page;
801 	} while (bh != head);
802 
803 	return rc;
804 }
805 
806 /**
807  * buffer_migrate_folio() - Migration function for folios with buffers.
808  * @mapping: The address space containing @src.
809  * @dst: The folio to migrate to.
810  * @src: The folio to migrate from.
811  * @mode: How to migrate the folio.
812  *
813  * This function can only be used if the underlying filesystem guarantees
814  * that no other references to @src exist. For example attached buffer
815  * heads are accessed only under the folio lock.  If your filesystem cannot
816  * provide this guarantee, buffer_migrate_folio_norefs() may be more
817  * appropriate.
818  *
819  * Return: 0 on success or a negative errno on failure.
820  */
buffer_migrate_folio(struct address_space * mapping,struct folio * dst,struct folio * src,enum migrate_mode mode)821 int buffer_migrate_folio(struct address_space *mapping,
822 		struct folio *dst, struct folio *src, enum migrate_mode mode)
823 {
824 	return __buffer_migrate_folio(mapping, dst, src, mode, false);
825 }
826 EXPORT_SYMBOL(buffer_migrate_folio);
827 
828 /**
829  * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
830  * @mapping: The address space containing @src.
831  * @dst: The folio to migrate to.
832  * @src: The folio to migrate from.
833  * @mode: How to migrate the folio.
834  *
835  * Like buffer_migrate_folio() except that this variant is more careful
836  * and checks that there are also no buffer head references. This function
837  * is the right one for mappings where buffer heads are directly looked
838  * up and referenced (such as block device mappings).
839  *
840  * Return: 0 on success or a negative errno on failure.
841  */
buffer_migrate_folio_norefs(struct address_space * mapping,struct folio * dst,struct folio * src,enum migrate_mode mode)842 int buffer_migrate_folio_norefs(struct address_space *mapping,
843 		struct folio *dst, struct folio *src, enum migrate_mode mode)
844 {
845 	return __buffer_migrate_folio(mapping, dst, src, mode, true);
846 }
847 EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
848 #endif /* CONFIG_BUFFER_HEAD */
849 
filemap_migrate_folio(struct address_space * mapping,struct folio * dst,struct folio * src,enum migrate_mode mode)850 int filemap_migrate_folio(struct address_space *mapping,
851 		struct folio *dst, struct folio *src, enum migrate_mode mode)
852 {
853 	int ret;
854 
855 	ret = folio_migrate_mapping(mapping, dst, src, 0);
856 	if (ret != MIGRATEPAGE_SUCCESS)
857 		return ret;
858 
859 	if (folio_get_private(src))
860 		folio_attach_private(dst, folio_detach_private(src));
861 
862 	if (mode != MIGRATE_SYNC_NO_COPY)
863 		folio_migrate_copy(dst, src);
864 	else
865 		folio_migrate_flags(dst, src);
866 	return MIGRATEPAGE_SUCCESS;
867 }
868 EXPORT_SYMBOL_GPL(filemap_migrate_folio);
869 
870 /*
871  * Writeback a folio to clean the dirty state
872  */
writeout(struct address_space * mapping,struct folio * folio)873 static int writeout(struct address_space *mapping, struct folio *folio)
874 {
875 	struct writeback_control wbc = {
876 		.sync_mode = WB_SYNC_NONE,
877 		.nr_to_write = 1,
878 		.range_start = 0,
879 		.range_end = LLONG_MAX,
880 		.for_reclaim = 1
881 	};
882 	int rc;
883 
884 	if (!mapping->a_ops->writepage)
885 		/* No write method for the address space */
886 		return -EINVAL;
887 
888 	if (!folio_clear_dirty_for_io(folio))
889 		/* Someone else already triggered a write */
890 		return -EAGAIN;
891 
892 	/*
893 	 * A dirty folio may imply that the underlying filesystem has
894 	 * the folio on some queue. So the folio must be clean for
895 	 * migration. Writeout may mean we lose the lock and the
896 	 * folio state is no longer what we checked for earlier.
897 	 * At this point we know that the migration attempt cannot
898 	 * be successful.
899 	 */
900 	remove_migration_ptes(folio, folio, false);
901 
902 	rc = mapping->a_ops->writepage(&folio->page, &wbc);
903 
904 	if (rc != AOP_WRITEPAGE_ACTIVATE)
905 		/* unlocked. Relock */
906 		folio_lock(folio);
907 
908 	return (rc < 0) ? -EIO : -EAGAIN;
909 }
910 
911 /*
912  * Default handling if a filesystem does not provide a migration function.
913  */
fallback_migrate_folio(struct address_space * mapping,struct folio * dst,struct folio * src,enum migrate_mode mode)914 static int fallback_migrate_folio(struct address_space *mapping,
915 		struct folio *dst, struct folio *src, enum migrate_mode mode)
916 {
917 	if (folio_test_dirty(src)) {
918 		/* Only writeback folios in full synchronous migration */
919 		switch (mode) {
920 		case MIGRATE_SYNC:
921 		case MIGRATE_SYNC_NO_COPY:
922 			break;
923 		default:
924 			return -EBUSY;
925 		}
926 		return writeout(mapping, src);
927 	}
928 
929 	/*
930 	 * Buffers may be managed in a filesystem specific way.
931 	 * We must have no buffers or drop them.
932 	 */
933 	if (!filemap_release_folio(src, GFP_KERNEL))
934 		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
935 
936 	return migrate_folio(mapping, dst, src, mode);
937 }
938 
939 /*
940  * Move a page to a newly allocated page
941  * The page is locked and all ptes have been successfully removed.
942  *
943  * The new page will have replaced the old page if this function
944  * is successful.
945  *
946  * Return value:
947  *   < 0 - error code
948  *  MIGRATEPAGE_SUCCESS - success
949  */
move_to_new_folio(struct folio * dst,struct folio * src,enum migrate_mode mode)950 static int move_to_new_folio(struct folio *dst, struct folio *src,
951 				enum migrate_mode mode)
952 {
953 	int rc = -EAGAIN;
954 	bool is_lru = !__PageMovable(&src->page);
955 
956 	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
957 	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
958 
959 	if (likely(is_lru)) {
960 		struct address_space *mapping = folio_mapping(src);
961 
962 		if (!mapping)
963 			rc = migrate_folio(mapping, dst, src, mode);
964 		else if (mapping->a_ops->migrate_folio)
965 			/*
966 			 * Most folios have a mapping and most filesystems
967 			 * provide a migrate_folio callback. Anonymous folios
968 			 * are part of swap space which also has its own
969 			 * migrate_folio callback. This is the most common path
970 			 * for page migration.
971 			 */
972 			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
973 								mode);
974 		else
975 			rc = fallback_migrate_folio(mapping, dst, src, mode);
976 	} else {
977 		const struct movable_operations *mops;
978 
979 		/*
980 		 * In case of non-lru page, it could be released after
981 		 * isolation step. In that case, we shouldn't try migration.
982 		 */
983 		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
984 		if (!folio_test_movable(src)) {
985 			rc = MIGRATEPAGE_SUCCESS;
986 			folio_clear_isolated(src);
987 			goto out;
988 		}
989 
990 		mops = folio_movable_ops(src);
991 		rc = mops->migrate_page(&dst->page, &src->page, mode);
992 		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
993 				!folio_test_isolated(src));
994 	}
995 
996 	/*
997 	 * When successful, old pagecache src->mapping must be cleared before
998 	 * src is freed; but stats require that PageAnon be left as PageAnon.
999 	 */
1000 	if (rc == MIGRATEPAGE_SUCCESS) {
1001 		if (__PageMovable(&src->page)) {
1002 			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1003 
1004 			/*
1005 			 * We clear PG_movable under page_lock so any compactor
1006 			 * cannot try to migrate this page.
1007 			 */
1008 			folio_clear_isolated(src);
1009 		}
1010 
1011 		/*
1012 		 * Anonymous and movable src->mapping will be cleared by
1013 		 * free_pages_prepare so don't reset it here for keeping
1014 		 * the type to work PageAnon, for example.
1015 		 */
1016 		if (!folio_mapping_flags(src))
1017 			src->mapping = NULL;
1018 
1019 		if (likely(!folio_is_zone_device(dst)))
1020 			flush_dcache_folio(dst);
1021 	}
1022 out:
1023 	return rc;
1024 }
1025 
1026 /*
1027  * To record some information during migration, we use unused private
1028  * field of struct folio of the newly allocated destination folio.
1029  * This is safe because nobody is using it except us.
1030  */
1031 enum {
1032 	PAGE_WAS_MAPPED = BIT(0),
1033 	PAGE_WAS_MLOCKED = BIT(1),
1034 	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1035 };
1036 
__migrate_folio_record(struct folio * dst,int old_page_state,struct anon_vma * anon_vma)1037 static void __migrate_folio_record(struct folio *dst,
1038 				   int old_page_state,
1039 				   struct anon_vma *anon_vma)
1040 {
1041 	dst->private = (void *)anon_vma + old_page_state;
1042 }
1043 
__migrate_folio_extract(struct folio * dst,int * old_page_state,struct anon_vma ** anon_vmap)1044 static void __migrate_folio_extract(struct folio *dst,
1045 				   int *old_page_state,
1046 				   struct anon_vma **anon_vmap)
1047 {
1048 	unsigned long private = (unsigned long)dst->private;
1049 
1050 	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1051 	*old_page_state = private & PAGE_OLD_STATES;
1052 	dst->private = NULL;
1053 }
1054 
1055 /* Restore the source folio to the original state upon failure */
migrate_folio_undo_src(struct folio * src,int page_was_mapped,struct anon_vma * anon_vma,bool locked,struct list_head * ret)1056 static void migrate_folio_undo_src(struct folio *src,
1057 				   int page_was_mapped,
1058 				   struct anon_vma *anon_vma,
1059 				   bool locked,
1060 				   struct list_head *ret)
1061 {
1062 	if (page_was_mapped)
1063 		remove_migration_ptes(src, src, false);
1064 	/* Drop an anon_vma reference if we took one */
1065 	if (anon_vma)
1066 		put_anon_vma(anon_vma);
1067 	if (locked)
1068 		folio_unlock(src);
1069 	if (ret)
1070 		list_move_tail(&src->lru, ret);
1071 }
1072 
1073 /* Restore the destination folio to the original state upon failure */
migrate_folio_undo_dst(struct folio * dst,bool locked,free_folio_t put_new_folio,unsigned long private)1074 static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1075 		free_folio_t put_new_folio, unsigned long private)
1076 {
1077 	if (locked)
1078 		folio_unlock(dst);
1079 	if (put_new_folio)
1080 		put_new_folio(dst, private);
1081 	else
1082 		folio_put(dst);
1083 }
1084 
1085 /* Cleanup src folio upon migration success */
migrate_folio_done(struct folio * src,enum migrate_reason reason)1086 static void migrate_folio_done(struct folio *src,
1087 			       enum migrate_reason reason)
1088 {
1089 	/*
1090 	 * Compaction can migrate also non-LRU pages which are
1091 	 * not accounted to NR_ISOLATED_*. They can be recognized
1092 	 * as __PageMovable
1093 	 */
1094 	if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION)
1095 		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1096 				    folio_is_file_lru(src), -folio_nr_pages(src));
1097 
1098 	if (reason != MR_MEMORY_FAILURE)
1099 		/* We release the page in page_handle_poison. */
1100 		folio_put(src);
1101 }
1102 
1103 /* Obtain the lock on page, remove all ptes. */
migrate_folio_unmap(new_folio_t get_new_folio,free_folio_t put_new_folio,unsigned long private,struct folio * src,struct folio ** dstp,enum migrate_mode mode,enum migrate_reason reason,struct list_head * ret)1104 static int migrate_folio_unmap(new_folio_t get_new_folio,
1105 		free_folio_t put_new_folio, unsigned long private,
1106 		struct folio *src, struct folio **dstp, enum migrate_mode mode,
1107 		enum migrate_reason reason, struct list_head *ret)
1108 {
1109 	struct folio *dst;
1110 	int rc = -EAGAIN;
1111 	int old_page_state = 0;
1112 	struct anon_vma *anon_vma = NULL;
1113 	bool is_lru = !__PageMovable(&src->page);
1114 	bool locked = false;
1115 	bool dst_locked = false;
1116 
1117 	if (folio_ref_count(src) == 1) {
1118 		/* Folio was freed from under us. So we are done. */
1119 		folio_clear_active(src);
1120 		folio_clear_unevictable(src);
1121 		/* free_pages_prepare() will clear PG_isolated. */
1122 		list_del(&src->lru);
1123 		migrate_folio_done(src, reason);
1124 		return MIGRATEPAGE_SUCCESS;
1125 	}
1126 
1127 	dst = get_new_folio(src, private);
1128 	if (!dst)
1129 		return -ENOMEM;
1130 	*dstp = dst;
1131 
1132 	dst->private = NULL;
1133 
1134 	if (!folio_trylock(src)) {
1135 		if (mode == MIGRATE_ASYNC)
1136 			goto out;
1137 
1138 		/*
1139 		 * It's not safe for direct compaction to call lock_page.
1140 		 * For example, during page readahead pages are added locked
1141 		 * to the LRU. Later, when the IO completes the pages are
1142 		 * marked uptodate and unlocked. However, the queueing
1143 		 * could be merging multiple pages for one bio (e.g.
1144 		 * mpage_readahead). If an allocation happens for the
1145 		 * second or third page, the process can end up locking
1146 		 * the same page twice and deadlocking. Rather than
1147 		 * trying to be clever about what pages can be locked,
1148 		 * avoid the use of lock_page for direct compaction
1149 		 * altogether.
1150 		 */
1151 		if (current->flags & PF_MEMALLOC)
1152 			goto out;
1153 
1154 		/*
1155 		 * In "light" mode, we can wait for transient locks (eg
1156 		 * inserting a page into the page table), but it's not
1157 		 * worth waiting for I/O.
1158 		 */
1159 		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1160 			goto out;
1161 
1162 		folio_lock(src);
1163 	}
1164 	locked = true;
1165 	if (folio_test_mlocked(src))
1166 		old_page_state |= PAGE_WAS_MLOCKED;
1167 
1168 	if (folio_test_writeback(src)) {
1169 		/*
1170 		 * Only in the case of a full synchronous migration is it
1171 		 * necessary to wait for PageWriteback. In the async case,
1172 		 * the retry loop is too short and in the sync-light case,
1173 		 * the overhead of stalling is too much
1174 		 */
1175 		switch (mode) {
1176 		case MIGRATE_SYNC:
1177 		case MIGRATE_SYNC_NO_COPY:
1178 			break;
1179 		default:
1180 			rc = -EBUSY;
1181 			goto out;
1182 		}
1183 		folio_wait_writeback(src);
1184 	}
1185 
1186 	/*
1187 	 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1188 	 * we cannot notice that anon_vma is freed while we migrate a page.
1189 	 * This get_anon_vma() delays freeing anon_vma pointer until the end
1190 	 * of migration. File cache pages are no problem because of page_lock()
1191 	 * File Caches may use write_page() or lock_page() in migration, then,
1192 	 * just care Anon page here.
1193 	 *
1194 	 * Only folio_get_anon_vma() understands the subtleties of
1195 	 * getting a hold on an anon_vma from outside one of its mms.
1196 	 * But if we cannot get anon_vma, then we won't need it anyway,
1197 	 * because that implies that the anon page is no longer mapped
1198 	 * (and cannot be remapped so long as we hold the page lock).
1199 	 */
1200 	if (folio_test_anon(src) && !folio_test_ksm(src))
1201 		anon_vma = folio_get_anon_vma(src);
1202 
1203 	/*
1204 	 * Block others from accessing the new page when we get around to
1205 	 * establishing additional references. We are usually the only one
1206 	 * holding a reference to dst at this point. We used to have a BUG
1207 	 * here if folio_trylock(dst) fails, but would like to allow for
1208 	 * cases where there might be a race with the previous use of dst.
1209 	 * This is much like races on refcount of oldpage: just don't BUG().
1210 	 */
1211 	if (unlikely(!folio_trylock(dst)))
1212 		goto out;
1213 	dst_locked = true;
1214 
1215 	if (unlikely(!is_lru)) {
1216 		__migrate_folio_record(dst, old_page_state, anon_vma);
1217 		return MIGRATEPAGE_UNMAP;
1218 	}
1219 
1220 	/*
1221 	 * Corner case handling:
1222 	 * 1. When a new swap-cache page is read into, it is added to the LRU
1223 	 * and treated as swapcache but it has no rmap yet.
1224 	 * Calling try_to_unmap() against a src->mapping==NULL page will
1225 	 * trigger a BUG.  So handle it here.
1226 	 * 2. An orphaned page (see truncate_cleanup_page) might have
1227 	 * fs-private metadata. The page can be picked up due to memory
1228 	 * offlining.  Everywhere else except page reclaim, the page is
1229 	 * invisible to the vm, so the page can not be migrated.  So try to
1230 	 * free the metadata, so the page can be freed.
1231 	 */
1232 	if (!src->mapping) {
1233 		if (folio_test_private(src)) {
1234 			try_to_free_buffers(src);
1235 			goto out;
1236 		}
1237 	} else if (folio_mapped(src)) {
1238 		/* Establish migration ptes */
1239 		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1240 			       !folio_test_ksm(src) && !anon_vma, src);
1241 		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1242 		old_page_state |= PAGE_WAS_MAPPED;
1243 	}
1244 
1245 	if (!folio_mapped(src)) {
1246 		__migrate_folio_record(dst, old_page_state, anon_vma);
1247 		return MIGRATEPAGE_UNMAP;
1248 	}
1249 
1250 out:
1251 	/*
1252 	 * A folio that has not been unmapped will be restored to
1253 	 * right list unless we want to retry.
1254 	 */
1255 	if (rc == -EAGAIN)
1256 		ret = NULL;
1257 
1258 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1259 			       anon_vma, locked, ret);
1260 	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1261 
1262 	return rc;
1263 }
1264 
1265 /* Migrate the folio to the newly allocated folio in dst. */
migrate_folio_move(free_folio_t put_new_folio,unsigned long private,struct folio * src,struct folio * dst,enum migrate_mode mode,enum migrate_reason reason,struct list_head * ret)1266 static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1267 			      struct folio *src, struct folio *dst,
1268 			      enum migrate_mode mode, enum migrate_reason reason,
1269 			      struct list_head *ret)
1270 {
1271 	int rc;
1272 	int old_page_state = 0;
1273 	struct anon_vma *anon_vma = NULL;
1274 	bool is_lru = !__PageMovable(&src->page);
1275 	struct list_head *prev;
1276 
1277 	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1278 	prev = dst->lru.prev;
1279 	list_del(&dst->lru);
1280 
1281 	rc = move_to_new_folio(dst, src, mode);
1282 	if (rc)
1283 		goto out;
1284 
1285 	if (unlikely(!is_lru))
1286 		goto out_unlock_both;
1287 
1288 	/*
1289 	 * When successful, push dst to LRU immediately: so that if it
1290 	 * turns out to be an mlocked page, remove_migration_ptes() will
1291 	 * automatically build up the correct dst->mlock_count for it.
1292 	 *
1293 	 * We would like to do something similar for the old page, when
1294 	 * unsuccessful, and other cases when a page has been temporarily
1295 	 * isolated from the unevictable LRU: but this case is the easiest.
1296 	 */
1297 	folio_add_lru(dst);
1298 	if (old_page_state & PAGE_WAS_MLOCKED)
1299 		lru_add_drain();
1300 
1301 	if (old_page_state & PAGE_WAS_MAPPED)
1302 		remove_migration_ptes(src, dst, false);
1303 
1304 out_unlock_both:
1305 	folio_unlock(dst);
1306 	set_page_owner_migrate_reason(&dst->page, reason);
1307 	/*
1308 	 * If migration is successful, decrease refcount of dst,
1309 	 * which will not free the page because new page owner increased
1310 	 * refcounter.
1311 	 */
1312 	folio_put(dst);
1313 
1314 	/*
1315 	 * A folio that has been migrated has all references removed
1316 	 * and will be freed.
1317 	 */
1318 	list_del(&src->lru);
1319 	/* Drop an anon_vma reference if we took one */
1320 	if (anon_vma)
1321 		put_anon_vma(anon_vma);
1322 	folio_unlock(src);
1323 	migrate_folio_done(src, reason);
1324 
1325 	return rc;
1326 out:
1327 	/*
1328 	 * A folio that has not been migrated will be restored to
1329 	 * right list unless we want to retry.
1330 	 */
1331 	if (rc == -EAGAIN) {
1332 		list_add(&dst->lru, prev);
1333 		__migrate_folio_record(dst, old_page_state, anon_vma);
1334 		return rc;
1335 	}
1336 
1337 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1338 			       anon_vma, true, ret);
1339 	migrate_folio_undo_dst(dst, true, put_new_folio, private);
1340 
1341 	return rc;
1342 }
1343 
1344 /*
1345  * Counterpart of unmap_and_move_page() for hugepage migration.
1346  *
1347  * This function doesn't wait the completion of hugepage I/O
1348  * because there is no race between I/O and migration for hugepage.
1349  * Note that currently hugepage I/O occurs only in direct I/O
1350  * where no lock is held and PG_writeback is irrelevant,
1351  * and writeback status of all subpages are counted in the reference
1352  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1353  * under direct I/O, the reference of the head page is 512 and a bit more.)
1354  * This means that when we try to migrate hugepage whose subpages are
1355  * doing direct I/O, some references remain after try_to_unmap() and
1356  * hugepage migration fails without data corruption.
1357  *
1358  * There is also no race when direct I/O is issued on the page under migration,
1359  * because then pte is replaced with migration swap entry and direct I/O code
1360  * will wait in the page fault for migration to complete.
1361  */
unmap_and_move_huge_page(new_folio_t get_new_folio,free_folio_t put_new_folio,unsigned long private,struct folio * src,int force,enum migrate_mode mode,int reason,struct list_head * ret)1362 static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1363 		free_folio_t put_new_folio, unsigned long private,
1364 		struct folio *src, int force, enum migrate_mode mode,
1365 		int reason, struct list_head *ret)
1366 {
1367 	struct folio *dst;
1368 	int rc = -EAGAIN;
1369 	int page_was_mapped = 0;
1370 	struct anon_vma *anon_vma = NULL;
1371 	struct address_space *mapping = NULL;
1372 
1373 	if (folio_ref_count(src) == 1) {
1374 		/* page was freed from under us. So we are done. */
1375 		folio_putback_active_hugetlb(src);
1376 		return MIGRATEPAGE_SUCCESS;
1377 	}
1378 
1379 	dst = get_new_folio(src, private);
1380 	if (!dst)
1381 		return -ENOMEM;
1382 
1383 	if (!folio_trylock(src)) {
1384 		if (!force)
1385 			goto out;
1386 		switch (mode) {
1387 		case MIGRATE_SYNC:
1388 		case MIGRATE_SYNC_NO_COPY:
1389 			break;
1390 		default:
1391 			goto out;
1392 		}
1393 		folio_lock(src);
1394 	}
1395 
1396 	/*
1397 	 * Check for pages which are in the process of being freed.  Without
1398 	 * folio_mapping() set, hugetlbfs specific move page routine will not
1399 	 * be called and we could leak usage counts for subpools.
1400 	 */
1401 	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1402 		rc = -EBUSY;
1403 		goto out_unlock;
1404 	}
1405 
1406 	if (folio_test_anon(src))
1407 		anon_vma = folio_get_anon_vma(src);
1408 
1409 	if (unlikely(!folio_trylock(dst)))
1410 		goto put_anon;
1411 
1412 	if (folio_mapped(src)) {
1413 		enum ttu_flags ttu = 0;
1414 
1415 		if (!folio_test_anon(src)) {
1416 			/*
1417 			 * In shared mappings, try_to_unmap could potentially
1418 			 * call huge_pmd_unshare.  Because of this, take
1419 			 * semaphore in write mode here and set TTU_RMAP_LOCKED
1420 			 * to let lower levels know we have taken the lock.
1421 			 */
1422 			mapping = hugetlb_page_mapping_lock_write(&src->page);
1423 			if (unlikely(!mapping))
1424 				goto unlock_put_anon;
1425 
1426 			ttu = TTU_RMAP_LOCKED;
1427 		}
1428 
1429 		try_to_migrate(src, ttu);
1430 		page_was_mapped = 1;
1431 
1432 		if (ttu & TTU_RMAP_LOCKED)
1433 			i_mmap_unlock_write(mapping);
1434 	}
1435 
1436 	if (!folio_mapped(src))
1437 		rc = move_to_new_folio(dst, src, mode);
1438 
1439 	if (page_was_mapped)
1440 		remove_migration_ptes(src,
1441 			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1442 
1443 unlock_put_anon:
1444 	folio_unlock(dst);
1445 
1446 put_anon:
1447 	if (anon_vma)
1448 		put_anon_vma(anon_vma);
1449 
1450 	if (rc == MIGRATEPAGE_SUCCESS) {
1451 		move_hugetlb_state(src, dst, reason);
1452 		put_new_folio = NULL;
1453 	}
1454 
1455 out_unlock:
1456 	folio_unlock(src);
1457 out:
1458 	if (rc == MIGRATEPAGE_SUCCESS)
1459 		folio_putback_active_hugetlb(src);
1460 	else if (rc != -EAGAIN)
1461 		list_move_tail(&src->lru, ret);
1462 
1463 	/*
1464 	 * If migration was not successful and there's a freeing callback, use
1465 	 * it.  Otherwise, put_page() will drop the reference grabbed during
1466 	 * isolation.
1467 	 */
1468 	if (put_new_folio)
1469 		put_new_folio(dst, private);
1470 	else
1471 		folio_putback_active_hugetlb(dst);
1472 
1473 	return rc;
1474 }
1475 
try_split_folio(struct folio * folio,struct list_head * split_folios)1476 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1477 {
1478 	int rc;
1479 
1480 	folio_lock(folio);
1481 	rc = split_folio_to_list(folio, split_folios);
1482 	folio_unlock(folio);
1483 	if (!rc)
1484 		list_move_tail(&folio->lru, split_folios);
1485 
1486 	return rc;
1487 }
1488 
1489 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1490 #define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
1491 #else
1492 #define NR_MAX_BATCHED_MIGRATION	512
1493 #endif
1494 #define NR_MAX_MIGRATE_PAGES_RETRY	10
1495 #define NR_MAX_MIGRATE_ASYNC_RETRY	3
1496 #define NR_MAX_MIGRATE_SYNC_RETRY					\
1497 	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1498 
1499 struct migrate_pages_stats {
1500 	int nr_succeeded;	/* Normal and large folios migrated successfully, in
1501 				   units of base pages */
1502 	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
1503 				   units of base pages.  Untried folios aren't counted */
1504 	int nr_thp_succeeded;	/* THP migrated successfully */
1505 	int nr_thp_failed;	/* THP failed to be migrated */
1506 	int nr_thp_split;	/* THP split before migrating */
1507 };
1508 
1509 /*
1510  * Returns the number of hugetlb folios that were not migrated, or an error code
1511  * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
1512  * any more because the list has become empty or no retryable hugetlb folios
1513  * exist any more. It is caller's responsibility to call putback_movable_pages()
1514  * only if ret != 0.
1515  */
migrate_hugetlbs(struct list_head * from,new_folio_t get_new_folio,free_folio_t put_new_folio,unsigned long private,enum migrate_mode mode,int reason,struct migrate_pages_stats * stats,struct list_head * ret_folios)1516 static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1517 			    free_folio_t put_new_folio, unsigned long private,
1518 			    enum migrate_mode mode, int reason,
1519 			    struct migrate_pages_stats *stats,
1520 			    struct list_head *ret_folios)
1521 {
1522 	int retry = 1;
1523 	int nr_failed = 0;
1524 	int nr_retry_pages = 0;
1525 	int pass = 0;
1526 	struct folio *folio, *folio2;
1527 	int rc, nr_pages;
1528 
1529 	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1530 		retry = 0;
1531 		nr_retry_pages = 0;
1532 
1533 		list_for_each_entry_safe(folio, folio2, from, lru) {
1534 			if (!folio_test_hugetlb(folio))
1535 				continue;
1536 
1537 			nr_pages = folio_nr_pages(folio);
1538 
1539 			cond_resched();
1540 
1541 			/*
1542 			 * Migratability of hugepages depends on architectures and
1543 			 * their size.  This check is necessary because some callers
1544 			 * of hugepage migration like soft offline and memory
1545 			 * hotremove don't walk through page tables or check whether
1546 			 * the hugepage is pmd-based or not before kicking migration.
1547 			 */
1548 			if (!hugepage_migration_supported(folio_hstate(folio))) {
1549 				nr_failed++;
1550 				stats->nr_failed_pages += nr_pages;
1551 				list_move_tail(&folio->lru, ret_folios);
1552 				continue;
1553 			}
1554 
1555 			rc = unmap_and_move_huge_page(get_new_folio,
1556 						      put_new_folio, private,
1557 						      folio, pass > 2, mode,
1558 						      reason, ret_folios);
1559 			/*
1560 			 * The rules are:
1561 			 *	Success: hugetlb folio will be put back
1562 			 *	-EAGAIN: stay on the from list
1563 			 *	-ENOMEM: stay on the from list
1564 			 *	Other errno: put on ret_folios list
1565 			 */
1566 			switch(rc) {
1567 			case -ENOMEM:
1568 				/*
1569 				 * When memory is low, don't bother to try to migrate
1570 				 * other folios, just exit.
1571 				 */
1572 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1573 				return -ENOMEM;
1574 			case -EAGAIN:
1575 				retry++;
1576 				nr_retry_pages += nr_pages;
1577 				break;
1578 			case MIGRATEPAGE_SUCCESS:
1579 				stats->nr_succeeded += nr_pages;
1580 				break;
1581 			default:
1582 				/*
1583 				 * Permanent failure (-EBUSY, etc.):
1584 				 * unlike -EAGAIN case, the failed folio is
1585 				 * removed from migration folio list and not
1586 				 * retried in the next outer loop.
1587 				 */
1588 				nr_failed++;
1589 				stats->nr_failed_pages += nr_pages;
1590 				break;
1591 			}
1592 		}
1593 	}
1594 	/*
1595 	 * nr_failed is number of hugetlb folios failed to be migrated.  After
1596 	 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1597 	 * folios as failed.
1598 	 */
1599 	nr_failed += retry;
1600 	stats->nr_failed_pages += nr_retry_pages;
1601 
1602 	return nr_failed;
1603 }
1604 
1605 /*
1606  * migrate_pages_batch() first unmaps folios in the from list as many as
1607  * possible, then move the unmapped folios.
1608  *
1609  * We only batch migration if mode == MIGRATE_ASYNC to avoid to wait a
1610  * lock or bit when we have locked more than one folio.  Which may cause
1611  * deadlock (e.g., for loop device).  So, if mode != MIGRATE_ASYNC, the
1612  * length of the from list must be <= 1.
1613  */
migrate_pages_batch(struct list_head * from,new_folio_t get_new_folio,free_folio_t put_new_folio,unsigned long private,enum migrate_mode mode,int reason,struct list_head * ret_folios,struct list_head * split_folios,struct migrate_pages_stats * stats,int nr_pass)1614 static int migrate_pages_batch(struct list_head *from,
1615 		new_folio_t get_new_folio, free_folio_t put_new_folio,
1616 		unsigned long private, enum migrate_mode mode, int reason,
1617 		struct list_head *ret_folios, struct list_head *split_folios,
1618 		struct migrate_pages_stats *stats, int nr_pass)
1619 {
1620 	int retry = 1;
1621 	int thp_retry = 1;
1622 	int nr_failed = 0;
1623 	int nr_retry_pages = 0;
1624 	int pass = 0;
1625 	bool is_thp = false;
1626 	struct folio *folio, *folio2, *dst = NULL, *dst2;
1627 	int rc, rc_saved = 0, nr_pages;
1628 	LIST_HEAD(unmap_folios);
1629 	LIST_HEAD(dst_folios);
1630 	bool nosplit = (reason == MR_NUMA_MISPLACED);
1631 
1632 	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1633 			!list_empty(from) && !list_is_singular(from));
1634 
1635 	for (pass = 0; pass < nr_pass && retry; pass++) {
1636 		retry = 0;
1637 		thp_retry = 0;
1638 		nr_retry_pages = 0;
1639 
1640 		list_for_each_entry_safe(folio, folio2, from, lru) {
1641 			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1642 			nr_pages = folio_nr_pages(folio);
1643 
1644 			cond_resched();
1645 
1646 			/*
1647 			 * Large folio migration might be unsupported or
1648 			 * the allocation might be failed so we should retry
1649 			 * on the same folio with the large folio split
1650 			 * to normal folios.
1651 			 *
1652 			 * Split folios are put in split_folios, and
1653 			 * we will migrate them after the rest of the
1654 			 * list is processed.
1655 			 */
1656 			if (!thp_migration_supported() && is_thp) {
1657 				nr_failed++;
1658 				stats->nr_thp_failed++;
1659 				if (!try_split_folio(folio, split_folios)) {
1660 					stats->nr_thp_split++;
1661 					continue;
1662 				}
1663 				stats->nr_failed_pages += nr_pages;
1664 				list_move_tail(&folio->lru, ret_folios);
1665 				continue;
1666 			}
1667 
1668 			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1669 					private, folio, &dst, mode, reason,
1670 					ret_folios);
1671 			/*
1672 			 * The rules are:
1673 			 *	Success: folio will be freed
1674 			 *	Unmap: folio will be put on unmap_folios list,
1675 			 *	       dst folio put on dst_folios list
1676 			 *	-EAGAIN: stay on the from list
1677 			 *	-ENOMEM: stay on the from list
1678 			 *	Other errno: put on ret_folios list
1679 			 */
1680 			switch(rc) {
1681 			case -ENOMEM:
1682 				/*
1683 				 * When memory is low, don't bother to try to migrate
1684 				 * other folios, move unmapped folios, then exit.
1685 				 */
1686 				nr_failed++;
1687 				stats->nr_thp_failed += is_thp;
1688 				/* Large folio NUMA faulting doesn't split to retry. */
1689 				if (folio_test_large(folio) && !nosplit) {
1690 					int ret = try_split_folio(folio, split_folios);
1691 
1692 					if (!ret) {
1693 						stats->nr_thp_split += is_thp;
1694 						break;
1695 					} else if (reason == MR_LONGTERM_PIN &&
1696 						   ret == -EAGAIN) {
1697 						/*
1698 						 * Try again to split large folio to
1699 						 * mitigate the failure of longterm pinning.
1700 						 */
1701 						retry++;
1702 						thp_retry += is_thp;
1703 						nr_retry_pages += nr_pages;
1704 						/* Undo duplicated failure counting. */
1705 						nr_failed--;
1706 						stats->nr_thp_failed -= is_thp;
1707 						break;
1708 					}
1709 				}
1710 
1711 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1712 				/* nr_failed isn't updated for not used */
1713 				stats->nr_thp_failed += thp_retry;
1714 				rc_saved = rc;
1715 				if (list_empty(&unmap_folios))
1716 					goto out;
1717 				else
1718 					goto move;
1719 			case -EAGAIN:
1720 				retry++;
1721 				thp_retry += is_thp;
1722 				nr_retry_pages += nr_pages;
1723 				break;
1724 			case MIGRATEPAGE_SUCCESS:
1725 				stats->nr_succeeded += nr_pages;
1726 				stats->nr_thp_succeeded += is_thp;
1727 				break;
1728 			case MIGRATEPAGE_UNMAP:
1729 				list_move_tail(&folio->lru, &unmap_folios);
1730 				list_add_tail(&dst->lru, &dst_folios);
1731 				break;
1732 			default:
1733 				/*
1734 				 * Permanent failure (-EBUSY, etc.):
1735 				 * unlike -EAGAIN case, the failed folio is
1736 				 * removed from migration folio list and not
1737 				 * retried in the next outer loop.
1738 				 */
1739 				nr_failed++;
1740 				stats->nr_thp_failed += is_thp;
1741 				stats->nr_failed_pages += nr_pages;
1742 				break;
1743 			}
1744 		}
1745 	}
1746 	nr_failed += retry;
1747 	stats->nr_thp_failed += thp_retry;
1748 	stats->nr_failed_pages += nr_retry_pages;
1749 move:
1750 	/* Flush TLBs for all unmapped folios */
1751 	try_to_unmap_flush();
1752 
1753 	retry = 1;
1754 	for (pass = 0; pass < nr_pass && retry; pass++) {
1755 		retry = 0;
1756 		thp_retry = 0;
1757 		nr_retry_pages = 0;
1758 
1759 		dst = list_first_entry(&dst_folios, struct folio, lru);
1760 		dst2 = list_next_entry(dst, lru);
1761 		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1762 			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1763 			nr_pages = folio_nr_pages(folio);
1764 
1765 			cond_resched();
1766 
1767 			rc = migrate_folio_move(put_new_folio, private,
1768 						folio, dst, mode,
1769 						reason, ret_folios);
1770 			/*
1771 			 * The rules are:
1772 			 *	Success: folio will be freed
1773 			 *	-EAGAIN: stay on the unmap_folios list
1774 			 *	Other errno: put on ret_folios list
1775 			 */
1776 			switch(rc) {
1777 			case -EAGAIN:
1778 				retry++;
1779 				thp_retry += is_thp;
1780 				nr_retry_pages += nr_pages;
1781 				break;
1782 			case MIGRATEPAGE_SUCCESS:
1783 				stats->nr_succeeded += nr_pages;
1784 				stats->nr_thp_succeeded += is_thp;
1785 				break;
1786 			default:
1787 				nr_failed++;
1788 				stats->nr_thp_failed += is_thp;
1789 				stats->nr_failed_pages += nr_pages;
1790 				break;
1791 			}
1792 			dst = dst2;
1793 			dst2 = list_next_entry(dst, lru);
1794 		}
1795 	}
1796 	nr_failed += retry;
1797 	stats->nr_thp_failed += thp_retry;
1798 	stats->nr_failed_pages += nr_retry_pages;
1799 
1800 	rc = rc_saved ? : nr_failed;
1801 out:
1802 	/* Cleanup remaining folios */
1803 	dst = list_first_entry(&dst_folios, struct folio, lru);
1804 	dst2 = list_next_entry(dst, lru);
1805 	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1806 		int old_page_state = 0;
1807 		struct anon_vma *anon_vma = NULL;
1808 
1809 		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1810 		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1811 				       anon_vma, true, ret_folios);
1812 		list_del(&dst->lru);
1813 		migrate_folio_undo_dst(dst, true, put_new_folio, private);
1814 		dst = dst2;
1815 		dst2 = list_next_entry(dst, lru);
1816 	}
1817 
1818 	return rc;
1819 }
1820 
migrate_pages_sync(struct list_head * from,new_folio_t get_new_folio,free_folio_t put_new_folio,unsigned long private,enum migrate_mode mode,int reason,struct list_head * ret_folios,struct list_head * split_folios,struct migrate_pages_stats * stats)1821 static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1822 		free_folio_t put_new_folio, unsigned long private,
1823 		enum migrate_mode mode, int reason,
1824 		struct list_head *ret_folios, struct list_head *split_folios,
1825 		struct migrate_pages_stats *stats)
1826 {
1827 	int rc, nr_failed = 0;
1828 	LIST_HEAD(folios);
1829 	struct migrate_pages_stats astats;
1830 
1831 	memset(&astats, 0, sizeof(astats));
1832 	/* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
1833 	rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1834 				 reason, &folios, split_folios, &astats,
1835 				 NR_MAX_MIGRATE_ASYNC_RETRY);
1836 	stats->nr_succeeded += astats.nr_succeeded;
1837 	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1838 	stats->nr_thp_split += astats.nr_thp_split;
1839 	if (rc < 0) {
1840 		stats->nr_failed_pages += astats.nr_failed_pages;
1841 		stats->nr_thp_failed += astats.nr_thp_failed;
1842 		list_splice_tail(&folios, ret_folios);
1843 		return rc;
1844 	}
1845 	stats->nr_thp_failed += astats.nr_thp_split;
1846 	nr_failed += astats.nr_thp_split;
1847 	/*
1848 	 * Fall back to migrate all failed folios one by one synchronously. All
1849 	 * failed folios except split THPs will be retried, so their failure
1850 	 * isn't counted
1851 	 */
1852 	list_splice_tail_init(&folios, from);
1853 	while (!list_empty(from)) {
1854 		list_move(from->next, &folios);
1855 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1856 					 private, mode, reason, ret_folios,
1857 					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1858 		list_splice_tail_init(&folios, ret_folios);
1859 		if (rc < 0)
1860 			return rc;
1861 		nr_failed += rc;
1862 	}
1863 
1864 	return nr_failed;
1865 }
1866 
1867 /*
1868  * migrate_pages - migrate the folios specified in a list, to the free folios
1869  *		   supplied as the target for the page migration
1870  *
1871  * @from:		The list of folios to be migrated.
1872  * @get_new_folio:	The function used to allocate free folios to be used
1873  *			as the target of the folio migration.
1874  * @put_new_folio:	The function used to free target folios if migration
1875  *			fails, or NULL if no special handling is necessary.
1876  * @private:		Private data to be passed on to get_new_folio()
1877  * @mode:		The migration mode that specifies the constraints for
1878  *			folio migration, if any.
1879  * @reason:		The reason for folio migration.
1880  * @ret_succeeded:	Set to the number of folios migrated successfully if
1881  *			the caller passes a non-NULL pointer.
1882  *
1883  * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1884  * are movable any more because the list has become empty or no retryable folios
1885  * exist any more. It is caller's responsibility to call putback_movable_pages()
1886  * only if ret != 0.
1887  *
1888  * Returns the number of {normal folio, large folio, hugetlb} that were not
1889  * migrated, or an error code. The number of large folio splits will be
1890  * considered as the number of non-migrated large folio, no matter how many
1891  * split folios of the large folio are migrated successfully.
1892  */
migrate_pages(struct list_head * from,new_folio_t get_new_folio,free_folio_t put_new_folio,unsigned long private,enum migrate_mode mode,int reason,unsigned int * ret_succeeded)1893 int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
1894 		free_folio_t put_new_folio, unsigned long private,
1895 		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1896 {
1897 	int rc, rc_gather;
1898 	int nr_pages;
1899 	struct folio *folio, *folio2;
1900 	LIST_HEAD(folios);
1901 	LIST_HEAD(ret_folios);
1902 	LIST_HEAD(split_folios);
1903 	struct migrate_pages_stats stats;
1904 
1905 	trace_mm_migrate_pages_start(mode, reason);
1906 
1907 	memset(&stats, 0, sizeof(stats));
1908 
1909 	rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
1910 				     mode, reason, &stats, &ret_folios);
1911 	if (rc_gather < 0)
1912 		goto out;
1913 
1914 again:
1915 	nr_pages = 0;
1916 	list_for_each_entry_safe(folio, folio2, from, lru) {
1917 		/* Retried hugetlb folios will be kept in list  */
1918 		if (folio_test_hugetlb(folio)) {
1919 			list_move_tail(&folio->lru, &ret_folios);
1920 			continue;
1921 		}
1922 
1923 		nr_pages += folio_nr_pages(folio);
1924 		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1925 			break;
1926 	}
1927 	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1928 		list_cut_before(&folios, from, &folio2->lru);
1929 	else
1930 		list_splice_init(from, &folios);
1931 	if (mode == MIGRATE_ASYNC)
1932 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1933 				private, mode, reason, &ret_folios,
1934 				&split_folios, &stats,
1935 				NR_MAX_MIGRATE_PAGES_RETRY);
1936 	else
1937 		rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
1938 				private, mode, reason, &ret_folios,
1939 				&split_folios, &stats);
1940 	list_splice_tail_init(&folios, &ret_folios);
1941 	if (rc < 0) {
1942 		rc_gather = rc;
1943 		list_splice_tail(&split_folios, &ret_folios);
1944 		goto out;
1945 	}
1946 	if (!list_empty(&split_folios)) {
1947 		/*
1948 		 * Failure isn't counted since all split folios of a large folio
1949 		 * is counted as 1 failure already.  And, we only try to migrate
1950 		 * with minimal effort, force MIGRATE_ASYNC mode and retry once.
1951 		 */
1952 		migrate_pages_batch(&split_folios, get_new_folio,
1953 				put_new_folio, private, MIGRATE_ASYNC, reason,
1954 				&ret_folios, NULL, &stats, 1);
1955 		list_splice_tail_init(&split_folios, &ret_folios);
1956 	}
1957 	rc_gather += rc;
1958 	if (!list_empty(from))
1959 		goto again;
1960 out:
1961 	/*
1962 	 * Put the permanent failure folio back to migration list, they
1963 	 * will be put back to the right list by the caller.
1964 	 */
1965 	list_splice(&ret_folios, from);
1966 
1967 	/*
1968 	 * Return 0 in case all split folios of fail-to-migrate large folios
1969 	 * are migrated successfully.
1970 	 */
1971 	if (list_empty(from))
1972 		rc_gather = 0;
1973 
1974 	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
1975 	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
1976 	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
1977 	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
1978 	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
1979 	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
1980 			       stats.nr_thp_succeeded, stats.nr_thp_failed,
1981 			       stats.nr_thp_split, mode, reason);
1982 
1983 	if (ret_succeeded)
1984 		*ret_succeeded = stats.nr_succeeded;
1985 
1986 	return rc_gather;
1987 }
1988 
alloc_migration_target(struct folio * src,unsigned long private)1989 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
1990 {
1991 	struct migration_target_control *mtc;
1992 	gfp_t gfp_mask;
1993 	unsigned int order = 0;
1994 	int nid;
1995 	int zidx;
1996 
1997 	mtc = (struct migration_target_control *)private;
1998 	gfp_mask = mtc->gfp_mask;
1999 	nid = mtc->nid;
2000 	if (nid == NUMA_NO_NODE)
2001 		nid = folio_nid(src);
2002 
2003 	if (folio_test_hugetlb(src)) {
2004 		struct hstate *h = folio_hstate(src);
2005 
2006 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2007 		return alloc_hugetlb_folio_nodemask(h, nid,
2008 						mtc->nmask, gfp_mask);
2009 	}
2010 
2011 	if (folio_test_large(src)) {
2012 		/*
2013 		 * clear __GFP_RECLAIM to make the migration callback
2014 		 * consistent with regular THP allocations.
2015 		 */
2016 		gfp_mask &= ~__GFP_RECLAIM;
2017 		gfp_mask |= GFP_TRANSHUGE;
2018 		order = folio_order(src);
2019 	}
2020 	zidx = zone_idx(folio_zone(src));
2021 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2022 		gfp_mask |= __GFP_HIGHMEM;
2023 
2024 	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2025 }
2026 
2027 #ifdef CONFIG_NUMA
2028 
store_status(int __user * status,int start,int value,int nr)2029 static int store_status(int __user *status, int start, int value, int nr)
2030 {
2031 	while (nr-- > 0) {
2032 		if (put_user(value, status + start))
2033 			return -EFAULT;
2034 		start++;
2035 	}
2036 
2037 	return 0;
2038 }
2039 
do_move_pages_to_node(struct mm_struct * mm,struct list_head * pagelist,int node)2040 static int do_move_pages_to_node(struct mm_struct *mm,
2041 		struct list_head *pagelist, int node)
2042 {
2043 	int err;
2044 	struct migration_target_control mtc = {
2045 		.nid = node,
2046 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2047 	};
2048 
2049 	err = migrate_pages(pagelist, alloc_migration_target, NULL,
2050 		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2051 	if (err)
2052 		putback_movable_pages(pagelist);
2053 	return err;
2054 }
2055 
2056 /*
2057  * Resolves the given address to a struct page, isolates it from the LRU and
2058  * puts it to the given pagelist.
2059  * Returns:
2060  *     errno - if the page cannot be found/isolated
2061  *     0 - when it doesn't have to be migrated because it is already on the
2062  *         target node
2063  *     1 - when it has been queued
2064  */
add_page_for_migration(struct mm_struct * mm,const void __user * p,int node,struct list_head * pagelist,bool migrate_all)2065 static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
2066 		int node, struct list_head *pagelist, bool migrate_all)
2067 {
2068 	struct vm_area_struct *vma;
2069 	unsigned long addr;
2070 	struct page *page;
2071 	int err;
2072 	bool isolated;
2073 
2074 	mmap_read_lock(mm);
2075 	addr = (unsigned long)untagged_addr_remote(mm, p);
2076 
2077 	err = -EFAULT;
2078 	vma = vma_lookup(mm, addr);
2079 	if (!vma || !vma_migratable(vma))
2080 		goto out;
2081 
2082 	/* FOLL_DUMP to ignore special (like zero) pages */
2083 	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2084 
2085 	err = PTR_ERR(page);
2086 	if (IS_ERR(page))
2087 		goto out;
2088 
2089 	err = -ENOENT;
2090 	if (!page)
2091 		goto out;
2092 
2093 	if (is_zone_device_page(page))
2094 		goto out_putpage;
2095 
2096 	err = 0;
2097 	if (page_to_nid(page) == node)
2098 		goto out_putpage;
2099 
2100 	err = -EACCES;
2101 	if (page_mapcount(page) > 1 && !migrate_all)
2102 		goto out_putpage;
2103 
2104 	if (PageHuge(page)) {
2105 		if (PageHead(page)) {
2106 			isolated = isolate_hugetlb(page_folio(page), pagelist);
2107 			err = isolated ? 1 : -EBUSY;
2108 		}
2109 	} else {
2110 		struct page *head;
2111 
2112 		head = compound_head(page);
2113 		isolated = isolate_lru_page(head);
2114 		if (!isolated) {
2115 			err = -EBUSY;
2116 			goto out_putpage;
2117 		}
2118 
2119 		err = 1;
2120 		list_add_tail(&head->lru, pagelist);
2121 		mod_node_page_state(page_pgdat(head),
2122 			NR_ISOLATED_ANON + page_is_file_lru(head),
2123 			thp_nr_pages(head));
2124 	}
2125 out_putpage:
2126 	/*
2127 	 * Either remove the duplicate refcount from
2128 	 * isolate_lru_page() or drop the page ref if it was
2129 	 * not isolated.
2130 	 */
2131 	put_page(page);
2132 out:
2133 	mmap_read_unlock(mm);
2134 	return err;
2135 }
2136 
move_pages_and_store_status(struct mm_struct * mm,int node,struct list_head * pagelist,int __user * status,int start,int i,unsigned long nr_pages)2137 static int move_pages_and_store_status(struct mm_struct *mm, int node,
2138 		struct list_head *pagelist, int __user *status,
2139 		int start, int i, unsigned long nr_pages)
2140 {
2141 	int err;
2142 
2143 	if (list_empty(pagelist))
2144 		return 0;
2145 
2146 	err = do_move_pages_to_node(mm, pagelist, node);
2147 	if (err) {
2148 		/*
2149 		 * Positive err means the number of failed
2150 		 * pages to migrate.  Since we are going to
2151 		 * abort and return the number of non-migrated
2152 		 * pages, so need to include the rest of the
2153 		 * nr_pages that have not been attempted as
2154 		 * well.
2155 		 */
2156 		if (err > 0)
2157 			err += nr_pages - i;
2158 		return err;
2159 	}
2160 	return store_status(status, start, node, i - start);
2161 }
2162 
2163 /*
2164  * Migrate an array of page address onto an array of nodes and fill
2165  * the corresponding array of status.
2166  */
do_pages_move(struct mm_struct * mm,nodemask_t task_nodes,unsigned long nr_pages,const void __user * __user * pages,const int __user * nodes,int __user * status,int flags)2167 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2168 			 unsigned long nr_pages,
2169 			 const void __user * __user *pages,
2170 			 const int __user *nodes,
2171 			 int __user *status, int flags)
2172 {
2173 	compat_uptr_t __user *compat_pages = (void __user *)pages;
2174 	int current_node = NUMA_NO_NODE;
2175 	LIST_HEAD(pagelist);
2176 	int start, i;
2177 	int err = 0, err1;
2178 
2179 	lru_cache_disable();
2180 
2181 	for (i = start = 0; i < nr_pages; i++) {
2182 		const void __user *p;
2183 		int node;
2184 
2185 		err = -EFAULT;
2186 		if (in_compat_syscall()) {
2187 			compat_uptr_t cp;
2188 
2189 			if (get_user(cp, compat_pages + i))
2190 				goto out_flush;
2191 
2192 			p = compat_ptr(cp);
2193 		} else {
2194 			if (get_user(p, pages + i))
2195 				goto out_flush;
2196 		}
2197 		if (get_user(node, nodes + i))
2198 			goto out_flush;
2199 
2200 		err = -ENODEV;
2201 		if (node < 0 || node >= MAX_NUMNODES)
2202 			goto out_flush;
2203 		if (!node_state(node, N_MEMORY))
2204 			goto out_flush;
2205 
2206 		err = -EACCES;
2207 		if (!node_isset(node, task_nodes))
2208 			goto out_flush;
2209 
2210 		if (current_node == NUMA_NO_NODE) {
2211 			current_node = node;
2212 			start = i;
2213 		} else if (node != current_node) {
2214 			err = move_pages_and_store_status(mm, current_node,
2215 					&pagelist, status, start, i, nr_pages);
2216 			if (err)
2217 				goto out;
2218 			start = i;
2219 			current_node = node;
2220 		}
2221 
2222 		/*
2223 		 * Errors in the page lookup or isolation are not fatal and we simply
2224 		 * report them via status
2225 		 */
2226 		err = add_page_for_migration(mm, p, current_node, &pagelist,
2227 					     flags & MPOL_MF_MOVE_ALL);
2228 
2229 		if (err > 0) {
2230 			/* The page is successfully queued for migration */
2231 			continue;
2232 		}
2233 
2234 		/*
2235 		 * The move_pages() man page does not have an -EEXIST choice, so
2236 		 * use -EFAULT instead.
2237 		 */
2238 		if (err == -EEXIST)
2239 			err = -EFAULT;
2240 
2241 		/*
2242 		 * If the page is already on the target node (!err), store the
2243 		 * node, otherwise, store the err.
2244 		 */
2245 		err = store_status(status, i, err ? : current_node, 1);
2246 		if (err)
2247 			goto out_flush;
2248 
2249 		err = move_pages_and_store_status(mm, current_node, &pagelist,
2250 				status, start, i, nr_pages);
2251 		if (err) {
2252 			/* We have accounted for page i */
2253 			if (err > 0)
2254 				err--;
2255 			goto out;
2256 		}
2257 		current_node = NUMA_NO_NODE;
2258 	}
2259 out_flush:
2260 	/* Make sure we do not overwrite the existing error */
2261 	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
2262 				status, start, i, nr_pages);
2263 	if (err >= 0)
2264 		err = err1;
2265 out:
2266 	lru_cache_enable();
2267 	return err;
2268 }
2269 
2270 /*
2271  * Determine the nodes of an array of pages and store it in an array of status.
2272  */
do_pages_stat_array(struct mm_struct * mm,unsigned long nr_pages,const void __user ** pages,int * status)2273 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2274 				const void __user **pages, int *status)
2275 {
2276 	unsigned long i;
2277 
2278 	mmap_read_lock(mm);
2279 
2280 	for (i = 0; i < nr_pages; i++) {
2281 		unsigned long addr = (unsigned long)(*pages);
2282 		struct vm_area_struct *vma;
2283 		struct page *page;
2284 		int err = -EFAULT;
2285 
2286 		vma = vma_lookup(mm, addr);
2287 		if (!vma)
2288 			goto set_status;
2289 
2290 		/* FOLL_DUMP to ignore special (like zero) pages */
2291 		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2292 
2293 		err = PTR_ERR(page);
2294 		if (IS_ERR(page))
2295 			goto set_status;
2296 
2297 		err = -ENOENT;
2298 		if (!page)
2299 			goto set_status;
2300 
2301 		if (!is_zone_device_page(page))
2302 			err = page_to_nid(page);
2303 
2304 		put_page(page);
2305 set_status:
2306 		*status = err;
2307 
2308 		pages++;
2309 		status++;
2310 	}
2311 
2312 	mmap_read_unlock(mm);
2313 }
2314 
get_compat_pages_array(const void __user * chunk_pages[],const void __user * __user * pages,unsigned long chunk_nr)2315 static int get_compat_pages_array(const void __user *chunk_pages[],
2316 				  const void __user * __user *pages,
2317 				  unsigned long chunk_nr)
2318 {
2319 	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2320 	compat_uptr_t p;
2321 	int i;
2322 
2323 	for (i = 0; i < chunk_nr; i++) {
2324 		if (get_user(p, pages32 + i))
2325 			return -EFAULT;
2326 		chunk_pages[i] = compat_ptr(p);
2327 	}
2328 
2329 	return 0;
2330 }
2331 
2332 /*
2333  * Determine the nodes of a user array of pages and store it in
2334  * a user array of status.
2335  */
do_pages_stat(struct mm_struct * mm,unsigned long nr_pages,const void __user * __user * pages,int __user * status)2336 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2337 			 const void __user * __user *pages,
2338 			 int __user *status)
2339 {
2340 #define DO_PAGES_STAT_CHUNK_NR 16UL
2341 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2342 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2343 
2344 	while (nr_pages) {
2345 		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2346 
2347 		if (in_compat_syscall()) {
2348 			if (get_compat_pages_array(chunk_pages, pages,
2349 						   chunk_nr))
2350 				break;
2351 		} else {
2352 			if (copy_from_user(chunk_pages, pages,
2353 				      chunk_nr * sizeof(*chunk_pages)))
2354 				break;
2355 		}
2356 
2357 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2358 
2359 		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2360 			break;
2361 
2362 		pages += chunk_nr;
2363 		status += chunk_nr;
2364 		nr_pages -= chunk_nr;
2365 	}
2366 	return nr_pages ? -EFAULT : 0;
2367 }
2368 
find_mm_struct(pid_t pid,nodemask_t * mem_nodes)2369 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2370 {
2371 	struct task_struct *task;
2372 	struct mm_struct *mm;
2373 
2374 	/*
2375 	 * There is no need to check if current process has the right to modify
2376 	 * the specified process when they are same.
2377 	 */
2378 	if (!pid) {
2379 		mmget(current->mm);
2380 		*mem_nodes = cpuset_mems_allowed(current);
2381 		return current->mm;
2382 	}
2383 
2384 	/* Find the mm_struct */
2385 	rcu_read_lock();
2386 	task = find_task_by_vpid(pid);
2387 	if (!task) {
2388 		rcu_read_unlock();
2389 		return ERR_PTR(-ESRCH);
2390 	}
2391 	get_task_struct(task);
2392 
2393 	/*
2394 	 * Check if this process has the right to modify the specified
2395 	 * process. Use the regular "ptrace_may_access()" checks.
2396 	 */
2397 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2398 		rcu_read_unlock();
2399 		mm = ERR_PTR(-EPERM);
2400 		goto out;
2401 	}
2402 	rcu_read_unlock();
2403 
2404 	mm = ERR_PTR(security_task_movememory(task));
2405 	if (IS_ERR(mm))
2406 		goto out;
2407 	*mem_nodes = cpuset_mems_allowed(task);
2408 	mm = get_task_mm(task);
2409 out:
2410 	put_task_struct(task);
2411 	if (!mm)
2412 		mm = ERR_PTR(-EINVAL);
2413 	return mm;
2414 }
2415 
2416 /*
2417  * Move a list of pages in the address space of the currently executing
2418  * process.
2419  */
kernel_move_pages(pid_t pid,unsigned long nr_pages,const void __user * __user * pages,const int __user * nodes,int __user * status,int flags)2420 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2421 			     const void __user * __user *pages,
2422 			     const int __user *nodes,
2423 			     int __user *status, int flags)
2424 {
2425 	struct mm_struct *mm;
2426 	int err;
2427 	nodemask_t task_nodes;
2428 
2429 	/* Check flags */
2430 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2431 		return -EINVAL;
2432 
2433 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2434 		return -EPERM;
2435 
2436 	mm = find_mm_struct(pid, &task_nodes);
2437 	if (IS_ERR(mm))
2438 		return PTR_ERR(mm);
2439 
2440 	if (nodes)
2441 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
2442 				    nodes, status, flags);
2443 	else
2444 		err = do_pages_stat(mm, nr_pages, pages, status);
2445 
2446 	mmput(mm);
2447 	return err;
2448 }
2449 
SYSCALL_DEFINE6(move_pages,pid_t,pid,unsigned long,nr_pages,const void __user * __user *,pages,const int __user *,nodes,int __user *,status,int,flags)2450 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2451 		const void __user * __user *, pages,
2452 		const int __user *, nodes,
2453 		int __user *, status, int, flags)
2454 {
2455 	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2456 }
2457 
2458 #ifdef CONFIG_NUMA_BALANCING
2459 /*
2460  * Returns true if this is a safe migration target node for misplaced NUMA
2461  * pages. Currently it only checks the watermarks which is crude.
2462  */
migrate_balanced_pgdat(struct pglist_data * pgdat,unsigned long nr_migrate_pages)2463 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2464 				   unsigned long nr_migrate_pages)
2465 {
2466 	int z;
2467 
2468 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2469 		struct zone *zone = pgdat->node_zones + z;
2470 
2471 		if (!managed_zone(zone))
2472 			continue;
2473 
2474 		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
2475 		if (!zone_watermark_ok(zone, 0,
2476 				       high_wmark_pages(zone) +
2477 				       nr_migrate_pages,
2478 				       ZONE_MOVABLE, 0))
2479 			continue;
2480 		return true;
2481 	}
2482 	return false;
2483 }
2484 
alloc_misplaced_dst_folio(struct folio * src,unsigned long data)2485 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2486 					   unsigned long data)
2487 {
2488 	int nid = (int) data;
2489 	int order = folio_order(src);
2490 	gfp_t gfp = __GFP_THISNODE;
2491 
2492 	if (order > 0)
2493 		gfp |= GFP_TRANSHUGE_LIGHT;
2494 	else {
2495 		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2496 			__GFP_NOWARN;
2497 		gfp &= ~__GFP_RECLAIM;
2498 	}
2499 	return __folio_alloc_node(gfp, order, nid);
2500 }
2501 
numamigrate_isolate_page(pg_data_t * pgdat,struct page * page)2502 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2503 {
2504 	int nr_pages = thp_nr_pages(page);
2505 	int order = compound_order(page);
2506 
2507 	VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
2508 
2509 	/* Do not migrate THP mapped by multiple processes */
2510 	if (PageTransHuge(page) && total_mapcount(page) > 1)
2511 		return 0;
2512 
2513 	/* Avoid migrating to a node that is nearly full */
2514 	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2515 		int z;
2516 
2517 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2518 			return 0;
2519 		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2520 			if (managed_zone(pgdat->node_zones + z))
2521 				break;
2522 		}
2523 
2524 		/*
2525 		 * If there are no managed zones, it should not proceed
2526 		 * further.
2527 		 */
2528 		if (z < 0)
2529 			return 0;
2530 
2531 		wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
2532 		return 0;
2533 	}
2534 
2535 	if (!isolate_lru_page(page))
2536 		return 0;
2537 
2538 	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
2539 			    nr_pages);
2540 
2541 	/*
2542 	 * Isolating the page has taken another reference, so the
2543 	 * caller's reference can be safely dropped without the page
2544 	 * disappearing underneath us during migration.
2545 	 */
2546 	put_page(page);
2547 	return 1;
2548 }
2549 
2550 /*
2551  * Attempt to migrate a misplaced page to the specified destination
2552  * node. Caller is expected to have an elevated reference count on
2553  * the page that will be dropped by this function before returning.
2554  */
migrate_misplaced_page(struct page * page,struct vm_area_struct * vma,int node)2555 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2556 			   int node)
2557 {
2558 	pg_data_t *pgdat = NODE_DATA(node);
2559 	int isolated;
2560 	int nr_remaining;
2561 	unsigned int nr_succeeded;
2562 	LIST_HEAD(migratepages);
2563 	int nr_pages = thp_nr_pages(page);
2564 
2565 	/*
2566 	 * Don't migrate file pages that are mapped in multiple processes
2567 	 * with execute permissions as they are probably shared libraries.
2568 	 */
2569 	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2570 	    (vma->vm_flags & VM_EXEC))
2571 		goto out;
2572 
2573 	/*
2574 	 * Also do not migrate dirty pages as not all filesystems can move
2575 	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
2576 	 */
2577 	if (page_is_file_lru(page) && PageDirty(page))
2578 		goto out;
2579 
2580 	isolated = numamigrate_isolate_page(pgdat, page);
2581 	if (!isolated)
2582 		goto out;
2583 
2584 	list_add(&page->lru, &migratepages);
2585 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2586 				     NULL, node, MIGRATE_ASYNC,
2587 				     MR_NUMA_MISPLACED, &nr_succeeded);
2588 	if (nr_remaining) {
2589 		if (!list_empty(&migratepages)) {
2590 			list_del(&page->lru);
2591 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2592 					page_is_file_lru(page), -nr_pages);
2593 			putback_lru_page(page);
2594 		}
2595 		isolated = 0;
2596 	}
2597 	if (nr_succeeded) {
2598 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2599 		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2600 			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2601 					    nr_succeeded);
2602 	}
2603 	BUG_ON(!list_empty(&migratepages));
2604 	return isolated;
2605 
2606 out:
2607 	put_page(page);
2608 	return 0;
2609 }
2610 #endif /* CONFIG_NUMA_BALANCING */
2611 #endif /* CONFIG_NUMA */
2612