// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"

bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct folio *folio = folio_get_nontail_page(page);
	const struct movable_operations *mops;

	/*
	 * Avoid burning cycles with pages that are still under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount, preventing __free_pages() from doing its job,
	 * the folio_put() at the end of this block will take care of
	 * releasing the folio, thus avoiding a nasty leak.
	 */
	if (!folio)
		goto out;

	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
	smp_rmb();
	/*
	 * Check the movable flag before taking the page lock because
	 * we use non-atomic bitops on newly allocated page flags, so
	 * unconditionally grabbing the lock would ruin the page owner's
	 * use of them.
	 */
	if (unlikely(!__folio_test_movable(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
	smp_rmb();
	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;

	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as against a page being released.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!folio_trylock(folio)))
		goto out_putfolio;

	if (!folio_test_movable(folio) || folio_test_isolated(folio))
		goto out_no_isolated;

	mops = folio_movable_ops(folio);
	VM_BUG_ON_FOLIO(!mops, folio);

	if (!mops->isolate_page(&folio->page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(folio_test_isolated(folio));
	folio_set_isolated(folio);
	folio_unlock(folio);

	return true;

out_no_isolated:
	folio_unlock(folio);
out_putfolio:
	folio_put(folio);
out:
	return false;
}

static void putback_movable_folio(struct folio *folio)
{
	const struct movable_operations *mops = folio_movable_ops(folio);

	mops->putback_page(&folio->page);
	folio_clear_isolated(folio);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
	struct folio *folio;
	struct folio *folio2;

	list_for_each_entry_safe(folio, folio2, l, lru) {
		if (unlikely(folio_test_hugetlb(folio))) {
			folio_putback_active_hugetlb(folio);
			continue;
		}
		list_del(&folio->lru);
		/*
		 * We isolated a non-LRU movable folio, so here we can use
		 * __PageMovable because an LRU folio's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__folio_test_movable(folio))) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
			folio_lock(folio);
			if (folio_test_movable(folio))
				putback_movable_folio(folio);
			else
				folio_clear_isolated(folio);
			folio_unlock(folio);
			folio_put(folio);
		} else {
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -folio_nr_pages(folio));
			folio_putback_lru(folio);
		}
	}
}
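/*
 * Illustrative sketch only (not a real caller in this file): compaction- and
 * hotplug-style users first isolate candidate pages, hand the list to
 * migrate_pages(), and, if any folios could not be migrated (non-zero
 * return), give them back via putback_movable_pages():
 *
 *	if (isolate_movable_page(page, ISOLATE_UNEVICTABLE))
 *		list_add_tail(&page_folio(page)->lru, &pagelist);
 *	...
 *	ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC,
 *			    MR_MEMORY_HOTPLUG, NULL);
 *	if (ret)
 *		putback_movable_pages(&pagelist);
 *
 * The isolation mode, reason and target control ("mtc", "pagelist") are the
 * caller's choice; this only shows how the helpers in this file fit together.
 */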
/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *old)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		rmap_t rmap_flags = RMAP_NONE;
		pte_t pte;
		swp_entry_t entry;
		struct page *new;
		unsigned long idx = 0;

		/* pgoff is invalid for ksm pages, but they are never large */
		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
		new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		folio_get(folio);
		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (!is_migration_entry_young(entry))
			pte = pte_mkold(pte);
		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
			pte = pte_mkdirty(pte);
		if (is_writable_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(*pvmw.pte))
			pte = pte_mkuffd_wp(pte);
		else
			pte = pte_wrprotect(pte);

		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*pvmw.pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*pvmw.pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (folio_test_hugetlb(folio)) {
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (folio_test_anon(folio))
				hugepage_add_anon_rmap(new, vma, pvmw.address,
						       rmap_flags);
			else
				page_dup_file_rmap(new, true);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		} else
#endif
		{
			if (folio_test_anon(folio))
				page_add_anon_rmap(new, vma, pvmw.address,
						   rmap_flags);
			else
				page_add_file_rmap(new, vma, false);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();

		trace_remove_migration_pte(pvmw.address, pte_val(pte),
					   compound_order(new));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them with
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = src,
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptep, ptl);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);

	__migration_entry_wait(mm, ptep, ptl);
}
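/*
 * Illustrative note: these wait helpers are what the fault path ends up
 * calling when it trips over a migration entry. A sketch of the PTE case
 * (the actual callers live in mm/memory.c and mm/hugetlb.c):
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry))
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 *
 * The fault is then retried once migration has finished.
 */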
#ifdef CONFIG_HUGETLB_PAGE
/*
 * The vma read lock must be held upon entry. Holding that lock prevents either
 * the pte or the ptl from being freed.
 *
 * This function will release the vma lock before returning.
 */
void __migration_entry_wait_huge(struct vm_area_struct *vma,
				 pte_t *ptep, spinlock_t *ptl)
{
	pte_t pte;

	hugetlb_vma_assert_locked(vma);
	spin_lock(ptl);
	pte = huge_ptep_get(ptep);

	if (unlikely(!is_hugetlb_entry_migration(pte))) {
		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
	} else {
		/*
		 * If a migration entry existed, it is safe to release the vma
		 * lock here because the pgtable page won't be freed without
		 * the pgtable lock being released. See the comment right above
		 * the pgtable lock release in migration_entry_wait_on_locked().
		 */
		hugetlb_vma_unlock_read(vma);
		migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
	}
}

void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);

	__migration_entry_wait_huge(vma, pte, ptl);
}
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

/*
 * The references a migration candidate is expected to hold: one for the
 * isolating caller, one per base page held by the page cache if there is
 * a mapping, and one more if fs-private data is attached.
 */
static int folio_expected_refs(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;

	if (!mapping)
		return refs;

	refs += folio_nr_pages(folio);
	if (folio_test_private(folio))
		refs++;

	return refs;
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	xas_store(&xas, newfolio);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);
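/*
 * Worked example (illustrative): for a 4-page file-backed folio with
 * fs-private data attached, folio_expected_refs() above yields
 * 1 (caller's isolation reference) + 4 (page cache) + 1 (private) = 6,
 * so folio_migrate_mapping() only proceeds if the refcount is exactly 6
 * plus whatever extra_count the caller passes in.
 */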
/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct folio *dst, struct folio *src)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(src));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + folio_has_private(src);
	if (!folio_ref_freeze(src, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	dst->index = src->index;
	dst->mapping = src->mapping;

	folio_get(dst);

	xas_store(&xas, dst);

	folio_ref_unfreeze(src, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	/*
	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
	 * migration entries. We can still have PG_anon_exclusive set on an
	 * effectively unmapped and unreferenced first sub-page of an
	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
	 */
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(&folio->page, -1);
	/*
	 * In memory tiering mode, when migrating between slow and fast
	 * memory nodes, reset cpupid, because it is used to record the
	 * page access time on the slow memory node.
	 */
	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
		bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
		bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));

		if (f_toptier != t_toptier)
			cpupid = -1;
	}
	page_cpupid_xchg_last(&newfolio->page, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * folio_end_writeback() may clear PG_readahead mistakenly, so set the
	 * bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	if (!folio_test_hugetlb(folio))
		mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode, int extra_count)
{
	int rc;

	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, dst, src, extra_count);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not use PagePrivate/PagePrivate2.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	return migrate_folio_extra(mapping, dst, src, mode, 0);
}
EXPORT_SYMBOL(migrate_folio);
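/*
 * Illustrative only: a filesystem that keeps no private data on its folios
 * typically just points its address_space_operations at the helper above,
 * e.g.
 *
 *	.migrate_folio = migrate_folio,
 *
 * Filesystems that use buffer heads or folio private data would instead
 * use buffer_migrate_folio() or filemap_migrate_folio() defined below.
 */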
#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the locks taken so far.
			 */
			struct buffer_head *failed_bh = bh;
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}

static int __buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	head = folio_buffers(src);
	if (!head)
		return migrate_folio(mapping, dst, src, mode);

	/* Check that the folio does not have extra refs before we do more work */
	expected_count = folio_expected_refs(mapping, src);
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	folio_attach_private(dst, folio_detach_private(src));

	bh = head;
	do {
		set_bh_page(bh, &dst->page, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	return rc;
}

/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example, attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
#endif

int filemap_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int ret;

	ret = folio_migrate_mapping(mapping, dst, src, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_get_private(src))
		folio_attach_private(dst, folio_detach_private(src));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);

/*
 * Write back a folio to clean the dirty state.
 */
static int writeout(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!folio_clear_dirty_for_io(folio))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty folio may imply that the underlying filesystem has
	 * the folio on some queue. So the folio must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * folio state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		folio_lock(folio);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	if (folio_test_dirty(src)) {
		/* Only write back folios in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, src);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (folio_test_private(src) &&
	    !filemap_release_folio(src, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_folio(mapping, dst, src, mode);
}
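/*
 * Illustrative note: the path taken in move_to_new_folio() below depends on
 * whether the mapping provides a ->migrate_folio callback. Filesystems
 * normally set one (migrate_folio(), filemap_migrate_folio(),
 * buffer_migrate_folio(), ...); only mappings without a callback fall back
 * to fallback_migrate_folio() above, which may have to write a dirty folio
 * out first.
 */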
/*
 * Move a page to a newly allocated page.
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_folio(struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(&src->page);

	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);

	if (likely(is_lru)) {
		struct address_space *mapping = folio_mapping(src);

		if (!mapping)
			rc = migrate_folio(mapping, dst, src, mode);
		else if (mapping->a_ops->migrate_folio)
			/*
			 * Most folios have a mapping and most filesystems
			 * provide a migrate_folio callback. Anonymous folios
			 * are part of swap space which also has its own
			 * migrate_folio callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
								mode);
		else
			rc = fallback_migrate_folio(mapping, dst, src, mode);
	} else {
		const struct movable_operations *mops;

		/*
		 * A non-LRU page could have been released after the
		 * isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
		if (!folio_test_movable(src)) {
			rc = MIGRATEPAGE_SUCCESS;
			folio_clear_isolated(src);
			goto out;
		}

		mops = folio_movable_ops(src);
		rc = mops->migrate_page(&dst->page, &src->page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
				!folio_test_isolated(src));
	}

	/*
	 * When successful, old pagecache src->mapping must be cleared before
	 * src is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(&src->page)) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			folio_clear_isolated(src);
		}

		/*
		 * Anonymous and movable src->mapping will be cleared by
		 * free_pages_prepare() so don't reset it here; keeping it
		 * lets type checks such as PageAnon keep working.
		 */
		if (!folio_mapping_flags(src))
			src->mapping = NULL;

		if (likely(!folio_is_zone_device(dst)))
			flush_dcache_folio(dst);
	}
out:
	return rc;
}
/*
 * To record some information during migration, we use otherwise-unused
 * fields (mapping and private) of the newly allocated destination folio.
 * This is safe because nobody is using them except us.
 */
union migration_ptr {
	struct anon_vma *anon_vma;
	struct address_space *mapping;
};
static void __migrate_folio_record(struct folio *dst,
				   unsigned long page_was_mapped,
				   struct anon_vma *anon_vma)
{
	union migration_ptr ptr = { .anon_vma = anon_vma };
	dst->mapping = ptr.mapping;
	dst->private = (void *)page_was_mapped;
}

static void __migrate_folio_extract(struct folio *dst,
				   int *page_was_mappedp,
				   struct anon_vma **anon_vmap)
{
	union migration_ptr ptr = { .mapping = dst->mapping };
	*anon_vmap = ptr.anon_vma;
	*page_was_mappedp = (unsigned long)dst->private;
	dst->mapping = NULL;
	dst->private = NULL;
}

/* Restore the source folio to the original state upon failure */
static void migrate_folio_undo_src(struct folio *src,
				   int page_was_mapped,
				   struct anon_vma *anon_vma,
				   bool locked,
				   struct list_head *ret)
{
	if (page_was_mapped)
		remove_migration_ptes(src, src, false);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	if (locked)
		folio_unlock(src);
	if (ret)
		list_move_tail(&src->lru, ret);
}

/* Restore the destination folio to the original state upon failure */
static void migrate_folio_undo_dst(struct folio *dst,
				   bool locked,
				   free_page_t put_new_page,
				   unsigned long private)
{
	if (locked)
		folio_unlock(dst);
	if (put_new_page)
		put_new_page(&dst->page, private);
	else
		folio_put(dst);
}

/* Cleanup src folio upon migration success */
static void migrate_folio_done(struct folio *src,
			       enum migrate_reason reason)
{
	/*
	 * Compaction can also migrate non-LRU pages, which are not
	 * accounted in NR_ISOLATED_*. They can be recognized via
	 * __PageMovable.
	 */
	if (likely(!__folio_test_movable(src)))
		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
				    folio_is_file_lru(src), -folio_nr_pages(src));

	if (reason != MR_MEMORY_FAILURE)
		/* We release the page in page_handle_poison. */
		folio_put(src);
}
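/*
 * Illustrative note: migration of one folio is split into two phases.
 * migrate_folio_unmap() below locks the folio and replaces its ptes with
 * migration entries, stashing anon_vma/page_was_mapped in the destination
 * folio via __migrate_folio_record(). migrate_folio_move() later extracts
 * that state and performs the actual copy and pte restore. This split is
 * what allows migrate_pages_batch() to unmap many folios before moving
 * any of them.
 */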
/* Obtain the lock on the folio and remove all ptes. */
static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
			       unsigned long private, struct folio *src,
			       struct folio **dstp, enum migrate_mode mode,
			       enum migrate_reason reason, struct list_head *ret)
{
	struct folio *dst;
	int rc = -EAGAIN;
	struct page *newpage = NULL;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(&src->page);
	bool locked = false;
	bool dst_locked = false;

	if (folio_ref_count(src) == 1) {
		/* Folio was freed from under us. So we are done. */
		folio_clear_active(src);
		folio_clear_unevictable(src);
		/* free_pages_prepare() will clear PG_isolated. */
		list_del(&src->lru);
		migrate_folio_done(src, reason);
		return MIGRATEPAGE_SUCCESS;
	}

	newpage = get_new_page(&src->page, private);
	if (!newpage)
		return -ENOMEM;
	dst = page_folio(newpage);
	*dstp = dst;

	dst->private = NULL;

	if (!folio_trylock(src)) {
		if (mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readahead). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		folio_lock(src);
	}
	locked = true;

	if (folio_test_writeback(src)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out;
		}
		folio_wait_writeback(src);
	}

	/*
	 * try_to_migrate() drops src->mapcount to 0, after which we can no
	 * longer notice if the anon_vma is freed while we migrate the page.
	 * This folio_get_anon_vma() delays freeing the anon_vma pointer
	 * until the end of migration. File cache pages are no problem
	 * because they hold the page lock during migration, so only the
	 * anonymous page needs this care here.
	 *
	 * Only folio_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (folio_test_anon(src) && !folio_test_ksm(src))
		anon_vma = folio_get_anon_vma(src);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to dst at this point. We used to have a BUG
	 * here if folio_trylock(dst) fails, but would like to allow for
	 * cases where there might be a race with the previous use of dst.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!folio_trylock(dst)))
		goto out;
	dst_locked = true;

	if (unlikely(!is_lru)) {
		__migrate_folio_record(dst, page_was_mapped, anon_vma);
		return MIGRATEPAGE_UNMAP;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a src->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_cleanup_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!src->mapping) {
		if (folio_test_private(src)) {
			try_to_free_buffers(src);
			goto out;
		}
	} else if (folio_mapped(src)) {
		/* Establish migration ptes */
		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
			       !folio_test_ksm(src) && !anon_vma, src);
		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
		page_was_mapped = 1;
	}

	if (!folio_mapped(src)) {
		__migrate_folio_record(dst, page_was_mapped, anon_vma);
		return MIGRATEPAGE_UNMAP;
	}

out:
	/*
	 * A folio that has not been unmapped will be restored to the
	 * right list unless we want to retry.
	 */
	if (rc == -EAGAIN)
		ret = NULL;

	migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
	migrate_folio_undo_dst(dst, dst_locked, put_new_page, private);

	return rc;
}

/* Migrate the folio to the newly allocated folio in dst. */
static int migrate_folio_move(free_page_t put_new_page, unsigned long private,
			      struct folio *src, struct folio *dst,
			      enum migrate_mode mode, enum migrate_reason reason,
			      struct list_head *ret)
{
	int rc;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(&src->page);
	struct list_head *prev;

	__migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
	prev = dst->lru.prev;
	list_del(&dst->lru);

	rc = move_to_new_folio(dst, src, mode);
	if (rc)
		goto out;

	if (unlikely(!is_lru))
		goto out_unlock_both;

	/*
	 * When successful, push dst to LRU immediately: so that if it
	 * turns out to be an mlocked page, remove_migration_ptes() will
	 * automatically build up the correct dst->mlock_count for it.
	 *
	 * We would like to do something similar for the old page, when
	 * unsuccessful, and other cases when a page has been temporarily
	 * isolated from the unevictable LRU: but this case is the easiest.
	 */
	folio_add_lru(dst);
	if (page_was_mapped)
		lru_add_drain();

	if (page_was_mapped)
		remove_migration_ptes(src, dst, false);

out_unlock_both:
	folio_unlock(dst);
	set_page_owner_migrate_reason(&dst->page, reason);
	/*
	 * If migration is successful, decrease the refcount of dst,
	 * which will not free the page because the new page owner has
	 * increased the refcount.
	 */
	folio_put(dst);

	/*
	 * A folio that has been migrated has all references removed
	 * and will be freed.
	 */
	list_del(&src->lru);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	folio_unlock(src);
	migrate_folio_done(src, reason);

	return rc;
out:
	/*
	 * A folio that has not been migrated will be restored to the
	 * right list unless we want to retry.
	 */
	if (rc == -EAGAIN) {
		list_add(&dst->lru, prev);
		__migrate_folio_record(dst, page_was_mapped, anon_vma);
		return rc;
	}

	migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
	migrate_folio_undo_dst(dst, true, put_new_page, private);

	return rc;
}
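/*
 * Illustrative note: on -EAGAIN above, the destination folio is put back
 * where it was on the caller's list of destination folios and the
 * anon_vma/page_was_mapped state is re-recorded in it, so a later pass of
 * migrate_pages_batch() can retry the move without redoing the unmap step.
 */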
/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and the direct
 * I/O code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode, int reason,
				struct list_head *ret)
{
	struct folio *dst, *src = page_folio(hpage);
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;

	if (folio_ref_count(src) == 1) {
		/* page was freed from under us. So we are done. */
		folio_putback_active_hugetlb(src);
		return MIGRATEPAGE_SUCCESS;
	}

	new_hpage = get_new_page(hpage, private);
	if (!new_hpage)
		return -ENOMEM;
	dst = page_folio(new_hpage);

	if (!folio_trylock(src)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		folio_lock(src);
	}

	/*
	 * Check for pages which are in the process of being freed. Without
	 * folio_mapping() set, the hugetlbfs-specific move-page routine will
	 * not be called and we could leak usage counts for subpools.
	 */
	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (folio_test_anon(src))
		anon_vma = folio_get_anon_vma(src);

	if (unlikely(!folio_trylock(dst)))
		goto put_anon;

	if (folio_mapped(src)) {
		enum ttu_flags ttu = 0;

		if (!folio_test_anon(src)) {
			/*
			 * In shared mappings, try_to_unmap could potentially
			 * call huge_pmd_unshare. Because of this, take the
			 * semaphore in write mode here and set TTU_RMAP_LOCKED
			 * to let lower levels know we have taken the lock.
			 */
			mapping = hugetlb_page_mapping_lock_write(hpage);
			if (unlikely(!mapping))
				goto unlock_put_anon;

			ttu = TTU_RMAP_LOCKED;
		}

		try_to_migrate(src, ttu);
		page_was_mapped = 1;

		if (ttu & TTU_RMAP_LOCKED)
			i_mmap_unlock_write(mapping);
	}

	if (!folio_mapped(src))
		rc = move_to_new_folio(dst, src, mode);

	if (page_was_mapped)
		remove_migration_ptes(src,
			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

unlock_put_anon:
	folio_unlock(dst);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(src, dst, reason);
		put_new_page = NULL;
	}

out_unlock:
	folio_unlock(src);
out:
	if (rc == MIGRATEPAGE_SUCCESS)
		folio_putback_active_hugetlb(src);
	else if (rc != -EAGAIN)
		list_move_tail(&src->lru, ret);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it. Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_page)
		put_new_page(new_hpage, private);
	else
		folio_putback_active_hugetlb(dst);

	return rc;
}

static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
{
	int rc;

	folio_lock(folio);
	rc = split_folio_to_list(folio, split_folios);
	folio_unlock(folio);
	if (!rc)
		list_move_tail(&folio->lru, split_folios);

	return rc;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
#else
#define NR_MAX_BATCHED_MIGRATION	512
#endif
#define NR_MAX_MIGRATE_PAGES_RETRY	10
#define NR_MAX_MIGRATE_ASYNC_RETRY	3
#define NR_MAX_MIGRATE_SYNC_RETRY					\
	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)

struct migrate_pages_stats {
	int nr_succeeded;	/* Normal and large folios migrated successfully, in
				   units of base pages */
	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
				   units of base pages. Untried folios aren't counted */
	int nr_thp_succeeded;	/* THP migrated successfully */
	int nr_thp_failed;	/* THP failed to be migrated */
	int nr_thp_split;	/* THP split before migrating */
};
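/*
 * Illustrative note: these per-call statistics are what migrate_pages()
 * finally reports through count_vm_events() (PGMIGRATE_SUCCESS,
 * PGMIGRATE_FAIL, THP_MIGRATION_SUCCESS/FAIL/SPLIT) and the
 * mm_migrate_pages tracepoint, which is why nr_succeeded/nr_failed_pages
 * are kept in units of base pages while the nr_thp_* fields count whole
 * THPs.
 */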
/*
 * Returns the number of hugetlb folios that were not migrated, or an error
 * code. The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts, or
 * earlier if no hugetlb folios are movable any more because the list has
 * become empty or no retryable hugetlb folios exist any more. It is the
 * caller's responsibility to call putback_movable_pages() only if ret != 0.
 */
static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
			    free_page_t put_new_page, unsigned long private,
			    enum migrate_mode mode, int reason,
			    struct migrate_pages_stats *stats,
			    struct list_head *ret_folios)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_retry_pages = 0;
	int pass = 0;
	struct folio *folio, *folio2;
	int rc, nr_pages;

	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
		retry = 0;
		nr_retry_pages = 0;

		list_for_each_entry_safe(folio, folio2, from, lru) {
			if (!folio_test_hugetlb(folio))
				continue;

			nr_pages = folio_nr_pages(folio);

			cond_resched();

			/*
			 * Migratability of hugepages depends on architectures and
			 * their size. This check is necessary because some callers
			 * of hugepage migration like soft offline and memory
			 * hotremove don't walk through page tables or check whether
			 * the hugepage is pmd-based or not before kicking migration.
			 */
			if (!hugepage_migration_supported(folio_hstate(folio))) {
				nr_failed++;
				stats->nr_failed_pages += nr_pages;
				list_move_tail(&folio->lru, ret_folios);
				continue;
			}

			rc = unmap_and_move_huge_page(get_new_page,
						      put_new_page, private,
						      &folio->page, pass > 2, mode,
						      reason, ret_folios);
			/*
			 * The rules are:
			 *	Success: hugetlb folio will be put back
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other folios, just exit.
				 */
				stats->nr_failed_pages += nr_pages + nr_retry_pages;
				return -ENOMEM;
			case -EAGAIN:
				retry++;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike the -EAGAIN case, the failed folio is
				 * removed from the migration folio list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				stats->nr_failed_pages += nr_pages;
				break;
			}
		}
	}
	/*
	 * nr_failed is the number of hugetlb folios that failed to be migrated.
	 * After NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried
	 * hugetlb folios as failed.
	 */
	nr_failed += retry;
	stats->nr_failed_pages += nr_retry_pages;

	return nr_failed;
}

/*
 * migrate_pages_batch() first unmaps as many folios from the from list as
 * possible, then moves the unmapped folios.
 *
 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
 * lock or bit while we have locked more than one folio, which may cause a
 * deadlock (e.g., for the loop device). So, if mode != MIGRATE_ASYNC, the
 * length of the from list must be <= 1.
 */
static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason, struct list_head *ret_folios,
		struct list_head *split_folios, struct migrate_pages_stats *stats,
		int nr_pass)
{
	int retry = 1;
	int large_retry = 1;
	int thp_retry = 1;
	int nr_failed = 0;
	int nr_retry_pages = 0;
	int nr_large_failed = 0;
	int pass = 0;
	bool is_large = false;
	bool is_thp = false;
	struct folio *folio, *folio2, *dst = NULL, *dst2;
	int rc, rc_saved = 0, nr_pages;
	LIST_HEAD(unmap_folios);
	LIST_HEAD(dst_folios);
	bool nosplit = (reason == MR_NUMA_MISPLACED);

	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
			!list_empty(from) && !list_is_singular(from));

	for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
		retry = 0;
		large_retry = 0;
		thp_retry = 0;
		nr_retry_pages = 0;

		list_for_each_entry_safe(folio, folio2, from, lru) {
			/*
			 * Large folio statistics are based on the source large
			 * folio. Capture required information that might get
			 * lost during migration.
			 */
			is_large = folio_test_large(folio);
			is_thp = is_large && folio_test_pmd_mappable(folio);
			nr_pages = folio_nr_pages(folio);

			cond_resched();

			/*
			 * Large folio migration might be unsupported or
			 * the allocation might fail, so we should retry
			 * on the same folio with the large folio split
			 * into normal folios.
			 *
			 * Split folios are put in split_folios, and
			 * we will migrate them after the rest of the
			 * list is processed.
			 */
			if (!thp_migration_supported() && is_thp) {
				nr_large_failed++;
				stats->nr_thp_failed++;
				if (!try_split_folio(folio, split_folios)) {
					stats->nr_thp_split++;
					continue;
				}
				stats->nr_failed_pages += nr_pages;
				list_move_tail(&folio->lru, ret_folios);
				continue;
			}

			rc = migrate_folio_unmap(get_new_page, put_new_page, private,
						 folio, &dst, mode, reason, ret_folios);
			/*
			 * The rules are:
			 *	Success: folio will be freed
			 *	Unmap: folio will be put on unmap_folios list,
			 *	       dst folio put on dst_folios list
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other folios, move unmapped folios, then exit.
				 */
				if (is_large) {
					nr_large_failed++;
					stats->nr_thp_failed += is_thp;
					/* Large folio NUMA faulting doesn't split to retry. */
					if (!nosplit) {
						int ret = try_split_folio(folio, split_folios);

						if (!ret) {
							stats->nr_thp_split += is_thp;
							break;
						} else if (reason == MR_LONGTERM_PIN &&
							   ret == -EAGAIN) {
							/*
							 * Try again to split the large folio to
							 * mitigate the failure of longterm pinning.
							 */
							large_retry++;
							thp_retry += is_thp;
							nr_retry_pages += nr_pages;
							break;
						}
					}
				} else {
					nr_failed++;
				}

				stats->nr_failed_pages += nr_pages + nr_retry_pages;
				/* nr_failed isn't updated since it isn't used after an abort */
				nr_large_failed += large_retry;
				stats->nr_thp_failed += thp_retry;
				rc_saved = rc;
				if (list_empty(&unmap_folios))
					goto out;
				else
					goto move;
			case -EAGAIN:
				if (is_large) {
					large_retry++;
					thp_retry += is_thp;
				} else {
					retry++;
				}
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				stats->nr_thp_succeeded += is_thp;
				break;
			case MIGRATEPAGE_UNMAP:
				list_move_tail(&folio->lru, &unmap_folios);
				list_add_tail(&dst->lru, &dst_folios);
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike the -EAGAIN case, the failed folio is
				 * removed from the migration folio list and not
				 * retried in the next outer loop.
				 */
				if (is_large) {
					nr_large_failed++;
					stats->nr_thp_failed += is_thp;
				} else {
					nr_failed++;
				}

				stats->nr_failed_pages += nr_pages;
				break;
			}
		}
	}
	nr_failed += retry;
	nr_large_failed += large_retry;
	stats->nr_thp_failed += thp_retry;
	stats->nr_failed_pages += nr_retry_pages;
move:
	/* Flush TLBs for all unmapped folios */
	try_to_unmap_flush();

	retry = 1;
	for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
		retry = 0;
		large_retry = 0;
		thp_retry = 0;
		nr_retry_pages = 0;

		dst = list_first_entry(&dst_folios, struct folio, lru);
		dst2 = list_next_entry(dst, lru);
		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
			is_large = folio_test_large(folio);
			is_thp = is_large && folio_test_pmd_mappable(folio);
			nr_pages = folio_nr_pages(folio);

			cond_resched();

			rc = migrate_folio_move(put_new_page, private,
						folio, dst, mode,
						reason, ret_folios);
			/*
			 * The rules are:
			 *	Success: folio will be freed
			 *	-EAGAIN: stay on the unmap_folios list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -EAGAIN:
				if (is_large) {
					large_retry++;
					thp_retry += is_thp;
				} else {
					retry++;
				}
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				stats->nr_thp_succeeded += is_thp;
				break;
			default:
				if (is_large) {
					nr_large_failed++;
					stats->nr_thp_failed += is_thp;
				} else {
					nr_failed++;
				}

				stats->nr_failed_pages += nr_pages;
				break;
			}
			dst = dst2;
			dst2 = list_next_entry(dst, lru);
		}
	}
	nr_failed += retry;
	nr_large_failed += large_retry;
	stats->nr_thp_failed += thp_retry;
	stats->nr_failed_pages += nr_retry_pages;

	if (rc_saved)
		rc = rc_saved;
	else
		rc = nr_failed + nr_large_failed;
out:
	/* Cleanup remaining folios */
	dst = list_first_entry(&dst_folios, struct folio, lru);
	dst2 = list_next_entry(dst, lru);
	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
		int page_was_mapped = 0;
		struct anon_vma *anon_vma = NULL;

		__migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
		migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
				       true, ret_folios);
		list_del(&dst->lru);
		migrate_folio_undo_dst(dst, true, put_new_page, private);
		dst = dst2;
		dst2 = list_next_entry(dst, lru);
	}

	return rc;
}
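/*
 * Illustrative note: migrate_pages_sync() below implements the synchronous
 * modes as "async batch first, then sync one by one": it first runs
 * migrate_pages_batch() in MIGRATE_ASYNC mode over the whole list, and only
 * the folios that failed are then retried individually in the caller's
 * original mode, which keeps the single-folio restriction that batching
 * imposes on non-async modes.
 */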
static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason, struct list_head *ret_folios,
		struct list_head *split_folios, struct migrate_pages_stats *stats)
{
	int rc, nr_failed = 0;
	LIST_HEAD(folios);
	struct migrate_pages_stats astats;

	memset(&astats, 0, sizeof(astats));
	/* First, try to migrate in a batch with MIGRATE_ASYNC mode */
	rc = migrate_pages_batch(from, get_new_page, put_new_page, private, MIGRATE_ASYNC,
				 reason, &folios, split_folios, &astats,
				 NR_MAX_MIGRATE_ASYNC_RETRY);
	stats->nr_succeeded += astats.nr_succeeded;
	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
	stats->nr_thp_split += astats.nr_thp_split;
	if (rc < 0) {
		stats->nr_failed_pages += astats.nr_failed_pages;
		stats->nr_thp_failed += astats.nr_thp_failed;
		list_splice_tail(&folios, ret_folios);
		return rc;
	}
	stats->nr_thp_failed += astats.nr_thp_split;
	nr_failed += astats.nr_thp_split;
	/*
	 * Fall back to migrating all failed folios one by one synchronously. All
	 * failed folios except split THPs will be retried, so their failure
	 * isn't counted.
	 */
	list_splice_tail_init(&folios, from);
	while (!list_empty(from)) {
		list_move(from->next, &folios);
		rc = migrate_pages_batch(&folios, get_new_page, put_new_page,
					 private, mode, reason, ret_folios,
					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
		list_splice_tail_init(&folios, ret_folios);
		if (rc < 0)
			return rc;
		nr_failed += rc;
	}

	return nr_failed;
}
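/*
 * Illustrative sketch of a typical migrate_pages() caller (compare
 * do_move_pages_to_node() further down): isolate the folios onto a private
 * list, then
 *
 *	struct migration_target_control mtc = {
 *		.nid = target_node,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *	};
 *
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 *
 * "target_node" and "pagelist" stand in for the caller's own state; the
 * pattern only shows how the pieces defined in this file are combined.
 */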
/*
 * migrate_pages - migrate the folios specified in a list, to the free folios
 *		   supplied as the target for the page migration
 *
 * @from:		The list of folios to be migrated.
 * @get_new_page:	The function used to allocate free folios to be used
 *			as the target of the folio migration.
 * @put_new_page:	The function used to free target folios if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			folio migration, if any.
 * @reason:		The reason for folio migration.
 * @ret_succeeded:	Set to the number of folios migrated successfully if
 *			the caller passes a non-NULL pointer.
 *
 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
 * are movable any more because the list has become empty or no retryable folios
 * exist any more. It is the caller's responsibility to call putback_movable_pages()
 * only if ret != 0.
 *
 * Returns the number of {normal folios, large folios, hugetlb folios} that were
 * not migrated, or an error code. The number of large folio splits will be
 * counted as the number of non-migrated large folios, no matter how many
 * split folios of the large folio are migrated successfully.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
	int rc, rc_gather;
	int nr_pages;
	struct folio *folio, *folio2;
	LIST_HEAD(folios);
	LIST_HEAD(ret_folios);
	LIST_HEAD(split_folios);
	struct migrate_pages_stats stats;

	trace_mm_migrate_pages_start(mode, reason);

	memset(&stats, 0, sizeof(stats));

	rc_gather = migrate_hugetlbs(from, get_new_page, put_new_page, private,
				     mode, reason, &stats, &ret_folios);
	if (rc_gather < 0)
		goto out;

again:
	nr_pages = 0;
	list_for_each_entry_safe(folio, folio2, from, lru) {
		/* Retried hugetlb folios will be kept in the list */
		if (folio_test_hugetlb(folio)) {
			list_move_tail(&folio->lru, &ret_folios);
			continue;
		}

		nr_pages += folio_nr_pages(folio);
		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
			break;
	}
	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
		list_cut_before(&folios, from, &folio2->lru);
	else
		list_splice_init(from, &folios);
	if (mode == MIGRATE_ASYNC)
		rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
					 mode, reason, &ret_folios, &split_folios, &stats,
					 NR_MAX_MIGRATE_PAGES_RETRY);
	else
		rc = migrate_pages_sync(&folios, get_new_page, put_new_page, private,
					mode, reason, &ret_folios, &split_folios, &stats);
	list_splice_tail_init(&folios, &ret_folios);
	if (rc < 0) {
		rc_gather = rc;
		list_splice_tail(&split_folios, &ret_folios);
		goto out;
	}
	if (!list_empty(&split_folios)) {
		/*
		 * Failure isn't counted since all split folios of a large folio
		 * are counted as 1 failure already. And, we only try to migrate
		 * them with minimal effort: force MIGRATE_ASYNC mode and retry once.
		 */
		migrate_pages_batch(&split_folios, get_new_page, put_new_page, private,
				    MIGRATE_ASYNC, reason, &ret_folios, NULL, &stats, 1);
		list_splice_tail_init(&split_folios, &ret_folios);
	}
	rc_gather += rc;
	if (!list_empty(from))
		goto again;
out:
	/*
	 * Put the permanently failed folios back on the migration list; they
	 * will be put back on the right list by the caller.
	 */
	list_splice(&ret_folios, from);

	/*
	 * Return 0 in case all split folios of fail-to-migrate large folios
	 * are migrated successfully.
	 */
	if (list_empty(from))
		rc_gather = 0;

	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
			       stats.nr_thp_succeeded, stats.nr_thp_failed,
			       stats.nr_thp_split, mode, reason);

	if (ret_succeeded)
		*ret_succeeded = stats.nr_succeeded;

	return rc_gather;
}

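/*
 * Illustrative sketch (not a helper that exists in this file): a typical
 * in-kernel caller isolates folios onto a private list and hands them to
 * migrate_pages() together with alloc_migration_target() and a
 * struct migration_target_control describing the destination.  The function
 * name and the MR_MEMORY_HOTPLUG reason below are only example choices.
 *
 *	static int example_migrate_list_to_node(struct list_head *folios, int nid)
 *	{
 *		struct migration_target_control mtc = {
 *			.nid = nid,
 *			.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *		};
 *		unsigned int nr_succeeded;
 *		int err;
 *
 *		err = migrate_pages(folios, alloc_migration_target, NULL,
 *				    (unsigned long)&mtc, MIGRATE_SYNC,
 *				    MR_MEMORY_HOTPLUG, &nr_succeeded);
 *		if (err)
 *			putback_movable_pages(folios);
 *		return err;
 *	}
 */
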
struct page *alloc_migration_target(struct page *page, unsigned long private)
{
	struct folio *folio = page_folio(page);
	struct migration_target_control *mtc;
	gfp_t gfp_mask;
	unsigned int order = 0;
	struct folio *hugetlb_folio = NULL;
	struct folio *new_folio = NULL;
	int nid;
	int zidx;

	mtc = (struct migration_target_control *)private;
	gfp_mask = mtc->gfp_mask;
	nid = mtc->nid;
	if (nid == NUMA_NO_NODE)
		nid = folio_nid(folio);

	if (folio_test_hugetlb(folio)) {
		struct hstate *h = folio_hstate(folio);

		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
		hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
						mtc->nmask, gfp_mask);
		return &hugetlb_folio->page;
	}

	if (folio_test_large(folio)) {
		/*
		 * clear __GFP_RECLAIM to make the migration callback
		 * consistent with regular THP allocations.
		 */
		gfp_mask &= ~__GFP_RECLAIM;
		gfp_mask |= GFP_TRANSHUGE;
		order = folio_order(folio);
	}
	zidx = zone_idx(folio_zone(folio));
	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
		gfp_mask |= __GFP_HIGHMEM;

	new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);

	return &new_folio->page;
}

#ifdef CONFIG_NUMA

static int store_status(int __user *status, int start, int value, int nr)
{
	while (nr-- > 0) {
		if (put_user(value, status + start))
			return -EFAULT;
		start++;
	}

	return 0;
}

static int do_move_pages_to_node(struct mm_struct *mm,
		struct list_head *pagelist, int node)
{
	int err;
	struct migration_target_control mtc = {
		.nid = node,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}

/*
 * Resolves the given address to a struct page, isolates it from the LRU and
 * adds it to the given pagelist.
 * Returns:
 *     errno - if the page cannot be found/isolated
 *     0     - when it doesn't have to be migrated because it is already on
 *             the target node
 *     1     - when it has been queued
 */
static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
		int node, struct list_head *pagelist, bool migrate_all)
{
	struct vm_area_struct *vma;
	struct page *page;
	int err;
	bool isolated;

	mmap_read_lock(mm);
	err = -EFAULT;
	vma = vma_lookup(mm, addr);
	if (!vma || !vma_migratable(vma))
		goto out;

	/* FOLL_DUMP to ignore special (like zero) pages */
	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

	err = PTR_ERR(page);
	if (IS_ERR(page))
		goto out;

	err = -ENOENT;
	if (!page)
		goto out;

	if (is_zone_device_page(page))
		goto out_putpage;

	err = 0;
	if (page_to_nid(page) == node)
		goto out_putpage;

	err = -EACCES;
	if (page_mapcount(page) > 1 && !migrate_all)
		goto out_putpage;

	if (PageHuge(page)) {
		if (PageHead(page)) {
			isolated = isolate_hugetlb(page_folio(page), pagelist);
			err = isolated ? 1 : -EBUSY;
		}
	} else {
		struct page *head;

		head = compound_head(page);
		isolated = isolate_lru_page(head);
		if (!isolated) {
			err = -EBUSY;
			goto out_putpage;
		}

		err = 1;
		list_add_tail(&head->lru, pagelist);
		mod_node_page_state(page_pgdat(head),
			NR_ISOLATED_ANON + page_is_file_lru(head),
			thp_nr_pages(head));
	}
out_putpage:
	/*
	 * Either drop the extra reference taken by isolate_lru_page()
	 * or drop the page reference if the page was not isolated.
	 */
	put_page(page);
out:
	mmap_read_unlock(mm);
	return err;
}

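/*
 * Flush the pages queued on @pagelist to @node and record the per-page
 * results: on success the target node is stored for entries [start, i) of
 * @status.  A positive return value is the number of pages that could not
 * be migrated (including the pages not attempted yet); a negative one is
 * an errno.
 */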
static int move_pages_and_store_status(struct mm_struct *mm, int node,
		struct list_head *pagelist, int __user *status,
		int start, int i, unsigned long nr_pages)
{
	int err;

	if (list_empty(pagelist))
		return 0;

	err = do_move_pages_to_node(mm, pagelist, node);
	if (err) {
		/*
		 * A positive err means the number of pages that failed
		 * to migrate. Since we are going to abort and return
		 * the number of non-migrated pages, we need to include
		 * the rest of the nr_pages that have not been attempted
		 * as well.
		 */
		if (err > 0)
			err += nr_pages - i;
		return err;
	}
	return store_status(status, start, node, i - start);
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill in
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	int current_node = NUMA_NO_NODE;
	LIST_HEAD(pagelist);
	int start, i;
	int err = 0, err1;

	lru_cache_disable();

	for (i = start = 0; i < nr_pages; i++) {
		const void __user *p;
		unsigned long addr;
		int node;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out_flush;
		if (get_user(node, nodes + i))
			goto out_flush;
		addr = (unsigned long)untagged_addr(p);

		err = -ENODEV;
		if (node < 0 || node >= MAX_NUMNODES)
			goto out_flush;
		if (!node_state(node, N_MEMORY))
			goto out_flush;

		err = -EACCES;
		if (!node_isset(node, task_nodes))
			goto out_flush;

		if (current_node == NUMA_NO_NODE) {
			current_node = node;
			start = i;
		} else if (node != current_node) {
			err = move_pages_and_store_status(mm, current_node,
					&pagelist, status, start, i, nr_pages);
			if (err)
				goto out;
			start = i;
			current_node = node;
		}

		/*
		 * Errors in the page lookup or isolation are not fatal and we
		 * simply report them via status.
		 */
		err = add_page_for_migration(mm, addr, current_node,
					     &pagelist, flags & MPOL_MF_MOVE_ALL);

		if (err > 0) {
			/* The page is successfully queued for migration */
			continue;
		}

		/*
		 * The move_pages() man page does not have an -EEXIST choice, so
		 * use -EFAULT instead.
		 */
		if (err == -EEXIST)
			err = -EFAULT;

		/*
		 * If the page is already on the target node (!err), store the
		 * node; otherwise, store the err.
		 */
		err = store_status(status, i, err ? : current_node, 1);
		if (err)
			goto out_flush;

		err = move_pages_and_store_status(mm, current_node, &pagelist,
				status, start, i, nr_pages);
		if (err) {
			/* We have accounted for page i */
			if (err > 0)
				err--;
			goto out;
		}
		current_node = NUMA_NO_NODE;
	}
out_flush:
	/* Make sure we do not overwrite the existing error */
	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
				status, start, i, nr_pages);
	if (err >= 0)
		err = err1;
out:
	lru_cache_enable();
	return err;
}

/*
 * Determine the nodes of an array of pages and store them in an array
 * of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	mmap_read_lock(mm);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = vma_lookup(mm, addr);
		if (!vma)
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		if (!is_zone_device_page(page))
			err = page_to_nid(page);

		put_page(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	mmap_read_unlock(mm);
}

static int get_compat_pages_array(const void __user *chunk_pages[],
				  const void __user * __user *pages,
				  unsigned long chunk_nr)
{
	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
	compat_uptr_t p;
	int i;

	for (i = 0; i < chunk_nr; i++) {
		if (get_user(p, pages32 + i))
			return -EFAULT;
		chunk_pages[i] = compat_ptr(p);
	}

	return 0;
}

/*
 * Determine the nodes of a user array of pages and store them in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16UL
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);

		if (in_compat_syscall()) {
			if (get_compat_pages_array(chunk_pages, pages,
						   chunk_nr))
				break;
		} else {
			if (copy_from_user(chunk_pages, pages,
				      chunk_nr * sizeof(*chunk_pages)))
				break;
		}

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

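/*
 * Userspace view of the status-only path above (illustrative sketch, not
 * kernel code): move_pages(2) is declared in <numaif.h> and provided by
 * libnuma.  Passing nodes == NULL turns the call into a pure query, so each
 * status[] entry receives the page's node id or a negative errno.  "buf" is
 * assumed to be a valid, already-touched address of the calling process
 * (pid 0 means the caller).
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	void *pages[1] = { buf };
 *	int status[1];
 *
 *	if (move_pages(0, 1, pages, NULL, status, 0) == 0 && status[0] >= 0)
 *		printf("page is on node %d\n", status[0]);
 */
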
static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
{
	struct task_struct *task;
	struct mm_struct *mm;

	/*
	 * There is no need to check if the current process has the right to
	 * modify the specified process when they are the same.
	 */
	if (!pid) {
		mmget(current->mm);
		*mem_nodes = cpuset_mems_allowed(current);
		return current->mm;
	}

	/* Find the mm_struct */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		return ERR_PTR(-ESRCH);
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		mm = ERR_PTR(-EPERM);
		goto out;
	}
	rcu_read_unlock();

	mm = ERR_PTR(security_task_movememory(task));
	if (IS_ERR(mm))
		goto out;
	*mem_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
out:
	put_task_struct(task);
	if (!mm)
		mm = ERR_PTR(-EINVAL);
	return mm;
}

/*
 * Move a list of pages in the address space of the specified process.
 */
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
			     const void __user * __user *pages,
			     const int __user *nodes,
			     int __user *status, int flags)
{
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	mm = find_mm_struct(pid, &task_nodes);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;
}

SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}

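/*
 * Illustrative userspace sketch of the move path (not kernel code): the
 * wrapper is declared in <numaif.h> and provided by libnuma.  MPOL_MF_MOVE
 * only moves pages mapped exclusively by the target process, while
 * MPOL_MF_MOVE_ALL additionally requires CAP_SYS_NICE, matching the check
 * in kernel_move_pages() above.  "buf" is assumed to be a page-aligned,
 * already-touched user address of the calling process.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	void *pages[1] = { buf };
 *	int nodes[1] = { 1 };		// requested destination node
 *	int status[1];
 *
 *	long ret = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *	if (ret < 0)
 *		perror("move_pages");
 *	else if (ret > 0 || status[0] < 0)
 *		fprintf(stderr, "page not moved, status %d\n", status[0]);
 */
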
#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!managed_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       ZONE_MOVABLE, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					     unsigned long data)
{
	int nid = (int) data;
	int order = compound_order(page);
	gfp_t gfp = __GFP_THISNODE;
	struct folio *new;

	if (order > 0)
		gfp |= GFP_TRANSHUGE_LIGHT;
	else {
		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
			__GFP_NOWARN;
		gfp &= ~__GFP_RECLAIM;
	}
	new = __folio_alloc_node(gfp, order, nid);

	return &new->page;
}

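/*
 * Isolate @page from the LRU in preparation for NUMA-balancing migration.
 * Returns 1 if the page was isolated; in that case the caller's page
 * reference has been dropped and the isolation reference keeps the page
 * alive.  Returns 0 if the page was not isolated (nearly full target node,
 * shared THP, or isolation failure); the caller's reference is untouched.
 */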
static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int nr_pages = thp_nr_pages(page);
	int order = compound_order(page);

	VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);

	/* Do not migrate THP mapped by multiple processes */
	if (PageTransHuge(page) && total_mapcount(page) > 1)
		return 0;

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
		int z;

		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
			return 0;
		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
			if (managed_zone(pgdat->node_zones + z))
				break;
		}
		wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
		return 0;
	}

	if (!isolate_lru_page(page))
		return 0;

	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
			    nr_pages);

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}

/*
 * Attempt to migrate a misplaced page to the specified destination
 * node. The caller is expected to hold an elevated reference count on
 * the page, which this function drops before returning.
 */
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
			   int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	unsigned int nr_succeeded;
	LIST_HEAD(migratepages);
	int nr_pages = thp_nr_pages(page);

	/*
	 * Don't migrate file pages that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 */
	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Also do not migrate dirty pages as not all filesystems can move
	 * dirty pages in MIGRATE_ASYNC mode, which would be a waste of cycles.
	 */
	if (page_is_file_lru(page) && PageDirty(page))
		goto out;

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated)
		goto out;

	list_add(&page->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
				     NULL, node, MIGRATE_ASYNC,
				     MR_NUMA_MISPLACED, &nr_succeeded);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&page->lru);
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -nr_pages);
			putback_lru_page(page);
		}
		isolated = 0;
	}
	if (nr_succeeded) {
		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
					    nr_succeeded);
	}
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_NUMA */