// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"

bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct folio *folio = folio_get_nontail_page(page);
	const struct movable_operations *mops;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (!folio)
		goto out;

	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
	smp_rmb();
	/*
	 * Check movable flag before taking the page lock because
	 * we use non-atomic bitops on newly allocated page flags so
	 * unconditionally grabbing the lock ruins page's owner side.
	 */
	if (unlikely(!__folio_test_movable(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
	smp_rmb();
	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;

	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!folio_trylock(folio)))
		goto out_putfolio;

	if (!folio_test_movable(folio) || folio_test_isolated(folio))
		goto out_no_isolated;

	mops = folio_movable_ops(folio);
	VM_BUG_ON_FOLIO(!mops, folio);

	if (!mops->isolate_page(&folio->page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(folio_test_isolated(folio));
	folio_set_isolated(folio);
	folio_unlock(folio);

	return true;

out_no_isolated:
	folio_unlock(folio);
out_putfolio:
	folio_put(folio);
out:
	return false;
}
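
/*
 * For orientation, a rough sketch of the driver side (see struct
 * movable_operations in <linux/migrate.h>): a provider of non-LRU movable
 * pages supplies the three callbacks used in this file:
 *
 *	.isolate_page	- called with the page locked; returns true once the
 *			  page has been taken off the driver's own lists.
 *	.migrate_page	- moves contents/state to the new page and returns
 *			  MIGRATEPAGE_SUCCESS.
 *	.putback_page	- undoes the isolation when migration does not happen.
 *
 * How the operations are registered is driver specific and not shown here;
 * the balloon driver (see <linux/balloon_compaction.h>) is one user.
 */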

static void putback_movable_folio(struct folio *folio)
{
	const struct movable_operations *mops = folio_movable_ops(folio);

	mops->putback_page(&folio->page);
	folio_clear_isolated(folio);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
 * and isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
	struct folio *folio;
	struct folio *folio2;

	list_for_each_entry_safe(folio, folio2, l, lru) {
		if (unlikely(folio_test_hugetlb(folio))) {
			folio_putback_active_hugetlb(folio);
			continue;
		}
		list_del(&folio->lru);
		/*
		 * We isolated non-lru movable folio so here we can use
		 * __PageMovable because LRU folio's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__folio_test_movable(folio))) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
			folio_lock(folio);
			if (folio_test_movable(folio))
				putback_movable_folio(folio);
			else
				folio_clear_isolated(folio);
			folio_unlock(folio);
			folio_put(folio);
		} else {
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -folio_nr_pages(folio));
			folio_putback_lru(folio);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *old)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		rmap_t rmap_flags = RMAP_NONE;
		pte_t old_pte;
		pte_t pte;
		swp_entry_t entry;
		struct page *new;
		unsigned long idx = 0;

		/* pgoff is invalid for ksm pages, but they are never large */
		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
		new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		folio_get(folio);
		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
		old_pte = ptep_get(pvmw.pte);
		if (pte_swp_soft_dirty(old_pte))
			pte = pte_mksoft_dirty(pte);

		entry = pte_to_swp_entry(old_pte);
		if (!is_migration_entry_young(entry))
			pte = pte_mkold(pte);
		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
			pte = pte_mkdirty(pte);
		if (is_writable_migration_entry(entry))
			pte = pte_mkwrite(pte);
		else if (pte_swp_uffd_wp(old_pte))
			pte = pte_mkuffd_wp(pte);

		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(old_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(old_pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (folio_test_hugetlb(folio)) {
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (folio_test_anon(folio))
				hugepage_add_anon_rmap(new, vma, pvmw.address,
						       rmap_flags);
			else
				page_dup_file_rmap(new, true);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		} else
#endif
		{
			if (folio_test_anon(folio))
				page_add_anon_rmap(new, vma, pvmw.address,
						   rmap_flags);
			else
				page_add_file_rmap(new, vma, false);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();

		trace_remove_migration_pte(pvmw.address, pte_val(pte),
					   compound_order(new));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = src,
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
			  unsigned long address)
{
	spinlock_t *ptl;
	pte_t *ptep;
	pte_t pte;
	swp_entry_t entry;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		return;

	pte = ptep_get(ptep);
	pte_unmap(ptep);

	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptl);
	return;
out:
	spin_unlock(ptl);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The vma read lock must be held upon entry. Holding that lock prevents either
 * the pte or the ptl from being freed.
 *
 * This function will release the vma lock before returning.
 */
void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
	pte_t pte;

	hugetlb_vma_assert_locked(vma);
	spin_lock(ptl);
	pte = huge_ptep_get(ptep);

	if (unlikely(!is_hugetlb_entry_migration(pte))) {
		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
	} else {
		/*
		 * If migration entry existed, safe to release vma lock
		 * here because the pgtable page won't be freed without the
		 * pgtable lock released. See comment right above pgtable
		 * lock release in migration_entry_wait_on_locked().
		 */
		hugetlb_vma_unlock_read(vma);
		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
	}
}
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif
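
/*
 * References we expect to hold on an isolated folio: the one held by the
 * migration code itself, plus, for pagecache folios, one per base page
 * held by the mapping and one more when fs-private data (e.g. buffers)
 * is attached.
 */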

static int folio_expected_refs(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;
	if (!mapping)
		return refs;

	refs += folio_nr_pages(folio);
	if (folio_test_private(folio))
		refs++;

	return refs;
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	xas_store(&xas, newfolio);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);

			if (folio_test_pmd_mappable(folio)) {
				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
			}
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct folio *dst, struct folio *src)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(src));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + folio_has_private(src);
	if (!folio_ref_freeze(src, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	dst->index = src->index;
	dst->mapping = src->mapping;

	folio_get(dst);

	xas_store(&xas, dst);

	folio_ref_unfreeze(src, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	/*
	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
	 * migration entries. We can still have PG_anon_exclusive set on an
	 * effectively unmapped and unreferenced first sub-page of an
	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
	 */
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(&folio->page, -1);
	/*
	 * For memory tiering mode, when migrating between slow and fast
	 * memory nodes, reset cpupid, because that is used to record
	 * page access time in slow memory node.
	 */
	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
		bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
		bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));

		if (f_toptier != t_toptier)
			cpupid = -1;
	}
	page_cpupid_xchg_last(&newfolio->page, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
	 * bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	if (!folio_test_hugetlb(folio))
		mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/
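
/*
 * Like migrate_folio() below, but for callers that hold @extra_count
 * additional references on @src beyond those folio_expected_refs()
 * accounts for. Moves the pagecache entry, then copies the data (or,
 * for MIGRATE_SYNC_NO_COPY, only the flags) to @dst.
 */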

int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode, int extra_count)
{
	int rc;

	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, dst, src, extra_count);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not use PagePrivate/PagePrivate2.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	return migrate_folio_extra(mapping, dst, src, mode, 0);
}
EXPORT_SYMBOL(migrate_folio);

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;
	struct buffer_head *failed_bh;

	do {
		if (!trylock_buffer(bh)) {
			if (mode == MIGRATE_ASYNC)
				goto unlock;
			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
				goto unlock;
			lock_buffer(bh);
		}

		bh = bh->b_this_page;
	} while (bh != head);

	return true;

unlock:
	/* We failed to lock the buffer and cannot stall. */
	failed_bh = bh;
	bh = head;
	while (bh != failed_bh) {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	}

	return false;
}

static int __buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	head = folio_buffers(src);
	if (!head)
		return migrate_folio(mapping, dst, src, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = folio_expected_refs(mapping, src);
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	folio_attach_private(dst, folio_detach_private(src));

	bh = head;
	do {
		set_bh_page(bh, &dst->page, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	return rc;
}

/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
#endif
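
/*
 * Migration function for mappings that keep fs-private data in
 * folio->private but need no buffer head reference checks: move the
 * pagecache entry, hand the private data over to @dst and copy the folio
 * contents (or just the flags for MIGRATE_SYNC_NO_COPY).
 */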

int filemap_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int ret;

	ret = folio_migrate_mapping(mapping, dst, src, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_get_private(src))
		folio_attach_private(dst, folio_detach_private(src));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);

/*
 * Writeback a folio to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!folio_clear_dirty_for_io(folio))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty folio may imply that the underlying filesystem has
	 * the folio on some queue. So the folio must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * folio state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		folio_lock(folio);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	if (folio_test_dirty(src)) {
		/* Only writeback folios in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, src);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (folio_test_private(src) &&
	    !filemap_release_folio(src, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_folio(mapping, dst, src, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_folio(struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(&src->page);

	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);

	if (likely(is_lru)) {
		struct address_space *mapping = folio_mapping(src);

		if (!mapping)
			rc = migrate_folio(mapping, dst, src, mode);
		else if (mapping->a_ops->migrate_folio)
			/*
			 * Most folios have a mapping and most filesystems
			 * provide a migrate_folio callback. Anonymous folios
			 * are part of swap space which also has its own
			 * migrate_folio callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
								mode);
		else
			rc = fallback_migrate_folio(mapping, dst, src, mode);
	} else {
		const struct movable_operations *mops;

		/*
		 * In case of non-lru page, it could be released after
		 * isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
		if (!folio_test_movable(src)) {
			rc = MIGRATEPAGE_SUCCESS;
			folio_clear_isolated(src);
			goto out;
		}

		mops = folio_movable_ops(src);
		rc = mops->migrate_page(&dst->page, &src->page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
				!folio_test_isolated(src));
	}

	/*
	 * When successful, old pagecache src->mapping must be cleared before
	 * src is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(&src->page)) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			folio_clear_isolated(src);
		}

		/*
		 * Anonymous and movable src->mapping will be cleared by
		 * free_pages_prepare so don't reset it here; keeping it
		 * lets checks such as PageAnon continue to work.
		 */
		if (!folio_mapping_flags(src))
			src->mapping = NULL;

		if (likely(!folio_is_zone_device(dst)))
			flush_dcache_folio(dst);
	}
out:
	return rc;
}

/*
 * To record some information during migration, we use some unused
 * fields (mapping and private) of struct folio of the newly allocated
 * destination folio. This is safe because nobody is using them
 * except us.
 */
union migration_ptr {
	struct anon_vma *anon_vma;
	struct address_space *mapping;
};
static void __migrate_folio_record(struct folio *dst,
				   unsigned long page_was_mapped,
				   struct anon_vma *anon_vma)
{
	union migration_ptr ptr = { .anon_vma = anon_vma };
	dst->mapping = ptr.mapping;
	dst->private = (void *)page_was_mapped;
}

static void __migrate_folio_extract(struct folio *dst,
				   int *page_was_mappedp,
				   struct anon_vma **anon_vmap)
{
	union migration_ptr ptr = { .mapping = dst->mapping };
	*anon_vmap = ptr.anon_vma;
	*page_was_mappedp = (unsigned long)dst->private;
	dst->mapping = NULL;
	dst->private = NULL;
}

/* Restore the source folio to the original state upon failure */
static void migrate_folio_undo_src(struct folio *src,
				   int page_was_mapped,
				   struct anon_vma *anon_vma,
				   bool locked,
				   struct list_head *ret)
{
	if (page_was_mapped)
		remove_migration_ptes(src, src, false);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	if (locked)
		folio_unlock(src);
	if (ret)
		list_move_tail(&src->lru, ret);
}

/* Restore the destination folio to the original state upon failure */
static void migrate_folio_undo_dst(struct folio *dst, bool locked,
		free_folio_t put_new_folio, unsigned long private)
{
	if (locked)
		folio_unlock(dst);
	if (put_new_folio)
		put_new_folio(dst, private);
	else
		folio_put(dst);
}

/* Cleanup src folio upon migration success */
static void migrate_folio_done(struct folio *src,
			       enum migrate_reason reason)
{
	/*
	 * Compaction can migrate also non-LRU pages which are
	 * not accounted to NR_ISOLATED_*. They can be recognized
	 * as __PageMovable
	 */
	if (likely(!__folio_test_movable(src)))
		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
				    folio_is_file_lru(src), -folio_nr_pages(src));

	if (reason != MR_MEMORY_FAILURE)
		/* We release the page in page_handle_poison. */
		folio_put(src);
}

/* Obtain the lock on page, remove all ptes. */
static int migrate_folio_unmap(new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		struct folio *src, struct folio **dstp, enum migrate_mode mode,
		enum migrate_reason reason, struct list_head *ret)
{
	struct folio *dst;
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(&src->page);
	bool locked = false;
	bool dst_locked = false;

	if (folio_ref_count(src) == 1) {
		/* Folio was freed from under us. So we are done. */
		folio_clear_active(src);
		folio_clear_unevictable(src);
		/* free_pages_prepare() will clear PG_isolated. */
		list_del(&src->lru);
		migrate_folio_done(src, reason);
		return MIGRATEPAGE_SUCCESS;
	}

	dst = get_new_folio(src, private);
	if (!dst)
		return -ENOMEM;
	*dstp = dst;

	dst->private = NULL;

	if (!folio_trylock(src)) {
		if (mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readahead). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		/*
		 * In "light" mode, we can wait for transient locks (eg
		 * inserting a page into the page table), but it's not
		 * worth waiting for I/O.
		 */
		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
			goto out;

		folio_lock(src);
	}
	locked = true;

	if (folio_test_writeback(src)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out;
		}
		folio_wait_writeback(src);
	}

	/*
	 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the end
	 * of migration. File cache pages are no problem because of page_lock():
	 * file caches may use writepage() or lock_page() in migration, so we
	 * only need to care about anon pages here.
	 *
	 * Only folio_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (folio_test_anon(src) && !folio_test_ksm(src))
		anon_vma = folio_get_anon_vma(src);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to dst at this point. We used to have a BUG
	 * here if folio_trylock(dst) fails, but would like to allow for
	 * cases where there might be a race with the previous use of dst.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!folio_trylock(dst)))
		goto out;
	dst_locked = true;

	if (unlikely(!is_lru)) {
		__migrate_folio_record(dst, page_was_mapped, anon_vma);
		return MIGRATEPAGE_UNMAP;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a src->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_cleanup_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!src->mapping) {
		if (folio_test_private(src)) {
			try_to_free_buffers(src);
			goto out;
		}
	} else if (folio_mapped(src)) {
		/* Establish migration ptes */
		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
			       !folio_test_ksm(src) && !anon_vma, src);
		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
		page_was_mapped = 1;
	}

	if (!folio_mapped(src)) {
		__migrate_folio_record(dst, page_was_mapped, anon_vma);
		return MIGRATEPAGE_UNMAP;
	}

out:
	/*
	 * A folio that has not been unmapped will be restored to
	 * the right list unless we want to retry.
	 */
	if (rc == -EAGAIN)
		ret = NULL;

	migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);

	return rc;
}

/* Migrate the folio to the newly allocated folio in dst. */
static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
			      struct folio *src, struct folio *dst,
			      enum migrate_mode mode, enum migrate_reason reason,
			      struct list_head *ret)
{
	int rc;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(&src->page);
	struct list_head *prev;

	__migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
	prev = dst->lru.prev;
	list_del(&dst->lru);

	rc = move_to_new_folio(dst, src, mode);
	if (rc)
		goto out;

	if (unlikely(!is_lru))
		goto out_unlock_both;

	/*
	 * When successful, push dst to LRU immediately: so that if it
	 * turns out to be an mlocked page, remove_migration_ptes() will
	 * automatically build up the correct dst->mlock_count for it.
	 *
	 * We would like to do something similar for the old page, when
	 * unsuccessful, and other cases when a page has been temporarily
	 * isolated from the unevictable LRU: but this case is the easiest.
	 */
	folio_add_lru(dst);
	if (page_was_mapped)
		lru_add_drain();

	if (page_was_mapped)
		remove_migration_ptes(src, dst, false);

out_unlock_both:
	folio_unlock(dst);
	set_page_owner_migrate_reason(&dst->page, reason);
	/*
	 * If migration is successful, decrease refcount of dst,
	 * which will not free the page because new page owner increased
	 * refcounter.
	 */
	folio_put(dst);

	/*
	 * A folio that has been migrated has all references removed
	 * and will be freed.
	 */
	list_del(&src->lru);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	folio_unlock(src);
	migrate_folio_done(src, reason);

	return rc;
out:
	/*
	 * A folio that has not been migrated will be restored to
	 * the right list unless we want to retry.
	 */
	if (rc == -EAGAIN) {
		list_add(&dst->lru, prev);
		__migrate_folio_record(dst, page_was_mapped, anon_vma);
		return rc;
	}

	migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
	migrate_folio_undo_dst(dst, true, put_new_folio, private);

	return rc;
}

/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then pte is replaced with migration swap entry and direct I/O code
 * will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		struct folio *src, int force, enum migrate_mode mode,
		int reason, struct list_head *ret)
{
	struct folio *dst;
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;

	if (folio_ref_count(src) == 1) {
		/* page was freed from under us. So we are done. */
		folio_putback_active_hugetlb(src);
		return MIGRATEPAGE_SUCCESS;
	}

	dst = get_new_folio(src, private);
	if (!dst)
		return -ENOMEM;

	if (!folio_trylock(src)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		folio_lock(src);
	}

	/*
	 * Check for pages which are in the process of being freed. Without
	 * folio_mapping() set, hugetlbfs specific move page routine will not
	 * be called and we could leak usage counts for subpools.
	 */
	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (folio_test_anon(src))
		anon_vma = folio_get_anon_vma(src);

	if (unlikely(!folio_trylock(dst)))
		goto put_anon;

	if (folio_mapped(src)) {
		enum ttu_flags ttu = 0;

		if (!folio_test_anon(src)) {
			/*
			 * In shared mappings, try_to_unmap could potentially
			 * call huge_pmd_unshare. Because of this, take
			 * semaphore in write mode here and set TTU_RMAP_LOCKED
			 * to let lower levels know we have taken the lock.
			 */
			mapping = hugetlb_page_mapping_lock_write(&src->page);
			if (unlikely(!mapping))
				goto unlock_put_anon;

			ttu = TTU_RMAP_LOCKED;
		}

		try_to_migrate(src, ttu);
		page_was_mapped = 1;

		if (ttu & TTU_RMAP_LOCKED)
			i_mmap_unlock_write(mapping);
	}

	if (!folio_mapped(src))
		rc = move_to_new_folio(dst, src, mode);

	if (page_was_mapped)
		remove_migration_ptes(src,
			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

unlock_put_anon:
	folio_unlock(dst);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(src, dst, reason);
		put_new_folio = NULL;
	}

out_unlock:
	folio_unlock(src);
out:
	if (rc == MIGRATEPAGE_SUCCESS)
		folio_putback_active_hugetlb(src);
	else if (rc != -EAGAIN)
		list_move_tail(&src->lru, ret);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it. Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_folio)
		put_new_folio(dst, private);
	else
		folio_putback_active_hugetlb(dst);

	return rc;
}
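
/*
 * Split @folio into base pages under the folio lock; on success (0) the
 * resulting folios are moved to @split_folios so they can be migrated
 * individually later.
 */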

static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
{
	int rc;

	folio_lock(folio);
	rc = split_folio_to_list(folio, split_folios);
	folio_unlock(folio);
	if (!rc)
		list_move_tail(&folio->lru, split_folios);

	return rc;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
#else
#define NR_MAX_BATCHED_MIGRATION	512
#endif
#define NR_MAX_MIGRATE_PAGES_RETRY	10
#define NR_MAX_MIGRATE_ASYNC_RETRY	3
#define NR_MAX_MIGRATE_SYNC_RETRY					\
	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)

struct migrate_pages_stats {
	int nr_succeeded;	/* Normal and large folios migrated successfully, in
				   units of base pages */
	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
				   units of base pages. Untried folios aren't counted */
	int nr_thp_succeeded;	/* THP migrated successfully */
	int nr_thp_failed;	/* THP failed to be migrated */
	int nr_thp_split;	/* THP split before migrating */
};

/*
 * Returns the number of hugetlb folios that were not migrated, or an error code
 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
 * any more because the list has become empty or no retryable hugetlb folios
 * exist any more. It is the caller's responsibility to call putback_movable_pages()
 * only if ret != 0.
 */
static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
			    free_folio_t put_new_folio, unsigned long private,
			    enum migrate_mode mode, int reason,
			    struct migrate_pages_stats *stats,
			    struct list_head *ret_folios)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_retry_pages = 0;
	int pass = 0;
	struct folio *folio, *folio2;
	int rc, nr_pages;

	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
		retry = 0;
		nr_retry_pages = 0;

		list_for_each_entry_safe(folio, folio2, from, lru) {
			if (!folio_test_hugetlb(folio))
				continue;

			nr_pages = folio_nr_pages(folio);

			cond_resched();

			/*
			 * Migratability of hugepages depends on architectures and
			 * their size. This check is necessary because some callers
			 * of hugepage migration like soft offline and memory
			 * hotremove don't walk through page tables or check whether
			 * the hugepage is pmd-based or not before kicking migration.
			 */
			if (!hugepage_migration_supported(folio_hstate(folio))) {
				nr_failed++;
				stats->nr_failed_pages += nr_pages;
				list_move_tail(&folio->lru, ret_folios);
				continue;
			}

			rc = unmap_and_move_huge_page(get_new_folio,
						      put_new_folio, private,
						      folio, pass > 2, mode,
						      reason, ret_folios);
			/*
			 * The rules are:
			 *	Success: hugetlb folio will be put back
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other folios, just exit.
				 */
				stats->nr_failed_pages += nr_pages + nr_retry_pages;
				return -ENOMEM;
			case -EAGAIN:
				retry++;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike -EAGAIN case, the failed folio is
				 * removed from migration folio list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				stats->nr_failed_pages += nr_pages;
				break;
			}
		}
	}
	/*
	 * nr_failed is the number of hugetlb folios that failed to be migrated.
	 * After NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried
	 * hugetlb folios as failed.
	 */
	nr_failed += retry;
	stats->nr_failed_pages += nr_retry_pages;

	return nr_failed;
}

/*
 * migrate_pages_batch() first unmaps as many folios in the from list as
 * possible, then moves the unmapped folios.
 *
 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
 * lock or bit while we have locked more than one folio, which may cause
 * deadlock (e.g., for loop device). So, if mode != MIGRATE_ASYNC, the
 * length of the from list must be <= 1.
 */
static int migrate_pages_batch(struct list_head *from,
		new_folio_t get_new_folio, free_folio_t put_new_folio,
		unsigned long private, enum migrate_mode mode, int reason,
		struct list_head *ret_folios, struct list_head *split_folios,
		struct migrate_pages_stats *stats, int nr_pass)
{
	int retry = 1;
	int thp_retry = 1;
	int nr_failed = 0;
	int nr_retry_pages = 0;
	int pass = 0;
	bool is_thp = false;
	struct folio *folio, *folio2, *dst = NULL, *dst2;
	int rc, rc_saved = 0, nr_pages;
	LIST_HEAD(unmap_folios);
	LIST_HEAD(dst_folios);
	bool nosplit = (reason == MR_NUMA_MISPLACED);

	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
			!list_empty(from) && !list_is_singular(from));

	for (pass = 0; pass < nr_pass && retry; pass++) {
		retry = 0;
		thp_retry = 0;
		nr_retry_pages = 0;

		list_for_each_entry_safe(folio, folio2, from, lru) {
			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
			nr_pages = folio_nr_pages(folio);

			cond_resched();

			/*
			 * Large folio migration might be unsupported or
			 * the allocation might fail, so we should retry
			 * on the same folio with the large folio split
			 * to normal folios.
			 *
			 * Split folios are put in split_folios, and
			 * we will migrate them after the rest of the
			 * list is processed.
			 */
			if (!thp_migration_supported() && is_thp) {
				nr_failed++;
				stats->nr_thp_failed++;
				if (!try_split_folio(folio, split_folios)) {
					stats->nr_thp_split++;
					continue;
				}
				stats->nr_failed_pages += nr_pages;
				list_move_tail(&folio->lru, ret_folios);
				continue;
			}

			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
					private, folio, &dst, mode, reason,
					ret_folios);
			/*
			 * The rules are:
			 *	Success: folio will be freed
			 *	Unmap: folio will be put on unmap_folios list,
			 *	       dst folio put on dst_folios list
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other folios, move unmapped folios, then exit.
				 */
				nr_failed++;
				stats->nr_thp_failed += is_thp;
				/* Large folio NUMA faulting doesn't split to retry. */
				if (folio_test_large(folio) && !nosplit) {
					int ret = try_split_folio(folio, split_folios);

					if (!ret) {
						stats->nr_thp_split += is_thp;
						break;
					} else if (reason == MR_LONGTERM_PIN &&
						   ret == -EAGAIN) {
						/*
						 * Try again to split large folio to
						 * mitigate the failure of longterm pinning.
						 */
						retry++;
						thp_retry += is_thp;
						nr_retry_pages += nr_pages;
						/* Undo duplicated failure counting. */
						nr_failed--;
						stats->nr_thp_failed -= is_thp;
						break;
					}
				}

				stats->nr_failed_pages += nr_pages + nr_retry_pages;
				/* nr_failed isn't updated for not used */
				stats->nr_thp_failed += thp_retry;
				rc_saved = rc;
				if (list_empty(&unmap_folios))
					goto out;
				else
					goto move;
			case -EAGAIN:
				retry++;
				thp_retry += is_thp;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				stats->nr_thp_succeeded += is_thp;
				break;
			case MIGRATEPAGE_UNMAP:
				list_move_tail(&folio->lru, &unmap_folios);
				list_add_tail(&dst->lru, &dst_folios);
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike -EAGAIN case, the failed folio is
				 * removed from migration folio list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				stats->nr_thp_failed += is_thp;
				stats->nr_failed_pages += nr_pages;
				break;
			}
		}
	}
	nr_failed += retry;
	stats->nr_thp_failed += thp_retry;
	stats->nr_failed_pages += nr_retry_pages;
move:
	/* Flush TLBs for all unmapped folios */
	try_to_unmap_flush();

	retry = 1;
	for (pass = 0; pass < nr_pass && retry; pass++) {
		retry = 0;
		thp_retry = 0;
		nr_retry_pages = 0;

		dst = list_first_entry(&dst_folios, struct folio, lru);
		dst2 = list_next_entry(dst, lru);
		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
			nr_pages = folio_nr_pages(folio);

			cond_resched();

			rc = migrate_folio_move(put_new_folio, private,
						folio, dst, mode,
						reason, ret_folios);
			/*
			 * The rules are:
			 *	Success: folio will be freed
			 *	-EAGAIN: stay on the unmap_folios list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -EAGAIN:
				retry++;
				thp_retry += is_thp;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				stats->nr_thp_succeeded += is_thp;
				break;
			default:
				nr_failed++;
				stats->nr_thp_failed += is_thp;
				stats->nr_failed_pages += nr_pages;
				break;
			}
			dst = dst2;
			dst2 = list_next_entry(dst, lru);
		}
	}
	nr_failed += retry;
	stats->nr_thp_failed += thp_retry;
	stats->nr_failed_pages += nr_retry_pages;

	rc = rc_saved ? : nr_failed;
out:
	/* Cleanup remaining folios */
	dst = list_first_entry(&dst_folios, struct folio, lru);
	dst2 = list_next_entry(dst, lru);
	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
		int page_was_mapped = 0;
		struct anon_vma *anon_vma = NULL;

		__migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
		migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
				       true, ret_folios);
		list_del(&dst->lru);
		migrate_folio_undo_dst(dst, true, put_new_folio, private);
		dst = dst2;
		dst2 = list_next_entry(dst, lru);
	}

	return rc;
}

static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		enum migrate_mode mode, int reason,
		struct list_head *ret_folios, struct list_head *split_folios,
		struct migrate_pages_stats *stats)
{
	int rc, nr_failed = 0;
	LIST_HEAD(folios);
	struct migrate_pages_stats astats;

	memset(&astats, 0, sizeof(astats));
	/* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
	rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
				 reason, &folios, split_folios, &astats,
				 NR_MAX_MIGRATE_ASYNC_RETRY);
	stats->nr_succeeded += astats.nr_succeeded;
	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
	stats->nr_thp_split += astats.nr_thp_split;
	if (rc < 0) {
		stats->nr_failed_pages += astats.nr_failed_pages;
		stats->nr_thp_failed += astats.nr_thp_failed;
		list_splice_tail(&folios, ret_folios);
		return rc;
	}
	stats->nr_thp_failed += astats.nr_thp_split;
	nr_failed += astats.nr_thp_split;
	/*
	 * Fall back to migrate all failed folios one by one synchronously. All
	 * failed folios except split THPs will be retried, so their failure
	 * isn't counted
	 */
	list_splice_tail_init(&folios, from);
	while (!list_empty(from)) {
		list_move(from->next, &folios);
		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
					 private, mode, reason, ret_folios,
					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
		list_splice_tail_init(&folios, ret_folios);
		if (rc < 0)
			return rc;
		nr_failed += rc;
	}

	return nr_failed;
}
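
/*
 * A typical call, mirroring do_move_pages_to_node() below: the allocation
 * callback is usually alloc_migration_target() together with a struct
 * migration_target_control describing the destination, e.g.
 *
 *	struct migration_target_control mtc = {
 *		.nid = node,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *	};
 *
 *	err = migrate_pages(pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
 *	if (err)
 *		putback_movable_pages(pagelist);
 */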
 */
int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
	int rc, rc_gather;
	int nr_pages;
	struct folio *folio, *folio2;
	LIST_HEAD(folios);
	LIST_HEAD(ret_folios);
	LIST_HEAD(split_folios);
	struct migrate_pages_stats stats;

	trace_mm_migrate_pages_start(mode, reason);

	memset(&stats, 0, sizeof(stats));

	rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
				     mode, reason, &stats, &ret_folios);
	if (rc_gather < 0)
		goto out;

again:
	nr_pages = 0;
	list_for_each_entry_safe(folio, folio2, from, lru) {
		/* Retried hugetlb folios will be kept in list */
		if (folio_test_hugetlb(folio)) {
			list_move_tail(&folio->lru, &ret_folios);
			continue;
		}

		nr_pages += folio_nr_pages(folio);
		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
			break;
	}
	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
		list_cut_before(&folios, from, &folio2->lru);
	else
		list_splice_init(from, &folios);
	if (mode == MIGRATE_ASYNC)
		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
				private, mode, reason, &ret_folios,
				&split_folios, &stats,
				NR_MAX_MIGRATE_PAGES_RETRY);
	else
		rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
				private, mode, reason, &ret_folios,
				&split_folios, &stats);
	list_splice_tail_init(&folios, &ret_folios);
	if (rc < 0) {
		rc_gather = rc;
		list_splice_tail(&split_folios, &ret_folios);
		goto out;
	}
	if (!list_empty(&split_folios)) {
		/*
		 * Failure isn't counted since all split folios of a large
		 * folio are already counted as one failure.  We only try to
		 * migrate with minimal effort: force MIGRATE_ASYNC mode and
		 * retry once.
		 */
		migrate_pages_batch(&split_folios, get_new_folio,
				    put_new_folio, private, MIGRATE_ASYNC, reason,
				    &ret_folios, NULL, &stats, 1);
		list_splice_tail_init(&split_folios, &ret_folios);
	}
	rc_gather += rc;
	if (!list_empty(from))
		goto again;
out:
	/*
	 * Put the permanently failed folios back on the migration list; the
	 * caller will put them back on the right list.
	 */
	list_splice(&ret_folios, from);

	/*
	 * Return 0 in case all split folios of fail-to-migrate large folios
	 * are migrated successfully.
	 */
	if (list_empty(from))
		rc_gather = 0;

	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
			       stats.nr_thp_succeeded, stats.nr_thp_failed,
			       stats.nr_thp_split, mode, reason);

	if (ret_succeeded)
		*ret_succeeded = stats.nr_succeeded;

	return rc_gather;
}

struct folio *alloc_migration_target(struct folio *src, unsigned long private)
{
	struct migration_target_control *mtc;
	gfp_t gfp_mask;
	unsigned int order = 0;
	int nid;
	int zidx;

	mtc = (struct migration_target_control *)private;
	gfp_mask = mtc->gfp_mask;
	nid = mtc->nid;
	if (nid == NUMA_NO_NODE)
		nid = folio_nid(src);

	if (folio_test_hugetlb(src)) {
		struct hstate *h = folio_hstate(src);

		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
		return alloc_hugetlb_folio_nodemask(h, nid,
						    mtc->nmask, gfp_mask);
	}

	if (folio_test_large(src)) {
		/*
		 * Clear __GFP_RECLAIM to make the migration callback
		 * consistent with regular THP allocations.
		 */
		gfp_mask &= ~__GFP_RECLAIM;
		gfp_mask |= GFP_TRANSHUGE;
		order = folio_order(src);
	}
	zidx = zone_idx(folio_zone(src));
	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
		gfp_mask |= __GFP_HIGHMEM;

	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
}

#ifdef CONFIG_NUMA

static int store_status(int __user *status, int start, int value, int nr)
{
	while (nr-- > 0) {
		if (put_user(value, status + start))
			return -EFAULT;
		start++;
	}

	return 0;
}

static int do_move_pages_to_node(struct mm_struct *mm,
		struct list_head *pagelist, int node)
{
	int err;
	struct migration_target_control mtc = {
		.nid = node,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}

/*
 * Resolves the given address to a struct page, isolates it from the LRU and
 * puts it on the given pagelist.
 * Returns:
 *     errno - if the page cannot be found/isolated
 *     0 - when it doesn't have to be migrated because it is already on the
 *         target node
 *     1 - when it has been queued
 */
static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
		int node, struct list_head *pagelist, bool migrate_all)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	struct page *page;
	int err;
	bool isolated;

	mmap_read_lock(mm);
	addr = (unsigned long)untagged_addr_remote(mm, p);

	err = -EFAULT;
	vma = vma_lookup(mm, addr);
	if (!vma || !vma_migratable(vma))
		goto out;

	/* FOLL_DUMP to ignore special (like zero) pages */
	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

	err = PTR_ERR(page);
	if (IS_ERR(page))
		goto out;

	err = -ENOENT;
	if (!page)
		goto out;

	if (is_zone_device_page(page))
		goto out_putpage;

	err = 0;
	if (page_to_nid(page) == node)
		goto out_putpage;

	err = -EACCES;
	if (page_mapcount(page) > 1 && !migrate_all)
		goto out_putpage;

	if (PageHuge(page)) {
		if (PageHead(page)) {
			isolated = isolate_hugetlb(page_folio(page), pagelist);
			err = isolated ? 1 : -EBUSY;
		}
	} else {
		struct page *head;

		head = compound_head(page);
		isolated = isolate_lru_page(head);
		if (!isolated) {
			err = -EBUSY;
			goto out_putpage;
		}

		err = 1;
		list_add_tail(&head->lru, pagelist);
		mod_node_page_state(page_pgdat(head),
			NR_ISOLATED_ANON + page_is_file_lru(head),
			thp_nr_pages(head));
	}
out_putpage:
	/*
	 * Either remove the duplicate refcount from
	 * isolate_lru_page() or drop the page ref if it was
	 * not isolated.
	 */
	put_page(page);
out:
	mmap_read_unlock(mm);
	return err;
}

static int move_pages_and_store_status(struct mm_struct *mm, int node,
		struct list_head *pagelist, int __user *status,
		int start, int i, unsigned long nr_pages)
{
	int err;

	if (list_empty(pagelist))
		return 0;

	err = do_move_pages_to_node(mm, pagelist, node);
	if (err) {
		/*
		 * A positive err means the number of pages that failed to
		 * migrate.  Since we are going to abort and return the number
		 * of non-migrated pages, we need to include the rest of the
		 * nr_pages that have not been attempted as well.
		 */
		if (err > 0)
			err += nr_pages - i;
		return err;
	}
	return store_status(status, start, node, i - start);
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill in
 * the corresponding array of status values.
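 *
 * Worked example (illustrative): with pages[] = {a, b, c} and
 * nodes[] = {1, 1, 2}, a and b are queued on a private pagelist; when c's
 * differing target node is seen, the queued pages are migrated to node 1
 * via move_pages_and_store_status(), and c is then queued for node 2 and
 * flushed at out_flush.  On success, status[i] receives the target node;
 * per-page lookup or isolation failures are reported as a negative errno
 * in status[i] without aborting the remaining pages.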
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	int current_node = NUMA_NO_NODE;
	LIST_HEAD(pagelist);
	int start, i;
	int err = 0, err1;

	lru_cache_disable();

	for (i = start = 0; i < nr_pages; i++) {
		const void __user *p;
		int node;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out_flush;
		if (get_user(node, nodes + i))
			goto out_flush;

		err = -ENODEV;
		if (node < 0 || node >= MAX_NUMNODES)
			goto out_flush;
		if (!node_state(node, N_MEMORY))
			goto out_flush;

		err = -EACCES;
		if (!node_isset(node, task_nodes))
			goto out_flush;

		if (current_node == NUMA_NO_NODE) {
			current_node = node;
			start = i;
		} else if (node != current_node) {
			err = move_pages_and_store_status(mm, current_node,
					&pagelist, status, start, i, nr_pages);
			if (err)
				goto out;
			start = i;
			current_node = node;
		}

		/*
		 * Errors in the page lookup or isolation are not fatal; we
		 * simply report them via the status array.
		 */
		err = add_page_for_migration(mm, p, current_node, &pagelist,
					     flags & MPOL_MF_MOVE_ALL);

		if (err > 0) {
			/* The page is successfully queued for migration */
			continue;
		}

		/*
		 * The move_pages() man page does not have an -EEXIST choice, so
		 * use -EFAULT instead.
		 */
		if (err == -EEXIST)
			err = -EFAULT;

		/*
		 * If the page is already on the target node (!err), store the
		 * node; otherwise, store the error.
		 */
		err = store_status(status, i, err ? : current_node, 1);
		if (err)
			goto out_flush;

		err = move_pages_and_store_status(mm, current_node, &pagelist,
				status, start, i, nr_pages);
		if (err) {
			/* We have accounted for page i */
			if (err > 0)
				err--;
			goto out;
		}
		current_node = NUMA_NO_NODE;
	}
out_flush:
	/* Make sure we do not overwrite the existing error */
	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
				status, start, i, nr_pages);
	if (err >= 0)
		err = err1;
out:
	lru_cache_enable();
	return err;
}

/*
 * Determine the nodes of an array of pages and store them in an array of
 * status values.
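 *
 * This backs the query mode of move_pages(2) (nodes == NULL): roughly, for
 * each address, status receives the node id of the backing page on success,
 * -ENOENT if no page is mapped there, or -EFAULT if the address does not
 * fall within a valid VMA (illustrative summary of the loop below).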
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	mmap_read_lock(mm);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = vma_lookup(mm, addr);
		if (!vma)
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		if (!is_zone_device_page(page))
			err = page_to_nid(page);

		put_page(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	mmap_read_unlock(mm);
}

static int get_compat_pages_array(const void __user *chunk_pages[],
				  const void __user * __user *pages,
				  unsigned long chunk_nr)
{
	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
	compat_uptr_t p;
	int i;

	for (i = 0; i < chunk_nr; i++) {
		if (get_user(p, pages32 + i))
			return -EFAULT;
		chunk_pages[i] = compat_ptr(p);
	}

	return 0;
}

/*
 * Determine the nodes of a user array of pages and store them in
 * a user array of status values.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16UL
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);

		if (in_compat_syscall()) {
			if (get_compat_pages_array(chunk_pages, pages,
						   chunk_nr))
				break;
		} else {
			if (copy_from_user(chunk_pages, pages,
					   chunk_nr * sizeof(*chunk_pages)))
				break;
		}

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
{
	struct task_struct *task;
	struct mm_struct *mm;

	/*
	 * There is no need to check whether the current process has the right
	 * to modify the specified process when they are the same.
	 */
	if (!pid) {
		mmget(current->mm);
		*mem_nodes = cpuset_mems_allowed(current);
		return current->mm;
	}

	/* Find the mm_struct */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		return ERR_PTR(-ESRCH);
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process.  Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		mm = ERR_PTR(-EPERM);
		goto out;
	}
	rcu_read_unlock();

	mm = ERR_PTR(security_task_movememory(task));
	if (IS_ERR(mm))
		goto out;
	*mem_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
out:
	put_task_struct(task);
	if (!mm)
		mm = ERR_PTR(-EINVAL);
	return mm;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
			     const void __user * __user *pages,
			     const int __user *nodes,
			     int __user *status, int flags)
{
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	mm = find_mm_struct(pid, &task_nodes);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;
}

SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages.  Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!managed_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages.
		 */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       ZONE_MOVABLE, 0))
			continue;
		return true;
	}
	return false;
}

static struct folio *alloc_misplaced_dst_folio(struct folio *src,
					       unsigned long data)
{
	int nid = (int) data;
	int order = folio_order(src);
	gfp_t gfp = __GFP_THISNODE;

	if (order > 0)
		gfp |= GFP_TRANSHUGE_LIGHT;
	else {
		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
			__GFP_NOWARN;
		gfp &= ~__GFP_RECLAIM;
	}
	return __folio_alloc_node(gfp, order, nid);
}

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int nr_pages = thp_nr_pages(page);
	int order = compound_order(page);

	VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);

	/* Do not migrate THP mapped by multiple processes */
	if (PageTransHuge(page) && total_mapcount(page) > 1)
		return 0;

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
		int z;

		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
			return 0;
		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
			if (managed_zone(pgdat->node_zones + z))
				break;
		}
		wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
		return 0;
	}

	if (!isolate_lru_page(page))
		return 0;

	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
			    nr_pages);

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}

/*
 * Attempt to migrate a misplaced page to the specified destination
 * node.  The caller is expected to have an elevated reference count on
 * the page, which will be dropped by this function before returning.
 */
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
			   int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	unsigned int nr_succeeded;
	LIST_HEAD(migratepages);
	int nr_pages = thp_nr_pages(page);

	/*
	 * Don't migrate file pages that are mapped in multiple processes
	 * with execute permissions, as they are probably shared libraries.
	 */
	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Also do not migrate dirty pages, as not all filesystems can move
	 * dirty pages in MIGRATE_ASYNC mode, which is a waste of cycles.
	 */
	if (page_is_file_lru(page) && PageDirty(page))
		goto out;

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated)
		goto out;

	list_add(&page->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
				     NULL, node, MIGRATE_ASYNC,
				     MR_NUMA_MISPLACED, &nr_succeeded);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&page->lru);
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -nr_pages);
			putback_lru_page(page);
		}
		isolated = 0;
	}
	if (nr_succeeded) {
		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
					    nr_succeeded);
	}
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_NUMA */
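
/*
 * Usage sketch from userspace (illustrative only; assumes glibc plus
 * libnuma's <numaif.h> move_pages() wrapper, linking with -lnuma, and an
 * online NUMA node 1).  The first call asks the kernel path above to move
 * one page to node 1; the second call, with nodes == NULL, only queries
 * the page's current node via do_pages_stat():
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long psz = sysconf(_SC_PAGESIZE);
 *		void *pages[1];
 *		int nodes[1] = { 1 };
 *		int status[1];
 *		void *buf;
 *
 *		if (posix_memalign(&buf, psz, psz))
 *			return 1;
 *		memset(buf, 0, psz);
 *		pages[0] = buf;
 *
 *		if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0)
 *			perror("move_pages");
 *		else
 *			printf("move status: %d\n", status[0]);
 *
 *		if (move_pages(0, 1, pages, NULL, status, 0) == 0)
 *			printf("page is now on node %d\n", status[0]);
 *
 *		free(buf);
 *		return 0;
 *	}
 */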