// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"

int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	const struct movable_operations *mops;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leak.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because page's owner
	 * assumes that nobody touches the PG_lock of a newly allocated page,
	 * so unconditionally grabbing the lock ruins the owner's side.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mops = page_movable_ops(page);
	VM_BUG_ON_PAGE(!mops, page);

	if (!mops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}

static void putback_movable_page(struct page *page)
{
	const struct movable_operations *mops = page_movable_ops(page);

	mops->putback_page(page);
	ClearPageIsolated(page);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated a non-LRU movable page, so here we can use
		 * __PageMovable because an LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -thp_nr_pages(page));
			putback_lru_page(page);
		}
	}
}
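
/*
 * Illustrative sketch (not part of the original file): the usual calling
 * pattern around the two helpers above, roughly as compaction uses them.
 * Error handling and the isolate_mode_t choice are elided and the names
 * alloc_fn/free_fn/private are placeholders, so treat this as an
 * assumption-laden outline rather than API documentation:
 *
 *	LIST_HEAD(pages);
 *
 *	if (!isolate_movable_page(page, mode))	// returns 0 on success
 *		list_add(&page->lru, &pages);
 *	...
 *	if (migrate_pages(&pages, alloc_fn, free_fn, private,
 *			  MIGRATE_SYNC, MR_COMPACTION, NULL))
 *		putback_movable_pages(&pages);
 */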

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *old)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		rmap_t rmap_flags = RMAP_NONE;
		pte_t pte;
		swp_entry_t entry;
		struct page *new;
		unsigned long idx = 0;

		/* pgoff is invalid for ksm pages, but they are never large */
		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
		new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		folio_get(folio);
		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (!is_migration_entry_young(entry))
			pte = pte_mkold(pte);
		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
			pte = pte_mkdirty(pte);
		if (is_writable_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(*pvmw.pte))
			pte = pte_mkuffd_wp(pte);

		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*pvmw.pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*pvmw.pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (folio_test_hugetlb(folio)) {
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (folio_test_anon(folio))
				hugepage_add_anon_rmap(new, vma, pvmw.address,
						       rmap_flags);
			else
				page_dup_file_rmap(new, true);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		} else
#endif
		{
			if (folio_test_anon(folio))
				page_add_anon_rmap(new, vma, pvmw.address,
						   rmap_flags);
			else
				page_add_file_rmap(new, vma, false);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_page_drain_local();

		trace_remove_migration_pte(pvmw.address, pte_val(pte),
					   compound_order(new));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = src,
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptep, ptl);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

#ifdef CONFIG_HUGETLB_PAGE
void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl)
{
	pte_t pte;

	spin_lock(ptl);
	pte = huge_ptep_get(ptep);

	if (unlikely(!is_hugetlb_entry_migration(pte)))
		spin_unlock(ptl);
	else
		migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);

	__migration_entry_wait_huge(pte, ptl);
}
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

static int folio_expected_refs(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;

	if (!mapping)
		return refs;

	refs += folio_nr_pages(folio);
	if (folio_test_private(folio))
		refs++;

	return refs;
}
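
/*
 * Worked example (added for illustration, not from the original source):
 * for an order-0 pagecache folio with buffer heads attached,
 * folio_expected_refs() returns 1 (base) + 1 (folio_nr_pages()) + 1
 * (private data) = 3, matching the "3 for pages with a mapping and
 * PagePrivate" case documented below for folio_migrate_mapping(). An
 * anonymous folio outside the swap cache has no mapping, so the expected
 * count is just 1.
 */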

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	xas_store(&xas, newfolio);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct folio *dst, struct folio *src)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(src));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + folio_has_private(src);
	if (!folio_ref_freeze(src, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	dst->index = src->index;
	dst->mapping = src->mapping;

	folio_get(dst);

	xas_store(&xas, dst);

	folio_ref_unfreeze(src, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	/*
	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
	 * migration entries. We can still have PG_anon_exclusive set on the
	 * effectively unmapped and unreferenced first sub-page of an
	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
	 */
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(&folio->page, -1);
	/*
	 * In memory tiering mode, when migrating between a slow and a fast
	 * memory node, reset cpupid, because it is used to record the page
	 * access time on the slow memory node.
	 */
	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
		bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
		bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));

		if (f_toptier != t_toptier)
			cpupid = -1;
	}
	page_cpupid_xchg_last(&newfolio->page, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
	 * bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	if (!folio_test_hugetlb(folio))
		mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not use PagePrivate/PagePrivate2.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	int rc;

	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, dst, src, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_folio);
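
/*
 * Usage sketch (an illustrative assumption, not taken from this file):
 * a filesystem whose folios carry no private data can usually point its
 * address_space_operations straight at migrate_folio(), e.g.
 *
 *	static const struct address_space_operations example_aops = {
 *		...
 *		.migrate_folio	= migrate_folio,
 *	};
 *
 * move_to_new_folio() below then reaches it through mapping->a_ops for
 * each locked, unmapped source folio. "example_aops" is a hypothetical
 * name used only for this sketch.
 */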

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks.
			 */
			struct buffer_head *failed_bh = bh;
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}

static int __buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	head = folio_buffers(src);
	if (!head)
		return migrate_folio(mapping, dst, src, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = folio_expected_refs(mapping, src);
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	folio_attach_private(dst, folio_detach_private(src));

	bh = head;
	do {
		set_bh_page(bh, &dst->page, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	return rc;
}

/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example, attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
#endif

int filemap_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int ret;

	ret = folio_migrate_mapping(mapping, dst, src, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_get_private(src))
		folio_attach_private(dst, folio_detach_private(src));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);
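
/*
 * Usage sketch (illustrative, not from the original file):
 * filemap_migrate_folio() suits mappings whose folio->private must follow
 * the folio but needs no other fixup; such filesystems can simply set
 *
 *	.migrate_folio = filemap_migrate_folio,
 *
 * in their address_space_operations. Block devices and filesystems with
 * buffer heads use buffer_migrate_folio{,_norefs}() above instead, and
 * mappings that provide no callback at all end up in
 * fallback_migrate_folio() below.
 */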

/*
 * Writeback a folio to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!folio_clear_dirty_for_io(folio))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty folio may imply that the underlying filesystem has
	 * the folio on some queue. So the folio must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * folio state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		folio_lock(folio);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	if (folio_test_dirty(src)) {
		/* Only writeback folios in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, src);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (folio_test_private(src) &&
	    !filemap_release_folio(src, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_folio(mapping, dst, src, mode);
}

/*
 * Move a page to a newly allocated page.
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_folio(struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(&src->page);

	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);

	if (likely(is_lru)) {
		struct address_space *mapping = folio_mapping(src);

		if (!mapping)
			rc = migrate_folio(mapping, dst, src, mode);
		else if (mapping->a_ops->migrate_folio)
			/*
			 * Most folios have a mapping and most filesystems
			 * provide a migrate_folio callback. Anonymous folios
			 * are part of swap space which also has its own
			 * migrate_folio callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
								mode);
		else
			rc = fallback_migrate_folio(mapping, dst, src, mode);
	} else {
		const struct movable_operations *mops;

		/*
		 * A non-LRU page could be released after the isolation step;
		 * in that case, we shouldn't try migration.
		 */
		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
		if (!folio_test_movable(src)) {
			rc = MIGRATEPAGE_SUCCESS;
			folio_clear_isolated(src);
			goto out;
		}

		mops = page_movable_ops(&src->page);
		rc = mops->migrate_page(&dst->page, &src->page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
				!folio_test_isolated(src));
	}

	/*
	 * When successful, old pagecache src->mapping must be cleared before
	 * src is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(&src->page)) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			folio_clear_isolated(src);
		}

		/*
		 * An anonymous or movable src->mapping will be cleared by
		 * free_pages_prepare(), so don't reset it here, in order to
		 * keep the type working (PageAnon, for example).
		 */
		if (!folio_mapping_flags(src))
			src->mapping = NULL;

		if (likely(!folio_is_zone_device(dst)))
			flush_dcache_folio(dst);
	}
out:
	return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	struct folio *folio = page_folio(page);
	struct folio *dst = page_folio(newpage);
	int rc = -EAGAIN;
	bool page_was_mapped = false;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(page);

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readahead). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * By try_to_migrate(), page->mapcount goes down to 0 here. In this
	 * case, we cannot notice that anon_vma is freed while we migrate a
	 * page. This get_anon_vma() delays freeing the anon_vma pointer until
	 * the end of migration. File cache pages are no problem because of
	 * page_lock(): file caches may use writepage() or lock_page() during
	 * migration, so only anon pages need this care here.
	 *
	 * Only page_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to newpage at this point. We used to have a BUG
	 * here if trylock_page(newpage) fails, but would like to allow for
	 * cases where there might be a race with the previous use of newpage.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_folio(dst, folio, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_cleanup_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(folio);
			goto out_unlock_both;
		}
	} else if (page_mapped(page)) {
		/* Establish migration ptes */
		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
				page);
		try_to_migrate(folio, 0);
		page_was_mapped = true;
	}

	if (!page_mapped(page))
		rc = move_to_new_folio(dst, folio, mode);

	/*
	 * When successful, push newpage to LRU immediately: so that if it
	 * turns out to be an mlocked page, remove_migration_ptes() will
	 * automatically build up the correct newpage->mlock_count for it.
	 *
	 * We would like to do something similar for the old page, when
	 * unsuccessful, and other cases when a page has been temporarily
	 * isolated from the unevictable LRU: but this case is the easiest.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		lru_cache_add(newpage);
		if (page_was_mapped)
			lru_add_drain();
	}

	if (page_was_mapped)
		remove_migration_ptes(folio,
			rc == MIGRATEPAGE_SUCCESS ? dst : folio, false);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(page);
out:
	/*
	 * If migration is successful, decrease the refcount of the newpage,
	 * which will not free the page because the new page owner increased
	 * the refcount.
	 */
	if (rc == MIGRATEPAGE_SUCCESS)
		put_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page,
				   free_page_t put_new_page,
				   unsigned long private, struct page *page,
				   int force, enum migrate_mode mode,
				   enum migrate_reason reason,
				   struct list_head *ret)
{
	int rc = MIGRATEPAGE_SUCCESS;
	struct page *newpage = NULL;

	if (!thp_migration_supported() && PageTransHuge(page))
		return -ENOSYS;

	if (page_count(page) == 1) {
		/* Page was freed from under us. So we are done. */
		ClearPageActive(page);
		ClearPageUnevictable(page);
		/* free_pages_prepare() will clear PG_isolated. */
		goto out;
	}

	newpage = get_new_page(page, private);
	if (!newpage)
		return -ENOMEM;

	newpage->private = 0;
	rc = __unmap_and_move(page, newpage, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		set_page_owner_migrate_reason(newpage, reason);

out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be restored.
		 */
		list_del(&page->lru);
	}

	/*
	 * If migration is successful, release the reference grabbed during
	 * isolation. Otherwise, restore the page to the right list unless
	 * we want to retry.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		/*
		 * Compaction can also migrate non-LRU pages, which are
		 * not accounted to NR_ISOLATED_*. They can be recognized
		 * as __PageMovable.
		 */
		if (likely(!__PageMovable(page)))
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -thp_nr_pages(page));

		if (reason != MR_MEMORY_FAILURE)
			/*
			 * We release the page in page_handle_poison.
			 */
			put_page(page);
	} else {
		if (rc != -EAGAIN)
			list_add_tail(&page->lru, ret);

		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
	}

	return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepages.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference count of the head page is 512 and a bit
 * more.) This means that when we try to migrate a hugepage whose subpages
 * are doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode, int reason,
				struct list_head *ret)
{
	struct folio *dst, *src = page_folio(hpage);
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;

	/*
	 * Migratability of hugepages depends on the architecture and on their
	 * size. This check is necessary because some callers of hugepage
	 * migration like soft offline and memory hotremove don't walk through
	 * page tables or check whether the hugepage is pmd-based or not before
	 * kicking migration.
	 */
	if (!hugepage_migration_supported(page_hstate(hpage)))
		return -ENOSYS;

	if (page_count(hpage) == 1) {
		/* page was freed from under us. So we are done. */
		putback_active_hugepage(hpage);
		return MIGRATEPAGE_SUCCESS;
	}

	new_hpage = get_new_page(hpage, private);
	if (!new_hpage)
		return -ENOMEM;
	dst = page_folio(new_hpage);

	if (!trylock_page(hpage)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		lock_page(hpage);
	}

	/*
	 * Check for pages which are in the process of being freed. Without
	 * page_mapping() set, the hugetlbfs specific move page routine will
	 * not be called and we could leak usage counts for subpools.
	 */
	if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	if (unlikely(!trylock_page(new_hpage)))
		goto put_anon;

	if (page_mapped(hpage)) {
		enum ttu_flags ttu = 0;

		if (!PageAnon(hpage)) {
			/*
			 * In shared mappings, try_to_unmap could potentially
			 * call huge_pmd_unshare. Because of this, take
			 * semaphore in write mode here and set TTU_RMAP_LOCKED
			 * to let lower levels know we have taken the lock.
			 */
			mapping = hugetlb_page_mapping_lock_write(hpage);
			if (unlikely(!mapping))
				goto unlock_put_anon;

			ttu = TTU_RMAP_LOCKED;
		}

		try_to_migrate(src, ttu);
		page_was_mapped = 1;

		if (ttu & TTU_RMAP_LOCKED)
			i_mmap_unlock_write(mapping);
	}

	if (!page_mapped(hpage))
		rc = move_to_new_folio(dst, src, mode);

	if (page_was_mapped)
		remove_migration_ptes(src,
			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

unlock_put_anon:
	unlock_page(new_hpage);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(hpage, new_hpage, reason);
		put_new_page = NULL;
	}

out_unlock:
	unlock_page(hpage);
out:
	if (rc == MIGRATEPAGE_SUCCESS)
		putback_active_hugepage(hpage);
	else if (rc != -EAGAIN)
		list_move_tail(&hpage->lru, ret);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it. Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_page)
		put_new_page(new_hpage, private);
	else
		putback_active_hugepage(new_hpage);

	return rc;
}

static inline int try_split_thp(struct page *page, struct list_head *split_pages)
{
	int rc;

	lock_page(page);
	rc = split_huge_page_to_list(page, split_pages);
	unlock_page(page);
	if (!rc)
		list_move_tail(&page->lru, split_pages);

	return rc;
}

/*
 * migrate_pages - migrate the pages specified in a list, to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @put_new_page:	The function used to free target pages if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 * @ret_succeeded:	Set to the number of normal pages migrated successfully if
 *			the caller passes a non-NULL pointer.
 *
 * The function returns after 10 attempts or if no pages are movable any more
 * because the list has become empty or no retryable pages remain.
 * It is the caller's responsibility to call putback_movable_pages() to return
 * pages to the LRU or free list only if ret != 0.
 *
 * Returns the number of {normal page, THP, hugetlb} pages that were not
 * migrated, or an error code. Each THP split is counted as one non-migrated
 * THP, no matter how many subpages of the THP are migrated successfully.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
	int retry = 1;
	int thp_retry = 1;
	int nr_failed = 0;
	int nr_failed_pages = 0;
	int nr_retry_pages = 0;
	int nr_succeeded = 0;
	int nr_thp_succeeded = 0;
	int nr_thp_failed = 0;
	int nr_thp_split = 0;
	int pass = 0;
	bool is_thp = false;
	struct page *page;
	struct page *page2;
	int rc, nr_subpages;
	LIST_HEAD(ret_pages);
	LIST_HEAD(thp_split_pages);
	bool nosplit = (reason == MR_NUMA_MISPLACED);
	bool no_subpage_counting = false;

	trace_mm_migrate_pages_start(mode, reason);

thp_subpage_migration:
	for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
		retry = 0;
		thp_retry = 0;
		nr_retry_pages = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			/*
			 * THP statistics are based on the source huge page.
			 * Capture required information that might get lost
			 * during migration.
			 */
			is_thp = PageTransHuge(page) && !PageHuge(page);
			nr_subpages = compound_nr(page);
			cond_resched();

			if (PageHuge(page))
				rc = unmap_and_move_huge_page(get_new_page,
						put_new_page, private, page,
						pass > 2, mode, reason,
						&ret_pages);
			else
				rc = unmap_and_move(get_new_page, put_new_page,
						private, page, pass > 2, mode,
						reason, &ret_pages);
			/*
			 * The rules are:
			 *	Success: non hugetlb page will be freed, hugetlb
			 *		 page will be put back
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	-ENOSYS: stay on the from list
			 *	Other errno: put on ret_pages list then splice to
			 *		     from list
			 */
			switch(rc) {
			/*
			 * THP migration might be unsupported or the
			 * allocation could've failed so we should
			 * retry on the same page with the THP split
			 * to base pages.
			 *
			 * Sub-pages are put in thp_split_pages, and
			 * we will migrate them after the rest of the
			 * list is processed.
			 */
			case -ENOSYS:
				/* THP migration is unsupported */
				if (is_thp) {
					nr_thp_failed++;
					if (!try_split_thp(page, &thp_split_pages)) {
						nr_thp_split++;
						break;
					}
				/* Hugetlb migration is unsupported */
				} else if (!no_subpage_counting) {
					nr_failed++;
				}

				nr_failed_pages += nr_subpages;
				list_move_tail(&page->lru, &ret_pages);
				break;
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other pages, just exit.
				 */
				if (is_thp) {
					nr_thp_failed++;
					/* THP NUMA faulting doesn't split THP to retry. */
					if (!nosplit && !try_split_thp(page, &thp_split_pages)) {
						nr_thp_split++;
						break;
					}
				} else if (!no_subpage_counting) {
					nr_failed++;
				}

				nr_failed_pages += nr_subpages + nr_retry_pages;
				/*
				 * There might be some subpages of fail-to-migrate THPs
				 * left in thp_split_pages list. Move them back to the
				 * migration list so that they can be put back to the
				 * right list by the caller, otherwise the page refcnt
				 * will be leaked.
				 */
				list_splice_init(&thp_split_pages, from);
				/* nr_failed isn't updated for not used */
				nr_thp_failed += thp_retry;
				goto out;
			case -EAGAIN:
				if (is_thp)
					thp_retry++;
				else if (!no_subpage_counting)
					retry++;
				nr_retry_pages += nr_subpages;
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded += nr_subpages;
				if (is_thp)
					nr_thp_succeeded++;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike -EAGAIN case, the failed page is
				 * removed from migration page list and not
				 * retried in the next outer loop.
				 */
				if (is_thp)
					nr_thp_failed++;
				else if (!no_subpage_counting)
					nr_failed++;

				nr_failed_pages += nr_subpages;
				break;
			}
		}
	}
	nr_failed += retry;
	nr_thp_failed += thp_retry;
	nr_failed_pages += nr_retry_pages;
	/*
	 * Try to migrate subpages of fail-to-migrate THPs, no nr_failed
	 * counting in this round, since all subpages of a THP are counted
	 * as one failure in the first round.
	 */
	if (!list_empty(&thp_split_pages)) {
		/*
		 * Move non-migrated pages (after 10 retries) to ret_pages
		 * to avoid migrating them again.
		 */
		list_splice_init(from, &ret_pages);
		list_splice_init(&thp_split_pages, from);
		no_subpage_counting = true;
		retry = 1;
		goto thp_subpage_migration;
	}

	rc = nr_failed + nr_thp_failed;
out:
	/*
	 * Put the pages that failed permanently back on the migration list;
	 * they will be put back to the right list by the caller.
	 */
	list_splice(&ret_pages, from);

	count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
	count_vm_events(PGMIGRATE_FAIL, nr_failed_pages);
	count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
	count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
	count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
	trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded,
			       nr_thp_failed, nr_thp_split, mode, reason);

	if (ret_succeeded)
		*ret_succeeded = nr_succeeded;

	return rc;
}
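
/*
 * Calling sketch (illustrative; the initialiser values are assumptions,
 * not copied from a real caller): in-kernel users usually pair
 * migrate_pages() with alloc_migration_target() and a
 * migration_target_control describing the destination, e.g.
 *
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE,
 *	};
 *	int err;
 *
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC,
 *			    MR_MEMORY_HOTPLUG, NULL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 *
 * do_move_pages_to_node() below follows exactly this pattern.
 */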

struct page *alloc_migration_target(struct page *page, unsigned long private)
{
	struct folio *folio = page_folio(page);
	struct migration_target_control *mtc;
	gfp_t gfp_mask;
	unsigned int order = 0;
	struct folio *new_folio = NULL;
	int nid;
	int zidx;

	mtc = (struct migration_target_control *)private;
	gfp_mask = mtc->gfp_mask;
	nid = mtc->nid;
	if (nid == NUMA_NO_NODE)
		nid = folio_nid(folio);

	if (folio_test_hugetlb(folio)) {
		struct hstate *h = page_hstate(&folio->page);

		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
	}

	if (folio_test_large(folio)) {
		/*
		 * clear __GFP_RECLAIM to make the migration callback
		 * consistent with regular THP allocations.
		 */
		gfp_mask &= ~__GFP_RECLAIM;
		gfp_mask |= GFP_TRANSHUGE;
		order = folio_order(folio);
	}
	zidx = zone_idx(folio_zone(folio));
	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
		gfp_mask |= __GFP_HIGHMEM;

	new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);

	return &new_folio->page;
}

#ifdef CONFIG_NUMA

static int store_status(int __user *status, int start, int value, int nr)
{
	while (nr-- > 0) {
		if (put_user(value, status + start))
			return -EFAULT;
		start++;
	}

	return 0;
}

static int do_move_pages_to_node(struct mm_struct *mm,
		struct list_head *pagelist, int node)
{
	int err;
	struct migration_target_control mtc = {
		.nid = node,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}

/*
 * Resolves the given address to a struct page, isolates it from the LRU and
 * puts it on the given pagelist.
 * Returns:
 *     errno - if the page cannot be found/isolated
 *     0 - when it doesn't have to be migrated because it is already on the
 *         target node
 *     1 - when it has been queued
 */
static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
		int node, struct list_head *pagelist, bool migrate_all)
{
	struct vm_area_struct *vma;
	struct page *page;
	int err;

	mmap_read_lock(mm);
	err = -EFAULT;
	vma = vma_lookup(mm, addr);
	if (!vma || !vma_migratable(vma))
		goto out;

	/* FOLL_DUMP to ignore special (like zero) pages */
	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

	err = PTR_ERR(page);
	if (IS_ERR(page))
		goto out;

	err = -ENOENT;
	if (!page || is_zone_device_page(page))
		goto out;

	err = 0;
	if (page_to_nid(page) == node)
		goto out_putpage;

	err = -EACCES;
	if (page_mapcount(page) > 1 && !migrate_all)
		goto out_putpage;

	if (PageHuge(page)) {
		if (PageHead(page)) {
			err = isolate_hugetlb(page, pagelist);
			if (!err)
				err = 1;
		}
	} else {
		struct page *head;

		head = compound_head(page);
		err = isolate_lru_page(head);
		if (err)
			goto out_putpage;

		err = 1;
		list_add_tail(&head->lru, pagelist);
		mod_node_page_state(page_pgdat(head),
			NR_ISOLATED_ANON + page_is_file_lru(head),
			thp_nr_pages(head));
	}
out_putpage:
	/*
	 * Either remove the duplicate refcount from
	 * isolate_lru_page() or drop the page ref if it was
	 * not isolated.
	 */
	put_page(page);
out:
	mmap_read_unlock(mm);
	return err;
}

static int move_pages_and_store_status(struct mm_struct *mm, int node,
		struct list_head *pagelist, int __user *status,
		int start, int i, unsigned long nr_pages)
{
	int err;

	if (list_empty(pagelist))
		return 0;

	err = do_move_pages_to_node(mm, pagelist, node);
	if (err) {
		/*
		 * A positive err means the number of pages that failed
		 * to migrate. Since we are going to abort and return the
		 * number of non-migrated pages, we need to include the
		 * rest of the nr_pages that have not been attempted as
		 * well.
		 */
		if (err > 0)
			err += nr_pages - i;
		return err;
	}
	return store_status(status, start, node, i - start);
}

/*
 * Migrate an array of page addresses to an array of nodes and fill in
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	int current_node = NUMA_NO_NODE;
	LIST_HEAD(pagelist);
	int start, i;
	int err = 0, err1;

	lru_cache_disable();

	for (i = start = 0; i < nr_pages; i++) {
		const void __user *p;
		unsigned long addr;
		int node;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out_flush;
		if (get_user(node, nodes + i))
			goto out_flush;
		addr = (unsigned long)untagged_addr(p);

		err = -ENODEV;
		if (node < 0 || node >= MAX_NUMNODES)
			goto out_flush;
		if (!node_state(node, N_MEMORY))
			goto out_flush;

		err = -EACCES;
		if (!node_isset(node, task_nodes))
			goto out_flush;

		if (current_node == NUMA_NO_NODE) {
			current_node = node;
			start = i;
		} else if (node != current_node) {
			err = move_pages_and_store_status(mm, current_node,
					&pagelist, status, start, i, nr_pages);
			if (err)
				goto out;
			start = i;
			current_node = node;
		}

		/*
		 * Errors in the page lookup or isolation are not fatal and we
		 * simply report them via status.
		 */
		err = add_page_for_migration(mm, addr, current_node,
				&pagelist, flags & MPOL_MF_MOVE_ALL);

		if (err > 0) {
			/* The page is successfully queued for migration */
			continue;
		}

		/*
		 * The move_pages() man page does not have an -EEXIST choice, so
		 * use -EFAULT instead.
		 */
		if (err == -EEXIST)
			err = -EFAULT;

		/*
		 * If the page is already on the target node (!err), store the
		 * node, otherwise, store the err.
		 */
		err = store_status(status, i, err ? : current_node, 1);
		if (err)
			goto out_flush;

		err = move_pages_and_store_status(mm, current_node, &pagelist,
				status, start, i, nr_pages);
		if (err) {
			/* We have accounted for page i */
			if (err > 0)
				err--;
			goto out;
		}
		current_node = NUMA_NO_NODE;
	}
out_flush:
	/* Make sure we do not overwrite the existing error */
	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
				status, start, i, nr_pages);
	if (err >= 0)
		err = err1;
out:
	lru_cache_enable();
	return err;
}

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	mmap_read_lock(mm);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		unsigned int foll_flags = FOLL_DUMP;
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = vma_lookup(mm, addr);
		if (!vma)
			goto set_status;

		/* Not all huge page follow APIs support 'FOLL_GET' */
		if (!is_vm_hugetlb_page(vma))
			foll_flags |= FOLL_GET;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, foll_flags);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		if (page && !is_zone_device_page(page)) {
			err = page_to_nid(page);
			if (foll_flags & FOLL_GET)
				put_page(page);
		} else {
			err = -ENOENT;
		}
set_status:
		*status = err;

		pages++;
		status++;
	}

	mmap_read_unlock(mm);
}

static int get_compat_pages_array(const void __user *chunk_pages[],
				  const void __user * __user *pages,
				  unsigned long chunk_nr)
{
	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
	compat_uptr_t p;
	int i;

	for (i = 0; i < chunk_nr; i++) {
		if (get_user(p, pages32 + i))
			return -EFAULT;
		chunk_pages[i] = compat_ptr(p);
	}

	return 0;
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16UL
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);

		if (in_compat_syscall()) {
			if (get_compat_pages_array(chunk_pages, pages,
						   chunk_nr))
				break;
		} else {
			if (copy_from_user(chunk_pages, pages,
				      chunk_nr * sizeof(*chunk_pages)))
				break;
		}

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
{
	struct task_struct *task;
	struct mm_struct *mm;

	/*
	 * There is no need to check if the current process has the right to
	 * modify the specified process when they are the same.
	 */
	if (!pid) {
		mmget(current->mm);
		*mem_nodes = cpuset_mems_allowed(current);
		return current->mm;
	}

	/* Find the mm_struct */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		return ERR_PTR(-ESRCH);
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		mm = ERR_PTR(-EPERM);
		goto out;
	}
	rcu_read_unlock();

	mm = ERR_PTR(security_task_movememory(task));
	if (IS_ERR(mm))
		goto out;
	*mem_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
out:
	put_task_struct(task);
	if (!mm)
		mm = ERR_PTR(-EINVAL);
	return mm;
}

/*
 * Move a list of pages in the address space of the specified process.
 */
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
			     const void __user * __user *pages,
			     const int __user *nodes,
			     int __user *status, int flags)
{
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	mm = find_mm_struct(pid, &task_nodes);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;
}

SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}
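
/*
 * A minimal userspace sketch of exercising the syscall above through
 * libnuma's move_pages(3) wrapper (illustrative only, not part of this file;
 * it assumes <numaif.h> and linking with -lnuma, and the destination node 1
 * and page count are arbitrary example values).  Passing NULL for 'nodes'
 * would instead take the query path handled by do_pages_stat() above.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long page_size = sysconf(_SC_PAGESIZE);
 *		unsigned long count = 4;
 *		void *pages[4];
 *		int nodes[4], status[4];
 *		unsigned long i;
 *		char *buf;
 *
 *		// Back the addresses with real, faulted-in pages first.
 *		buf = aligned_alloc(page_size, count * page_size);
 *		memset(buf, 0, count * page_size);
 *
 *		for (i = 0; i < count; i++) {
 *			pages[i] = buf + i * page_size;
 *			nodes[i] = 1;		// requested destination node
 *		}
 *
 *		// pid 0 means "the calling process", as in find_mm_struct().
 *		if (move_pages(0, count, pages, nodes, status,
 *			       MPOL_MF_MOVE) < 0) {
 *			perror("move_pages");
 *			return 1;
 *		}
 *
 *		for (i = 0; i < count; i++)
 *			printf("page %lu: %d\n", i, status[i]);
 *		return 0;
 *	}
 */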

#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!managed_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       ZONE_MOVABLE, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					     unsigned long data)
{
	int nid = (int) data;
	int order = compound_order(page);
	gfp_t gfp = __GFP_THISNODE;
	struct folio *new;

	if (order > 0)
		gfp |= GFP_TRANSHUGE_LIGHT;
	else {
		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
			__GFP_NOWARN;
		gfp &= ~__GFP_RECLAIM;
	}
	new = __folio_alloc_node(gfp, order, nid);

	return &new->page;
}

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int nr_pages = thp_nr_pages(page);
	int order = compound_order(page);

	VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);

	/* Do not migrate THP mapped by multiple processes */
	if (PageTransHuge(page) && total_mapcount(page) > 1)
		return 0;

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
		int z;

		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
			return 0;
		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
			if (managed_zone(pgdat->node_zones + z))
				break;
		}
		wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
		return 0;
	}

	if (isolate_lru_page(page))
		return 0;

	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
			    nr_pages);

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}

/*
 * Attempt to migrate a misplaced page to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the page that will be dropped by this function before returning.
 */
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
			   int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	unsigned int nr_succeeded;
	LIST_HEAD(migratepages);
	int nr_pages = thp_nr_pages(page);

	/*
	 * Don't migrate file pages that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 */
	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Also do not migrate dirty pages as not all filesystems can move
	 * dirty pages in MIGRATE_ASYNC mode, which is a waste of cycles.
	 */
	if (page_is_file_lru(page) && PageDirty(page))
		goto out;

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated)
		goto out;

	list_add(&page->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
				     NULL, node, MIGRATE_ASYNC,
				     MR_NUMA_MISPLACED, &nr_succeeded);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&page->lru);
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -nr_pages);
			putback_lru_page(page);
		}
		isolated = 0;
	}
	if (nr_succeeded) {
		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
					    nr_succeeded);
	}
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_NUMA */