1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Memory Migration functionality - linux/mm/migrate.c 4 * 5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter 6 * 7 * Page migration was first developed in the context of the memory hotplug 8 * project. The main authors of the migration code are: 9 * 10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp> 11 * Hirokazu Takahashi <taka@valinux.co.jp> 12 * Dave Hansen <haveblue@us.ibm.com> 13 * Christoph Lameter 14 */ 15 16 #include <linux/migrate.h> 17 #include <linux/export.h> 18 #include <linux/swap.h> 19 #include <linux/swapops.h> 20 #include <linux/pagemap.h> 21 #include <linux/buffer_head.h> 22 #include <linux/mm_inline.h> 23 #include <linux/nsproxy.h> 24 #include <linux/pagevec.h> 25 #include <linux/ksm.h> 26 #include <linux/rmap.h> 27 #include <linux/topology.h> 28 #include <linux/cpu.h> 29 #include <linux/cpuset.h> 30 #include <linux/writeback.h> 31 #include <linux/mempolicy.h> 32 #include <linux/vmalloc.h> 33 #include <linux/security.h> 34 #include <linux/backing-dev.h> 35 #include <linux/compaction.h> 36 #include <linux/syscalls.h> 37 #include <linux/compat.h> 38 #include <linux/hugetlb.h> 39 #include <linux/hugetlb_cgroup.h> 40 #include <linux/gfp.h> 41 #include <linux/pagewalk.h> 42 #include <linux/pfn_t.h> 43 #include <linux/memremap.h> 44 #include <linux/userfaultfd_k.h> 45 #include <linux/balloon_compaction.h> 46 #include <linux/mmu_notifier.h> 47 #include <linux/page_idle.h> 48 #include <linux/page_owner.h> 49 #include <linux/sched/mm.h> 50 #include <linux/ptrace.h> 51 #include <linux/oom.h> 52 #include <linux/memory.h> 53 #include <linux/random.h> 54 55 #include <asm/tlbflush.h> 56 57 #define CREATE_TRACE_POINTS 58 #include <trace/events/migrate.h> 59 60 #include "internal.h" 61 62 int isolate_movable_page(struct page *page, isolate_mode_t mode) 63 { 64 struct address_space *mapping; 65 66 /* 67 * Avoid burning cycles with pages that are yet under __free_pages(), 68 * or just got freed under us. 69 * 70 * In case we 'win' a race for a movable page being freed under us and 71 * raise its refcount preventing __free_pages() from doing its job 72 * the put_page() at the end of this block will take care of 73 * release this page, thus avoiding a nasty leakage. 74 */ 75 if (unlikely(!get_page_unless_zero(page))) 76 goto out; 77 78 /* 79 * Check PageMovable before holding a PG_lock because page's owner 80 * assumes anybody doesn't touch PG_lock of newly allocated page 81 * so unconditionally grabbing the lock ruins page's owner side. 82 */ 83 if (unlikely(!__PageMovable(page))) 84 goto out_putpage; 85 /* 86 * As movable pages are not isolated from LRU lists, concurrent 87 * compaction threads can race against page migration functions 88 * as well as race against the releasing a page. 89 * 90 * In order to avoid having an already isolated movable page 91 * being (wrongly) re-isolated while it is under migration, 92 * or to avoid attempting to isolate pages being released, 93 * lets be sure we have the page lock 94 * before proceeding with the movable page isolation steps. 
95 */ 96 if (unlikely(!trylock_page(page))) 97 goto out_putpage; 98 99 if (!PageMovable(page) || PageIsolated(page)) 100 goto out_no_isolated; 101 102 mapping = page_mapping(page); 103 VM_BUG_ON_PAGE(!mapping, page); 104 105 if (!mapping->a_ops->isolate_page(page, mode)) 106 goto out_no_isolated; 107 108 /* Driver shouldn't use PG_isolated bit of page->flags */ 109 WARN_ON_ONCE(PageIsolated(page)); 110 __SetPageIsolated(page); 111 unlock_page(page); 112 113 return 0; 114 115 out_no_isolated: 116 unlock_page(page); 117 out_putpage: 118 put_page(page); 119 out: 120 return -EBUSY; 121 } 122 123 static void putback_movable_page(struct page *page) 124 { 125 struct address_space *mapping; 126 127 mapping = page_mapping(page); 128 mapping->a_ops->putback_page(page); 129 __ClearPageIsolated(page); 130 } 131 132 /* 133 * Put previously isolated pages back onto the appropriate lists 134 * from where they were once taken off for compaction/migration. 135 * 136 * This function shall be used whenever the isolated pageset has been 137 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range() 138 * and isolate_huge_page(). 139 */ 140 void putback_movable_pages(struct list_head *l) 141 { 142 struct page *page; 143 struct page *page2; 144 145 list_for_each_entry_safe(page, page2, l, lru) { 146 if (unlikely(PageHuge(page))) { 147 putback_active_hugepage(page); 148 continue; 149 } 150 list_del(&page->lru); 151 /* 152 * We isolated non-lru movable page so here we can use 153 * __PageMovable because LRU page's mapping cannot have 154 * PAGE_MAPPING_MOVABLE. 155 */ 156 if (unlikely(__PageMovable(page))) { 157 VM_BUG_ON_PAGE(!PageIsolated(page), page); 158 lock_page(page); 159 if (PageMovable(page)) 160 putback_movable_page(page); 161 else 162 __ClearPageIsolated(page); 163 unlock_page(page); 164 put_page(page); 165 } else { 166 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 167 page_is_file_lru(page), -thp_nr_pages(page)); 168 putback_lru_page(page); 169 } 170 } 171 } 172 173 /* 174 * Restore a potential migration pte to a working pte entry 175 */ 176 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, 177 unsigned long addr, void *old) 178 { 179 struct page_vma_mapped_walk pvmw = { 180 .page = old, 181 .vma = vma, 182 .address = addr, 183 .flags = PVMW_SYNC | PVMW_MIGRATION, 184 }; 185 struct page *new; 186 pte_t pte; 187 swp_entry_t entry; 188 189 VM_BUG_ON_PAGE(PageTail(page), page); 190 while (page_vma_mapped_walk(&pvmw)) { 191 if (PageKsm(page)) 192 new = page; 193 else 194 new = page - pvmw.page->index + 195 linear_page_index(vma, pvmw.address); 196 197 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 198 /* PMD-mapped THP migration entry */ 199 if (!pvmw.pte) { 200 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); 201 remove_migration_pmd(&pvmw, new); 202 continue; 203 } 204 #endif 205 206 get_page(new); 207 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); 208 if (pte_swp_soft_dirty(*pvmw.pte)) 209 pte = pte_mksoft_dirty(pte); 210 211 /* 212 * Recheck VMA as permissions can change since migration started 213 */ 214 entry = pte_to_swp_entry(*pvmw.pte); 215 if (is_writable_migration_entry(entry)) 216 pte = maybe_mkwrite(pte, vma); 217 else if (pte_swp_uffd_wp(*pvmw.pte)) 218 pte = pte_mkuffd_wp(pte); 219 220 if (unlikely(is_device_private_page(new))) { 221 if (pte_write(pte)) 222 entry = make_writable_device_private_entry( 223 page_to_pfn(new)); 224 else 225 entry = make_readable_device_private_entry( 226 page_to_pfn(new)); 227 pte = 
swp_entry_to_pte(entry); 228 if (pte_swp_soft_dirty(*pvmw.pte)) 229 pte = pte_swp_mksoft_dirty(pte); 230 if (pte_swp_uffd_wp(*pvmw.pte)) 231 pte = pte_swp_mkuffd_wp(pte); 232 } 233 234 #ifdef CONFIG_HUGETLB_PAGE 235 if (PageHuge(new)) { 236 unsigned int shift = huge_page_shift(hstate_vma(vma)); 237 238 pte = pte_mkhuge(pte); 239 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 240 if (PageAnon(new)) 241 hugepage_add_anon_rmap(new, vma, pvmw.address); 242 else 243 page_dup_rmap(new, true); 244 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); 245 } else 246 #endif 247 { 248 if (PageAnon(new)) 249 page_add_anon_rmap(new, vma, pvmw.address, false); 250 else 251 page_add_file_rmap(new, false); 252 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); 253 } 254 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new)) 255 mlock_vma_page(new); 256 257 if (PageTransHuge(page) && PageMlocked(page)) 258 clear_page_mlock(page); 259 260 /* No need to invalidate - it was non-present before */ 261 update_mmu_cache(vma, pvmw.address, pvmw.pte); 262 } 263 264 return true; 265 } 266 267 /* 268 * Get rid of all migration entries and replace them by 269 * references to the indicated page. 270 */ 271 void remove_migration_ptes(struct page *old, struct page *new, bool locked) 272 { 273 struct rmap_walk_control rwc = { 274 .rmap_one = remove_migration_pte, 275 .arg = old, 276 }; 277 278 if (locked) 279 rmap_walk_locked(new, &rwc); 280 else 281 rmap_walk(new, &rwc); 282 } 283 284 /* 285 * Something used the pte of a page under migration. We need to 286 * get to the page and wait until migration is finished. 287 * When we return from this function the fault will be retried. 288 */ 289 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, 290 spinlock_t *ptl) 291 { 292 pte_t pte; 293 swp_entry_t entry; 294 295 spin_lock(ptl); 296 pte = *ptep; 297 if (!is_swap_pte(pte)) 298 goto out; 299 300 entry = pte_to_swp_entry(pte); 301 if (!is_migration_entry(entry)) 302 goto out; 303 304 migration_entry_wait_on_locked(entry, ptep, ptl); 305 return; 306 out: 307 pte_unmap_unlock(ptep, ptl); 308 } 309 310 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 311 unsigned long address) 312 { 313 spinlock_t *ptl = pte_lockptr(mm, pmd); 314 pte_t *ptep = pte_offset_map(pmd, address); 315 __migration_entry_wait(mm, ptep, ptl); 316 } 317 318 void migration_entry_wait_huge(struct vm_area_struct *vma, 319 struct mm_struct *mm, pte_t *pte) 320 { 321 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte); 322 __migration_entry_wait(mm, pte, ptl); 323 } 324 325 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 326 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd) 327 { 328 spinlock_t *ptl; 329 330 ptl = pmd_lock(mm, pmd); 331 if (!is_pmd_migration_entry(*pmd)) 332 goto unlock; 333 migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl); 334 return; 335 unlock: 336 spin_unlock(ptl); 337 } 338 #endif 339 340 static int expected_page_refs(struct address_space *mapping, struct page *page) 341 { 342 int expected_count = 1; 343 344 /* 345 * Device private pages have an extra refcount as they are 346 * ZONE_DEVICE pages. 347 */ 348 expected_count += is_device_private_page(page); 349 if (mapping) 350 expected_count += compound_nr(page) + page_has_private(page); 351 352 return expected_count; 353 } 354 355 /* 356 * Replace the page in the mapping. 
357 * 358 * The number of remaining references must be: 359 * 1 for anonymous pages without a mapping 360 * 2 for pages with a mapping 361 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. 362 */ 363 int folio_migrate_mapping(struct address_space *mapping, 364 struct folio *newfolio, struct folio *folio, int extra_count) 365 { 366 XA_STATE(xas, &mapping->i_pages, folio_index(folio)); 367 struct zone *oldzone, *newzone; 368 int dirty; 369 int expected_count = expected_page_refs(mapping, &folio->page) + extra_count; 370 long nr = folio_nr_pages(folio); 371 372 if (!mapping) { 373 /* Anonymous page without mapping */ 374 if (folio_ref_count(folio) != expected_count) 375 return -EAGAIN; 376 377 /* No turning back from here */ 378 newfolio->index = folio->index; 379 newfolio->mapping = folio->mapping; 380 if (folio_test_swapbacked(folio)) 381 __folio_set_swapbacked(newfolio); 382 383 return MIGRATEPAGE_SUCCESS; 384 } 385 386 oldzone = folio_zone(folio); 387 newzone = folio_zone(newfolio); 388 389 xas_lock_irq(&xas); 390 if (!folio_ref_freeze(folio, expected_count)) { 391 xas_unlock_irq(&xas); 392 return -EAGAIN; 393 } 394 395 /* 396 * Now we know that no one else is looking at the folio: 397 * no turning back from here. 398 */ 399 newfolio->index = folio->index; 400 newfolio->mapping = folio->mapping; 401 folio_ref_add(newfolio, nr); /* add cache reference */ 402 if (folio_test_swapbacked(folio)) { 403 __folio_set_swapbacked(newfolio); 404 if (folio_test_swapcache(folio)) { 405 folio_set_swapcache(newfolio); 406 newfolio->private = folio_get_private(folio); 407 } 408 } else { 409 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); 410 } 411 412 /* Move dirty while page refs frozen and newpage not yet exposed */ 413 dirty = folio_test_dirty(folio); 414 if (dirty) { 415 folio_clear_dirty(folio); 416 folio_set_dirty(newfolio); 417 } 418 419 xas_store(&xas, newfolio); 420 421 /* 422 * Drop cache reference from old page by unfreezing 423 * to one less reference. 424 * We know this isn't the last reference. 425 */ 426 folio_ref_unfreeze(folio, expected_count - nr); 427 428 xas_unlock(&xas); 429 /* Leave irq disabled to prevent preemption while updating stats */ 430 431 /* 432 * If moved to a different zone then also account 433 * the page for that zone. Other VM counters will be 434 * taken care of when we establish references to the 435 * new page and drop references to the old page. 436 * 437 * Note that anonymous pages are accounted for 438 * via NR_FILE_PAGES and NR_ANON_MAPPED if they 439 * are mapped to swap space. 
440 */ 441 if (newzone != oldzone) { 442 struct lruvec *old_lruvec, *new_lruvec; 443 struct mem_cgroup *memcg; 444 445 memcg = folio_memcg(folio); 446 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat); 447 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat); 448 449 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr); 450 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr); 451 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) { 452 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr); 453 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr); 454 } 455 #ifdef CONFIG_SWAP 456 if (folio_test_swapcache(folio)) { 457 __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr); 458 __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr); 459 } 460 #endif 461 if (dirty && mapping_can_writeback(mapping)) { 462 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr); 463 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr); 464 __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr); 465 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr); 466 } 467 } 468 local_irq_enable(); 469 470 return MIGRATEPAGE_SUCCESS; 471 } 472 EXPORT_SYMBOL(folio_migrate_mapping); 473 474 /* 475 * The expected number of remaining references is the same as that 476 * of folio_migrate_mapping(). 477 */ 478 int migrate_huge_page_move_mapping(struct address_space *mapping, 479 struct page *newpage, struct page *page) 480 { 481 XA_STATE(xas, &mapping->i_pages, page_index(page)); 482 int expected_count; 483 484 xas_lock_irq(&xas); 485 expected_count = 2 + page_has_private(page); 486 if (page_count(page) != expected_count || xas_load(&xas) != page) { 487 xas_unlock_irq(&xas); 488 return -EAGAIN; 489 } 490 491 if (!page_ref_freeze(page, expected_count)) { 492 xas_unlock_irq(&xas); 493 return -EAGAIN; 494 } 495 496 newpage->index = page->index; 497 newpage->mapping = page->mapping; 498 499 get_page(newpage); 500 501 xas_store(&xas, newpage); 502 503 page_ref_unfreeze(page, expected_count - 1); 504 505 xas_unlock_irq(&xas); 506 507 return MIGRATEPAGE_SUCCESS; 508 } 509 510 /* 511 * Copy the flags and some other ancillary information 512 */ 513 void folio_migrate_flags(struct folio *newfolio, struct folio *folio) 514 { 515 int cpupid; 516 517 if (folio_test_error(folio)) 518 folio_set_error(newfolio); 519 if (folio_test_referenced(folio)) 520 folio_set_referenced(newfolio); 521 if (folio_test_uptodate(folio)) 522 folio_mark_uptodate(newfolio); 523 if (folio_test_clear_active(folio)) { 524 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio); 525 folio_set_active(newfolio); 526 } else if (folio_test_clear_unevictable(folio)) 527 folio_set_unevictable(newfolio); 528 if (folio_test_workingset(folio)) 529 folio_set_workingset(newfolio); 530 if (folio_test_checked(folio)) 531 folio_set_checked(newfolio); 532 if (folio_test_mappedtodisk(folio)) 533 folio_set_mappedtodisk(newfolio); 534 535 /* Move dirty on pages not done by folio_migrate_mapping() */ 536 if (folio_test_dirty(folio)) 537 folio_set_dirty(newfolio); 538 539 if (folio_test_young(folio)) 540 folio_set_young(newfolio); 541 if (folio_test_idle(folio)) 542 folio_set_idle(newfolio); 543 544 /* 545 * Copy NUMA information to the new page, to prevent over-eager 546 * future migrations of this same page. 
547 */ 548 cpupid = page_cpupid_xchg_last(&folio->page, -1); 549 page_cpupid_xchg_last(&newfolio->page, cpupid); 550 551 folio_migrate_ksm(newfolio, folio); 552 /* 553 * Please do not reorder this without considering how mm/ksm.c's 554 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache(). 555 */ 556 if (folio_test_swapcache(folio)) 557 folio_clear_swapcache(folio); 558 folio_clear_private(folio); 559 560 /* page->private contains hugetlb specific flags */ 561 if (!folio_test_hugetlb(folio)) 562 folio->private = NULL; 563 564 /* 565 * If any waiters have accumulated on the new page then 566 * wake them up. 567 */ 568 if (folio_test_writeback(newfolio)) 569 folio_end_writeback(newfolio); 570 571 /* 572 * PG_readahead shares the same bit with PG_reclaim. The above 573 * end_page_writeback() may clear PG_readahead mistakenly, so set the 574 * bit after that. 575 */ 576 if (folio_test_readahead(folio)) 577 folio_set_readahead(newfolio); 578 579 folio_copy_owner(newfolio, folio); 580 581 if (!folio_test_hugetlb(folio)) 582 mem_cgroup_migrate(folio, newfolio); 583 } 584 EXPORT_SYMBOL(folio_migrate_flags); 585 586 void folio_migrate_copy(struct folio *newfolio, struct folio *folio) 587 { 588 folio_copy(newfolio, folio); 589 folio_migrate_flags(newfolio, folio); 590 } 591 EXPORT_SYMBOL(folio_migrate_copy); 592 593 /************************************************************ 594 * Migration functions 595 ***********************************************************/ 596 597 /* 598 * Common logic to directly migrate a single LRU page suitable for 599 * pages that do not use PagePrivate/PagePrivate2. 600 * 601 * Pages are locked upon entry and exit. 602 */ 603 int migrate_page(struct address_space *mapping, 604 struct page *newpage, struct page *page, 605 enum migrate_mode mode) 606 { 607 struct folio *newfolio = page_folio(newpage); 608 struct folio *folio = page_folio(page); 609 int rc; 610 611 BUG_ON(folio_test_writeback(folio)); /* Writeback must be complete */ 612 613 rc = folio_migrate_mapping(mapping, newfolio, folio, 0); 614 615 if (rc != MIGRATEPAGE_SUCCESS) 616 return rc; 617 618 if (mode != MIGRATE_SYNC_NO_COPY) 619 folio_migrate_copy(newfolio, folio); 620 else 621 folio_migrate_flags(newfolio, folio); 622 return MIGRATEPAGE_SUCCESS; 623 } 624 EXPORT_SYMBOL(migrate_page); 625 626 #ifdef CONFIG_BLOCK 627 /* Returns true if all buffers are successfully locked */ 628 static bool buffer_migrate_lock_buffers(struct buffer_head *head, 629 enum migrate_mode mode) 630 { 631 struct buffer_head *bh = head; 632 633 /* Simple case, sync compaction */ 634 if (mode != MIGRATE_ASYNC) { 635 do { 636 lock_buffer(bh); 637 bh = bh->b_this_page; 638 639 } while (bh != head); 640 641 return true; 642 } 643 644 /* async case, we cannot block on lock_buffer so use trylock_buffer */ 645 do { 646 if (!trylock_buffer(bh)) { 647 /* 648 * We failed to lock the buffer and cannot stall in 649 * async migration. 
Release the taken locks 650 */ 651 struct buffer_head *failed_bh = bh; 652 bh = head; 653 while (bh != failed_bh) { 654 unlock_buffer(bh); 655 bh = bh->b_this_page; 656 } 657 return false; 658 } 659 660 bh = bh->b_this_page; 661 } while (bh != head); 662 return true; 663 } 664 665 static int __buffer_migrate_page(struct address_space *mapping, 666 struct page *newpage, struct page *page, enum migrate_mode mode, 667 bool check_refs) 668 { 669 struct buffer_head *bh, *head; 670 int rc; 671 int expected_count; 672 673 if (!page_has_buffers(page)) 674 return migrate_page(mapping, newpage, page, mode); 675 676 /* Check whether page does not have extra refs before we do more work */ 677 expected_count = expected_page_refs(mapping, page); 678 if (page_count(page) != expected_count) 679 return -EAGAIN; 680 681 head = page_buffers(page); 682 if (!buffer_migrate_lock_buffers(head, mode)) 683 return -EAGAIN; 684 685 if (check_refs) { 686 bool busy; 687 bool invalidated = false; 688 689 recheck_buffers: 690 busy = false; 691 spin_lock(&mapping->private_lock); 692 bh = head; 693 do { 694 if (atomic_read(&bh->b_count)) { 695 busy = true; 696 break; 697 } 698 bh = bh->b_this_page; 699 } while (bh != head); 700 if (busy) { 701 if (invalidated) { 702 rc = -EAGAIN; 703 goto unlock_buffers; 704 } 705 spin_unlock(&mapping->private_lock); 706 invalidate_bh_lrus(); 707 invalidated = true; 708 goto recheck_buffers; 709 } 710 } 711 712 rc = migrate_page_move_mapping(mapping, newpage, page, 0); 713 if (rc != MIGRATEPAGE_SUCCESS) 714 goto unlock_buffers; 715 716 attach_page_private(newpage, detach_page_private(page)); 717 718 bh = head; 719 do { 720 set_bh_page(bh, newpage, bh_offset(bh)); 721 bh = bh->b_this_page; 722 723 } while (bh != head); 724 725 if (mode != MIGRATE_SYNC_NO_COPY) 726 migrate_page_copy(newpage, page); 727 else 728 migrate_page_states(newpage, page); 729 730 rc = MIGRATEPAGE_SUCCESS; 731 unlock_buffers: 732 if (check_refs) 733 spin_unlock(&mapping->private_lock); 734 bh = head; 735 do { 736 unlock_buffer(bh); 737 bh = bh->b_this_page; 738 739 } while (bh != head); 740 741 return rc; 742 } 743 744 /* 745 * Migration function for pages with buffers. This function can only be used 746 * if the underlying filesystem guarantees that no other references to "page" 747 * exist. For example attached buffer heads are accessed only under page lock. 748 */ 749 int buffer_migrate_page(struct address_space *mapping, 750 struct page *newpage, struct page *page, enum migrate_mode mode) 751 { 752 return __buffer_migrate_page(mapping, newpage, page, mode, false); 753 } 754 EXPORT_SYMBOL(buffer_migrate_page); 755 756 /* 757 * Same as above except that this variant is more careful and checks that there 758 * are also no buffer head references. This function is the right one for 759 * mappings where buffer heads are directly looked up and referenced (such as 760 * block device mappings). 
761 */ 762 int buffer_migrate_page_norefs(struct address_space *mapping, 763 struct page *newpage, struct page *page, enum migrate_mode mode) 764 { 765 return __buffer_migrate_page(mapping, newpage, page, mode, true); 766 } 767 #endif 768 769 /* 770 * Writeback a page to clean the dirty state 771 */ 772 static int writeout(struct address_space *mapping, struct page *page) 773 { 774 struct writeback_control wbc = { 775 .sync_mode = WB_SYNC_NONE, 776 .nr_to_write = 1, 777 .range_start = 0, 778 .range_end = LLONG_MAX, 779 .for_reclaim = 1 780 }; 781 int rc; 782 783 if (!mapping->a_ops->writepage) 784 /* No write method for the address space */ 785 return -EINVAL; 786 787 if (!clear_page_dirty_for_io(page)) 788 /* Someone else already triggered a write */ 789 return -EAGAIN; 790 791 /* 792 * A dirty page may imply that the underlying filesystem has 793 * the page on some queue. So the page must be clean for 794 * migration. Writeout may mean we loose the lock and the 795 * page state is no longer what we checked for earlier. 796 * At this point we know that the migration attempt cannot 797 * be successful. 798 */ 799 remove_migration_ptes(page, page, false); 800 801 rc = mapping->a_ops->writepage(page, &wbc); 802 803 if (rc != AOP_WRITEPAGE_ACTIVATE) 804 /* unlocked. Relock */ 805 lock_page(page); 806 807 return (rc < 0) ? -EIO : -EAGAIN; 808 } 809 810 /* 811 * Default handling if a filesystem does not provide a migration function. 812 */ 813 static int fallback_migrate_page(struct address_space *mapping, 814 struct page *newpage, struct page *page, enum migrate_mode mode) 815 { 816 if (PageDirty(page)) { 817 /* Only writeback pages in full synchronous migration */ 818 switch (mode) { 819 case MIGRATE_SYNC: 820 case MIGRATE_SYNC_NO_COPY: 821 break; 822 default: 823 return -EBUSY; 824 } 825 return writeout(mapping, page); 826 } 827 828 /* 829 * Buffers may be managed in a filesystem specific way. 830 * We must have no buffers or drop them. 831 */ 832 if (page_has_private(page) && 833 !try_to_release_page(page, GFP_KERNEL)) 834 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY; 835 836 return migrate_page(mapping, newpage, page, mode); 837 } 838 839 /* 840 * Move a page to a newly allocated page 841 * The page is locked and all ptes have been successfully removed. 842 * 843 * The new page will have replaced the old page if this function 844 * is successful. 845 * 846 * Return value: 847 * < 0 - error code 848 * MIGRATEPAGE_SUCCESS - success 849 */ 850 static int move_to_new_page(struct page *newpage, struct page *page, 851 enum migrate_mode mode) 852 { 853 struct address_space *mapping; 854 int rc = -EAGAIN; 855 bool is_lru = !__PageMovable(page); 856 857 VM_BUG_ON_PAGE(!PageLocked(page), page); 858 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 859 860 mapping = page_mapping(page); 861 862 if (likely(is_lru)) { 863 if (!mapping) 864 rc = migrate_page(mapping, newpage, page, mode); 865 else if (mapping->a_ops->migratepage) 866 /* 867 * Most pages have a mapping and most filesystems 868 * provide a migratepage callback. Anonymous pages 869 * are part of swap space which also has its own 870 * migratepage callback. This is the most common path 871 * for page migration. 872 */ 873 rc = mapping->a_ops->migratepage(mapping, newpage, 874 page, mode); 875 else 876 rc = fallback_migrate_page(mapping, newpage, 877 page, mode); 878 } else { 879 /* 880 * In case of non-lru page, it could be released after 881 * isolation step. In that case, we shouldn't try migration. 
882 */ 883 VM_BUG_ON_PAGE(!PageIsolated(page), page); 884 if (!PageMovable(page)) { 885 rc = MIGRATEPAGE_SUCCESS; 886 __ClearPageIsolated(page); 887 goto out; 888 } 889 890 rc = mapping->a_ops->migratepage(mapping, newpage, 891 page, mode); 892 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS && 893 !PageIsolated(page)); 894 } 895 896 /* 897 * When successful, old pagecache page->mapping must be cleared before 898 * page is freed; but stats require that PageAnon be left as PageAnon. 899 */ 900 if (rc == MIGRATEPAGE_SUCCESS) { 901 if (__PageMovable(page)) { 902 VM_BUG_ON_PAGE(!PageIsolated(page), page); 903 904 /* 905 * We clear PG_movable under page_lock so any compactor 906 * cannot try to migrate this page. 907 */ 908 __ClearPageIsolated(page); 909 } 910 911 /* 912 * Anonymous and movable page->mapping will be cleared by 913 * free_pages_prepare so don't reset it here for keeping 914 * the type to work PageAnon, for example. 915 */ 916 if (!PageMappingFlags(page)) 917 page->mapping = NULL; 918 919 if (likely(!is_zone_device_page(newpage))) 920 flush_dcache_folio(page_folio(newpage)); 921 } 922 out: 923 return rc; 924 } 925 926 static int __unmap_and_move(struct page *page, struct page *newpage, 927 int force, enum migrate_mode mode) 928 { 929 int rc = -EAGAIN; 930 bool page_was_mapped = false; 931 struct anon_vma *anon_vma = NULL; 932 bool is_lru = !__PageMovable(page); 933 934 if (!trylock_page(page)) { 935 if (!force || mode == MIGRATE_ASYNC) 936 goto out; 937 938 /* 939 * It's not safe for direct compaction to call lock_page. 940 * For example, during page readahead pages are added locked 941 * to the LRU. Later, when the IO completes the pages are 942 * marked uptodate and unlocked. However, the queueing 943 * could be merging multiple pages for one bio (e.g. 944 * mpage_readahead). If an allocation happens for the 945 * second or third page, the process can end up locking 946 * the same page twice and deadlocking. Rather than 947 * trying to be clever about what pages can be locked, 948 * avoid the use of lock_page for direct compaction 949 * altogether. 950 */ 951 if (current->flags & PF_MEMALLOC) 952 goto out; 953 954 lock_page(page); 955 } 956 957 if (PageWriteback(page)) { 958 /* 959 * Only in the case of a full synchronous migration is it 960 * necessary to wait for PageWriteback. In the async case, 961 * the retry loop is too short and in the sync-light case, 962 * the overhead of stalling is too much 963 */ 964 switch (mode) { 965 case MIGRATE_SYNC: 966 case MIGRATE_SYNC_NO_COPY: 967 break; 968 default: 969 rc = -EBUSY; 970 goto out_unlock; 971 } 972 if (!force) 973 goto out_unlock; 974 wait_on_page_writeback(page); 975 } 976 977 /* 978 * By try_to_migrate(), page->mapcount goes down to 0 here. In this case, 979 * we cannot notice that anon_vma is freed while we migrates a page. 980 * This get_anon_vma() delays freeing anon_vma pointer until the end 981 * of migration. File cache pages are no problem because of page_lock() 982 * File Caches may use write_page() or lock_page() in migration, then, 983 * just care Anon page here. 984 * 985 * Only page_get_anon_vma() understands the subtleties of 986 * getting a hold on an anon_vma from outside one of its mms. 987 * But if we cannot get anon_vma, then we won't need it anyway, 988 * because that implies that the anon page is no longer mapped 989 * (and cannot be remapped so long as we hold the page lock). 
990 */ 991 if (PageAnon(page) && !PageKsm(page)) 992 anon_vma = page_get_anon_vma(page); 993 994 /* 995 * Block others from accessing the new page when we get around to 996 * establishing additional references. We are usually the only one 997 * holding a reference to newpage at this point. We used to have a BUG 998 * here if trylock_page(newpage) fails, but would like to allow for 999 * cases where there might be a race with the previous use of newpage. 1000 * This is much like races on refcount of oldpage: just don't BUG(). 1001 */ 1002 if (unlikely(!trylock_page(newpage))) 1003 goto out_unlock; 1004 1005 if (unlikely(!is_lru)) { 1006 rc = move_to_new_page(newpage, page, mode); 1007 goto out_unlock_both; 1008 } 1009 1010 /* 1011 * Corner case handling: 1012 * 1. When a new swap-cache page is read into, it is added to the LRU 1013 * and treated as swapcache but it has no rmap yet. 1014 * Calling try_to_unmap() against a page->mapping==NULL page will 1015 * trigger a BUG. So handle it here. 1016 * 2. An orphaned page (see truncate_cleanup_page) might have 1017 * fs-private metadata. The page can be picked up due to memory 1018 * offlining. Everywhere else except page reclaim, the page is 1019 * invisible to the vm, so the page can not be migrated. So try to 1020 * free the metadata, so the page can be freed. 1021 */ 1022 if (!page->mapping) { 1023 VM_BUG_ON_PAGE(PageAnon(page), page); 1024 if (page_has_private(page)) { 1025 try_to_free_buffers(page); 1026 goto out_unlock_both; 1027 } 1028 } else if (page_mapped(page)) { 1029 /* Establish migration ptes */ 1030 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, 1031 page); 1032 try_to_migrate(page, 0); 1033 page_was_mapped = true; 1034 } 1035 1036 if (!page_mapped(page)) 1037 rc = move_to_new_page(newpage, page, mode); 1038 1039 if (page_was_mapped) 1040 remove_migration_ptes(page, 1041 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false); 1042 1043 out_unlock_both: 1044 unlock_page(newpage); 1045 out_unlock: 1046 /* Drop an anon_vma reference if we took one */ 1047 if (anon_vma) 1048 put_anon_vma(anon_vma); 1049 unlock_page(page); 1050 out: 1051 /* 1052 * If migration is successful, decrease refcount of the newpage 1053 * which will not free the page because new page owner increased 1054 * refcounter. As well, if it is LRU page, add the page to LRU 1055 * list in here. Use the old state of the isolated source page to 1056 * determine if we migrated a LRU page. newpage was already unlocked 1057 * and possibly modified by its owner - don't rely on the page 1058 * state. 1059 */ 1060 if (rc == MIGRATEPAGE_SUCCESS) { 1061 if (unlikely(!is_lru)) 1062 put_page(newpage); 1063 else 1064 putback_lru_page(newpage); 1065 } 1066 1067 return rc; 1068 } 1069 1070 /* 1071 * Obtain the lock on page, remove all ptes and migrate the page 1072 * to the newly allocated page in newpage. 1073 */ 1074 static int unmap_and_move(new_page_t get_new_page, 1075 free_page_t put_new_page, 1076 unsigned long private, struct page *page, 1077 int force, enum migrate_mode mode, 1078 enum migrate_reason reason, 1079 struct list_head *ret) 1080 { 1081 int rc = MIGRATEPAGE_SUCCESS; 1082 struct page *newpage = NULL; 1083 1084 if (!thp_migration_supported() && PageTransHuge(page)) 1085 return -ENOSYS; 1086 1087 if (page_count(page) == 1) { 1088 /* page was freed from under us. So we are done. 
*/ 1089 ClearPageActive(page); 1090 ClearPageUnevictable(page); 1091 if (unlikely(__PageMovable(page))) { 1092 lock_page(page); 1093 if (!PageMovable(page)) 1094 __ClearPageIsolated(page); 1095 unlock_page(page); 1096 } 1097 goto out; 1098 } 1099 1100 newpage = get_new_page(page, private); 1101 if (!newpage) 1102 return -ENOMEM; 1103 1104 rc = __unmap_and_move(page, newpage, force, mode); 1105 if (rc == MIGRATEPAGE_SUCCESS) 1106 set_page_owner_migrate_reason(newpage, reason); 1107 1108 out: 1109 if (rc != -EAGAIN) { 1110 /* 1111 * A page that has been migrated has all references 1112 * removed and will be freed. A page that has not been 1113 * migrated will have kept its references and be restored. 1114 */ 1115 list_del(&page->lru); 1116 } 1117 1118 /* 1119 * If migration is successful, releases reference grabbed during 1120 * isolation. Otherwise, restore the page to right list unless 1121 * we want to retry. 1122 */ 1123 if (rc == MIGRATEPAGE_SUCCESS) { 1124 /* 1125 * Compaction can migrate also non-LRU pages which are 1126 * not accounted to NR_ISOLATED_*. They can be recognized 1127 * as __PageMovable 1128 */ 1129 if (likely(!__PageMovable(page))) 1130 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 1131 page_is_file_lru(page), -thp_nr_pages(page)); 1132 1133 if (reason != MR_MEMORY_FAILURE) 1134 /* 1135 * We release the page in page_handle_poison. 1136 */ 1137 put_page(page); 1138 } else { 1139 if (rc != -EAGAIN) 1140 list_add_tail(&page->lru, ret); 1141 1142 if (put_new_page) 1143 put_new_page(newpage, private); 1144 else 1145 put_page(newpage); 1146 } 1147 1148 return rc; 1149 } 1150 1151 /* 1152 * Counterpart of unmap_and_move_page() for hugepage migration. 1153 * 1154 * This function doesn't wait the completion of hugepage I/O 1155 * because there is no race between I/O and migration for hugepage. 1156 * Note that currently hugepage I/O occurs only in direct I/O 1157 * where no lock is held and PG_writeback is irrelevant, 1158 * and writeback status of all subpages are counted in the reference 1159 * count of the head page (i.e. if all subpages of a 2MB hugepage are 1160 * under direct I/O, the reference of the head page is 512 and a bit more.) 1161 * This means that when we try to migrate hugepage whose subpages are 1162 * doing direct I/O, some references remain after try_to_unmap() and 1163 * hugepage migration fails without data corruption. 1164 * 1165 * There is also no race when direct I/O is issued on the page under migration, 1166 * because then pte is replaced with migration swap entry and direct I/O code 1167 * will wait in the page fault for migration to complete. 1168 */ 1169 static int unmap_and_move_huge_page(new_page_t get_new_page, 1170 free_page_t put_new_page, unsigned long private, 1171 struct page *hpage, int force, 1172 enum migrate_mode mode, int reason, 1173 struct list_head *ret) 1174 { 1175 int rc = -EAGAIN; 1176 int page_was_mapped = 0; 1177 struct page *new_hpage; 1178 struct anon_vma *anon_vma = NULL; 1179 struct address_space *mapping = NULL; 1180 1181 /* 1182 * Migratability of hugepages depends on architectures and their size. 1183 * This check is necessary because some callers of hugepage migration 1184 * like soft offline and memory hotremove don't walk through page 1185 * tables or check whether the hugepage is pmd-based or not before 1186 * kicking migration. 
1187 */ 1188 if (!hugepage_migration_supported(page_hstate(hpage))) { 1189 list_move_tail(&hpage->lru, ret); 1190 return -ENOSYS; 1191 } 1192 1193 if (page_count(hpage) == 1) { 1194 /* page was freed from under us. So we are done. */ 1195 putback_active_hugepage(hpage); 1196 return MIGRATEPAGE_SUCCESS; 1197 } 1198 1199 new_hpage = get_new_page(hpage, private); 1200 if (!new_hpage) 1201 return -ENOMEM; 1202 1203 if (!trylock_page(hpage)) { 1204 if (!force) 1205 goto out; 1206 switch (mode) { 1207 case MIGRATE_SYNC: 1208 case MIGRATE_SYNC_NO_COPY: 1209 break; 1210 default: 1211 goto out; 1212 } 1213 lock_page(hpage); 1214 } 1215 1216 /* 1217 * Check for pages which are in the process of being freed. Without 1218 * page_mapping() set, hugetlbfs specific move page routine will not 1219 * be called and we could leak usage counts for subpools. 1220 */ 1221 if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) { 1222 rc = -EBUSY; 1223 goto out_unlock; 1224 } 1225 1226 if (PageAnon(hpage)) 1227 anon_vma = page_get_anon_vma(hpage); 1228 1229 if (unlikely(!trylock_page(new_hpage))) 1230 goto put_anon; 1231 1232 if (page_mapped(hpage)) { 1233 bool mapping_locked = false; 1234 enum ttu_flags ttu = 0; 1235 1236 if (!PageAnon(hpage)) { 1237 /* 1238 * In shared mappings, try_to_unmap could potentially 1239 * call huge_pmd_unshare. Because of this, take 1240 * semaphore in write mode here and set TTU_RMAP_LOCKED 1241 * to let lower levels know we have taken the lock. 1242 */ 1243 mapping = hugetlb_page_mapping_lock_write(hpage); 1244 if (unlikely(!mapping)) 1245 goto unlock_put_anon; 1246 1247 mapping_locked = true; 1248 ttu |= TTU_RMAP_LOCKED; 1249 } 1250 1251 try_to_migrate(hpage, ttu); 1252 page_was_mapped = 1; 1253 1254 if (mapping_locked) 1255 i_mmap_unlock_write(mapping); 1256 } 1257 1258 if (!page_mapped(hpage)) 1259 rc = move_to_new_page(new_hpage, hpage, mode); 1260 1261 if (page_was_mapped) 1262 remove_migration_ptes(hpage, 1263 rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false); 1264 1265 unlock_put_anon: 1266 unlock_page(new_hpage); 1267 1268 put_anon: 1269 if (anon_vma) 1270 put_anon_vma(anon_vma); 1271 1272 if (rc == MIGRATEPAGE_SUCCESS) { 1273 move_hugetlb_state(hpage, new_hpage, reason); 1274 put_new_page = NULL; 1275 } 1276 1277 out_unlock: 1278 unlock_page(hpage); 1279 out: 1280 if (rc == MIGRATEPAGE_SUCCESS) 1281 putback_active_hugepage(hpage); 1282 else if (rc != -EAGAIN) 1283 list_move_tail(&hpage->lru, ret); 1284 1285 /* 1286 * If migration was not successful and there's a freeing callback, use 1287 * it. Otherwise, put_page() will drop the reference grabbed during 1288 * isolation. 1289 */ 1290 if (put_new_page) 1291 put_new_page(new_hpage, private); 1292 else 1293 putback_active_hugepage(new_hpage); 1294 1295 return rc; 1296 } 1297 1298 static inline int try_split_thp(struct page *page, struct page **page2, 1299 struct list_head *from) 1300 { 1301 int rc = 0; 1302 1303 lock_page(page); 1304 rc = split_huge_page_to_list(page, from); 1305 unlock_page(page); 1306 if (!rc) 1307 list_safe_reset_next(page, *page2, lru); 1308 1309 return rc; 1310 } 1311 1312 /* 1313 * migrate_pages - migrate the pages specified in a list, to the free pages 1314 * supplied as the target for the page migration 1315 * 1316 * @from: The list of pages to be migrated. 1317 * @get_new_page: The function used to allocate free pages to be used 1318 * as the target of the page migration. 
1319 * @put_new_page: The function used to free target pages if migration 1320 * fails, or NULL if no special handling is necessary. 1321 * @private: Private data to be passed on to get_new_page() 1322 * @mode: The migration mode that specifies the constraints for 1323 * page migration, if any. 1324 * @reason: The reason for page migration. 1325 * @ret_succeeded: Set to the number of normal pages migrated successfully if 1326 * the caller passes a non-NULL pointer. 1327 * 1328 * The function returns after 10 attempts or if no pages are movable any more 1329 * because the list has become empty or no retryable pages exist any more. 1330 * It is caller's responsibility to call putback_movable_pages() to return pages 1331 * to the LRU or free list only if ret != 0. 1332 * 1333 * Returns the number of {normal page, THP, hugetlb} that were not migrated, or 1334 * an error code. The number of THP splits will be considered as the number of 1335 * non-migrated THP, no matter how many subpages of the THP are migrated successfully. 1336 */ 1337 int migrate_pages(struct list_head *from, new_page_t get_new_page, 1338 free_page_t put_new_page, unsigned long private, 1339 enum migrate_mode mode, int reason, unsigned int *ret_succeeded) 1340 { 1341 int retry = 1; 1342 int thp_retry = 1; 1343 int nr_failed = 0; 1344 int nr_failed_pages = 0; 1345 int nr_succeeded = 0; 1346 int nr_thp_succeeded = 0; 1347 int nr_thp_failed = 0; 1348 int nr_thp_split = 0; 1349 int pass = 0; 1350 bool is_thp = false; 1351 struct page *page; 1352 struct page *page2; 1353 int swapwrite = current->flags & PF_SWAPWRITE; 1354 int rc, nr_subpages; 1355 LIST_HEAD(ret_pages); 1356 LIST_HEAD(thp_split_pages); 1357 bool nosplit = (reason == MR_NUMA_MISPLACED); 1358 bool no_subpage_counting = false; 1359 1360 trace_mm_migrate_pages_start(mode, reason); 1361 1362 if (!swapwrite) 1363 current->flags |= PF_SWAPWRITE; 1364 1365 thp_subpage_migration: 1366 for (pass = 0; pass < 10 && (retry || thp_retry); pass++) { 1367 retry = 0; 1368 thp_retry = 0; 1369 1370 list_for_each_entry_safe(page, page2, from, lru) { 1371 retry: 1372 /* 1373 * THP statistics is based on the source huge page. 1374 * Capture required information that might get lost 1375 * during migration. 1376 */ 1377 is_thp = PageTransHuge(page) && !PageHuge(page); 1378 nr_subpages = compound_nr(page); 1379 cond_resched(); 1380 1381 if (PageHuge(page)) 1382 rc = unmap_and_move_huge_page(get_new_page, 1383 put_new_page, private, page, 1384 pass > 2, mode, reason, 1385 &ret_pages); 1386 else 1387 rc = unmap_and_move(get_new_page, put_new_page, 1388 private, page, pass > 2, mode, 1389 reason, &ret_pages); 1390 /* 1391 * The rules are: 1392 * Success: non hugetlb page will be freed, hugetlb 1393 * page will be put back 1394 * -EAGAIN: stay on the from list 1395 * -ENOMEM: stay on the from list 1396 * Other errno: put on ret_pages list then splice to 1397 * from list 1398 */ 1399 switch(rc) { 1400 /* 1401 * THP migration might be unsupported or the 1402 * allocation could've failed so we should 1403 * retry on the same page with the THP split 1404 * to base pages. 1405 * 1406 * Head page is retried immediately and tail 1407 * pages are added to the tail of the list so 1408 * we encounter them after the rest of the list 1409 * is processed. 
1410 */ 1411 case -ENOSYS: 1412 /* THP migration is unsupported */ 1413 if (is_thp) { 1414 nr_thp_failed++; 1415 if (!try_split_thp(page, &page2, &thp_split_pages)) { 1416 nr_thp_split++; 1417 goto retry; 1418 } 1419 1420 nr_failed_pages += nr_subpages; 1421 break; 1422 } 1423 1424 /* Hugetlb migration is unsupported */ 1425 if (!no_subpage_counting) 1426 nr_failed++; 1427 nr_failed_pages += nr_subpages; 1428 break; 1429 case -ENOMEM: 1430 /* 1431 * When memory is low, don't bother to try to migrate 1432 * other pages, just exit. 1433 * THP NUMA faulting doesn't split THP to retry. 1434 */ 1435 if (is_thp && !nosplit) { 1436 nr_thp_failed++; 1437 if (!try_split_thp(page, &page2, &thp_split_pages)) { 1438 nr_thp_split++; 1439 goto retry; 1440 } 1441 1442 nr_failed_pages += nr_subpages; 1443 goto out; 1444 } 1445 1446 if (!no_subpage_counting) 1447 nr_failed++; 1448 nr_failed_pages += nr_subpages; 1449 goto out; 1450 case -EAGAIN: 1451 if (is_thp) { 1452 thp_retry++; 1453 break; 1454 } 1455 retry++; 1456 break; 1457 case MIGRATEPAGE_SUCCESS: 1458 nr_succeeded += nr_subpages; 1459 if (is_thp) { 1460 nr_thp_succeeded++; 1461 break; 1462 } 1463 break; 1464 default: 1465 /* 1466 * Permanent failure (-EBUSY, etc.): 1467 * unlike -EAGAIN case, the failed page is 1468 * removed from migration page list and not 1469 * retried in the next outer loop. 1470 */ 1471 if (is_thp) { 1472 nr_thp_failed++; 1473 nr_failed_pages += nr_subpages; 1474 break; 1475 } 1476 1477 if (!no_subpage_counting) 1478 nr_failed++; 1479 nr_failed_pages += nr_subpages; 1480 break; 1481 } 1482 } 1483 } 1484 nr_failed += retry; 1485 nr_thp_failed += thp_retry; 1486 /* 1487 * Try to migrate subpages of fail-to-migrate THPs, no nr_failed 1488 * counting in this round, since all subpages of a THP is counted 1489 * as 1 failure in the first round. 1490 */ 1491 if (!list_empty(&thp_split_pages)) { 1492 /* 1493 * Move non-migrated pages (after 10 retries) to ret_pages 1494 * to avoid migrating them again. 1495 */ 1496 list_splice_init(from, &ret_pages); 1497 list_splice_init(&thp_split_pages, from); 1498 no_subpage_counting = true; 1499 retry = 1; 1500 goto thp_subpage_migration; 1501 } 1502 1503 rc = nr_failed + nr_thp_failed; 1504 out: 1505 /* 1506 * Put the permanent failure page back to migration list, they 1507 * will be put back to the right list by the caller. 
1508 */ 1509 list_splice(&ret_pages, from); 1510 1511 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded); 1512 count_vm_events(PGMIGRATE_FAIL, nr_failed_pages); 1513 count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded); 1514 count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed); 1515 count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split); 1516 trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded, 1517 nr_thp_failed, nr_thp_split, mode, reason); 1518 1519 if (!swapwrite) 1520 current->flags &= ~PF_SWAPWRITE; 1521 1522 if (ret_succeeded) 1523 *ret_succeeded = nr_succeeded; 1524 1525 return rc; 1526 } 1527 1528 struct page *alloc_migration_target(struct page *page, unsigned long private) 1529 { 1530 struct migration_target_control *mtc; 1531 gfp_t gfp_mask; 1532 unsigned int order = 0; 1533 struct page *new_page = NULL; 1534 int nid; 1535 int zidx; 1536 1537 mtc = (struct migration_target_control *)private; 1538 gfp_mask = mtc->gfp_mask; 1539 nid = mtc->nid; 1540 if (nid == NUMA_NO_NODE) 1541 nid = page_to_nid(page); 1542 1543 if (PageHuge(page)) { 1544 struct hstate *h = page_hstate(compound_head(page)); 1545 1546 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); 1547 return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask); 1548 } 1549 1550 if (PageTransHuge(page)) { 1551 /* 1552 * clear __GFP_RECLAIM to make the migration callback 1553 * consistent with regular THP allocations. 1554 */ 1555 gfp_mask &= ~__GFP_RECLAIM; 1556 gfp_mask |= GFP_TRANSHUGE; 1557 order = HPAGE_PMD_ORDER; 1558 } 1559 zidx = zone_idx(page_zone(page)); 1560 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE) 1561 gfp_mask |= __GFP_HIGHMEM; 1562 1563 new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask); 1564 1565 if (new_page && PageTransHuge(new_page)) 1566 prep_transhuge_page(new_page); 1567 1568 return new_page; 1569 } 1570 1571 #ifdef CONFIG_NUMA 1572 1573 static int store_status(int __user *status, int start, int value, int nr) 1574 { 1575 while (nr-- > 0) { 1576 if (put_user(value, status + start)) 1577 return -EFAULT; 1578 start++; 1579 } 1580 1581 return 0; 1582 } 1583 1584 static int do_move_pages_to_node(struct mm_struct *mm, 1585 struct list_head *pagelist, int node) 1586 { 1587 int err; 1588 struct migration_target_control mtc = { 1589 .nid = node, 1590 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 1591 }; 1592 1593 err = migrate_pages(pagelist, alloc_migration_target, NULL, 1594 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL); 1595 if (err) 1596 putback_movable_pages(pagelist); 1597 return err; 1598 } 1599 1600 /* 1601 * Resolves the given address to a struct page, isolates it from the LRU and 1602 * puts it to the given pagelist. 
1603 * Returns: 1604 * errno - if the page cannot be found/isolated 1605 * 0 - when it doesn't have to be migrated because it is already on the 1606 * target node 1607 * 1 - when it has been queued 1608 */ 1609 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, 1610 int node, struct list_head *pagelist, bool migrate_all) 1611 { 1612 struct vm_area_struct *vma; 1613 struct page *page; 1614 int err; 1615 1616 mmap_read_lock(mm); 1617 err = -EFAULT; 1618 vma = find_vma(mm, addr); 1619 if (!vma || addr < vma->vm_start || !vma_migratable(vma)) 1620 goto out; 1621 1622 /* FOLL_DUMP to ignore special (like zero) pages */ 1623 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); 1624 1625 err = PTR_ERR(page); 1626 if (IS_ERR(page)) 1627 goto out; 1628 1629 err = -ENOENT; 1630 if (!page) 1631 goto out; 1632 1633 err = 0; 1634 if (page_to_nid(page) == node) 1635 goto out_putpage; 1636 1637 err = -EACCES; 1638 if (page_mapcount(page) > 1 && !migrate_all) 1639 goto out_putpage; 1640 1641 if (PageHuge(page)) { 1642 if (PageHead(page)) { 1643 isolate_huge_page(page, pagelist); 1644 err = 1; 1645 } 1646 } else { 1647 struct page *head; 1648 1649 head = compound_head(page); 1650 err = isolate_lru_page(head); 1651 if (err) 1652 goto out_putpage; 1653 1654 err = 1; 1655 list_add_tail(&head->lru, pagelist); 1656 mod_node_page_state(page_pgdat(head), 1657 NR_ISOLATED_ANON + page_is_file_lru(head), 1658 thp_nr_pages(head)); 1659 } 1660 out_putpage: 1661 /* 1662 * Either remove the duplicate refcount from 1663 * isolate_lru_page() or drop the page ref if it was 1664 * not isolated. 1665 */ 1666 put_page(page); 1667 out: 1668 mmap_read_unlock(mm); 1669 return err; 1670 } 1671 1672 static int move_pages_and_store_status(struct mm_struct *mm, int node, 1673 struct list_head *pagelist, int __user *status, 1674 int start, int i, unsigned long nr_pages) 1675 { 1676 int err; 1677 1678 if (list_empty(pagelist)) 1679 return 0; 1680 1681 err = do_move_pages_to_node(mm, pagelist, node); 1682 if (err) { 1683 /* 1684 * Positive err means the number of failed 1685 * pages to migrate. Since we are going to 1686 * abort and return the number of non-migrated 1687 * pages, so need to include the rest of the 1688 * nr_pages that have not been attempted as 1689 * well. 1690 */ 1691 if (err > 0) 1692 err += nr_pages - i - 1; 1693 return err; 1694 } 1695 return store_status(status, start, node, i - start); 1696 } 1697 1698 /* 1699 * Migrate an array of page address onto an array of nodes and fill 1700 * the corresponding array of status. 
1701 */ 1702 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, 1703 unsigned long nr_pages, 1704 const void __user * __user *pages, 1705 const int __user *nodes, 1706 int __user *status, int flags) 1707 { 1708 int current_node = NUMA_NO_NODE; 1709 LIST_HEAD(pagelist); 1710 int start, i; 1711 int err = 0, err1; 1712 1713 lru_cache_disable(); 1714 1715 for (i = start = 0; i < nr_pages; i++) { 1716 const void __user *p; 1717 unsigned long addr; 1718 int node; 1719 1720 err = -EFAULT; 1721 if (get_user(p, pages + i)) 1722 goto out_flush; 1723 if (get_user(node, nodes + i)) 1724 goto out_flush; 1725 addr = (unsigned long)untagged_addr(p); 1726 1727 err = -ENODEV; 1728 if (node < 0 || node >= MAX_NUMNODES) 1729 goto out_flush; 1730 if (!node_state(node, N_MEMORY)) 1731 goto out_flush; 1732 1733 err = -EACCES; 1734 if (!node_isset(node, task_nodes)) 1735 goto out_flush; 1736 1737 if (current_node == NUMA_NO_NODE) { 1738 current_node = node; 1739 start = i; 1740 } else if (node != current_node) { 1741 err = move_pages_and_store_status(mm, current_node, 1742 &pagelist, status, start, i, nr_pages); 1743 if (err) 1744 goto out; 1745 start = i; 1746 current_node = node; 1747 } 1748 1749 /* 1750 * Errors in the page lookup or isolation are not fatal and we simply 1751 * report them via status 1752 */ 1753 err = add_page_for_migration(mm, addr, current_node, 1754 &pagelist, flags & MPOL_MF_MOVE_ALL); 1755 1756 if (err > 0) { 1757 /* The page is successfully queued for migration */ 1758 continue; 1759 } 1760 1761 /* 1762 * The move_pages() man page does not have an -EEXIST choice, so 1763 * use -EFAULT instead. 1764 */ 1765 if (err == -EEXIST) 1766 err = -EFAULT; 1767 1768 /* 1769 * If the page is already on the target node (!err), store the 1770 * node, otherwise, store the err. 1771 */ 1772 err = store_status(status, i, err ? : current_node, 1); 1773 if (err) 1774 goto out_flush; 1775 1776 err = move_pages_and_store_status(mm, current_node, &pagelist, 1777 status, start, i, nr_pages); 1778 if (err) 1779 goto out; 1780 current_node = NUMA_NO_NODE; 1781 } 1782 out_flush: 1783 /* Make sure we do not overwrite the existing error */ 1784 err1 = move_pages_and_store_status(mm, current_node, &pagelist, 1785 status, start, i, nr_pages); 1786 if (err >= 0) 1787 err = err1; 1788 out: 1789 lru_cache_enable(); 1790 return err; 1791 } 1792 1793 /* 1794 * Determine the nodes of an array of pages and store it in an array of status. 1795 */ 1796 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, 1797 const void __user **pages, int *status) 1798 { 1799 unsigned long i; 1800 1801 mmap_read_lock(mm); 1802 1803 for (i = 0; i < nr_pages; i++) { 1804 unsigned long addr = (unsigned long)(*pages); 1805 struct vm_area_struct *vma; 1806 struct page *page; 1807 int err = -EFAULT; 1808 1809 vma = vma_lookup(mm, addr); 1810 if (!vma) 1811 goto set_status; 1812 1813 /* FOLL_DUMP to ignore special (like zero) pages */ 1814 page = follow_page(vma, addr, FOLL_DUMP); 1815 1816 err = PTR_ERR(page); 1817 if (IS_ERR(page)) 1818 goto set_status; 1819 1820 err = page ? 
page_to_nid(page) : -ENOENT; 1821 set_status: 1822 *status = err; 1823 1824 pages++; 1825 status++; 1826 } 1827 1828 mmap_read_unlock(mm); 1829 } 1830 1831 static int get_compat_pages_array(const void __user *chunk_pages[], 1832 const void __user * __user *pages, 1833 unsigned long chunk_nr) 1834 { 1835 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages; 1836 compat_uptr_t p; 1837 int i; 1838 1839 for (i = 0; i < chunk_nr; i++) { 1840 if (get_user(p, pages32 + i)) 1841 return -EFAULT; 1842 chunk_pages[i] = compat_ptr(p); 1843 } 1844 1845 return 0; 1846 } 1847 1848 /* 1849 * Determine the nodes of a user array of pages and store it in 1850 * a user array of status. 1851 */ 1852 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, 1853 const void __user * __user *pages, 1854 int __user *status) 1855 { 1856 #define DO_PAGES_STAT_CHUNK_NR 16 1857 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; 1858 int chunk_status[DO_PAGES_STAT_CHUNK_NR]; 1859 1860 while (nr_pages) { 1861 unsigned long chunk_nr; 1862 1863 chunk_nr = nr_pages; 1864 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR) 1865 chunk_nr = DO_PAGES_STAT_CHUNK_NR; 1866 1867 if (in_compat_syscall()) { 1868 if (get_compat_pages_array(chunk_pages, pages, 1869 chunk_nr)) 1870 break; 1871 } else { 1872 if (copy_from_user(chunk_pages, pages, 1873 chunk_nr * sizeof(*chunk_pages))) 1874 break; 1875 } 1876 1877 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); 1878 1879 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) 1880 break; 1881 1882 pages += chunk_nr; 1883 status += chunk_nr; 1884 nr_pages -= chunk_nr; 1885 } 1886 return nr_pages ? -EFAULT : 0; 1887 } 1888 1889 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes) 1890 { 1891 struct task_struct *task; 1892 struct mm_struct *mm; 1893 1894 /* 1895 * There is no need to check if current process has the right to modify 1896 * the specified process when they are same. 1897 */ 1898 if (!pid) { 1899 mmget(current->mm); 1900 *mem_nodes = cpuset_mems_allowed(current); 1901 return current->mm; 1902 } 1903 1904 /* Find the mm_struct */ 1905 rcu_read_lock(); 1906 task = find_task_by_vpid(pid); 1907 if (!task) { 1908 rcu_read_unlock(); 1909 return ERR_PTR(-ESRCH); 1910 } 1911 get_task_struct(task); 1912 1913 /* 1914 * Check if this process has the right to modify the specified 1915 * process. Use the regular "ptrace_may_access()" checks. 1916 */ 1917 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1918 rcu_read_unlock(); 1919 mm = ERR_PTR(-EPERM); 1920 goto out; 1921 } 1922 rcu_read_unlock(); 1923 1924 mm = ERR_PTR(security_task_movememory(task)); 1925 if (IS_ERR(mm)) 1926 goto out; 1927 *mem_nodes = cpuset_mems_allowed(task); 1928 mm = get_task_mm(task); 1929 out: 1930 put_task_struct(task); 1931 if (!mm) 1932 mm = ERR_PTR(-EINVAL); 1933 return mm; 1934 } 1935 1936 /* 1937 * Move a list of pages in the address space of the currently executing 1938 * process. 
1939 */ 1940 static int kernel_move_pages(pid_t pid, unsigned long nr_pages, 1941 const void __user * __user *pages, 1942 const int __user *nodes, 1943 int __user *status, int flags) 1944 { 1945 struct mm_struct *mm; 1946 int err; 1947 nodemask_t task_nodes; 1948 1949 /* Check flags */ 1950 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL)) 1951 return -EINVAL; 1952 1953 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 1954 return -EPERM; 1955 1956 mm = find_mm_struct(pid, &task_nodes); 1957 if (IS_ERR(mm)) 1958 return PTR_ERR(mm); 1959 1960 if (nodes) 1961 err = do_pages_move(mm, task_nodes, nr_pages, pages, 1962 nodes, status, flags); 1963 else 1964 err = do_pages_stat(mm, nr_pages, pages, status); 1965 1966 mmput(mm); 1967 return err; 1968 } 1969 1970 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, 1971 const void __user * __user *, pages, 1972 const int __user *, nodes, 1973 int __user *, status, int, flags) 1974 { 1975 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags); 1976 } 1977 1978 #ifdef CONFIG_NUMA_BALANCING 1979 /* 1980 * Returns true if this is a safe migration target node for misplaced NUMA 1981 * pages. Currently it only checks the watermarks which crude 1982 */ 1983 static bool migrate_balanced_pgdat(struct pglist_data *pgdat, 1984 unsigned long nr_migrate_pages) 1985 { 1986 int z; 1987 1988 for (z = pgdat->nr_zones - 1; z >= 0; z--) { 1989 struct zone *zone = pgdat->node_zones + z; 1990 1991 if (!populated_zone(zone)) 1992 continue; 1993 1994 /* Avoid waking kswapd by allocating pages_to_migrate pages. */ 1995 if (!zone_watermark_ok(zone, 0, 1996 high_wmark_pages(zone) + 1997 nr_migrate_pages, 1998 ZONE_MOVABLE, 0)) 1999 continue; 2000 return true; 2001 } 2002 return false; 2003 } 2004 2005 static struct page *alloc_misplaced_dst_page(struct page *page, 2006 unsigned long data) 2007 { 2008 int nid = (int) data; 2009 struct page *newpage; 2010 2011 newpage = __alloc_pages_node(nid, 2012 (GFP_HIGHUSER_MOVABLE | 2013 __GFP_THISNODE | __GFP_NOMEMALLOC | 2014 __GFP_NORETRY | __GFP_NOWARN) & 2015 ~__GFP_RECLAIM, 0); 2016 2017 return newpage; 2018 } 2019 2020 static struct page *alloc_misplaced_dst_page_thp(struct page *page, 2021 unsigned long data) 2022 { 2023 int nid = (int) data; 2024 struct page *newpage; 2025 2026 newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE), 2027 HPAGE_PMD_ORDER); 2028 if (!newpage) 2029 goto out; 2030 2031 prep_transhuge_page(newpage); 2032 2033 out: 2034 return newpage; 2035 } 2036 2037 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) 2038 { 2039 int page_lru; 2040 int nr_pages = thp_nr_pages(page); 2041 2042 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); 2043 2044 /* Do not migrate THP mapped by multiple processes */ 2045 if (PageTransHuge(page) && total_mapcount(page) > 1) 2046 return 0; 2047 2048 /* Avoid migrating to a node that is nearly full */ 2049 if (!migrate_balanced_pgdat(pgdat, nr_pages)) 2050 return 0; 2051 2052 if (isolate_lru_page(page)) 2053 return 0; 2054 2055 page_lru = page_is_file_lru(page); 2056 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru, 2057 nr_pages); 2058 2059 /* 2060 * Isolating the page has taken another reference, so the 2061 * caller's reference can be safely dropped without the page 2062 * disappearing underneath us during migration. 2063 */ 2064 put_page(page); 2065 return 1; 2066 } 2067 2068 /* 2069 * Attempt to migrate a misplaced page to the specified destination 2070 * node. 
Caller is expected to have an elevated reference count on 2071 * the page that will be dropped by this function before returning. 2072 */ 2073 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, 2074 int node) 2075 { 2076 pg_data_t *pgdat = NODE_DATA(node); 2077 int isolated; 2078 int nr_remaining; 2079 LIST_HEAD(migratepages); 2080 new_page_t *new; 2081 bool compound; 2082 int nr_pages = thp_nr_pages(page); 2083 2084 /* 2085 * PTE mapped THP or HugeTLB page can't reach here so the page could 2086 * be either base page or THP. And it must be head page if it is 2087 * THP. 2088 */ 2089 compound = PageTransHuge(page); 2090 2091 if (compound) 2092 new = alloc_misplaced_dst_page_thp; 2093 else 2094 new = alloc_misplaced_dst_page; 2095 2096 /* 2097 * Don't migrate file pages that are mapped in multiple processes 2098 * with execute permissions as they are probably shared libraries. 2099 */ 2100 if (page_mapcount(page) != 1 && page_is_file_lru(page) && 2101 (vma->vm_flags & VM_EXEC)) 2102 goto out; 2103 2104 /* 2105 * Also do not migrate dirty pages as not all filesystems can move 2106 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles. 2107 */ 2108 if (page_is_file_lru(page) && PageDirty(page)) 2109 goto out; 2110 2111 isolated = numamigrate_isolate_page(pgdat, page); 2112 if (!isolated) 2113 goto out; 2114 2115 list_add(&page->lru, &migratepages); 2116 nr_remaining = migrate_pages(&migratepages, *new, NULL, node, 2117 MIGRATE_ASYNC, MR_NUMA_MISPLACED, NULL); 2118 if (nr_remaining) { 2119 if (!list_empty(&migratepages)) { 2120 list_del(&page->lru); 2121 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 2122 page_is_file_lru(page), -nr_pages); 2123 putback_lru_page(page); 2124 } 2125 isolated = 0; 2126 } else 2127 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_pages); 2128 BUG_ON(!list_empty(&migratepages)); 2129 return isolated; 2130 2131 out: 2132 put_page(page); 2133 return 0; 2134 } 2135 #endif /* CONFIG_NUMA_BALANCING */ 2136 #endif /* CONFIG_NUMA */ 2137 2138 #ifdef CONFIG_DEVICE_PRIVATE 2139 static int migrate_vma_collect_skip(unsigned long start, 2140 unsigned long end, 2141 struct mm_walk *walk) 2142 { 2143 struct migrate_vma *migrate = walk->private; 2144 unsigned long addr; 2145 2146 for (addr = start; addr < end; addr += PAGE_SIZE) { 2147 migrate->dst[migrate->npages] = 0; 2148 migrate->src[migrate->npages++] = 0; 2149 } 2150 2151 return 0; 2152 } 2153 2154 static int migrate_vma_collect_hole(unsigned long start, 2155 unsigned long end, 2156 __always_unused int depth, 2157 struct mm_walk *walk) 2158 { 2159 struct migrate_vma *migrate = walk->private; 2160 unsigned long addr; 2161 2162 /* Only allow populating anonymous memory. 
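 * A hole in a VMA that is not anonymous is skipped, because
 * migrate_vma_insert_page() later in this file can only populate
 * anonymous memory with a new page.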
*/ 2163 if (!vma_is_anonymous(walk->vma)) 2164 return migrate_vma_collect_skip(start, end, walk); 2165 2166 for (addr = start; addr < end; addr += PAGE_SIZE) { 2167 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; 2168 migrate->dst[migrate->npages] = 0; 2169 migrate->npages++; 2170 migrate->cpages++; 2171 } 2172 2173 return 0; 2174 } 2175 2176 static int migrate_vma_collect_pmd(pmd_t *pmdp, 2177 unsigned long start, 2178 unsigned long end, 2179 struct mm_walk *walk) 2180 { 2181 struct migrate_vma *migrate = walk->private; 2182 struct vm_area_struct *vma = walk->vma; 2183 struct mm_struct *mm = vma->vm_mm; 2184 unsigned long addr = start, unmapped = 0; 2185 spinlock_t *ptl; 2186 pte_t *ptep; 2187 2188 again: 2189 if (pmd_none(*pmdp)) 2190 return migrate_vma_collect_hole(start, end, -1, walk); 2191 2192 if (pmd_trans_huge(*pmdp)) { 2193 struct page *page; 2194 2195 ptl = pmd_lock(mm, pmdp); 2196 if (unlikely(!pmd_trans_huge(*pmdp))) { 2197 spin_unlock(ptl); 2198 goto again; 2199 } 2200 2201 page = pmd_page(*pmdp); 2202 if (is_huge_zero_page(page)) { 2203 spin_unlock(ptl); 2204 split_huge_pmd(vma, pmdp, addr); 2205 if (pmd_trans_unstable(pmdp)) 2206 return migrate_vma_collect_skip(start, end, 2207 walk); 2208 } else { 2209 int ret; 2210 2211 get_page(page); 2212 spin_unlock(ptl); 2213 if (unlikely(!trylock_page(page))) 2214 return migrate_vma_collect_skip(start, end, 2215 walk); 2216 ret = split_huge_page(page); 2217 unlock_page(page); 2218 put_page(page); 2219 if (ret) 2220 return migrate_vma_collect_skip(start, end, 2221 walk); 2222 if (pmd_none(*pmdp)) 2223 return migrate_vma_collect_hole(start, end, -1, 2224 walk); 2225 } 2226 } 2227 2228 if (unlikely(pmd_bad(*pmdp))) 2229 return migrate_vma_collect_skip(start, end, walk); 2230 2231 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); 2232 arch_enter_lazy_mmu_mode(); 2233 2234 for (; addr < end; addr += PAGE_SIZE, ptep++) { 2235 unsigned long mpfn = 0, pfn; 2236 struct page *page; 2237 swp_entry_t entry; 2238 pte_t pte; 2239 2240 pte = *ptep; 2241 2242 if (pte_none(pte)) { 2243 if (vma_is_anonymous(vma)) { 2244 mpfn = MIGRATE_PFN_MIGRATE; 2245 migrate->cpages++; 2246 } 2247 goto next; 2248 } 2249 2250 if (!pte_present(pte)) { 2251 /* 2252 * Only care about unaddressable device page special 2253 * page table entry. Other special swap entries are not 2254 * migratable, and we ignore regular swapped page. 2255 */ 2256 entry = pte_to_swp_entry(pte); 2257 if (!is_device_private_entry(entry)) 2258 goto next; 2259 2260 page = pfn_swap_entry_to_page(entry); 2261 if (!(migrate->flags & 2262 MIGRATE_VMA_SELECT_DEVICE_PRIVATE) || 2263 page->pgmap->owner != migrate->pgmap_owner) 2264 goto next; 2265 2266 mpfn = migrate_pfn(page_to_pfn(page)) | 2267 MIGRATE_PFN_MIGRATE; 2268 if (is_writable_device_private_entry(entry)) 2269 mpfn |= MIGRATE_PFN_WRITE; 2270 } else { 2271 if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) 2272 goto next; 2273 pfn = pte_pfn(pte); 2274 if (is_zero_pfn(pfn)) { 2275 mpfn = MIGRATE_PFN_MIGRATE; 2276 migrate->cpages++; 2277 goto next; 2278 } 2279 page = vm_normal_page(migrate->vma, addr, pte); 2280 mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; 2281 mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0; 2282 } 2283 2284 /* FIXME support THP */ 2285 if (!page || !page->mapping || PageTransCompound(page)) { 2286 mpfn = 0; 2287 goto next; 2288 } 2289 2290 /* 2291 * By getting a reference on the page we pin it and that blocks 2292 * any kind of migration. Side effect is that it "freezes" the 2293 * pte. 
2294 * 2295 * We drop this reference after isolating the page from the lru 2296 * for non device page (device page are not on the lru and thus 2297 * can't be dropped from it). 2298 */ 2299 get_page(page); 2300 2301 /* 2302 * Optimize for the common case where page is only mapped once 2303 * in one process. If we can lock the page, then we can safely 2304 * set up a special migration page table entry now. 2305 */ 2306 if (trylock_page(page)) { 2307 pte_t swp_pte; 2308 2309 migrate->cpages++; 2310 ptep_get_and_clear(mm, addr, ptep); 2311 2312 /* Setup special migration page table entry */ 2313 if (mpfn & MIGRATE_PFN_WRITE) 2314 entry = make_writable_migration_entry( 2315 page_to_pfn(page)); 2316 else 2317 entry = make_readable_migration_entry( 2318 page_to_pfn(page)); 2319 swp_pte = swp_entry_to_pte(entry); 2320 if (pte_present(pte)) { 2321 if (pte_soft_dirty(pte)) 2322 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2323 if (pte_uffd_wp(pte)) 2324 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2325 } else { 2326 if (pte_swp_soft_dirty(pte)) 2327 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2328 if (pte_swp_uffd_wp(pte)) 2329 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2330 } 2331 set_pte_at(mm, addr, ptep, swp_pte); 2332 2333 /* 2334 * This is like regular unmap: we remove the rmap and 2335 * drop page refcount. Page won't be freed, as we took 2336 * a reference just above. 2337 */ 2338 page_remove_rmap(page, false); 2339 put_page(page); 2340 2341 if (pte_present(pte)) 2342 unmapped++; 2343 } else { 2344 put_page(page); 2345 mpfn = 0; 2346 } 2347 2348 next: 2349 migrate->dst[migrate->npages] = 0; 2350 migrate->src[migrate->npages++] = mpfn; 2351 } 2352 arch_leave_lazy_mmu_mode(); 2353 pte_unmap_unlock(ptep - 1, ptl); 2354 2355 /* Only flush the TLB if we actually modified any entries */ 2356 if (unmapped) 2357 flush_tlb_range(walk->vma, start, end); 2358 2359 return 0; 2360 } 2361 2362 static const struct mm_walk_ops migrate_vma_walk_ops = { 2363 .pmd_entry = migrate_vma_collect_pmd, 2364 .pte_hole = migrate_vma_collect_hole, 2365 }; 2366 2367 /* 2368 * migrate_vma_collect() - collect pages over a range of virtual addresses 2369 * @migrate: migrate struct containing all migration information 2370 * 2371 * This will walk the CPU page table. For each virtual address backed by a 2372 * valid page, it updates the src array and takes a reference on the page, in 2373 * order to pin the page until we lock it and unmap it. 2374 */ 2375 static void migrate_vma_collect(struct migrate_vma *migrate) 2376 { 2377 struct mmu_notifier_range range; 2378 2379 /* 2380 * Note that the pgmap_owner is passed to the mmu notifier callback so 2381 * that the registered device driver can skip invalidating device 2382 * private page mappings that won't be migrated. 2383 */ 2384 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0, 2385 migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end, 2386 migrate->pgmap_owner); 2387 mmu_notifier_invalidate_range_start(&range); 2388 2389 walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end, 2390 &migrate_vma_walk_ops, migrate); 2391 2392 mmu_notifier_invalidate_range_end(&range); 2393 migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT); 2394 } 2395 2396 /* 2397 * migrate_vma_check_page() - check if page is pinned or not 2398 * @page: struct page to check 2399 * 2400 * Pinned pages cannot be migrated. This is the same test as in 2401 * folio_migrate_mapping(), except that here we allow migration of a 2402 * ZONE_DEVICE page. 
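 *
 * As a rough illustration of the accounting below: a system-memory
 * anonymous page that reaches this point has already been unmapped and
 * isolated from the LRU, so page_mapcount() is 0 and page_count() is 1
 * (the single reference held on behalf of the migration), and the check
 * passes. An additional pin, for instance from get_user_pages(), raises
 * page_count() beyond what the formula accounts for and the page is
 * reported as not migratable.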
2403 */ 2404 static bool migrate_vma_check_page(struct page *page) 2405 { 2406 /* 2407 * One extra ref because caller holds an extra reference, either from 2408 * isolate_lru_page() for a regular page, or migrate_vma_collect() for 2409 * a device page. 2410 */ 2411 int extra = 1; 2412 2413 /* 2414 * FIXME support THP (transparent huge page), it is bit more complex to 2415 * check them than regular pages, because they can be mapped with a pmd 2416 * or with a pte (split pte mapping). 2417 */ 2418 if (PageCompound(page)) 2419 return false; 2420 2421 /* Page from ZONE_DEVICE have one extra reference */ 2422 if (is_zone_device_page(page)) 2423 extra++; 2424 2425 /* For file back page */ 2426 if (page_mapping(page)) 2427 extra += 1 + page_has_private(page); 2428 2429 if ((page_count(page) - extra) > page_mapcount(page)) 2430 return false; 2431 2432 return true; 2433 } 2434 2435 /* 2436 * migrate_vma_unmap() - replace page mapping with special migration pte entry 2437 * @migrate: migrate struct containing all migration information 2438 * 2439 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a 2440 * special migration pte entry and check if it has been pinned. Pinned pages are 2441 * restored because we cannot migrate them. 2442 * 2443 * This is the last step before we call the device driver callback to allocate 2444 * destination memory and copy contents of original page over to new page. 2445 */ 2446 static void migrate_vma_unmap(struct migrate_vma *migrate) 2447 { 2448 const unsigned long npages = migrate->npages; 2449 unsigned long i, restore = 0; 2450 bool allow_drain = true; 2451 2452 lru_add_drain(); 2453 2454 for (i = 0; i < npages; i++) { 2455 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2456 2457 if (!page) 2458 continue; 2459 2460 /* ZONE_DEVICE pages are not on LRU */ 2461 if (!is_zone_device_page(page)) { 2462 if (!PageLRU(page) && allow_drain) { 2463 /* Drain CPU's pagevec */ 2464 lru_add_drain_all(); 2465 allow_drain = false; 2466 } 2467 2468 if (isolate_lru_page(page)) { 2469 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2470 migrate->cpages--; 2471 restore++; 2472 continue; 2473 } 2474 2475 /* Drop the reference we took in collect */ 2476 put_page(page); 2477 } 2478 2479 if (page_mapped(page)) 2480 try_to_migrate(page, 0); 2481 2482 if (page_mapped(page) || !migrate_vma_check_page(page)) { 2483 if (!is_zone_device_page(page)) { 2484 get_page(page); 2485 putback_lru_page(page); 2486 } 2487 2488 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2489 migrate->cpages--; 2490 restore++; 2491 continue; 2492 } 2493 } 2494 2495 for (i = 0; i < npages && restore; i++) { 2496 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2497 2498 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) 2499 continue; 2500 2501 remove_migration_ptes(page, page, false); 2502 2503 migrate->src[i] = 0; 2504 unlock_page(page); 2505 put_page(page); 2506 restore--; 2507 } 2508 } 2509 2510 /** 2511 * migrate_vma_setup() - prepare to migrate a range of memory 2512 * @args: contains the vma, start, and pfns arrays for the migration 2513 * 2514 * Returns: negative errno on failures, 0 when 0 or more pages were migrated 2515 * without an error. 2516 * 2517 * Prepare to migrate a range of memory virtual address range by collecting all 2518 * the pages backing each virtual address in the range, saving them inside the 2519 * src array. Then lock those pages and unmap them. Once the pages are locked 2520 * and unmapped, check whether each page is pinned or not. 
Pages that aren't 2521 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the 2522 * corresponding src array entry. Then restores any pages that are pinned, by 2523 * remapping and unlocking those pages. 2524 * 2525 * The caller should then allocate destination memory and copy source memory to 2526 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE 2527 * flag set). Once these are allocated and copied, the caller must update each 2528 * corresponding entry in the dst array with the pfn value of the destination 2529 * page and with MIGRATE_PFN_VALID. Destination pages must be locked via 2530 * lock_page(). 2531 * 2532 * Note that the caller does not have to migrate all the pages that are marked 2533 * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from 2534 * device memory to system memory. If the caller cannot migrate a device page 2535 * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe 2536 * consequences for the userspace process, so it must be avoided if at all 2537 * possible. 2538 * 2539 * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we 2540 * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus 2541 * allowing the caller to allocate device memory for those unbacked virtual 2542 * addresses. For this the caller simply has to allocate device memory and 2543 * properly set the destination entry like for regular migration. Note that 2544 * this can still fail, and thus inside the device driver you must check if the 2545 * migration was successful for those entries after calling migrate_vma_pages(), 2546 * just like for regular migration. 2547 * 2548 * After that, the callers must call migrate_vma_pages() to go over each entry 2549 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag 2550 * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set, 2551 * then migrate_vma_pages() to migrate struct page information from the source 2552 * struct page to the destination struct page. If it fails to migrate the 2553 * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the 2554 * src array. 2555 * 2556 * At this point all successfully migrated pages have an entry in the src 2557 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst 2558 * array entry with MIGRATE_PFN_VALID flag set. 2559 * 2560 * Once migrate_vma_pages() returns the caller may inspect which pages were 2561 * successfully migrated, and which were not. Successfully migrated pages will 2562 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry. 2563 * 2564 * It is safe to update device page table after migrate_vma_pages() because 2565 * both destination and source page are still locked, and the mmap_lock is held 2566 * in read mode (hence no one can unmap the range being migrated). 2567 * 2568 * Once the caller is done cleaning up things and updating its page table (if it 2569 * chose to do so, this is not an obligation) it finally calls 2570 * migrate_vma_finalize() to update the CPU page table to point to new pages 2571 * for successfully migrated pages or otherwise restore the CPU page table to 2572 * point to the original source pages. 
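 *
 * Put together, a driver migrating a single system page into its own
 * memory would follow roughly the sequence below. This is only a
 * sketch: my_driver_alloc_device_page(), my_driver_copy_to_device() and
 * my_driver_data are hypothetical driver-side helpers, and addr is
 * assumed to be page aligned; the migrate_vma_*() calls and the flags
 * are the interface documented above.
 *
 *	unsigned long src_pfn = 0, dst_pfn = 0;
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= addr,
 *		.end		= addr + PAGE_SIZE,
 *		.src		= &src_pfn,
 *		.dst		= &dst_pfn,
 *		.pgmap_owner	= my_driver_data,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *	int ret;
 *
 *	ret = migrate_vma_setup(&args);
 *	if (ret)
 *		return ret;
 *
 *	if (args.cpages && (src_pfn & MIGRATE_PFN_MIGRATE)) {
 *		struct page *dpage = my_driver_alloc_device_page();
 *
 *		lock_page(dpage);
 *		my_driver_copy_to_device(dpage, migrate_pfn_to_page(src_pfn));
 *		dst_pfn = migrate_pfn(page_to_pfn(dpage));
 *	}
 *	migrate_vma_pages(&args);
 *	// src_pfn & MIGRATE_PFN_MIGRATE tells whether the page really moved
 *	migrate_vma_finalize(&args);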
2573 */ 2574 int migrate_vma_setup(struct migrate_vma *args) 2575 { 2576 long nr_pages = (args->end - args->start) >> PAGE_SHIFT; 2577 2578 args->start &= PAGE_MASK; 2579 args->end &= PAGE_MASK; 2580 if (!args->vma || is_vm_hugetlb_page(args->vma) || 2581 (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma)) 2582 return -EINVAL; 2583 if (nr_pages <= 0) 2584 return -EINVAL; 2585 if (args->start < args->vma->vm_start || 2586 args->start >= args->vma->vm_end) 2587 return -EINVAL; 2588 if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end) 2589 return -EINVAL; 2590 if (!args->src || !args->dst) 2591 return -EINVAL; 2592 2593 memset(args->src, 0, sizeof(*args->src) * nr_pages); 2594 args->cpages = 0; 2595 args->npages = 0; 2596 2597 migrate_vma_collect(args); 2598 2599 if (args->cpages) 2600 migrate_vma_unmap(args); 2601 2602 /* 2603 * At this point pages are locked and unmapped, and thus they have 2604 * stable content and can safely be copied to destination memory that 2605 * is allocated by the drivers. 2606 */ 2607 return 0; 2608 2609 } 2610 EXPORT_SYMBOL(migrate_vma_setup); 2611 2612 /* 2613 * This code closely matches the code in: 2614 * __handle_mm_fault() 2615 * handle_pte_fault() 2616 * do_anonymous_page() 2617 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE 2618 * private page. 2619 */ 2620 static void migrate_vma_insert_page(struct migrate_vma *migrate, 2621 unsigned long addr, 2622 struct page *page, 2623 unsigned long *src) 2624 { 2625 struct vm_area_struct *vma = migrate->vma; 2626 struct mm_struct *mm = vma->vm_mm; 2627 bool flush = false; 2628 spinlock_t *ptl; 2629 pte_t entry; 2630 pgd_t *pgdp; 2631 p4d_t *p4dp; 2632 pud_t *pudp; 2633 pmd_t *pmdp; 2634 pte_t *ptep; 2635 2636 /* Only allow populating anonymous memory */ 2637 if (!vma_is_anonymous(vma)) 2638 goto abort; 2639 2640 pgdp = pgd_offset(mm, addr); 2641 p4dp = p4d_alloc(mm, pgdp, addr); 2642 if (!p4dp) 2643 goto abort; 2644 pudp = pud_alloc(mm, p4dp, addr); 2645 if (!pudp) 2646 goto abort; 2647 pmdp = pmd_alloc(mm, pudp, addr); 2648 if (!pmdp) 2649 goto abort; 2650 2651 if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp)) 2652 goto abort; 2653 2654 /* 2655 * Use pte_alloc() instead of pte_alloc_map(). We can't run 2656 * pte_offset_map() on pmds where a huge pmd might be created 2657 * from a different thread. 2658 * 2659 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when 2660 * parallel threads are excluded by other means. 2661 * 2662 * Here we only have mmap_read_lock(mm). 2663 */ 2664 if (pte_alloc(mm, pmdp)) 2665 goto abort; 2666 2667 /* See the comment in pte_alloc_one_map() */ 2668 if (unlikely(pmd_trans_unstable(pmdp))) 2669 goto abort; 2670 2671 if (unlikely(anon_vma_prepare(vma))) 2672 goto abort; 2673 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) 2674 goto abort; 2675 2676 /* 2677 * The memory barrier inside __SetPageUptodate makes sure that 2678 * preceding stores to the page contents become visible before 2679 * the set_pte_at() write. 2680 */ 2681 __SetPageUptodate(page); 2682 2683 if (is_zone_device_page(page)) { 2684 if (is_device_private_page(page)) { 2685 swp_entry_t swp_entry; 2686 2687 if (vma->vm_flags & VM_WRITE) 2688 swp_entry = make_writable_device_private_entry( 2689 page_to_pfn(page)); 2690 else 2691 swp_entry = make_readable_device_private_entry( 2692 page_to_pfn(page)); 2693 entry = swp_entry_to_pte(swp_entry); 2694 } else { 2695 /* 2696 * For now we only support migrating to un-addressable 2697 * device memory. 
2698 */ 2699 pr_warn_once("Unsupported ZONE_DEVICE page type.\n"); 2700 goto abort; 2701 } 2702 } else { 2703 entry = mk_pte(page, vma->vm_page_prot); 2704 if (vma->vm_flags & VM_WRITE) 2705 entry = pte_mkwrite(pte_mkdirty(entry)); 2706 } 2707 2708 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); 2709 2710 if (check_stable_address_space(mm)) 2711 goto unlock_abort; 2712 2713 if (pte_present(*ptep)) { 2714 unsigned long pfn = pte_pfn(*ptep); 2715 2716 if (!is_zero_pfn(pfn)) 2717 goto unlock_abort; 2718 flush = true; 2719 } else if (!pte_none(*ptep)) 2720 goto unlock_abort; 2721 2722 /* 2723 * Check for userfaultfd but do not deliver the fault. Instead, 2724 * just back off. 2725 */ 2726 if (userfaultfd_missing(vma)) 2727 goto unlock_abort; 2728 2729 inc_mm_counter(mm, MM_ANONPAGES); 2730 page_add_new_anon_rmap(page, vma, addr, false); 2731 if (!is_zone_device_page(page)) 2732 lru_cache_add_inactive_or_unevictable(page, vma); 2733 get_page(page); 2734 2735 if (flush) { 2736 flush_cache_page(vma, addr, pte_pfn(*ptep)); 2737 ptep_clear_flush_notify(vma, addr, ptep); 2738 set_pte_at_notify(mm, addr, ptep, entry); 2739 update_mmu_cache(vma, addr, ptep); 2740 } else { 2741 /* No need to invalidate - it was non-present before */ 2742 set_pte_at(mm, addr, ptep, entry); 2743 update_mmu_cache(vma, addr, ptep); 2744 } 2745 2746 pte_unmap_unlock(ptep, ptl); 2747 *src = MIGRATE_PFN_MIGRATE; 2748 return; 2749 2750 unlock_abort: 2751 pte_unmap_unlock(ptep, ptl); 2752 abort: 2753 *src &= ~MIGRATE_PFN_MIGRATE; 2754 } 2755 2756 /** 2757 * migrate_vma_pages() - migrate meta-data from src page to dst page 2758 * @migrate: migrate struct containing all migration information 2759 * 2760 * This migrates struct page meta-data from source struct page to destination 2761 * struct page. This effectively finishes the migration from source page to the 2762 * destination page. 2763 */ 2764 void migrate_vma_pages(struct migrate_vma *migrate) 2765 { 2766 const unsigned long npages = migrate->npages; 2767 const unsigned long start = migrate->start; 2768 struct mmu_notifier_range range; 2769 unsigned long addr, i; 2770 bool notified = false; 2771 2772 for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) { 2773 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); 2774 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2775 struct address_space *mapping; 2776 int r; 2777 2778 if (!newpage) { 2779 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2780 continue; 2781 } 2782 2783 if (!page) { 2784 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) 2785 continue; 2786 if (!notified) { 2787 notified = true; 2788 2789 mmu_notifier_range_init_owner(&range, 2790 MMU_NOTIFY_MIGRATE, 0, migrate->vma, 2791 migrate->vma->vm_mm, addr, migrate->end, 2792 migrate->pgmap_owner); 2793 mmu_notifier_invalidate_range_start(&range); 2794 } 2795 migrate_vma_insert_page(migrate, addr, newpage, 2796 &migrate->src[i]); 2797 continue; 2798 } 2799 2800 mapping = page_mapping(page); 2801 2802 if (is_zone_device_page(newpage)) { 2803 if (is_device_private_page(newpage)) { 2804 /* 2805 * For now only support private anonymous when 2806 * migrating to un-addressable device memory. 2807 */ 2808 if (mapping) { 2809 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2810 continue; 2811 } 2812 } else { 2813 /* 2814 * Other types of ZONE_DEVICE page are not 2815 * supported. 
2816 */ 2817 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2818 continue; 2819 } 2820 } 2821 2822 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY); 2823 if (r != MIGRATEPAGE_SUCCESS) 2824 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2825 } 2826 2827 /* 2828 * No need to double call mmu_notifier->invalidate_range() callback as 2829 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page() 2830 * did already call it. 2831 */ 2832 if (notified) 2833 mmu_notifier_invalidate_range_only_end(&range); 2834 } 2835 EXPORT_SYMBOL(migrate_vma_pages); 2836 2837 /** 2838 * migrate_vma_finalize() - restore CPU page table entry 2839 * @migrate: migrate struct containing all migration information 2840 * 2841 * This replaces the special migration pte entry with either a mapping to the 2842 * new page if migration was successful for that page, or to the original page 2843 * otherwise. 2844 * 2845 * This also unlocks the pages and puts them back on the lru, or drops the extra 2846 * refcount, for device pages. 2847 */ 2848 void migrate_vma_finalize(struct migrate_vma *migrate) 2849 { 2850 const unsigned long npages = migrate->npages; 2851 unsigned long i; 2852 2853 for (i = 0; i < npages; i++) { 2854 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); 2855 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2856 2857 if (!page) { 2858 if (newpage) { 2859 unlock_page(newpage); 2860 put_page(newpage); 2861 } 2862 continue; 2863 } 2864 2865 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) { 2866 if (newpage) { 2867 unlock_page(newpage); 2868 put_page(newpage); 2869 } 2870 newpage = page; 2871 } 2872 2873 remove_migration_ptes(page, newpage, false); 2874 unlock_page(page); 2875 2876 if (is_zone_device_page(page)) 2877 put_page(page); 2878 else 2879 putback_lru_page(page); 2880 2881 if (newpage != page) { 2882 unlock_page(newpage); 2883 if (is_zone_device_page(newpage)) 2884 put_page(newpage); 2885 else 2886 putback_lru_page(newpage); 2887 } 2888 } 2889 } 2890 EXPORT_SYMBOL(migrate_vma_finalize); 2891 #endif /* CONFIG_DEVICE_PRIVATE */ 2892 2893 /* 2894 * node_demotion[] example: 2895 * 2896 * Consider a system with two sockets. Each socket has 2897 * three classes of memory attached: fast, medium and slow. 2898 * Each memory class is placed in its own NUMA node. The 2899 * CPUs are placed in the node with the "fast" memory. The 2900 * 6 NUMA nodes (0-5) might be split among the sockets like 2901 * this: 2902 * 2903 * Socket A: 0, 1, 2 2904 * Socket B: 3, 4, 5 2905 * 2906 * When Node 0 fills up, its memory should be migrated to 2907 * Node 1. When Node 1 fills up, it should be migrated to 2908 * Node 2. The migration path start on the nodes with the 2909 * processors (since allocations default to this node) and 2910 * fast memory, progress through medium and end with the 2911 * slow memory: 2912 * 2913 * 0 -> 1 -> 2 -> stop 2914 * 3 -> 4 -> 5 -> stop 2915 * 2916 * This is represented in the node_demotion[] like this: 2917 * 2918 * { nr=1, nodes[0]=1 }, // Node 0 migrates to 1 2919 * { nr=1, nodes[0]=2 }, // Node 1 migrates to 2 2920 * { nr=0, nodes[0]=-1 }, // Node 2 does not migrate 2921 * { nr=1, nodes[0]=4 }, // Node 3 migrates to 4 2922 * { nr=1, nodes[0]=5 }, // Node 4 migrates to 5 2923 * { nr=0, nodes[0]=-1 }, // Node 5 does not migrate 2924 * 2925 * Moreover some systems may have multiple slow memory nodes. 
 * Suppose a system has one socket with 3 memory nodes: node 0
 * is the fast memory type, nodes 1 and 2 are both the slow memory
 * type, and the distance between the fast memory node and each slow
 * memory node is the same. The migration path should then be:
 *
 *	0 -> 1/2 -> stop
 *
 * This is represented in the node_demotion[] like this:
 *	{ nr=2, {nodes[0]=1, nodes[1]=2} }, // Node 0 migrates to node 1 and node 2
 *	{ nr=0, nodes[0]=-1, }, // Node 1 does not migrate
 *	{ nr=0, nodes[0]=-1, }, // Node 2 does not migrate
 */

/*
 * Writes to this array occur without locking. Cycles are
 * not allowed: Node X demotes to Y which demotes to X...
 *
 * If multiple reads are performed, a single rcu_read_lock()
 * must be held over all reads to ensure that no cycles are
 * observed.
 */
#define DEFAULT_DEMOTION_TARGET_NODES 15

#if MAX_NUMNODES < DEFAULT_DEMOTION_TARGET_NODES
#define DEMOTION_TARGET_NODES	(MAX_NUMNODES - 1)
#else
#define DEMOTION_TARGET_NODES	DEFAULT_DEMOTION_TARGET_NODES
#endif

struct demotion_nodes {
	unsigned short nr;
	short nodes[DEMOTION_TARGET_NODES];
};

static struct demotion_nodes *node_demotion __read_mostly;

/**
 * next_demotion_node() - Get the next node in the demotion path
 * @node: The starting node to lookup the next node
 *
 * Return: node id for next memory node in the demotion path hierarchy
 * from @node; NUMA_NO_NODE if @node is terminal. This does not keep
 * @node online or guarantee that it *continues* to be the next demotion
 * target.
 */
int next_demotion_node(int node)
{
	struct demotion_nodes *nd;
	unsigned short target_nr, index;
	int target;

	if (!node_demotion)
		return NUMA_NO_NODE;

	nd = &node_demotion[node];

	/*
	 * node_demotion[] is updated without excluding this
	 * function from running. RCU doesn't provide any
	 * compiler barriers, so the READ_ONCE() is required
	 * to avoid compiler reordering or read merging.
	 *
	 * Make sure to use RCU over entire code blocks if
	 * node_demotion[] reads need to be consistent.
	 */
	rcu_read_lock();
	target_nr = READ_ONCE(nd->nr);

	switch (target_nr) {
	case 0:
		target = NUMA_NO_NODE;
		goto out;
	case 1:
		index = 0;
		break;
	default:
		/*
		 * If there are multiple target nodes, just select one
		 * target node randomly.
		 *
		 * Round-robin selection would also work, but it would need
		 * an extra field in node_demotion[] to record the last
		 * selected target node, and updating that field could cause
		 * cache ping-pong (per-CPU state would avoid that, but is
		 * more complicated). Random selection is the simpler choice
		 * for now.
		 */
		index = get_random_int() % target_nr;
		break;
	}

	target = READ_ONCE(nd->nodes[index]);

out:
	rcu_read_unlock();
	return target;
}

#if defined(CONFIG_HOTPLUG_CPU)
/* Disable reclaim-based migration.
*/ 3027 static void __disable_all_migrate_targets(void) 3028 { 3029 int node, i; 3030 3031 if (!node_demotion) 3032 return; 3033 3034 for_each_online_node(node) { 3035 node_demotion[node].nr = 0; 3036 for (i = 0; i < DEMOTION_TARGET_NODES; i++) 3037 node_demotion[node].nodes[i] = NUMA_NO_NODE; 3038 } 3039 } 3040 3041 static void disable_all_migrate_targets(void) 3042 { 3043 __disable_all_migrate_targets(); 3044 3045 /* 3046 * Ensure that the "disable" is visible across the system. 3047 * Readers will see either a combination of before+disable 3048 * state or disable+after. They will never see before and 3049 * after state together. 3050 * 3051 * The before+after state together might have cycles and 3052 * could cause readers to do things like loop until this 3053 * function finishes. This ensures they can only see a 3054 * single "bad" read and would, for instance, only loop 3055 * once. 3056 */ 3057 synchronize_rcu(); 3058 } 3059 3060 /* 3061 * Find an automatic demotion target for 'node'. 3062 * Failing here is OK. It might just indicate 3063 * being at the end of a chain. 3064 */ 3065 static int establish_migrate_target(int node, nodemask_t *used, 3066 int best_distance) 3067 { 3068 int migration_target, index, val; 3069 struct demotion_nodes *nd; 3070 3071 if (!node_demotion) 3072 return NUMA_NO_NODE; 3073 3074 nd = &node_demotion[node]; 3075 3076 migration_target = find_next_best_node(node, used); 3077 if (migration_target == NUMA_NO_NODE) 3078 return NUMA_NO_NODE; 3079 3080 /* 3081 * If the node has been set a migration target node before, 3082 * which means it's the best distance between them. Still 3083 * check if this node can be demoted to other target nodes 3084 * if they have a same best distance. 3085 */ 3086 if (best_distance != -1) { 3087 val = node_distance(node, migration_target); 3088 if (val > best_distance) 3089 return NUMA_NO_NODE; 3090 } 3091 3092 index = nd->nr; 3093 if (WARN_ONCE(index >= DEMOTION_TARGET_NODES, 3094 "Exceeds maximum demotion target nodes\n")) 3095 return NUMA_NO_NODE; 3096 3097 nd->nodes[index] = migration_target; 3098 nd->nr++; 3099 3100 return migration_target; 3101 } 3102 3103 /* 3104 * When memory fills up on a node, memory contents can be 3105 * automatically migrated to another node instead of 3106 * discarded at reclaim. 3107 * 3108 * Establish a "migration path" which will start at nodes 3109 * with CPUs and will follow the priorities used to build the 3110 * page allocator zonelists. 3111 * 3112 * The difference here is that cycles must be avoided. If 3113 * node0 migrates to node1, then neither node1, nor anything 3114 * node1 migrates to can migrate to node0. Also one node can 3115 * be migrated to multiple nodes if the target nodes all have 3116 * a same best-distance against the source node. 3117 * 3118 * This function can run simultaneously with readers of 3119 * node_demotion[]. However, it can not run simultaneously 3120 * with itself. Exclusion is provided by memory hotplug events 3121 * being single-threaded. 3122 */ 3123 static void __set_migration_target_nodes(void) 3124 { 3125 nodemask_t next_pass = NODE_MASK_NONE; 3126 nodemask_t this_pass = NODE_MASK_NONE; 3127 nodemask_t used_targets = NODE_MASK_NONE; 3128 int node, best_distance; 3129 3130 /* 3131 * Avoid any oddities like cycles that could occur 3132 * from changes in the topology. This will leave 3133 * a momentary gap when migration is disabled. 3134 */ 3135 disable_all_migrate_targets(); 3136 3137 /* 3138 * Allocations go close to CPUs, first. 
Assume that 3139 * the migration path starts at the nodes with CPUs. 3140 */ 3141 next_pass = node_states[N_CPU]; 3142 again: 3143 this_pass = next_pass; 3144 next_pass = NODE_MASK_NONE; 3145 /* 3146 * To avoid cycles in the migration "graph", ensure 3147 * that migration sources are not future targets by 3148 * setting them in 'used_targets'. Do this only 3149 * once per pass so that multiple source nodes can 3150 * share a target node. 3151 * 3152 * 'used_targets' will become unavailable in future 3153 * passes. This limits some opportunities for 3154 * multiple source nodes to share a destination. 3155 */ 3156 nodes_or(used_targets, used_targets, this_pass); 3157 3158 for_each_node_mask(node, this_pass) { 3159 best_distance = -1; 3160 3161 /* 3162 * Try to set up the migration path for the node, and the target 3163 * migration nodes can be multiple, so doing a loop to find all 3164 * the target nodes if they all have a best node distance. 3165 */ 3166 do { 3167 int target_node = 3168 establish_migrate_target(node, &used_targets, 3169 best_distance); 3170 3171 if (target_node == NUMA_NO_NODE) 3172 break; 3173 3174 if (best_distance == -1) 3175 best_distance = node_distance(node, target_node); 3176 3177 /* 3178 * Visit targets from this pass in the next pass. 3179 * Eventually, every node will have been part of 3180 * a pass, and will become set in 'used_targets'. 3181 */ 3182 node_set(target_node, next_pass); 3183 } while (1); 3184 } 3185 /* 3186 * 'next_pass' contains nodes which became migration 3187 * targets in this pass. Make additional passes until 3188 * no more migrations targets are available. 3189 */ 3190 if (!nodes_empty(next_pass)) 3191 goto again; 3192 } 3193 3194 /* 3195 * For callers that do not hold get_online_mems() already. 3196 */ 3197 static void set_migration_target_nodes(void) 3198 { 3199 get_online_mems(); 3200 __set_migration_target_nodes(); 3201 put_online_mems(); 3202 } 3203 3204 /* 3205 * This leaves migrate-on-reclaim transiently disabled between 3206 * the MEM_GOING_OFFLINE and MEM_OFFLINE events. This runs 3207 * whether reclaim-based migration is enabled or not, which 3208 * ensures that the user can turn reclaim-based migration at 3209 * any time without needing to recalculate migration targets. 3210 * 3211 * These callbacks already hold get_online_mems(). That is why 3212 * __set_migration_target_nodes() can be used as opposed to 3213 * set_migration_target_nodes(). 3214 */ 3215 static int __meminit migrate_on_reclaim_callback(struct notifier_block *self, 3216 unsigned long action, void *_arg) 3217 { 3218 struct memory_notify *arg = _arg; 3219 3220 /* 3221 * Only update the node migration order when a node is 3222 * changing status, like online->offline. This avoids 3223 * the overhead of synchronize_rcu() in most cases. 3224 */ 3225 if (arg->status_change_nid < 0) 3226 return notifier_from_errno(0); 3227 3228 switch (action) { 3229 case MEM_GOING_OFFLINE: 3230 /* 3231 * Make sure there are not transient states where 3232 * an offline node is a migration target. This 3233 * will leave migration disabled until the offline 3234 * completes and the MEM_OFFLINE case below runs. 3235 */ 3236 disable_all_migrate_targets(); 3237 break; 3238 case MEM_OFFLINE: 3239 case MEM_ONLINE: 3240 /* 3241 * Recalculate the target nodes once the node 3242 * reaches its final state (online or offline). 3243 */ 3244 __set_migration_target_nodes(); 3245 break; 3246 case MEM_CANCEL_OFFLINE: 3247 /* 3248 * MEM_GOING_OFFLINE disabled all the migration 3249 * targets. Reenable them. 
3250 */ 3251 __set_migration_target_nodes(); 3252 break; 3253 case MEM_GOING_ONLINE: 3254 case MEM_CANCEL_ONLINE: 3255 break; 3256 } 3257 3258 return notifier_from_errno(0); 3259 } 3260 3261 /* 3262 * React to hotplug events that might affect the migration targets 3263 * like events that online or offline NUMA nodes. 3264 * 3265 * The ordering is also currently dependent on which nodes have 3266 * CPUs. That means we need CPU on/offline notification too. 3267 */ 3268 static int migration_online_cpu(unsigned int cpu) 3269 { 3270 set_migration_target_nodes(); 3271 return 0; 3272 } 3273 3274 static int migration_offline_cpu(unsigned int cpu) 3275 { 3276 set_migration_target_nodes(); 3277 return 0; 3278 } 3279 3280 static int __init migrate_on_reclaim_init(void) 3281 { 3282 int ret; 3283 3284 node_demotion = kmalloc_array(nr_node_ids, 3285 sizeof(struct demotion_nodes), 3286 GFP_KERNEL); 3287 WARN_ON(!node_demotion); 3288 3289 ret = cpuhp_setup_state_nocalls(CPUHP_MM_DEMOTION_DEAD, "mm/demotion:offline", 3290 NULL, migration_offline_cpu); 3291 /* 3292 * In the unlikely case that this fails, the automatic 3293 * migration targets may become suboptimal for nodes 3294 * where N_CPU changes. With such a small impact in a 3295 * rare case, do not bother trying to do anything special. 3296 */ 3297 WARN_ON(ret < 0); 3298 ret = cpuhp_setup_state(CPUHP_AP_MM_DEMOTION_ONLINE, "mm/demotion:online", 3299 migration_online_cpu, NULL); 3300 WARN_ON(ret < 0); 3301 3302 hotplug_memory_notifier(migrate_on_reclaim_callback, 100); 3303 return 0; 3304 } 3305 late_initcall(migrate_on_reclaim_init); 3306 #endif /* CONFIG_HOTPLUG_CPU */ 3307 3308 bool numa_demotion_enabled = false; 3309 3310 #ifdef CONFIG_SYSFS 3311 static ssize_t numa_demotion_enabled_show(struct kobject *kobj, 3312 struct kobj_attribute *attr, char *buf) 3313 { 3314 return sysfs_emit(buf, "%s\n", 3315 numa_demotion_enabled ? "true" : "false"); 3316 } 3317 3318 static ssize_t numa_demotion_enabled_store(struct kobject *kobj, 3319 struct kobj_attribute *attr, 3320 const char *buf, size_t count) 3321 { 3322 if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1)) 3323 numa_demotion_enabled = true; 3324 else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1)) 3325 numa_demotion_enabled = false; 3326 else 3327 return -EINVAL; 3328 3329 return count; 3330 } 3331 3332 static struct kobj_attribute numa_demotion_enabled_attr = 3333 __ATTR(demotion_enabled, 0644, numa_demotion_enabled_show, 3334 numa_demotion_enabled_store); 3335 3336 static struct attribute *numa_attrs[] = { 3337 &numa_demotion_enabled_attr.attr, 3338 NULL, 3339 }; 3340 3341 static const struct attribute_group numa_attr_group = { 3342 .attrs = numa_attrs, 3343 }; 3344 3345 static int __init numa_init_sysfs(void) 3346 { 3347 int err; 3348 struct kobject *numa_kobj; 3349 3350 numa_kobj = kobject_create_and_add("numa", mm_kobj); 3351 if (!numa_kobj) { 3352 pr_err("failed to create numa kobject\n"); 3353 return -ENOMEM; 3354 } 3355 err = sysfs_create_group(numa_kobj, &numa_attr_group); 3356 if (err) { 3357 pr_err("failed to register numa group\n"); 3358 goto delete_obj; 3359 } 3360 return 0; 3361 3362 delete_obj: 3363 kobject_put(numa_kobj); 3364 return err; 3365 } 3366 subsys_initcall(numa_init_sysfs); 3367 #endif 3368