// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are still under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount, preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because the page's owner
	 * assumes that nobody touches the PG_lock of a newly allocated page,
	 * so unconditionally grabbing the lock would break the owner's
	 * assumptions.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as against a page being released.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}
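
/*
 * Undo a successful isolate_movable_page(): the caller must hold the page
 * lock and the page must still be PageMovable().  The driver's
 * putback_page() callback hands the page back to its owner, after which
 * PG_isolated is cleared.
 */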
static void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	ClearPageIsolated(page);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated non-lru movable page so here we can use
		 * __PageMovable because LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -thp_nr_pages(page));
			putback_lru_page(page);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct page_vma_mapped_walk pvmw = {
		.page = old,
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};
	struct page *new;
	pte_t pte;
	swp_entry_t entry;

	VM_BUG_ON_PAGE(PageTail(page), page);
	while (page_vma_mapped_walk(&pvmw)) {
		if (PageKsm(page))
			new = page;
		else
			new = page - pvmw.page->index +
				linear_page_index(vma, pvmw.address);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		get_page(new);
		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (is_writable_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(*pvmw.pte))
			pte = pte_mkuffd_wp(pte);

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*pvmw.pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*pvmw.pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (PageHuge(new)) {
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (PageAnon(new))
				hugepage_add_anon_rmap(new, vma, pvmw.address);
			else
				page_dup_rmap(new, true);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		} else
#endif
		{
			if (PageAnon(new))
				page_add_anon_rmap(new, vma, pvmw.address, false);
			else
				page_add_file_rmap(new, false);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
			mlock_vma_page(new);

		if (PageTransHuge(page) && PageMlocked(page))
			clear_page_mlock(page);

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptep, ptl);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

static int expected_page_refs(struct address_space *mapping, struct page *page)
{
	int expected_count = 1;

	/*
	 * Device private pages have an extra refcount as they are
	 * ZONE_DEVICE pages.
	 */
	expected_count += is_device_private_page(page);
	if (mapping)
		expected_count += compound_nr(page) + page_has_private(page);

	return expected_count;
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = expected_page_refs(mapping, &folio->page) + extra_count;
	long nr = folio_nr_pages(folio);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	xas_store(&xas, newfolio);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	XA_STATE(xas, &mapping->i_pages, page_index(page));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count || xas_load(&xas) != page) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);

	xas_store(&xas, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(&folio->page, -1);
	page_cpupid_xchg_last(&newfolio->page, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
	 * bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	if (!folio_test_hugetlb(folio))
		mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct folio *newfolio = page_folio(newpage);
	struct folio *folio = page_folio(page);
	int rc;

	BUG_ON(folio_test_writeback(folio));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, newfolio, folio, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(newfolio, folio);
	else
		folio_migrate_flags(newfolio, folio);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
					enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks
			 */
			struct buffer_head *failed_bh = bh;
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}

static int __buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = expected_page_refs(mapping, page);
	if (page_count(page) != expected_count)
		return -EAGAIN;

	head = page_buffers(page);
	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	attach_page_private(newpage, detach_page_private(page));

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return rc;
}

/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist. For example attached buffer heads are accessed only under page lock.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return __buffer_migrate_page(mapping, newpage, page, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_page);

/*
 * Same as above except that this variant is more careful and checks that there
 * are also no buffer head references. This function is the right one for
 * mappings where buffer heads are directly looked up and referenced (such as
 * block device mappings).
 */
int buffer_migrate_page_norefs(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return __buffer_migrate_page(mapping, newpage, page, mode, true);
}
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page, false);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);

	if (likely(is_lru)) {
		if (!mapping)
			rc = migrate_page(mapping, newpage, page, mode);
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * provide a migratepage callback. Anonymous pages
			 * are part of swap space which also has its own
			 * migratepage callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping, newpage,
							page, mode);
		else
			rc = fallback_migrate_page(mapping, newpage,
							page, mode);
	} else {
		/*
		 * In case of non-lru page, it could be released after
		 * isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_PAGE(!PageIsolated(page), page);
		if (!PageMovable(page)) {
			rc = MIGRATEPAGE_SUCCESS;
			ClearPageIsolated(page);
			goto out;
		}

		rc = mapping->a_ops->migratepage(mapping, newpage,
						page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
			!PageIsolated(page));
	}

	/*
	 * When successful, old pagecache page->mapping must be cleared before
	 * page is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(page)) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			ClearPageIsolated(page);
		}

		/*
		 * Anonymous and movable page->mapping will be cleared by
		 * free_pages_prepare so don't reset it here; keeping it
		 * preserves the type, e.g. so PageAnon still works.
		 */
		if (!PageMappingFlags(page))
			page->mapping = NULL;

		if (likely(!is_zone_device_page(newpage)))
			flush_dcache_folio(page_folio(newpage));
	}
out:
	return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool page_was_mapped = false;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(page);

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readahead). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * By try_to_migrate(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the end
	 * of migration. File cache pages are no problem because of page_lock():
	 * file caches may use write_page() or lock_page() in migration, so we
	 * only need to care about anon pages here.
	 *
	 * Only page_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to newpage at this point. We used to have a BUG
	 * here if trylock_page(newpage) fails, but would like to allow for
	 * cases where there might be a race with the previous use of newpage.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_page(newpage, page, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_cleanup_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto out_unlock_both;
		}
	} else if (page_mapped(page)) {
		/* Establish migration ptes */
		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
				page);
		try_to_migrate(page, 0);
		page_was_mapped = true;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	if (page_was_mapped)
		remove_migration_ptes(page,
			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(page);
out:
	/*
	 * If migration is successful, decrease the refcount of the newpage,
	 * which will not free the page because the new page owner increased
	 * the refcounter. As well, if it is an LRU page, add the page to the
	 * LRU list here. Use the old state of the isolated source page to
	 * determine if we migrated a LRU page. newpage was already unlocked
	 * and possibly modified by its owner - don't rely on the page
	 * state.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (unlikely(!is_lru))
			put_page(newpage);
		else
			putback_lru_page(newpage);
	}

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page,
				free_page_t put_new_page,
				unsigned long private, struct page *page,
				int force, enum migrate_mode mode,
				enum migrate_reason reason,
				struct list_head *ret)
{
	int rc = MIGRATEPAGE_SUCCESS;
	struct page *newpage = NULL;

	if (!thp_migration_supported() && PageTransHuge(page))
		return -ENOSYS;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		ClearPageActive(page);
		ClearPageUnevictable(page);
		if (unlikely(__PageMovable(page))) {
			lock_page(page);
			if (!PageMovable(page))
				ClearPageIsolated(page);
			unlock_page(page);
		}
		goto out;
	}

	newpage = get_new_page(page, private);
	if (!newpage)
		return -ENOMEM;

	rc = __unmap_and_move(page, newpage, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		set_page_owner_migrate_reason(newpage, reason);

out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be restored.
		 */
		list_del(&page->lru);
	}

	/*
	 * If migration is successful, release the reference grabbed during
	 * isolation. Otherwise, restore the page to the right list unless
	 * we want to retry.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		/*
		 * Compaction can migrate also non-LRU pages which are
		 * not accounted to NR_ISOLATED_*. They can be recognized
		 * as __PageMovable
		 */
		if (likely(!__PageMovable(page)))
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -thp_nr_pages(page));

		if (reason != MR_MEMORY_FAILURE)
			/*
			 * We release the page in page_handle_poison.
			 */
			put_page(page);
	} else {
		if (rc != -EAGAIN)
			list_add_tail(&page->lru, ret);

		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
	}

	return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepages.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference count of the head page is 512 and a bit
 * more.)  This means that when we try to migrate a hugepage whose subpages
 * are doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode, int reason,
				struct list_head *ret)
{
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;

	/*
	 * Migratability of hugepages depends on architectures and their size.
	 * This check is necessary because some callers of hugepage migration
	 * like soft offline and memory hotremove don't walk through page
	 * tables or check whether the hugepage is pmd-based or not before
	 * kicking migration.
	 */
	if (!hugepage_migration_supported(page_hstate(hpage))) {
		list_move_tail(&hpage->lru, ret);
		return -ENOSYS;
	}

	if (page_count(hpage) == 1) {
		/* page was freed from under us. So we are done. */
		putback_active_hugepage(hpage);
		return MIGRATEPAGE_SUCCESS;
	}

	new_hpage = get_new_page(hpage, private);
	if (!new_hpage)
		return -ENOMEM;

	if (!trylock_page(hpage)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		lock_page(hpage);
	}

	/*
	 * Check for pages which are in the process of being freed. Without
	 * page_mapping() set, hugetlbfs specific move page routine will not
	 * be called and we could leak usage counts for subpools.
	 */
	if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	if (unlikely(!trylock_page(new_hpage)))
		goto put_anon;

	if (page_mapped(hpage)) {
		bool mapping_locked = false;
		enum ttu_flags ttu = 0;

		if (!PageAnon(hpage)) {
			/*
			 * In shared mappings, try_to_unmap could potentially
			 * call huge_pmd_unshare. Because of this, take
			 * semaphore in write mode here and set TTU_RMAP_LOCKED
			 * to let lower levels know we have taken the lock.
			 */
			mapping = hugetlb_page_mapping_lock_write(hpage);
			if (unlikely(!mapping))
				goto unlock_put_anon;

			mapping_locked = true;
			ttu |= TTU_RMAP_LOCKED;
		}

		try_to_migrate(hpage, ttu);
		page_was_mapped = 1;

		if (mapping_locked)
			i_mmap_unlock_write(mapping);
	}

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, mode);

	if (page_was_mapped)
		remove_migration_ptes(hpage,
			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);

unlock_put_anon:
	unlock_page(new_hpage);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(hpage, new_hpage, reason);
		put_new_page = NULL;
	}

out_unlock:
	unlock_page(hpage);
out:
	if (rc == MIGRATEPAGE_SUCCESS)
		putback_active_hugepage(hpage);
	else if (rc != -EAGAIN)
		list_move_tail(&hpage->lru, ret);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it.  Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_page)
		put_new_page(new_hpage, private);
	else
		putback_active_hugepage(new_hpage);

	return rc;
}

static inline int try_split_thp(struct page *page, struct page **page2,
				struct list_head *from)
{
	int rc = 0;

	lock_page(page);
	rc = split_huge_page_to_list(page, from);
	unlock_page(page);
	if (!rc)
		list_safe_reset_next(page, *page2, lru);

	return rc;
}

/*
 * migrate_pages - migrate the pages specified in a list, to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @put_new_page:	The function used to free target pages if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 * @ret_succeeded:	Set to the number of normal pages migrated successfully if
 *			the caller passes a non-NULL pointer.
 *
 * The function returns after 10 attempts or if no pages are movable any more
 * because the list has become empty or no retryable pages exist any more.
 * It is the caller's responsibility to call putback_movable_pages() to return
 * pages to the LRU or free list only if ret != 0.
 *
 * Returns the number of {normal page, THP, hugetlb} that were not migrated, or
 * an error code. The number of THP splits is counted as the number of
 * non-migrated THPs, no matter how many subpages of a split THP are migrated
 * successfully.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
	int retry = 1;
	int thp_retry = 1;
	int nr_failed = 0;
	int nr_failed_pages = 0;
	int nr_succeeded = 0;
	int nr_thp_succeeded = 0;
	int nr_thp_failed = 0;
	int nr_thp_split = 0;
	int pass = 0;
	bool is_thp = false;
	struct page *page;
	struct page *page2;
	int rc, nr_subpages;
	LIST_HEAD(ret_pages);
	LIST_HEAD(thp_split_pages);
	bool nosplit = (reason == MR_NUMA_MISPLACED);
	bool no_subpage_counting = false;

	trace_mm_migrate_pages_start(mode, reason);

thp_subpage_migration:
	for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
		retry = 0;
		thp_retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
retry:
			/*
			 * THP statistics are based on the source huge page.
			 * Capture required information that might get lost
			 * during migration.
			 */
			is_thp = PageTransHuge(page) && !PageHuge(page);
			nr_subpages = compound_nr(page);
			cond_resched();

			if (PageHuge(page))
				rc = unmap_and_move_huge_page(get_new_page,
						put_new_page, private, page,
						pass > 2, mode, reason,
						&ret_pages);
			else
				rc = unmap_and_move(get_new_page, put_new_page,
						private, page, pass > 2, mode,
						reason, &ret_pages);
			/*
			 * The rules are:
			 *	Success: non hugetlb page will be freed, hugetlb
			 *		 page will be put back
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_pages list then splice to
			 *		     from list
			 */
			switch(rc) {
			/*
			 * THP migration might be unsupported or the
			 * allocation could've failed so we should
			 * retry on the same page with the THP split
			 * to base pages.
			 *
			 * Head page is retried immediately and tail
			 * pages are added to the tail of the list so
			 * we encounter them after the rest of the list
			 * is processed.
			 */
			case -ENOSYS:
				/* THP migration is unsupported */
				if (is_thp) {
					nr_thp_failed++;
					if (!try_split_thp(page, &page2, &thp_split_pages)) {
						nr_thp_split++;
						goto retry;
					}

					nr_failed_pages += nr_subpages;
					break;
				}

				/* Hugetlb migration is unsupported */
				if (!no_subpage_counting)
					nr_failed++;
				nr_failed_pages += nr_subpages;
				break;
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other pages, just exit.
				 * THP NUMA faulting doesn't split THP to retry.
				 */
				if (is_thp && !nosplit) {
					nr_thp_failed++;
					if (!try_split_thp(page, &page2, &thp_split_pages)) {
						nr_thp_split++;
						goto retry;
					}

					nr_failed_pages += nr_subpages;
					goto out;
				}

				if (!no_subpage_counting)
					nr_failed++;
				nr_failed_pages += nr_subpages;
				goto out;
			case -EAGAIN:
				if (is_thp) {
					thp_retry++;
					break;
				}
				retry++;
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded += nr_subpages;
				if (is_thp) {
					nr_thp_succeeded++;
					break;
				}
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike -EAGAIN case, the failed page is
				 * removed from migration page list and not
				 * retried in the next outer loop.
				 */
				if (is_thp) {
					nr_thp_failed++;
					nr_failed_pages += nr_subpages;
					break;
				}

				if (!no_subpage_counting)
					nr_failed++;
				nr_failed_pages += nr_subpages;
				break;
			}
		}
	}
	nr_failed += retry;
	nr_thp_failed += thp_retry;
	/*
	 * Try to migrate subpages of fail-to-migrate THPs, no nr_failed
	 * counting in this round, since all subpages of a THP are counted
	 * as one failure in the first round.
	 */
	if (!list_empty(&thp_split_pages)) {
		/*
		 * Move non-migrated pages (after 10 retries) to ret_pages
		 * to avoid migrating them again.
		 */
		list_splice_init(from, &ret_pages);
		list_splice_init(&thp_split_pages, from);
		no_subpage_counting = true;
		retry = 1;
		goto thp_subpage_migration;
	}

	rc = nr_failed + nr_thp_failed;
out:
	/*
	 * Put the permanently failed pages back on the migration list; they
	 * will be put back on the right list by the caller.
	 */
	list_splice(&ret_pages, from);

	count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
	count_vm_events(PGMIGRATE_FAIL, nr_failed_pages);
	count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
	count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
	count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
	trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded,
			       nr_thp_failed, nr_thp_split, mode, reason);

	if (ret_succeeded)
		*ret_succeeded = nr_succeeded;

	return rc;
}
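
/*
 * A typical caller pairs migrate_pages() with alloc_migration_target() and a
 * struct migration_target_control describing the destination node; see
 * do_move_pages_to_node() below for the real thing.  Illustrative sketch:
 *
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *	};
 *
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */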
struct page *alloc_migration_target(struct page *page, unsigned long private)
{
	struct migration_target_control *mtc;
	gfp_t gfp_mask;
	unsigned int order = 0;
	struct page *new_page = NULL;
	int nid;
	int zidx;

	mtc = (struct migration_target_control *)private;
	gfp_mask = mtc->gfp_mask;
	nid = mtc->nid;
	if (nid == NUMA_NO_NODE)
		nid = page_to_nid(page);

	if (PageHuge(page)) {
		struct hstate *h = page_hstate(compound_head(page));

		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
	}

	if (PageTransHuge(page)) {
		/*
		 * clear __GFP_RECLAIM to make the migration callback
		 * consistent with regular THP allocations.
		 */
		gfp_mask &= ~__GFP_RECLAIM;
		gfp_mask |= GFP_TRANSHUGE;
		order = HPAGE_PMD_ORDER;
	}
	zidx = zone_idx(page_zone(page));
	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
		gfp_mask |= __GFP_HIGHMEM;

	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);

	if (new_page && PageTransHuge(new_page))
		prep_transhuge_page(new_page);

	return new_page;
}

#ifdef CONFIG_NUMA

static int store_status(int __user *status, int start, int value, int nr)
{
	while (nr-- > 0) {
		if (put_user(value, status + start))
			return -EFAULT;
		start++;
	}

	return 0;
}

static int do_move_pages_to_node(struct mm_struct *mm,
		struct list_head *pagelist, int node)
{
	int err;
	struct migration_target_control mtc = {
		.nid = node,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}

/*
 * Resolves the given address to a struct page, isolates it from the LRU and
 * puts it to the given pagelist.
 * Returns:
 *     errno - if the page cannot be found/isolated
 *     0 - when it doesn't have to be migrated because it is already on the
 *         target node
 *     1 - when it has been queued
 */
static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
		int node, struct list_head *pagelist, bool migrate_all)
{
	struct vm_area_struct *vma;
	struct page *page;
	int err;

	mmap_read_lock(mm);
	err = -EFAULT;
	vma = find_vma(mm, addr);
	if (!vma || addr < vma->vm_start || !vma_migratable(vma))
		goto out;

	/* FOLL_DUMP to ignore special (like zero) pages */
	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

	err = PTR_ERR(page);
	if (IS_ERR(page))
		goto out;

	err = -ENOENT;
	if (!page)
		goto out;

	err = 0;
	if (page_to_nid(page) == node)
		goto out_putpage;

	err = -EACCES;
	if (page_mapcount(page) > 1 && !migrate_all)
		goto out_putpage;

	if (PageHuge(page)) {
		if (PageHead(page)) {
			isolate_huge_page(page, pagelist);
			err = 1;
		}
	} else {
		struct page *head;

		head = compound_head(page);
		err = isolate_lru_page(head);
		if (err)
			goto out_putpage;

		err = 1;
		list_add_tail(&head->lru, pagelist);
		mod_node_page_state(page_pgdat(head),
			NR_ISOLATED_ANON + page_is_file_lru(head),
			thp_nr_pages(head));
	}
out_putpage:
	/*
	 * Either remove the duplicate refcount from
	 * isolate_lru_page() or drop the page ref if it was
	 * not isolated.
	 */
	put_page(page);
out:
	mmap_read_unlock(mm);
	return err;
}

static int move_pages_and_store_status(struct mm_struct *mm, int node,
		struct list_head *pagelist, int __user *status,
		int start, int i, unsigned long nr_pages)
{
	int err;

	if (list_empty(pagelist))
		return 0;

	err = do_move_pages_to_node(mm, pagelist, node);
	if (err) {
		/*
		 * A positive err means the number of pages that failed
		 * to migrate.  Since we are going to abort and return
		 * the number of non-migrated pages, we need to include
		 * the rest of the nr_pages that have not been attempted
		 * as well.
		 */
		if (err > 0)
			err += nr_pages - i - 1;
		return err;
	}
	return store_status(status, start, node, i - start);
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	int current_node = NUMA_NO_NODE;
	LIST_HEAD(pagelist);
	int start, i;
	int err = 0, err1;

	lru_cache_disable();

	for (i = start = 0; i < nr_pages; i++) {
		const void __user *p;
		unsigned long addr;
		int node;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out_flush;
		if (get_user(node, nodes + i))
			goto out_flush;
		addr = (unsigned long)untagged_addr(p);

		err = -ENODEV;
		if (node < 0 || node >= MAX_NUMNODES)
			goto out_flush;
		if (!node_state(node, N_MEMORY))
			goto out_flush;

		err = -EACCES;
		if (!node_isset(node, task_nodes))
			goto out_flush;

		if (current_node == NUMA_NO_NODE) {
			current_node = node;
			start = i;
		} else if (node != current_node) {
			err = move_pages_and_store_status(mm, current_node,
					&pagelist, status, start, i, nr_pages);
			if (err)
				goto out;
			start = i;
			current_node = node;
		}

		/*
		 * Errors in the page lookup or isolation are not fatal and we simply
		 * report them via status
		 */
		err = add_page_for_migration(mm, addr, current_node,
				&pagelist, flags & MPOL_MF_MOVE_ALL);

		if (err > 0) {
			/* The page is successfully queued for migration */
			continue;
		}

		/*
		 * The move_pages() man page does not have an -EEXIST choice, so
		 * use -EFAULT instead.
		 */
		if (err == -EEXIST)
			err = -EFAULT;

		/*
		 * If the page is already on the target node (!err), store the
		 * node, otherwise, store the err.
		 */
		err = store_status(status, i, err ? : current_node, 1);
		if (err)
			goto out_flush;

		err = move_pages_and_store_status(mm, current_node, &pagelist,
				status, start, i, nr_pages);
		if (err)
			goto out;
		current_node = NUMA_NO_NODE;
	}
out_flush:
	/* Make sure we do not overwrite the existing error */
	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
				status, start, i, nr_pages);
	if (err >= 0)
		err = err1;
out:
	lru_cache_enable();
	return err;
}

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	mmap_read_lock(mm);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = vma_lookup(mm, addr);
		if (!vma)
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = page ? page_to_nid(page) : -ENOENT;
set_status:
		*status = err;

		pages++;
		status++;
	}

	mmap_read_unlock(mm);
}

static int get_compat_pages_array(const void __user *chunk_pages[],
				  const void __user * __user *pages,
				  unsigned long chunk_nr)
{
	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
	compat_uptr_t p;
	int i;

	for (i = 0; i < chunk_nr; i++) {
		if (get_user(p, pages32 + i))
			return -EFAULT;
		chunk_pages[i] = compat_ptr(p);
	}

	return 0;
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (in_compat_syscall()) {
			if (get_compat_pages_array(chunk_pages, pages,
						   chunk_nr))
				break;
		} else {
			if (copy_from_user(chunk_pages, pages,
				      chunk_nr * sizeof(*chunk_pages)))
				break;
		}

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
{
	struct task_struct *task;
	struct mm_struct *mm;

	/*
	 * There is no need to check if the current process has the right to
	 * modify the specified process when they are the same.
	 */
	if (!pid) {
		mmget(current->mm);
		*mem_nodes = cpuset_mems_allowed(current);
		return current->mm;
	}

	/* Find the mm_struct */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		return ERR_PTR(-ESRCH);
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		mm = ERR_PTR(-EPERM);
		goto out;
	}
	rcu_read_unlock();

	mm = ERR_PTR(security_task_movememory(task));
	if (IS_ERR(mm))
		goto out;
	*mem_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
out:
	put_task_struct(task);
	if (!mm)
		mm = ERR_PTR(-EINVAL);
	return mm;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
			     const void __user * __user *pages,
			     const int __user *nodes,
			     int __user *status, int flags)
{
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	mm = find_mm_struct(pid, &task_nodes);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;
}

SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}
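
/*
 * From userspace this syscall is normally reached through the libnuma
 * wrapper declared in <numaif.h>.  A minimal sketch (illustrative only,
 * error handling omitted):
 *
 *	#include <numaif.h>
 *
 *	void *pages[1] = { some_addr };
 *	int nodes[1] = { 1 };		// request a move to node 1
 *	int status[1];
 *
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *	// rc == 0: status[0] holds the node the page ended up on,
 *	// or a negative errno for that page.
 *
 * Passing nodes == NULL only queries the current node of each page.
 */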

#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       ZONE_MOVABLE, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					     unsigned long data)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = __alloc_pages_node(nid,
					 (GFP_HIGHUSER_MOVABLE |
					  __GFP_THISNODE | __GFP_NOMEMALLOC |
					  __GFP_NORETRY | __GFP_NOWARN) &
					 ~__GFP_RECLAIM, 0);

	return newpage;
}

static struct page *alloc_misplaced_dst_page_thp(struct page *page,
						 unsigned long data)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
				   HPAGE_PMD_ORDER);
	if (!newpage)
		goto out;

	prep_transhuge_page(newpage);

out:
	return newpage;
}

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int page_lru;
	int nr_pages = thp_nr_pages(page);
	int order = compound_order(page);

	VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);

	/* Do not migrate THP mapped by multiple processes */
	if (PageTransHuge(page) && total_mapcount(page) > 1)
		return 0;

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
		int z;

		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
			return 0;
		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
			if (populated_zone(pgdat->node_zones + z))
				break;
		}
		wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
		return 0;
	}

	if (isolate_lru_page(page))
		return 0;

	page_lru = page_is_file_lru(page);
	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
			    nr_pages);

	/*
	 * Isolating the page has taken another reference, so the
page has taken another reference, so the 2066 * caller's reference can be safely dropped without the page 2067 * disappearing underneath us during migration. 2068 */ 2069 put_page(page); 2070 return 1; 2071 } 2072 2073 /* 2074 * Attempt to migrate a misplaced page to the specified destination 2075 * node. Caller is expected to have an elevated reference count on 2076 * the page that will be dropped by this function before returning. 2077 */ 2078 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, 2079 int node) 2080 { 2081 pg_data_t *pgdat = NODE_DATA(node); 2082 int isolated; 2083 int nr_remaining; 2084 unsigned int nr_succeeded; 2085 LIST_HEAD(migratepages); 2086 new_page_t *new; 2087 bool compound; 2088 int nr_pages = thp_nr_pages(page); 2089 2090 /* 2091 * PTE mapped THP or HugeTLB page can't reach here so the page could 2092 * be either base page or THP. And it must be head page if it is 2093 * THP. 2094 */ 2095 compound = PageTransHuge(page); 2096 2097 if (compound) 2098 new = alloc_misplaced_dst_page_thp; 2099 else 2100 new = alloc_misplaced_dst_page; 2101 2102 /* 2103 * Don't migrate file pages that are mapped in multiple processes 2104 * with execute permissions as they are probably shared libraries. 2105 */ 2106 if (page_mapcount(page) != 1 && page_is_file_lru(page) && 2107 (vma->vm_flags & VM_EXEC)) 2108 goto out; 2109 2110 /* 2111 * Also do not migrate dirty pages as not all filesystems can move 2112 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles. 2113 */ 2114 if (page_is_file_lru(page) && PageDirty(page)) 2115 goto out; 2116 2117 isolated = numamigrate_isolate_page(pgdat, page); 2118 if (!isolated) 2119 goto out; 2120 2121 list_add(&page->lru, &migratepages); 2122 nr_remaining = migrate_pages(&migratepages, *new, NULL, node, 2123 MIGRATE_ASYNC, MR_NUMA_MISPLACED, 2124 &nr_succeeded); 2125 if (nr_remaining) { 2126 if (!list_empty(&migratepages)) { 2127 list_del(&page->lru); 2128 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 2129 page_is_file_lru(page), -nr_pages); 2130 putback_lru_page(page); 2131 } 2132 isolated = 0; 2133 } 2134 if (nr_succeeded) { 2135 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded); 2136 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node)) 2137 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS, 2138 nr_succeeded); 2139 } 2140 BUG_ON(!list_empty(&migratepages)); 2141 return isolated; 2142 2143 out: 2144 put_page(page); 2145 return 0; 2146 } 2147 #endif /* CONFIG_NUMA_BALANCING */ 2148 #endif /* CONFIG_NUMA */ 2149 2150 #ifdef CONFIG_DEVICE_PRIVATE 2151 static int migrate_vma_collect_skip(unsigned long start, 2152 unsigned long end, 2153 struct mm_walk *walk) 2154 { 2155 struct migrate_vma *migrate = walk->private; 2156 unsigned long addr; 2157 2158 for (addr = start; addr < end; addr += PAGE_SIZE) { 2159 migrate->dst[migrate->npages] = 0; 2160 migrate->src[migrate->npages++] = 0; 2161 } 2162 2163 return 0; 2164 } 2165 2166 static int migrate_vma_collect_hole(unsigned long start, 2167 unsigned long end, 2168 __always_unused int depth, 2169 struct mm_walk *walk) 2170 { 2171 struct migrate_vma *migrate = walk->private; 2172 unsigned long addr; 2173 2174 /* Only allow populating anonymous memory. 
*/ 2175 if (!vma_is_anonymous(walk->vma)) 2176 return migrate_vma_collect_skip(start, end, walk); 2177 2178 for (addr = start; addr < end; addr += PAGE_SIZE) { 2179 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; 2180 migrate->dst[migrate->npages] = 0; 2181 migrate->npages++; 2182 migrate->cpages++; 2183 } 2184 2185 return 0; 2186 } 2187 2188 static int migrate_vma_collect_pmd(pmd_t *pmdp, 2189 unsigned long start, 2190 unsigned long end, 2191 struct mm_walk *walk) 2192 { 2193 struct migrate_vma *migrate = walk->private; 2194 struct vm_area_struct *vma = walk->vma; 2195 struct mm_struct *mm = vma->vm_mm; 2196 unsigned long addr = start, unmapped = 0; 2197 spinlock_t *ptl; 2198 pte_t *ptep; 2199 2200 again: 2201 if (pmd_none(*pmdp)) 2202 return migrate_vma_collect_hole(start, end, -1, walk); 2203 2204 if (pmd_trans_huge(*pmdp)) { 2205 struct page *page; 2206 2207 ptl = pmd_lock(mm, pmdp); 2208 if (unlikely(!pmd_trans_huge(*pmdp))) { 2209 spin_unlock(ptl); 2210 goto again; 2211 } 2212 2213 page = pmd_page(*pmdp); 2214 if (is_huge_zero_page(page)) { 2215 spin_unlock(ptl); 2216 split_huge_pmd(vma, pmdp, addr); 2217 if (pmd_trans_unstable(pmdp)) 2218 return migrate_vma_collect_skip(start, end, 2219 walk); 2220 } else { 2221 int ret; 2222 2223 get_page(page); 2224 spin_unlock(ptl); 2225 if (unlikely(!trylock_page(page))) 2226 return migrate_vma_collect_skip(start, end, 2227 walk); 2228 ret = split_huge_page(page); 2229 unlock_page(page); 2230 put_page(page); 2231 if (ret) 2232 return migrate_vma_collect_skip(start, end, 2233 walk); 2234 if (pmd_none(*pmdp)) 2235 return migrate_vma_collect_hole(start, end, -1, 2236 walk); 2237 } 2238 } 2239 2240 if (unlikely(pmd_bad(*pmdp))) 2241 return migrate_vma_collect_skip(start, end, walk); 2242 2243 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); 2244 arch_enter_lazy_mmu_mode(); 2245 2246 for (; addr < end; addr += PAGE_SIZE, ptep++) { 2247 unsigned long mpfn = 0, pfn; 2248 struct page *page; 2249 swp_entry_t entry; 2250 pte_t pte; 2251 2252 pte = *ptep; 2253 2254 if (pte_none(pte)) { 2255 if (vma_is_anonymous(vma)) { 2256 mpfn = MIGRATE_PFN_MIGRATE; 2257 migrate->cpages++; 2258 } 2259 goto next; 2260 } 2261 2262 if (!pte_present(pte)) { 2263 /* 2264 * Only care about unaddressable device page special 2265 * page table entry. Other special swap entries are not 2266 * migratable, and we ignore regular swapped page. 2267 */ 2268 entry = pte_to_swp_entry(pte); 2269 if (!is_device_private_entry(entry)) 2270 goto next; 2271 2272 page = pfn_swap_entry_to_page(entry); 2273 if (!(migrate->flags & 2274 MIGRATE_VMA_SELECT_DEVICE_PRIVATE) || 2275 page->pgmap->owner != migrate->pgmap_owner) 2276 goto next; 2277 2278 mpfn = migrate_pfn(page_to_pfn(page)) | 2279 MIGRATE_PFN_MIGRATE; 2280 if (is_writable_device_private_entry(entry)) 2281 mpfn |= MIGRATE_PFN_WRITE; 2282 } else { 2283 if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) 2284 goto next; 2285 pfn = pte_pfn(pte); 2286 if (is_zero_pfn(pfn)) { 2287 mpfn = MIGRATE_PFN_MIGRATE; 2288 migrate->cpages++; 2289 goto next; 2290 } 2291 page = vm_normal_page(migrate->vma, addr, pte); 2292 mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; 2293 mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0; 2294 } 2295 2296 /* FIXME support THP */ 2297 if (!page || !page->mapping || PageTransCompound(page)) { 2298 mpfn = 0; 2299 goto next; 2300 } 2301 2302 /* 2303 * By getting a reference on the page we pin it and that blocks 2304 * any kind of migration. Side effect is that it "freezes" the 2305 * pte. 
2306 * 2307 * We drop this reference after isolating the page from the lru 2308 * for non device page (device page are not on the lru and thus 2309 * can't be dropped from it). 2310 */ 2311 get_page(page); 2312 2313 /* 2314 * Optimize for the common case where page is only mapped once 2315 * in one process. If we can lock the page, then we can safely 2316 * set up a special migration page table entry now. 2317 */ 2318 if (trylock_page(page)) { 2319 pte_t swp_pte; 2320 2321 migrate->cpages++; 2322 ptep_get_and_clear(mm, addr, ptep); 2323 2324 /* Setup special migration page table entry */ 2325 if (mpfn & MIGRATE_PFN_WRITE) 2326 entry = make_writable_migration_entry( 2327 page_to_pfn(page)); 2328 else 2329 entry = make_readable_migration_entry( 2330 page_to_pfn(page)); 2331 swp_pte = swp_entry_to_pte(entry); 2332 if (pte_present(pte)) { 2333 if (pte_soft_dirty(pte)) 2334 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2335 if (pte_uffd_wp(pte)) 2336 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2337 } else { 2338 if (pte_swp_soft_dirty(pte)) 2339 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2340 if (pte_swp_uffd_wp(pte)) 2341 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2342 } 2343 set_pte_at(mm, addr, ptep, swp_pte); 2344 2345 /* 2346 * This is like regular unmap: we remove the rmap and 2347 * drop page refcount. Page won't be freed, as we took 2348 * a reference just above. 2349 */ 2350 page_remove_rmap(page, false); 2351 put_page(page); 2352 2353 if (pte_present(pte)) 2354 unmapped++; 2355 } else { 2356 put_page(page); 2357 mpfn = 0; 2358 } 2359 2360 next: 2361 migrate->dst[migrate->npages] = 0; 2362 migrate->src[migrate->npages++] = mpfn; 2363 } 2364 arch_leave_lazy_mmu_mode(); 2365 pte_unmap_unlock(ptep - 1, ptl); 2366 2367 /* Only flush the TLB if we actually modified any entries */ 2368 if (unmapped) 2369 flush_tlb_range(walk->vma, start, end); 2370 2371 return 0; 2372 } 2373 2374 static const struct mm_walk_ops migrate_vma_walk_ops = { 2375 .pmd_entry = migrate_vma_collect_pmd, 2376 .pte_hole = migrate_vma_collect_hole, 2377 }; 2378 2379 /* 2380 * migrate_vma_collect() - collect pages over a range of virtual addresses 2381 * @migrate: migrate struct containing all migration information 2382 * 2383 * This will walk the CPU page table. For each virtual address backed by a 2384 * valid page, it updates the src array and takes a reference on the page, in 2385 * order to pin the page until we lock it and unmap it. 2386 */ 2387 static void migrate_vma_collect(struct migrate_vma *migrate) 2388 { 2389 struct mmu_notifier_range range; 2390 2391 /* 2392 * Note that the pgmap_owner is passed to the mmu notifier callback so 2393 * that the registered device driver can skip invalidating device 2394 * private page mappings that won't be migrated. 2395 */ 2396 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0, 2397 migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end, 2398 migrate->pgmap_owner); 2399 mmu_notifier_invalidate_range_start(&range); 2400 2401 walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end, 2402 &migrate_vma_walk_ops, migrate); 2403 2404 mmu_notifier_invalidate_range_end(&range); 2405 migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT); 2406 } 2407 2408 /* 2409 * migrate_vma_check_page() - check if page is pinned or not 2410 * @page: struct page to check 2411 * 2412 * Pinned pages cannot be migrated. This is the same test as in 2413 * folio_migrate_mapping(), except that here we allow migration of a 2414 * ZONE_DEVICE page. 
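 *
 * Illustrative note (not in the original source): a page counts as pinned here
 * whenever someone other than the migration code holds an extra reference, for
 * example a long-term get_user_pages() pin taken by a driver. In that case
 * page_count() exceeds the expected extra references plus the number of
 * mappings, so the comparison below,
 *
 *	(page_count(page) - extra) > page_mapcount(page)
 *
 * evaluates true and the page is reported as not migratable.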
2415 */ 2416 static bool migrate_vma_check_page(struct page *page) 2417 { 2418 /* 2419 * One extra ref because caller holds an extra reference, either from 2420 * isolate_lru_page() for a regular page, or migrate_vma_collect() for 2421 * a device page. 2422 */ 2423 int extra = 1; 2424 2425 /* 2426 * FIXME support THP (transparent huge page), it is bit more complex to 2427 * check them than regular pages, because they can be mapped with a pmd 2428 * or with a pte (split pte mapping). 2429 */ 2430 if (PageCompound(page)) 2431 return false; 2432 2433 /* Page from ZONE_DEVICE have one extra reference */ 2434 if (is_zone_device_page(page)) 2435 extra++; 2436 2437 /* For file back page */ 2438 if (page_mapping(page)) 2439 extra += 1 + page_has_private(page); 2440 2441 if ((page_count(page) - extra) > page_mapcount(page)) 2442 return false; 2443 2444 return true; 2445 } 2446 2447 /* 2448 * migrate_vma_unmap() - replace page mapping with special migration pte entry 2449 * @migrate: migrate struct containing all migration information 2450 * 2451 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a 2452 * special migration pte entry and check if it has been pinned. Pinned pages are 2453 * restored because we cannot migrate them. 2454 * 2455 * This is the last step before we call the device driver callback to allocate 2456 * destination memory and copy contents of original page over to new page. 2457 */ 2458 static void migrate_vma_unmap(struct migrate_vma *migrate) 2459 { 2460 const unsigned long npages = migrate->npages; 2461 unsigned long i, restore = 0; 2462 bool allow_drain = true; 2463 2464 lru_add_drain(); 2465 2466 for (i = 0; i < npages; i++) { 2467 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2468 2469 if (!page) 2470 continue; 2471 2472 /* ZONE_DEVICE pages are not on LRU */ 2473 if (!is_zone_device_page(page)) { 2474 if (!PageLRU(page) && allow_drain) { 2475 /* Drain CPU's pagevec */ 2476 lru_add_drain_all(); 2477 allow_drain = false; 2478 } 2479 2480 if (isolate_lru_page(page)) { 2481 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2482 migrate->cpages--; 2483 restore++; 2484 continue; 2485 } 2486 2487 /* Drop the reference we took in collect */ 2488 put_page(page); 2489 } 2490 2491 if (page_mapped(page)) 2492 try_to_migrate(page, 0); 2493 2494 if (page_mapped(page) || !migrate_vma_check_page(page)) { 2495 if (!is_zone_device_page(page)) { 2496 get_page(page); 2497 putback_lru_page(page); 2498 } 2499 2500 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2501 migrate->cpages--; 2502 restore++; 2503 continue; 2504 } 2505 } 2506 2507 for (i = 0; i < npages && restore; i++) { 2508 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2509 2510 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) 2511 continue; 2512 2513 remove_migration_ptes(page, page, false); 2514 2515 migrate->src[i] = 0; 2516 unlock_page(page); 2517 put_page(page); 2518 restore--; 2519 } 2520 } 2521 2522 /** 2523 * migrate_vma_setup() - prepare to migrate a range of memory 2524 * @args: contains the vma, start, and pfns arrays for the migration 2525 * 2526 * Returns: negative errno on failures, 0 when 0 or more pages were migrated 2527 * without an error. 2528 * 2529 * Prepare to migrate a range of memory virtual address range by collecting all 2530 * the pages backing each virtual address in the range, saving them inside the 2531 * src array. Then lock those pages and unmap them. Once the pages are locked 2532 * and unmapped, check whether each page is pinned or not. 
Pages that aren't 2533 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the 2534 * corresponding src array entry. Any pages that are pinned are then restored by 2535 * remapping and unlocking them. 2536 * 2537 * The caller should then allocate destination memory and copy source memory to 2538 * it for all those entries (i.e. with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE 2539 * flag set). Once these are allocated and copied, the caller must update each 2540 * corresponding entry in the dst array with the pfn value of the destination 2541 * page and with MIGRATE_PFN_VALID. Destination pages must be locked via 2542 * lock_page(). 2543 * 2544 * Note that the caller does not have to migrate all the pages that are marked 2545 * with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a migration from 2546 * device memory to system memory. If the caller cannot migrate a device page 2547 * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe 2548 * consequences for the userspace process, so it must be avoided if at all 2549 * possible. 2550 * 2551 * For empty entries inside the CPU page table (pte_none() or pmd_none() is true) we 2552 * do set the MIGRATE_PFN_MIGRATE flag inside the corresponding source array, thus 2553 * allowing the caller to allocate device memory for those unbacked virtual 2554 * addresses. For this the caller simply has to allocate device memory and 2555 * properly set the destination entry like for regular migration. Note that 2556 * this can still fail, and thus inside the device driver you must check if the 2557 * migration was successful for those entries after calling migrate_vma_pages(), 2558 * just like for regular migration. 2559 * 2560 * After that, the callers must call migrate_vma_pages() to go over each entry 2561 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag 2562 * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID flag set, 2563 * then migrate_vma_pages() migrates struct page information from the source 2564 * struct page to the destination struct page. If it fails to migrate the 2565 * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the 2566 * src array. 2567 * 2568 * At this point all successfully migrated pages have an entry in the src 2569 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst 2570 * array entry with MIGRATE_PFN_VALID flag set. 2571 * 2572 * Once migrate_vma_pages() returns the caller may inspect which pages were 2573 * successfully migrated, and which were not. Successfully migrated pages will 2574 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry. 2575 * 2576 * It is safe to update the device page table after migrate_vma_pages() because 2577 * both the destination and source pages are still locked, and the mmap_lock is held 2578 * in read mode (hence no one can unmap the range being migrated). 2579 * 2580 * Once the caller is done cleaning up and updating its page table (if it 2581 * chose to do so; this is not an obligation), it finally calls 2582 * migrate_vma_finalize() to update the CPU page table to point to new pages 2583 * for successfully migrated pages or otherwise restore the CPU page table to 2584 * point to the original source pages.
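 *
 * A minimal usage sketch (illustrative only; mydev, mydev_alloc_page() and
 * mydev_copy_page() are hypothetical driver helpers, not part of this API),
 * assuming the caller already holds the mmap_lock in read mode:
 *
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= start,
 *		.end		= end,
 *		.src		= src_pfns,
 *		.dst		= dst_pfns,
 *		.pgmap_owner	= mydev,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *	unsigned long i;
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *
 *	for (i = 0; i < args.npages; i++) {
 *		struct page *dpage;
 *
 *		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;
 *		dpage = mydev_alloc_page(mydev);
 *		if (!dpage)
 *			continue;
 *		lock_page(dpage);
 *		mydev_copy_page(dpage, args.src[i]);
 *		args.dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_VALID;
 *	}
 *
 *	migrate_vma_pages(&args);
 *	(update the device page table here for entries whose src entry still
 *	 has MIGRATE_PFN_MIGRATE set)
 *	migrate_vma_finalize(&args);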
2585 */ 2586 int migrate_vma_setup(struct migrate_vma *args) 2587 { 2588 long nr_pages = (args->end - args->start) >> PAGE_SHIFT; 2589 2590 args->start &= PAGE_MASK; 2591 args->end &= PAGE_MASK; 2592 if (!args->vma || is_vm_hugetlb_page(args->vma) || 2593 (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma)) 2594 return -EINVAL; 2595 if (nr_pages <= 0) 2596 return -EINVAL; 2597 if (args->start < args->vma->vm_start || 2598 args->start >= args->vma->vm_end) 2599 return -EINVAL; 2600 if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end) 2601 return -EINVAL; 2602 if (!args->src || !args->dst) 2603 return -EINVAL; 2604 2605 memset(args->src, 0, sizeof(*args->src) * nr_pages); 2606 args->cpages = 0; 2607 args->npages = 0; 2608 2609 migrate_vma_collect(args); 2610 2611 if (args->cpages) 2612 migrate_vma_unmap(args); 2613 2614 /* 2615 * At this point pages are locked and unmapped, and thus they have 2616 * stable content and can safely be copied to destination memory that 2617 * is allocated by the drivers. 2618 */ 2619 return 0; 2620 2621 } 2622 EXPORT_SYMBOL(migrate_vma_setup); 2623 2624 /* 2625 * This code closely matches the code in: 2626 * __handle_mm_fault() 2627 * handle_pte_fault() 2628 * do_anonymous_page() 2629 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE 2630 * private page. 2631 */ 2632 static void migrate_vma_insert_page(struct migrate_vma *migrate, 2633 unsigned long addr, 2634 struct page *page, 2635 unsigned long *src) 2636 { 2637 struct vm_area_struct *vma = migrate->vma; 2638 struct mm_struct *mm = vma->vm_mm; 2639 bool flush = false; 2640 spinlock_t *ptl; 2641 pte_t entry; 2642 pgd_t *pgdp; 2643 p4d_t *p4dp; 2644 pud_t *pudp; 2645 pmd_t *pmdp; 2646 pte_t *ptep; 2647 2648 /* Only allow populating anonymous memory */ 2649 if (!vma_is_anonymous(vma)) 2650 goto abort; 2651 2652 pgdp = pgd_offset(mm, addr); 2653 p4dp = p4d_alloc(mm, pgdp, addr); 2654 if (!p4dp) 2655 goto abort; 2656 pudp = pud_alloc(mm, p4dp, addr); 2657 if (!pudp) 2658 goto abort; 2659 pmdp = pmd_alloc(mm, pudp, addr); 2660 if (!pmdp) 2661 goto abort; 2662 2663 if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp)) 2664 goto abort; 2665 2666 /* 2667 * Use pte_alloc() instead of pte_alloc_map(). We can't run 2668 * pte_offset_map() on pmds where a huge pmd might be created 2669 * from a different thread. 2670 * 2671 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when 2672 * parallel threads are excluded by other means. 2673 * 2674 * Here we only have mmap_read_lock(mm). 2675 */ 2676 if (pte_alloc(mm, pmdp)) 2677 goto abort; 2678 2679 /* See the comment in pte_alloc_one_map() */ 2680 if (unlikely(pmd_trans_unstable(pmdp))) 2681 goto abort; 2682 2683 if (unlikely(anon_vma_prepare(vma))) 2684 goto abort; 2685 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) 2686 goto abort; 2687 2688 /* 2689 * The memory barrier inside __SetPageUptodate makes sure that 2690 * preceding stores to the page contents become visible before 2691 * the set_pte_at() write. 2692 */ 2693 __SetPageUptodate(page); 2694 2695 if (is_zone_device_page(page)) { 2696 if (is_device_private_page(page)) { 2697 swp_entry_t swp_entry; 2698 2699 if (vma->vm_flags & VM_WRITE) 2700 swp_entry = make_writable_device_private_entry( 2701 page_to_pfn(page)); 2702 else 2703 swp_entry = make_readable_device_private_entry( 2704 page_to_pfn(page)); 2705 entry = swp_entry_to_pte(swp_entry); 2706 } else { 2707 /* 2708 * For now we only support migrating to un-addressable 2709 * device memory. 
2710 */ 2711 pr_warn_once("Unsupported ZONE_DEVICE page type.\n"); 2712 goto abort; 2713 } 2714 } else { 2715 entry = mk_pte(page, vma->vm_page_prot); 2716 if (vma->vm_flags & VM_WRITE) 2717 entry = pte_mkwrite(pte_mkdirty(entry)); 2718 } 2719 2720 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); 2721 2722 if (check_stable_address_space(mm)) 2723 goto unlock_abort; 2724 2725 if (pte_present(*ptep)) { 2726 unsigned long pfn = pte_pfn(*ptep); 2727 2728 if (!is_zero_pfn(pfn)) 2729 goto unlock_abort; 2730 flush = true; 2731 } else if (!pte_none(*ptep)) 2732 goto unlock_abort; 2733 2734 /* 2735 * Check for userfaultfd but do not deliver the fault. Instead, 2736 * just back off. 2737 */ 2738 if (userfaultfd_missing(vma)) 2739 goto unlock_abort; 2740 2741 inc_mm_counter(mm, MM_ANONPAGES); 2742 page_add_new_anon_rmap(page, vma, addr, false); 2743 if (!is_zone_device_page(page)) 2744 lru_cache_add_inactive_or_unevictable(page, vma); 2745 get_page(page); 2746 2747 if (flush) { 2748 flush_cache_page(vma, addr, pte_pfn(*ptep)); 2749 ptep_clear_flush_notify(vma, addr, ptep); 2750 set_pte_at_notify(mm, addr, ptep, entry); 2751 update_mmu_cache(vma, addr, ptep); 2752 } else { 2753 /* No need to invalidate - it was non-present before */ 2754 set_pte_at(mm, addr, ptep, entry); 2755 update_mmu_cache(vma, addr, ptep); 2756 } 2757 2758 pte_unmap_unlock(ptep, ptl); 2759 *src = MIGRATE_PFN_MIGRATE; 2760 return; 2761 2762 unlock_abort: 2763 pte_unmap_unlock(ptep, ptl); 2764 abort: 2765 *src &= ~MIGRATE_PFN_MIGRATE; 2766 } 2767 2768 /** 2769 * migrate_vma_pages() - migrate meta-data from src page to dst page 2770 * @migrate: migrate struct containing all migration information 2771 * 2772 * This migrates struct page meta-data from source struct page to destination 2773 * struct page. This effectively finishes the migration from source page to the 2774 * destination page. 2775 */ 2776 void migrate_vma_pages(struct migrate_vma *migrate) 2777 { 2778 const unsigned long npages = migrate->npages; 2779 const unsigned long start = migrate->start; 2780 struct mmu_notifier_range range; 2781 unsigned long addr, i; 2782 bool notified = false; 2783 2784 for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) { 2785 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); 2786 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2787 struct address_space *mapping; 2788 int r; 2789 2790 if (!newpage) { 2791 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2792 continue; 2793 } 2794 2795 if (!page) { 2796 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) 2797 continue; 2798 if (!notified) { 2799 notified = true; 2800 2801 mmu_notifier_range_init_owner(&range, 2802 MMU_NOTIFY_MIGRATE, 0, migrate->vma, 2803 migrate->vma->vm_mm, addr, migrate->end, 2804 migrate->pgmap_owner); 2805 mmu_notifier_invalidate_range_start(&range); 2806 } 2807 migrate_vma_insert_page(migrate, addr, newpage, 2808 &migrate->src[i]); 2809 continue; 2810 } 2811 2812 mapping = page_mapping(page); 2813 2814 if (is_zone_device_page(newpage)) { 2815 if (is_device_private_page(newpage)) { 2816 /* 2817 * For now only support private anonymous when 2818 * migrating to un-addressable device memory. 2819 */ 2820 if (mapping) { 2821 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2822 continue; 2823 } 2824 } else { 2825 /* 2826 * Other types of ZONE_DEVICE page are not 2827 * supported. 
2828 */ 2829 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2830 continue; 2831 } 2832 } 2833 2834 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY); 2835 if (r != MIGRATEPAGE_SUCCESS) 2836 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2837 } 2838 2839 /* 2840 * No need to double call mmu_notifier->invalidate_range() callback as 2841 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page() 2842 * did already call it. 2843 */ 2844 if (notified) 2845 mmu_notifier_invalidate_range_only_end(&range); 2846 } 2847 EXPORT_SYMBOL(migrate_vma_pages); 2848 2849 /** 2850 * migrate_vma_finalize() - restore CPU page table entry 2851 * @migrate: migrate struct containing all migration information 2852 * 2853 * This replaces the special migration pte entry with either a mapping to the 2854 * new page if migration was successful for that page, or to the original page 2855 * otherwise. 2856 * 2857 * This also unlocks the pages and puts them back on the lru, or drops the extra 2858 * refcount, for device pages. 2859 */ 2860 void migrate_vma_finalize(struct migrate_vma *migrate) 2861 { 2862 const unsigned long npages = migrate->npages; 2863 unsigned long i; 2864 2865 for (i = 0; i < npages; i++) { 2866 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); 2867 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2868 2869 if (!page) { 2870 if (newpage) { 2871 unlock_page(newpage); 2872 put_page(newpage); 2873 } 2874 continue; 2875 } 2876 2877 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) { 2878 if (newpage) { 2879 unlock_page(newpage); 2880 put_page(newpage); 2881 } 2882 newpage = page; 2883 } 2884 2885 remove_migration_ptes(page, newpage, false); 2886 unlock_page(page); 2887 2888 if (is_zone_device_page(page)) 2889 put_page(page); 2890 else 2891 putback_lru_page(page); 2892 2893 if (newpage != page) { 2894 unlock_page(newpage); 2895 if (is_zone_device_page(newpage)) 2896 put_page(newpage); 2897 else 2898 putback_lru_page(newpage); 2899 } 2900 } 2901 } 2902 EXPORT_SYMBOL(migrate_vma_finalize); 2903 #endif /* CONFIG_DEVICE_PRIVATE */ 2904 2905 /* 2906 * node_demotion[] example: 2907 * 2908 * Consider a system with two sockets. Each socket has 2909 * three classes of memory attached: fast, medium and slow. 2910 * Each memory class is placed in its own NUMA node. The 2911 * CPUs are placed in the node with the "fast" memory. The 2912 * 6 NUMA nodes (0-5) might be split among the sockets like 2913 * this: 2914 * 2915 * Socket A: 0, 1, 2 2916 * Socket B: 3, 4, 5 2917 * 2918 * When Node 0 fills up, its memory should be migrated to 2919 * Node 1. When Node 1 fills up, it should be migrated to 2920 * Node 2. The migration path start on the nodes with the 2921 * processors (since allocations default to this node) and 2922 * fast memory, progress through medium and end with the 2923 * slow memory: 2924 * 2925 * 0 -> 1 -> 2 -> stop 2926 * 3 -> 4 -> 5 -> stop 2927 * 2928 * This is represented in the node_demotion[] like this: 2929 * 2930 * { nr=1, nodes[0]=1 }, // Node 0 migrates to 1 2931 * { nr=1, nodes[0]=2 }, // Node 1 migrates to 2 2932 * { nr=0, nodes[0]=-1 }, // Node 2 does not migrate 2933 * { nr=1, nodes[0]=4 }, // Node 3 migrates to 4 2934 * { nr=1, nodes[0]=5 }, // Node 4 migrates to 5 2935 * { nr=0, nodes[0]=-1 }, // Node 5 does not migrate 2936 * 2937 * Moreover some systems may have multiple slow memory nodes. 
2938 * Suppose a system has one socket with 3 memory nodes, node 0 2939 * is fast memory type, and nodes 1/2 are both slow memory 2940 * type, and the distance between the fast memory node and the slow 2941 * memory nodes is the same. So the migration path should be: 2942 * 2943 * 0 -> 1/2 -> stop 2944 * 2945 * This is represented in the node_demotion[] like this: 2946 * { nr=2, {nodes[0]=1, nodes[1]=2} }, // Node 0 migrates to node 1 and node 2 2947 * { nr=0, nodes[0]=-1, }, // Node 1 does not migrate 2948 * { nr=0, nodes[0]=-1, }, // Node 2 does not migrate 2949 */ 2950 2951 /* 2952 * Writes to this array occur without locking. Cycles are 2953 * not allowed: Node X demotes to Y which demotes to X... 2954 * 2955 * If multiple reads are performed, a single rcu_read_lock() 2956 * must be held over all reads to ensure that no cycles are 2957 * observed. 2958 */ 2959 #define DEFAULT_DEMOTION_TARGET_NODES 15 2960 2961 #if MAX_NUMNODES < DEFAULT_DEMOTION_TARGET_NODES 2962 #define DEMOTION_TARGET_NODES (MAX_NUMNODES - 1) 2963 #else 2964 #define DEMOTION_TARGET_NODES DEFAULT_DEMOTION_TARGET_NODES 2965 #endif 2966 2967 struct demotion_nodes { 2968 unsigned short nr; 2969 short nodes[DEMOTION_TARGET_NODES]; 2970 }; 2971 2972 static struct demotion_nodes *node_demotion __read_mostly; 2973 2974 /** 2975 * next_demotion_node() - Get the next node in the demotion path 2976 * @node: The starting node to lookup the next node 2977 * 2978 * Return: node id for next memory node in the demotion path hierarchy 2979 * from @node; NUMA_NO_NODE if @node is terminal. This does not keep 2980 * @node online or guarantee that it *continues* to be the next demotion 2981 * target. 2982 */ 2983 int next_demotion_node(int node) 2984 { 2985 struct demotion_nodes *nd; 2986 unsigned short target_nr, index; 2987 int target; 2988 2989 if (!node_demotion) 2990 return NUMA_NO_NODE; 2991 2992 nd = &node_demotion[node]; 2993 2994 /* 2995 * node_demotion[] is updated without excluding this 2996 * function from running. RCU doesn't provide any 2997 * compiler barriers, so the READ_ONCE() is required 2998 * to avoid compiler reordering or read merging. 2999 * 3000 * Make sure to use RCU over entire code blocks if 3001 * node_demotion[] reads need to be consistent. 3002 */ 3003 rcu_read_lock(); 3004 target_nr = READ_ONCE(nd->nr); 3005 3006 switch (target_nr) { 3007 case 0: 3008 target = NUMA_NO_NODE; 3009 goto out; 3010 case 1: 3011 index = 0; 3012 break; 3013 default: 3014 /* 3015 * If there are multiple target nodes, just select one 3016 * target node randomly. 3017 * 3018 * We could also use round-robin to select the 3019 * target node, but that would need another variable 3020 * in node_demotion[] to record the last selected target node, 3021 * which may cause cache ping-pong as the last target 3022 * node changes. Introducing per-cpu data to avoid that 3023 * caching issue seems more complicated. So selecting the 3024 * target node randomly seems better for now. 3025 */ 3026 index = get_random_int() % target_nr; 3027 break; 3028 } 3029 3030 target = READ_ONCE(nd->nodes[index]); 3031 3032 out: 3033 rcu_read_unlock(); 3034 return target; 3035 } 3036 3037 #if defined(CONFIG_HOTPLUG_CPU) 3038 /* Disable reclaim-based migration.
*/ 3039 static void __disable_all_migrate_targets(void) 3040 { 3041 int node, i; 3042 3043 if (!node_demotion) 3044 return; 3045 3046 for_each_online_node(node) { 3047 node_demotion[node].nr = 0; 3048 for (i = 0; i < DEMOTION_TARGET_NODES; i++) 3049 node_demotion[node].nodes[i] = NUMA_NO_NODE; 3050 } 3051 } 3052 3053 static void disable_all_migrate_targets(void) 3054 { 3055 __disable_all_migrate_targets(); 3056 3057 /* 3058 * Ensure that the "disable" is visible across the system. 3059 * Readers will see either a combination of before+disable 3060 * state or disable+after. They will never see before and 3061 * after state together. 3062 * 3063 * The before+after state together might have cycles and 3064 * could cause readers to do things like loop until this 3065 * function finishes. This ensures they can only see a 3066 * single "bad" read and would, for instance, only loop 3067 * once. 3068 */ 3069 synchronize_rcu(); 3070 } 3071 3072 /* 3073 * Find an automatic demotion target for 'node'. 3074 * Failing here is OK. It might just indicate 3075 * being at the end of a chain. 3076 */ 3077 static int establish_migrate_target(int node, nodemask_t *used, 3078 int best_distance) 3079 { 3080 int migration_target, index, val; 3081 struct demotion_nodes *nd; 3082 3083 if (!node_demotion) 3084 return NUMA_NO_NODE; 3085 3086 nd = &node_demotion[node]; 3087 3088 migration_target = find_next_best_node(node, used); 3089 if (migration_target == NUMA_NO_NODE) 3090 return NUMA_NO_NODE; 3091 3092 /* 3093 * If the node has been set a migration target node before, 3094 * which means it's the best distance between them. Still 3095 * check if this node can be demoted to other target nodes 3096 * if they have a same best distance. 3097 */ 3098 if (best_distance != -1) { 3099 val = node_distance(node, migration_target); 3100 if (val > best_distance) 3101 goto out_clear; 3102 } 3103 3104 index = nd->nr; 3105 if (WARN_ONCE(index >= DEMOTION_TARGET_NODES, 3106 "Exceeds maximum demotion target nodes\n")) 3107 goto out_clear; 3108 3109 nd->nodes[index] = migration_target; 3110 nd->nr++; 3111 3112 return migration_target; 3113 out_clear: 3114 node_clear(migration_target, *used); 3115 return NUMA_NO_NODE; 3116 } 3117 3118 /* 3119 * When memory fills up on a node, memory contents can be 3120 * automatically migrated to another node instead of 3121 * discarded at reclaim. 3122 * 3123 * Establish a "migration path" which will start at nodes 3124 * with CPUs and will follow the priorities used to build the 3125 * page allocator zonelists. 3126 * 3127 * The difference here is that cycles must be avoided. If 3128 * node0 migrates to node1, then neither node1, nor anything 3129 * node1 migrates to can migrate to node0. Also one node can 3130 * be migrated to multiple nodes if the target nodes all have 3131 * a same best-distance against the source node. 3132 * 3133 * This function can run simultaneously with readers of 3134 * node_demotion[]. However, it can not run simultaneously 3135 * with itself. Exclusion is provided by memory hotplug events 3136 * being single-threaded. 3137 */ 3138 static void __set_migration_target_nodes(void) 3139 { 3140 nodemask_t next_pass = NODE_MASK_NONE; 3141 nodemask_t this_pass = NODE_MASK_NONE; 3142 nodemask_t used_targets = NODE_MASK_NONE; 3143 int node, best_distance; 3144 3145 /* 3146 * Avoid any oddities like cycles that could occur 3147 * from changes in the topology. This will leave 3148 * a momentary gap when migration is disabled. 
3149 */ 3150 disable_all_migrate_targets(); 3151 3152 /* 3153 * Allocations go close to CPUs, first. Assume that 3154 * the migration path starts at the nodes with CPUs. 3155 */ 3156 next_pass = node_states[N_CPU]; 3157 again: 3158 this_pass = next_pass; 3159 next_pass = NODE_MASK_NONE; 3160 /* 3161 * To avoid cycles in the migration "graph", ensure 3162 * that migration sources are not future targets by 3163 * setting them in 'used_targets'. Do this only 3164 * once per pass so that multiple source nodes can 3165 * share a target node. 3166 * 3167 * 'used_targets' will become unavailable in future 3168 * passes. This limits some opportunities for 3169 * multiple source nodes to share a destination. 3170 */ 3171 nodes_or(used_targets, used_targets, this_pass); 3172 3173 for_each_node_mask(node, this_pass) { 3174 best_distance = -1; 3175 3176 /* 3177 * Try to set up the migration path for the node, and the target 3178 * migration nodes can be multiple, so doing a loop to find all 3179 * the target nodes if they all have a best node distance. 3180 */ 3181 do { 3182 int target_node = 3183 establish_migrate_target(node, &used_targets, 3184 best_distance); 3185 3186 if (target_node == NUMA_NO_NODE) 3187 break; 3188 3189 if (best_distance == -1) 3190 best_distance = node_distance(node, target_node); 3191 3192 /* 3193 * Visit targets from this pass in the next pass. 3194 * Eventually, every node will have been part of 3195 * a pass, and will become set in 'used_targets'. 3196 */ 3197 node_set(target_node, next_pass); 3198 } while (1); 3199 } 3200 /* 3201 * 'next_pass' contains nodes which became migration 3202 * targets in this pass. Make additional passes until 3203 * no more migrations targets are available. 3204 */ 3205 if (!nodes_empty(next_pass)) 3206 goto again; 3207 } 3208 3209 /* 3210 * For callers that do not hold get_online_mems() already. 3211 */ 3212 void set_migration_target_nodes(void) 3213 { 3214 get_online_mems(); 3215 __set_migration_target_nodes(); 3216 put_online_mems(); 3217 } 3218 3219 /* 3220 * This leaves migrate-on-reclaim transiently disabled between 3221 * the MEM_GOING_OFFLINE and MEM_OFFLINE events. This runs 3222 * whether reclaim-based migration is enabled or not, which 3223 * ensures that the user can turn reclaim-based migration at 3224 * any time without needing to recalculate migration targets. 3225 * 3226 * These callbacks already hold get_online_mems(). That is why 3227 * __set_migration_target_nodes() can be used as opposed to 3228 * set_migration_target_nodes(). 3229 */ 3230 static int __meminit migrate_on_reclaim_callback(struct notifier_block *self, 3231 unsigned long action, void *_arg) 3232 { 3233 struct memory_notify *arg = _arg; 3234 3235 /* 3236 * Only update the node migration order when a node is 3237 * changing status, like online->offline. This avoids 3238 * the overhead of synchronize_rcu() in most cases. 3239 */ 3240 if (arg->status_change_nid < 0) 3241 return notifier_from_errno(0); 3242 3243 switch (action) { 3244 case MEM_GOING_OFFLINE: 3245 /* 3246 * Make sure there are not transient states where 3247 * an offline node is a migration target. This 3248 * will leave migration disabled until the offline 3249 * completes and the MEM_OFFLINE case below runs. 3250 */ 3251 disable_all_migrate_targets(); 3252 break; 3253 case MEM_OFFLINE: 3254 case MEM_ONLINE: 3255 /* 3256 * Recalculate the target nodes once the node 3257 * reaches its final state (online or offline). 
*/ 3258 __set_migration_target_nodes(); 3259 break; 3260 3261 case MEM_CANCEL_OFFLINE: 3262 /* 3263 * MEM_GOING_OFFLINE disabled all the migration 3264 * targets. Reenable them. 3265 */ 3266 __set_migration_target_nodes(); 3267 break; 3268 case MEM_GOING_ONLINE: 3269 case MEM_CANCEL_ONLINE: 3270 break; 3271 } 3272 3273 return notifier_from_errno(0); 3274 } 3275 3276 void __init migrate_on_reclaim_init(void) 3277 { 3278 node_demotion = kmalloc_array(nr_node_ids, 3279 sizeof(struct demotion_nodes), 3280 GFP_KERNEL); 3281 WARN_ON(!node_demotion); 3282 3283 hotplug_memory_notifier(migrate_on_reclaim_callback, 100); 3284 /* 3285 * At this point, all numa nodes with memory/CPUs have their state 3286 * properly set, so we can build the demotion order now. 3287 * Let us hold the cpu_hotplug lock, as we could possibly have 3288 * CPU hotplug events during boot. 3289 */ 3290 cpus_read_lock(); 3291 set_migration_target_nodes(); 3292 cpus_read_unlock(); 3293 } 3294 #endif /* CONFIG_HOTPLUG_CPU */ 3295 3296 bool numa_demotion_enabled = false; 3297 3298 #ifdef CONFIG_SYSFS 3299 static ssize_t numa_demotion_enabled_show(struct kobject *kobj, 3300 struct kobj_attribute *attr, char *buf) 3301 { 3302 return sysfs_emit(buf, "%s\n", 3303 numa_demotion_enabled ? "true" : "false"); 3304 } 3305 3306 static ssize_t numa_demotion_enabled_store(struct kobject *kobj, 3307 struct kobj_attribute *attr, 3308 const char *buf, size_t count) 3309 { 3310 if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1)) 3311 numa_demotion_enabled = true; 3312 else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1)) 3313 numa_demotion_enabled = false; 3314 else 3315 return -EINVAL; 3316 3317 return count; 3318 } 3319 3320 static struct kobj_attribute numa_demotion_enabled_attr = 3321 __ATTR(demotion_enabled, 0644, numa_demotion_enabled_show, 3322 numa_demotion_enabled_store); 3323 3324 static struct attribute *numa_attrs[] = { 3325 &numa_demotion_enabled_attr.attr, 3326 NULL, 3327 }; 3328 3329 static const struct attribute_group numa_attr_group = { 3330 .attrs = numa_attrs, 3331 }; 3332 3333 static int __init numa_init_sysfs(void) 3334 { 3335 int err; 3336 struct kobject *numa_kobj; 3337 3338 numa_kobj = kobject_create_and_add("numa", mm_kobj); 3339 if (!numa_kobj) { 3340 pr_err("failed to create numa kobject\n"); 3341 return -ENOMEM; 3342 } 3343 err = sysfs_create_group(numa_kobj, &numa_attr_group); 3344 if (err) { 3345 pr_err("failed to register numa group\n"); 3346 goto delete_obj; 3347 } 3348 return 0; 3349 3350 delete_obj: 3351 kobject_put(numa_kobj); 3352 return err; 3353 } 3354 subsys_initcall(numa_init_sysfs); 3355 #endif 3356
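
/*
 * Usage note (illustrative, not part of the original source): with
 * CONFIG_SYSFS enabled, the attribute registered above appears as
 * /sys/kernel/mm/numa/demotion_enabled, so reclaim-based demotion can be
 * toggled from userspace, e.g.:
 *
 *	# echo true > /sys/kernel/mm/numa/demotion_enabled
 *	# cat /sys/kernel/mm/numa/demotion_enabled
 *	true
 *
 * Writing "false" (or "0") clears numa_demotion_enabled again.
 */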