// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are still under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount, preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because the page's owner
	 * assumes that nobody touches the PG_locked bit of a newly allocated
	 * page, so unconditionally grabbing the lock would ruin the owner's
	 * use of it.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}
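
/*
 * Illustrative sketch, not part of the original file: the driver side of the
 * non-LRU movable page protocol used above.  A driver (zsmalloc/z3fold style)
 * marks its pages movable with __SetPageMovable() and supplies the callbacks
 * below through its address_space_operations.  The example_* names are
 * hypothetical.
 */
static bool example_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* Called under the page lock; pin the backing object and report success. */
	return true;
}

static int example_migratepage(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	/* Copy the payload from @page to @newpage and repoint driver metadata. */
	return MIGRATEPAGE_SUCCESS;
}

static void example_putback_page(struct page *page)
{
	/* Undo whatever example_isolate_page() pinned. */
}

static const struct address_space_operations example_movable_aops = {
	.isolate_page	= example_isolate_page,
	.migratepage	= example_migratepage,
	.putback_page	= example_putback_page,
};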

static void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	ClearPageIsolated(page);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated a non-LRU movable page so here we can use
		 * __PageMovable because an LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -thp_nr_pages(page));
			putback_lru_page(page);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *old)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		pte_t pte;
		swp_entry_t entry;
		struct page *new;
		unsigned long idx = 0;

		/* pgoff is invalid for ksm pages, but they are never large */
		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
		new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		folio_get(folio);
		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (is_writable_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(*pvmw.pte))
			pte = pte_mkuffd_wp(pte);

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*pvmw.pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*pvmw.pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (folio_test_hugetlb(folio)) {
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (folio_test_anon(folio))
				hugepage_add_anon_rmap(new, vma, pvmw.address);
			else
				page_dup_rmap(new, true);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		} else
#endif
		{
			if (folio_test_anon(folio))
				page_add_anon_rmap(new, vma, pvmw.address, false);
			else
				page_add_file_rmap(new, vma, false);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_page_drain(smp_processor_id());

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = src,
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptep, ptl);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif
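
/*
 * Illustrative sketch, not part of the original file: how a fault handler is
 * expected to use migration_entry_wait().  The real logic lives in
 * do_swap_page(); the hypothetical helper below only models the pattern of
 * detecting a migration entry and sleeping until migration finishes.
 */
static vm_fault_t example_handle_migration_entry(struct vm_fault *vmf)
{
	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);

	if (!is_migration_entry(entry))
		return VM_FAULT_SIGBUS;	/* not a migration entry; not handled here */

	/* Sleeps until the migration entry has been replaced by a real pte. */
	migration_entry_wait(vmf->vma->vm_mm, vmf->pmd, vmf->address);

	/* Returning 0 lets the task retry the faulting access. */
	return 0;
}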

static int expected_page_refs(struct address_space *mapping, struct page *page)
{
	int expected_count = 1;

	if (mapping)
		expected_count += compound_nr(page) + page_has_private(page);
	return expected_count;
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = expected_page_refs(mapping, &folio->page) + extra_count;
	long nr = folio_nr_pages(folio);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	xas_store(&xas, newfolio);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	XA_STATE(xas, &mapping->i_pages, page_index(page));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count || xas_load(&xas) != page) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);

	xas_store(&xas, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(&folio->page, -1);
	page_cpupid_xchg_last(&newfolio->page, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
	 * bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	if (!folio_test_hugetlb(folio))
		mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct folio *newfolio = page_folio(newpage);
	struct folio *folio = page_folio(page);
	int rc;

	BUG_ON(folio_test_writeback(folio));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, newfolio, folio, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(newfolio, folio);
	else
		folio_migrate_flags(newfolio, folio);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks.
			 */
			struct buffer_head *failed_bh = bh;
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}

static int __buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = expected_page_refs(mapping, page);
	if (page_count(page) != expected_count)
		return -EAGAIN;

	head = page_buffers(page);
	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	attach_page_private(newpage, detach_page_private(page));

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return rc;
}

/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist. For example, attached buffer heads are accessed only under the page lock.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return __buffer_migrate_page(mapping, newpage, page, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_page);

/*
 * Same as above except that this variant is more careful and checks that there
 * are also no buffer head references. This function is the right one for
 * mappings where buffer heads are directly looked up and referenced (such as
 * block device mappings).
 */
int buffer_migrate_page_norefs(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return __buffer_migrate_page(mapping, newpage, page, mode, true);
}
#endif
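
/*
 * Illustrative sketch, not part of the original file: how a filesystem would
 * typically plug the helpers above into its address_space_operations.  A
 * mapping whose buffer heads are only touched under the page lock can use
 * buffer_migrate_page(); one whose buffer heads are looked up directly
 * (block-device style) should use buffer_migrate_page_norefs() instead.
 * The structure below is hypothetical and intentionally incomplete.
 */
static const struct address_space_operations example_buffered_aops = {
	/* ... readpage/writepage and friends elided ... */
	.migratepage	= buffer_migrate_page,
};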

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct folio *folio = page_folio(page);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);

	if (likely(is_lru)) {
		if (!mapping)
			rc = migrate_page(mapping, newpage, page, mode);
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * provide a migratepage callback. Anonymous pages
			 * are part of swap space which also has its own
			 * migratepage callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping, newpage,
							page, mode);
		else
			rc = fallback_migrate_page(mapping, newpage,
							page, mode);
	} else {
		/*
		 * In case of a non-LRU page, it could be released after
		 * the isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_PAGE(!PageIsolated(page), page);
		if (!PageMovable(page)) {
			rc = MIGRATEPAGE_SUCCESS;
			ClearPageIsolated(page);
			goto out;
		}

		rc = mapping->a_ops->migratepage(mapping, newpage,
						page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
			!PageIsolated(page));
	}

	/*
	 * When successful, old pagecache page->mapping must be cleared before
	 * page is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(page)) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			ClearPageIsolated(page);
		}

		/*
		 * Anonymous and movable page->mapping will be cleared by
		 * free_pages_prepare so don't reset it here for keeping
		 * the type to work PageAnon, for example.
		 */
		if (!PageMappingFlags(page))
			page->mapping = NULL;

		if (likely(!is_zone_device_page(newpage)))
			flush_dcache_folio(page_folio(newpage));
	}
out:
	return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	struct folio *folio = page_folio(page);
	struct folio *dst = page_folio(newpage);
	int rc = -EAGAIN;
	bool page_was_mapped = false;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(page);

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readahead). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * By try_to_migrate(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the end
	 * of migration. File cache pages are no problem because of page_lock():
	 * file caches may use writepage() or lock_page() during migration, so
	 * only anon pages need this care.
	 *
	 * Only page_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to newpage at this point. We used to have a BUG
	 * here if trylock_page(newpage) fails, but would like to allow for
	 * cases where there might be a race with the previous use of newpage.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_page(newpage, page, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_cleanup_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto out_unlock_both;
		}
	} else if (page_mapped(page)) {
		/* Establish migration ptes */
		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
				page);
		try_to_migrate(folio, 0);
		page_was_mapped = true;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	/*
	 * When successful, push newpage to LRU immediately: so that if it
	 * turns out to be an mlocked page, remove_migration_ptes() will
	 * automatically build up the correct newpage->mlock_count for it.
	 *
	 * We would like to do something similar for the old page, when
	 * unsuccessful, and other cases when a page has been temporarily
	 * isolated from the unevictable LRU: but this case is the easiest.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		lru_cache_add(newpage);
		if (page_was_mapped)
			lru_add_drain();
	}

	if (page_was_mapped)
		remove_migration_ptes(folio,
			rc == MIGRATEPAGE_SUCCESS ? dst : folio, false);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(page);
out:
	/*
	 * If migration is successful, decrease refcount of the newpage,
	 * which will not free the page because the new page owner increased
	 * the refcounter.
	 */
	if (rc == MIGRATEPAGE_SUCCESS)
		put_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page,
				   free_page_t put_new_page,
				   unsigned long private, struct page *page,
				   int force, enum migrate_mode mode,
				   enum migrate_reason reason,
				   struct list_head *ret)
{
	int rc = MIGRATEPAGE_SUCCESS;
	struct page *newpage = NULL;

	if (!thp_migration_supported() && PageTransHuge(page))
		return -ENOSYS;

	if (page_count(page) == 1) {
		/* Page was freed from under us. So we are done. */
		ClearPageActive(page);
		ClearPageUnevictable(page);
		if (unlikely(__PageMovable(page))) {
			lock_page(page);
			if (!PageMovable(page))
				ClearPageIsolated(page);
			unlock_page(page);
		}
		goto out;
	}

	newpage = get_new_page(page, private);
	if (!newpage)
		return -ENOMEM;

	rc = __unmap_and_move(page, newpage, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		set_page_owner_migrate_reason(newpage, reason);

out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be restored.
		 */
		list_del(&page->lru);
	}

	/*
	 * If migration is successful, release the reference grabbed during
	 * isolation. Otherwise, restore the page to the right list unless
	 * we want to retry.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		/*
		 * Compaction can also migrate non-LRU pages which are
		 * not accounted to NR_ISOLATED_*. They can be recognized
		 * as __PageMovable.
		 */
		if (likely(!__PageMovable(page)))
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -thp_nr_pages(page));

		if (reason != MR_MEMORY_FAILURE)
			/*
			 * We release the page in page_handle_poison.
			 */
			put_page(page);
	} else {
		if (rc != -EAGAIN)
			list_add_tail(&page->lru, ret);

		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
	}

	return rc;
}

/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepages.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference count of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode, int reason,
				struct list_head *ret)
{
	struct folio *dst, *src = page_folio(hpage);
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;

	/*
	 * Migratability of hugepages depends on architectures and their size.
	 * This check is necessary because some callers of hugepage migration
	 * like soft offline and memory hotremove don't walk through page
	 * tables or check whether the hugepage is pmd-based or not before
	 * kicking migration.
	 */
	if (!hugepage_migration_supported(page_hstate(hpage))) {
		list_move_tail(&hpage->lru, ret);
		return -ENOSYS;
	}

	if (page_count(hpage) == 1) {
		/* page was freed from under us. So we are done. */
		putback_active_hugepage(hpage);
		return MIGRATEPAGE_SUCCESS;
	}

	new_hpage = get_new_page(hpage, private);
	if (!new_hpage)
		return -ENOMEM;
	dst = page_folio(new_hpage);

	if (!trylock_page(hpage)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		lock_page(hpage);
	}

	/*
	 * Check for pages which are in the process of being freed. Without
	 * page_mapping() set, hugetlbfs specific move page routine will not
	 * be called and we could leak usage counts for subpools.
	 */
	if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	if (unlikely(!trylock_page(new_hpage)))
		goto put_anon;

	if (page_mapped(hpage)) {
		bool mapping_locked = false;
		enum ttu_flags ttu = 0;

		if (!PageAnon(hpage)) {
			/*
			 * In shared mappings, try_to_unmap could potentially
			 * call huge_pmd_unshare. Because of this, take
			 * semaphore in write mode here and set TTU_RMAP_LOCKED
			 * to let lower levels know we have taken the lock.
			 */
			mapping = hugetlb_page_mapping_lock_write(hpage);
			if (unlikely(!mapping))
				goto unlock_put_anon;

			mapping_locked = true;
			ttu |= TTU_RMAP_LOCKED;
		}

		try_to_migrate(src, ttu);
		page_was_mapped = 1;

		if (mapping_locked)
			i_mmap_unlock_write(mapping);
	}

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, mode);

	if (page_was_mapped)
		remove_migration_ptes(src,
			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

unlock_put_anon:
	unlock_page(new_hpage);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(hpage, new_hpage, reason);
		put_new_page = NULL;
	}

out_unlock:
	unlock_page(hpage);
out:
	if (rc == MIGRATEPAGE_SUCCESS)
		putback_active_hugepage(hpage);
	else if (rc != -EAGAIN)
		list_move_tail(&hpage->lru, ret);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it. Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_page)
		put_new_page(new_hpage, private);
	else
		putback_active_hugepage(new_hpage);

	return rc;
}

static inline int try_split_thp(struct page *page, struct page **page2,
				struct list_head *from)
{
	int rc = 0;

	lock_page(page);
	rc = split_huge_page_to_list(page, from);
	unlock_page(page);
	if (!rc)
		list_safe_reset_next(page, *page2, lru);

	return rc;
}

/*
 * migrate_pages - migrate the pages specified in a list, to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @put_new_page:	The function used to free target pages if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 * @ret_succeeded:	Set to the number of normal pages migrated successfully if
 *			the caller passes a non-NULL pointer.
 *
 * The function returns after 10 attempts or if no pages are movable any more
 * because the list has become empty or no retryable pages exist.
 * It is the caller's responsibility to call putback_movable_pages() to return
 * pages to the LRU or free list only if ret != 0.
 *
 * Returns the number of {normal page, THP, hugetlb} that were not migrated, or
 * an error code. The number of THP splits will be considered as the number of
 * non-migrated THP, no matter how many subpages of the THP are migrated successfully.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
	int retry = 1;
	int thp_retry = 1;
	int nr_failed = 0;
	int nr_failed_pages = 0;
	int nr_succeeded = 0;
	int nr_thp_succeeded = 0;
	int nr_thp_failed = 0;
	int nr_thp_split = 0;
	int pass = 0;
	bool is_thp = false;
	struct page *page;
	struct page *page2;
	int rc, nr_subpages;
	LIST_HEAD(ret_pages);
	LIST_HEAD(thp_split_pages);
	bool nosplit = (reason == MR_NUMA_MISPLACED);
	bool no_subpage_counting = false;

	trace_mm_migrate_pages_start(mode, reason);

thp_subpage_migration:
	for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
		retry = 0;
		thp_retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
retry:
			/*
			 * THP statistics are based on the source huge page.
			 * Capture required information that might get lost
			 * during migration.
			 */
			is_thp = PageTransHuge(page) && !PageHuge(page);
			nr_subpages = compound_nr(page);
			cond_resched();

			if (PageHuge(page))
				rc = unmap_and_move_huge_page(get_new_page,
						put_new_page, private, page,
						pass > 2, mode, reason,
						&ret_pages);
			else
				rc = unmap_and_move(get_new_page, put_new_page,
						private, page, pass > 2, mode,
						reason, &ret_pages);
			/*
			 * The rules are:
			 *	Success: non hugetlb page will be freed, hugetlb
			 *		 page will be put back
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_pages list then splice to
			 *		     from list
			 */
			switch(rc) {
			/*
			 * THP migration might be unsupported or the
			 * allocation could've failed so we should
			 * retry on the same page with the THP split
			 * to base pages.
			 *
			 * Head page is retried immediately and tail
			 * pages are added to the tail of the list so
			 * we encounter them after the rest of the list
			 * is processed.
			 */
			case -ENOSYS:
				/* THP migration is unsupported */
				if (is_thp) {
					nr_thp_failed++;
					if (!try_split_thp(page, &page2, &thp_split_pages)) {
						nr_thp_split++;
						goto retry;
					}

					nr_failed_pages += nr_subpages;
					break;
				}

				/* Hugetlb migration is unsupported */
				if (!no_subpage_counting)
					nr_failed++;
				nr_failed_pages += nr_subpages;
				break;
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other pages, just exit.
				 * THP NUMA faulting doesn't split THP to retry.
				 */
				if (is_thp && !nosplit) {
					nr_thp_failed++;
					if (!try_split_thp(page, &page2, &thp_split_pages)) {
						nr_thp_split++;
						goto retry;
					}

					nr_failed_pages += nr_subpages;
					goto out;
				}

				if (!no_subpage_counting)
					nr_failed++;
				nr_failed_pages += nr_subpages;
				goto out;
			case -EAGAIN:
				if (is_thp) {
					thp_retry++;
					break;
				}
				retry++;
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded += nr_subpages;
				if (is_thp) {
					nr_thp_succeeded++;
					break;
				}
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike -EAGAIN case, the failed page is
				 * removed from migration page list and not
				 * retried in the next outer loop.
				 */
				if (is_thp) {
					nr_thp_failed++;
					nr_failed_pages += nr_subpages;
					break;
				}

				if (!no_subpage_counting)
					nr_failed++;
				nr_failed_pages += nr_subpages;
				break;
			}
		}
	}
	nr_failed += retry;
	nr_thp_failed += thp_retry;
	/*
	 * Try to migrate subpages of fail-to-migrate THPs, no nr_failed
	 * counting in this round, since all subpages of a THP are counted
	 * as 1 failure in the first round.
	 */
	if (!list_empty(&thp_split_pages)) {
		/*
		 * Move non-migrated pages (after 10 retries) to ret_pages
		 * to avoid migrating them again.
		 */
		list_splice_init(from, &ret_pages);
		list_splice_init(&thp_split_pages, from);
		no_subpage_counting = true;
		retry = 1;
		goto thp_subpage_migration;
	}

	rc = nr_failed + nr_thp_failed;
out:
	/*
	 * Put the permanently failed pages back on the migration list; they
	 * will be put back on the right list by the caller.
	 */
	list_splice(&ret_pages, from);

	count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
	count_vm_events(PGMIGRATE_FAIL, nr_failed_pages);
	count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
	count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
	count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
	trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded,
			       nr_thp_failed, nr_thp_split, mode, reason);

	if (ret_succeeded)
		*ret_succeeded = nr_succeeded;

	return rc;
}
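
/*
 * Illustrative sketch, not part of the original file: the calling convention
 * described above.  The hypothetical helper mirrors do_move_pages_to_node()
 * below: targets come from alloc_migration_target() on a chosen node, and
 * whatever migrate_pages() could not move is put back by the caller.
 */
static int example_migrate_list_to_node(struct list_head *pagelist, int nid)
{
	struct migration_target_control mtc = {
		.nid = nid,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};
	int err;

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}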

struct page *alloc_migration_target(struct page *page, unsigned long private)
{
	struct migration_target_control *mtc;
	gfp_t gfp_mask;
	unsigned int order = 0;
	struct page *new_page = NULL;
	int nid;
	int zidx;

	mtc = (struct migration_target_control *)private;
	gfp_mask = mtc->gfp_mask;
	nid = mtc->nid;
	if (nid == NUMA_NO_NODE)
		nid = page_to_nid(page);

	if (PageHuge(page)) {
		struct hstate *h = page_hstate(compound_head(page));

		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
	}

	if (PageTransHuge(page)) {
		/*
		 * clear __GFP_RECLAIM to make the migration callback
		 * consistent with regular THP allocations.
		 */
		gfp_mask &= ~__GFP_RECLAIM;
		gfp_mask |= GFP_TRANSHUGE;
		order = HPAGE_PMD_ORDER;
	}
	zidx = zone_idx(page_zone(page));
	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
		gfp_mask |= __GFP_HIGHMEM;

	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);

	if (new_page && PageTransHuge(new_page))
		prep_transhuge_page(new_page);

	return new_page;
}

#ifdef CONFIG_NUMA

static int store_status(int __user *status, int start, int value, int nr)
{
	while (nr-- > 0) {
		if (put_user(value, status + start))
			return -EFAULT;
		start++;
	}

	return 0;
}

static int do_move_pages_to_node(struct mm_struct *mm,
		struct list_head *pagelist, int node)
{
	int err;
	struct migration_target_control mtc = {
		.nid = node,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}

/*
 * Resolves the given address to a struct page, isolates it from the LRU and
 * puts it to the given pagelist.
 * Returns:
 *     errno - if the page cannot be found/isolated
 *     0 - when it doesn't have to be migrated because it is already on the
 *         target node
 *     1 - when it has been queued
 */
static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
		int node, struct list_head *pagelist, bool migrate_all)
{
	struct vm_area_struct *vma;
	struct page *page;
	int err;

	mmap_read_lock(mm);
	err = -EFAULT;
	vma = find_vma(mm, addr);
	if (!vma || addr < vma->vm_start || !vma_migratable(vma))
		goto out;

	/* FOLL_DUMP to ignore special (like zero) pages */
	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

	err = PTR_ERR(page);
	if (IS_ERR(page))
		goto out;

	err = -ENOENT;
	if (!page)
		goto out;

	err = 0;
	if (page_to_nid(page) == node)
		goto out_putpage;

	err = -EACCES;
	if (page_mapcount(page) > 1 && !migrate_all)
		goto out_putpage;

	if (PageHuge(page)) {
		if (PageHead(page)) {
			isolate_huge_page(page, pagelist);
			err = 1;
		}
	} else {
		struct page *head;

		head = compound_head(page);
		err = isolate_lru_page(head);
		if (err)
			goto out_putpage;

		err = 1;
		list_add_tail(&head->lru, pagelist);
		mod_node_page_state(page_pgdat(head),
			NR_ISOLATED_ANON + page_is_file_lru(head),
			thp_nr_pages(head));
	}
out_putpage:
	/*
	 * Either remove the duplicate refcount from
	 * isolate_lru_page() or drop the page ref if it was
	 * not isolated.
	 */
	put_page(page);
out:
	mmap_read_unlock(mm);
	return err;
}

static int move_pages_and_store_status(struct mm_struct *mm, int node,
		struct list_head *pagelist, int __user *status,
		int start, int i, unsigned long nr_pages)
{
	int err;

	if (list_empty(pagelist))
		return 0;

	err = do_move_pages_to_node(mm, pagelist, node);
	if (err) {
		/*
		 * Positive err means the number of failed
		 * pages to migrate.  Since we are going to
		 * abort and return the number of non-migrated
		 * pages, we need to include the rest of the
		 * nr_pages that have not been attempted as
		 * well.
		 */
		if (err > 0)
			err += nr_pages - i - 1;
		return err;
	}
	return store_status(status, start, node, i - start);
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	int current_node = NUMA_NO_NODE;
	LIST_HEAD(pagelist);
	int start, i;
	int err = 0, err1;

	lru_cache_disable();

	for (i = start = 0; i < nr_pages; i++) {
		const void __user *p;
		unsigned long addr;
		int node;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out_flush;
		if (get_user(node, nodes + i))
			goto out_flush;
		addr = (unsigned long)untagged_addr(p);

		err = -ENODEV;
		if (node < 0 || node >= MAX_NUMNODES)
			goto out_flush;
		if (!node_state(node, N_MEMORY))
			goto out_flush;

		err = -EACCES;
		if (!node_isset(node, task_nodes))
			goto out_flush;

		if (current_node == NUMA_NO_NODE) {
			current_node = node;
			start = i;
		} else if (node != current_node) {
			err = move_pages_and_store_status(mm, current_node,
					&pagelist, status, start, i, nr_pages);
			if (err)
				goto out;
			start = i;
			current_node = node;
		}

		/*
		 * Errors in the page lookup or isolation are not fatal and we simply
		 * report them via status
		 */
		err = add_page_for_migration(mm, addr, current_node,
				&pagelist, flags & MPOL_MF_MOVE_ALL);

		if (err > 0) {
			/* The page is successfully queued for migration */
			continue;
		}

		/*
		 * The move_pages() man page does not have an -EEXIST choice, so
		 * use -EFAULT instead.
		 */
		if (err == -EEXIST)
			err = -EFAULT;

		/*
		 * If the page is already on the target node (!err), store the
		 * node, otherwise, store the err.
		 */
		err = store_status(status, i, err ? : current_node, 1);
		if (err)
			goto out_flush;

		err = move_pages_and_store_status(mm, current_node, &pagelist,
				status, start, i, nr_pages);
		if (err)
			goto out;
		current_node = NUMA_NO_NODE;
	}
out_flush:
	/* Make sure we do not overwrite the existing error */
	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
				status, start, i, nr_pages);
	if (err >= 0)
		err = err1;
out:
	lru_cache_enable();
	return err;
}

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	mmap_read_lock(mm);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = vma_lookup(mm, addr);
		if (!vma)
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = page ? page_to_nid(page) : -ENOENT;
set_status:
		*status = err;

		pages++;
		status++;
	}

	mmap_read_unlock(mm);
}

static int get_compat_pages_array(const void __user *chunk_pages[],
				  const void __user * __user *pages,
				  unsigned long chunk_nr)
{
	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
	compat_uptr_t p;
	int i;

	for (i = 0; i < chunk_nr; i++) {
		if (get_user(p, pages32 + i))
			return -EFAULT;
		chunk_pages[i] = compat_ptr(p);
	}

	return 0;
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (in_compat_syscall()) {
			if (get_compat_pages_array(chunk_pages, pages,
						   chunk_nr))
				break;
		} else {
			if (copy_from_user(chunk_pages, pages,
				      chunk_nr * sizeof(*chunk_pages)))
				break;
		}

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
{
	struct task_struct *task;
	struct mm_struct *mm;

	/*
	 * There is no need to check if the current process has the right to
	 * modify the specified process when they are the same.
	 */
	if (!pid) {
		mmget(current->mm);
		*mem_nodes = cpuset_mems_allowed(current);
		return current->mm;
	}

	/* Find the mm_struct */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		return ERR_PTR(-ESRCH);
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		mm = ERR_PTR(-EPERM);
		goto out;
	}
	rcu_read_unlock();

	mm = ERR_PTR(security_task_movememory(task));
	if (IS_ERR(mm))
		goto out;
	*mem_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
out:
	put_task_struct(task);
	if (!mm)
		mm = ERR_PTR(-EINVAL);
	return mm;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
			     const void __user * __user *pages,
			     const int __user *nodes,
			     int __user *status, int flags)
{
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	mm = find_mm_struct(pid, &task_nodes);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;
}

SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}
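
/*
 * Illustrative userspace sketch, not part of the original file: the status
 * semantics implemented by do_pages_move()/do_pages_stat() as seen through
 * the move_pages(2) syscall (here via libnuma's <numaif.h> wrapper).  After
 * the call, status[i] holds either the node the page ended up on or a
 * negative errno for that page.
 *
 *	void *pages[2] = { addr0, addr1 };
 *	int nodes[2] = { 1, 1 };
 *	int status[2];
 *
 *	if (move_pages(0, 2, pages, nodes, status, MPOL_MF_MOVE) == 0) {
 *		// status[0]/status[1]: destination node or -errno per page
 *	}
 */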

#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       ZONE_MOVABLE, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					   unsigned long data)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = __alloc_pages_node(nid,
					 (GFP_HIGHUSER_MOVABLE |
					  __GFP_THISNODE | __GFP_NOMEMALLOC |
					  __GFP_NORETRY | __GFP_NOWARN) &
					 ~__GFP_RECLAIM, 0);

	return newpage;
}

static struct page *alloc_misplaced_dst_page_thp(struct page *page,
						 unsigned long data)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
				   HPAGE_PMD_ORDER);
	if (!newpage)
		goto out;

	prep_transhuge_page(newpage);

out:
	return newpage;
}

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int page_lru;
	int nr_pages = thp_nr_pages(page);
	int order = compound_order(page);

	VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);

	/* Do not migrate THP mapped by multiple processes */
	if (PageTransHuge(page) && total_mapcount(page) > 1)
		return 0;

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
		int z;

		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
			return 0;
		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
			if (populated_zone(pgdat->node_zones + z))
				break;
		}
		wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
		return 0;
	}

	if (isolate_lru_page(page))
		return 0;

	page_lru = page_is_file_lru(page);
	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
			    nr_pages);

	/*
#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is a crude check.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       ZONE_MOVABLE, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					     unsigned long data)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = __alloc_pages_node(nid,
				     (GFP_HIGHUSER_MOVABLE |
				      __GFP_THISNODE | __GFP_NOMEMALLOC |
				      __GFP_NORETRY | __GFP_NOWARN) &
				     ~__GFP_RECLAIM, 0);

	return newpage;
}

static struct page *alloc_misplaced_dst_page_thp(struct page *page,
						 unsigned long data)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
				   HPAGE_PMD_ORDER);
	if (!newpage)
		goto out;

	prep_transhuge_page(newpage);

out:
	return newpage;
}

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int page_lru;
	int nr_pages = thp_nr_pages(page);
	int order = compound_order(page);

	VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);

	/* Do not migrate THP mapped by multiple processes */
	if (PageTransHuge(page) && total_mapcount(page) > 1)
		return 0;

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
		int z;

		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
			return 0;
		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
			if (populated_zone(pgdat->node_zones + z))
				break;
		}
		wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
		return 0;
	}

	if (isolate_lru_page(page))
		return 0;

	page_lru = page_is_file_lru(page);
	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
			    nr_pages);

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}

/*
 * Attempt to migrate a misplaced page to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the page that will be dropped by this function before returning.
 */
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
			   int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	unsigned int nr_succeeded;
	LIST_HEAD(migratepages);
	new_page_t *new;
	bool compound;
	int nr_pages = thp_nr_pages(page);

	/*
	 * A PTE-mapped THP or a HugeTLB page can't reach here, so the page
	 * is either a base page or a PMD-mapped THP, and it must be the
	 * head page if it is a THP.
	 */
	compound = PageTransHuge(page);

	if (compound)
		new = alloc_misplaced_dst_page_thp;
	else
		new = alloc_misplaced_dst_page;

	/*
	 * Don't migrate file pages that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 */
	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Also do not migrate dirty pages, as not all filesystems can move
	 * dirty pages in MIGRATE_ASYNC mode, which would be a waste of cycles.
	 */
	if (page_is_file_lru(page) && PageDirty(page))
		goto out;

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated)
		goto out;

	list_add(&page->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
				     MIGRATE_ASYNC, MR_NUMA_MISPLACED,
				     &nr_succeeded);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&page->lru);
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -nr_pages);
			putback_lru_page(page);
		}
		isolated = 0;
	}
	if (nr_succeeded) {
		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
					    nr_succeeded);
	}
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_NUMA */
/*
 * node_demotion[] example:
 *
 * Consider a system with two sockets.  Each socket has
 * three classes of memory attached: fast, medium and slow.
 * Each memory class is placed in its own NUMA node.  The
 * CPUs are placed in the node with the "fast" memory.  The
 * 6 NUMA nodes (0-5) might be split among the sockets like
 * this:
 *
 *	Socket A: 0, 1, 2
 *	Socket B: 3, 4, 5
 *
 * When Node 0 fills up, its memory should be migrated to
 * Node 1.  When Node 1 fills up, it should be migrated to
 * Node 2.  The migration path starts on the nodes with the
 * processors (since allocations default to this node) and
 * fast memory, progresses through medium and ends with the
 * slow memory:
 *
 *	0 -> 1 -> 2 -> stop
 *	3 -> 4 -> 5 -> stop
 *
 * This is represented in node_demotion[] like this:
 *
 *	{ nr=1, nodes[0]=1 }, // Node 0 migrates to 1
 *	{ nr=1, nodes[0]=2 }, // Node 1 migrates to 2
 *	{ nr=0, nodes[0]=-1 }, // Node 2 does not migrate
 *	{ nr=1, nodes[0]=4 }, // Node 3 migrates to 4
 *	{ nr=1, nodes[0]=5 }, // Node 4 migrates to 5
 *	{ nr=0, nodes[0]=-1 }, // Node 5 does not migrate
 *
 * Moreover, some systems may have multiple slow memory nodes.
 * Suppose a system has one socket with 3 memory nodes: node 0
 * is fast memory, nodes 1 and 2 are both slow memory, and the
 * distance from the fast memory node to each slow memory node
 * is the same.  The migration path should then be:
 *
 *	0 -> 1/2 -> stop
 *
 * This is represented in node_demotion[] like this:
 *	{ nr=2, {nodes[0]=1, nodes[1]=2} }, // Node 0 migrates to node 1 and node 2
 *	{ nr=0, nodes[0]=-1, }, // Node 1 does not migrate
 *	{ nr=0, nodes[0]=-1, }, // Node 2 does not migrate
 */

/*
 * Writes to this array occur without locking.  Cycles are
 * not allowed: Node X demotes to Y which demotes to X...
 *
 * If multiple reads are performed, a single rcu_read_lock()
 * must be held over all reads to ensure that no cycles are
 * observed.
 */
#define DEFAULT_DEMOTION_TARGET_NODES 15

#if MAX_NUMNODES < DEFAULT_DEMOTION_TARGET_NODES
#define DEMOTION_TARGET_NODES	(MAX_NUMNODES - 1)
#else
#define DEMOTION_TARGET_NODES	DEFAULT_DEMOTION_TARGET_NODES
#endif

struct demotion_nodes {
	unsigned short nr;
	short nodes[DEMOTION_TARGET_NODES];
};

static struct demotion_nodes *node_demotion __read_mostly;
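/*
 * Purely illustrative and unused by the code in this file: what
 * node_demotion[] would hold for the two-socket example in the comment
 * above, assuming nodes 0 and 3 are the CPU/"fast" nodes.  Only the first
 * .nr entries of .nodes[] are meaningful.  The array name is made up for
 * the example.
 */
static const struct demotion_nodes example_two_socket_demotion[] __maybe_unused = {
	[0] = { .nr = 1, .nodes = { 1 } },		/* node 0 demotes to node 1 */
	[1] = { .nr = 1, .nodes = { 2 } },		/* node 1 demotes to node 2 */
	[2] = { .nr = 0, .nodes = { NUMA_NO_NODE } },	/* node 2 is terminal */
	[3] = { .nr = 1, .nodes = { 4 } },		/* node 3 demotes to node 4 */
	[4] = { .nr = 1, .nodes = { 5 } },		/* node 4 demotes to node 5 */
	[5] = { .nr = 0, .nodes = { NUMA_NO_NODE } },	/* node 5 is terminal */
};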
/**
 * next_demotion_node() - Get the next node in the demotion path
 * @node: The starting node to look up the next node for
 *
 * Return: node id for next memory node in the demotion path hierarchy
 * from @node; NUMA_NO_NODE if @node is terminal.  This does not keep
 * @node online or guarantee that it *continues* to be the next demotion
 * target.
 */
int next_demotion_node(int node)
{
	struct demotion_nodes *nd;
	unsigned short target_nr, index;
	int target;

	if (!node_demotion)
		return NUMA_NO_NODE;

	nd = &node_demotion[node];

	/*
	 * node_demotion[] is updated without excluding this
	 * function from running.  RCU doesn't provide any
	 * compiler barriers, so the READ_ONCE() is required
	 * to avoid compiler reordering or read merging.
	 *
	 * Make sure to use RCU over entire code blocks if
	 * node_demotion[] reads need to be consistent.
	 */
	rcu_read_lock();
	target_nr = READ_ONCE(nd->nr);

	switch (target_nr) {
	case 0:
		target = NUMA_NO_NODE;
		goto out;
	case 1:
		index = 0;
		break;
	default:
		/*
		 * If there are multiple target nodes, just select one
		 * target node randomly.
		 *
		 * Round-robin selection would need an extra field in
		 * node_demotion[] to record the last selected target,
		 * and updating that field could cause cache ping-pong.
		 * Per-CPU state would avoid the caching issue but adds
		 * complexity, so random selection is the simplest
		 * choice for now.
		 */
		index = get_random_int() % target_nr;
		break;
	}

	target = READ_ONCE(nd->nodes[index]);

out:
	rcu_read_unlock();
	return target;
}
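/*
 * Hedged usage sketch, not called from anywhere in this file: follow the
 * demotion path from @nid hop by hop until a terminal node is reached,
 * the way reclaim-based demotion conceptually chains nodes together.
 * Each hop takes its own RCU read-side section inside next_demotion_node(),
 * so the path observed here may mix old and new demotion orders if the
 * table is being rewritten concurrently.  The function name is made up
 * for the example.
 */
static int __maybe_unused demotion_chain_end(int nid)
{
	int next;

	while ((next = next_demotion_node(nid)) != NUMA_NO_NODE)
		nid = next;

	return nid;
}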
#if defined(CONFIG_HOTPLUG_CPU)
/* Disable reclaim-based migration. */
static void __disable_all_migrate_targets(void)
{
	int node, i;

	if (!node_demotion)
		return;

	for_each_online_node(node) {
		node_demotion[node].nr = 0;
		for (i = 0; i < DEMOTION_TARGET_NODES; i++)
			node_demotion[node].nodes[i] = NUMA_NO_NODE;
	}
}

static void disable_all_migrate_targets(void)
{
	__disable_all_migrate_targets();

	/*
	 * Ensure that the "disable" is visible across the system.
	 * Readers will see either a combination of before+disable
	 * state or disable+after.  They will never see before and
	 * after state together.
	 *
	 * The before+after state together might have cycles and
	 * could cause readers to do things like loop until this
	 * function finishes.  This ensures they can only see a
	 * single "bad" read and would, for instance, only loop
	 * once.
	 */
	synchronize_rcu();
}

/*
 * Find an automatic demotion target for 'node'.
 * Failing here is OK.  It might just indicate
 * being at the end of a chain.
 */
static int establish_migrate_target(int node, nodemask_t *used,
				    int best_distance)
{
	int migration_target, index, val;
	struct demotion_nodes *nd;

	if (!node_demotion)
		return NUMA_NO_NODE;

	nd = &node_demotion[node];

	migration_target = find_next_best_node(node, used);
	if (migration_target == NUMA_NO_NODE)
		return NUMA_NO_NODE;

	/*
	 * If a migration target has already been set for this node, the
	 * first target found is at the best distance.  Only accept
	 * additional target nodes that are at the same best distance.
	 */
	if (best_distance != -1) {
		val = node_distance(node, migration_target);
		if (val > best_distance)
			goto out_clear;
	}

	index = nd->nr;
	if (WARN_ONCE(index >= DEMOTION_TARGET_NODES,
		      "Exceeds maximum demotion target nodes\n"))
		goto out_clear;

	nd->nodes[index] = migration_target;
	nd->nr++;

	return migration_target;
out_clear:
	node_clear(migration_target, *used);
	return NUMA_NO_NODE;
}

/*
 * When memory fills up on a node, memory contents can be
 * automatically migrated to another node instead of
 * discarded at reclaim.
 *
 * Establish a "migration path" which will start at nodes
 * with CPUs and will follow the priorities used to build the
 * page allocator zonelists.
 *
 * The difference here is that cycles must be avoided.  If
 * node0 migrates to node1, then neither node1, nor anything
 * node1 migrates to, can migrate to node0.  Also, one node can
 * be migrated to multiple nodes if the target nodes all have
 * the same best distance from the source node.
 *
 * This function can run simultaneously with readers of
 * node_demotion[].  However, it can not run simultaneously
 * with itself.  Exclusion is provided by memory hotplug events
 * being single-threaded.
 */
static void __set_migration_target_nodes(void)
{
	nodemask_t next_pass	= NODE_MASK_NONE;
	nodemask_t this_pass	= NODE_MASK_NONE;
	nodemask_t used_targets = NODE_MASK_NONE;
	int node, best_distance;

	/*
	 * Avoid any oddities like cycles that could occur
	 * from changes in the topology.  This will leave
	 * a momentary gap when migration is disabled.
	 */
	disable_all_migrate_targets();

	/*
	 * Allocations go close to CPUs, first.  Assume that
	 * the migration path starts at the nodes with CPUs.
	 */
	next_pass = node_states[N_CPU];
again:
	this_pass = next_pass;
	next_pass = NODE_MASK_NONE;
	/*
	 * To avoid cycles in the migration "graph", ensure
	 * that migration sources are not future targets by
	 * setting them in 'used_targets'.  Do this only
	 * once per pass so that multiple source nodes can
	 * share a target node.
	 *
	 * 'used_targets' will become unavailable in future
	 * passes.  This limits some opportunities for
	 * multiple source nodes to share a destination.
	 */
	nodes_or(used_targets, used_targets, this_pass);

	for_each_node_mask(node, this_pass) {
		best_distance = -1;

		/*
		 * Try to set up the migration path for the node.  There can
		 * be multiple target nodes, so keep looping until no further
		 * target at the best distance can be found.
		 */
		do {
			int target_node =
				establish_migrate_target(node, &used_targets,
							 best_distance);

			if (target_node == NUMA_NO_NODE)
				break;

			if (best_distance == -1)
				best_distance = node_distance(node, target_node);

			/*
			 * Visit targets from this pass in the next pass.
			 * Eventually, every node will have been part of
			 * a pass, and will become set in 'used_targets'.
			 */
			node_set(target_node, next_pass);
		} while (1);
	}
	/*
	 * 'next_pass' contains nodes which became migration
	 * targets in this pass.  Make additional passes until
	 * no more migration targets are available.
	 */
	if (!nodes_empty(next_pass))
		goto again;
}
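/*
 * Worked example (illustrative, using the two-socket topology from the
 * node_demotion[] comment above, with CPUs on nodes 0 and 3):
 *
 *	pass 1: this_pass = {0,3}, used_targets |= {0,3}
 *		node 0 picks target 1, node 3 picks target 4
 *		(targets are also marked used), next_pass = {1,4}
 *	pass 2: this_pass = {1,4}, used_targets is now {0,1,3,4}
 *		node 1 picks target 2, node 4 picks target 5
 *		next_pass = {2,5}
 *	pass 3: this_pass = {2,5}, every node is already in used_targets,
 *		so no targets are found, next_pass is empty and the
 *		loop terminates
 */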
/*
 * For callers that do not hold get_online_mems() already.
 */
void set_migration_target_nodes(void)
{
	get_online_mems();
	__set_migration_target_nodes();
	put_online_mems();
}

/*
 * This leaves migrate-on-reclaim transiently disabled between
 * the MEM_GOING_OFFLINE and MEM_OFFLINE events.  This runs
 * whether reclaim-based migration is enabled or not, which
 * ensures that the user can turn reclaim-based migration on
 * or off at any time without needing to recalculate migration
 * targets.
 *
 * These callbacks already hold get_online_mems().  That is why
 * __set_migration_target_nodes() can be used as opposed to
 * set_migration_target_nodes().
 */
static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
						 unsigned long action, void *_arg)
{
	struct memory_notify *arg = _arg;

	/*
	 * Only update the node migration order when a node is
	 * changing status, like online->offline.  This avoids
	 * the overhead of synchronize_rcu() in most cases.
	 */
	if (arg->status_change_nid < 0)
		return notifier_from_errno(0);

	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Make sure there are not transient states where
		 * an offline node is a migration target.  This
		 * will leave migration disabled until the offline
		 * completes and the MEM_OFFLINE case below runs.
		 */
		disable_all_migrate_targets();
		break;
	case MEM_OFFLINE:
	case MEM_ONLINE:
		/*
		 * Recalculate the target nodes once the node
		 * reaches its final state (online or offline).
		 */
		__set_migration_target_nodes();
		break;
	case MEM_CANCEL_OFFLINE:
		/*
		 * MEM_GOING_OFFLINE disabled all the migration
		 * targets.  Reenable them.
		 */
		__set_migration_target_nodes();
		break;
	case MEM_GOING_ONLINE:
	case MEM_CANCEL_ONLINE:
		break;
	}

	return notifier_from_errno(0);
}

void __init migrate_on_reclaim_init(void)
{
	node_demotion = kmalloc_array(nr_node_ids,
				      sizeof(struct demotion_nodes),
				      GFP_KERNEL);
	WARN_ON(!node_demotion);

	hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
	/*
	 * At this point, all numa nodes with memory/CPUs have their state
	 * properly set, so we can build the demotion order now.
	 * Hold the cpu hotplug lock while doing so, as we could possibly
	 * have CPU hotplug events during boot.
	 */
	cpus_read_lock();
	set_migration_target_nodes();
	cpus_read_unlock();
}
#endif /* CONFIG_HOTPLUG_CPU */

bool numa_demotion_enabled = false;

#ifdef CONFIG_SYSFS
static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  numa_demotion_enabled ? "true" : "false");
}

static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
		numa_demotion_enabled = true;
	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
		numa_demotion_enabled = false;
	else
		return -EINVAL;

	return count;
}

static struct kobj_attribute numa_demotion_enabled_attr =
	__ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
	       numa_demotion_enabled_store);

static struct attribute *numa_attrs[] = {
	&numa_demotion_enabled_attr.attr,
	NULL,
};

static const struct attribute_group numa_attr_group = {
	.attrs = numa_attrs,
};

static int __init numa_init_sysfs(void)
{
	int err;
	struct kobject *numa_kobj;

	numa_kobj = kobject_create_and_add("numa", mm_kobj);
	if (!numa_kobj) {
		pr_err("failed to create numa kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(numa_kobj, &numa_attr_group);
	if (err) {
		pr_err("failed to register numa group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(numa_kobj);
	return err;
}
subsys_initcall(numa_init_sysfs);
#endif
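/*
 * Illustrative only, not part of this file: the attribute group registered
 * above appears as /sys/kernel/mm/numa/demotion_enabled, since mm_kobj is
 * /sys/kernel/mm and the kobject created above is named "numa".  A minimal
 * user-space sketch for turning reclaim-based demotion on; the function
 * name is made up for the example.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int enable_numa_demotion(void)
 *	{
 *		int fd = open("/sys/kernel/mm/numa/demotion_enabled", O_WRONLY);
 *		int ret = -1;
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, "true", 4) == 4)
 *			ret = 0;
 *		close(fd);
 *		return ret;
 *	}
 */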