1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Memory Migration functionality - linux/mm/migrate.c 4 * 5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter 6 * 7 * Page migration was first developed in the context of the memory hotplug 8 * project. The main authors of the migration code are: 9 * 10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp> 11 * Hirokazu Takahashi <taka@valinux.co.jp> 12 * Dave Hansen <haveblue@us.ibm.com> 13 * Christoph Lameter 14 */ 15 16 #include <linux/migrate.h> 17 #include <linux/export.h> 18 #include <linux/swap.h> 19 #include <linux/swapops.h> 20 #include <linux/pagemap.h> 21 #include <linux/buffer_head.h> 22 #include <linux/mm_inline.h> 23 #include <linux/nsproxy.h> 24 #include <linux/pagevec.h> 25 #include <linux/ksm.h> 26 #include <linux/rmap.h> 27 #include <linux/topology.h> 28 #include <linux/cpu.h> 29 #include <linux/cpuset.h> 30 #include <linux/writeback.h> 31 #include <linux/mempolicy.h> 32 #include <linux/vmalloc.h> 33 #include <linux/security.h> 34 #include <linux/backing-dev.h> 35 #include <linux/compaction.h> 36 #include <linux/syscalls.h> 37 #include <linux/compat.h> 38 #include <linux/hugetlb.h> 39 #include <linux/hugetlb_cgroup.h> 40 #include <linux/gfp.h> 41 #include <linux/pagewalk.h> 42 #include <linux/pfn_t.h> 43 #include <linux/memremap.h> 44 #include <linux/userfaultfd_k.h> 45 #include <linux/balloon_compaction.h> 46 #include <linux/mmu_notifier.h> 47 #include <linux/page_idle.h> 48 #include <linux/page_owner.h> 49 #include <linux/sched/mm.h> 50 #include <linux/ptrace.h> 51 #include <linux/oom.h> 52 53 #include <asm/tlbflush.h> 54 55 #define CREATE_TRACE_POINTS 56 #include <trace/events/migrate.h> 57 58 #include "internal.h" 59 60 int isolate_movable_page(struct page *page, isolate_mode_t mode) 61 { 62 struct address_space *mapping; 63 64 /* 65 * Avoid burning cycles with pages that are yet under __free_pages(), 66 * or just got freed under us. 67 * 68 * In case we 'win' a race for a movable page being freed under us and 69 * raise its refcount preventing __free_pages() from doing its job 70 * the put_page() at the end of this block will take care of 71 * release this page, thus avoiding a nasty leakage. 72 */ 73 if (unlikely(!get_page_unless_zero(page))) 74 goto out; 75 76 /* 77 * Check PageMovable before holding a PG_lock because page's owner 78 * assumes anybody doesn't touch PG_lock of newly allocated page 79 * so unconditionally grabbing the lock ruins page's owner side. 80 */ 81 if (unlikely(!__PageMovable(page))) 82 goto out_putpage; 83 /* 84 * As movable pages are not isolated from LRU lists, concurrent 85 * compaction threads can race against page migration functions 86 * as well as race against the releasing a page. 87 * 88 * In order to avoid having an already isolated movable page 89 * being (wrongly) re-isolated while it is under migration, 90 * or to avoid attempting to isolate pages being released, 91 * lets be sure we have the page lock 92 * before proceeding with the movable page isolation steps. 
93 */ 94 if (unlikely(!trylock_page(page))) 95 goto out_putpage; 96 97 if (!PageMovable(page) || PageIsolated(page)) 98 goto out_no_isolated; 99 100 mapping = page_mapping(page); 101 VM_BUG_ON_PAGE(!mapping, page); 102 103 if (!mapping->a_ops->isolate_page(page, mode)) 104 goto out_no_isolated; 105 106 /* Driver shouldn't use PG_isolated bit of page->flags */ 107 WARN_ON_ONCE(PageIsolated(page)); 108 __SetPageIsolated(page); 109 unlock_page(page); 110 111 return 0; 112 113 out_no_isolated: 114 unlock_page(page); 115 out_putpage: 116 put_page(page); 117 out: 118 return -EBUSY; 119 } 120 121 static void putback_movable_page(struct page *page) 122 { 123 struct address_space *mapping; 124 125 mapping = page_mapping(page); 126 mapping->a_ops->putback_page(page); 127 __ClearPageIsolated(page); 128 } 129 130 /* 131 * Put previously isolated pages back onto the appropriate lists 132 * from where they were once taken off for compaction/migration. 133 * 134 * This function shall be used whenever the isolated pageset has been 135 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range() 136 * and isolate_huge_page(). 137 */ 138 void putback_movable_pages(struct list_head *l) 139 { 140 struct page *page; 141 struct page *page2; 142 143 list_for_each_entry_safe(page, page2, l, lru) { 144 if (unlikely(PageHuge(page))) { 145 putback_active_hugepage(page); 146 continue; 147 } 148 list_del(&page->lru); 149 /* 150 * We isolated non-lru movable page so here we can use 151 * __PageMovable because LRU page's mapping cannot have 152 * PAGE_MAPPING_MOVABLE. 153 */ 154 if (unlikely(__PageMovable(page))) { 155 VM_BUG_ON_PAGE(!PageIsolated(page), page); 156 lock_page(page); 157 if (PageMovable(page)) 158 putback_movable_page(page); 159 else 160 __ClearPageIsolated(page); 161 unlock_page(page); 162 put_page(page); 163 } else { 164 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 165 page_is_file_lru(page), -thp_nr_pages(page)); 166 putback_lru_page(page); 167 } 168 } 169 } 170 171 /* 172 * Restore a potential migration pte to a working pte entry 173 */ 174 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, 175 unsigned long addr, void *old) 176 { 177 struct page_vma_mapped_walk pvmw = { 178 .page = old, 179 .vma = vma, 180 .address = addr, 181 .flags = PVMW_SYNC | PVMW_MIGRATION, 182 }; 183 struct page *new; 184 pte_t pte; 185 swp_entry_t entry; 186 187 VM_BUG_ON_PAGE(PageTail(page), page); 188 while (page_vma_mapped_walk(&pvmw)) { 189 if (PageKsm(page)) 190 new = page; 191 else 192 new = page - pvmw.page->index + 193 linear_page_index(vma, pvmw.address); 194 195 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 196 /* PMD-mapped THP migration entry */ 197 if (!pvmw.pte) { 198 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); 199 remove_migration_pmd(&pvmw, new); 200 continue; 201 } 202 #endif 203 204 get_page(new); 205 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); 206 if (pte_swp_soft_dirty(*pvmw.pte)) 207 pte = pte_mksoft_dirty(pte); 208 209 /* 210 * Recheck VMA as permissions can change since migration started 211 */ 212 entry = pte_to_swp_entry(*pvmw.pte); 213 if (is_write_migration_entry(entry)) 214 pte = maybe_mkwrite(pte, vma); 215 else if (pte_swp_uffd_wp(*pvmw.pte)) 216 pte = pte_mkuffd_wp(pte); 217 218 if (unlikely(is_device_private_page(new))) { 219 entry = make_device_private_entry(new, pte_write(pte)); 220 pte = swp_entry_to_pte(entry); 221 if (pte_swp_soft_dirty(*pvmw.pte)) 222 pte = pte_swp_mksoft_dirty(pte); 223 if 
(pte_swp_uffd_wp(*pvmw.pte)) 224 pte = pte_swp_mkuffd_wp(pte); 225 } 226 227 #ifdef CONFIG_HUGETLB_PAGE 228 if (PageHuge(new)) { 229 unsigned int shift = huge_page_shift(hstate_vma(vma)); 230 231 pte = pte_mkhuge(pte); 232 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 233 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); 234 if (PageAnon(new)) 235 hugepage_add_anon_rmap(new, vma, pvmw.address); 236 else 237 page_dup_rmap(new, true); 238 } else 239 #endif 240 { 241 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); 242 243 if (PageAnon(new)) 244 page_add_anon_rmap(new, vma, pvmw.address, false); 245 else 246 page_add_file_rmap(new, false); 247 } 248 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new)) 249 mlock_vma_page(new); 250 251 if (PageTransHuge(page) && PageMlocked(page)) 252 clear_page_mlock(page); 253 254 /* No need to invalidate - it was non-present before */ 255 update_mmu_cache(vma, pvmw.address, pvmw.pte); 256 } 257 258 return true; 259 } 260 261 /* 262 * Get rid of all migration entries and replace them by 263 * references to the indicated page. 264 */ 265 void remove_migration_ptes(struct page *old, struct page *new, bool locked) 266 { 267 struct rmap_walk_control rwc = { 268 .rmap_one = remove_migration_pte, 269 .arg = old, 270 }; 271 272 if (locked) 273 rmap_walk_locked(new, &rwc); 274 else 275 rmap_walk(new, &rwc); 276 } 277 278 /* 279 * Something used the pte of a page under migration. We need to 280 * get to the page and wait until migration is finished. 281 * When we return from this function the fault will be retried. 282 */ 283 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, 284 spinlock_t *ptl) 285 { 286 pte_t pte; 287 swp_entry_t entry; 288 struct page *page; 289 290 spin_lock(ptl); 291 pte = *ptep; 292 if (!is_swap_pte(pte)) 293 goto out; 294 295 entry = pte_to_swp_entry(pte); 296 if (!is_migration_entry(entry)) 297 goto out; 298 299 page = migration_entry_to_page(entry); 300 page = compound_head(page); 301 302 /* 303 * Once page cache replacement of page migration started, page_count 304 * is zero; but we must not call put_and_wait_on_page_locked() without 305 * a ref. Use get_page_unless_zero(), and just fault again if it fails. 
306 */ 307 if (!get_page_unless_zero(page)) 308 goto out; 309 pte_unmap_unlock(ptep, ptl); 310 put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE); 311 return; 312 out: 313 pte_unmap_unlock(ptep, ptl); 314 } 315 316 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 317 unsigned long address) 318 { 319 spinlock_t *ptl = pte_lockptr(mm, pmd); 320 pte_t *ptep = pte_offset_map(pmd, address); 321 __migration_entry_wait(mm, ptep, ptl); 322 } 323 324 void migration_entry_wait_huge(struct vm_area_struct *vma, 325 struct mm_struct *mm, pte_t *pte) 326 { 327 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte); 328 __migration_entry_wait(mm, pte, ptl); 329 } 330 331 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 332 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd) 333 { 334 spinlock_t *ptl; 335 struct page *page; 336 337 ptl = pmd_lock(mm, pmd); 338 if (!is_pmd_migration_entry(*pmd)) 339 goto unlock; 340 page = migration_entry_to_page(pmd_to_swp_entry(*pmd)); 341 if (!get_page_unless_zero(page)) 342 goto unlock; 343 spin_unlock(ptl); 344 put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE); 345 return; 346 unlock: 347 spin_unlock(ptl); 348 } 349 #endif 350 351 static int expected_page_refs(struct address_space *mapping, struct page *page) 352 { 353 int expected_count = 1; 354 355 /* 356 * Device private pages have an extra refcount as they are 357 * ZONE_DEVICE pages. 358 */ 359 expected_count += is_device_private_page(page); 360 if (mapping) 361 expected_count += thp_nr_pages(page) + page_has_private(page); 362 363 return expected_count; 364 } 365 366 /* 367 * Replace the page in the mapping. 368 * 369 * The number of remaining references must be: 370 * 1 for anonymous pages without a mapping 371 * 2 for pages with a mapping 372 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. 373 */ 374 int migrate_page_move_mapping(struct address_space *mapping, 375 struct page *newpage, struct page *page, int extra_count) 376 { 377 XA_STATE(xas, &mapping->i_pages, page_index(page)); 378 struct zone *oldzone, *newzone; 379 int dirty; 380 int expected_count = expected_page_refs(mapping, page) + extra_count; 381 int nr = thp_nr_pages(page); 382 383 if (!mapping) { 384 /* Anonymous page without mapping */ 385 if (page_count(page) != expected_count) 386 return -EAGAIN; 387 388 /* No turning back from here */ 389 newpage->index = page->index; 390 newpage->mapping = page->mapping; 391 if (PageSwapBacked(page)) 392 __SetPageSwapBacked(newpage); 393 394 return MIGRATEPAGE_SUCCESS; 395 } 396 397 oldzone = page_zone(page); 398 newzone = page_zone(newpage); 399 400 xas_lock_irq(&xas); 401 if (page_count(page) != expected_count || xas_load(&xas) != page) { 402 xas_unlock_irq(&xas); 403 return -EAGAIN; 404 } 405 406 if (!page_ref_freeze(page, expected_count)) { 407 xas_unlock_irq(&xas); 408 return -EAGAIN; 409 } 410 411 /* 412 * Now we know that no one else is looking at the page: 413 * no turning back from here. 
414 */ 415 newpage->index = page->index; 416 newpage->mapping = page->mapping; 417 page_ref_add(newpage, nr); /* add cache reference */ 418 if (PageSwapBacked(page)) { 419 __SetPageSwapBacked(newpage); 420 if (PageSwapCache(page)) { 421 SetPageSwapCache(newpage); 422 set_page_private(newpage, page_private(page)); 423 } 424 } else { 425 VM_BUG_ON_PAGE(PageSwapCache(page), page); 426 } 427 428 /* Move dirty while page refs frozen and newpage not yet exposed */ 429 dirty = PageDirty(page); 430 if (dirty) { 431 ClearPageDirty(page); 432 SetPageDirty(newpage); 433 } 434 435 xas_store(&xas, newpage); 436 if (PageTransHuge(page)) { 437 int i; 438 439 for (i = 1; i < nr; i++) { 440 xas_next(&xas); 441 xas_store(&xas, newpage); 442 } 443 } 444 445 /* 446 * Drop cache reference from old page by unfreezing 447 * to one less reference. 448 * We know this isn't the last reference. 449 */ 450 page_ref_unfreeze(page, expected_count - nr); 451 452 xas_unlock(&xas); 453 /* Leave irq disabled to prevent preemption while updating stats */ 454 455 /* 456 * If moved to a different zone then also account 457 * the page for that zone. Other VM counters will be 458 * taken care of when we establish references to the 459 * new page and drop references to the old page. 460 * 461 * Note that anonymous pages are accounted for 462 * via NR_FILE_PAGES and NR_ANON_MAPPED if they 463 * are mapped to swap space. 464 */ 465 if (newzone != oldzone) { 466 struct lruvec *old_lruvec, *new_lruvec; 467 struct mem_cgroup *memcg; 468 469 memcg = page_memcg(page); 470 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat); 471 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat); 472 473 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr); 474 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr); 475 if (PageSwapBacked(page) && !PageSwapCache(page)) { 476 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr); 477 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr); 478 } 479 #ifdef CONFIG_SWAP 480 if (PageSwapCache(page)) { 481 __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr); 482 __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr); 483 } 484 #endif 485 if (dirty && mapping_can_writeback(mapping)) { 486 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr); 487 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr); 488 __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr); 489 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr); 490 } 491 } 492 local_irq_enable(); 493 494 return MIGRATEPAGE_SUCCESS; 495 } 496 EXPORT_SYMBOL(migrate_page_move_mapping); 497 498 /* 499 * The expected number of remaining references is the same as that 500 * of migrate_page_move_mapping(). 
501 */ 502 int migrate_huge_page_move_mapping(struct address_space *mapping, 503 struct page *newpage, struct page *page) 504 { 505 XA_STATE(xas, &mapping->i_pages, page_index(page)); 506 int expected_count; 507 508 xas_lock_irq(&xas); 509 expected_count = 2 + page_has_private(page); 510 if (page_count(page) != expected_count || xas_load(&xas) != page) { 511 xas_unlock_irq(&xas); 512 return -EAGAIN; 513 } 514 515 if (!page_ref_freeze(page, expected_count)) { 516 xas_unlock_irq(&xas); 517 return -EAGAIN; 518 } 519 520 newpage->index = page->index; 521 newpage->mapping = page->mapping; 522 523 get_page(newpage); 524 525 xas_store(&xas, newpage); 526 527 page_ref_unfreeze(page, expected_count - 1); 528 529 xas_unlock_irq(&xas); 530 531 return MIGRATEPAGE_SUCCESS; 532 } 533 534 /* 535 * Gigantic pages are so large that we do not guarantee that page++ pointer 536 * arithmetic will work across the entire page. We need something more 537 * specialized. 538 */ 539 static void __copy_gigantic_page(struct page *dst, struct page *src, 540 int nr_pages) 541 { 542 int i; 543 struct page *dst_base = dst; 544 struct page *src_base = src; 545 546 for (i = 0; i < nr_pages; ) { 547 cond_resched(); 548 copy_highpage(dst, src); 549 550 i++; 551 dst = mem_map_next(dst, dst_base, i); 552 src = mem_map_next(src, src_base, i); 553 } 554 } 555 556 void copy_huge_page(struct page *dst, struct page *src) 557 { 558 int i; 559 int nr_pages; 560 561 if (PageHuge(src)) { 562 /* hugetlbfs page */ 563 struct hstate *h = page_hstate(src); 564 nr_pages = pages_per_huge_page(h); 565 566 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) { 567 __copy_gigantic_page(dst, src, nr_pages); 568 return; 569 } 570 } else { 571 /* thp page */ 572 BUG_ON(!PageTransHuge(src)); 573 nr_pages = thp_nr_pages(src); 574 } 575 576 for (i = 0; i < nr_pages; i++) { 577 cond_resched(); 578 copy_highpage(dst + i, src + i); 579 } 580 } 581 582 /* 583 * Copy the page to its new location 584 */ 585 void migrate_page_states(struct page *newpage, struct page *page) 586 { 587 int cpupid; 588 589 if (PageError(page)) 590 SetPageError(newpage); 591 if (PageReferenced(page)) 592 SetPageReferenced(newpage); 593 if (PageUptodate(page)) 594 SetPageUptodate(newpage); 595 if (TestClearPageActive(page)) { 596 VM_BUG_ON_PAGE(PageUnevictable(page), page); 597 SetPageActive(newpage); 598 } else if (TestClearPageUnevictable(page)) 599 SetPageUnevictable(newpage); 600 if (PageWorkingset(page)) 601 SetPageWorkingset(newpage); 602 if (PageChecked(page)) 603 SetPageChecked(newpage); 604 if (PageMappedToDisk(page)) 605 SetPageMappedToDisk(newpage); 606 607 /* Move dirty on pages not done by migrate_page_move_mapping() */ 608 if (PageDirty(page)) 609 SetPageDirty(newpage); 610 611 if (page_is_young(page)) 612 set_page_young(newpage); 613 if (page_is_idle(page)) 614 set_page_idle(newpage); 615 616 /* 617 * Copy NUMA information to the new page, to prevent over-eager 618 * future migrations of this same page. 619 */ 620 cpupid = page_cpupid_xchg_last(page, -1); 621 page_cpupid_xchg_last(newpage, cpupid); 622 623 ksm_migrate_page(newpage, page); 624 /* 625 * Please do not reorder this without considering how mm/ksm.c's 626 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache(). 
627 */ 628 if (PageSwapCache(page)) 629 ClearPageSwapCache(page); 630 ClearPagePrivate(page); 631 632 /* page->private contains hugetlb specific flags */ 633 if (!PageHuge(page)) 634 set_page_private(page, 0); 635 636 /* 637 * If any waiters have accumulated on the new page then 638 * wake them up. 639 */ 640 if (PageWriteback(newpage)) 641 end_page_writeback(newpage); 642 643 /* 644 * PG_readahead shares the same bit with PG_reclaim. The above 645 * end_page_writeback() may clear PG_readahead mistakenly, so set the 646 * bit after that. 647 */ 648 if (PageReadahead(page)) 649 SetPageReadahead(newpage); 650 651 copy_page_owner(page, newpage); 652 653 if (!PageHuge(page)) 654 mem_cgroup_migrate(page, newpage); 655 } 656 EXPORT_SYMBOL(migrate_page_states); 657 658 void migrate_page_copy(struct page *newpage, struct page *page) 659 { 660 if (PageHuge(page) || PageTransHuge(page)) 661 copy_huge_page(newpage, page); 662 else 663 copy_highpage(newpage, page); 664 665 migrate_page_states(newpage, page); 666 } 667 EXPORT_SYMBOL(migrate_page_copy); 668 669 /************************************************************ 670 * Migration functions 671 ***********************************************************/ 672 673 /* 674 * Common logic to directly migrate a single LRU page suitable for 675 * pages that do not use PagePrivate/PagePrivate2. 676 * 677 * Pages are locked upon entry and exit. 678 */ 679 int migrate_page(struct address_space *mapping, 680 struct page *newpage, struct page *page, 681 enum migrate_mode mode) 682 { 683 int rc; 684 685 BUG_ON(PageWriteback(page)); /* Writeback must be complete */ 686 687 rc = migrate_page_move_mapping(mapping, newpage, page, 0); 688 689 if (rc != MIGRATEPAGE_SUCCESS) 690 return rc; 691 692 if (mode != MIGRATE_SYNC_NO_COPY) 693 migrate_page_copy(newpage, page); 694 else 695 migrate_page_states(newpage, page); 696 return MIGRATEPAGE_SUCCESS; 697 } 698 EXPORT_SYMBOL(migrate_page); 699 700 #ifdef CONFIG_BLOCK 701 /* Returns true if all buffers are successfully locked */ 702 static bool buffer_migrate_lock_buffers(struct buffer_head *head, 703 enum migrate_mode mode) 704 { 705 struct buffer_head *bh = head; 706 707 /* Simple case, sync compaction */ 708 if (mode != MIGRATE_ASYNC) { 709 do { 710 lock_buffer(bh); 711 bh = bh->b_this_page; 712 713 } while (bh != head); 714 715 return true; 716 } 717 718 /* async case, we cannot block on lock_buffer so use trylock_buffer */ 719 do { 720 if (!trylock_buffer(bh)) { 721 /* 722 * We failed to lock the buffer and cannot stall in 723 * async migration. 
Release the taken locks 724 */ 725 struct buffer_head *failed_bh = bh; 726 bh = head; 727 while (bh != failed_bh) { 728 unlock_buffer(bh); 729 bh = bh->b_this_page; 730 } 731 return false; 732 } 733 734 bh = bh->b_this_page; 735 } while (bh != head); 736 return true; 737 } 738 739 static int __buffer_migrate_page(struct address_space *mapping, 740 struct page *newpage, struct page *page, enum migrate_mode mode, 741 bool check_refs) 742 { 743 struct buffer_head *bh, *head; 744 int rc; 745 int expected_count; 746 747 if (!page_has_buffers(page)) 748 return migrate_page(mapping, newpage, page, mode); 749 750 /* Check whether page does not have extra refs before we do more work */ 751 expected_count = expected_page_refs(mapping, page); 752 if (page_count(page) != expected_count) 753 return -EAGAIN; 754 755 head = page_buffers(page); 756 if (!buffer_migrate_lock_buffers(head, mode)) 757 return -EAGAIN; 758 759 if (check_refs) { 760 bool busy; 761 bool invalidated = false; 762 763 recheck_buffers: 764 busy = false; 765 spin_lock(&mapping->private_lock); 766 bh = head; 767 do { 768 if (atomic_read(&bh->b_count)) { 769 busy = true; 770 break; 771 } 772 bh = bh->b_this_page; 773 } while (bh != head); 774 if (busy) { 775 if (invalidated) { 776 rc = -EAGAIN; 777 goto unlock_buffers; 778 } 779 spin_unlock(&mapping->private_lock); 780 invalidate_bh_lrus(); 781 invalidated = true; 782 goto recheck_buffers; 783 } 784 } 785 786 rc = migrate_page_move_mapping(mapping, newpage, page, 0); 787 if (rc != MIGRATEPAGE_SUCCESS) 788 goto unlock_buffers; 789 790 attach_page_private(newpage, detach_page_private(page)); 791 792 bh = head; 793 do { 794 set_bh_page(bh, newpage, bh_offset(bh)); 795 bh = bh->b_this_page; 796 797 } while (bh != head); 798 799 if (mode != MIGRATE_SYNC_NO_COPY) 800 migrate_page_copy(newpage, page); 801 else 802 migrate_page_states(newpage, page); 803 804 rc = MIGRATEPAGE_SUCCESS; 805 unlock_buffers: 806 if (check_refs) 807 spin_unlock(&mapping->private_lock); 808 bh = head; 809 do { 810 unlock_buffer(bh); 811 bh = bh->b_this_page; 812 813 } while (bh != head); 814 815 return rc; 816 } 817 818 /* 819 * Migration function for pages with buffers. This function can only be used 820 * if the underlying filesystem guarantees that no other references to "page" 821 * exist. For example attached buffer heads are accessed only under page lock. 822 */ 823 int buffer_migrate_page(struct address_space *mapping, 824 struct page *newpage, struct page *page, enum migrate_mode mode) 825 { 826 return __buffer_migrate_page(mapping, newpage, page, mode, false); 827 } 828 EXPORT_SYMBOL(buffer_migrate_page); 829 830 /* 831 * Same as above except that this variant is more careful and checks that there 832 * are also no buffer head references. This function is the right one for 833 * mappings where buffer heads are directly looked up and referenced (such as 834 * block device mappings). 
835 */ 836 int buffer_migrate_page_norefs(struct address_space *mapping, 837 struct page *newpage, struct page *page, enum migrate_mode mode) 838 { 839 return __buffer_migrate_page(mapping, newpage, page, mode, true); 840 } 841 #endif 842 843 /* 844 * Writeback a page to clean the dirty state 845 */ 846 static int writeout(struct address_space *mapping, struct page *page) 847 { 848 struct writeback_control wbc = { 849 .sync_mode = WB_SYNC_NONE, 850 .nr_to_write = 1, 851 .range_start = 0, 852 .range_end = LLONG_MAX, 853 .for_reclaim = 1 854 }; 855 int rc; 856 857 if (!mapping->a_ops->writepage) 858 /* No write method for the address space */ 859 return -EINVAL; 860 861 if (!clear_page_dirty_for_io(page)) 862 /* Someone else already triggered a write */ 863 return -EAGAIN; 864 865 /* 866 * A dirty page may imply that the underlying filesystem has 867 * the page on some queue. So the page must be clean for 868 * migration. Writeout may mean we loose the lock and the 869 * page state is no longer what we checked for earlier. 870 * At this point we know that the migration attempt cannot 871 * be successful. 872 */ 873 remove_migration_ptes(page, page, false); 874 875 rc = mapping->a_ops->writepage(page, &wbc); 876 877 if (rc != AOP_WRITEPAGE_ACTIVATE) 878 /* unlocked. Relock */ 879 lock_page(page); 880 881 return (rc < 0) ? -EIO : -EAGAIN; 882 } 883 884 /* 885 * Default handling if a filesystem does not provide a migration function. 886 */ 887 static int fallback_migrate_page(struct address_space *mapping, 888 struct page *newpage, struct page *page, enum migrate_mode mode) 889 { 890 if (PageDirty(page)) { 891 /* Only writeback pages in full synchronous migration */ 892 switch (mode) { 893 case MIGRATE_SYNC: 894 case MIGRATE_SYNC_NO_COPY: 895 break; 896 default: 897 return -EBUSY; 898 } 899 return writeout(mapping, page); 900 } 901 902 /* 903 * Buffers may be managed in a filesystem specific way. 904 * We must have no buffers or drop them. 905 */ 906 if (page_has_private(page) && 907 !try_to_release_page(page, GFP_KERNEL)) 908 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY; 909 910 return migrate_page(mapping, newpage, page, mode); 911 } 912 913 /* 914 * Move a page to a newly allocated page 915 * The page is locked and all ptes have been successfully removed. 916 * 917 * The new page will have replaced the old page if this function 918 * is successful. 919 * 920 * Return value: 921 * < 0 - error code 922 * MIGRATEPAGE_SUCCESS - success 923 */ 924 static int move_to_new_page(struct page *newpage, struct page *page, 925 enum migrate_mode mode) 926 { 927 struct address_space *mapping; 928 int rc = -EAGAIN; 929 bool is_lru = !__PageMovable(page); 930 931 VM_BUG_ON_PAGE(!PageLocked(page), page); 932 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 933 934 mapping = page_mapping(page); 935 936 if (likely(is_lru)) { 937 if (!mapping) 938 rc = migrate_page(mapping, newpage, page, mode); 939 else if (mapping->a_ops->migratepage) 940 /* 941 * Most pages have a mapping and most filesystems 942 * provide a migratepage callback. Anonymous pages 943 * are part of swap space which also has its own 944 * migratepage callback. This is the most common path 945 * for page migration. 946 */ 947 rc = mapping->a_ops->migratepage(mapping, newpage, 948 page, mode); 949 else 950 rc = fallback_migrate_page(mapping, newpage, 951 page, mode); 952 } else { 953 /* 954 * In case of non-lru page, it could be released after 955 * isolation step. In that case, we shouldn't try migration. 
956 */ 957 VM_BUG_ON_PAGE(!PageIsolated(page), page); 958 if (!PageMovable(page)) { 959 rc = MIGRATEPAGE_SUCCESS; 960 __ClearPageIsolated(page); 961 goto out; 962 } 963 964 rc = mapping->a_ops->migratepage(mapping, newpage, 965 page, mode); 966 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS && 967 !PageIsolated(page)); 968 } 969 970 /* 971 * When successful, old pagecache page->mapping must be cleared before 972 * page is freed; but stats require that PageAnon be left as PageAnon. 973 */ 974 if (rc == MIGRATEPAGE_SUCCESS) { 975 if (__PageMovable(page)) { 976 VM_BUG_ON_PAGE(!PageIsolated(page), page); 977 978 /* 979 * We clear PG_movable under page_lock so any compactor 980 * cannot try to migrate this page. 981 */ 982 __ClearPageIsolated(page); 983 } 984 985 /* 986 * Anonymous and movable page->mapping will be cleared by 987 * free_pages_prepare so don't reset it here for keeping 988 * the type to work PageAnon, for example. 989 */ 990 if (!PageMappingFlags(page)) 991 page->mapping = NULL; 992 993 if (likely(!is_zone_device_page(newpage))) 994 flush_dcache_page(newpage); 995 996 } 997 out: 998 return rc; 999 } 1000 1001 static int __unmap_and_move(struct page *page, struct page *newpage, 1002 int force, enum migrate_mode mode) 1003 { 1004 int rc = -EAGAIN; 1005 int page_was_mapped = 0; 1006 struct anon_vma *anon_vma = NULL; 1007 bool is_lru = !__PageMovable(page); 1008 1009 if (!trylock_page(page)) { 1010 if (!force || mode == MIGRATE_ASYNC) 1011 goto out; 1012 1013 /* 1014 * It's not safe for direct compaction to call lock_page. 1015 * For example, during page readahead pages are added locked 1016 * to the LRU. Later, when the IO completes the pages are 1017 * marked uptodate and unlocked. However, the queueing 1018 * could be merging multiple pages for one bio (e.g. 1019 * mpage_readahead). If an allocation happens for the 1020 * second or third page, the process can end up locking 1021 * the same page twice and deadlocking. Rather than 1022 * trying to be clever about what pages can be locked, 1023 * avoid the use of lock_page for direct compaction 1024 * altogether. 1025 */ 1026 if (current->flags & PF_MEMALLOC) 1027 goto out; 1028 1029 lock_page(page); 1030 } 1031 1032 if (PageWriteback(page)) { 1033 /* 1034 * Only in the case of a full synchronous migration is it 1035 * necessary to wait for PageWriteback. In the async case, 1036 * the retry loop is too short and in the sync-light case, 1037 * the overhead of stalling is too much 1038 */ 1039 switch (mode) { 1040 case MIGRATE_SYNC: 1041 case MIGRATE_SYNC_NO_COPY: 1042 break; 1043 default: 1044 rc = -EBUSY; 1045 goto out_unlock; 1046 } 1047 if (!force) 1048 goto out_unlock; 1049 wait_on_page_writeback(page); 1050 } 1051 1052 /* 1053 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case, 1054 * we cannot notice that anon_vma is freed while we migrates a page. 1055 * This get_anon_vma() delays freeing anon_vma pointer until the end 1056 * of migration. File cache pages are no problem because of page_lock() 1057 * File Caches may use write_page() or lock_page() in migration, then, 1058 * just care Anon page here. 1059 * 1060 * Only page_get_anon_vma() understands the subtleties of 1061 * getting a hold on an anon_vma from outside one of its mms. 1062 * But if we cannot get anon_vma, then we won't need it anyway, 1063 * because that implies that the anon page is no longer mapped 1064 * (and cannot be remapped so long as we hold the page lock). 
1065 */ 1066 if (PageAnon(page) && !PageKsm(page)) 1067 anon_vma = page_get_anon_vma(page); 1068 1069 /* 1070 * Block others from accessing the new page when we get around to 1071 * establishing additional references. We are usually the only one 1072 * holding a reference to newpage at this point. We used to have a BUG 1073 * here if trylock_page(newpage) fails, but would like to allow for 1074 * cases where there might be a race with the previous use of newpage. 1075 * This is much like races on refcount of oldpage: just don't BUG(). 1076 */ 1077 if (unlikely(!trylock_page(newpage))) 1078 goto out_unlock; 1079 1080 if (unlikely(!is_lru)) { 1081 rc = move_to_new_page(newpage, page, mode); 1082 goto out_unlock_both; 1083 } 1084 1085 /* 1086 * Corner case handling: 1087 * 1. When a new swap-cache page is read into, it is added to the LRU 1088 * and treated as swapcache but it has no rmap yet. 1089 * Calling try_to_unmap() against a page->mapping==NULL page will 1090 * trigger a BUG. So handle it here. 1091 * 2. An orphaned page (see truncate_cleanup_page) might have 1092 * fs-private metadata. The page can be picked up due to memory 1093 * offlining. Everywhere else except page reclaim, the page is 1094 * invisible to the vm, so the page can not be migrated. So try to 1095 * free the metadata, so the page can be freed. 1096 */ 1097 if (!page->mapping) { 1098 VM_BUG_ON_PAGE(PageAnon(page), page); 1099 if (page_has_private(page)) { 1100 try_to_free_buffers(page); 1101 goto out_unlock_both; 1102 } 1103 } else if (page_mapped(page)) { 1104 /* Establish migration ptes */ 1105 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, 1106 page); 1107 try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK); 1108 page_was_mapped = 1; 1109 } 1110 1111 if (!page_mapped(page)) 1112 rc = move_to_new_page(newpage, page, mode); 1113 1114 if (page_was_mapped) 1115 remove_migration_ptes(page, 1116 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false); 1117 1118 out_unlock_both: 1119 unlock_page(newpage); 1120 out_unlock: 1121 /* Drop an anon_vma reference if we took one */ 1122 if (anon_vma) 1123 put_anon_vma(anon_vma); 1124 unlock_page(page); 1125 out: 1126 /* 1127 * If migration is successful, decrease refcount of the newpage 1128 * which will not free the page because new page owner increased 1129 * refcounter. As well, if it is LRU page, add the page to LRU 1130 * list in here. Use the old state of the isolated source page to 1131 * determine if we migrated a LRU page. newpage was already unlocked 1132 * and possibly modified by its owner - don't rely on the page 1133 * state. 1134 */ 1135 if (rc == MIGRATEPAGE_SUCCESS) { 1136 if (unlikely(!is_lru)) 1137 put_page(newpage); 1138 else 1139 putback_lru_page(newpage); 1140 } 1141 1142 return rc; 1143 } 1144 1145 /* 1146 * Obtain the lock on page, remove all ptes and migrate the page 1147 * to the newly allocated page in newpage. 1148 */ 1149 static int unmap_and_move(new_page_t get_new_page, 1150 free_page_t put_new_page, 1151 unsigned long private, struct page *page, 1152 int force, enum migrate_mode mode, 1153 enum migrate_reason reason, 1154 struct list_head *ret) 1155 { 1156 int rc = MIGRATEPAGE_SUCCESS; 1157 struct page *newpage = NULL; 1158 1159 if (!thp_migration_supported() && PageTransHuge(page)) 1160 return -ENOSYS; 1161 1162 if (page_count(page) == 1) { 1163 /* page was freed from under us. So we are done. 
*/ 1164 ClearPageActive(page); 1165 ClearPageUnevictable(page); 1166 if (unlikely(__PageMovable(page))) { 1167 lock_page(page); 1168 if (!PageMovable(page)) 1169 __ClearPageIsolated(page); 1170 unlock_page(page); 1171 } 1172 goto out; 1173 } 1174 1175 newpage = get_new_page(page, private); 1176 if (!newpage) 1177 return -ENOMEM; 1178 1179 rc = __unmap_and_move(page, newpage, force, mode); 1180 if (rc == MIGRATEPAGE_SUCCESS) 1181 set_page_owner_migrate_reason(newpage, reason); 1182 1183 out: 1184 if (rc != -EAGAIN) { 1185 /* 1186 * A page that has been migrated has all references 1187 * removed and will be freed. A page that has not been 1188 * migrated will have kept its references and be restored. 1189 */ 1190 list_del(&page->lru); 1191 } 1192 1193 /* 1194 * If migration is successful, releases reference grabbed during 1195 * isolation. Otherwise, restore the page to right list unless 1196 * we want to retry. 1197 */ 1198 if (rc == MIGRATEPAGE_SUCCESS) { 1199 /* 1200 * Compaction can migrate also non-LRU pages which are 1201 * not accounted to NR_ISOLATED_*. They can be recognized 1202 * as __PageMovable 1203 */ 1204 if (likely(!__PageMovable(page))) 1205 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 1206 page_is_file_lru(page), -thp_nr_pages(page)); 1207 1208 if (reason != MR_MEMORY_FAILURE) 1209 /* 1210 * We release the page in page_handle_poison. 1211 */ 1212 put_page(page); 1213 } else { 1214 if (rc != -EAGAIN) 1215 list_add_tail(&page->lru, ret); 1216 1217 if (put_new_page) 1218 put_new_page(newpage, private); 1219 else 1220 put_page(newpage); 1221 } 1222 1223 return rc; 1224 } 1225 1226 /* 1227 * Counterpart of unmap_and_move_page() for hugepage migration. 1228 * 1229 * This function doesn't wait the completion of hugepage I/O 1230 * because there is no race between I/O and migration for hugepage. 1231 * Note that currently hugepage I/O occurs only in direct I/O 1232 * where no lock is held and PG_writeback is irrelevant, 1233 * and writeback status of all subpages are counted in the reference 1234 * count of the head page (i.e. if all subpages of a 2MB hugepage are 1235 * under direct I/O, the reference of the head page is 512 and a bit more.) 1236 * This means that when we try to migrate hugepage whose subpages are 1237 * doing direct I/O, some references remain after try_to_unmap() and 1238 * hugepage migration fails without data corruption. 1239 * 1240 * There is also no race when direct I/O is issued on the page under migration, 1241 * because then pte is replaced with migration swap entry and direct I/O code 1242 * will wait in the page fault for migration to complete. 1243 */ 1244 static int unmap_and_move_huge_page(new_page_t get_new_page, 1245 free_page_t put_new_page, unsigned long private, 1246 struct page *hpage, int force, 1247 enum migrate_mode mode, int reason, 1248 struct list_head *ret) 1249 { 1250 int rc = -EAGAIN; 1251 int page_was_mapped = 0; 1252 struct page *new_hpage; 1253 struct anon_vma *anon_vma = NULL; 1254 struct address_space *mapping = NULL; 1255 1256 /* 1257 * Migratability of hugepages depends on architectures and their size. 1258 * This check is necessary because some callers of hugepage migration 1259 * like soft offline and memory hotremove don't walk through page 1260 * tables or check whether the hugepage is pmd-based or not before 1261 * kicking migration. 
1262 */ 1263 if (!hugepage_migration_supported(page_hstate(hpage))) { 1264 list_move_tail(&hpage->lru, ret); 1265 return -ENOSYS; 1266 } 1267 1268 if (page_count(hpage) == 1) { 1269 /* page was freed from under us. So we are done. */ 1270 putback_active_hugepage(hpage); 1271 return MIGRATEPAGE_SUCCESS; 1272 } 1273 1274 new_hpage = get_new_page(hpage, private); 1275 if (!new_hpage) 1276 return -ENOMEM; 1277 1278 if (!trylock_page(hpage)) { 1279 if (!force) 1280 goto out; 1281 switch (mode) { 1282 case MIGRATE_SYNC: 1283 case MIGRATE_SYNC_NO_COPY: 1284 break; 1285 default: 1286 goto out; 1287 } 1288 lock_page(hpage); 1289 } 1290 1291 /* 1292 * Check for pages which are in the process of being freed. Without 1293 * page_mapping() set, hugetlbfs specific move page routine will not 1294 * be called and we could leak usage counts for subpools. 1295 */ 1296 if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) { 1297 rc = -EBUSY; 1298 goto out_unlock; 1299 } 1300 1301 if (PageAnon(hpage)) 1302 anon_vma = page_get_anon_vma(hpage); 1303 1304 if (unlikely(!trylock_page(new_hpage))) 1305 goto put_anon; 1306 1307 if (page_mapped(hpage)) { 1308 bool mapping_locked = false; 1309 enum ttu_flags ttu = TTU_MIGRATION|TTU_IGNORE_MLOCK; 1310 1311 if (!PageAnon(hpage)) { 1312 /* 1313 * In shared mappings, try_to_unmap could potentially 1314 * call huge_pmd_unshare. Because of this, take 1315 * semaphore in write mode here and set TTU_RMAP_LOCKED 1316 * to let lower levels know we have taken the lock. 1317 */ 1318 mapping = hugetlb_page_mapping_lock_write(hpage); 1319 if (unlikely(!mapping)) 1320 goto unlock_put_anon; 1321 1322 mapping_locked = true; 1323 ttu |= TTU_RMAP_LOCKED; 1324 } 1325 1326 try_to_unmap(hpage, ttu); 1327 page_was_mapped = 1; 1328 1329 if (mapping_locked) 1330 i_mmap_unlock_write(mapping); 1331 } 1332 1333 if (!page_mapped(hpage)) 1334 rc = move_to_new_page(new_hpage, hpage, mode); 1335 1336 if (page_was_mapped) 1337 remove_migration_ptes(hpage, 1338 rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false); 1339 1340 unlock_put_anon: 1341 unlock_page(new_hpage); 1342 1343 put_anon: 1344 if (anon_vma) 1345 put_anon_vma(anon_vma); 1346 1347 if (rc == MIGRATEPAGE_SUCCESS) { 1348 move_hugetlb_state(hpage, new_hpage, reason); 1349 put_new_page = NULL; 1350 } 1351 1352 out_unlock: 1353 unlock_page(hpage); 1354 out: 1355 if (rc == MIGRATEPAGE_SUCCESS) 1356 putback_active_hugepage(hpage); 1357 else if (rc != -EAGAIN) 1358 list_move_tail(&hpage->lru, ret); 1359 1360 /* 1361 * If migration was not successful and there's a freeing callback, use 1362 * it. Otherwise, put_page() will drop the reference grabbed during 1363 * isolation. 1364 */ 1365 if (put_new_page) 1366 put_new_page(new_hpage, private); 1367 else 1368 putback_active_hugepage(new_hpage); 1369 1370 return rc; 1371 } 1372 1373 static inline int try_split_thp(struct page *page, struct page **page2, 1374 struct list_head *from) 1375 { 1376 int rc = 0; 1377 1378 lock_page(page); 1379 rc = split_huge_page_to_list(page, from); 1380 unlock_page(page); 1381 if (!rc) 1382 list_safe_reset_next(page, *page2, lru); 1383 1384 return rc; 1385 } 1386 1387 /* 1388 * migrate_pages - migrate the pages specified in a list, to the free pages 1389 * supplied as the target for the page migration 1390 * 1391 * @from: The list of pages to be migrated. 1392 * @get_new_page: The function used to allocate free pages to be used 1393 * as the target of the page migration. 
1394 * @put_new_page: The function used to free target pages if migration 1395 * fails, or NULL if no special handling is necessary. 1396 * @private: Private data to be passed on to get_new_page() 1397 * @mode: The migration mode that specifies the constraints for 1398 * page migration, if any. 1399 * @reason: The reason for page migration. 1400 * 1401 * The function returns after 10 attempts or if no pages are movable any more 1402 * because the list has become empty or no retryable pages exist any more. 1403 * It is caller's responsibility to call putback_movable_pages() to return pages 1404 * to the LRU or free list only if ret != 0. 1405 * 1406 * Returns the number of pages that were not migrated, or an error code. 1407 */ 1408 int migrate_pages(struct list_head *from, new_page_t get_new_page, 1409 free_page_t put_new_page, unsigned long private, 1410 enum migrate_mode mode, int reason) 1411 { 1412 int retry = 1; 1413 int thp_retry = 1; 1414 int nr_failed = 0; 1415 int nr_succeeded = 0; 1416 int nr_thp_succeeded = 0; 1417 int nr_thp_failed = 0; 1418 int nr_thp_split = 0; 1419 int pass = 0; 1420 bool is_thp = false; 1421 struct page *page; 1422 struct page *page2; 1423 int swapwrite = current->flags & PF_SWAPWRITE; 1424 int rc, nr_subpages; 1425 LIST_HEAD(ret_pages); 1426 bool nosplit = (reason == MR_NUMA_MISPLACED); 1427 1428 trace_mm_migrate_pages_start(mode, reason); 1429 1430 if (!swapwrite) 1431 current->flags |= PF_SWAPWRITE; 1432 1433 for (pass = 0; pass < 10 && (retry || thp_retry); pass++) { 1434 retry = 0; 1435 thp_retry = 0; 1436 1437 list_for_each_entry_safe(page, page2, from, lru) { 1438 retry: 1439 /* 1440 * THP statistics is based on the source huge page. 1441 * Capture required information that might get lost 1442 * during migration. 1443 */ 1444 is_thp = PageTransHuge(page) && !PageHuge(page); 1445 nr_subpages = thp_nr_pages(page); 1446 cond_resched(); 1447 1448 if (PageHuge(page)) 1449 rc = unmap_and_move_huge_page(get_new_page, 1450 put_new_page, private, page, 1451 pass > 2, mode, reason, 1452 &ret_pages); 1453 else 1454 rc = unmap_and_move(get_new_page, put_new_page, 1455 private, page, pass > 2, mode, 1456 reason, &ret_pages); 1457 /* 1458 * The rules are: 1459 * Success: non hugetlb page will be freed, hugetlb 1460 * page will be put back 1461 * -EAGAIN: stay on the from list 1462 * -ENOMEM: stay on the from list 1463 * Other errno: put on ret_pages list then splice to 1464 * from list 1465 */ 1466 switch(rc) { 1467 /* 1468 * THP migration might be unsupported or the 1469 * allocation could've failed so we should 1470 * retry on the same page with the THP split 1471 * to base pages. 1472 * 1473 * Head page is retried immediately and tail 1474 * pages are added to the tail of the list so 1475 * we encounter them after the rest of the list 1476 * is processed. 1477 */ 1478 case -ENOSYS: 1479 /* THP migration is unsupported */ 1480 if (is_thp) { 1481 if (!try_split_thp(page, &page2, from)) { 1482 nr_thp_split++; 1483 goto retry; 1484 } 1485 1486 nr_thp_failed++; 1487 nr_failed += nr_subpages; 1488 break; 1489 } 1490 1491 /* Hugetlb migration is unsupported */ 1492 nr_failed++; 1493 break; 1494 case -ENOMEM: 1495 /* 1496 * When memory is low, don't bother to try to migrate 1497 * other pages, just exit. 1498 * THP NUMA faulting doesn't split THP to retry. 
1499 */ 1500 if (is_thp && !nosplit) { 1501 if (!try_split_thp(page, &page2, from)) { 1502 nr_thp_split++; 1503 goto retry; 1504 } 1505 1506 nr_thp_failed++; 1507 nr_failed += nr_subpages; 1508 goto out; 1509 } 1510 nr_failed++; 1511 goto out; 1512 case -EAGAIN: 1513 if (is_thp) { 1514 thp_retry++; 1515 break; 1516 } 1517 retry++; 1518 break; 1519 case MIGRATEPAGE_SUCCESS: 1520 if (is_thp) { 1521 nr_thp_succeeded++; 1522 nr_succeeded += nr_subpages; 1523 break; 1524 } 1525 nr_succeeded++; 1526 break; 1527 default: 1528 /* 1529 * Permanent failure (-EBUSY, etc.): 1530 * unlike -EAGAIN case, the failed page is 1531 * removed from migration page list and not 1532 * retried in the next outer loop. 1533 */ 1534 if (is_thp) { 1535 nr_thp_failed++; 1536 nr_failed += nr_subpages; 1537 break; 1538 } 1539 nr_failed++; 1540 break; 1541 } 1542 } 1543 } 1544 nr_failed += retry + thp_retry; 1545 nr_thp_failed += thp_retry; 1546 rc = nr_failed; 1547 out: 1548 /* 1549 * Put the permanent failure page back to migration list, they 1550 * will be put back to the right list by the caller. 1551 */ 1552 list_splice(&ret_pages, from); 1553 1554 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded); 1555 count_vm_events(PGMIGRATE_FAIL, nr_failed); 1556 count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded); 1557 count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed); 1558 count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split); 1559 trace_mm_migrate_pages(nr_succeeded, nr_failed, nr_thp_succeeded, 1560 nr_thp_failed, nr_thp_split, mode, reason); 1561 1562 if (!swapwrite) 1563 current->flags &= ~PF_SWAPWRITE; 1564 1565 return rc; 1566 } 1567 1568 struct page *alloc_migration_target(struct page *page, unsigned long private) 1569 { 1570 struct migration_target_control *mtc; 1571 gfp_t gfp_mask; 1572 unsigned int order = 0; 1573 struct page *new_page = NULL; 1574 int nid; 1575 int zidx; 1576 1577 mtc = (struct migration_target_control *)private; 1578 gfp_mask = mtc->gfp_mask; 1579 nid = mtc->nid; 1580 if (nid == NUMA_NO_NODE) 1581 nid = page_to_nid(page); 1582 1583 if (PageHuge(page)) { 1584 struct hstate *h = page_hstate(compound_head(page)); 1585 1586 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); 1587 return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask); 1588 } 1589 1590 if (PageTransHuge(page)) { 1591 /* 1592 * clear __GFP_RECLAIM to make the migration callback 1593 * consistent with regular THP allocations. 
1594 */ 1595 gfp_mask &= ~__GFP_RECLAIM; 1596 gfp_mask |= GFP_TRANSHUGE; 1597 order = HPAGE_PMD_ORDER; 1598 } 1599 zidx = zone_idx(page_zone(page)); 1600 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE) 1601 gfp_mask |= __GFP_HIGHMEM; 1602 1603 new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask); 1604 1605 if (new_page && PageTransHuge(new_page)) 1606 prep_transhuge_page(new_page); 1607 1608 return new_page; 1609 } 1610 1611 #ifdef CONFIG_NUMA 1612 1613 static int store_status(int __user *status, int start, int value, int nr) 1614 { 1615 while (nr-- > 0) { 1616 if (put_user(value, status + start)) 1617 return -EFAULT; 1618 start++; 1619 } 1620 1621 return 0; 1622 } 1623 1624 static int do_move_pages_to_node(struct mm_struct *mm, 1625 struct list_head *pagelist, int node) 1626 { 1627 int err; 1628 struct migration_target_control mtc = { 1629 .nid = node, 1630 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 1631 }; 1632 1633 err = migrate_pages(pagelist, alloc_migration_target, NULL, 1634 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL); 1635 if (err) 1636 putback_movable_pages(pagelist); 1637 return err; 1638 } 1639 1640 /* 1641 * Resolves the given address to a struct page, isolates it from the LRU and 1642 * puts it to the given pagelist. 1643 * Returns: 1644 * errno - if the page cannot be found/isolated 1645 * 0 - when it doesn't have to be migrated because it is already on the 1646 * target node 1647 * 1 - when it has been queued 1648 */ 1649 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, 1650 int node, struct list_head *pagelist, bool migrate_all) 1651 { 1652 struct vm_area_struct *vma; 1653 struct page *page; 1654 unsigned int follflags; 1655 int err; 1656 1657 mmap_read_lock(mm); 1658 err = -EFAULT; 1659 vma = find_vma(mm, addr); 1660 if (!vma || addr < vma->vm_start || !vma_migratable(vma)) 1661 goto out; 1662 1663 /* FOLL_DUMP to ignore special (like zero) pages */ 1664 follflags = FOLL_GET | FOLL_DUMP; 1665 page = follow_page(vma, addr, follflags); 1666 1667 err = PTR_ERR(page); 1668 if (IS_ERR(page)) 1669 goto out; 1670 1671 err = -ENOENT; 1672 if (!page) 1673 goto out; 1674 1675 err = 0; 1676 if (page_to_nid(page) == node) 1677 goto out_putpage; 1678 1679 err = -EACCES; 1680 if (page_mapcount(page) > 1 && !migrate_all) 1681 goto out_putpage; 1682 1683 if (PageHuge(page)) { 1684 if (PageHead(page)) { 1685 isolate_huge_page(page, pagelist); 1686 err = 1; 1687 } 1688 } else { 1689 struct page *head; 1690 1691 head = compound_head(page); 1692 err = isolate_lru_page(head); 1693 if (err) 1694 goto out_putpage; 1695 1696 err = 1; 1697 list_add_tail(&head->lru, pagelist); 1698 mod_node_page_state(page_pgdat(head), 1699 NR_ISOLATED_ANON + page_is_file_lru(head), 1700 thp_nr_pages(head)); 1701 } 1702 out_putpage: 1703 /* 1704 * Either remove the duplicate refcount from 1705 * isolate_lru_page() or drop the page ref if it was 1706 * not isolated. 1707 */ 1708 put_page(page); 1709 out: 1710 mmap_read_unlock(mm); 1711 return err; 1712 } 1713 1714 static int move_pages_and_store_status(struct mm_struct *mm, int node, 1715 struct list_head *pagelist, int __user *status, 1716 int start, int i, unsigned long nr_pages) 1717 { 1718 int err; 1719 1720 if (list_empty(pagelist)) 1721 return 0; 1722 1723 err = do_move_pages_to_node(mm, pagelist, node); 1724 if (err) { 1725 /* 1726 * Positive err means the number of failed 1727 * pages to migrate. 
Since we are going to 1728 * abort and return the number of non-migrated 1729 * pages, so need to include the rest of the 1730 * nr_pages that have not been attempted as 1731 * well. 1732 */ 1733 if (err > 0) 1734 err += nr_pages - i - 1; 1735 return err; 1736 } 1737 return store_status(status, start, node, i - start); 1738 } 1739 1740 /* 1741 * Migrate an array of page address onto an array of nodes and fill 1742 * the corresponding array of status. 1743 */ 1744 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, 1745 unsigned long nr_pages, 1746 const void __user * __user *pages, 1747 const int __user *nodes, 1748 int __user *status, int flags) 1749 { 1750 int current_node = NUMA_NO_NODE; 1751 LIST_HEAD(pagelist); 1752 int start, i; 1753 int err = 0, err1; 1754 1755 lru_cache_disable(); 1756 1757 for (i = start = 0; i < nr_pages; i++) { 1758 const void __user *p; 1759 unsigned long addr; 1760 int node; 1761 1762 err = -EFAULT; 1763 if (get_user(p, pages + i)) 1764 goto out_flush; 1765 if (get_user(node, nodes + i)) 1766 goto out_flush; 1767 addr = (unsigned long)untagged_addr(p); 1768 1769 err = -ENODEV; 1770 if (node < 0 || node >= MAX_NUMNODES) 1771 goto out_flush; 1772 if (!node_state(node, N_MEMORY)) 1773 goto out_flush; 1774 1775 err = -EACCES; 1776 if (!node_isset(node, task_nodes)) 1777 goto out_flush; 1778 1779 if (current_node == NUMA_NO_NODE) { 1780 current_node = node; 1781 start = i; 1782 } else if (node != current_node) { 1783 err = move_pages_and_store_status(mm, current_node, 1784 &pagelist, status, start, i, nr_pages); 1785 if (err) 1786 goto out; 1787 start = i; 1788 current_node = node; 1789 } 1790 1791 /* 1792 * Errors in the page lookup or isolation are not fatal and we simply 1793 * report them via status 1794 */ 1795 err = add_page_for_migration(mm, addr, current_node, 1796 &pagelist, flags & MPOL_MF_MOVE_ALL); 1797 1798 if (err > 0) { 1799 /* The page is successfully queued for migration */ 1800 continue; 1801 } 1802 1803 /* 1804 * If the page is already on the target node (!err), store the 1805 * node, otherwise, store the err. 1806 */ 1807 err = store_status(status, i, err ? : current_node, 1); 1808 if (err) 1809 goto out_flush; 1810 1811 err = move_pages_and_store_status(mm, current_node, &pagelist, 1812 status, start, i, nr_pages); 1813 if (err) 1814 goto out; 1815 current_node = NUMA_NO_NODE; 1816 } 1817 out_flush: 1818 /* Make sure we do not overwrite the existing error */ 1819 err1 = move_pages_and_store_status(mm, current_node, &pagelist, 1820 status, start, i, nr_pages); 1821 if (err >= 0) 1822 err = err1; 1823 out: 1824 lru_cache_enable(); 1825 return err; 1826 } 1827 1828 /* 1829 * Determine the nodes of an array of pages and store it in an array of status. 1830 */ 1831 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, 1832 const void __user **pages, int *status) 1833 { 1834 unsigned long i; 1835 1836 mmap_read_lock(mm); 1837 1838 for (i = 0; i < nr_pages; i++) { 1839 unsigned long addr = (unsigned long)(*pages); 1840 struct vm_area_struct *vma; 1841 struct page *page; 1842 int err = -EFAULT; 1843 1844 vma = vma_lookup(mm, addr); 1845 if (!vma) 1846 goto set_status; 1847 1848 /* FOLL_DUMP to ignore special (like zero) pages */ 1849 page = follow_page(vma, addr, FOLL_DUMP); 1850 1851 err = PTR_ERR(page); 1852 if (IS_ERR(page)) 1853 goto set_status; 1854 1855 err = page ? 
page_to_nid(page) : -ENOENT; 1856 set_status: 1857 *status = err; 1858 1859 pages++; 1860 status++; 1861 } 1862 1863 mmap_read_unlock(mm); 1864 } 1865 1866 /* 1867 * Determine the nodes of a user array of pages and store it in 1868 * a user array of status. 1869 */ 1870 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, 1871 const void __user * __user *pages, 1872 int __user *status) 1873 { 1874 #define DO_PAGES_STAT_CHUNK_NR 16 1875 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; 1876 int chunk_status[DO_PAGES_STAT_CHUNK_NR]; 1877 1878 while (nr_pages) { 1879 unsigned long chunk_nr; 1880 1881 chunk_nr = nr_pages; 1882 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR) 1883 chunk_nr = DO_PAGES_STAT_CHUNK_NR; 1884 1885 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages))) 1886 break; 1887 1888 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); 1889 1890 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) 1891 break; 1892 1893 pages += chunk_nr; 1894 status += chunk_nr; 1895 nr_pages -= chunk_nr; 1896 } 1897 return nr_pages ? -EFAULT : 0; 1898 } 1899 1900 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes) 1901 { 1902 struct task_struct *task; 1903 struct mm_struct *mm; 1904 1905 /* 1906 * There is no need to check if current process has the right to modify 1907 * the specified process when they are same. 1908 */ 1909 if (!pid) { 1910 mmget(current->mm); 1911 *mem_nodes = cpuset_mems_allowed(current); 1912 return current->mm; 1913 } 1914 1915 /* Find the mm_struct */ 1916 rcu_read_lock(); 1917 task = find_task_by_vpid(pid); 1918 if (!task) { 1919 rcu_read_unlock(); 1920 return ERR_PTR(-ESRCH); 1921 } 1922 get_task_struct(task); 1923 1924 /* 1925 * Check if this process has the right to modify the specified 1926 * process. Use the regular "ptrace_may_access()" checks. 1927 */ 1928 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1929 rcu_read_unlock(); 1930 mm = ERR_PTR(-EPERM); 1931 goto out; 1932 } 1933 rcu_read_unlock(); 1934 1935 mm = ERR_PTR(security_task_movememory(task)); 1936 if (IS_ERR(mm)) 1937 goto out; 1938 *mem_nodes = cpuset_mems_allowed(task); 1939 mm = get_task_mm(task); 1940 out: 1941 put_task_struct(task); 1942 if (!mm) 1943 mm = ERR_PTR(-EINVAL); 1944 return mm; 1945 } 1946 1947 /* 1948 * Move a list of pages in the address space of the currently executing 1949 * process. 
1950 */ 1951 static int kernel_move_pages(pid_t pid, unsigned long nr_pages, 1952 const void __user * __user *pages, 1953 const int __user *nodes, 1954 int __user *status, int flags) 1955 { 1956 struct mm_struct *mm; 1957 int err; 1958 nodemask_t task_nodes; 1959 1960 /* Check flags */ 1961 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL)) 1962 return -EINVAL; 1963 1964 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 1965 return -EPERM; 1966 1967 mm = find_mm_struct(pid, &task_nodes); 1968 if (IS_ERR(mm)) 1969 return PTR_ERR(mm); 1970 1971 if (nodes) 1972 err = do_pages_move(mm, task_nodes, nr_pages, pages, 1973 nodes, status, flags); 1974 else 1975 err = do_pages_stat(mm, nr_pages, pages, status); 1976 1977 mmput(mm); 1978 return err; 1979 } 1980 1981 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, 1982 const void __user * __user *, pages, 1983 const int __user *, nodes, 1984 int __user *, status, int, flags) 1985 { 1986 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags); 1987 } 1988 1989 #ifdef CONFIG_COMPAT 1990 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages, 1991 compat_uptr_t __user *, pages32, 1992 const int __user *, nodes, 1993 int __user *, status, 1994 int, flags) 1995 { 1996 const void __user * __user *pages; 1997 int i; 1998 1999 pages = compat_alloc_user_space(nr_pages * sizeof(void *)); 2000 for (i = 0; i < nr_pages; i++) { 2001 compat_uptr_t p; 2002 2003 if (get_user(p, pages32 + i) || 2004 put_user(compat_ptr(p), pages + i)) 2005 return -EFAULT; 2006 } 2007 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags); 2008 } 2009 #endif /* CONFIG_COMPAT */ 2010 2011 #ifdef CONFIG_NUMA_BALANCING 2012 /* 2013 * Returns true if this is a safe migration target node for misplaced NUMA 2014 * pages. Currently it only checks the watermarks which crude 2015 */ 2016 static bool migrate_balanced_pgdat(struct pglist_data *pgdat, 2017 unsigned long nr_migrate_pages) 2018 { 2019 int z; 2020 2021 for (z = pgdat->nr_zones - 1; z >= 0; z--) { 2022 struct zone *zone = pgdat->node_zones + z; 2023 2024 if (!populated_zone(zone)) 2025 continue; 2026 2027 /* Avoid waking kswapd by allocating pages_to_migrate pages. 
*/ 2028 if (!zone_watermark_ok(zone, 0, 2029 high_wmark_pages(zone) + 2030 nr_migrate_pages, 2031 ZONE_MOVABLE, 0)) 2032 continue; 2033 return true; 2034 } 2035 return false; 2036 } 2037 2038 static struct page *alloc_misplaced_dst_page(struct page *page, 2039 unsigned long data) 2040 { 2041 int nid = (int) data; 2042 struct page *newpage; 2043 2044 newpage = __alloc_pages_node(nid, 2045 (GFP_HIGHUSER_MOVABLE | 2046 __GFP_THISNODE | __GFP_NOMEMALLOC | 2047 __GFP_NORETRY | __GFP_NOWARN) & 2048 ~__GFP_RECLAIM, 0); 2049 2050 return newpage; 2051 } 2052 2053 static struct page *alloc_misplaced_dst_page_thp(struct page *page, 2054 unsigned long data) 2055 { 2056 int nid = (int) data; 2057 struct page *newpage; 2058 2059 newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE), 2060 HPAGE_PMD_ORDER); 2061 if (!newpage) 2062 goto out; 2063 2064 prep_transhuge_page(newpage); 2065 2066 out: 2067 return newpage; 2068 } 2069 2070 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) 2071 { 2072 int page_lru; 2073 2074 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); 2075 2076 /* Avoid migrating to a node that is nearly full */ 2077 if (!migrate_balanced_pgdat(pgdat, compound_nr(page))) 2078 return 0; 2079 2080 if (isolate_lru_page(page)) 2081 return 0; 2082 2083 /* 2084 * migrate_misplaced_transhuge_page() skips page migration's usual 2085 * check on page_count(), so we must do it here, now that the page 2086 * has been isolated: a GUP pin, or any other pin, prevents migration. 2087 * The expected page count is 3: 1 for page's mapcount and 1 for the 2088 * caller's pin and 1 for the reference taken by isolate_lru_page(). 2089 */ 2090 if (PageTransHuge(page) && page_count(page) != 3) { 2091 putback_lru_page(page); 2092 return 0; 2093 } 2094 2095 page_lru = page_is_file_lru(page); 2096 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru, 2097 thp_nr_pages(page)); 2098 2099 /* 2100 * Isolating the page has taken another reference, so the 2101 * caller's reference can be safely dropped without the page 2102 * disappearing underneath us during migration. 2103 */ 2104 put_page(page); 2105 return 1; 2106 } 2107 2108 /* 2109 * Attempt to migrate a misplaced page to the specified destination 2110 * node. Caller is expected to have an elevated reference count on 2111 * the page that will be dropped by this function before returning. 2112 */ 2113 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, 2114 int node) 2115 { 2116 pg_data_t *pgdat = NODE_DATA(node); 2117 int isolated; 2118 int nr_remaining; 2119 LIST_HEAD(migratepages); 2120 new_page_t *new; 2121 bool compound; 2122 unsigned int nr_pages = thp_nr_pages(page); 2123 2124 /* 2125 * PTE mapped THP or HugeTLB page can't reach here so the page could 2126 * be either base page or THP. And it must be head page if it is 2127 * THP. 2128 */ 2129 compound = PageTransHuge(page); 2130 2131 if (compound) 2132 new = alloc_misplaced_dst_page_thp; 2133 else 2134 new = alloc_misplaced_dst_page; 2135 2136 /* 2137 * Don't migrate file pages that are mapped in multiple processes 2138 * with execute permissions as they are probably shared libraries. 2139 */ 2140 if (page_mapcount(page) != 1 && page_is_file_lru(page) && 2141 (vma->vm_flags & VM_EXEC)) 2142 goto out; 2143 2144 /* 2145 * Also do not migrate dirty pages as not all filesystems can move 2146 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles. 
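	 * (For such filesystems the dirty page would first need to be
	 * written back, which the MIGRATE_ASYNC mode used below does not
	 * wait for, so the isolation work would simply be thrown away.)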
2147 */ 2148 if (page_is_file_lru(page) && PageDirty(page)) 2149 goto out; 2150 2151 isolated = numamigrate_isolate_page(pgdat, page); 2152 if (!isolated) 2153 goto out; 2154 2155 list_add(&page->lru, &migratepages); 2156 nr_remaining = migrate_pages(&migratepages, *new, NULL, node, 2157 MIGRATE_ASYNC, MR_NUMA_MISPLACED); 2158 if (nr_remaining) { 2159 if (!list_empty(&migratepages)) { 2160 list_del(&page->lru); 2161 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 2162 page_is_file_lru(page), -nr_pages); 2163 putback_lru_page(page); 2164 } 2165 isolated = 0; 2166 } else 2167 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_pages); 2168 BUG_ON(!list_empty(&migratepages)); 2169 return isolated; 2170 2171 out: 2172 put_page(page); 2173 return 0; 2174 } 2175 #endif /* CONFIG_NUMA_BALANCING */ 2176 #endif /* CONFIG_NUMA */ 2177 2178 #ifdef CONFIG_DEVICE_PRIVATE 2179 static int migrate_vma_collect_skip(unsigned long start, 2180 unsigned long end, 2181 struct mm_walk *walk) 2182 { 2183 struct migrate_vma *migrate = walk->private; 2184 unsigned long addr; 2185 2186 for (addr = start; addr < end; addr += PAGE_SIZE) { 2187 migrate->dst[migrate->npages] = 0; 2188 migrate->src[migrate->npages++] = 0; 2189 } 2190 2191 return 0; 2192 } 2193 2194 static int migrate_vma_collect_hole(unsigned long start, 2195 unsigned long end, 2196 __always_unused int depth, 2197 struct mm_walk *walk) 2198 { 2199 struct migrate_vma *migrate = walk->private; 2200 unsigned long addr; 2201 2202 /* Only allow populating anonymous memory. */ 2203 if (!vma_is_anonymous(walk->vma)) 2204 return migrate_vma_collect_skip(start, end, walk); 2205 2206 for (addr = start; addr < end; addr += PAGE_SIZE) { 2207 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; 2208 migrate->dst[migrate->npages] = 0; 2209 migrate->npages++; 2210 migrate->cpages++; 2211 } 2212 2213 return 0; 2214 } 2215 2216 static int migrate_vma_collect_pmd(pmd_t *pmdp, 2217 unsigned long start, 2218 unsigned long end, 2219 struct mm_walk *walk) 2220 { 2221 struct migrate_vma *migrate = walk->private; 2222 struct vm_area_struct *vma = walk->vma; 2223 struct mm_struct *mm = vma->vm_mm; 2224 unsigned long addr = start, unmapped = 0; 2225 spinlock_t *ptl; 2226 pte_t *ptep; 2227 2228 again: 2229 if (pmd_none(*pmdp)) 2230 return migrate_vma_collect_hole(start, end, -1, walk); 2231 2232 if (pmd_trans_huge(*pmdp)) { 2233 struct page *page; 2234 2235 ptl = pmd_lock(mm, pmdp); 2236 if (unlikely(!pmd_trans_huge(*pmdp))) { 2237 spin_unlock(ptl); 2238 goto again; 2239 } 2240 2241 page = pmd_page(*pmdp); 2242 if (is_huge_zero_page(page)) { 2243 spin_unlock(ptl); 2244 split_huge_pmd(vma, pmdp, addr); 2245 if (pmd_trans_unstable(pmdp)) 2246 return migrate_vma_collect_skip(start, end, 2247 walk); 2248 } else { 2249 int ret; 2250 2251 get_page(page); 2252 spin_unlock(ptl); 2253 if (unlikely(!trylock_page(page))) 2254 return migrate_vma_collect_skip(start, end, 2255 walk); 2256 ret = split_huge_page(page); 2257 unlock_page(page); 2258 put_page(page); 2259 if (ret) 2260 return migrate_vma_collect_skip(start, end, 2261 walk); 2262 if (pmd_none(*pmdp)) 2263 return migrate_vma_collect_hole(start, end, -1, 2264 walk); 2265 } 2266 } 2267 2268 if (unlikely(pmd_bad(*pmdp))) 2269 return migrate_vma_collect_skip(start, end, walk); 2270 2271 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); 2272 arch_enter_lazy_mmu_mode(); 2273 2274 for (; addr < end; addr += PAGE_SIZE, ptep++) { 2275 unsigned long mpfn = 0, pfn; 2276 struct page *page; 2277 swp_entry_t entry; 2278 pte_t pte; 2279 2280 pte = 
*ptep; 2281 2282 if (pte_none(pte)) { 2283 if (vma_is_anonymous(vma)) { 2284 mpfn = MIGRATE_PFN_MIGRATE; 2285 migrate->cpages++; 2286 } 2287 goto next; 2288 } 2289 2290 if (!pte_present(pte)) { 2291 /* 2292 * Only care about unaddressable device page special 2293 * page table entry. Other special swap entries are not 2294 * migratable, and we ignore regular swapped page. 2295 */ 2296 entry = pte_to_swp_entry(pte); 2297 if (!is_device_private_entry(entry)) 2298 goto next; 2299 2300 page = device_private_entry_to_page(entry); 2301 if (!(migrate->flags & 2302 MIGRATE_VMA_SELECT_DEVICE_PRIVATE) || 2303 page->pgmap->owner != migrate->pgmap_owner) 2304 goto next; 2305 2306 mpfn = migrate_pfn(page_to_pfn(page)) | 2307 MIGRATE_PFN_MIGRATE; 2308 if (is_write_device_private_entry(entry)) 2309 mpfn |= MIGRATE_PFN_WRITE; 2310 } else { 2311 if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) 2312 goto next; 2313 pfn = pte_pfn(pte); 2314 if (is_zero_pfn(pfn)) { 2315 mpfn = MIGRATE_PFN_MIGRATE; 2316 migrate->cpages++; 2317 goto next; 2318 } 2319 page = vm_normal_page(migrate->vma, addr, pte); 2320 mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; 2321 mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0; 2322 } 2323 2324 /* FIXME support THP */ 2325 if (!page || !page->mapping || PageTransCompound(page)) { 2326 mpfn = 0; 2327 goto next; 2328 } 2329 2330 /* 2331 * By getting a reference on the page we pin it and that blocks 2332 * any kind of migration. Side effect is that it "freezes" the 2333 * pte. 2334 * 2335 * We drop this reference after isolating the page from the lru 2336 * for non device page (device page are not on the lru and thus 2337 * can't be dropped from it). 2338 */ 2339 get_page(page); 2340 migrate->cpages++; 2341 2342 /* 2343 * Optimize for the common case where page is only mapped once 2344 * in one process. If we can lock the page, then we can safely 2345 * set up a special migration page table entry now. 2346 */ 2347 if (trylock_page(page)) { 2348 pte_t swp_pte; 2349 2350 mpfn |= MIGRATE_PFN_LOCKED; 2351 ptep_get_and_clear(mm, addr, ptep); 2352 2353 /* Setup special migration page table entry */ 2354 entry = make_migration_entry(page, mpfn & 2355 MIGRATE_PFN_WRITE); 2356 swp_pte = swp_entry_to_pte(entry); 2357 if (pte_present(pte)) { 2358 if (pte_soft_dirty(pte)) 2359 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2360 if (pte_uffd_wp(pte)) 2361 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2362 } else { 2363 if (pte_swp_soft_dirty(pte)) 2364 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2365 if (pte_swp_uffd_wp(pte)) 2366 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2367 } 2368 set_pte_at(mm, addr, ptep, swp_pte); 2369 2370 /* 2371 * This is like regular unmap: we remove the rmap and 2372 * drop page refcount. Page won't be freed, as we took 2373 * a reference just above. 
			 */
			page_remove_rmap(page, false);
			put_page(page);

			if (pte_present(pte))
				unmapped++;
		}

next:
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = mpfn;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(ptep - 1, ptl);

	/* Only flush the TLB if we actually modified any entries */
	if (unmapped)
		flush_tlb_range(walk->vma, start, end);

	return 0;
}

static const struct mm_walk_ops migrate_vma_walk_ops = {
	.pmd_entry = migrate_vma_collect_pmd,
	.pte_hole = migrate_vma_collect_hole,
};

/*
 * migrate_vma_collect() - collect pages over a range of virtual addresses
 * @migrate: migrate struct containing all migration information
 *
 * This will walk the CPU page table. For each virtual address backed by a
 * valid page, it updates the src array and takes a reference on the page, in
 * order to pin the page until we lock it and unmap it.
 */
static void migrate_vma_collect(struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;

	/*
	 * Note that the pgmap_owner is passed to the mmu notifier callback so
	 * that the registered device driver can skip invalidating device
	 * private page mappings that won't be migrated.
	 */
	mmu_notifier_range_init_migrate(&range, 0, migrate->vma,
		migrate->vma->vm_mm, migrate->start, migrate->end,
		migrate->pgmap_owner);
	mmu_notifier_invalidate_range_start(&range);

	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
			&migrate_vma_walk_ops, migrate);

	mmu_notifier_invalidate_range_end(&range);
	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}

/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * migrate_page_move_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
 */
static bool migrate_vma_check_page(struct page *page)
{
	/*
	 * One extra ref because caller holds an extra reference, either from
	 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
	 * a device page.
	 */
	int extra = 1;

	/*
	 * FIXME support THP (transparent huge page), it is a bit more complex
	 * to check them than regular pages, because they can be mapped with a
	 * pmd or with a pte (split pte mapping).
	 */
	if (PageCompound(page))
		return false;

	/* Pages from ZONE_DEVICE have one extra reference */
	if (is_zone_device_page(page)) {
		/*
		 * Private pages can never be pinned as they have no valid pte
		 * and GUP will fail for those. Yet if there is a pending
		 * migration, a thread might try to wait on the pte migration
		 * entry and will bump the page reference count. Sadly there is
		 * no way to differentiate a regular pin from that migration
		 * wait. Hence, to avoid two racing threads trying to migrate
		 * back to the CPU and entering an infinite loop (one stopping
		 * migration because the other is waiting on the pte migration
		 * entry), we always return true here.
		 *
		 * FIXME proper solution is to rework migration_entry_wait() so
		 * it does not need to take a reference on the page.
2469 */ 2470 return is_device_private_page(page); 2471 } 2472 2473 /* For file back page */ 2474 if (page_mapping(page)) 2475 extra += 1 + page_has_private(page); 2476 2477 if ((page_count(page) - extra) > page_mapcount(page)) 2478 return false; 2479 2480 return true; 2481 } 2482 2483 /* 2484 * migrate_vma_prepare() - lock pages and isolate them from the lru 2485 * @migrate: migrate struct containing all migration information 2486 * 2487 * This locks pages that have been collected by migrate_vma_collect(). Once each 2488 * page is locked it is isolated from the lru (for non-device pages). Finally, 2489 * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be 2490 * migrated by concurrent kernel threads. 2491 */ 2492 static void migrate_vma_prepare(struct migrate_vma *migrate) 2493 { 2494 const unsigned long npages = migrate->npages; 2495 const unsigned long start = migrate->start; 2496 unsigned long addr, i, restore = 0; 2497 bool allow_drain = true; 2498 2499 lru_add_drain(); 2500 2501 for (i = 0; (i < npages) && migrate->cpages; i++) { 2502 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2503 bool remap = true; 2504 2505 if (!page) 2506 continue; 2507 2508 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) { 2509 /* 2510 * Because we are migrating several pages there can be 2511 * a deadlock between 2 concurrent migration where each 2512 * are waiting on each other page lock. 2513 * 2514 * Make migrate_vma() a best effort thing and backoff 2515 * for any page we can not lock right away. 2516 */ 2517 if (!trylock_page(page)) { 2518 migrate->src[i] = 0; 2519 migrate->cpages--; 2520 put_page(page); 2521 continue; 2522 } 2523 remap = false; 2524 migrate->src[i] |= MIGRATE_PFN_LOCKED; 2525 } 2526 2527 /* ZONE_DEVICE pages are not on LRU */ 2528 if (!is_zone_device_page(page)) { 2529 if (!PageLRU(page) && allow_drain) { 2530 /* Drain CPU's pagevec */ 2531 lru_add_drain_all(); 2532 allow_drain = false; 2533 } 2534 2535 if (isolate_lru_page(page)) { 2536 if (remap) { 2537 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2538 migrate->cpages--; 2539 restore++; 2540 } else { 2541 migrate->src[i] = 0; 2542 unlock_page(page); 2543 migrate->cpages--; 2544 put_page(page); 2545 } 2546 continue; 2547 } 2548 2549 /* Drop the reference we took in collect */ 2550 put_page(page); 2551 } 2552 2553 if (!migrate_vma_check_page(page)) { 2554 if (remap) { 2555 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2556 migrate->cpages--; 2557 restore++; 2558 2559 if (!is_zone_device_page(page)) { 2560 get_page(page); 2561 putback_lru_page(page); 2562 } 2563 } else { 2564 migrate->src[i] = 0; 2565 unlock_page(page); 2566 migrate->cpages--; 2567 2568 if (!is_zone_device_page(page)) 2569 putback_lru_page(page); 2570 else 2571 put_page(page); 2572 } 2573 } 2574 } 2575 2576 for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) { 2577 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2578 2579 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) 2580 continue; 2581 2582 remove_migration_pte(page, migrate->vma, addr, page); 2583 2584 migrate->src[i] = 0; 2585 unlock_page(page); 2586 put_page(page); 2587 restore--; 2588 } 2589 } 2590 2591 /* 2592 * migrate_vma_unmap() - replace page mapping with special migration pte entry 2593 * @migrate: migrate struct containing all migration information 2594 * 2595 * Replace page mapping (CPU page table pte) with a special migration pte entry 2596 * and check again if it has been pinned. Pinned pages are restored because we 2597 * cannot migrate them. 
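 * (Restoring a pinned page means removing its migration entries again and,
 * for a non-device page, putting it back on the LRU.)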
2598 * 2599 * This is the last step before we call the device driver callback to allocate 2600 * destination memory and copy contents of original page over to new page. 2601 */ 2602 static void migrate_vma_unmap(struct migrate_vma *migrate) 2603 { 2604 int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK; 2605 const unsigned long npages = migrate->npages; 2606 const unsigned long start = migrate->start; 2607 unsigned long addr, i, restore = 0; 2608 2609 for (i = 0; i < npages; i++) { 2610 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2611 2612 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) 2613 continue; 2614 2615 if (page_mapped(page)) { 2616 try_to_unmap(page, flags); 2617 if (page_mapped(page)) 2618 goto restore; 2619 } 2620 2621 if (migrate_vma_check_page(page)) 2622 continue; 2623 2624 restore: 2625 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2626 migrate->cpages--; 2627 restore++; 2628 } 2629 2630 for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) { 2631 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2632 2633 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) 2634 continue; 2635 2636 remove_migration_ptes(page, page, false); 2637 2638 migrate->src[i] = 0; 2639 unlock_page(page); 2640 restore--; 2641 2642 if (is_zone_device_page(page)) 2643 put_page(page); 2644 else 2645 putback_lru_page(page); 2646 } 2647 } 2648 2649 /** 2650 * migrate_vma_setup() - prepare to migrate a range of memory 2651 * @args: contains the vma, start, and pfns arrays for the migration 2652 * 2653 * Returns: negative errno on failures, 0 when 0 or more pages were migrated 2654 * without an error. 2655 * 2656 * Prepare to migrate a range of memory virtual address range by collecting all 2657 * the pages backing each virtual address in the range, saving them inside the 2658 * src array. Then lock those pages and unmap them. Once the pages are locked 2659 * and unmapped, check whether each page is pinned or not. Pages that aren't 2660 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the 2661 * corresponding src array entry. Then restores any pages that are pinned, by 2662 * remapping and unlocking those pages. 2663 * 2664 * The caller should then allocate destination memory and copy source memory to 2665 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE 2666 * flag set). Once these are allocated and copied, the caller must update each 2667 * corresponding entry in the dst array with the pfn value of the destination 2668 * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set 2669 * (destination pages must have their struct pages locked, via lock_page()). 2670 * 2671 * Note that the caller does not have to migrate all the pages that are marked 2672 * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from 2673 * device memory to system memory. If the caller cannot migrate a device page 2674 * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe 2675 * consequences for the userspace process, so it must be avoided if at all 2676 * possible. 2677 * 2678 * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we 2679 * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus 2680 * allowing the caller to allocate device memory for those unbacked virtual 2681 * addresses. For this the caller simply has to allocate device memory and 2682 * properly set the destination entry like for regular migration. 
Note that 2683 * this can still fail, and thus inside the device driver you must check if the 2684 * migration was successful for those entries after calling migrate_vma_pages(), 2685 * just like for regular migration. 2686 * 2687 * After that, the callers must call migrate_vma_pages() to go over each entry 2688 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag 2689 * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set, 2690 * then migrate_vma_pages() to migrate struct page information from the source 2691 * struct page to the destination struct page. If it fails to migrate the 2692 * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the 2693 * src array. 2694 * 2695 * At this point all successfully migrated pages have an entry in the src 2696 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst 2697 * array entry with MIGRATE_PFN_VALID flag set. 2698 * 2699 * Once migrate_vma_pages() returns the caller may inspect which pages were 2700 * successfully migrated, and which were not. Successfully migrated pages will 2701 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry. 2702 * 2703 * It is safe to update device page table after migrate_vma_pages() because 2704 * both destination and source page are still locked, and the mmap_lock is held 2705 * in read mode (hence no one can unmap the range being migrated). 2706 * 2707 * Once the caller is done cleaning up things and updating its page table (if it 2708 * chose to do so, this is not an obligation) it finally calls 2709 * migrate_vma_finalize() to update the CPU page table to point to new pages 2710 * for successfully migrated pages or otherwise restore the CPU page table to 2711 * point to the original source pages. 2712 */ 2713 int migrate_vma_setup(struct migrate_vma *args) 2714 { 2715 long nr_pages = (args->end - args->start) >> PAGE_SHIFT; 2716 2717 args->start &= PAGE_MASK; 2718 args->end &= PAGE_MASK; 2719 if (!args->vma || is_vm_hugetlb_page(args->vma) || 2720 (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma)) 2721 return -EINVAL; 2722 if (nr_pages <= 0) 2723 return -EINVAL; 2724 if (args->start < args->vma->vm_start || 2725 args->start >= args->vma->vm_end) 2726 return -EINVAL; 2727 if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end) 2728 return -EINVAL; 2729 if (!args->src || !args->dst) 2730 return -EINVAL; 2731 2732 memset(args->src, 0, sizeof(*args->src) * nr_pages); 2733 args->cpages = 0; 2734 args->npages = 0; 2735 2736 migrate_vma_collect(args); 2737 2738 if (args->cpages) 2739 migrate_vma_prepare(args); 2740 if (args->cpages) 2741 migrate_vma_unmap(args); 2742 2743 /* 2744 * At this point pages are locked and unmapped, and thus they have 2745 * stable content and can safely be copied to destination memory that 2746 * is allocated by the drivers. 2747 */ 2748 return 0; 2749 2750 } 2751 EXPORT_SYMBOL(migrate_vma_setup); 2752 2753 /* 2754 * This code closely matches the code in: 2755 * __handle_mm_fault() 2756 * handle_pte_fault() 2757 * do_anonymous_page() 2758 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE 2759 * private page. 
2760 */ 2761 static void migrate_vma_insert_page(struct migrate_vma *migrate, 2762 unsigned long addr, 2763 struct page *page, 2764 unsigned long *src) 2765 { 2766 struct vm_area_struct *vma = migrate->vma; 2767 struct mm_struct *mm = vma->vm_mm; 2768 bool flush = false; 2769 spinlock_t *ptl; 2770 pte_t entry; 2771 pgd_t *pgdp; 2772 p4d_t *p4dp; 2773 pud_t *pudp; 2774 pmd_t *pmdp; 2775 pte_t *ptep; 2776 2777 /* Only allow populating anonymous memory */ 2778 if (!vma_is_anonymous(vma)) 2779 goto abort; 2780 2781 pgdp = pgd_offset(mm, addr); 2782 p4dp = p4d_alloc(mm, pgdp, addr); 2783 if (!p4dp) 2784 goto abort; 2785 pudp = pud_alloc(mm, p4dp, addr); 2786 if (!pudp) 2787 goto abort; 2788 pmdp = pmd_alloc(mm, pudp, addr); 2789 if (!pmdp) 2790 goto abort; 2791 2792 if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp)) 2793 goto abort; 2794 2795 /* 2796 * Use pte_alloc() instead of pte_alloc_map(). We can't run 2797 * pte_offset_map() on pmds where a huge pmd might be created 2798 * from a different thread. 2799 * 2800 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when 2801 * parallel threads are excluded by other means. 2802 * 2803 * Here we only have mmap_read_lock(mm). 2804 */ 2805 if (pte_alloc(mm, pmdp)) 2806 goto abort; 2807 2808 /* See the comment in pte_alloc_one_map() */ 2809 if (unlikely(pmd_trans_unstable(pmdp))) 2810 goto abort; 2811 2812 if (unlikely(anon_vma_prepare(vma))) 2813 goto abort; 2814 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) 2815 goto abort; 2816 2817 /* 2818 * The memory barrier inside __SetPageUptodate makes sure that 2819 * preceding stores to the page contents become visible before 2820 * the set_pte_at() write. 2821 */ 2822 __SetPageUptodate(page); 2823 2824 if (is_zone_device_page(page)) { 2825 if (is_device_private_page(page)) { 2826 swp_entry_t swp_entry; 2827 2828 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE); 2829 entry = swp_entry_to_pte(swp_entry); 2830 } else { 2831 /* 2832 * For now we only support migrating to un-addressable 2833 * device memory. 2834 */ 2835 pr_warn_once("Unsupported ZONE_DEVICE page type.\n"); 2836 goto abort; 2837 } 2838 } else { 2839 entry = mk_pte(page, vma->vm_page_prot); 2840 if (vma->vm_flags & VM_WRITE) 2841 entry = pte_mkwrite(pte_mkdirty(entry)); 2842 } 2843 2844 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); 2845 2846 if (check_stable_address_space(mm)) 2847 goto unlock_abort; 2848 2849 if (pte_present(*ptep)) { 2850 unsigned long pfn = pte_pfn(*ptep); 2851 2852 if (!is_zero_pfn(pfn)) 2853 goto unlock_abort; 2854 flush = true; 2855 } else if (!pte_none(*ptep)) 2856 goto unlock_abort; 2857 2858 /* 2859 * Check for userfaultfd but do not deliver the fault. Instead, 2860 * just back off. 
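	 * (Inserting the page here would bypass the registered userfaultfd;
	 * by backing off, a later CPU fault on this address delivers the
	 * missing-page event to the monitor as usual.)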
2861 */ 2862 if (userfaultfd_missing(vma)) 2863 goto unlock_abort; 2864 2865 inc_mm_counter(mm, MM_ANONPAGES); 2866 page_add_new_anon_rmap(page, vma, addr, false); 2867 if (!is_zone_device_page(page)) 2868 lru_cache_add_inactive_or_unevictable(page, vma); 2869 get_page(page); 2870 2871 if (flush) { 2872 flush_cache_page(vma, addr, pte_pfn(*ptep)); 2873 ptep_clear_flush_notify(vma, addr, ptep); 2874 set_pte_at_notify(mm, addr, ptep, entry); 2875 update_mmu_cache(vma, addr, ptep); 2876 } else { 2877 /* No need to invalidate - it was non-present before */ 2878 set_pte_at(mm, addr, ptep, entry); 2879 update_mmu_cache(vma, addr, ptep); 2880 } 2881 2882 pte_unmap_unlock(ptep, ptl); 2883 *src = MIGRATE_PFN_MIGRATE; 2884 return; 2885 2886 unlock_abort: 2887 pte_unmap_unlock(ptep, ptl); 2888 abort: 2889 *src &= ~MIGRATE_PFN_MIGRATE; 2890 } 2891 2892 /** 2893 * migrate_vma_pages() - migrate meta-data from src page to dst page 2894 * @migrate: migrate struct containing all migration information 2895 * 2896 * This migrates struct page meta-data from source struct page to destination 2897 * struct page. This effectively finishes the migration from source page to the 2898 * destination page. 2899 */ 2900 void migrate_vma_pages(struct migrate_vma *migrate) 2901 { 2902 const unsigned long npages = migrate->npages; 2903 const unsigned long start = migrate->start; 2904 struct mmu_notifier_range range; 2905 unsigned long addr, i; 2906 bool notified = false; 2907 2908 for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) { 2909 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); 2910 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2911 struct address_space *mapping; 2912 int r; 2913 2914 if (!newpage) { 2915 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2916 continue; 2917 } 2918 2919 if (!page) { 2920 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) 2921 continue; 2922 if (!notified) { 2923 notified = true; 2924 2925 mmu_notifier_range_init_migrate(&range, 0, 2926 migrate->vma, migrate->vma->vm_mm, 2927 addr, migrate->end, 2928 migrate->pgmap_owner); 2929 mmu_notifier_invalidate_range_start(&range); 2930 } 2931 migrate_vma_insert_page(migrate, addr, newpage, 2932 &migrate->src[i]); 2933 continue; 2934 } 2935 2936 mapping = page_mapping(page); 2937 2938 if (is_zone_device_page(newpage)) { 2939 if (is_device_private_page(newpage)) { 2940 /* 2941 * For now only support private anonymous when 2942 * migrating to un-addressable device memory. 2943 */ 2944 if (mapping) { 2945 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2946 continue; 2947 } 2948 } else { 2949 /* 2950 * Other types of ZONE_DEVICE page are not 2951 * supported. 2952 */ 2953 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2954 continue; 2955 } 2956 } 2957 2958 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY); 2959 if (r != MIGRATEPAGE_SUCCESS) 2960 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2961 } 2962 2963 /* 2964 * No need to double call mmu_notifier->invalidate_range() callback as 2965 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page() 2966 * did already call it. 
2967 */ 2968 if (notified) 2969 mmu_notifier_invalidate_range_only_end(&range); 2970 } 2971 EXPORT_SYMBOL(migrate_vma_pages); 2972 2973 /** 2974 * migrate_vma_finalize() - restore CPU page table entry 2975 * @migrate: migrate struct containing all migration information 2976 * 2977 * This replaces the special migration pte entry with either a mapping to the 2978 * new page if migration was successful for that page, or to the original page 2979 * otherwise. 2980 * 2981 * This also unlocks the pages and puts them back on the lru, or drops the extra 2982 * refcount, for device pages. 2983 */ 2984 void migrate_vma_finalize(struct migrate_vma *migrate) 2985 { 2986 const unsigned long npages = migrate->npages; 2987 unsigned long i; 2988 2989 for (i = 0; i < npages; i++) { 2990 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); 2991 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2992 2993 if (!page) { 2994 if (newpage) { 2995 unlock_page(newpage); 2996 put_page(newpage); 2997 } 2998 continue; 2999 } 3000 3001 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) { 3002 if (newpage) { 3003 unlock_page(newpage); 3004 put_page(newpage); 3005 } 3006 newpage = page; 3007 } 3008 3009 remove_migration_ptes(page, newpage, false); 3010 unlock_page(page); 3011 3012 if (is_zone_device_page(page)) 3013 put_page(page); 3014 else 3015 putback_lru_page(page); 3016 3017 if (newpage != page) { 3018 unlock_page(newpage); 3019 if (is_zone_device_page(newpage)) 3020 put_page(newpage); 3021 else 3022 putback_lru_page(newpage); 3023 } 3024 } 3025 } 3026 EXPORT_SYMBOL(migrate_vma_finalize); 3027 #endif /* CONFIG_DEVICE_PRIVATE */ 3028
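
/*
 * Illustrative summary of the migrate_vma_* flow documented above, as seen
 * from a device driver migrating a single page of system memory to device
 * private memory. This is a non-authoritative sketch: the driver helpers
 * (my_alloc_and_lock_device_page(), my_copy_to_device(), my_owner) are
 * hypothetical and error handling is elided.
 *
 *	unsigned long src_pfn = 0, dst_pfn = 0;
 *	struct page *dpage;
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= addr,
 *		.end		= addr + PAGE_SIZE,
 *		.src		= &src_pfn,
 *		.dst		= &dst_pfn,
 *		.pgmap_owner	= my_owner,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *
 *	if (migrate_vma_setup(&args))
 *		return -EAGAIN;
 *
 *	if (args.cpages && (src_pfn & MIGRATE_PFN_MIGRATE)) {
 *		dpage = my_alloc_and_lock_device_page();
 *		my_copy_to_device(dpage, migrate_pfn_to_page(src_pfn));
 *		dst_pfn = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
 *	}
 *	migrate_vma_pages(&args);
 *	// src_pfn keeps MIGRATE_PFN_MIGRATE set only if the page migrated
 *	migrate_vma_finalize(&args);
 */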