// SPDX-License-Identifier: GPL-2.0
/*
 * Device Memory Migration functionality.
 *
 * Originally written by Jérôme Glisse.
 */
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>
#include "internal.h"

static int migrate_vma_collect_skip(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = 0;
	}

	return 0;
}

static int migrate_vma_collect_hole(unsigned long start,
				    unsigned long end,
				    __always_unused int depth,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	/* Only allow populating anonymous memory. */
	if (!vma_is_anonymous(walk->vma))
		return migrate_vma_collect_skip(start, end, walk);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
		migrate->dst[migrate->npages] = 0;
		migrate->npages++;
		migrate->cpages++;
	}

	return 0;
}

static int migrate_vma_collect_pmd(pmd_t *pmdp,
				   unsigned long start,
				   unsigned long end,
				   struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start, unmapped = 0;
	spinlock_t *ptl;
	pte_t *ptep;

again:
	if (pmd_none(*pmdp))
		return migrate_vma_collect_hole(start, end, -1, walk);

	if (pmd_trans_huge(*pmdp)) {
		struct page *page;

		ptl = pmd_lock(mm, pmdp);
		if (unlikely(!pmd_trans_huge(*pmdp))) {
			spin_unlock(ptl);
			goto again;
		}

		page = pmd_page(*pmdp);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmdp, addr);
			if (pmd_trans_unstable(pmdp))
				return migrate_vma_collect_skip(start, end,
								walk);
		} else {
			int ret;

			get_page(page);
			spin_unlock(ptl);
			if (unlikely(!trylock_page(page)))
				return migrate_vma_collect_skip(start, end,
								walk);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (ret)
				return migrate_vma_collect_skip(start, end,
								walk);
			if (pmd_none(*pmdp))
				return migrate_vma_collect_hole(start, end, -1,
								walk);
		}
	}

	if (unlikely(pmd_bad(*pmdp)))
		return migrate_vma_collect_skip(start, end, walk);

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	arch_enter_lazy_mmu_mode();

	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		unsigned long mpfn = 0, pfn;
		struct page *page;
		swp_entry_t entry;
		pte_t pte;

		pte = *ptep;

		if (pte_none(pte)) {
			if (vma_is_anonymous(vma)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
			}
			goto next;
		}

		if (!pte_present(pte)) {
			/*
			 * Only care about unaddressable device page special
			 * page table entries. Other special swap entries are
			 * not migratable, and we ignore regular swapped pages.
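			 *
			 * A device private entry encodes the pfn of the
			 * ZONE_DEVICE page backing this address;
			 * pfn_swap_entry_to_page() below recovers the
			 * struct page from it.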
			 */
			entry = pte_to_swp_entry(pte);
			if (!is_device_private_entry(entry))
				goto next;

			page = pfn_swap_entry_to_page(entry);
			if (!(migrate->flags &
			      MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
			    page->pgmap->owner != migrate->pgmap_owner)
				goto next;

			mpfn = migrate_pfn(page_to_pfn(page)) |
					MIGRATE_PFN_MIGRATE;
			if (is_writable_device_private_entry(entry))
				mpfn |= MIGRATE_PFN_WRITE;
		} else {
			pfn = pte_pfn(pte);
			if (is_zero_pfn(pfn) &&
			    (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
				goto next;
			}
			page = vm_normal_page(migrate->vma, addr, pte);
			if (page && !is_zone_device_page(page) &&
			    !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
				goto next;
			else if (page && is_device_coherent_page(page) &&
			    (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
			     page->pgmap->owner != migrate->pgmap_owner))
				goto next;
			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
		}

		/* FIXME support THP */
		if (!page || !page->mapping || PageTransCompound(page)) {
			mpfn = 0;
			goto next;
		}

		/*
		 * By getting a reference on the page we pin it and that blocks
		 * any kind of migration. Side effect is that it "freezes" the
		 * pte.
		 *
		 * We drop this reference after isolating the page from the lru
		 * for non-device pages (device pages are not on the lru and
		 * thus can't be dropped from it).
		 */
		get_page(page);

		/*
		 * We rely on trylock_page() to avoid deadlock between
		 * concurrent migrations where each is waiting on the other's
		 * page lock. If we can't immediately lock the page we fail
		 * this migration as it is only best effort anyway.
		 *
		 * If we can lock the page it's safe to set up a migration
		 * entry now. In the common case where the page is mapped once
		 * in a single process setting up the migration entry now is an
		 * optimisation to avoid walking the rmap later with
		 * try_to_migrate().
		 */
		if (trylock_page(page)) {
			bool anon_exclusive;
			pte_t swp_pte;

			flush_cache_page(vma, addr, pte_pfn(*ptep));
			anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
			if (anon_exclusive) {
				pte = ptep_clear_flush(vma, addr, ptep);

				if (page_try_share_anon_rmap(page)) {
					set_pte_at(mm, addr, ptep, pte);
					unlock_page(page);
					put_page(page);
					mpfn = 0;
					goto next;
				}
			} else {
				pte = ptep_get_and_clear(mm, addr, ptep);
			}

			migrate->cpages++;

			/*
			 * Set the dirty flag on the folio now that the pte is
			 * gone.
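			 * Otherwise a subsequent reclaim could wrongly treat
			 * the page as clean and discard data recorded only in
			 * the pte dirty bit.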
			 */
			if (pte_dirty(pte))
				folio_mark_dirty(page_folio(page));

			/* Setup special migration page table entry */
			if (mpfn & MIGRATE_PFN_WRITE)
				entry = make_writable_migration_entry(
							page_to_pfn(page));
			else if (anon_exclusive)
				entry = make_readable_exclusive_migration_entry(
							page_to_pfn(page));
			else
				entry = make_readable_migration_entry(
							page_to_pfn(page));
			if (pte_present(pte)) {
				if (pte_young(pte))
					entry = make_migration_entry_young(entry);
				if (pte_dirty(pte))
					entry = make_migration_entry_dirty(entry);
			}
			swp_pte = swp_entry_to_pte(entry);
			if (pte_present(pte)) {
				if (pte_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				if (pte_swp_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			set_pte_at(mm, addr, ptep, swp_pte);

			/*
			 * This is like a regular unmap: we remove the rmap and
			 * drop the page refcount. The page won't be freed, as
			 * we took a reference just above.
			 */
			page_remove_rmap(page, vma, false);
			put_page(page);

			if (pte_present(pte))
				unmapped++;
		} else {
			put_page(page);
			mpfn = 0;
		}

next:
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = mpfn;
	}

	/* Only flush the TLB if we actually modified any entries */
	if (unmapped)
		flush_tlb_range(walk->vma, start, end);

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(ptep - 1, ptl);

	return 0;
}

static const struct mm_walk_ops migrate_vma_walk_ops = {
	.pmd_entry = migrate_vma_collect_pmd,
	.pte_hole = migrate_vma_collect_hole,
};

/*
 * migrate_vma_collect() - collect pages over a range of virtual addresses
 * @migrate: migrate struct containing all migration information
 *
 * This will walk the CPU page table. For each virtual address backed by a
 * valid page, it updates the src array and takes a reference on the page, in
 * order to pin the page until we lock it and unmap it.
 */
static void migrate_vma_collect(struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;

	/*
	 * Note that the pgmap_owner is passed to the mmu notifier callback so
	 * that the registered device driver can skip invalidating device
	 * private page mappings that won't be migrated.
	 */
	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
		migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
		migrate->pgmap_owner);
	mmu_notifier_invalidate_range_start(&range);

	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
			&migrate_vma_walk_ops, migrate);

	mmu_notifier_invalidate_range_end(&range);
	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}

/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * folio_migrate_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
 */
static bool migrate_vma_check_page(struct page *page)
{
	/*
	 * One extra ref because the caller holds an extra reference, either
	 * from isolate_lru_page() for a regular page, or
	 * migrate_vma_collect() for a device page.
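	 *
	 * Any references beyond the map count plus the refs accounted for in
	 * "extra" indicate the page is pinned (e.g. via get_user_pages()) and
	 * therefore cannot be migrated.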
	 */
	int extra = 1;

	/*
	 * FIXME support THP (transparent huge page): it is a bit more complex
	 * to check them than regular pages, because they can be mapped with a
	 * pmd or with a pte (split pte mapping).
	 */
	if (PageCompound(page))
		return false;

	/* Pages from ZONE_DEVICE have one extra reference */
	if (is_zone_device_page(page))
		extra++;

	/* For file-backed pages */
	if (page_mapping(page))
		extra += 1 + page_has_private(page);

	if ((page_count(page) - extra) > page_mapcount(page))
		return false;

	return true;
}

/*
 * migrate_vma_unmap() - replace page mapping with special migration pte entry
 * @migrate: migrate struct containing all migration information
 *
 * Isolate pages from the LRU and replace the mappings (CPU page table ptes)
 * with special migration pte entries, then check whether each page has been
 * pinned. Pinned pages are restored because we cannot migrate them.
 *
 * This is the last step before we call the device driver callback to allocate
 * destination memory and copy the contents of the original page over to the
 * new page.
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i, restore = 0;
	bool allow_drain = true;

	lru_add_drain();

	for (i = 0; i < npages; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct folio *folio;

		if (!page)
			continue;

		/* ZONE_DEVICE pages are not on LRU */
		if (!is_zone_device_page(page)) {
			if (!PageLRU(page) && allow_drain) {
				/* Drain CPU's pagevec */
				lru_add_drain_all();
				allow_drain = false;
			}

			if (isolate_lru_page(page)) {
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				migrate->cpages--;
				restore++;
				continue;
			}

			/* Drop the reference we took in collect */
			put_page(page);
		}

		folio = page_folio(page);
		if (folio_mapped(folio))
			try_to_migrate(folio, 0);

		if (page_mapped(page) || !migrate_vma_check_page(page)) {
			if (!is_zone_device_page(page)) {
				get_page(page);
				putback_lru_page(page);
			}

			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			migrate->cpages--;
			restore++;
			continue;
		}
	}

	for (i = 0; i < npages && restore; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct folio *folio;

		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		folio = page_folio(page);
		remove_migration_ptes(folio, folio, false);

		migrate->src[i] = 0;
		folio_unlock(folio);
		folio_put(folio);
		restore--;
	}
}

/**
 * migrate_vma_setup() - prepare to migrate a range of memory
 * @args: contains the vma, start, and pfns arrays for the migration
 *
 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
 * without an error.
 *
 * Prepare to migrate a virtual address range of memory by collecting all the
 * pages backing each virtual address in the range and saving them inside the
 * src array. Then lock those pages and unmap them. Once the pages are locked
 * and unmapped, check whether each page is pinned or not. Pages that aren't
 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
 * corresponding src array entry. Any pages that are pinned are then restored
 * by remapping and unlocking them.
 *
 * The caller should then allocate destination memory and copy source memory to
 * it for all those entries (i.e. with MIGRATE_PFN_VALID and
 * MIGRATE_PFN_MIGRATE flags set). Once these are allocated and copied, the
 * caller must update each corresponding entry in the dst array with the pfn
 * value of the destination page and with MIGRATE_PFN_VALID. Destination pages
 * must be locked via lock_page().
 *
 * Note that the caller does not have to migrate all the pages that are marked
 * with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a
 * migration from device memory to system memory. If the caller cannot migrate
 * a device page back to system memory, then it must return VM_FAULT_SIGBUS,
 * which has severe consequences for the userspace process, so it must be
 * avoided if at all possible.
 *
 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
 * true) we do set the MIGRATE_PFN_MIGRATE flag inside the corresponding src
 * array entry, thus allowing the caller to allocate device memory for those
 * unbacked virtual addresses. For this the caller simply has to allocate
 * device memory and properly set the destination entry like for regular
 * migration. Note that this can still fail, so inside the device driver you
 * must check whether the migration was successful for those entries after
 * calling migrate_vma_pages(), just like for regular migration.
 *
 * After that, the caller must call migrate_vma_pages() to go over each entry
 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flags set. If the corresponding entry in the dst array has the
 * MIGRATE_PFN_VALID flag set, then migrate_vma_pages() migrates the struct
 * page information from the source struct page to the destination struct
 * page. If it fails to migrate the struct page information, then it clears
 * the MIGRATE_PFN_MIGRATE flag in the src array.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set and the
 * dst array entry with the MIGRATE_PFN_VALID flag set.
 *
 * Once migrate_vma_pages() returns the caller may inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table after migrate_vma_pages() because
 * both destination and source page are still locked, and the mmap_lock is held
 * in read mode (hence no one can unmap the range being migrated).
 *
 * Once the caller is done cleaning up things and updating its page table (if
 * it chose to do so, this is not an obligation) it finally calls
 * migrate_vma_finalize() to update the CPU page table to point to new pages
 * for successfully migrated pages or otherwise restore the CPU page table to
 * point to the original source pages.
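 *
 * A minimal usage sketch of the above sequence follows. It is illustrative
 * only and not compiled here; my_alloc_device_page(), my_copy_to_device(),
 * my_owner and NPAGES are hypothetical driver-side names, not part of this
 * API::
 *
 *	unsigned long src_pfns[NPAGES], dst_pfns[NPAGES];
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= start,
 *		.end		= start + (NPAGES << PAGE_SHIFT),
 *		.src		= src_pfns,
 *		.dst		= dst_pfns,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *		.pgmap_owner	= my_owner,
 *	};
 *	unsigned long i;
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *
 *	for (i = 0; i < args.npages; i++) {
 *		struct page *dpage;
 *
 *		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;
 *		dpage = my_alloc_device_page();
 *		if (!dpage)
 *			continue;
 *		lock_page(dpage);
 *		my_copy_to_device(dpage, args.src[i]);
 *		args.dst[i] = migrate_pfn(page_to_pfn(dpage));
 *	}
 *
 *	migrate_vma_pages(&args);
 *	migrate_vma_finalize(&args);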
 */
int migrate_vma_setup(struct migrate_vma *args)
{
	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;

	args->start &= PAGE_MASK;
	args->end &= PAGE_MASK;
	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
		return -EINVAL;
	if (nr_pages <= 0)
		return -EINVAL;
	if (args->start < args->vma->vm_start ||
	    args->start >= args->vma->vm_end)
		return -EINVAL;
	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
		return -EINVAL;
	if (!args->src || !args->dst)
		return -EINVAL;

	memset(args->src, 0, sizeof(*args->src) * nr_pages);
	args->cpages = 0;
	args->npages = 0;

	migrate_vma_collect(args);

	if (args->cpages)
		migrate_vma_unmap(args);

	/*
	 * At this point pages are locked and unmapped, and thus they have
	 * stable content and can safely be copied to destination memory that
	 * is allocated by the drivers.
	 */
	return 0;
}
EXPORT_SYMBOL(migrate_vma_setup);

/*
 * This code closely matches the code in:
 *   __handle_mm_fault()
 *     handle_pte_fault()
 *       do_anonymous_page()
 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
 * private or coherent page.
 */
static void migrate_vma_insert_page(struct migrate_vma *migrate,
				    unsigned long addr,
				    struct page *page,
				    unsigned long *src)
{
	struct vm_area_struct *vma = migrate->vma;
	struct mm_struct *mm = vma->vm_mm;
	bool flush = false;
	spinlock_t *ptl;
	pte_t entry;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/* Only allow populating anonymous memory */
	if (!vma_is_anonymous(vma))
		goto abort;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (!p4dp)
		goto abort;
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		goto abort;
	pmdp = pmd_alloc(mm, pudp, addr);
	if (!pmdp)
		goto abort;

	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
		goto abort;

	/*
	 * Use pte_alloc() instead of pte_alloc_map(). We can't run
	 * pte_offset_map() on pmds where a huge pmd might be created
	 * from a different thread.
	 *
	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
	 * parallel threads are excluded by other means.
	 *
	 * Here we only have mmap_read_lock(mm).
	 */
	if (pte_alloc(mm, pmdp))
		goto abort;

	/* See the comment in pte_alloc_one_map() */
	if (unlikely(pmd_trans_unstable(pmdp)))
		goto abort;

	if (unlikely(anon_vma_prepare(vma)))
		goto abort;
	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
		goto abort;

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
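	 *
	 * The non-atomic __SetPageUptodate() is safe here because the freshly
	 * allocated page is not yet visible to any other task.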
	 */
	__SetPageUptodate(page);

	if (is_device_private_page(page)) {
		swp_entry_t swp_entry;

		if (vma->vm_flags & VM_WRITE)
			swp_entry = make_writable_device_private_entry(
						page_to_pfn(page));
		else
			swp_entry = make_readable_device_private_entry(
						page_to_pfn(page));
		entry = swp_entry_to_pte(swp_entry);
	} else {
		if (is_zone_device_page(page) &&
		    !is_device_coherent_page(page)) {
			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
			goto abort;
		}
		entry = mk_pte(page, vma->vm_page_prot);
		if (vma->vm_flags & VM_WRITE)
			entry = pte_mkwrite(pte_mkdirty(entry));
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);

	if (check_stable_address_space(mm))
		goto unlock_abort;

	if (pte_present(*ptep)) {
		unsigned long pfn = pte_pfn(*ptep);

		if (!is_zero_pfn(pfn))
			goto unlock_abort;
		flush = true;
	} else if (!pte_none(*ptep))
		goto unlock_abort;

	/*
	 * Check for userfaultfd but do not deliver the fault. Instead,
	 * just back off.
	 */
	if (userfaultfd_missing(vma))
		goto unlock_abort;

	inc_mm_counter(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, addr);
	if (!is_zone_device_page(page))
		lru_cache_add_inactive_or_unevictable(page, vma);
	get_page(page);

	if (flush) {
		flush_cache_page(vma, addr, pte_pfn(*ptep));
		ptep_clear_flush_notify(vma, addr, ptep);
		set_pte_at_notify(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	} else {
		/* No need to invalidate - it was non-present before */
		set_pte_at(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	}

	pte_unmap_unlock(ptep, ptl);
	*src = MIGRATE_PFN_MIGRATE;
	return;

unlock_abort:
	pte_unmap_unlock(ptep, ptl);
abort:
	*src &= ~MIGRATE_PFN_MIGRATE;
}

/**
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from the source struct page to the
 * destination struct page. This effectively finishes the migration from the
 * source page to the destination page.
 */
void migrate_vma_pages(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	const unsigned long start = migrate->start;
	struct mmu_notifier_range range;
	unsigned long addr, i;
	bool notified = false;

	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct address_space *mapping;
		int r;

		if (!newpage) {
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (!page) {
			/*
			 * The only time there is no vma is when called from
			 * migrate_device_coherent_page(). However this isn't
			 * called if the page could not be unmapped.
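			 * (That path always has a source page, so this
			 * branch, and hence migrate_vma_insert_page(), is
			 * never reached without a vma.)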
			 */
			VM_BUG_ON(!migrate->vma);
			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
				continue;
			if (!notified) {
				notified = true;

				mmu_notifier_range_init_owner(&range,
					MMU_NOTIFY_MIGRATE, 0, migrate->vma,
					migrate->vma->vm_mm, addr, migrate->end,
					migrate->pgmap_owner);
				mmu_notifier_invalidate_range_start(&range);
			}
			migrate_vma_insert_page(migrate, addr, newpage,
						&migrate->src[i]);
			continue;
		}

		mapping = page_mapping(page);

		if (is_device_private_page(newpage) ||
		    is_device_coherent_page(newpage)) {
			/*
			 * For now only support anonymous memory migrating to
			 * device private or coherent memory.
			 */
			if (mapping) {
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				continue;
			}
		} else if (is_zone_device_page(newpage)) {
			/*
			 * Other types of ZONE_DEVICE page are not supported.
			 */
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		r = migrate_folio(mapping, page_folio(newpage),
				  page_folio(page), MIGRATE_SYNC_NO_COPY);
		if (r != MIGRATEPAGE_SUCCESS)
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
	}

	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
	 * did already call it.
	 */
	if (notified)
		mmu_notifier_invalidate_range_only_end(&range);
}
EXPORT_SYMBOL(migrate_vma_pages);

/**
 * migrate_vma_finalize() - restore CPU page table entry
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and puts them back on the lru, or drops the
 * extra refcount, for device pages.
 */
void migrate_vma_finalize(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct folio *dst, *src;
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			continue;
		}

		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			newpage = page;
		}

		src = page_folio(page);
		dst = page_folio(newpage);
		remove_migration_ptes(src, dst, false);
		folio_unlock(src);

		if (is_zone_device_page(page))
			put_page(page);
		else
			putback_lru_page(page);

		if (newpage != page) {
			unlock_page(newpage);
			if (is_zone_device_page(newpage))
				put_page(newpage);
			else
				putback_lru_page(newpage);
		}
	}
}
EXPORT_SYMBOL(migrate_vma_finalize);

/*
 * Migrate a device coherent page back to normal memory. The caller should
 * have a reference on the page, which will be copied to the new page if
 * migration is successful, or dropped on failure.
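 *
 * Returns 0 on success, or -EBUSY if the page could not be unmapped or the
 * migration did not complete.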
 */
int migrate_device_coherent_page(struct page *page)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma args;
	struct page *dpage;

	WARN_ON_ONCE(PageCompound(page));

	lock_page(page);
	src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
	args.src = &src_pfn;
	args.dst = &dst_pfn;
	args.cpages = 1;
	args.npages = 1;
	args.vma = NULL;

	/*
	 * We don't have a VMA and don't need to walk the page tables to find
	 * the source page. So call migrate_vma_unmap() directly to unmap the
	 * page as migrate_vma_setup() will fail if args.vma == NULL.
	 */
	migrate_vma_unmap(&args);
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		return -EBUSY;

	dpage = alloc_page(GFP_USER | __GFP_NOWARN);
	if (dpage) {
		lock_page(dpage);
		dst_pfn = migrate_pfn(page_to_pfn(dpage));
	}

	migrate_vma_pages(&args);
	if (src_pfn & MIGRATE_PFN_MIGRATE)
		copy_highpage(dpage, page);
	migrate_vma_finalize(&args);

	if (src_pfn & MIGRATE_PFN_MIGRATE)
		return 0;
	return -EBUSY;
}