/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}
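/*
 * Illustrative sketch (not part of the original file): the calling sequence
 * that migrate_prep()/migrate_prep_local() are meant to precede, pieced
 * together from the callers in this file (see do_move_page_to_node_array()
 * below). The allocator callback name my_new_page, the private cookie and
 * the reason code are placeholders for whatever the caller actually uses.
 *
 *	LIST_HEAD(pagelist);
 *	int err;
 *
 *	migrate_prep();
 *	...
 *	if (!isolate_lru_page(page)) {
 *		list_add_tail(&page->lru, &pagelist);
 *		inc_zone_page_state(page, NR_ISOLATED_ANON +
 *				    page_is_file_cache(page));
 *	}
 *	...
 *	err = migrate_pages(&pagelist, my_new_page, private,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */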
/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used instead of putback_lru_pages(),
 * whenever the isolated pageset has been built by isolate_migratepages_range()
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		if (unlikely(isolated_balloon_page(page)))
			balloon_page_putback(page);
		else
			putback_lru_page(page);
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
	} else {
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			goto out;
		if (pmd_trans_huge(*pmd))
			goto out;

		ptep = pte_offset_map(pmd, addr);

		/*
		 * Peek to check is_swap_pte() before taking ptlock?  No, we
		 * can race mremap's move_ptes(), which skips anon_vma lock.
		 */

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (pte_swp_soft_dirty(*ptep))
		pte = pte_mksoft_dirty(pte);
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new)) {
		pte = pte_mkhuge(pte);
		pte = arch_make_huge_pte(pte, vma, new, 0);
	}
#endif
	flush_dcache_page(new);
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}
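/*
 * Illustrative sketch (not part of the original file): for context, the
 * counterpart that installs a migration entry lives in mm/rmap.c's
 * try_to_unmap_one() when called with TTU_MIGRATION, and under the page
 * table lock it does roughly the following (soft-dirty handling omitted):
 *
 *	entry = make_migration_entry(page, pte_write(pteval));
 *	set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 *
 * remove_migration_pte() above performs the reverse translation once the
 * new page is in place.
 */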
/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement for page migration has started,
	 * page_count *must* be zero. And we don't want to call
	 * wait_on_page_locked() against a page without get_page().
	 * So we use get_page_unless_zero() here. Even if that fails,
	 * the page fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 *   1 for anonymous pages without a mapping
 *   2 for pages with a mapping
 *   3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count)
{
	int expected_count = 1 + extra_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;
		return MIGRATEPAGE_SUCCESS;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count += 1 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If the mapping
	 * were moved first and we then failed to lock the buffers, we could
	 * not move the mapping back because of the elevated page count and
	 * would have to block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_unfreeze_refs(page, expected_count);
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_unfreeze_refs(page, expected_count - 1);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (!PageSwapCache(page) && PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return MIGRATEPAGE_SUCCESS;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count - 1);

	spin_unlock_irq(&mapping->tree_lock);
	return MIGRATEPAGE_SUCCESS;
}

/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page.  We need something more
 * specialized.
 */
static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

static void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	int nr_pages;

	if (PageHuge(src)) {
		/* hugetlbfs page */
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
			__copy_gigantic_page(dst, src, nr_pages);
			return;
		}
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = hpage_nr_pages(src);
	}

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		if (PageSwapBacked(page))
			SetPageDirty(newpage);
		else
			__set_page_dirty_nobuffers(newpage);
	}

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * In the async case, migrate_page_move_mapping locked the buffers
	 * with an IRQ-safe spinlock held. In the sync case, the buffers
	 * need to be locked now
	 */
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				int remap_swapcache, enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page.*/
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page, mode);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems provide a
		 * migratepage callback. Anonymous pages are part of swap
		 * space which also has its own migratepage callback. This
		 * is the most common path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page, mode);
	else
		rc = fallback_migrate_page(mapping, newpage, page, mode);

	if (rc != MIGRATEPAGE_SUCCESS) {
		newpage->mapping = NULL;
	} else {
		if (remap_swapcache)
			remove_migration_ptes(page, newpage);
		page->mapping = NULL;
	}

	unlock_page(newpage);

	return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int remap_swapcache = 1;
	struct mem_cgroup *mem;
	struct anon_vma *anon_vma = NULL;

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	/* charge against new page */
	mem_cgroup_prepare_migration(page, newpage, &mem);

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much
		 */
		if (mode != MIGRATE_SYNC) {
			rc = -EBUSY;
			goto uncharge;
		}
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that the anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of
	 * page_lock(): file caches may use write_page() or lock_page() during
	 * migration, so only anon pages need this care here.
	 */
	if (PageAnon(page) && !PageKsm(page)) {
		/*
		 * Only page_lock_anon_vma_read() understands the subtleties of
		 * getting a hold on an anon_vma from outside one of its mms.
		 */
		anon_vma = page_get_anon_vma(page);
		if (anon_vma) {
			/*
			 * Anon page
			 */
		} else if (PageSwapCache(page)) {
			/*
			 * We cannot be sure that the anon_vma of an unmapped
			 * swapcache page is safe to use because we don't
			 * know in advance if the VMA that this page belonged
			 * to still exists. If the VMA and others sharing the
			 * data have been freed, then the anon_vma could
			 * already be invalid.
			 *
			 * To avoid this possibility, swapcache pages get
			 * migrated but are not remapped when migration
			 * completes
			 */
			remap_swapcache = 0;
		} else {
			goto uncharge;
		}
	}

	if (unlikely(balloon_page_movable(page))) {
		/*
		 * A ballooned page does not need any special attention from
		 * physical to virtual reverse mapping procedures.
		 * Skip any attempt to unmap PTEs or to remap swap cache,
		 * in order to avoid burning cycles at rmap level, and perform
		 * the page migration right away (protected by page lock).
		 */
		rc = balloon_page_migrate(newpage, page, mode);
		goto uncharge;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON(PageAnon(page));
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto uncharge;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache, mode);

	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);

	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);

uncharge:
	mem_cgroup_end_migration(mem, page, newpage,
				 (rc == MIGRATEPAGE_SUCCESS ||
				  rc == MIGRATEPAGE_BALLOON_SUCCESS));
	unlock_page(page);
out:
	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, enum migrate_mode mode)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto out;
	}

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page)))
			goto out;

	rc = __unmap_and_move(page, newpage, force, mode);

	if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
		/*
		 * A ballooned page has been migrated already.
		 * Now, it's the time to wrap up counters,
		 * hand the page back to the buddy allocator and return.
		 */
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_cache(page));
		balloon_page_free(page);
		return MIGRATEPAGE_SUCCESS;
	}
out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);
	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and the direct
 * I/O code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				unsigned long private, struct page *hpage,
				int force, enum migrate_mode mode)
{
	int rc = 0;
	int *result = NULL;
	struct page *new_hpage = get_new_page(hpage, private, &result);
	struct anon_vma *anon_vma = NULL;

	/*
	 * Movability of hugepages depends on architectures and hugepage size.
	 * This check is necessary because some callers of hugepage migration
	 * like soft offline and memory hotremove don't walk through page
	 * tables or check whether the hugepage is pmd-based or not before
	 * kicking migration.
	 */
	if (!hugepage_migration_support(page_hstate(hpage)))
		return -ENOSYS;

	if (!new_hpage)
		return -ENOMEM;

	rc = -EAGAIN;

	if (!trylock_page(hpage)) {
		if (!force || mode != MIGRATE_SYNC)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, 1, mode);

	if (rc)
		remove_migration_ptes(hpage, hpage);

	if (anon_vma)
		put_anon_vma(anon_vma);

	if (!rc)
		hugetlb_cgroup_migrate(hpage, new_hpage);

	unlock_page(hpage);
out:
	if (rc != -EAGAIN)
		putback_active_hugepage(hpage);
	put_page(new_hpage);
	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}

/*
 * migrate_pages - migrate the pages specified in a list, to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 *
 * The function returns after 10 attempts or if no pages are movable any more
 * because the list has become empty or no retryable pages exist any more.
 * The caller should call putback_lru_pages() to return pages to the LRU
 * or free list only if ret != 0.
 *
 * Returns the number of pages that were not migrated, or an error code.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		unsigned long private, enum migrate_mode mode, int reason)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_succeeded = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for(pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			if (PageHuge(page))
				rc = unmap_and_move_huge_page(get_new_page,
						private, page, pass > 2, mode);
			else
				rc = unmap_and_move(get_new_page, private,
						page, pass > 2, mode);

			switch(rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded++;
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = nr_failed + retry;
out:
	if (nr_succeeded)
		count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
	if (nr_failed)
		count_vm_events(PGMIGRATE_FAIL, nr_failed);
	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return rc;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	if (PageHuge(p))
		return alloc_huge_page_node(page_hstate(compound_head(p)),
					pm->node);
	else
		return alloc_pages_exact_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		/* Use PageReserved to check for zero page */
		if (PageReserved(page))
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		if (PageHuge(page)) {
			isolate_huge_page(page, &pagelist);
			goto put_and_set;
		}

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of page_to_node array in a page,
	 * but keep the last one as a marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker for this chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	tcred = __task_cred(task);
	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	task_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm)
		return -EINVAL;

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;

out:
	put_task_struct(task);
	return err;
}

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages.
 * Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;
	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		if (!zone_reclaimable(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       0, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					   unsigned long data,
					   int **result)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = alloc_pages_exact_node(nid,
					 (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
					  __GFP_NOMEMALLOC | __GFP_NORETRY |
					  __GFP_NOWARN) &
					 ~GFP_IOFS, 0);
	if (newpage)
		page_cpupid_xchg_last(newpage, page_cpupid_last(page));

	return newpage;
}

/*
 * page migration rate limiting control.
 * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
 * window of time. Default here says do not migrate more than 1280M per second.
 * If a node is rate-limited then PTE NUMA updates are also rate-limited. However
 * as it is faults that reset the window, pte updates will happen unconditionally
 * if there has not been a fault since @pteupdate_interval_millisecs after the
 * throttle window closed.
 */
static unsigned int migrate_interval_millisecs __read_mostly = 100;
static unsigned int pteupdate_interval_millisecs __read_mostly = 1000;
static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);

/* Returns true if NUMA migration is currently rate limited */
bool migrate_ratelimited(int node)
{
	pg_data_t *pgdat = NODE_DATA(node);

	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window +
				msecs_to_jiffies(pteupdate_interval_millisecs)))
		return false;

	if (pgdat->numabalancing_migrate_nr_pages < ratelimit_pages)
		return false;

	return true;
}

/* Returns true if the node is migrate rate-limited after the update */
bool numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages)
{
	bool rate_limited = false;

	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory bus is saturated and
	 * all the time is being spent migrating!
	 */
	spin_lock(&pgdat->numabalancing_migrate_lock);
	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
		pgdat->numabalancing_migrate_nr_pages = 0;
		pgdat->numabalancing_migrate_next_window = jiffies +
			msecs_to_jiffies(migrate_interval_millisecs);
	}
	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
		rate_limited = true;
	else
		pgdat->numabalancing_migrate_nr_pages += nr_pages;
	spin_unlock(&pgdat->numabalancing_migrate_lock);

	return rate_limited;
}

int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int page_lru;

	VM_BUG_ON(compound_order(page) && !PageTransHuge(page));

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
		return 0;

	if (isolate_lru_page(page))
		return 0;

	/*
	 * migrate_misplaced_transhuge_page() skips page migration's usual
	 * check on page_count(), so we must do it here, now that the page
	 * has been isolated: a GUP pin, or any other pin, prevents migration.
	 * The expected page count is 3: 1 for page's mapcount and 1 for the
	 * caller's pin and 1 for the reference taken by isolate_lru_page().
	 */
	if (PageTransHuge(page) && page_count(page) != 3) {
		putback_lru_page(page);
		return 0;
	}

	page_lru = page_is_file_cache(page);
	mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru,
				hpage_nr_pages(page));

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}

bool pmd_trans_migrating(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	return PageLocked(page);
}

void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
{
	struct page *page = pmd_page(*pmd);
	wait_on_page_locked(page);
}

/*
 * Attempt to migrate a misplaced page to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the page that will be dropped by this function before returning.
 */
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
			   int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	LIST_HEAD(migratepages);

	/*
	 * Don't migrate file pages that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 */
	if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory bus is saturated and
	 * all the time is being spent migrating!
	 */
	if (numamigrate_update_ratelimit(pgdat, 1))
		goto out;

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated)
		goto out;

	list_add(&page->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
				     node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
	if (nr_remaining) {
		putback_lru_pages(&migratepages);
		isolated = 0;
	} else
		count_vm_numa_event(NUMA_PAGE_MIGRATE);
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * Migrates a THP to a given target node. page must be locked and is unlocked
 * before returning.
 */
int migrate_misplaced_transhuge_page(struct mm_struct *mm,
				struct vm_area_struct *vma,
				pmd_t *pmd, pmd_t entry,
				unsigned long address,
				struct page *page, int node)
{
	spinlock_t *ptl;
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated = 0;
	struct page *new_page = NULL;
	struct mem_cgroup *memcg = NULL;
	int page_lru = page_is_file_cache(page);
	unsigned long mmun_start = address & HPAGE_PMD_MASK;
	unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
	pmd_t orig_entry;

	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory bus is saturated and
	 * all the time is being spent migrating!
	 */
	if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
		goto out_dropref;

	new_page = alloc_pages_node(node,
		(GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER);
	if (!new_page)
		goto out_fail;

	page_cpupid_xchg_last(new_page, page_cpupid_last(page));

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated) {
		put_page(new_page);
		goto out_fail;
	}

	if (mm_tlb_flush_pending(mm))
		flush_tlb_range(vma, mmun_start, mmun_end);

	/* Prepare a page as a migration target */
	__set_page_locked(new_page);
	SetPageSwapBacked(new_page);

	/* anon mapping, we can simply copy page->mapping to the new page: */
	new_page->mapping = page->mapping;
	new_page->index = page->index;
	migrate_page_copy(new_page, page);
	WARN_ON(PageLRU(new_page));

	/* Recheck the target PMD */
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
fail_putback:
		spin_unlock(ptl);
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

		/* Reverse changes made by migrate_page_copy() */
		if (TestClearPageActive(new_page))
			SetPageActive(page);
		if (TestClearPageUnevictable(new_page))
			SetPageUnevictable(page);
		mlock_migrate_page(page, new_page);

		unlock_page(new_page);
		put_page(new_page);		/* Free it */

		/* Retake the callers reference and putback on LRU */
		get_page(page);
		putback_lru_page(page);
		mod_zone_page_state(page_zone(page),
			 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);

		goto out_unlock;
	}

	/*
	 * Traditional migration needs to prepare the memcg charge
	 * transaction early to prevent the old page from being
	 * uncharged when installing migration entries.
	 * Here we can save the potential rollback and start the charge
	 * transfer only when migration is already known to end successfully.
	 */
	mem_cgroup_prepare_migration(page, new_page, &memcg);

	orig_entry = *pmd;
	entry = mk_pmd(new_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);

	/*
	 * Clear the old entry under pagetable lock and establish the new PTE.
	 * Any parallel GUP will either observe the old page blocking on the
	 * page lock, block on the page table lock or observe the new page.
	 * The SetPageUptodate on the new page and page_add_new_anon_rmap
	 * guarantee the copy is visible before the pagetable update.
	 */
	flush_cache_range(vma, mmun_start, mmun_end);
	page_add_new_anon_rmap(new_page, vma, mmun_start);
	pmdp_clear_flush(vma, mmun_start, pmd);
	set_pmd_at(mm, mmun_start, pmd, entry);
	flush_tlb_range(vma, mmun_start, mmun_end);
	update_mmu_cache_pmd(vma, address, &entry);

	if (page_count(page) != 2) {
		set_pmd_at(mm, mmun_start, pmd, orig_entry);
		flush_tlb_range(vma, mmun_start, mmun_end);
		update_mmu_cache_pmd(vma, address, &entry);
		page_remove_rmap(new_page);
		goto fail_putback;
	}

	page_remove_rmap(page);

	/*
	 * Finish the charge transaction under the page table lock to
	 * prevent split_huge_page() from dividing up the charge
	 * before it's fully transferred to the new page.
	 */
	mem_cgroup_end_migration(memcg, page, new_page, true);
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	unlock_page(new_page);
	unlock_page(page);
	put_page(page);			/* Drop the rmap reference */
	put_page(page);			/* Drop the LRU isolation reference */

	count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
	count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);

	mod_zone_page_state(page_zone(page),
			NR_ISOLATED_ANON + page_lru,
			-HPAGE_PMD_NR);
	return isolated;

out_fail:
	count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
out_dropref:
	ptl = pmd_lock(mm, pmd);
	if (pmd_same(*pmd, entry)) {
		entry = pmd_mknonnuma(entry);
		set_pmd_at(mm, mmun_start, pmd, entry);
		update_mmu_cache_pmd(vma, address, &entry);
	}
	spin_unlock(ptl);

out_unlock:
	unlock_page(page);
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#endif /* CONFIG_NUMA */
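/*
 * Illustrative sketch (not part of the original file): the NUMA-balancing
 * entry points above are driven from the hinting-fault handlers. Roughly,
 * mm/memory.c's do_numa_page() does something like the following for a
 * misplaced base page (error handling and statistics omitted):
 *
 *	migrated = migrate_misplaced_page(page, vma, target_nid);
 *	if (migrated)
 *		page_nid = target_nid;
 *
 * and mm/huge_memory.c's do_huge_pmd_numa_page() similarly calls
 * migrate_misplaced_transhuge_page() for a misplaced THP.
 */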