/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/balloon_compaction.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto the unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}
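/*
 * For orientation, a typical caller-side cycle looks roughly like the
 * sketch below (mirroring do_move_page_to_node_array() later in this
 * file; 'alloc_target' stands in for a caller-supplied new_page_t
 * allocator and is hypothetical):
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	... isolate_lru_page() each candidate page onto 'pagelist' ...
 *	err = migrate_pages(&pagelist, alloc_target, private,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_lru_pages(&pagelist);
 */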
/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used instead of putback_lru_pages() whenever the
 * isolated pageset has been built by isolate_migratepages_range().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		if (unlikely(balloon_page_movable(page)))
			balloon_page_putback(page);
		else
			putback_lru_page(page);
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = &mm->page_table_lock;
	} else {
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			goto out;
		if (pmd_trans_huge(*pmd))
			goto out;

		ptep = pte_offset_map(pmd, addr);

		/*
		 * Peek to check is_swap_pte() before taking ptlock? No, we
		 * can race mremap's move_ptes(), which skips anon_vma lock.
		 */

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new)) {
		pte = pte_mkhuge(pte);
		pte = arch_make_huge_pte(pte, vma, new, 0);
	}
#endif
	flush_dcache_page(new);
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}
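/*
 * For reference, the migration entries that remove_migration_pte()
 * undoes are planted by try_to_unmap(..., TTU_MIGRATION); the relevant
 * step is approximately (sketch, see try_to_unmap_one() in mm/rmap.c):
 *
 *	entry = make_migration_entry(page, pte_write(pteval));
 *	set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 *
 * so faulting threads find a swap-format pte that identifies the page
 * under migration rather than a present mapping.
 */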
/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement step of page migration has
	 * started, page_count *must* be zero. And, we don't want to call
	 * wait_on_page_locked() against a page without get_page().
	 * So, we use get_page_unless_zero() here. Even if it fails, the
	 * page fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = &(mm)->page_table_lock;
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks.
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */
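/*
 * Note on the loops above: the buffers of a page form a circular
 * singly-linked ring through b_this_page, so both the lock pass and
 * the rollback pass use the idiom
 *
 *	do { ... ; bh = bh->b_this_page; } while (bh != head);
 *
 * which visits each buffer exactly once, starting from the page's
 * head buffer.
 */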
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 *   1 for anonymous pages without a mapping
 *   2 for pages with a mapping
 *   3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode)
{
	int expected_count = 0;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return MIGRATEPAGE_SUCCESS;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If the mapping
	 * were moved and we then failed to lock the buffers, we could not
	 * move the mapping back due to the elevated page count and would
	 * have to block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_unfreeze_refs(page, expected_count);
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	/*
	 * Drop the cache reference from the old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_unfreeze_refs(page, expected_count - 1);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (!PageSwapCache(page) && PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return MIGRATEPAGE_SUCCESS;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count - 1);

	spin_unlock_irq(&mapping->tree_lock);
	return MIGRATEPAGE_SUCCESS;
}
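/*
 * Worked example for the freeze/unfreeze pairing above (illustrative):
 * a clean pagecache page arrives with page_count == 2, one reference
 * held by the radix tree slot and one by the migration caller (3 if
 * private data adds another). page_freeze_refs(page, expected_count)
 * atomically drops the count to 0 only if nobody else holds a
 * reference, blocking concurrent pagecache lookups; once the slot is
 * replaced, page_unfreeze_refs(page, expected_count - 1) re-exposes
 * the old page with one reference fewer, the radix tree's reference
 * having been transferred to newpage.
 */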
/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * We want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become
		 * dirty, whereas only part of our page may be dirty.
		 */
		if (PageSwapBacked(page))
			SetPageDirty(newpage);
		else
			__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);
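/*
 * Filesystems whose pages need no special handling can hook this up
 * directly; a sketch of the wiring (foo_aops is hypothetical):
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 *
 * move_to_new_page() below then dispatches through a_ops->migratepage.
 */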
#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * In the async case, migrate_page_move_mapping locked the buffers
	 * with an IRQ-safe spinlock held. In the sync case, the buffers
	 * need to be locked now.
	 */
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}
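/*
 * Summary of how the migrate_mode constraints play out in this file
 * (descriptive, gathered from the call sites above and below):
 *
 *	MIGRATE_ASYNC		never blocks: page and buffer locks are
 *				trylocked only, writeback is not waited on
 *	MIGRATE_SYNC_LIGHT	may block on locks when forced, but does
 *				not write out dirty pages or wait on
 *				writeback
 *	MIGRATE_SYNC		may block on locks, wait on writeback and
 *				write dirty pages out via writeout()
 */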
/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				int remap_swapcache, enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page.*/
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page, mode);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems provide a
		 * migratepage callback. Anonymous pages are part of swap
		 * space which also has its own migratepage callback. This
		 * is the most common path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page, mode);
	else
		rc = fallback_migrate_page(mapping, newpage, page, mode);

	if (rc != MIGRATEPAGE_SUCCESS) {
		newpage->mapping = NULL;
	} else {
		if (remap_swapcache)
			remove_migration_ptes(page, newpage);
		page->mapping = NULL;
	}

	unlock_page(newpage);

	return rc;
}
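/*
 * (For anonymous pages the mapping obtained above is the swap address
 * space; its address_space_operations are expected to point at
 * migrate_page() as well - see swap_aops in mm/swap_state.c.)
 */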
static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int remap_swapcache = 1;
	struct mem_cgroup *mem;
	struct anon_vma *anon_vma = NULL;

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	/* charge against new page */
	mem_cgroup_prepare_migration(page, newpage, &mem);

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		if (mode != MIGRATE_SYNC) {
			rc = -EBUSY;
			goto uncharge;
		}
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * Once try_to_unmap() has run, page->mapcount drops to 0 and we
	 * could no longer notice the anon_vma being freed while we migrate
	 * the page. This get_anon_vma() delays freeing of the anon_vma
	 * until the end of migration. File cache pages are no problem
	 * because they are protected by the page lock (and may use
	 * writepage() or lock_page() during migration), so only anonymous
	 * pages need this care.
	 */
	if (PageAnon(page) && !PageKsm(page)) {
		/*
		 * Only page_lock_anon_vma_read() understands the subtleties of
		 * getting a hold on an anon_vma from outside one of its mms.
		 */
		anon_vma = page_get_anon_vma(page);
		if (anon_vma) {
			/*
			 * Anon page
			 */
		} else if (PageSwapCache(page)) {
			/*
			 * We cannot be sure that the anon_vma of an unmapped
			 * swapcache page is safe to use because we don't
			 * know in advance if the VMA that this page belonged
			 * to still exists. If the VMA and others sharing the
			 * data have been freed, then the anon_vma could
			 * already be invalid.
			 *
			 * To avoid this possibility, swapcache pages get
			 * migrated but are not remapped when migration
			 * completes.
			 */
			remap_swapcache = 0;
		} else {
			goto uncharge;
		}
	}

	if (unlikely(balloon_page_movable(page))) {
		/*
		 * A ballooned page does not need any special attention from
		 * physical to virtual reverse mapping procedures.
		 * Skip any attempt to unmap PTEs or to remap swap cache,
		 * in order to avoid burning cycles at rmap level, and perform
		 * the page migration right away (protected by page lock).
		 */
		rc = balloon_page_migrate(newpage, page, mode);
		goto uncharge;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON(PageAnon(page));
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto uncharge;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache, mode);

	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);

	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);

uncharge:
	mem_cgroup_end_migration(mem, page, newpage,
				 (rc == MIGRATEPAGE_SUCCESS ||
				  rc == MIGRATEPAGE_BALLOON_SUCCESS));
	unlock_page(page);
out:
	return rc;
}
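/*
 * __unmap_and_move() in one line of flow (descriptive summary):
 * lock page -> optionally wait on writeback -> pin anon_vma ->
 * try_to_unmap() replaces the ptes with migration entries ->
 * move_to_new_page() -> remove_migration_ptes() points the ptes at the
 * new page on success, or back at the old page on failure.
 */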
/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, enum migrate_mode mode)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto out;
	}

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page)))
			goto out;

	rc = __unmap_and_move(page, newpage, force, mode);

	if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
		/*
		 * A ballooned page has been migrated already.
		 * Now, it's the time to wrap-up counters,
		 * hand the page back to Buddy and return.
		 */
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_cache(page));
		balloon_page_free(page);
		return MIGRATEPAGE_SUCCESS;
	}
out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);
	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				unsigned long private, struct page *hpage,
				int force, enum migrate_mode mode)
{
	int rc = 0;
	int *result = NULL;
	struct page *new_hpage = get_new_page(hpage, private, &result);
	struct anon_vma *anon_vma = NULL;

	if (!new_hpage)
		return -ENOMEM;

	rc = -EAGAIN;

	if (!trylock_page(hpage)) {
		if (!force || mode != MIGRATE_SYNC)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, 1, mode);

	if (rc)
		remove_migration_ptes(hpage, hpage);

	if (anon_vma)
		put_anon_vma(anon_vma);

	if (!rc)
		hugetlb_cgroup_migrate(hpage, new_hpage);

	unlock_page(hpage);
out:
	put_page(new_hpage);
	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}
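/*
 * Note the retry policy shared by the loops below: up to 10 passes
 * over the work, with the force flag raised from the third pass on
 * (pass > 2), so that early passes stay cheap and only later passes
 * are allowed to block.
 */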
/*
 * migrate_pages - migrate the pages specified in a list, to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 *
 * The function returns after 10 attempts or when no pages are movable any
 * more because the list has become empty or no retryable pages remain.
 * The caller should call putback_lru_pages() to return pages to the LRU
 * or free list only if ret != 0.
 *
 * Returns the number of pages that were not migrated, or an error code.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		unsigned long private, enum migrate_mode mode, int reason)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_succeeded = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2, mode);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded++;
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = nr_failed + retry;
out:
	if (nr_succeeded)
		count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
	if (nr_failed)
		count_vm_events(PGMIGRATE_FAIL, nr_failed);
	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return rc;
}

int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
		      unsigned long private, enum migrate_mode mode)
{
	int pass, rc;

	for (pass = 0; pass < 10; pass++) {
		rc = unmap_and_move_huge_page(get_new_page, private,
						hpage, pass > 2, mode);
		switch (rc) {
		case -ENOMEM:
			goto out;
		case -EAGAIN:
			/* try again */
			cond_resched();
			break;
		case MIGRATEPAGE_SUCCESS:
			goto out;
		default:
			rc = -EIO;
			goto out;
		}
	}
out:
	return rc;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_exact_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}
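/*
 * new_page_node() above doubles as an example of the new_page_t
 * contract: the callback receives the page being migrated plus the
 * caller's private data, may publish a per-page status pointer through
 * 'result', and returns the target page, or NULL on allocation
 * failure.
 */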
/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		/* Use PageReserved to check for zero page */
		if (PageReserved(page))
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_lru_pages(&pagelist);
	}

	up_read(&mm->mmap_sem);
	return err;
}
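/*
 * Example (sketch): moving a single page at 'addr' to node 1 through
 * the array interface above amounts to filling
 *
 *	pm[0].addr = addr;
 *	pm[0].node = 1;
 *	pm[1].node = MAX_NUMNODES;	(end marker)
 *
 * before calling do_move_page_to_node_array(), which is exactly how
 * do_pages_move() below builds each chunk.
 */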
/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * in the corresponding array of status values.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of page_to_node array in a page,
	 * but keep the last one as a marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker for this chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}
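/*
 * Worked chunk-size example for do_pages_move() above (assuming a
 * typical 64-bit build where struct page_to_node packs to 24 bytes):
 * one 4K page holds 4096/24 = 170 entries, so chunk_nr_pages is 169
 * requests per chunk, with the 170th slot reserved for the
 * MAX_NUMNODES end marker.
 */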
/*
 * Determine the nodes of an array of pages and store them in an array
 * of status values.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store them in
 * a user array of status values.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	tcred = __task_cred(task);
	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	task_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm)
		return -EINVAL;

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;

out:
	put_task_struct(task);
	return err;
}
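/*
 * Userspace view (sketch): the syscall above backs move_pages(2),
 * usually reached through libnuma. A minimal call moving one page:
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * On return, status[0] holds the node the page now resides on or a
 * negative errno; passing nodes == NULL queries placement without
 * moving anything (the do_pages_stat() path).
 */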
/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;
	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		if (zone->all_unreclaimable)
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       0, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					   unsigned long data,
					   int **result)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = alloc_pages_exact_node(nid,
					 (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
					  __GFP_NOMEMALLOC | __GFP_NORETRY |
					  __GFP_NOWARN) &
					 ~GFP_IOFS, 0);
	if (newpage)
		page_nid_xchg_last(newpage, page_nid_last(page));

	return newpage;
}

/*
 * Page migration rate limiting control.
 * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
 * window of time. Default here says do not migrate more than 1280M per second.
 * If a node is rate-limited then PTE NUMA updates are also rate-limited.
 * However, as it is faults that reset the window, pte updates will happen
 * unconditionally if there has not been a fault since
 * @pteupdate_interval_millisecs after the throttle window closed.
 */
static unsigned int migrate_interval_millisecs __read_mostly = 100;
static unsigned int pteupdate_interval_millisecs __read_mostly = 1000;
static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
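/*
 * Worked example: with the defaults above, ratelimit_pages is 128MB
 * worth of pages (128 << (20 - PAGE_SHIFT) pages) per 100ms window,
 * which is the "1280M per second" cap quoted in the comment.
 */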
/* Returns true if NUMA migration is currently rate limited */
bool migrate_ratelimited(int node)
{
	pg_data_t *pgdat = NODE_DATA(node);

	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window +
				msecs_to_jiffies(pteupdate_interval_millisecs)))
		return false;

	if (pgdat->numabalancing_migrate_nr_pages < ratelimit_pages)
		return false;

	return true;
}

/* Returns true if the node is migrate rate-limited after the update */
bool numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages)
{
	bool rate_limited = false;

	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory bus is saturated and
	 * all the time is being spent migrating!
	 */
	spin_lock(&pgdat->numabalancing_migrate_lock);
	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
		pgdat->numabalancing_migrate_nr_pages = 0;
		pgdat->numabalancing_migrate_next_window = jiffies +
			msecs_to_jiffies(migrate_interval_millisecs);
	}
	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
		rate_limited = true;
	else
		pgdat->numabalancing_migrate_nr_pages += nr_pages;
	spin_unlock(&pgdat->numabalancing_migrate_lock);

	return rate_limited;
}

int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int page_lru;

	VM_BUG_ON(compound_order(page) && !PageTransHuge(page));

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
		return 0;

	if (isolate_lru_page(page))
		return 0;

	/*
	 * migrate_misplaced_transhuge_page() skips page migration's usual
	 * check on page_count(), so we must do it here, now that the page
	 * has been isolated: a GUP pin, or any other pin, prevents migration.
	 * The expected page count is 3: 1 for the page's mapcount, 1 for
	 * the caller's pin and 1 for the reference taken by
	 * isolate_lru_page().
	 */
	if (PageTransHuge(page) && page_count(page) != 3) {
		putback_lru_page(page);
		return 0;
	}

	page_lru = page_is_file_cache(page);
	mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru,
				hpage_nr_pages(page));

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}

/*
 * Attempt to migrate a misplaced page to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the page that will be dropped by this function before returning.
 */
int migrate_misplaced_page(struct page *page, int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	LIST_HEAD(migratepages);

	/*
	 * Don't migrate pages that are mapped in multiple processes.
	 * TODO: Handle false sharing detection instead of this hammer
	 */
	if (page_mapcount(page) != 1)
		goto out;

	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory bus is saturated and
	 * all the time is being spent migrating!
	 */
	if (numamigrate_update_ratelimit(pgdat, 1))
		goto out;

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated)
		goto out;

	list_add(&page->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
				     node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
	if (nr_remaining) {
		putback_lru_pages(&migratepages);
		isolated = 0;
	} else
		count_vm_numa_event(NUMA_PAGE_MIGRATE);
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
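/*
 * (The NUMA hinting fault paths are the expected callers here:
 * do_numa_page() and friends in mm/memory.c use
 * migrate_misplaced_page(), and do_huge_pmd_numa_page() in
 * mm/huge_memory.c uses migrate_misplaced_transhuge_page() below.)
 */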
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * Migrates a THP to a given target node. page must be locked and is unlocked
 * before returning.
 */
int migrate_misplaced_transhuge_page(struct mm_struct *mm,
				struct vm_area_struct *vma,
				pmd_t *pmd, pmd_t entry,
				unsigned long address,
				struct page *page, int node)
{
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated = 0;
	struct page *new_page = NULL;
	struct mem_cgroup *memcg = NULL;
	int page_lru = page_is_file_cache(page);

	/*
	 * Don't migrate pages that are mapped in multiple processes.
	 * TODO: Handle false sharing detection instead of this hammer
	 */
	if (page_mapcount(page) != 1)
		goto out_dropref;

	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory bus is saturated and
	 * all the time is being spent migrating!
	 */
	if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
		goto out_dropref;

	new_page = alloc_pages_node(node,
		(GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER);
	if (!new_page)
		goto out_fail;

	page_nid_xchg_last(new_page, page_nid_last(page));

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated) {
		put_page(new_page);
		goto out_fail;
	}

	/* Prepare a page as a migration target */
	__set_page_locked(new_page);
	SetPageSwapBacked(new_page);

	/* anon mapping, we can simply copy page->mapping to the new page: */
	new_page->mapping = page->mapping;
	new_page->index = page->index;
	migrate_page_copy(new_page, page);
	WARN_ON(PageLRU(new_page));

	/* Recheck the target PMD */
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, entry))) {
		spin_unlock(&mm->page_table_lock);

		/* Reverse changes made by migrate_page_copy() */
		if (TestClearPageActive(new_page))
			SetPageActive(page);
		if (TestClearPageUnevictable(new_page))
			SetPageUnevictable(page);
		mlock_migrate_page(page, new_page);

		unlock_page(new_page);
		put_page(new_page);		/* Free it */

		unlock_page(page);
		putback_lru_page(page);

		count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
		isolated = 0;
		goto out;
	}

	/*
	 * Traditional migration needs to prepare the memcg charge
	 * transaction early to prevent the old page from being
	 * uncharged when installing migration entries. Here we can
	 * save the potential rollback and start the charge transfer
	 * only when migration is already known to end successfully.
	 */
	mem_cgroup_prepare_migration(page, new_page, &memcg);

	entry = mk_pmd(new_page, vma->vm_page_prot);
	entry = pmd_mknonnuma(entry);
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
	entry = pmd_mkhuge(entry);

	page_add_new_anon_rmap(new_page, vma, haddr);

	set_pmd_at(mm, haddr, pmd, entry);
	update_mmu_cache_pmd(vma, address, &entry);
	page_remove_rmap(page);
	/*
	 * Finish the charge transaction under the page table lock to
	 * prevent split_huge_page() from dividing up the charge
	 * before it's fully transferred to the new page.
	 */
	mem_cgroup_end_migration(memcg, page, new_page, true);
	spin_unlock(&mm->page_table_lock);

	unlock_page(new_page);
	unlock_page(page);
	put_page(page);			/* Drop the rmap reference */
	put_page(page);			/* Drop the LRU isolation reference */

	count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
	count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);

out:
	mod_zone_page_state(page_zone(page),
			NR_ISOLATED_ANON + page_lru,
			-HPAGE_PMD_NR);
	return isolated;

out_fail:
	count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
out_dropref:
	unlock_page(page);
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* CONFIG_NUMA */