1 /* 2 * Memory Migration functionality - linux/mm/migration.c 3 * 4 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter 5 * 6 * Page migration was first developed in the context of the memory hotplug 7 * project. The main authors of the migration code are: 8 * 9 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp> 10 * Hirokazu Takahashi <taka@valinux.co.jp> 11 * Dave Hansen <haveblue@us.ibm.com> 12 * Christoph Lameter 13 */ 14 15 #include <linux/migrate.h> 16 #include <linux/export.h> 17 #include <linux/swap.h> 18 #include <linux/swapops.h> 19 #include <linux/pagemap.h> 20 #include <linux/buffer_head.h> 21 #include <linux/mm_inline.h> 22 #include <linux/nsproxy.h> 23 #include <linux/pagevec.h> 24 #include <linux/ksm.h> 25 #include <linux/rmap.h> 26 #include <linux/topology.h> 27 #include <linux/cpu.h> 28 #include <linux/cpuset.h> 29 #include <linux/writeback.h> 30 #include <linux/mempolicy.h> 31 #include <linux/vmalloc.h> 32 #include <linux/security.h> 33 #include <linux/memcontrol.h> 34 #include <linux/syscalls.h> 35 #include <linux/hugetlb.h> 36 #include <linux/hugetlb_cgroup.h> 37 #include <linux/gfp.h> 38 #include <linux/balloon_compaction.h> 39 #include <linux/mmu_notifier.h> 40 41 #include <asm/tlbflush.h> 42 43 #define CREATE_TRACE_POINTS 44 #include <trace/events/migrate.h> 45 46 #include "internal.h" 47 48 /* 49 * migrate_prep() needs to be called before we start compiling a list of pages 50 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is 51 * undesirable, use migrate_prep_local() 52 */ 53 int migrate_prep(void) 54 { 55 /* 56 * Clear the LRU lists so pages can be isolated. 57 * Note that pages may be moved off the LRU after we have 58 * drained them. Those pages will fail to migrate like other 59 * pages that may be busy. 60 */ 61 lru_add_drain_all(); 62 63 return 0; 64 } 65 66 /* Do the necessary work of migrate_prep but not if it involves other CPUs */ 67 int migrate_prep_local(void) 68 { 69 lru_add_drain(); 70 71 return 0; 72 } 73 74 /* 75 * Put previously isolated pages back onto the appropriate lists 76 * from where they were once taken off for compaction/migration. 77 * 78 * This function shall be used whenever the isolated pageset has been 79 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range() 80 * and isolate_huge_page(). 81 */ 82 void putback_movable_pages(struct list_head *l) 83 { 84 struct page *page; 85 struct page *page2; 86 87 list_for_each_entry_safe(page, page2, l, lru) { 88 if (unlikely(PageHuge(page))) { 89 putback_active_hugepage(page); 90 continue; 91 } 92 list_del(&page->lru); 93 dec_zone_page_state(page, NR_ISOLATED_ANON + 94 page_is_file_cache(page)); 95 if (unlikely(isolated_balloon_page(page))) 96 balloon_page_putback(page); 97 else 98 putback_lru_page(page); 99 } 100 } 101 102 /* 103 * Restore a potential migration pte to a working pte entry 104 */ 105 static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, 106 unsigned long addr, void *old) 107 { 108 struct mm_struct *mm = vma->vm_mm; 109 swp_entry_t entry; 110 pmd_t *pmd; 111 pte_t *ptep, pte; 112 spinlock_t *ptl; 113 114 if (unlikely(PageHuge(new))) { 115 ptep = huge_pte_offset(mm, addr); 116 if (!ptep) 117 goto out; 118 ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep); 119 } else { 120 pmd = mm_find_pmd(mm, addr); 121 if (!pmd) 122 goto out; 123 if (pmd_trans_huge(*pmd)) 124 goto out; 125 126 ptep = pte_offset_map(pmd, addr); 127 128 /* 129 * Peek to check is_swap_pte() before taking ptlock? 
No, we 130 * can race mremap's move_ptes(), which skips anon_vma lock. 131 */ 132 133 ptl = pte_lockptr(mm, pmd); 134 } 135 136 spin_lock(ptl); 137 pte = *ptep; 138 if (!is_swap_pte(pte)) 139 goto unlock; 140 141 entry = pte_to_swp_entry(pte); 142 143 if (!is_migration_entry(entry) || 144 migration_entry_to_page(entry) != old) 145 goto unlock; 146 147 get_page(new); 148 pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); 149 if (pte_swp_soft_dirty(*ptep)) 150 pte = pte_mksoft_dirty(pte); 151 if (is_write_migration_entry(entry)) 152 pte = pte_mkwrite(pte); 153 #ifdef CONFIG_HUGETLB_PAGE 154 if (PageHuge(new)) { 155 pte = pte_mkhuge(pte); 156 pte = arch_make_huge_pte(pte, vma, new, 0); 157 } 158 #endif 159 flush_dcache_page(new); 160 set_pte_at(mm, addr, ptep, pte); 161 162 if (PageHuge(new)) { 163 if (PageAnon(new)) 164 hugepage_add_anon_rmap(new, vma, addr); 165 else 166 page_dup_rmap(new); 167 } else if (PageAnon(new)) 168 page_add_anon_rmap(new, vma, addr); 169 else 170 page_add_file_rmap(new); 171 172 /* No need to invalidate - it was non-present before */ 173 update_mmu_cache(vma, addr, ptep); 174 unlock: 175 pte_unmap_unlock(ptep, ptl); 176 out: 177 return SWAP_AGAIN; 178 } 179 180 /* 181 * Get rid of all migration entries and replace them by 182 * references to the indicated page. 183 */ 184 static void remove_migration_ptes(struct page *old, struct page *new) 185 { 186 struct rmap_walk_control rwc = { 187 .rmap_one = remove_migration_pte, 188 .arg = old, 189 }; 190 191 rmap_walk(new, &rwc); 192 } 193 194 /* 195 * Something used the pte of a page under migration. We need to 196 * get to the page and wait until migration is finished. 197 * When we return from this function the fault will be retried. 198 */ 199 static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, 200 spinlock_t *ptl) 201 { 202 pte_t pte; 203 swp_entry_t entry; 204 struct page *page; 205 206 spin_lock(ptl); 207 pte = *ptep; 208 if (!is_swap_pte(pte)) 209 goto out; 210 211 entry = pte_to_swp_entry(pte); 212 if (!is_migration_entry(entry)) 213 goto out; 214 215 page = migration_entry_to_page(entry); 216 217 /* 218 * Once radix-tree replacement of page migration started, page_count 219 * *must* be zero. And, we don't want to call wait_on_page_locked() 220 * against a page without get_page(). 221 * So, we use get_page_unless_zero(), here. Even failed, page fault 222 * will occur again. 
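 *
 * For context, a fault-path caller typically reaches this helper along the
 * lines of the sketch below (simplified, not copied from mm/memory.c):
 *
 *	entry = pte_to_swp_entry(pte);
 *	if (unlikely(is_migration_entry(entry))) {
 *		migration_entry_wait(mm, pmd, address);
 *		return 0;
 *	}
 *
 * i.e. the faulting task sleeps here and simply retries the fault once the
 * migration entry has been replaced.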
223 */ 224 if (!get_page_unless_zero(page)) 225 goto out; 226 pte_unmap_unlock(ptep, ptl); 227 wait_on_page_locked(page); 228 put_page(page); 229 return; 230 out: 231 pte_unmap_unlock(ptep, ptl); 232 } 233 234 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 235 unsigned long address) 236 { 237 spinlock_t *ptl = pte_lockptr(mm, pmd); 238 pte_t *ptep = pte_offset_map(pmd, address); 239 __migration_entry_wait(mm, ptep, ptl); 240 } 241 242 void migration_entry_wait_huge(struct vm_area_struct *vma, 243 struct mm_struct *mm, pte_t *pte) 244 { 245 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte); 246 __migration_entry_wait(mm, pte, ptl); 247 } 248 249 #ifdef CONFIG_BLOCK 250 /* Returns true if all buffers are successfully locked */ 251 static bool buffer_migrate_lock_buffers(struct buffer_head *head, 252 enum migrate_mode mode) 253 { 254 struct buffer_head *bh = head; 255 256 /* Simple case, sync compaction */ 257 if (mode != MIGRATE_ASYNC) { 258 do { 259 get_bh(bh); 260 lock_buffer(bh); 261 bh = bh->b_this_page; 262 263 } while (bh != head); 264 265 return true; 266 } 267 268 /* async case, we cannot block on lock_buffer so use trylock_buffer */ 269 do { 270 get_bh(bh); 271 if (!trylock_buffer(bh)) { 272 /* 273 * We failed to lock the buffer and cannot stall in 274 * async migration. Release the taken locks 275 */ 276 struct buffer_head *failed_bh = bh; 277 put_bh(failed_bh); 278 bh = head; 279 while (bh != failed_bh) { 280 unlock_buffer(bh); 281 put_bh(bh); 282 bh = bh->b_this_page; 283 } 284 return false; 285 } 286 287 bh = bh->b_this_page; 288 } while (bh != head); 289 return true; 290 } 291 #else 292 static inline bool buffer_migrate_lock_buffers(struct buffer_head *head, 293 enum migrate_mode mode) 294 { 295 return true; 296 } 297 #endif /* CONFIG_BLOCK */ 298 299 /* 300 * Replace the page in the mapping. 301 * 302 * The number of remaining references must be: 303 * 1 for anonymous pages without a mapping 304 * 2 for pages with a mapping 305 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. 306 */ 307 int migrate_page_move_mapping(struct address_space *mapping, 308 struct page *newpage, struct page *page, 309 struct buffer_head *head, enum migrate_mode mode, 310 int extra_count) 311 { 312 int expected_count = 1 + extra_count; 313 void **pslot; 314 315 if (!mapping) { 316 /* Anonymous page without mapping */ 317 if (page_count(page) != expected_count) 318 return -EAGAIN; 319 return MIGRATEPAGE_SUCCESS; 320 } 321 322 spin_lock_irq(&mapping->tree_lock); 323 324 pslot = radix_tree_lookup_slot(&mapping->page_tree, 325 page_index(page)); 326 327 expected_count += 1 + page_has_private(page); 328 if (page_count(page) != expected_count || 329 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { 330 spin_unlock_irq(&mapping->tree_lock); 331 return -EAGAIN; 332 } 333 334 if (!page_freeze_refs(page, expected_count)) { 335 spin_unlock_irq(&mapping->tree_lock); 336 return -EAGAIN; 337 } 338 339 /* 340 * In the async migration case of moving a page with buffers, lock the 341 * buffers using trylock before the mapping is moved. If the mapping 342 * was moved, we later failed to lock the buffers and could not move 343 * the mapping back due to an elevated page count, we would have to 344 * block waiting on other references to be dropped. 
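 *
 * As a worked example of the reference math, assuming no extra pin
 * (extra_count == 0): a page-cache page with buffer heads holds one
 * reference for the caller, one for the radix tree slot and one for
 * PagePrivate, so expected_count is 3 at this point, matching the table
 * in the comment at the top of this function.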
345 */ 346 if (mode == MIGRATE_ASYNC && head && 347 !buffer_migrate_lock_buffers(head, mode)) { 348 page_unfreeze_refs(page, expected_count); 349 spin_unlock_irq(&mapping->tree_lock); 350 return -EAGAIN; 351 } 352 353 /* 354 * Now we know that no one else is looking at the page. 355 */ 356 get_page(newpage); /* add cache reference */ 357 if (PageSwapCache(page)) { 358 SetPageSwapCache(newpage); 359 set_page_private(newpage, page_private(page)); 360 } 361 362 radix_tree_replace_slot(pslot, newpage); 363 364 /* 365 * Drop cache reference from old page by unfreezing 366 * to one less reference. 367 * We know this isn't the last reference. 368 */ 369 page_unfreeze_refs(page, expected_count - 1); 370 371 /* 372 * If moved to a different zone then also account 373 * the page for that zone. Other VM counters will be 374 * taken care of when we establish references to the 375 * new page and drop references to the old page. 376 * 377 * Note that anonymous pages are accounted for 378 * via NR_FILE_PAGES and NR_ANON_PAGES if they 379 * are mapped to swap space. 380 */ 381 __dec_zone_page_state(page, NR_FILE_PAGES); 382 __inc_zone_page_state(newpage, NR_FILE_PAGES); 383 if (!PageSwapCache(page) && PageSwapBacked(page)) { 384 __dec_zone_page_state(page, NR_SHMEM); 385 __inc_zone_page_state(newpage, NR_SHMEM); 386 } 387 spin_unlock_irq(&mapping->tree_lock); 388 389 return MIGRATEPAGE_SUCCESS; 390 } 391 392 /* 393 * The expected number of remaining references is the same as that 394 * of migrate_page_move_mapping(). 395 */ 396 int migrate_huge_page_move_mapping(struct address_space *mapping, 397 struct page *newpage, struct page *page) 398 { 399 int expected_count; 400 void **pslot; 401 402 if (!mapping) { 403 if (page_count(page) != 1) 404 return -EAGAIN; 405 return MIGRATEPAGE_SUCCESS; 406 } 407 408 spin_lock_irq(&mapping->tree_lock); 409 410 pslot = radix_tree_lookup_slot(&mapping->page_tree, 411 page_index(page)); 412 413 expected_count = 2 + page_has_private(page); 414 if (page_count(page) != expected_count || 415 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { 416 spin_unlock_irq(&mapping->tree_lock); 417 return -EAGAIN; 418 } 419 420 if (!page_freeze_refs(page, expected_count)) { 421 spin_unlock_irq(&mapping->tree_lock); 422 return -EAGAIN; 423 } 424 425 get_page(newpage); 426 427 radix_tree_replace_slot(pslot, newpage); 428 429 page_unfreeze_refs(page, expected_count - 1); 430 431 spin_unlock_irq(&mapping->tree_lock); 432 return MIGRATEPAGE_SUCCESS; 433 } 434 435 /* 436 * Gigantic pages are so large that we do not guarantee that page++ pointer 437 * arithmetic will work across the entire page. We need something more 438 * specialized. 
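 *
 * For scale, assuming x86-64 defaults of 4K pages and MAX_ORDER 11 (so
 * MAX_ORDER_NR_PAGES == 1024): a 2MB hugepage is 512 struct pages and
 * fits within one max-order block, while a 1GB gigantic page spans 256
 * such blocks whose struct pages need not be contiguous, which is why
 * the copy loop below steps with mem_map_next() rather than plain
 * pointer arithmetic.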
439 */ 440 static void __copy_gigantic_page(struct page *dst, struct page *src, 441 int nr_pages) 442 { 443 int i; 444 struct page *dst_base = dst; 445 struct page *src_base = src; 446 447 for (i = 0; i < nr_pages; ) { 448 cond_resched(); 449 copy_highpage(dst, src); 450 451 i++; 452 dst = mem_map_next(dst, dst_base, i); 453 src = mem_map_next(src, src_base, i); 454 } 455 } 456 457 static void copy_huge_page(struct page *dst, struct page *src) 458 { 459 int i; 460 int nr_pages; 461 462 if (PageHuge(src)) { 463 /* hugetlbfs page */ 464 struct hstate *h = page_hstate(src); 465 nr_pages = pages_per_huge_page(h); 466 467 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) { 468 __copy_gigantic_page(dst, src, nr_pages); 469 return; 470 } 471 } else { 472 /* thp page */ 473 BUG_ON(!PageTransHuge(src)); 474 nr_pages = hpage_nr_pages(src); 475 } 476 477 for (i = 0; i < nr_pages; i++) { 478 cond_resched(); 479 copy_highpage(dst + i, src + i); 480 } 481 } 482 483 /* 484 * Copy the page to its new location 485 */ 486 void migrate_page_copy(struct page *newpage, struct page *page) 487 { 488 int cpupid; 489 490 if (PageHuge(page) || PageTransHuge(page)) 491 copy_huge_page(newpage, page); 492 else 493 copy_highpage(newpage, page); 494 495 if (PageError(page)) 496 SetPageError(newpage); 497 if (PageReferenced(page)) 498 SetPageReferenced(newpage); 499 if (PageUptodate(page)) 500 SetPageUptodate(newpage); 501 if (TestClearPageActive(page)) { 502 VM_BUG_ON_PAGE(PageUnevictable(page), page); 503 SetPageActive(newpage); 504 } else if (TestClearPageUnevictable(page)) 505 SetPageUnevictable(newpage); 506 if (PageChecked(page)) 507 SetPageChecked(newpage); 508 if (PageMappedToDisk(page)) 509 SetPageMappedToDisk(newpage); 510 511 if (PageDirty(page)) { 512 clear_page_dirty_for_io(page); 513 /* 514 * Want to mark the page and the radix tree as dirty, and 515 * redo the accounting that clear_page_dirty_for_io undid, 516 * but we can't use set_page_dirty because that function 517 * is actually a signal that all of the page has become dirty. 518 * Whereas only part of our page may be dirty. 519 */ 520 if (PageSwapBacked(page)) 521 SetPageDirty(newpage); 522 else 523 __set_page_dirty_nobuffers(newpage); 524 } 525 526 /* 527 * Copy NUMA information to the new page, to prevent over-eager 528 * future migrations of this same page. 529 */ 530 cpupid = page_cpupid_xchg_last(page, -1); 531 page_cpupid_xchg_last(newpage, cpupid); 532 533 mlock_migrate_page(newpage, page); 534 ksm_migrate_page(newpage, page); 535 /* 536 * Please do not reorder this without considering how mm/ksm.c's 537 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache(). 538 */ 539 ClearPageSwapCache(page); 540 ClearPagePrivate(page); 541 set_page_private(page, 0); 542 543 /* 544 * If any waiters have accumulated on the new page then 545 * wake them up. 546 */ 547 if (PageWriteback(newpage)) 548 end_page_writeback(newpage); 549 } 550 551 /************************************************************ 552 * Migration functions 553 ***********************************************************/ 554 555 /* 556 * Common logic to directly migrate a single page suitable for 557 * pages that do not use PagePrivate/PagePrivate2. 558 * 559 * Pages are locked upon entry and exit. 
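 *
 * For illustration, a filesystem whose pages carry no private data can
 * simply point its address_space_operations at this helper; a sketch for
 * a hypothetical "examplefs" (not taken from this file):
 *
 *	static const struct address_space_operations examplefs_aops = {
 *		.readpage	= examplefs_readpage,
 *		.writepage	= examplefs_writepage,
 *		.migratepage	= migrate_page,
 *	};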
560 */ 561 int migrate_page(struct address_space *mapping, 562 struct page *newpage, struct page *page, 563 enum migrate_mode mode) 564 { 565 int rc; 566 567 BUG_ON(PageWriteback(page)); /* Writeback must be complete */ 568 569 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); 570 571 if (rc != MIGRATEPAGE_SUCCESS) 572 return rc; 573 574 migrate_page_copy(newpage, page); 575 return MIGRATEPAGE_SUCCESS; 576 } 577 EXPORT_SYMBOL(migrate_page); 578 579 #ifdef CONFIG_BLOCK 580 /* 581 * Migration function for pages with buffers. This function can only be used 582 * if the underlying filesystem guarantees that no other references to "page" 583 * exist. 584 */ 585 int buffer_migrate_page(struct address_space *mapping, 586 struct page *newpage, struct page *page, enum migrate_mode mode) 587 { 588 struct buffer_head *bh, *head; 589 int rc; 590 591 if (!page_has_buffers(page)) 592 return migrate_page(mapping, newpage, page, mode); 593 594 head = page_buffers(page); 595 596 rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0); 597 598 if (rc != MIGRATEPAGE_SUCCESS) 599 return rc; 600 601 /* 602 * In the async case, migrate_page_move_mapping locked the buffers 603 * with an IRQ-safe spinlock held. In the sync case, the buffers 604 * need to be locked now 605 */ 606 if (mode != MIGRATE_ASYNC) 607 BUG_ON(!buffer_migrate_lock_buffers(head, mode)); 608 609 ClearPagePrivate(page); 610 set_page_private(newpage, page_private(page)); 611 set_page_private(page, 0); 612 put_page(page); 613 get_page(newpage); 614 615 bh = head; 616 do { 617 set_bh_page(bh, newpage, bh_offset(bh)); 618 bh = bh->b_this_page; 619 620 } while (bh != head); 621 622 SetPagePrivate(newpage); 623 624 migrate_page_copy(newpage, page); 625 626 bh = head; 627 do { 628 unlock_buffer(bh); 629 put_bh(bh); 630 bh = bh->b_this_page; 631 632 } while (bh != head); 633 634 return MIGRATEPAGE_SUCCESS; 635 } 636 EXPORT_SYMBOL(buffer_migrate_page); 637 #endif 638 639 /* 640 * Writeback a page to clean the dirty state 641 */ 642 static int writeout(struct address_space *mapping, struct page *page) 643 { 644 struct writeback_control wbc = { 645 .sync_mode = WB_SYNC_NONE, 646 .nr_to_write = 1, 647 .range_start = 0, 648 .range_end = LLONG_MAX, 649 .for_reclaim = 1 650 }; 651 int rc; 652 653 if (!mapping->a_ops->writepage) 654 /* No write method for the address space */ 655 return -EINVAL; 656 657 if (!clear_page_dirty_for_io(page)) 658 /* Someone else already triggered a write */ 659 return -EAGAIN; 660 661 /* 662 * A dirty page may imply that the underlying filesystem has 663 * the page on some queue. So the page must be clean for 664 * migration. Writeout may mean we loose the lock and the 665 * page state is no longer what we checked for earlier. 666 * At this point we know that the migration attempt cannot 667 * be successful. 668 */ 669 remove_migration_ptes(page, page); 670 671 rc = mapping->a_ops->writepage(page, &wbc); 672 673 if (rc != AOP_WRITEPAGE_ACTIVATE) 674 /* unlocked. Relock */ 675 lock_page(page); 676 677 return (rc < 0) ? -EIO : -EAGAIN; 678 } 679 680 /* 681 * Default handling if a filesystem does not provide a migration function. 
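 *
 * A block-backed filesystem can avoid ever reaching this fallback (and
 * the writeout it may trigger) by wiring one of the exported helpers
 * into its address_space_operations instead, e.g. (sketch):
 *
 *	.migratepage	= buffer_migrate_page,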
682 */ 683 static int fallback_migrate_page(struct address_space *mapping, 684 struct page *newpage, struct page *page, enum migrate_mode mode) 685 { 686 if (PageDirty(page)) { 687 /* Only writeback pages in full synchronous migration */ 688 if (mode != MIGRATE_SYNC) 689 return -EBUSY; 690 return writeout(mapping, page); 691 } 692 693 /* 694 * Buffers may be managed in a filesystem specific way. 695 * We must have no buffers or drop them. 696 */ 697 if (page_has_private(page) && 698 !try_to_release_page(page, GFP_KERNEL)) 699 return -EAGAIN; 700 701 return migrate_page(mapping, newpage, page, mode); 702 } 703 704 /* 705 * Move a page to a newly allocated page 706 * The page is locked and all ptes have been successfully removed. 707 * 708 * The new page will have replaced the old page if this function 709 * is successful. 710 * 711 * Return value: 712 * < 0 - error code 713 * MIGRATEPAGE_SUCCESS - success 714 */ 715 static int move_to_new_page(struct page *newpage, struct page *page, 716 int remap_swapcache, enum migrate_mode mode) 717 { 718 struct address_space *mapping; 719 int rc; 720 721 /* 722 * Block others from accessing the page when we get around to 723 * establishing additional references. We are the only one 724 * holding a reference to the new page at this point. 725 */ 726 if (!trylock_page(newpage)) 727 BUG(); 728 729 /* Prepare mapping for the new page.*/ 730 newpage->index = page->index; 731 newpage->mapping = page->mapping; 732 if (PageSwapBacked(page)) 733 SetPageSwapBacked(newpage); 734 735 mapping = page_mapping(page); 736 if (!mapping) 737 rc = migrate_page(mapping, newpage, page, mode); 738 else if (mapping->a_ops->migratepage) 739 /* 740 * Most pages have a mapping and most filesystems provide a 741 * migratepage callback. Anonymous pages are part of swap 742 * space which also has its own migratepage callback. This 743 * is the most common path for page migration. 744 */ 745 rc = mapping->a_ops->migratepage(mapping, 746 newpage, page, mode); 747 else 748 rc = fallback_migrate_page(mapping, newpage, page, mode); 749 750 if (rc != MIGRATEPAGE_SUCCESS) { 751 newpage->mapping = NULL; 752 } else { 753 if (remap_swapcache) 754 remove_migration_ptes(page, newpage); 755 page->mapping = NULL; 756 } 757 758 unlock_page(newpage); 759 760 return rc; 761 } 762 763 static int __unmap_and_move(struct page *page, struct page *newpage, 764 int force, enum migrate_mode mode) 765 { 766 int rc = -EAGAIN; 767 int remap_swapcache = 1; 768 struct mem_cgroup *mem; 769 struct anon_vma *anon_vma = NULL; 770 771 if (!trylock_page(page)) { 772 if (!force || mode == MIGRATE_ASYNC) 773 goto out; 774 775 /* 776 * It's not safe for direct compaction to call lock_page. 777 * For example, during page readahead pages are added locked 778 * to the LRU. Later, when the IO completes the pages are 779 * marked uptodate and unlocked. However, the queueing 780 * could be merging multiple pages for one bio (e.g. 781 * mpage_readpages). If an allocation happens for the 782 * second or third page, the process can end up locking 783 * the same page twice and deadlocking. Rather than 784 * trying to be clever about what pages can be locked, 785 * avoid the use of lock_page for direct compaction 786 * altogether. 
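 *
 * A sketch of the deadlock being avoided (hypothetical call chain, the
 * page names are illustrative):
 *
 *	mpage_readpages()
 *	  adds page A to the page cache, still locked
 *	  allocating page B enters direct compaction
 *	    compaction picks page A for migration and calls lock_page(A)
 *	    -> the task already holds A's lock: deadlock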
787 */ 788 if (current->flags & PF_MEMALLOC) 789 goto out; 790 791 lock_page(page); 792 } 793 794 /* charge against new page */ 795 mem_cgroup_prepare_migration(page, newpage, &mem); 796 797 if (PageWriteback(page)) { 798 /* 799 * Only in the case of a full synchronous migration is it 800 * necessary to wait for PageWriteback. In the async case, 801 * the retry loop is too short and in the sync-light case, 802 * the overhead of stalling is too much 803 */ 804 if (mode != MIGRATE_SYNC) { 805 rc = -EBUSY; 806 goto uncharge; 807 } 808 if (!force) 809 goto uncharge; 810 wait_on_page_writeback(page); 811 } 812 /* 813 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case, 814 * we cannot notice that anon_vma is freed while we migrate a page. 815 * This get_anon_vma() delays freeing the anon_vma pointer until the end 816 * of migration. File cache pages are no problem because they are 817 * protected by page_lock(); file caches may use writepage() or lock_page() 818 * during migration, so we only need to care about anon pages here. 819 */ 820 if (PageAnon(page) && !PageKsm(page)) { 821 /* 822 * Only page_lock_anon_vma_read() understands the subtleties of 823 * getting a hold on an anon_vma from outside one of its mms. 824 */ 825 anon_vma = page_get_anon_vma(page); 826 if (anon_vma) { 827 /* 828 * Anon page 829 */ 830 } else if (PageSwapCache(page)) { 831 /* 832 * We cannot be sure that the anon_vma of an unmapped 833 * swapcache page is safe to use because we don't 834 * know in advance if the VMA that this page belonged 835 * to still exists. If the VMA and others sharing the 836 * data have been freed, then the anon_vma could 837 * already be invalid. 838 * 839 * To avoid this possibility, swapcache pages get 840 * migrated but are not remapped when migration 841 * completes 842 */ 843 remap_swapcache = 0; 844 } else { 845 goto uncharge; 846 } 847 } 848 849 if (unlikely(balloon_page_movable(page))) { 850 /* 851 * A ballooned page does not need any special attention from 852 * physical to virtual reverse mapping procedures. 853 * Skip any attempt to unmap PTEs or to remap swap cache, 854 * in order to avoid burning cycles at rmap level, and perform 855 * the page migration right away (protected by page lock). 856 */ 857 rc = balloon_page_migrate(newpage, page, mode); 858 goto uncharge; 859 } 860 861 /* 862 * Corner case handling: 863 * 1. When a new swap-cache page is read in, it is added to the LRU 864 * and treated as swapcache but it has no rmap yet. 865 * Calling try_to_unmap() against a page->mapping==NULL page will 866 * trigger a BUG. So handle it here. 867 * 2. An orphaned page (see truncate_complete_page) might have 868 * fs-private metadata. The page can be picked up due to memory 869 * offlining. Everywhere else except page reclaim, the page is 870 * invisible to the vm, so the page cannot be migrated. So try to 871 * free the metadata, so the page can be freed.
872 */ 873 if (!page->mapping) { 874 VM_BUG_ON_PAGE(PageAnon(page), page); 875 if (page_has_private(page)) { 876 try_to_free_buffers(page); 877 goto uncharge; 878 } 879 goto skip_unmap; 880 } 881 882 /* Establish migration ptes or remove ptes */ 883 try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 884 885 skip_unmap: 886 if (!page_mapped(page)) 887 rc = move_to_new_page(newpage, page, remap_swapcache, mode); 888 889 if (rc && remap_swapcache) 890 remove_migration_ptes(page, page); 891 892 /* Drop an anon_vma reference if we took one */ 893 if (anon_vma) 894 put_anon_vma(anon_vma); 895 896 uncharge: 897 mem_cgroup_end_migration(mem, page, newpage, 898 (rc == MIGRATEPAGE_SUCCESS || 899 rc == MIGRATEPAGE_BALLOON_SUCCESS)); 900 unlock_page(page); 901 out: 902 return rc; 903 } 904 905 /* 906 * Obtain the lock on page, remove all ptes and migrate the page 907 * to the newly allocated page in newpage. 908 */ 909 static int unmap_and_move(new_page_t get_new_page, unsigned long private, 910 struct page *page, int force, enum migrate_mode mode) 911 { 912 int rc = 0; 913 int *result = NULL; 914 struct page *newpage = get_new_page(page, private, &result); 915 916 if (!newpage) 917 return -ENOMEM; 918 919 if (page_count(page) == 1) { 920 /* page was freed from under us. So we are done. */ 921 goto out; 922 } 923 924 if (unlikely(PageTransHuge(page))) 925 if (unlikely(split_huge_page(page))) 926 goto out; 927 928 rc = __unmap_and_move(page, newpage, force, mode); 929 930 if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) { 931 /* 932 * A ballooned page has been migrated already. 933 * Now it's time to wrap up the counters, 934 * hand the page back to the buddy allocator and return. 935 */ 936 dec_zone_page_state(page, NR_ISOLATED_ANON + 937 page_is_file_cache(page)); 938 balloon_page_free(page); 939 return MIGRATEPAGE_SUCCESS; 940 } 941 out: 942 if (rc != -EAGAIN) { 943 /* 944 * A page that has been migrated has all references 945 * removed and will be freed. A page that has not been 946 * migrated will have kept its references and be 947 * restored. 948 */ 949 list_del(&page->lru); 950 dec_zone_page_state(page, NR_ISOLATED_ANON + 951 page_is_file_cache(page)); 952 putback_lru_page(page); 953 } 954 /* 955 * Move the new page to the LRU. If migration was not successful 956 * then this will free the page. 957 */ 958 putback_lru_page(newpage); 959 if (result) { 960 if (rc) 961 *result = rc; 962 else 963 *result = page_to_nid(newpage); 964 } 965 return rc; 966 } 967 968 /* 969 * Counterpart of unmap_and_move() for hugepage migration. 970 * 971 * This function doesn't wait for the completion of hugepage I/O 972 * because there is no race between I/O and migration for hugepage. 973 * Note that currently hugepage I/O occurs only in direct I/O 974 * where no lock is held and PG_writeback is irrelevant, 975 * and the writeback status of all subpages is counted in the reference 976 * count of the head page (i.e. if all subpages of a 2MB hugepage are 977 * under direct I/O, the reference of the head page is 512 and a bit more.) 978 * This means that when we try to migrate a hugepage whose subpages are 979 * doing direct I/O, some references remain after try_to_unmap() and 980 * hugepage migration fails without data corruption. 981 * 982 * There is also no race when direct I/O is issued on the page under migration, 983 * because then the pte is replaced with a migration swap entry and direct I/O code 984 * will wait in the page fault for migration to complete.
985 */ 986 static int unmap_and_move_huge_page(new_page_t get_new_page, 987 unsigned long private, struct page *hpage, 988 int force, enum migrate_mode mode) 989 { 990 int rc = 0; 991 int *result = NULL; 992 struct page *new_hpage; 993 struct anon_vma *anon_vma = NULL; 994 995 /* 996 * Movability of hugepages depends on architectures and hugepage size. 997 * This check is necessary because some callers of hugepage migration 998 * like soft offline and memory hotremove don't walk through page 999 * tables or check whether the hugepage is pmd-based or not before 1000 * kicking migration. 1001 */ 1002 if (!hugepage_migration_support(page_hstate(hpage))) { 1003 putback_active_hugepage(hpage); 1004 return -ENOSYS; 1005 } 1006 1007 new_hpage = get_new_page(hpage, private, &result); 1008 if (!new_hpage) 1009 return -ENOMEM; 1010 1011 rc = -EAGAIN; 1012 1013 if (!trylock_page(hpage)) { 1014 if (!force || mode != MIGRATE_SYNC) 1015 goto out; 1016 lock_page(hpage); 1017 } 1018 1019 if (PageAnon(hpage)) 1020 anon_vma = page_get_anon_vma(hpage); 1021 1022 try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 1023 1024 if (!page_mapped(hpage)) 1025 rc = move_to_new_page(new_hpage, hpage, 1, mode); 1026 1027 if (rc) 1028 remove_migration_ptes(hpage, hpage); 1029 1030 if (anon_vma) 1031 put_anon_vma(anon_vma); 1032 1033 if (!rc) 1034 hugetlb_cgroup_migrate(hpage, new_hpage); 1035 1036 unlock_page(hpage); 1037 out: 1038 if (rc != -EAGAIN) 1039 putback_active_hugepage(hpage); 1040 put_page(new_hpage); 1041 if (result) { 1042 if (rc) 1043 *result = rc; 1044 else 1045 *result = page_to_nid(new_hpage); 1046 } 1047 return rc; 1048 } 1049 1050 /* 1051 * migrate_pages - migrate the pages specified in a list, to the free pages 1052 * supplied as the target for the page migration 1053 * 1054 * @from: The list of pages to be migrated. 1055 * @get_new_page: The function used to allocate free pages to be used 1056 * as the target of the page migration. 1057 * @private: Private data to be passed on to get_new_page() 1058 * @mode: The migration mode that specifies the constraints for 1059 * page migration, if any. 1060 * @reason: The reason for page migration. 1061 * 1062 * The function returns after 10 attempts or if no pages are movable any more 1063 * because the list has become empty or no retryable pages exist any more. 1064 * The caller should call putback_lru_pages() to return pages to the LRU 1065 * or free list only if ret != 0. 1066 * 1067 * Returns the number of pages that were not migrated, or an error code. 
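 *
 * A typical caller therefore looks roughly like the sketch below, where
 * alloc_target_page stands for a caller-supplied new_page_t callback
 * (both names are illustrative, not defined in this file):
 *
 *	LIST_HEAD(pagelist);
 *	... isolate candidate pages onto &pagelist ...
 *	err = migrate_pages(&pagelist, alloc_target_page, 0,
 *			MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 *	if (err)
 *		putback_movable_pages(&pagelist);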
1068 */ 1069 int migrate_pages(struct list_head *from, new_page_t get_new_page, 1070 unsigned long private, enum migrate_mode mode, int reason) 1071 { 1072 int retry = 1; 1073 int nr_failed = 0; 1074 int nr_succeeded = 0; 1075 int pass = 0; 1076 struct page *page; 1077 struct page *page2; 1078 int swapwrite = current->flags & PF_SWAPWRITE; 1079 int rc; 1080 1081 if (!swapwrite) 1082 current->flags |= PF_SWAPWRITE; 1083 1084 for(pass = 0; pass < 10 && retry; pass++) { 1085 retry = 0; 1086 1087 list_for_each_entry_safe(page, page2, from, lru) { 1088 cond_resched(); 1089 1090 if (PageHuge(page)) 1091 rc = unmap_and_move_huge_page(get_new_page, 1092 private, page, pass > 2, mode); 1093 else 1094 rc = unmap_and_move(get_new_page, private, 1095 page, pass > 2, mode); 1096 1097 switch(rc) { 1098 case -ENOMEM: 1099 goto out; 1100 case -EAGAIN: 1101 retry++; 1102 break; 1103 case MIGRATEPAGE_SUCCESS: 1104 nr_succeeded++; 1105 break; 1106 default: 1107 /* 1108 * Permanent failure (-EBUSY, -ENOSYS, etc.): 1109 * unlike -EAGAIN case, the failed page is 1110 * removed from migration page list and not 1111 * retried in the next outer loop. 1112 */ 1113 nr_failed++; 1114 break; 1115 } 1116 } 1117 } 1118 rc = nr_failed + retry; 1119 out: 1120 if (nr_succeeded) 1121 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded); 1122 if (nr_failed) 1123 count_vm_events(PGMIGRATE_FAIL, nr_failed); 1124 trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason); 1125 1126 if (!swapwrite) 1127 current->flags &= ~PF_SWAPWRITE; 1128 1129 return rc; 1130 } 1131 1132 #ifdef CONFIG_NUMA 1133 /* 1134 * Move a list of individual pages 1135 */ 1136 struct page_to_node { 1137 unsigned long addr; 1138 struct page *page; 1139 int node; 1140 int status; 1141 }; 1142 1143 static struct page *new_page_node(struct page *p, unsigned long private, 1144 int **result) 1145 { 1146 struct page_to_node *pm = (struct page_to_node *)private; 1147 1148 while (pm->node != MAX_NUMNODES && pm->page != p) 1149 pm++; 1150 1151 if (pm->node == MAX_NUMNODES) 1152 return NULL; 1153 1154 *result = &pm->status; 1155 1156 if (PageHuge(p)) 1157 return alloc_huge_page_node(page_hstate(compound_head(p)), 1158 pm->node); 1159 else 1160 return alloc_pages_exact_node(pm->node, 1161 GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0); 1162 } 1163 1164 /* 1165 * Move a set of pages as indicated in the pm array. The addr 1166 * field must be set to the virtual address of the page to be moved 1167 * and the node number must contain a valid target node. 1168 * The pm array ends with node = MAX_NUMNODES. 
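 *
 * For illustration, a two-entry request built by a caller would look
 * like this, with the node field of the last element acting as the
 * sentinel (the addresses are made up):
 *
 *	struct page_to_node pm[3] = {
 *		{ .addr = 0x400000, .node = 1 },
 *		{ .addr = 0x600000, .node = 0 },
 *		{ .node = MAX_NUMNODES },
 *	};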
1169 */ 1170 static int do_move_page_to_node_array(struct mm_struct *mm, 1171 struct page_to_node *pm, 1172 int migrate_all) 1173 { 1174 int err; 1175 struct page_to_node *pp; 1176 LIST_HEAD(pagelist); 1177 1178 down_read(&mm->mmap_sem); 1179 1180 /* 1181 * Build a list of pages to migrate 1182 */ 1183 for (pp = pm; pp->node != MAX_NUMNODES; pp++) { 1184 struct vm_area_struct *vma; 1185 struct page *page; 1186 1187 err = -EFAULT; 1188 vma = find_vma(mm, pp->addr); 1189 if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma)) 1190 goto set_status; 1191 1192 page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT); 1193 1194 err = PTR_ERR(page); 1195 if (IS_ERR(page)) 1196 goto set_status; 1197 1198 err = -ENOENT; 1199 if (!page) 1200 goto set_status; 1201 1202 /* Use PageReserved to check for zero page */ 1203 if (PageReserved(page)) 1204 goto put_and_set; 1205 1206 pp->page = page; 1207 err = page_to_nid(page); 1208 1209 if (err == pp->node) 1210 /* 1211 * Node already in the right place 1212 */ 1213 goto put_and_set; 1214 1215 err = -EACCES; 1216 if (page_mapcount(page) > 1 && 1217 !migrate_all) 1218 goto put_and_set; 1219 1220 if (PageHuge(page)) { 1221 isolate_huge_page(page, &pagelist); 1222 goto put_and_set; 1223 } 1224 1225 err = isolate_lru_page(page); 1226 if (!err) { 1227 list_add_tail(&page->lru, &pagelist); 1228 inc_zone_page_state(page, NR_ISOLATED_ANON + 1229 page_is_file_cache(page)); 1230 } 1231 put_and_set: 1232 /* 1233 * Either remove the duplicate refcount from 1234 * isolate_lru_page() or drop the page ref if it was 1235 * not isolated. 1236 */ 1237 put_page(page); 1238 set_status: 1239 pp->status = err; 1240 } 1241 1242 err = 0; 1243 if (!list_empty(&pagelist)) { 1244 err = migrate_pages(&pagelist, new_page_node, 1245 (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL); 1246 if (err) 1247 putback_movable_pages(&pagelist); 1248 } 1249 1250 up_read(&mm->mmap_sem); 1251 return err; 1252 } 1253 1254 /* 1255 * Migrate an array of page address onto an array of nodes and fill 1256 * the corresponding array of status. 
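 *
 * For scale: on a 64-bit build with 4K pages, struct page_to_node is
 * 24 bytes, so one page holds 170 entries and each chunk moves up to
 * 169 user pages plus the end marker.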
1257 */ 1258 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, 1259 unsigned long nr_pages, 1260 const void __user * __user *pages, 1261 const int __user *nodes, 1262 int __user *status, int flags) 1263 { 1264 struct page_to_node *pm; 1265 unsigned long chunk_nr_pages; 1266 unsigned long chunk_start; 1267 int err; 1268 1269 err = -ENOMEM; 1270 pm = (struct page_to_node *)__get_free_page(GFP_KERNEL); 1271 if (!pm) 1272 goto out; 1273 1274 migrate_prep(); 1275 1276 /* 1277 * Store a chunk of page_to_node array in a page, 1278 * but keep the last one as a marker 1279 */ 1280 chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1; 1281 1282 for (chunk_start = 0; 1283 chunk_start < nr_pages; 1284 chunk_start += chunk_nr_pages) { 1285 int j; 1286 1287 if (chunk_start + chunk_nr_pages > nr_pages) 1288 chunk_nr_pages = nr_pages - chunk_start; 1289 1290 /* fill the chunk pm with addrs and nodes from user-space */ 1291 for (j = 0; j < chunk_nr_pages; j++) { 1292 const void __user *p; 1293 int node; 1294 1295 err = -EFAULT; 1296 if (get_user(p, pages + j + chunk_start)) 1297 goto out_pm; 1298 pm[j].addr = (unsigned long) p; 1299 1300 if (get_user(node, nodes + j + chunk_start)) 1301 goto out_pm; 1302 1303 err = -ENODEV; 1304 if (node < 0 || node >= MAX_NUMNODES) 1305 goto out_pm; 1306 1307 if (!node_state(node, N_MEMORY)) 1308 goto out_pm; 1309 1310 err = -EACCES; 1311 if (!node_isset(node, task_nodes)) 1312 goto out_pm; 1313 1314 pm[j].node = node; 1315 } 1316 1317 /* End marker for this chunk */ 1318 pm[chunk_nr_pages].node = MAX_NUMNODES; 1319 1320 /* Migrate this chunk */ 1321 err = do_move_page_to_node_array(mm, pm, 1322 flags & MPOL_MF_MOVE_ALL); 1323 if (err < 0) 1324 goto out_pm; 1325 1326 /* Return status information */ 1327 for (j = 0; j < chunk_nr_pages; j++) 1328 if (put_user(pm[j].status, status + j + chunk_start)) { 1329 err = -EFAULT; 1330 goto out_pm; 1331 } 1332 } 1333 err = 0; 1334 1335 out_pm: 1336 free_page((unsigned long)pm); 1337 out: 1338 return err; 1339 } 1340 1341 /* 1342 * Determine the nodes of an array of pages and store it in an array of status. 1343 */ 1344 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, 1345 const void __user **pages, int *status) 1346 { 1347 unsigned long i; 1348 1349 down_read(&mm->mmap_sem); 1350 1351 for (i = 0; i < nr_pages; i++) { 1352 unsigned long addr = (unsigned long)(*pages); 1353 struct vm_area_struct *vma; 1354 struct page *page; 1355 int err = -EFAULT; 1356 1357 vma = find_vma(mm, addr); 1358 if (!vma || addr < vma->vm_start) 1359 goto set_status; 1360 1361 page = follow_page(vma, addr, 0); 1362 1363 err = PTR_ERR(page); 1364 if (IS_ERR(page)) 1365 goto set_status; 1366 1367 err = -ENOENT; 1368 /* Use PageReserved to check for zero page */ 1369 if (!page || PageReserved(page)) 1370 goto set_status; 1371 1372 err = page_to_nid(page); 1373 set_status: 1374 *status = err; 1375 1376 pages++; 1377 status++; 1378 } 1379 1380 up_read(&mm->mmap_sem); 1381 } 1382 1383 /* 1384 * Determine the nodes of a user array of pages and store it in 1385 * a user array of status. 
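 *
 * This is the nodes == NULL flavour of the syscall; from userspace the
 * query looks roughly like the sketch below (using the numaif.h
 * wrapper, buf being any mapped address):
 *
 *	void *pages[1] = { buf };
 *	int status[1];
 *	long rc = move_pages(0, 1, pages, NULL, status, 0);
 *
 * On success status[0] holds the node id of the backing page, or a
 * negative errno such as -ENOENT if nothing is mapped there.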
1386 */ 1387 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, 1388 const void __user * __user *pages, 1389 int __user *status) 1390 { 1391 #define DO_PAGES_STAT_CHUNK_NR 16 1392 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; 1393 int chunk_status[DO_PAGES_STAT_CHUNK_NR]; 1394 1395 while (nr_pages) { 1396 unsigned long chunk_nr; 1397 1398 chunk_nr = nr_pages; 1399 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR) 1400 chunk_nr = DO_PAGES_STAT_CHUNK_NR; 1401 1402 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages))) 1403 break; 1404 1405 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); 1406 1407 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) 1408 break; 1409 1410 pages += chunk_nr; 1411 status += chunk_nr; 1412 nr_pages -= chunk_nr; 1413 } 1414 return nr_pages ? -EFAULT : 0; 1415 } 1416 1417 /* 1418 * Move a list of pages in the address space of the currently executing 1419 * process. 1420 */ 1421 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, 1422 const void __user * __user *, pages, 1423 const int __user *, nodes, 1424 int __user *, status, int, flags) 1425 { 1426 const struct cred *cred = current_cred(), *tcred; 1427 struct task_struct *task; 1428 struct mm_struct *mm; 1429 int err; 1430 nodemask_t task_nodes; 1431 1432 /* Check flags */ 1433 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL)) 1434 return -EINVAL; 1435 1436 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 1437 return -EPERM; 1438 1439 /* Find the mm_struct */ 1440 rcu_read_lock(); 1441 task = pid ? find_task_by_vpid(pid) : current; 1442 if (!task) { 1443 rcu_read_unlock(); 1444 return -ESRCH; 1445 } 1446 get_task_struct(task); 1447 1448 /* 1449 * Check if this process has the right to modify the specified 1450 * process. The right exists if the process has administrative 1451 * capabilities, superuser privileges or the same 1452 * userid as the target process. 1453 */ 1454 tcred = __task_cred(task); 1455 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && 1456 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && 1457 !capable(CAP_SYS_NICE)) { 1458 rcu_read_unlock(); 1459 err = -EPERM; 1460 goto out; 1461 } 1462 rcu_read_unlock(); 1463 1464 err = security_task_movememory(task); 1465 if (err) 1466 goto out; 1467 1468 task_nodes = cpuset_mems_allowed(task); 1469 mm = get_task_mm(task); 1470 put_task_struct(task); 1471 1472 if (!mm) 1473 return -EINVAL; 1474 1475 if (nodes) 1476 err = do_pages_move(mm, task_nodes, nr_pages, pages, 1477 nodes, status, flags); 1478 else 1479 err = do_pages_stat(mm, nr_pages, pages, status); 1480 1481 mmput(mm); 1482 return err; 1483 1484 out: 1485 put_task_struct(task); 1486 return err; 1487 } 1488 1489 /* 1490 * Call migration functions in the vma_ops that may prepare 1491 * memory in a vm for migration. migration functions may perform 1492 * the migration for vmas that do not have an underlying page struct. 1493 */ 1494 int migrate_vmas(struct mm_struct *mm, const nodemask_t *to, 1495 const nodemask_t *from, unsigned long flags) 1496 { 1497 struct vm_area_struct *vma; 1498 int err = 0; 1499 1500 for (vma = mm->mmap; vma && !err; vma = vma->vm_next) { 1501 if (vma->vm_ops && vma->vm_ops->migrate) { 1502 err = vma->vm_ops->migrate(vma, to, from, flags); 1503 if (err) 1504 break; 1505 } 1506 } 1507 return err; 1508 } 1509 1510 #ifdef CONFIG_NUMA_BALANCING 1511 /* 1512 * Returns true if this is a safe migration target node for misplaced NUMA 1513 * pages. 
Currently it only checks the watermarks which crude 1514 */ 1515 static bool migrate_balanced_pgdat(struct pglist_data *pgdat, 1516 unsigned long nr_migrate_pages) 1517 { 1518 int z; 1519 for (z = pgdat->nr_zones - 1; z >= 0; z--) { 1520 struct zone *zone = pgdat->node_zones + z; 1521 1522 if (!populated_zone(zone)) 1523 continue; 1524 1525 if (!zone_reclaimable(zone)) 1526 continue; 1527 1528 /* Avoid waking kswapd by allocating pages_to_migrate pages. */ 1529 if (!zone_watermark_ok(zone, 0, 1530 high_wmark_pages(zone) + 1531 nr_migrate_pages, 1532 0, 0)) 1533 continue; 1534 return true; 1535 } 1536 return false; 1537 } 1538 1539 static struct page *alloc_misplaced_dst_page(struct page *page, 1540 unsigned long data, 1541 int **result) 1542 { 1543 int nid = (int) data; 1544 struct page *newpage; 1545 1546 newpage = alloc_pages_exact_node(nid, 1547 (GFP_HIGHUSER_MOVABLE | GFP_THISNODE | 1548 __GFP_NOMEMALLOC | __GFP_NORETRY | 1549 __GFP_NOWARN) & 1550 ~GFP_IOFS, 0); 1551 1552 return newpage; 1553 } 1554 1555 /* 1556 * page migration rate limiting control. 1557 * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs 1558 * window of time. Default here says do not migrate more than 1280M per second. 1559 * If a node is rate-limited then PTE NUMA updates are also rate-limited. However 1560 * as it is faults that reset the window, pte updates will happen unconditionally 1561 * if there has not been a fault since @pteupdate_interval_millisecs after the 1562 * throttle window closed. 1563 */ 1564 static unsigned int migrate_interval_millisecs __read_mostly = 100; 1565 static unsigned int pteupdate_interval_millisecs __read_mostly = 1000; 1566 static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT); 1567 1568 /* Returns true if NUMA migration is currently rate limited */ 1569 bool migrate_ratelimited(int node) 1570 { 1571 pg_data_t *pgdat = NODE_DATA(node); 1572 1573 if (time_after(jiffies, pgdat->numabalancing_migrate_next_window + 1574 msecs_to_jiffies(pteupdate_interval_millisecs))) 1575 return false; 1576 1577 if (pgdat->numabalancing_migrate_nr_pages < ratelimit_pages) 1578 return false; 1579 1580 return true; 1581 } 1582 1583 /* Returns true if the node is migrate rate-limited after the update */ 1584 static bool numamigrate_update_ratelimit(pg_data_t *pgdat, 1585 unsigned long nr_pages) 1586 { 1587 /* 1588 * Rate-limit the amount of data that is being migrated to a node. 1589 * Optimal placement is no good if the memory bus is saturated and 1590 * all the time is being spent migrating! 1591 */ 1592 if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) { 1593 spin_lock(&pgdat->numabalancing_migrate_lock); 1594 pgdat->numabalancing_migrate_nr_pages = 0; 1595 pgdat->numabalancing_migrate_next_window = jiffies + 1596 msecs_to_jiffies(migrate_interval_millisecs); 1597 spin_unlock(&pgdat->numabalancing_migrate_lock); 1598 } 1599 if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) { 1600 trace_mm_numa_migrate_ratelimit(current, pgdat->node_id, 1601 nr_pages); 1602 return true; 1603 } 1604 1605 /* 1606 * This is an unlocked non-atomic update so errors are possible. 1607 * The consequences are failing to migrate when we potentiall should 1608 * have which is not severe enough to warrant locking. If it is ever 1609 * a problem, it can be converted to a per-cpu counter. 
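 *
 * For scale, the defaults above work out as follows, assuming 4K pages:
 * ratelimit_pages = 128 << (20 - 12) = 32768 pages, i.e. 128MB per
 * 100ms window, which is the "1280M per second" figure quoted in the
 * rate limiting comment above.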
1610 */ 1611 pgdat->numabalancing_migrate_nr_pages += nr_pages; 1612 return false; 1613 } 1614 1615 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) 1616 { 1617 int page_lru; 1618 1619 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); 1620 1621 /* Avoid migrating to a node that is nearly full */ 1622 if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) 1623 return 0; 1624 1625 if (isolate_lru_page(page)) 1626 return 0; 1627 1628 /* 1629 * migrate_misplaced_transhuge_page() skips page migration's usual 1630 * check on page_count(), so we must do it here, now that the page 1631 * has been isolated: a GUP pin, or any other pin, prevents migration. 1632 * The expected page count is 3: 1 for page's mapcount and 1 for the 1633 * caller's pin and 1 for the reference taken by isolate_lru_page(). 1634 */ 1635 if (PageTransHuge(page) && page_count(page) != 3) { 1636 putback_lru_page(page); 1637 return 0; 1638 } 1639 1640 page_lru = page_is_file_cache(page); 1641 mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru, 1642 hpage_nr_pages(page)); 1643 1644 /* 1645 * Isolating the page has taken another reference, so the 1646 * caller's reference can be safely dropped without the page 1647 * disappearing underneath us during migration. 1648 */ 1649 put_page(page); 1650 return 1; 1651 } 1652 1653 bool pmd_trans_migrating(pmd_t pmd) 1654 { 1655 struct page *page = pmd_page(pmd); 1656 return PageLocked(page); 1657 } 1658 1659 void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd) 1660 { 1661 struct page *page = pmd_page(*pmd); 1662 wait_on_page_locked(page); 1663 } 1664 1665 /* 1666 * Attempt to migrate a misplaced page to the specified destination 1667 * node. Caller is expected to have an elevated reference count on 1668 * the page that will be dropped by this function before returning. 1669 */ 1670 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, 1671 int node) 1672 { 1673 pg_data_t *pgdat = NODE_DATA(node); 1674 int isolated; 1675 int nr_remaining; 1676 LIST_HEAD(migratepages); 1677 1678 /* 1679 * Don't migrate file pages that are mapped in multiple processes 1680 * with execute permissions as they are probably shared libraries. 1681 */ 1682 if (page_mapcount(page) != 1 && page_is_file_cache(page) && 1683 (vma->vm_flags & VM_EXEC)) 1684 goto out; 1685 1686 /* 1687 * Rate-limit the amount of data that is being migrated to a node. 1688 * Optimal placement is no good if the memory bus is saturated and 1689 * all the time is being spent migrating! 1690 */ 1691 if (numamigrate_update_ratelimit(pgdat, 1)) 1692 goto out; 1693 1694 isolated = numamigrate_isolate_page(pgdat, page); 1695 if (!isolated) 1696 goto out; 1697 1698 list_add(&page->lru, &migratepages); 1699 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page, 1700 node, MIGRATE_ASYNC, MR_NUMA_MISPLACED); 1701 if (nr_remaining) { 1702 if (!list_empty(&migratepages)) { 1703 list_del(&page->lru); 1704 dec_zone_page_state(page, NR_ISOLATED_ANON + 1705 page_is_file_cache(page)); 1706 putback_lru_page(page); 1707 } 1708 isolated = 0; 1709 } else 1710 count_vm_numa_event(NUMA_PAGE_MIGRATE); 1711 BUG_ON(!list_empty(&migratepages)); 1712 return isolated; 1713 1714 out: 1715 put_page(page); 1716 return 0; 1717 } 1718 #endif /* CONFIG_NUMA_BALANCING */ 1719 1720 #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE) 1721 /* 1722 * Migrates a THP to a given target node. 
page must be locked and is unlocked 1723 * before returning. 1724 */ 1725 int migrate_misplaced_transhuge_page(struct mm_struct *mm, 1726 struct vm_area_struct *vma, 1727 pmd_t *pmd, pmd_t entry, 1728 unsigned long address, 1729 struct page *page, int node) 1730 { 1731 spinlock_t *ptl; 1732 pg_data_t *pgdat = NODE_DATA(node); 1733 int isolated = 0; 1734 struct page *new_page = NULL; 1735 struct mem_cgroup *memcg = NULL; 1736 int page_lru = page_is_file_cache(page); 1737 unsigned long mmun_start = address & HPAGE_PMD_MASK; 1738 unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE; 1739 pmd_t orig_entry; 1740 1741 /* 1742 * Rate-limit the amount of data that is being migrated to a node. 1743 * Optimal placement is no good if the memory bus is saturated and 1744 * all the time is being spent migrating! 1745 */ 1746 if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR)) 1747 goto out_dropref; 1748 1749 new_page = alloc_pages_node(node, 1750 (GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER); 1751 if (!new_page) 1752 goto out_fail; 1753 1754 isolated = numamigrate_isolate_page(pgdat, page); 1755 if (!isolated) { 1756 put_page(new_page); 1757 goto out_fail; 1758 } 1759 1760 if (mm_tlb_flush_pending(mm)) 1761 flush_tlb_range(vma, mmun_start, mmun_end); 1762 1763 /* Prepare a page as a migration target */ 1764 __set_page_locked(new_page); 1765 SetPageSwapBacked(new_page); 1766 1767 /* anon mapping, we can simply copy page->mapping to the new page: */ 1768 new_page->mapping = page->mapping; 1769 new_page->index = page->index; 1770 migrate_page_copy(new_page, page); 1771 WARN_ON(PageLRU(new_page)); 1772 1773 /* Recheck the target PMD */ 1774 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1775 ptl = pmd_lock(mm, pmd); 1776 if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) { 1777 fail_putback: 1778 spin_unlock(ptl); 1779 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1780 1781 /* Reverse changes made by migrate_page_copy() */ 1782 if (TestClearPageActive(new_page)) 1783 SetPageActive(page); 1784 if (TestClearPageUnevictable(new_page)) 1785 SetPageUnevictable(page); 1786 mlock_migrate_page(page, new_page); 1787 1788 unlock_page(new_page); 1789 put_page(new_page); /* Free it */ 1790 1791 /* Retake the callers reference and putback on LRU */ 1792 get_page(page); 1793 putback_lru_page(page); 1794 mod_zone_page_state(page_zone(page), 1795 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); 1796 1797 goto out_unlock; 1798 } 1799 1800 /* 1801 * Traditional migration needs to prepare the memcg charge 1802 * transaction early to prevent the old page from being 1803 * uncharged when installing migration entries. Here we can 1804 * save the potential rollback and start the charge transfer 1805 * only when migration is already known to end successfully. 1806 */ 1807 mem_cgroup_prepare_migration(page, new_page, &memcg); 1808 1809 orig_entry = *pmd; 1810 entry = mk_pmd(new_page, vma->vm_page_prot); 1811 entry = pmd_mkhuge(entry); 1812 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1813 1814 /* 1815 * Clear the old entry under pagetable lock and establish the new PTE. 1816 * Any parallel GUP will either observe the old page blocking on the 1817 * page lock, block on the page table lock or observe the new page. 1818 * The SetPageUptodate on the new page and page_add_new_anon_rmap 1819 * guarantee the copy is visible before the pagetable update. 
1820 */ 1821 flush_cache_range(vma, mmun_start, mmun_end); 1822 page_add_new_anon_rmap(new_page, vma, mmun_start); 1823 pmdp_clear_flush(vma, mmun_start, pmd); 1824 set_pmd_at(mm, mmun_start, pmd, entry); 1825 flush_tlb_range(vma, mmun_start, mmun_end); 1826 update_mmu_cache_pmd(vma, address, &entry); 1827 1828 if (page_count(page) != 2) { 1829 set_pmd_at(mm, mmun_start, pmd, orig_entry); 1830 flush_tlb_range(vma, mmun_start, mmun_end); 1831 update_mmu_cache_pmd(vma, address, &entry); 1832 page_remove_rmap(new_page); 1833 goto fail_putback; 1834 } 1835 1836 page_remove_rmap(page); 1837 1838 /* 1839 * Finish the charge transaction under the page table lock to 1840 * prevent split_huge_page() from dividing up the charge 1841 * before it's fully transferred to the new page. 1842 */ 1843 mem_cgroup_end_migration(memcg, page, new_page, true); 1844 spin_unlock(ptl); 1845 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1846 1847 unlock_page(new_page); 1848 unlock_page(page); 1849 put_page(page); /* Drop the rmap reference */ 1850 put_page(page); /* Drop the LRU isolation reference */ 1851 1852 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR); 1853 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR); 1854 1855 mod_zone_page_state(page_zone(page), 1856 NR_ISOLATED_ANON + page_lru, 1857 -HPAGE_PMD_NR); 1858 return isolated; 1859 1860 out_fail: 1861 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); 1862 out_dropref: 1863 ptl = pmd_lock(mm, pmd); 1864 if (pmd_same(*pmd, entry)) { 1865 entry = pmd_mknonnuma(entry); 1866 set_pmd_at(mm, mmun_start, pmd, entry); 1867 update_mmu_cache_pmd(vma, address, &entry); 1868 } 1869 spin_unlock(ptl); 1870 1871 out_unlock: 1872 unlock_page(page); 1873 put_page(page); 1874 return 0; 1875 } 1876 #endif /* CONFIG_NUMA_BALANCING */ 1877 1878 #endif /* CONFIG_NUMA */ 1879
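/*
 * For illustration only (a simplified sketch, not part of this file): the
 * NUMA hinting fault handler in mm/memory.c ends up calling
 * migrate_misplaced_page() roughly as follows, with target_nid coming
 * from the task's memory policy:
 *
 *	if (target_nid != -1) {
 *		migrated = migrate_misplaced_page(page, vma, target_nid);
 *		if (migrated)
 *			page_nid = target_nid;
 *	}
 */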