/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * Isolate one page from the LRU lists. If successful put it onto
 * the indicated list with elevated page count.
 *
 * Result:
 *  -EBUSY: page not on LRU list
 *  0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page) && get_page_unless_zero(page)) {
			ret = 0;
			ClearPageLRU(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
			list_add_tail(&page->lru, pagelist);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

static inline void move_to_lru(struct page *page)
{
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else {
		lru_cache_add(page);
	}
	put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
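 *
 * Typical caller flow, as an illustrative sketch only (the allocator
 * callback my_new_page() below is hypothetical; see do_move_pages()
 * further down in this file for a real user):
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	... for each candidate page ...
 *		isolate_lru_page(page, &pagelist);
 *	migrate_pages(&pagelist, my_new_page, private);
 *
 * migrate_pages() calls putback_lru_pages() on whatever could not be
 * migrated, so the list is drained again by the time it returns.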
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		move_to_lru(page);
		count++;
	}
	return count;
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static void remove_migration_pte(struct vm_area_struct *vma,
		struct page *old, struct page *new)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	unsigned long addr = page_address_in_vma(new, vma);

	if (addr == -EFAULT)
		return;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return;

	ptep = pte_offset_map(pmd, addr);

	if (!is_swap_pte(*ptep)) {
		pte_unmap(ptep);
		return;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
		goto out;

	/*
	 * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
	 * Failure is not an option here: we're now expected to remove every
	 * migration pte, and will cause crashes otherwise. Normally this
	 * is not an issue: mem_cgroup_prepare_migration bumped up the old
	 * page_cgroup count for safety, that's now attached to the new page,
	 * so this charge should just be another increment of the count, to
	 * keep in balance with rmap.c's mem_cgroup_uncharging. But if
	 * there's been a force_empty, those reference counts may no longer
	 * be reliable, and this charge can actually fail: oh well, we don't
	 * make the situation any worse by proceeding as if it had succeeded.
	 */
	mem_cgroup_charge(new, mm, GFP_ATOMIC);

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, pte);

out:
	pte_unmap_unlock(ptep, ptl);
}

/*
 * Note that remove_file_migration_ptes will only work on regular mappings;
 * nonlinear mappings do not use migration entries.
 */
static void remove_file_migration_ptes(struct page *old, struct page *new)
{
	struct vm_area_struct *vma;
	struct address_space *mapping = page_mapping(new);
	struct prio_tree_iter iter;
	pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	if (!mapping)
		return;

	spin_lock(&mapping->i_mmap_lock);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
		remove_migration_pte(vma, old, new);

	spin_unlock(&mapping->i_mmap_lock);
}

/*
 * Must hold mmap_sem lock on at least one of the vmas containing
 * the page so that the anon_vma cannot vanish.
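 *
 * For background: the migration ptes removed here were installed by
 * try_to_unmap(page, 1), roughly along these lines (simplified sketch
 * of the code in mm/rmap.c, not a verbatim copy):
 *
 *	entry = make_migration_entry(page, pte_write(pteval));
 *	set_pte_at(mm, address, ptep, swp_entry_to_pte(entry));
 *
 * remove_migration_pte() above performs the inverse transformation,
 * pointing the pte at the new page.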
 */
static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned long mapping;

	mapping = (unsigned long)new->mapping;

	if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
		return;

	/*
	 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
	 */
	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
		remove_migration_pte(vma, old, new);

	spin_unlock(&anon_vma->lock);
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	if (PageAnon(new))
		remove_anon_migration_ptes(old, new);
	else
		remove_file_migration_ptes(old, new);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	get_page(page);
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 *  1 for anonymous pages without a mapping
 *  2 for pages with a mapping
 *  3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	write_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	if (page_count(page) != 2 + !!PagePrivate(page) ||
			(struct page *)radix_tree_deref_slot(pslot) != page) {
		write_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
#ifdef CONFIG_SWAP
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}
#endif

	radix_tree_replace_slot(pslot, newpage);

	/*
	 * Drop cache reference from old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
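	 *
	 * The non-atomic __dec/__inc vmstat variants should be safe here
	 * because tree_lock is held with interrupts disabled (see the
	 * write_lock_irq() above).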
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);

	write_unlock_irq(&mapping->tree_lock);
	if (!PageSwapCache(newpage)) {
		mem_cgroup_uncharge_cache_page(page);
	}

	return 0;
}

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

#ifdef CONFIG_SWAP
	ClearPageSwapCache(page);
#endif
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
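 *
 * A filesystem whose PagePrivate data consists only of buffer_heads can
 * use this helper directly from its address_space_operations, e.g.
 * (illustrative sketch, my_fs_aops is a hypothetical name):
 *
 *	static const struct address_space_operations my_fs_aops = {
 *		...
 *		.migratepage	= buffer_migrate_page,
 *	};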
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.nonblocking = 1,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);
	if (rc < 0)
		/* I/O Error writing */
		return -EIO;

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (PagePrivate(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 */
static int move_to_new_page(struct page *newpage, struct page *page)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
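	 * Since the caller just allocated newpage and nothing else can
	 * know about it yet, the trylock below is expected to always
	 * succeed; a failure would indicate a leaked reference, hence
	 * the BUG().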
	 */
	if (TestSetPageLocked(newpage))
		BUG();

	/* Prepare mapping for the new page.*/
	newpage->index = page->index;
	newpage->mapping = page->mapping;

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems
		 * should provide a migration function. Anonymous
		 * pages are part of swap space which also has its
		 * own migration function. This is the most common
		 * path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page);
	else
		rc = fallback_migrate_page(mapping, newpage, page);

	if (!rc) {
		remove_migration_ptes(page, newpage);
	} else
		newpage->mapping = NULL;

	unlock_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int rcu_locked = 0;
	int charge = 0;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1)
		/* page was freed from under us. So we are done. */
		goto move_newpage;

	charge = mem_cgroup_prepare_migration(page, newpage);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto move_newpage;
	}
	/* prepare cgroup just returns 0 or -ENOMEM */
	BUG_ON(charge);

	rc = -EAGAIN;
	if (TestSetPageLocked(page)) {
		if (!force)
			goto move_newpage;
		lock_page(page);
	}

	if (PageWriteback(page)) {
		if (!force)
			goto unlock;
		wait_on_page_writeback(page);
	}
	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that the anon_vma is freed while we migrate a page.
	 * This rcu_read_lock() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of
	 * page_lock(): file caches may use writepage() or lock_page() during
	 * migration, so only anon pages need to be handled here.
	 */
	if (PageAnon(page)) {
		rcu_read_lock();
		rcu_locked = 1;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		if (!PageAnon(page) && PagePrivate(page)) {
			/*
			 * Go direct to try_to_free_buffers() here because
			 * a) that's what try_to_release_page() would do anyway
			 * b) we may be under rcu_read_lock() here, so we can't
			 *    use GFP_KERNEL which is what try_to_release_page()
			 *    needs to be effective.
			 */
			try_to_free_buffers(page);
		}
		goto rcu_unlock;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, 1);

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page);

	if (rc)
		remove_migration_ptes(page, page);
rcu_unlock:
	if (rcu_locked)
		rcu_read_unlock();

unlock:

	unlock_page(page);

	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		move_to_lru(page);
	}

move_newpage:
	if (!charge)
		mem_cgroup_end_migration(newpage);
	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	move_to_lru(newpage);
	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that, given a page to be migrated and the private data, determines
 * the target of the move and allocates the new page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore. All pages will be
 * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for(pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2);

			switch(rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	putback_lru_pages(from);

	if (rc)
		return rc;

	return nr_failed + retry;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
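 *
 * The array is terminated by an entry whose node field is MAX_NUMNODES.
 * For example, a two page move might be described as follows
 * (illustrative only; addr0/addr1 stand for user virtual addresses):
 *
 *	struct page_to_node pm[] = {
 *		{ .addr = addr0, .node = 1 },
 *		{ .addr = addr1, .node = 0 },
 *		{ .node = MAX_NUMNODES },	<- end marker
 *	};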
 */
static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
				int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	migrate_prep();
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		/*
		 * A valid page pointer that will not match any of the
		 * pages that will be moved.
		 */
		pp->page = ZERO_PAGE(0);

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		if (PageReserved(page))		/* Check for zero page */
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page, &pagelist);
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm);
	else
		err = -ENOENT;

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Determine the nodes of a list of pages. The addr in the pm array
 * must have been set to the virtual address of which we want to determine
 * the node number.
 */
static int do_pages_stat(struct mm_struct *mm, struct page_to_node *pm)
{
	down_read(&mm->mmap_sem);

	for ( ; pm->node != MAX_NUMNODES; pm++) {
		struct vm_area_struct *vma;
		struct page *page;
		int err;

		err = -EFAULT;
		vma = find_vma(mm, pm->addr);
		if (!vma)
			goto set_status;

		page = follow_page(vma, pm->addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		pm->status = err;
	}

	up_read(&mm->mmap_sem);
	return 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
			const void __user * __user *pages,
			const int __user *nodes,
			int __user *status, int flags)
{
	int err = 0;
	int i;
	struct task_struct *task;
	nodemask_t task_nodes;
	struct mm_struct *mm;
	struct page_to_node *pm = NULL;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process.
	 * The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out2;
	}

	err = security_task_movememory(task);
	if (err)
		goto out2;


	task_nodes = cpuset_mems_allowed(task);

	/* Limit nr_pages so that the multiplication may not overflow */
	if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
		err = -E2BIG;
		goto out2;
	}

	pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
	if (!pm) {
		err = -ENOMEM;
		goto out2;
	}

	/*
	 * Get parameters from user space and initialize the pm
	 * array. Return various errors if the user did something wrong.
	 */
	for (i = 0; i < nr_pages; i++) {
		const void __user *p;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out;

		pm[i].addr = (unsigned long)p;
		if (nodes) {
			int node;

			if (get_user(node, nodes + i))
				goto out;

			err = -ENODEV;
			if (!node_state(node, N_HIGH_MEMORY))
				goto out;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out;

			pm[i].node = node;
		} else
			pm[i].node = 0;	/* anything to not match MAX_NUMNODES */
	}
	/* End marker */
	pm[nr_pages].node = MAX_NUMNODES;

	if (nodes)
		err = do_move_pages(mm, pm, flags & MPOL_MF_MOVE_ALL);
	else
		err = do_pages_stat(mm, pm);

	if (err >= 0)
		/* Return status information */
		for (i = 0; i < nr_pages; i++)
			if (put_user(pm[i].status, status + i))
				err = -EFAULT;

out:
	vfree(pm);
out2:
	mmput(mm);
	return err;
}

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for(vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
#endif