/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * Isolate one page from the LRU lists. If successful put it onto
 * the indicated list with elevated page count.
 *
 * Result:
 *	-EBUSY: page not on LRU list
 *	0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page) && get_page_unless_zero(page)) {
			ret = 0;
			ClearPageLRU(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
			list_add_tail(&page->lru, pagelist);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

static inline void move_to_lru(struct page *page)
{
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else {
		lru_cache_add(page);
	}
	put_page(page);
}
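/*
 * An illustrative sketch of the usual calling sequence around these
 * helpers and putback_lru_pages() below (error handling omitted; the
 * caller picks the pages):
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (isolate_lru_page(page, &pagelist) == 0) {
 *		... page is now off the LRU with an extra reference ...
 *	}
 *	... migrate_pages(&pagelist, ...) ...
 *	putback_lru_pages(&pagelist);
 */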
/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		move_to_lru(page);
		count++;
	}
	return count;
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static void remove_migration_pte(struct vm_area_struct *vma,
		struct page *old, struct page *new)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	unsigned long addr = page_address_in_vma(new, vma);

	if (addr == -EFAULT)
		return;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return;

	ptep = pte_offset_map(pmd, addr);

	if (!is_swap_pte(*ptep)) {
		pte_unmap(ptep);
		return;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
		goto out;

	/*
	 * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
	 * Failure is not an option here: we're now expected to remove every
	 * migration pte, and will cause crashes otherwise. Normally this
	 * is not an issue: mem_cgroup_prepare_migration bumped up the old
	 * page_cgroup count for safety, that's now attached to the new page,
	 * so this charge should just be another increment of the count, to
	 * keep in balance with rmap.c's mem_cgroup uncharging. But if
	 * there's been a force_empty, those reference counts may no longer
	 * be reliable, and this charge can actually fail: oh well, we don't
	 * make the situation any worse by proceeding as if it had succeeded.
	 */
	mem_cgroup_charge(new, mm, GFP_ATOMIC);

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, pte);

out:
	pte_unmap_unlock(ptep, ptl);
}

/*
 * Note that remove_file_migration_ptes will only work on regular
 * mappings. Nonlinear mappings do not use migration entries.
 */
static void remove_file_migration_ptes(struct page *old, struct page *new)
{
	struct vm_area_struct *vma;
	struct address_space *mapping = page_mapping(new);
	struct prio_tree_iter iter;
	pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	if (!mapping)
		return;

	spin_lock(&mapping->i_mmap_lock);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
		remove_migration_pte(vma, old, new);

	spin_unlock(&mapping->i_mmap_lock);
}
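/*
 * For orientation, a minimal sketch of how a migration pte comes to
 * exist in the first place (this is what try_to_unmap() does when
 * asked to set up migration entries; see include/linux/swapops.h for
 * the helpers):
 *
 *	swp_entry_t entry;
 *
 *	entry = make_migration_entry(page, pte_write(pteval));
 *	set_pte_at(mm, addr, ptep, swp_entry_to_pte(entry));
 *
 * remove_migration_pte() above performs the inverse, decoding the
 * entry with pte_to_swp_entry() and migration_entry_to_page() and
 * installing a real pte for the new page.
 */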
/*
 * Must hold mmap_sem lock on at least one of the vmas containing
 * the page so that the anon_vma cannot vanish.
 */
static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned long mapping;

	mapping = (unsigned long)new->mapping;

	if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
		return;

	/*
	 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
	 */
	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
		remove_migration_pte(vma, old, new);

	spin_unlock(&anon_vma->lock);
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	if (PageAnon(new))
		remove_anon_migration_ptes(old, new);
	else
		remove_file_migration_ptes(old, new);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	get_page(page);
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}
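/*
 * Sketch of the call site in do_swap_page() (mm/memory.c), shown here
 * for orientation only; the fault handler backs out and the fault is
 * retried once the page is unlocked:
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (is_migration_entry(entry)) {
 *		migration_entry_wait(mm, pmd, address);
 *		goto out;
 *	}
 */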
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	write_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	if (page_count(page) != 2 + !!PagePrivate(page) ||
			(struct page *)radix_tree_deref_slot(pslot) != page) {
		write_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
#ifdef CONFIG_SWAP
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}
#endif

	radix_tree_replace_slot(pslot, newpage);

	/*
	 * Drop cache reference from old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);

	write_unlock_irq(&mapping->tree_lock);

	return 0;
}

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become
		 * dirty, whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

#ifdef CONFIG_SWAP
	ClearPageSwapCache(page);
#endif
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 * Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
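/*
 * Filesystems whose pages carry no fs-private state can point their
 * address_space_operations directly at migrate_page(). A hypothetical
 * example (the foo_* names are made up):
 *
 *	static const struct address_space_operations foo_aops = {
 *		.readpage	= foo_readpage,
 *		.writepage	= foo_writepage,
 *		.migratepage	= migrate_page,
 *	};
 */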
#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.nonblocking = 1,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);
	if (rc < 0)
		/* I/O Error writing */
		return -EIO;

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (PagePrivate(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}
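/*
 * Block filesystems whose pages carry buffer_heads typically wire up
 * buffer_migrate_page() instead, along the lines of (hypothetical
 * aops, modeled on what the extN filesystems do):
 *
 *	.migratepage	= buffer_migrate_page,
 *
 * Mappings with no ->migratepage method at all go through
 * fallback_migrate_page() above, which writes dirty pages back and
 * drops buffers before retrying with migrate_page().
 */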
/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 */
static int move_to_new_page(struct page *newpage, struct page *page)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (TestSetPageLocked(newpage))
		BUG();

	/* Prepare mapping for the new page. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems
		 * should provide a migration function. Anonymous
		 * pages are part of swap space which also has its
		 * own migration function. This is the most common
		 * path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page);
	else
		rc = fallback_migrate_page(mapping, newpage, page);

	if (!rc) {
		mem_cgroup_page_migration(page, newpage);
		remove_migration_ptes(page, newpage);
	} else
		newpage->mapping = NULL;

	unlock_page(newpage);

	return rc;
}
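/*
 * For reference, the method dispatched above has the signature
 *
 *	int (*migratepage)(struct address_space *mapping,
 *			struct page *newpage, struct page *page);
 *
 * Both pages are locked when it is called, and it returns 0 on
 * success or a negative errno (e.g. -EAGAIN to retry) on failure.
 */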
/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int rcu_locked = 0;
	int charge = 0;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1)
		/* page was freed from under us. So we are done. */
		goto move_newpage;

	rc = -EAGAIN;
	if (TestSetPageLocked(page)) {
		if (!force)
			goto move_newpage;
		lock_page(page);
	}

	if (PageWriteback(page)) {
		if (!force)
			goto unlock;
		wait_on_page_writeback(page);
	}
	/*
	 * try_to_unmap() drops page->mapcount to 0 here, so we cannot
	 * notice if the anon_vma is freed while we migrate the page.
	 * This rcu_read_lock() delays freeing of the anon_vma until the
	 * end of migration. File cache pages are no problem because they
	 * are protected by page_lock(): file caches use writepage() or
	 * lock_page() during migration, so only anonymous pages need
	 * this care.
	 */
	if (PageAnon(page)) {
		rcu_read_lock();
		rcu_locked = 1;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		if (!PageAnon(page) && PagePrivate(page)) {
			/*
			 * Go direct to try_to_free_buffers() here because
			 * a) that's what try_to_release_page() would do anyway
			 * b) we may be under rcu_read_lock() here, so we can't
			 *    use GFP_KERNEL which is what try_to_release_page()
			 *    needs to be effective.
			 */
			try_to_free_buffers(page);
		}
		goto rcu_unlock;
	}

	charge = mem_cgroup_prepare_migration(page);
	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, 1);

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page);

	if (rc) {
		remove_migration_ptes(page, page);
		if (charge)
			mem_cgroup_end_migration(page);
	} else if (charge)
		mem_cgroup_end_migration(newpage);
rcu_unlock:
	if (rcu_locked)
		rcu_read_unlock();

unlock:
	unlock_page(page);

	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		move_to_lru(page);
	}

move_newpage:
	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	move_to_lru(newpage);
	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a callback
 * that, given a page to be migrated and the private data, allocates
 * the target page for the move.
 *
 * The function returns after 10 attempts or if no pages are movable
 * anymore because the list has become empty or because no retryable
 * pages remain. All pages will be returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	putback_lru_pages(from);

	if (rc)
		return rc;

	return nr_failed + retry;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}
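/*
 * A minimal sketch of driving migrate_pages() with an allocator
 * callback (hypothetical names; the in-tree callers are
 * do_move_pages() below and the mempolicy code in mm/mempolicy.c):
 *
 *	static struct page *new_node_page(struct page *page,
 *			unsigned long node, int **result)
 *	{
 *		return alloc_pages_node((int)node, GFP_HIGHUSER_MOVABLE, 0);
 *	}
 *
 *	...
 *	err = migrate_pages(&pagelist, new_node_page, target_node);
 */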
/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 */
static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
				int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	migrate_prep();
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		/*
		 * A valid page pointer that will not match any of the
		 * pages that will be moved.
		 */
		pp->page = ZERO_PAGE(0);

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET);
		err = -ENOENT;
		if (!page)
			goto set_status;

		if (PageReserved(page))		/* Check for zero page */
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page, &pagelist);
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm);
	else
		err = -ENOENT;

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Determine the nodes of a list of pages. The addr in the pm array
 * must be set to the virtual address whose backing page's node
 * number we want to determine.
 */
static int do_pages_stat(struct mm_struct *mm, struct page_to_node *pm)
{
	down_read(&mm->mmap_sem);

	for ( ; pm->node != MAX_NUMNODES; pm++) {
		struct vm_area_struct *vma;
		struct page *page;
		int err;

		err = -EFAULT;
		vma = find_vma(mm, pm->addr);
		if (!vma)
			goto set_status;

		page = follow_page(vma, pm->addr, 0);
		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		pm->status = err;
	}

	up_read(&mm->mmap_sem);
	return 0;
}
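/*
 * Userspace reaches sys_move_pages() below through the move_pages(2)
 * system call, e.g. via the libnuma wrapper. An illustrative sketch
 * (error handling omitted):
 *
 *	#include <numaif.h>
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * Passing nodes == NULL turns the call into a query that only fills
 * in status[] with the node each page currently resides on.
 */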
/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
			const void __user * __user *pages,
			const int __user *nodes,
			int __user *status, int flags)
{
	int err = 0;
	int i;
	struct task_struct *task;
	nodemask_t task_nodes;
	struct mm_struct *mm;
	struct page_to_node *pm = NULL;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out2;
	}

	err = security_task_movememory(task);
	if (err)
		goto out2;

	task_nodes = cpuset_mems_allowed(task);

	/* Limit nr_pages so that the multiplication may not overflow */
	if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
		err = -E2BIG;
		goto out2;
	}

	pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
	if (!pm) {
		err = -ENOMEM;
		goto out2;
	}

	/*
	 * Get parameters from user space and initialize the pm
	 * array. Return various errors if the user did something wrong.
	 */
	for (i = 0; i < nr_pages; i++) {
		const void __user *p;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out;

		pm[i].addr = (unsigned long)p;
		if (nodes) {
			int node;

			if (get_user(node, nodes + i))
				goto out;

			err = -ENODEV;
			if (!node_state(node, N_HIGH_MEMORY))
				goto out;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out;

			pm[i].node = node;
		} else
			pm[i].node = 0;	/* anything to not match MAX_NUMNODES */
	}
	/* End marker */
	pm[nr_pages].node = MAX_NUMNODES;

	if (nodes)
		err = do_move_pages(mm, pm, flags & MPOL_MF_MOVE_ALL);
	else
		err = do_pages_stat(mm, pm);

	if (err >= 0)
		/* Return status information */
		for (i = 0; i < nr_pages; i++)
			if (put_user(pm[i].status, status + i))
				err = -EFAULT;

out:
	vfree(pm);
out2:
	mmput(mm);
	return err;
}
#endif

/*
 * Call the migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
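/*
 * A hypothetical sketch of a vma-level migration hook (made-up foo_*
 * names), for a mapping whose backing memory has no page structs; the
 * parameter order mirrors the call made above:
 *
 *	static int foo_vma_migrate(struct vm_area_struct *vma,
 *			const nodemask_t *to, const nodemask_t *from,
 *			unsigned long flags)
 *	{
 *		... move the backing store to a node in *to ...
 *		return 0;
 *	}
 *
 *	static struct vm_operations_struct foo_vm_ops = {
 *		.migrate	= foo_vma_migrate,
 *	};
 */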