/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/swapops.h>

#include "internal.h"

/* The maximum number of pages to take off the LRU for migration */
#define MIGRATE_CHUNK_SIZE 256

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * Isolate one page from the LRU lists. If successful put it onto
 * the indicated list with elevated page count.
 *
 * Result:
 *  -EBUSY: page not on LRU list
 *  0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			ret = 0;
			get_page(page);
			ClearPageLRU(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
			list_add_tail(&page->lru, pagelist);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * migrate_prep() needs to be called after we have compiled the list of pages
 * to be migrated using isolate_lru_page() but before we begin a series of calls
 * to migrate_pages().
 */
int migrate_prep(void)
{
	/* Must have swap device for migration */
	if (nr_swap_pages <= 0)
		return -ENODEV;

	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

static inline void move_to_lru(struct page *page)
{
	list_del(&page->lru);
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else {
		lru_cache_add(page);
	}
	put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		move_to_lru(page);
		count++;
	}
	return count;
}
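
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * the intended calling sequence for the isolation API above, following the
 * ordering documented at migrate_prep(). The "moved" and "failed" lists
 * match the signature of migrate_pages() below; passing a NULL "to" list
 * swaps the pages out instead of migrating them.
 *
 *	LIST_HEAD(pagelist);
 *	LIST_HEAD(moved);
 *	LIST_HEAD(failed);
 *
 *	isolate_lru_page(page, &pagelist);	(repeated for each page)
 *	if (migrate_prep() == 0)
 *		migrate_pages(&pagelist, NULL, &moved, &failed);
 *	putback_lru_pages(&moved);
 *	putback_lru_pages(&failed);
 *	putback_lru_pages(&pagelist);		(pages never attempted)
 */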

/*
 * Swap out a single page.
 * The page is locked upon entry, unlocked on exit.
 */
static int swap_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (page_mapped(page) && mapping)
		if (try_to_unmap(page, 1) != SWAP_SUCCESS)
			goto unlock_retry;

	if (PageDirty(page)) {
		/* Page is dirty, try to write it out here */
		switch(pageout(page, mapping)) {
		case PAGE_KEEP:
		case PAGE_ACTIVATE:
			goto unlock_retry;

		case PAGE_SUCCESS:
			goto retry;

		case PAGE_CLEAN:
			; /* try to free the page below */
		}
	}

	if (PagePrivate(page)) {
		if (!try_to_release_page(page, GFP_KERNEL) ||
		    (!mapping && page_count(page) == 1))
			goto unlock_retry;
	}

	if (remove_mapping(mapping, page)) {
		/* Success */
		unlock_page(page);
		return 0;
	}

unlock_retry:
	unlock_page(page);

retry:
	return -EAGAIN;
}

/*
 * Remove references for a page and establish the new page with the correct
 * basic settings to be able to stop accesses to the page.
 */
int migrate_page_remove_references(struct page *newpage,
				struct page *page, int nr_refs)
{
	struct address_space *mapping = page_mapping(page);
	struct page **radix_pointer;

	/*
	 * Avoid doing any of the following work if the page count
	 * indicates that the page is in use or truncate has removed
	 * the page.
	 */
	if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
		return -EAGAIN;

	/*
	 * Establish swap ptes for anonymous pages or destroy pte
	 * maps for files.
	 *
	 * In order to reestablish file backed mappings the fault handlers
	 * will take the radix tree_lock which may then be used to stop
	 * processes from accessing this page until the new page is ready.
	 *
	 * A process accessing via a swap pte (an anonymous page) will take a
	 * page_lock on the old page which will block the process until the
	 * migration attempt is complete. At that time the PageSwapCache bit
	 * will be examined. If the page was migrated then the PageSwapCache
	 * bit will be clear and the operation to retrieve the page will be
	 * retried which will find the new page in the radix tree. Then a new
	 * direct mapping may be generated based on the radix tree contents.
	 *
	 * If the page was not migrated then the PageSwapCache bit
	 * is still set and the operation may continue.
	 */
	if (try_to_unmap(page, 1) == SWAP_FAIL)
		/* A vma has VM_LOCKED set -> permanent failure */
		return -EPERM;

	/*
	 * Give up if we were unable to remove all mappings.
	 */
	if (page_mapcount(page))
		return -EAGAIN;

	write_lock_irq(&mapping->tree_lock);

	radix_pointer = (struct page **)radix_tree_lookup_slot(
						&mapping->page_tree,
						page_index(page));

	if (!page_mapping(page) || page_count(page) != nr_refs ||
			*radix_pointer != page) {
		write_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 *
	 * Certain minimal information about a page must be available
	 * in order for other subsystems to properly handle the page if they
	 * find it through the radix tree update before we are finished
	 * copying the page.
	 */
	get_page(newpage);
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	*radix_pointer = newpage;
	__put_page(page);
	write_unlock_irq(&mapping->tree_lock);

	return 0;
}
EXPORT_SYMBOL(migrate_page_remove_references);
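
/*
 * Note on nr_refs (added commentary, derived from the callers below):
 * nr_refs is the number of references an otherwise idle page is expected
 * to hold, so the first check above amounts to requiring
 *
 *	page_mapcount(page) + nr_refs == page_count(page)
 *
 * migrate_page() passes nr_refs == 2: the page cache reference plus the
 * reference taken by isolate_lru_page(). buffer_migrate_page() passes
 * nr_refs == 3 because attach_page_buffers() holds one additional
 * reference on behalf of the attached buffer heads.
 */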

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		set_page_dirty(newpage);
	}

	ClearPageSwapCache(page);
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}
EXPORT_SYMBOL(migrate_page_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_remove_references(newpage, page, 2);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);

	/*
	 * Remove auxiliary swap entries and replace
	 * them with real ptes.
	 *
	 * Note that a real pte entry will allow processes that are not
	 * waiting on the page lock to use the new page via the page tables
	 * before the new page is unlocked.
	 */
	remove_from_swap(newpage);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct page *newpage, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct buffer_head *bh, *head;
	int rc;

	if (!mapping)
		return -EAGAIN;

	if (!page_has_buffers(page))
		return migrate_page(newpage, page);

	head = page_buffers(page);

	rc = migrate_page_remove_references(newpage, page, 3);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
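
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * a filesystem opts into direct migration by filling in the migratepage
 * method of its address_space_operations. The other method names here are
 * placeholders.
 *
 *	static struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= buffer_migrate_page,
 *	};
 *
 * Mappings that must never be migrated can set
 * .migratepage = fail_migrate_page instead. If no method is provided at
 * all, migrate_pages() below falls back to writing out dirty pages and
 * migrating only clean, bufferless pages.
 */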

/*
 * migrate_pages
 *
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the isolated pages
 * can be moved to. If the second list is NULL then all
 * pages are swapped out.
 *
 * The function returns after 10 passes, or earlier if no pages are
 * movable any more because "to" has become empty or because no
 * retryable pages remain.
 *
 * Return: Number of pages not migrated when "to" ran empty.
 */
int migrate_pages(struct list_head *from, struct list_head *to,
		  struct list_head *moved, struct list_head *failed)
{
	int retry;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

redo:
	retry = 0;

	list_for_each_entry_safe(page, page2, from, lru) {
		struct page *newpage = NULL;
		struct address_space *mapping;

		cond_resched();

		rc = 0;
		if (page_count(page) == 1)
			/* page was freed from under us. So we are done. */
			goto next;

		if (to && list_empty(to))
			break;

		/*
		 * Skip locked pages during the first few passes to give the
		 * functions holding the lock time to release the page. Later
		 * we use lock_page() to have a higher chance of acquiring
		 * the lock.
		 */
		rc = -EAGAIN;
		if (pass > 2)
			lock_page(page);
		else
			if (TestSetPageLocked(page))
				goto next;

		/*
		 * Only wait on writeback if we have already done a pass where
		 * we may have triggered writeouts for lots of pages.
		 */
		if (pass > 0) {
			wait_on_page_writeback(page);
		} else {
			if (PageWriteback(page))
				goto unlock_page;
		}

		/*
		 * Anonymous pages must have swap cache references otherwise
		 * the information contained in the page maps cannot be
		 * preserved.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!add_to_swap(page, GFP_KERNEL)) {
				rc = -ENOMEM;
				goto unlock_page;
			}
		}

		if (!to) {
			rc = swap_page(page);
			goto next;
		}

		newpage = lru_to_page(to);
		lock_page(newpage);

		/*
		 * Pages are properly locked and writeback is complete.
		 * Try to migrate the page.
		 */
		mapping = page_mapping(page);
		if (!mapping)
			goto unlock_both;

		if (mapping->a_ops->migratepage) {
			/*
			 * Most pages have a mapping and most filesystems
			 * should provide a migration function. Anonymous
			 * pages are part of swap space which also has its
			 * own migration function. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migratepage(newpage, page);
			goto unlock_both;
		}

		/* Make sure the dirty bit is up to date */
		if (try_to_unmap(page, 1) == SWAP_FAIL) {
			rc = -EPERM;
			goto unlock_both;
		}

		if (page_mapcount(page)) {
			rc = -EAGAIN;
			goto unlock_both;
		}

		/*
		 * Default handling if a filesystem does not provide
		 * a migration function. We can only migrate clean
		 * pages so try to write out any dirty pages first.
		 */
		if (PageDirty(page)) {
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
			case PAGE_ACTIVATE:
				goto unlock_both;

			case PAGE_SUCCESS:
				unlock_page(newpage);
				goto next;

			case PAGE_CLEAN:
				; /* try to migrate the page below */
			}
		}

		/*
		 * Buffers are managed in a filesystem specific way.
		 * We must have no buffers or drop them.
		 */
		if (!page_has_buffers(page) ||
		    try_to_release_page(page, GFP_KERNEL)) {
			rc = migrate_page(newpage, page);
			goto unlock_both;
		}

		/*
		 * On early passes with mapped pages simply
		 * retry. There may be a lock held for some
		 * buffers that may go away. Later
		 * swap them out.
		 */
		if (pass > 4) {
			/*
			 * Persistently unable to drop buffers..... As a
			 * measure of last resort we fall back to
			 * swap_page().
			 */
			unlock_page(newpage);
			newpage = NULL;
			rc = swap_page(page);
			goto next;
		}

unlock_both:
		unlock_page(newpage);

unlock_page:
		unlock_page(page);

next:
		if (rc == -EAGAIN) {
			retry++;
		} else if (rc) {
			/* Permanent failure */
			list_move(&page->lru, failed);
			nr_failed++;
		} else {
			if (newpage) {
				/* Successful migration. Return page to LRU */
				move_to_lru(newpage);
			}
			list_move(&page->lru, moved);
		}
	}
	if (retry && pass++ < 10)
		goto redo;

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return nr_failed + retry;
}
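
/*
 * Added commentary (summarizing the pass logic above): each pass over
 * "from" becomes progressively more aggressive.
 *
 *	pass 0:		trylock the page, skip pages under writeback
 *	passes 1-2:	trylock the page, wait for writeback to finish
 *	pass 3 on:	sleep on the page lock (lock_page)
 *	pass 5 on:	if buffers still cannot be dropped, fall back
 *			to swap_page() as a last resort
 *
 * Pages that keep returning -EAGAIN are counted in "retry" and revisited
 * on the next pass; any other error moves the page to "failed".
 */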

/*
 * Migrate the list 'pagelist' of pages to a certain destination.
 *
 * Specify the destination with either a non-NULL vma or dest >= 0.
 * Return the number of pages not migrated or an error code.
 */
int migrate_pages_to(struct list_head *pagelist,
			struct vm_area_struct *vma, int dest)
{
	LIST_HEAD(newlist);
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int err = 0;
	unsigned long offset = 0;
	int nr_pages;
	struct page *page;
	struct list_head *p;

redo:
	nr_pages = 0;
	list_for_each(p, pagelist) {
		if (vma) {
			/*
			 * The address passed to alloc_page_vma is used to
			 * generate the proper interleave behavior. We fake
			 * the address here by an increasing offset in order
			 * to get the proper distribution of pages.
			 *
			 * No decision has been made as to which page
			 * a certain old page is moved to so we cannot
			 * specify the correct address.
			 */
			page = alloc_page_vma(GFP_HIGHUSER, vma,
					offset + vma->vm_start);
			offset += PAGE_SIZE;
		}
		else
			page = alloc_pages_node(dest, GFP_HIGHUSER, 0);

		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		list_add_tail(&page->lru, &newlist);
		nr_pages++;
		if (nr_pages > MIGRATE_CHUNK_SIZE)
			break;
	}
	err = migrate_pages(pagelist, &newlist, &moved, &failed);

	putback_lru_pages(&moved);	/* Call release pages instead ?? */

	if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
		goto redo;
out:
	/* Return leftover allocated pages */
	while (!list_empty(&newlist)) {
		page = list_entry(newlist.next, struct page, lru);
		list_del(&page->lru);
		__free_page(page);
	}
	list_splice(&failed, pagelist);
	if (err < 0)
		return err;

	/* Calculate number of leftover pages */
	nr_pages = 0;
	list_for_each(p, pagelist)
		nr_pages++;
	return nr_pages;
}
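
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * how a caller, for example the NUMA memory policy code, might move a
 * task's pages to a destination node. collect_pages() is a hypothetical
 * stand-in for the caller's page-gathering step built on
 * isolate_lru_page(), and dest_node is a hypothetical node id.
 *
 *	LIST_HEAD(pagelist);
 *	int left = 0;
 *
 *	collect_pages(mm, &pagelist);	(isolate_lru_page() per page)
 *	if (!list_empty(&pagelist) && migrate_prep() == 0)
 *		left = migrate_pages_to(&pagelist, NULL, dest_node);
 *	putback_lru_pages(&pagelist);	(failed and leftover pages)
 */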