/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

struct scan_control {
	/* Ask refill_inactive_zone, or shrink_cache to scan this many pages */
	unsigned long nr_to_scan;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Incremented by the number of pages reclaimed */
	unsigned long nr_reclaimed;

	unsigned long nr_mapped;	/* From page_state */

	/* Ask shrink_caches, or shrink_zone to scan at this priority */
	unsigned int priority;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
	 * In this context, it doesn't matter that we scan the
	 * whole list at once. */
	int swap_cluster_max;
};

/*
 * The list of shrinker callbacks used to apply pressure to
 * ageable caches.
 */
struct shrinker {
	shrinker_t		shrinker;
	struct list_head	list;
	int			seeks;	/* seeks to recreate an obj */
	long			nr;	/* objs pending delete */
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif
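
/*
 * Illustrative note: lru_to_page() takes the entry at the *tail* of the
 * list (list->prev).  Pages are added at the head of the LRU lists, so
 * the tail holds the coldest page and reclaim naturally consumes pages
 * in approximate least-recently-used order:
 *
 *	struct page *victim = lru_to_page(&zone->inactive_list);
 */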

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
static long total_memory;

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
	struct shrinker *shrinker;

	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
	if (shrinker) {
		shrinker->shrinker = theshrinker;
		shrinker->seeks = seeks;
		shrinker->nr = 0;
		down_write(&shrinker_rwsem);
		list_add_tail(&shrinker->list, &shrinker_list);
		up_write(&shrinker_rwsem);
	}
	return shrinker;
}
EXPORT_SYMBOL(set_shrinker);

/*
 * Remove one
 */
void remove_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker);
}
EXPORT_SYMBOL(remove_shrinker);

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
int shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages)
{
	struct shrinker *shrinker;
	int ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);

		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "%s: nr=%ld\n",
					__FUNCTION__, shrinker->nr);
			shrinker->nr = max_pass;
		}

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrinker)(0, gfp_mask);
			shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			mod_page_state(slabs_scanned, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}
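
/*
 * Illustrative worked example of the balancing arithmetic above,
 * assuming a cache registered with seeks == 2, a scan of 128 LRU
 * pages, 10,000 freeable objects (max_pass) and 100,000 eligible
 * LRU pages:
 *
 *	delta = (4 * 128) / 2		=    256
 *	delta = 256 * 10000		= 2560000
 *	delta = 2560000 / 100001	=     25 (approx)
 *
 * i.e. roughly the same *percentage* of the cache is aged (25/10000)
 * as of the LRU (128/100000, scaled by the seek cost).  Remainders
 * below SHRINK_BATCH (128) are carried over in shrinker->nr rather
 * than dropped, so small pressure still accumulates.
 */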

/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}

static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
	unlock_page(page);
}
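
/*
 * Illustrative sketch of how the error propagates, assuming writeback
 * errors are reported through the generic wait helpers: a later
 * filemap_fdatawait() (reached from fsync()) tests and clears these
 * bits, so userspace eventually sees the failure:
 *
 *	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 *		ret = -ENOSPC;
 *	if (test_and_clear_bit(AS_EIO, &mapping->flags))
 *		ret = -EIO;
 */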

/*
 * pageout is called by shrink_list() for each dirty page. Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in generic_file_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 * See swapfile.c:page_queue_congested().
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (PagePrivate(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __FUNCTION__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.nonblocking = 1,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}
		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}

		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

static int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return 0;		/* truncate got there first */

	write_lock_irq(&mapping->tree_lock);

	/*
	 * The non-racy check for a busy page.  It is critical to check
	 * PageDirty _after_ making sure that the page is freeable and
	 * not in use by anybody.	(pagecache + us == 2)
	 */
	if (unlikely(page_count(page) != 2))
		goto cannot_free;
	smp_rmb();
	if (unlikely(PageDirty(page)))
		goto cannot_free;

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		write_unlock_irq(&mapping->tree_lock);
		swap_free(swap);
		__put_page(page);	/* The pagecache ref */
		return 1;
	}

	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	__put_page(page);
	return 1;

cannot_free:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}
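
/*
 * Illustrative refcount accounting for the page_count(page) != 2 test
 * above, assuming an otherwise idle pagecache page:
 *
 *	+1  the pagecache (radix tree) reference
 *	+1  the reference taken when the page was isolated from the LRU
 *	--------------------------------------------------------------
 *	 2  => nobody else is using the page, safe to free
 *
 * Any third reference (a racing lookup, a pending pagevec, an O_DIRECT
 * pin) pushes the count past 2 and we back out via cannot_free.
 */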

/*
 * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
 */
static int shrink_list(struct list_head *page_list, struct scan_control *sc)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	int reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;

		BUG_ON(PageActive(page));

		sc->nr_scanned++;
		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		if (PageWriteback(page))
			goto keep_locked;

		referenced = page_referenced(page, 1);
		/* In active use or really unfreeable?  Activate it. */
		if (referenced && page_mapping_inuse(page))
			goto activate_locked;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!add_to_swap(page, GFP_ATOMIC))
				goto activate_locked;
		}
#endif /* CONFIG_SWAP */

		mapping = page_mapping(page);
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (laptop_mode && !sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (TestSetPageLocked(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!remove_mapping(mapping, page))
			goto keep_locked;

free_it:
		unlock_page(page);
		reclaimed++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	mod_page_state(pgactivate, pgactivate);
	sc->nr_reclaimed += reclaimed;
	return reclaimed;
}
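
/*
 * Illustrative note on the "double the slab pressure" increment above,
 * assuming a batch in which 32 pages are scanned and 20 of them are
 * mapped or in swapcache: sc->nr_scanned grows by 32 + 20 = 52, and
 * that inflated count is what the reclaim paths later pass to
 * shrink_slab() as `scanned'.  Mapped pages thus translate into extra
 * slab aging, shrinking kernel caches in preference to swapping out
 * user memory.
 */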

#ifdef CONFIG_MIGRATION
static inline void move_to_lru(struct page *page)
{
	list_del(&page->lru);
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else {
		lru_cache_add(page);
	}
	put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		move_to_lru(page);
		count++;
	}
	return count;
}

/*
 * swapout a single page
 * page is locked upon entry, unlocked on exit
 */
static int swap_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (page_mapped(page) && mapping)
		if (try_to_unmap(page) != SWAP_SUCCESS)
			goto unlock_retry;

	if (PageDirty(page)) {
		/* Page is dirty, try to write it out here */
		switch (pageout(page, mapping)) {
		case PAGE_KEEP:
		case PAGE_ACTIVATE:
			goto unlock_retry;

		case PAGE_SUCCESS:
			goto retry;

		case PAGE_CLEAN:
			; /* try to free the page below */
		}
	}

	if (PagePrivate(page)) {
		if (!try_to_release_page(page, GFP_KERNEL) ||
		    (!mapping && page_count(page) == 1))
			goto unlock_retry;
	}

	if (remove_mapping(mapping, page)) {
		/* Success */
		unlock_page(page);
		return 0;
	}

unlock_retry:
	unlock_page(page);

retry:
	return -EAGAIN;
}

/*
 * migrate_pages
 *
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the pages isolated
 * can be moved to. If the second list is NULL then all
 * pages are swapped out.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore.
 *
 * SIMPLIFIED VERSION: This implementation of migrate_pages
 * is only swapping out pages and never touches the second
 * list. The direct migration patchset
 * extends this function to avoid the use of swap.
 *
 * Return: Number of pages not migrated when "to" ran empty.
 */
int migrate_pages(struct list_head *from, struct list_head *to,
		  struct list_head *moved, struct list_head *failed)
{
	int retry;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

redo:
	retry = 0;

	list_for_each_entry_safe(page, page2, from, lru) {
		cond_resched();

		rc = 0;
		if (page_count(page) == 1)
			/* page was freed from under us. So we are done. */
			goto next;

		/*
		 * Skip locked pages during the first two passes to give the
		 * functions holding the lock time to release the page. Later we
		 * use lock_page() to have a higher chance of acquiring the
		 * lock.
		 */
		rc = -EAGAIN;
		if (pass > 2)
			lock_page(page);
		else
			if (TestSetPageLocked(page))
				goto next;

		/*
		 * Only wait on writeback if we have already done a pass where
		 * we may have triggered writeouts for lots of pages.
		 */
		if (pass > 0) {
			wait_on_page_writeback(page);
		} else {
			if (PageWriteback(page))
				goto unlock_page;
		}

		/*
		 * Anonymous pages must have swap cache references otherwise
		 * the information contained in the page maps cannot be
		 * preserved.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!add_to_swap(page, GFP_KERNEL)) {
				rc = -ENOMEM;
				goto unlock_page;
			}
		}

		/*
		 * Page is properly locked and writeback is complete.
		 * Try to migrate the page.
		 */
		rc = swap_page(page);
		goto next;

unlock_page:
		unlock_page(page);

next:
		if (rc == -EAGAIN) {
			retry++;
		} else if (rc) {
			/* Permanent failure */
			list_move(&page->lru, failed);
			nr_failed++;
		} else {
			/* Success */
			list_move(&page->lru, moved);
		}
	}
	if (retry && pass++ < 10)
		goto redo;

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return nr_failed + retry;
}
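
/*
 * Minimal usage sketch, assuming the caller has already filled a local
 * `pagelist' via isolate_lru_page() (the list names are illustrative):
 *
 *	LIST_HEAD(moved);
 *	LIST_HEAD(failed);
 *
 *	migrate_pages(&pagelist, NULL, &moved, &failed);
 *	putback_lru_pages(&moved);
 *	putback_lru_pages(&failed);
 *
 * Passing NULL for `to' selects the swap-out path implemented above;
 * everything ends up back on an LRU list either way.
 */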

static void lru_add_drain_per_cpu(void *dummy)
{
	lru_add_drain();
}

/*
 * Isolate one page from the LRU lists and put it on the
 * indicated list. Do necessary cache draining if the
 * page is not on the LRU lists yet.
 *
 * Result:
 *  0 = page not on LRU list
 *  1 = page removed from LRU list and added to the specified list.
 * -ENOENT = page is being freed elsewhere.
 */
int isolate_lru_page(struct page *page)
{
	int rc = 0;
	struct zone *zone = page_zone(page);

redo:
	spin_lock_irq(&zone->lru_lock);
	rc = __isolate_lru_page(page);
	if (rc == 1) {
		if (PageActive(page))
			del_page_from_active_list(zone, page);
		else
			del_page_from_inactive_list(zone, page);
	}
	spin_unlock_irq(&zone->lru_lock);
	if (rc == 0) {
		/*
		 * Maybe this page is still waiting for a cpu to drain it
		 * from one of the lru lists?
		 */
		rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
		if (rc == 0 && PageLRU(page))
			goto redo;
	}
	return rc;
}
#endif
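
/*
 * Illustrative note on the draining above: a page freshly added with
 * lru_cache_add() may still sit in a per-cpu pagevec with PageLRU not
 * yet set, so __isolate_lru_page() reports 0 ("not on LRU").
 * schedule_on_each_cpu() then flushes every cpu's pagevecs, and if the
 * page has since shown up on an LRU list (PageLRU set) the isolation
 * is retried via the redo label.
 */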

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 *
 * returns how many pages were moved onto *@dst.
 */
static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
			     struct list_head *dst, int *scanned)
{
	int nr_taken = 0;
	struct page *page;
	int scan = 0;

	while (scan++ < nr_to_scan && !list_empty(src)) {
		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		switch (__isolate_lru_page(page)) {
		case 1:
			/* Succeeded in isolating the page */
			list_move(&page->lru, dst);
			nr_taken++;
			break;
		case -ENOENT:
			/* Not possible to isolate */
			list_move(&page->lru, src);
			break;
		default:
			BUG();
		}
	}

	*scanned = scan;
	return nr_taken;
}

/*
 * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
 */
static void shrink_cache(struct zone *zone, struct scan_control *sc)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	int max_scan = sc->nr_to_scan;

	pagevec_init(&pvec, 1);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	while (max_scan > 0) {
		struct page *page;
		int nr_taken;
		int nr_scan;
		int nr_freed;

		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
					     &zone->inactive_list,
					     &page_list, &nr_scan);
		zone->nr_inactive -= nr_taken;
		zone->pages_scanned += nr_scan;
		spin_unlock_irq(&zone->lru_lock);

		if (nr_taken == 0)
			goto done;

		max_scan -= nr_scan;
		nr_freed = shrink_list(&page_list, sc);

		local_irq_disable();
		if (current_is_kswapd()) {
			__mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
			__mod_page_state(kswapd_steal, nr_freed);
		} else
			__mod_page_state_zone(zone, pgscan_direct, nr_scan);
		__mod_page_state_zone(zone, pgsteal, nr_freed);

		spin_lock(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = lru_to_page(&page_list);
			if (TestSetPageLRU(page))
				BUG();
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	}
	spin_unlock_irq(&zone->lru_lock);
done:
	pagevec_release(&pvec);
}
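
/*
 * Illustrative flow, assuming sc->nr_to_scan == 128 and
 * sc->swap_cluster_max == 32: the loop above takes zone->lru_lock four
 * times, each time pulling up to 32 pages off the tail of the inactive
 * list, then drops the lock and runs shrink_list() on the private
 * batch.  Holding the lock only while isolating keeps hold times on
 * the contended lock short.
 */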

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void
refill_inactive_zone(struct zone *zone, struct scan_control *sc)
{
	int pgmoved;
	int pgdeactivate = 0;
	int pgscanned;
	int nr_pages = sc->nr_to_scan;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
	struct page *page;
	struct pagevec pvec;
	int reclaim_mapped = 0;
	long mapped_ratio;
	long distress;
	long swap_tendency;

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
				    &l_hold, &pgscanned);
	zone->pages_scanned += pgscanned;
	zone->nr_active -= pgmoved;
	spin_unlock_irq(&zone->lru_lock);

	/*
	 * `distress' is a measure of how much trouble we're having reclaiming
	 * pages.  0 -> no problems.  100 -> great trouble.
	 */
	distress = 100 >> zone->prev_priority;

	/*
	 * The point of this algorithm is to decide when to start reclaiming
	 * mapped memory instead of just pagecache.  Work out how much memory
	 * is mapped.
	 */
	mapped_ratio = (sc->nr_mapped * 100) / total_memory;

	/*
	 * Now decide how eagerly we want to unmap pages.  The mapped
	 * ratio is downgraded - just because there's a lot of mapped memory
	 * doesn't necessarily mean that page reclaim isn't succeeding.
	 *
	 * The distress ratio is important - we don't want to start going oom.
	 *
	 * A 100% value of vm_swappiness overrides this algorithm altogether.
	 */
	swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

	/*
	 * Now use this metric to decide whether to start moving mapped memory
	 * onto the inactive list.
	 */
	if (swap_tendency >= 100)
		reclaim_mapped = 1;
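
	/*
	 * Worked example of the heuristic above, assuming the default
	 * vm_swappiness == 60, half of memory mapped (mapped_ratio == 50)
	 * and mild reclaim trouble (prev_priority == 4, so
	 * distress == 100 >> 4 == 6):
	 *
	 *	swap_tendency = 50/2 + 6 + 60 = 91	(< 100: spare mapped pages)
	 *
	 * Dropping prev_priority to 2 raises distress to 25 and
	 * swap_tendency to 110 >= 100, so mapped pages start being
	 * deactivated as well.
	 */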

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);
		if (page_mapped(page)) {
			if (!reclaim_mapped ||
			    (total_swap_pages == 0 && PageAnon(page)) ||
			    page_referenced(page, 0)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}
		list_add(&page->lru, &l_inactive);
	}

	pagevec_init(&pvec, 1);
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = lru_to_page(&l_inactive);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		if (TestSetPageLRU(page))
			BUG();
		if (!TestClearPageActive(page))
			BUG();
		list_move(&page->lru, &zone->inactive_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_inactive += pgmoved;
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_inactive += pgmoved;
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}

	pgmoved = 0;
	while (!list_empty(&l_active)) {
		page = lru_to_page(&l_active);
		prefetchw_prev_lru_page(page, &l_active, flags);
		if (TestSetPageLRU(page))
			BUG();
		BUG_ON(!PageActive(page));
		list_move(&page->lru, &zone->active_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_active += pgmoved;
			pgmoved = 0;
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_active += pgmoved;
	spin_unlock(&zone->lru_lock);

	__mod_page_state_zone(zone, pgrefill, pgscanned);
	__mod_page_state(pgdeactivate, pgdeactivate);
	local_irq_enable();

	pagevec_release(&pvec);
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static void
shrink_zone(struct zone *zone, struct scan_control *sc)
{
	unsigned long nr_active;
	unsigned long nr_inactive;

	atomic_inc(&zone->reclaim_in_progress);

	/*
	 * Add one to `nr_to_scan' just to make sure that the kernel will
	 * slowly sift through the active list.
	 */
	zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1;
	nr_active = zone->nr_scan_active;
	if (nr_active >= sc->swap_cluster_max)
		zone->nr_scan_active = 0;
	else
		nr_active = 0;

	zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1;
	nr_inactive = zone->nr_scan_inactive;
	if (nr_inactive >= sc->swap_cluster_max)
		zone->nr_scan_inactive = 0;
	else
		nr_inactive = 0;

	while (nr_active || nr_inactive) {
		if (nr_active) {
			sc->nr_to_scan = min(nr_active,
					(unsigned long)sc->swap_cluster_max);
			nr_active -= sc->nr_to_scan;
			refill_inactive_zone(zone, sc);
		}

		if (nr_inactive) {
			sc->nr_to_scan = min(nr_inactive,
					(unsigned long)sc->swap_cluster_max);
			nr_inactive -= sc->nr_to_scan;
			shrink_cache(zone, sc);
		}
	}

	throttle_vm_writeout();

	atomic_dec(&zone->reclaim_in_progress);
}
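
/*
 * Worked example of the scan-rate arithmetic above, assuming
 * zone->nr_inactive == 10000, sc->priority == DEF_PRIORITY (12) and
 * sc->swap_cluster_max == 32: each call adds (10000 >> 12) + 1 == 3
 * pages to zone->nr_scan_inactive.  Nothing is scanned until the
 * accumulated count reaches 32, so light pressure only trickles
 * through the list; at priority 5 the increment jumps to
 * (10000 >> 5) + 1 == 313 and the zone is scanned on every call.
 */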

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * Reclaimed pages are counted in sc->nr_reclaimed.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan, then give up on it.
 */
static void
shrink_caches(struct zone **zones, struct scan_control *sc)
{
	int i;

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!populated_zone(zone))
			continue;

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->temp_priority = sc->priority;
		if (zone->prev_priority > sc->priority)
			zone->prev_priority = sc->priority;

		if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
			continue;	/* Let kswapd poll it */

		shrink_zone(zone, sc);
	}
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 */
int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
{
	int priority;
	int ret = 0;
	int total_scanned = 0, total_reclaimed = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc;
	unsigned long lru_pages = 0;
	int i;

	sc.gfp_mask = gfp_mask;
	sc.may_writepage = 0;

	inc_page_state(allocstall);

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->temp_priority = DEF_PRIORITY;
		lru_pages += zone->nr_active + zone->nr_inactive;
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc.nr_mapped = read_page_state(nr_mapped);
		sc.nr_scanned = 0;
		sc.nr_reclaimed = 0;
		sc.priority = priority;
		sc.swap_cluster_max = SWAP_CLUSTER_MAX;
		if (!priority)
			disable_swap_token();
		shrink_caches(zones, &sc);
		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
		if (reclaim_state) {
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			reclaim_state->reclaimed_slab = 0;
		}
		total_scanned += sc.nr_scanned;
		total_reclaimed += sc.nr_reclaimed;
		if (total_reclaimed >= sc.swap_cluster_max) {
			ret = 1;
			goto out;
		}

		/*
		 * Try to write back as many pages as we just scanned.  This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice.  But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout.  So in laptop mode, write out the whole world.
		 */
		if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max / 2) {
			wakeup_pdflush(laptop_mode ? 0 : total_scanned);
			sc.may_writepage = 1;
		}

		/* Take a nap, wait for some writeback to complete */
		if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);
	}
out:
	for (i = 0; zones[i] != 0; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->prev_priority = zone->temp_priority;
	}
	return ret;
}
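
/*
 * Worked example of the loop above, assuming SWAP_CLUSTER_MAX == 32:
 * pdflush is woken once total_scanned exceeds 32 + 32/2 == 48 pages,
 * after which reclaim is also allowed to issue writepage itself
 * (sc.may_writepage = 1).  The function declares success and returns 1
 * as soon as 32 pages have been reclaimed in total; falling through
 * all 13 priorities (12..0) without that returns 0, which the page
 * allocator treats as grounds for the OOM path.
 */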

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * If `nr_pages' is non-zero then it is the number of pages which are to be
 * reclaimed, regardless of the zone occupancies.  This is a software suspend
 * special.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone as
 * dead and from now on, only perform a short scan.  Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > pages_high, but once a zone is found to have
 * free_pages <= pages_high, we scan that zone and the lower zones regardless
 * of the number of free pages in the lower zones.  This interoperates with
 * the page allocator fallback scheme to ensure that aging of pages is balanced
 * across the zones.
 */
static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order)
{
	int to_free = nr_pages;
	int all_zones_ok;
	int priority;
	int i;
	int total_scanned, total_reclaimed;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc;

loop_again:
	total_scanned = 0;
	total_reclaimed = 0;
	sc.gfp_mask = GFP_KERNEL;
	sc.may_writepage = 0;
	sc.nr_mapped = read_page_state(nr_mapped);

	inc_page_state(pageoutrun);

	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->temp_priority = DEF_PRIORITY;
	}
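
	/*
	 * Illustrative walk of the two scan directions below, assuming a
	 * node with ZONE_DMA, ZONE_NORMAL and ZONE_HIGHMEM where only
	 * ZONE_NORMAL is below pages_high: the downward search
	 * (highmem -> dma) stops at ZONE_NORMAL and records its index in
	 * end_zone; the reclaim loop then runs dma -> normal, so the
	 * lower zones that allocations fall back to are aged as well.
	 */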

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
		unsigned long lru_pages = 0;

		/* The swap token gets in the way of swapout... */
		if (!priority)
			disable_swap_token();

		all_zones_ok = 1;

		if (nr_pages == 0) {
			/*
			 * Scan in the highmem->dma direction for the highest
			 * zone which needs scanning
			 */
			for (i = pgdat->nr_zones - 1; i >= 0; i--) {
				struct zone *zone = pgdat->node_zones + i;

				if (!populated_zone(zone))
					continue;

				if (zone->all_unreclaimable &&
						priority != DEF_PRIORITY)
					continue;

				if (!zone_watermark_ok(zone, order,
						zone->pages_high, 0, 0)) {
					end_zone = i;
					goto scan;
				}
			}
			goto out;
		} else {
			end_zone = pgdat->nr_zones - 1;
		}
scan:
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			lru_pages += zone->nr_active + zone->nr_inactive;
		}

		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction.  This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;
			int nr_slab;

			if (!populated_zone(zone))
				continue;

			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
				continue;

			if (nr_pages == 0) {	/* Not software suspend */
				if (!zone_watermark_ok(zone, order,
						zone->pages_high, end_zone, 0))
					all_zones_ok = 0;
			}
			zone->temp_priority = priority;
			if (zone->prev_priority > priority)
				zone->prev_priority = priority;
			sc.nr_scanned = 0;
			sc.nr_reclaimed = 0;
			sc.priority = priority;
			sc.swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX;
			atomic_inc(&zone->reclaim_in_progress);
			shrink_zone(zone, &sc);
			atomic_dec(&zone->reclaim_in_progress);
			reclaim_state->reclaimed_slab = 0;
			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
						lru_pages);
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			total_reclaimed += sc.nr_reclaimed;
			total_scanned += sc.nr_scanned;
			if (zone->all_unreclaimable)
				continue;
			if (nr_slab == 0 && zone->pages_scanned >=
				    (zone->nr_active + zone->nr_inactive) * 4)
				zone->all_unreclaimable = 1;
			/*
			 * If we've done a decent amount of scanning and
			 * the reclaim ratio is low, start doing writepage
			 * even in laptop mode
			 */
			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
			    total_scanned > total_reclaimed + total_reclaimed / 2)
				sc.may_writepage = 1;
		}
		if (nr_pages && to_free > total_reclaimed)
			continue;	/* swsusp: need to do more work */
		if (all_zones_ok)
			break;		/* kswapd: all done */
		/*
		 * OK, kswapd is getting into trouble.  Take a nap, then take
		 * another pass across the zones.
		 */
		if (total_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);

		/*
		 * We do this so kswapd doesn't build up large priorities for
		 * example when it is freeing in parallel with allocators.  It
		 * matches the direct reclaim path behaviour in terms of impact
		 * on zone->*_priority.
		 */
		if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages))
			break;
	}
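
	/*
	 * Illustrative numbers for the all_unreclaimable test above,
	 * assuming a zone with 500 active + 500 inactive pages that is
	 * entirely mlocked: once zone->pages_scanned reaches
	 * (500 + 500) * 4 == 4000 with shrink_slab() also freeing
	 * nothing, the zone is marked all_unreclaimable and later passes
	 * merely poll it at DEF_PRIORITY instead of rescanning it at
	 * every priority.
	 */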
out:
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = zone->temp_priority;
	}
	if (!all_zones_ok) {
		cond_resched();
		goto loop_again;
	}

	return total_reclaimed;
}

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned long order;
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	cpumask_t cpumask;

	daemonize("kswapd%d", pgdat->node_id);
	cpumask = node_to_cpumask(pgdat->node_id);
	if (!cpus_empty(cpumask))
		set_cpus_allowed(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;

	order = 0;
	for ( ; ; ) {
		unsigned long new_order;

		try_to_freeze();

		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		new_order = pgdat->kswapd_max_order;
		pgdat->kswapd_max_order = 0;
		if (order < new_order) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation
			 */
			order = new_order;
		} else {
			schedule();
			order = pgdat->kswapd_max_order;
		}
		finish_wait(&pgdat->kswapd_wait, &wait);

		balance_pgdat(pgdat, 0, order);
	}
	return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	pgdat = zone->zone_pgdat;
	if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
		return;
	if (pgdat->kswapd_max_order < order)
		pgdat->kswapd_max_order = order;
	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
		return;
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&pgdat->kswapd_wait);
}
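
/*
 * Illustrative sequence of the watermarks involved, assuming the usual
 * pages_min < pages_low < pages_high ordering: the page allocator calls
 * wakeup_kswapd() when a zone drops below pages_low; kswapd then runs
 * balance_pgdat() until every zone in the node is back above
 * pages_high, providing some hysteresis before the next wakeup.
 */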

#ifdef CONFIG_PM
/*
 * Try to free `nr_pages' of memory, system-wide.  Returns the number of freed
 * pages.
 */
int shrink_all_memory(int nr_pages)
{
	pg_data_t *pgdat;
	int nr_to_free = nr_pages;
	int ret = 0;
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};

	current->reclaim_state = &reclaim_state;
	for_each_pgdat(pgdat) {
		int freed;

		freed = balance_pgdat(pgdat, nr_to_free, 0);
		ret += freed;
		nr_to_free -= freed;
		if (nr_to_free <= 0)
			break;
	}
	current->reclaim_state = NULL;
	return ret;
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	pg_data_t *pgdat;
	cpumask_t mask;

	if (action == CPU_ONLINE) {
		for_each_pgdat(pgdat) {
			mask = node_to_cpumask(pgdat->node_id);
			if (any_online_cpu(mask) != NR_CPUS)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __init kswapd_init(void)
{
	pg_data_t *pgdat;

	swap_setup();
	for_each_pgdat(pgdat)
		pgdat->kswapd
		= find_task_by_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL));
	total_memory = nr_free_pagecache_pages();
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)