1 /* 2 * linux/mm/vmscan.c 3 * 4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 5 * 6 * Swap reorganised 29.12.95, Stephen Tweedie. 7 * kswapd added: 7.1.96 sct 8 * Removed kswapd_ctl limits, and swap out as many pages as needed 9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel. 10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). 11 * Multiqueue VM started 5.8.00, Rik van Riel. 12 */ 13 14 #include <linux/mm.h> 15 #include <linux/module.h> 16 #include <linux/slab.h> 17 #include <linux/kernel_stat.h> 18 #include <linux/swap.h> 19 #include <linux/pagemap.h> 20 #include <linux/init.h> 21 #include <linux/highmem.h> 22 #include <linux/vmstat.h> 23 #include <linux/file.h> 24 #include <linux/writeback.h> 25 #include <linux/blkdev.h> 26 #include <linux/buffer_head.h> /* for try_to_release_page(), 27 buffer_heads_over_limit */ 28 #include <linux/mm_inline.h> 29 #include <linux/pagevec.h> 30 #include <linux/backing-dev.h> 31 #include <linux/rmap.h> 32 #include <linux/topology.h> 33 #include <linux/cpu.h> 34 #include <linux/cpuset.h> 35 #include <linux/notifier.h> 36 #include <linux/rwsem.h> 37 #include <linux/delay.h> 38 #include <linux/kthread.h> 39 #include <linux/freezer.h> 40 #include <linux/memcontrol.h> 41 #include <linux/delayacct.h> 42 #include <linux/sysctl.h> 43 44 #include <asm/tlbflush.h> 45 #include <asm/div64.h> 46 47 #include <linux/swapops.h> 48 49 #include "internal.h" 50 51 struct scan_control { 52 /* Incremented by the number of inactive pages that were scanned */ 53 unsigned long nr_scanned; 54 55 /* Number of pages freed so far during a call to shrink_zones() */ 56 unsigned long nr_reclaimed; 57 58 /* How many pages shrink_list() should reclaim */ 59 unsigned long nr_to_reclaim; 60 61 unsigned long hibernation_mode; 62 63 /* This context's GFP mask */ 64 gfp_t gfp_mask; 65 66 int may_writepage; 67 68 /* Can mapped pages be reclaimed? */ 69 int may_unmap; 70 71 /* Can pages be swapped as part of reclaim? */ 72 int may_swap; 73 74 int swappiness; 75 76 int all_unreclaimable; 77 78 int order; 79 80 /* Which cgroup do we reclaim from */ 81 struct mem_cgroup *mem_cgroup; 82 83 /* 84 * Nodemask of nodes allowed by the caller. If NULL, all nodes 85 * are scanned. 86 */ 87 nodemask_t *nodemask; 88 89 /* Pluggable isolate pages callback */ 90 unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst, 91 unsigned long *scanned, int order, int mode, 92 struct zone *z, struct mem_cgroup *mem_cont, 93 int active, int file); 94 }; 95 96 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) 97 98 #ifdef ARCH_HAS_PREFETCH 99 #define prefetch_prev_lru_page(_page, _base, _field) \ 100 do { \ 101 if ((_page)->lru.prev != _base) { \ 102 struct page *prev; \ 103 \ 104 prev = lru_to_page(&(_page->lru)); \ 105 prefetch(&prev->_field); \ 106 } \ 107 } while (0) 108 #else 109 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) 110 #endif 111 112 #ifdef ARCH_HAS_PREFETCHW 113 #define prefetchw_prev_lru_page(_page, _base, _field) \ 114 do { \ 115 if ((_page)->lru.prev != _base) { \ 116 struct page *prev; \ 117 \ 118 prev = lru_to_page(&(_page->lru)); \ 119 prefetchw(&prev->_field); \ 120 } \ 121 } while (0) 122 #else 123 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) 124 #endif 125 126 /* 127 * From 0 .. 100. Higher means more swappy. 
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#define scanning_global_lru(sc)	(!(sc)->mem_cgroup)
#else
#define scanning_global_lru(sc)	(1)
#endif

static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
						  struct scan_control *sc)
{
	if (!scanning_global_lru(sc))
		return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);

	return &zone->reclaim_stat;
}

static unsigned long zone_nr_lru_pages(struct zone *zone,
				struct scan_control *sc, enum lru_list lru)
{
	if (!scanning_global_lru(sc))
		return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);

	return zone_page_state(zone, NR_LRU_BASE + lru);
}


/*
 * Add a shrinker callback to be called from the vm
 */
void register_shrinker(struct shrinker *shrinker)
{
	shrinker->nr = 0;
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches.
 *
 * Here we assume it costs one seek to replace an LRU page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the LRU and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the VM encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);

		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "shrink_slab: %pF negative objects to "
			       "delete nr=%ld\n",
			       shrinker->shrink, shrinker->nr);
			shrinker->nr = max_pass;
		}

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrink)(0, gfp_mask);
			shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}
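
/*
 * The proportional-scan arithmetic in shrink_slab() above can be hard to
 * follow in the abstract.  The fragment below is a standalone userspace
 * sketch (not kernel code, and kept out of the build by the #if 0 guard)
 * that mirrors the same calculation with made-up example numbers; the
 * helper name slab_scan_target() is invented for illustration only, and
 * the negative-nr recovery and locking of the real function are omitted.
 */
#if 0
#include <stdio.h>

#define SHRINK_BATCH	128
#define DEFAULT_SEEKS	2

static unsigned long slab_scan_target(unsigned long scanned,
				      unsigned long lru_pages,
				      unsigned long max_pass,
				      int seeks,
				      unsigned long *deferred)
{
	unsigned long long delta;
	unsigned long total_scan;

	/* Pressure proportional to the fraction of the LRU just scanned. */
	delta = (4ULL * scanned) / seeks;
	delta *= max_pass;
	delta /= lru_pages + 1;

	total_scan = *deferred + (unsigned long)delta;

	/* Never try to free more than twice the estimated object count. */
	if (total_scan > max_pass * 2)
		total_scan = max_pass * 2;

	/* Work proceeds in SHRINK_BATCH chunks; the remainder is deferred. */
	*deferred = total_scan % SHRINK_BATCH;
	return total_scan - *deferred;
}

int main(void)
{
	unsigned long deferred = 0;
	unsigned long scan;

	/* 10,000 of 500,000 LRU pages scanned; cache holds 40,000 objects. */
	scan = slab_scan_target(10000, 500000, 40000, DEFAULT_SEEKS, &deferred);
	printf("scan %lu objects now, defer %lu\n", scan, deferred);
	return 0;
}
#endif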

/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}

static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.
	 */
	return page_count(page) - page_has_private(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller
 * has __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* Request for sync pageout. */
enum pageout_io {
	PAGEOUT_IO_ASYNC,
	PAGEOUT_IO_SYNC,
};

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
						enum pageout_io sync_writeback)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
359 * 360 * If this process is currently in __generic_file_aio_write() against 361 * this page's queue, we can perform writeback even if that 362 * will block. 363 * 364 * If the page is swapcache, write it back even if that would 365 * block, for some throttling. This happens by accident, because 366 * swap_backing_dev_info is bust: it doesn't reflect the 367 * congestion state of the swapdevs. Easy to fix, if needed. 368 */ 369 if (!is_page_cache_freeable(page)) 370 return PAGE_KEEP; 371 if (!mapping) { 372 /* 373 * Some data journaling orphaned pages can have 374 * page->mapping == NULL while being dirty with clean buffers. 375 */ 376 if (page_has_private(page)) { 377 if (try_to_free_buffers(page)) { 378 ClearPageDirty(page); 379 printk("%s: orphaned page\n", __func__); 380 return PAGE_CLEAN; 381 } 382 } 383 return PAGE_KEEP; 384 } 385 if (mapping->a_ops->writepage == NULL) 386 return PAGE_ACTIVATE; 387 if (!may_write_to_queue(mapping->backing_dev_info)) 388 return PAGE_KEEP; 389 390 if (clear_page_dirty_for_io(page)) { 391 int res; 392 struct writeback_control wbc = { 393 .sync_mode = WB_SYNC_NONE, 394 .nr_to_write = SWAP_CLUSTER_MAX, 395 .range_start = 0, 396 .range_end = LLONG_MAX, 397 .nonblocking = 1, 398 .for_reclaim = 1, 399 }; 400 401 SetPageReclaim(page); 402 res = mapping->a_ops->writepage(page, &wbc); 403 if (res < 0) 404 handle_write_error(mapping, page, res); 405 if (res == AOP_WRITEPAGE_ACTIVATE) { 406 ClearPageReclaim(page); 407 return PAGE_ACTIVATE; 408 } 409 410 /* 411 * Wait on writeback if requested to. This happens when 412 * direct reclaiming a large contiguous area and the 413 * first attempt to free a range of pages fails. 414 */ 415 if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC) 416 wait_on_page_writeback(page); 417 418 if (!PageWriteback(page)) { 419 /* synchronous write or broken a_ops? */ 420 ClearPageReclaim(page); 421 } 422 inc_zone_page_state(page, NR_VMSCAN_WRITE); 423 return PAGE_SUCCESS; 424 } 425 426 return PAGE_CLEAN; 427 } 428 429 /* 430 * Same as remove_mapping, but if the page is removed from the mapping, it 431 * gets returned with a refcount of 0. 432 */ 433 static int __remove_mapping(struct address_space *mapping, struct page *page) 434 { 435 BUG_ON(!PageLocked(page)); 436 BUG_ON(mapping != page_mapping(page)); 437 438 spin_lock_irq(&mapping->tree_lock); 439 /* 440 * The non racy check for a busy page. 441 * 442 * Must be careful with the order of the tests. When someone has 443 * a ref to the page, it may be possible that they dirty it then 444 * drop the reference. So if PageDirty is tested before page_count 445 * here, then the following race may occur: 446 * 447 * get_user_pages(&page); 448 * [user mapping goes away] 449 * write_to(page); 450 * !PageDirty(page) [good] 451 * SetPageDirty(page); 452 * put_page(page); 453 * !page_count(page) [good, discard it] 454 * 455 * [oops, our write_to data is lost] 456 * 457 * Reversing the order of the tests ensures such a situation cannot 458 * escape unnoticed. The smp_rmb is needed to ensure the page->flags 459 * load is not satisfied before that of page->_count. 460 * 461 * Note that if SetPageDirty is always performed via set_page_dirty, 462 * and thus under tree_lock, then this ordering is not required. 
463 */ 464 if (!page_freeze_refs(page, 2)) 465 goto cannot_free; 466 /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */ 467 if (unlikely(PageDirty(page))) { 468 page_unfreeze_refs(page, 2); 469 goto cannot_free; 470 } 471 472 if (PageSwapCache(page)) { 473 swp_entry_t swap = { .val = page_private(page) }; 474 __delete_from_swap_cache(page); 475 spin_unlock_irq(&mapping->tree_lock); 476 swapcache_free(swap, page); 477 } else { 478 __remove_from_page_cache(page); 479 spin_unlock_irq(&mapping->tree_lock); 480 mem_cgroup_uncharge_cache_page(page); 481 } 482 483 return 1; 484 485 cannot_free: 486 spin_unlock_irq(&mapping->tree_lock); 487 return 0; 488 } 489 490 /* 491 * Attempt to detach a locked page from its ->mapping. If it is dirty or if 492 * someone else has a ref on the page, abort and return 0. If it was 493 * successfully detached, return 1. Assumes the caller has a single ref on 494 * this page. 495 */ 496 int remove_mapping(struct address_space *mapping, struct page *page) 497 { 498 if (__remove_mapping(mapping, page)) { 499 /* 500 * Unfreezing the refcount with 1 rather than 2 effectively 501 * drops the pagecache ref for us without requiring another 502 * atomic operation. 503 */ 504 page_unfreeze_refs(page, 1); 505 return 1; 506 } 507 return 0; 508 } 509 510 /** 511 * putback_lru_page - put previously isolated page onto appropriate LRU list 512 * @page: page to be put back to appropriate lru list 513 * 514 * Add previously isolated @page to appropriate LRU list. 515 * Page may still be unevictable for other reasons. 516 * 517 * lru_lock must not be held, interrupts must be enabled. 518 */ 519 void putback_lru_page(struct page *page) 520 { 521 int lru; 522 int active = !!TestClearPageActive(page); 523 int was_unevictable = PageUnevictable(page); 524 525 VM_BUG_ON(PageLRU(page)); 526 527 redo: 528 ClearPageUnevictable(page); 529 530 if (page_evictable(page, NULL)) { 531 /* 532 * For evictable pages, we can use the cache. 533 * In event of a race, worst case is we end up with an 534 * unevictable page on [in]active list. 535 * We know how to handle that. 536 */ 537 lru = active + page_lru_base_type(page); 538 lru_cache_add_lru(page, lru); 539 } else { 540 /* 541 * Put unevictable pages directly on zone's unevictable 542 * list. 543 */ 544 lru = LRU_UNEVICTABLE; 545 add_page_to_unevictable_list(page); 546 /* 547 * When racing with an mlock clearing (page is 548 * unlocked), make sure that if the other thread does 549 * not observe our setting of PG_lru and fails 550 * isolation, we see PG_mlocked cleared below and move 551 * the page back to the evictable list. 552 * 553 * The other side is TestClearPageMlocked(). 554 */ 555 smp_mb(); 556 } 557 558 /* 559 * page's status can change while we move it among lru. If an evictable 560 * page is on unevictable list, it never be freed. To avoid that, 561 * check after we added it to the list, again. 562 */ 563 if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) { 564 if (!isolate_lru_page(page)) { 565 put_page(page); 566 goto redo; 567 } 568 /* This means someone else dropped this page from LRU 569 * So, it will be freed or putback to LRU again. There is 570 * nothing to do here. 
		 */
	}

	if (was_unevictable && lru != LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGRESCUED);
	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGCULLED);

	put_page(page);		/* drop ref from isolate */
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
					struct scan_control *sc,
					enum pageout_io sync_writeback)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	unsigned long nr_reclaimed = 0;
	unsigned long vm_flags;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON(PageActive(page));

		sc->nr_scanned++;

		if (unlikely(!page_evictable(page, NULL)))
			goto cull_mlocked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			/*
			 * Synchronous reclaim is performed in two passes,
			 * first an asynchronous pass over the list to
			 * start parallel writeback, and a second synchronous
			 * pass to wait for the IO to complete.  Wait here
			 * for any page for which writeback has already
			 * started.
			 */
			if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
				wait_on_page_writeback(page);
			else
				goto keep_locked;
		}

		referenced = page_referenced(page, 1,
						sc->mem_cgroup, &vm_flags);
		/*
		 * In active use or really unfreeable?  Activate it.
		 * If a page with PG_mlocked lost the isolation race,
		 * try_to_unmap() moves it to the unevictable list.
		 */
		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
					referenced && page_mapping_inuse(page)
					&& !(vm_flags & VM_LOCKED))
			goto activate_locked;

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!(sc->gfp_mask & __GFP_IO))
				goto keep_locked;
			if (!add_to_swap(page))
				goto activate_locked;
			may_enter_fs = 1;
		}

		mapping = page_mapping(page);

		/*
		 * The page is mapped into the page tables of one or more
		 * processes.  Try to unmap it here.
673 */ 674 if (page_mapped(page) && mapping) { 675 switch (try_to_unmap(page, TTU_UNMAP)) { 676 case SWAP_FAIL: 677 goto activate_locked; 678 case SWAP_AGAIN: 679 goto keep_locked; 680 case SWAP_MLOCK: 681 goto cull_mlocked; 682 case SWAP_SUCCESS: 683 ; /* try to free the page below */ 684 } 685 } 686 687 if (PageDirty(page)) { 688 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced) 689 goto keep_locked; 690 if (!may_enter_fs) 691 goto keep_locked; 692 if (!sc->may_writepage) 693 goto keep_locked; 694 695 /* Page is dirty, try to write it out here */ 696 switch (pageout(page, mapping, sync_writeback)) { 697 case PAGE_KEEP: 698 goto keep_locked; 699 case PAGE_ACTIVATE: 700 goto activate_locked; 701 case PAGE_SUCCESS: 702 if (PageWriteback(page) || PageDirty(page)) 703 goto keep; 704 /* 705 * A synchronous write - probably a ramdisk. Go 706 * ahead and try to reclaim the page. 707 */ 708 if (!trylock_page(page)) 709 goto keep; 710 if (PageDirty(page) || PageWriteback(page)) 711 goto keep_locked; 712 mapping = page_mapping(page); 713 case PAGE_CLEAN: 714 ; /* try to free the page below */ 715 } 716 } 717 718 /* 719 * If the page has buffers, try to free the buffer mappings 720 * associated with this page. If we succeed we try to free 721 * the page as well. 722 * 723 * We do this even if the page is PageDirty(). 724 * try_to_release_page() does not perform I/O, but it is 725 * possible for a page to have PageDirty set, but it is actually 726 * clean (all its buffers are clean). This happens if the 727 * buffers were written out directly, with submit_bh(). ext3 728 * will do this, as well as the blockdev mapping. 729 * try_to_release_page() will discover that cleanness and will 730 * drop the buffers and mark the page clean - it can be freed. 731 * 732 * Rarely, pages can have buffers and no ->mapping. These are 733 * the pages which were not successfully invalidated in 734 * truncate_complete_page(). We try to drop those buffers here 735 * and if that worked, and the page is no longer mapped into 736 * process address space (page_count == 1) it can be freed. 737 * Otherwise, leave the page on the LRU so it is swappable. 738 */ 739 if (page_has_private(page)) { 740 if (!try_to_release_page(page, sc->gfp_mask)) 741 goto activate_locked; 742 if (!mapping && page_count(page) == 1) { 743 unlock_page(page); 744 if (put_page_testzero(page)) 745 goto free_it; 746 else { 747 /* 748 * rare race with speculative reference. 749 * the speculative reference will free 750 * this page shortly, so we may 751 * increment nr_reclaimed here (and 752 * leave it off the LRU). 753 */ 754 nr_reclaimed++; 755 continue; 756 } 757 } 758 } 759 760 if (!mapping || !__remove_mapping(mapping, page)) 761 goto keep_locked; 762 763 /* 764 * At this point, we have no other references and there is 765 * no way to pick any more up (removed from LRU, removed 766 * from pagecache). Can use non-atomic bitops now (and 767 * we obviously don't have to worry about waking up a process 768 * waiting on the page lock, because there are no references. 769 */ 770 __clear_page_locked(page); 771 free_it: 772 nr_reclaimed++; 773 if (!pagevec_add(&freed_pvec, page)) { 774 __pagevec_free(&freed_pvec); 775 pagevec_reinit(&freed_pvec); 776 } 777 continue; 778 779 cull_mlocked: 780 if (PageSwapCache(page)) 781 try_to_free_swap(page); 782 unlock_page(page); 783 putback_lru_page(page); 784 continue; 785 786 activate_locked: 787 /* Not a candidate for swapping, so reclaim swap space. 
 */
		if (PageSwapCache(page) && vm_swap_full())
			try_to_free_swap(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_free(&freed_pvec);
	count_vm_events(PGACTIVATE, pgactivate);
	return nr_reclaimed;
}

/* LRU Isolation modes. */
#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, int mode, int file)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/*
	 * When checking the active state, we need to be sure we are
	 * dealing with comparable boolean values.  Take the logical not
	 * of each.
	 */
	if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
		return ret;

	if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
		return ret;

	/*
	 * When this function is being called for lumpy reclaim, we
	 * initially look into all LRU pages, active, inactive and
	 * unevictable; only give shrink_page_list evictable pages.
	 */
	if (PageUnevictable(page))
		return ret;

	ret = -EBUSY;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}
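
/*
 * The "!PageActive(page) != !mode" test above compares a page flag (any
 * non-zero value when set) against a 0/1 isolation mode.  The guarded
 * userspace sketch below (illustration only, not kernel code; the sample
 * flag value is made up) shows why both sides are normalised with '!'
 * before the comparison.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int page_active = 0x40;		/* flag word with the "active" bit set */
	int mode_active = 1;		/* ISOLATE_ACTIVE */

	/* Naive comparison wrongly reports a mismatch: 0x40 != 1. */
	printf("naive:      %s\n",
	       page_active != mode_active ? "mismatch" : "match");

	/* Logical-not on both sides yields comparable booleans. */
	printf("normalised: %s\n",
	       !page_active != !mode_active ? "mismatch" : "match");
	return 0;
}
#endif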

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 * @order:	The caller's attempted allocation order
 * @mode:	One of the LRU isolation modes
 * @file:	True [1] if isolating file [!anon] pages
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct list_head *src, struct list_head *dst,
		unsigned long *scanned, int order, int mode, int file)
{
	unsigned long nr_taken = 0;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct page *page;
		unsigned long pfn;
		unsigned long end_pfn;
		unsigned long page_pfn;
		int zone_id;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		switch (__isolate_lru_page(page, mode, file)) {
		case 0:
			list_move(&page->lru, dst);
			mem_cgroup_del_lru(page);
			nr_taken++;
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			mem_cgroup_rotate_lru_list(page, page_lru(page));
			continue;

		default:
			BUG();
		}

		if (!order)
			continue;

		/*
		 * Attempt to take all pages in the order aligned region
		 * surrounding the tag page.  Only take those pages of
		 * the same active state as that tag page.  We may safely
		 * round the target page pfn down to the requested order
		 * as the mem_map is guaranteed valid out to MAX_ORDER;
		 * if a page in that region is in a different zone we will
		 * detect it from its zone id and abort this block scan.
		 */
		zone_id = page_zone_id(page);
		page_pfn = page_to_pfn(page);
		pfn = page_pfn & ~((1 << order) - 1);
		end_pfn = pfn + (1 << order);
		for (; pfn < end_pfn; pfn++) {
			struct page *cursor_page;

			/* The target page is in the block, ignore it. */
			if (unlikely(pfn == page_pfn))
				continue;

			/* Avoid holes within the zone. */
			if (unlikely(!pfn_valid_within(pfn)))
				break;

			cursor_page = pfn_to_page(pfn);

			/* Check that we have not crossed a zone boundary. */
			if (unlikely(page_zone_id(cursor_page) != zone_id))
				continue;

			/*
			 * If we don't have enough swap space, reclaiming of
			 * anon pages which don't already have a swap slot is
			 * pointless.
			 */
			if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
			    !PageSwapCache(cursor_page))
				continue;

			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
				list_move(&cursor_page->lru, dst);
				mem_cgroup_del_lru(cursor_page);
				nr_taken++;
				scan++;
			}
		}
	}

	*scanned = scan;
	return nr_taken;
}

static unsigned long isolate_pages_global(unsigned long nr,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file)
{
	int lru = LRU_BASE;
	if (active)
		lru += LRU_ACTIVE;
	if (file)
		lru += LRU_FILE;
	return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
								mode, file);
}
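
/*
 * A worked example of the order-aligned block arithmetic used by the
 * lumpy-reclaim path in isolate_lru_pages() above.  Standalone userspace
 * sketch, guarded out of the build; the pfn value is a made-up example.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long page_pfn = 0x12345;	/* arbitrary example pfn */
	int order = 4;				/* 2^4 = 16-page block */
	unsigned long pfn, end_pfn;

	/* Round the target pfn down to the start of its aligned block. */
	pfn = page_pfn & ~((1UL << order) - 1);
	end_pfn = pfn + (1UL << order);

	/* Prints the half-open range [0x12340, 0x12350). */
	printf("block: [%#lx, %#lx)\n", pfn, end_pfn);
	return 0;
}
#endif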

/*
 * clear_active_flags() is a helper for shrink_active_list(), clearing
 * any active bits from the pages in the list.
 */
static unsigned long clear_active_flags(struct list_head *page_list,
					unsigned int *count)
{
	int nr_active = 0;
	int lru;
	struct page *page;

	list_for_each_entry(page, page_list, lru) {
		lru = page_lru_base_type(page);
		if (PageActive(page)) {
			lru += LRU_ACTIVE;
			ClearPageActive(page);
			nr_active++;
		}
		count[lru]++;
	}

	return nr_active;
}

/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
 * vmstat statistic corresponding to whatever LRU list the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared.  If it was found on
 * the active list, it will have PageActive set.  If it was found on
 * the unevictable list, it will have the PageUnevictable bit set.  That flag
 * may need to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page.  This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page) && get_page_unless_zero(page)) {
			int lru = page_lru(page);
			ret = 0;
			ClearPageLRU(page);

			del_page_from_lru_list(zone, page, lru);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * Are there way too many processes in the direct reclaim path already?
 */
static int too_many_isolated(struct zone *zone, int file,
		struct scan_control *sc)
{
	unsigned long inactive, isolated;

	if (current_is_kswapd())
		return 0;

	if (!scanning_global_lru(sc))
		return 0;

	if (file) {
		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
	} else {
		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
	}

	return isolated > inactive;
}

/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages.
 */
static unsigned long shrink_inactive_list(unsigned long max_scan,
			struct zone *zone, struct scan_control *sc,
			int priority, int file)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	unsigned long nr_scanned = 0;
	unsigned long nr_reclaimed = 0;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
	int lumpy_reclaim = 0;

	while (unlikely(too_many_isolated(zone, file, sc))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/* We are about to die and free our memory. Return now.
*/ 1103 if (fatal_signal_pending(current)) 1104 return SWAP_CLUSTER_MAX; 1105 } 1106 1107 /* 1108 * If we need a large contiguous chunk of memory, or have 1109 * trouble getting a small set of contiguous pages, we 1110 * will reclaim both active and inactive pages. 1111 * 1112 * We use the same threshold as pageout congestion_wait below. 1113 */ 1114 if (sc->order > PAGE_ALLOC_COSTLY_ORDER) 1115 lumpy_reclaim = 1; 1116 else if (sc->order && priority < DEF_PRIORITY - 2) 1117 lumpy_reclaim = 1; 1118 1119 pagevec_init(&pvec, 1); 1120 1121 lru_add_drain(); 1122 spin_lock_irq(&zone->lru_lock); 1123 do { 1124 struct page *page; 1125 unsigned long nr_taken; 1126 unsigned long nr_scan; 1127 unsigned long nr_freed; 1128 unsigned long nr_active; 1129 unsigned int count[NR_LRU_LISTS] = { 0, }; 1130 int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE; 1131 unsigned long nr_anon; 1132 unsigned long nr_file; 1133 1134 nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX, 1135 &page_list, &nr_scan, sc->order, mode, 1136 zone, sc->mem_cgroup, 0, file); 1137 1138 if (scanning_global_lru(sc)) { 1139 zone->pages_scanned += nr_scan; 1140 if (current_is_kswapd()) 1141 __count_zone_vm_events(PGSCAN_KSWAPD, zone, 1142 nr_scan); 1143 else 1144 __count_zone_vm_events(PGSCAN_DIRECT, zone, 1145 nr_scan); 1146 } 1147 1148 if (nr_taken == 0) 1149 goto done; 1150 1151 nr_active = clear_active_flags(&page_list, count); 1152 __count_vm_events(PGDEACTIVATE, nr_active); 1153 1154 __mod_zone_page_state(zone, NR_ACTIVE_FILE, 1155 -count[LRU_ACTIVE_FILE]); 1156 __mod_zone_page_state(zone, NR_INACTIVE_FILE, 1157 -count[LRU_INACTIVE_FILE]); 1158 __mod_zone_page_state(zone, NR_ACTIVE_ANON, 1159 -count[LRU_ACTIVE_ANON]); 1160 __mod_zone_page_state(zone, NR_INACTIVE_ANON, 1161 -count[LRU_INACTIVE_ANON]); 1162 1163 nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON]; 1164 nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE]; 1165 __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon); 1166 __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file); 1167 1168 reclaim_stat->recent_scanned[0] += nr_anon; 1169 reclaim_stat->recent_scanned[1] += nr_file; 1170 1171 spin_unlock_irq(&zone->lru_lock); 1172 1173 nr_scanned += nr_scan; 1174 nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC); 1175 1176 /* 1177 * If we are direct reclaiming for contiguous pages and we do 1178 * not reclaim everything in the list, try again and wait 1179 * for IO to complete. This will stall high-order allocations 1180 * but that should be acceptable to the caller 1181 */ 1182 if (nr_freed < nr_taken && !current_is_kswapd() && 1183 lumpy_reclaim) { 1184 congestion_wait(BLK_RW_ASYNC, HZ/10); 1185 1186 /* 1187 * The attempt at page out may have made some 1188 * of the pages active, mark them inactive again. 1189 */ 1190 nr_active = clear_active_flags(&page_list, count); 1191 count_vm_events(PGDEACTIVATE, nr_active); 1192 1193 nr_freed += shrink_page_list(&page_list, sc, 1194 PAGEOUT_IO_SYNC); 1195 } 1196 1197 nr_reclaimed += nr_freed; 1198 1199 local_irq_disable(); 1200 if (current_is_kswapd()) 1201 __count_vm_events(KSWAPD_STEAL, nr_freed); 1202 __count_zone_vm_events(PGSTEAL, zone, nr_freed); 1203 1204 spin_lock(&zone->lru_lock); 1205 /* 1206 * Put back any unfreeable pages. 
1207 */ 1208 while (!list_empty(&page_list)) { 1209 int lru; 1210 page = lru_to_page(&page_list); 1211 VM_BUG_ON(PageLRU(page)); 1212 list_del(&page->lru); 1213 if (unlikely(!page_evictable(page, NULL))) { 1214 spin_unlock_irq(&zone->lru_lock); 1215 putback_lru_page(page); 1216 spin_lock_irq(&zone->lru_lock); 1217 continue; 1218 } 1219 SetPageLRU(page); 1220 lru = page_lru(page); 1221 add_page_to_lru_list(zone, page, lru); 1222 if (is_active_lru(lru)) { 1223 int file = is_file_lru(lru); 1224 reclaim_stat->recent_rotated[file]++; 1225 } 1226 if (!pagevec_add(&pvec, page)) { 1227 spin_unlock_irq(&zone->lru_lock); 1228 __pagevec_release(&pvec); 1229 spin_lock_irq(&zone->lru_lock); 1230 } 1231 } 1232 __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon); 1233 __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file); 1234 1235 } while (nr_scanned < max_scan); 1236 1237 done: 1238 spin_unlock_irq(&zone->lru_lock); 1239 pagevec_release(&pvec); 1240 return nr_reclaimed; 1241 } 1242 1243 /* 1244 * We are about to scan this zone at a certain priority level. If that priority 1245 * level is smaller (ie: more urgent) than the previous priority, then note 1246 * that priority level within the zone. This is done so that when the next 1247 * process comes in to scan this zone, it will immediately start out at this 1248 * priority level rather than having to build up its own scanning priority. 1249 * Here, this priority affects only the reclaim-mapped threshold. 1250 */ 1251 static inline void note_zone_scanning_priority(struct zone *zone, int priority) 1252 { 1253 if (priority < zone->prev_priority) 1254 zone->prev_priority = priority; 1255 } 1256 1257 /* 1258 * This moves pages from the active list to the inactive list. 1259 * 1260 * We move them the other way if the page is referenced by one or more 1261 * processes, from rmap. 1262 * 1263 * If the pages are mostly unmapped, the processing is fast and it is 1264 * appropriate to hold zone->lru_lock across the whole operation. But if 1265 * the pages are mapped, the processing is slow (page_referenced()) so we 1266 * should drop zone->lru_lock around each page. It's impossible to balance 1267 * this, so instead we remove the pages from the LRU while processing them. 1268 * It is safe to rely on PG_active against the non-LRU pages in here because 1269 * nobody will play with that bit on a non-LRU page. 1270 * 1271 * The downside is that we have to touch page->_count against each page. 1272 * But we had to alter page->flags anyway. 
1273 */ 1274 1275 static void move_active_pages_to_lru(struct zone *zone, 1276 struct list_head *list, 1277 enum lru_list lru) 1278 { 1279 unsigned long pgmoved = 0; 1280 struct pagevec pvec; 1281 struct page *page; 1282 1283 pagevec_init(&pvec, 1); 1284 1285 while (!list_empty(list)) { 1286 page = lru_to_page(list); 1287 1288 VM_BUG_ON(PageLRU(page)); 1289 SetPageLRU(page); 1290 1291 list_move(&page->lru, &zone->lru[lru].list); 1292 mem_cgroup_add_lru_list(page, lru); 1293 pgmoved++; 1294 1295 if (!pagevec_add(&pvec, page) || list_empty(list)) { 1296 spin_unlock_irq(&zone->lru_lock); 1297 if (buffer_heads_over_limit) 1298 pagevec_strip(&pvec); 1299 __pagevec_release(&pvec); 1300 spin_lock_irq(&zone->lru_lock); 1301 } 1302 } 1303 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); 1304 if (!is_active_lru(lru)) 1305 __count_vm_events(PGDEACTIVATE, pgmoved); 1306 } 1307 1308 static void shrink_active_list(unsigned long nr_pages, struct zone *zone, 1309 struct scan_control *sc, int priority, int file) 1310 { 1311 unsigned long nr_taken; 1312 unsigned long pgscanned; 1313 unsigned long vm_flags; 1314 LIST_HEAD(l_hold); /* The pages which were snipped off */ 1315 LIST_HEAD(l_active); 1316 LIST_HEAD(l_inactive); 1317 struct page *page; 1318 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1319 unsigned long nr_rotated = 0; 1320 1321 lru_add_drain(); 1322 spin_lock_irq(&zone->lru_lock); 1323 nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order, 1324 ISOLATE_ACTIVE, zone, 1325 sc->mem_cgroup, 1, file); 1326 /* 1327 * zone->pages_scanned is used for detect zone's oom 1328 * mem_cgroup remembers nr_scan by itself. 1329 */ 1330 if (scanning_global_lru(sc)) { 1331 zone->pages_scanned += pgscanned; 1332 } 1333 reclaim_stat->recent_scanned[file] += nr_taken; 1334 1335 __count_zone_vm_events(PGREFILL, zone, pgscanned); 1336 if (file) 1337 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken); 1338 else 1339 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken); 1340 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); 1341 spin_unlock_irq(&zone->lru_lock); 1342 1343 while (!list_empty(&l_hold)) { 1344 cond_resched(); 1345 page = lru_to_page(&l_hold); 1346 list_del(&page->lru); 1347 1348 if (unlikely(!page_evictable(page, NULL))) { 1349 putback_lru_page(page); 1350 continue; 1351 } 1352 1353 /* page_referenced clears PageReferenced */ 1354 if (page_mapping_inuse(page) && 1355 page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) { 1356 nr_rotated++; 1357 /* 1358 * Identify referenced, file-backed active pages and 1359 * give them one more trip around the active list. So 1360 * that executable code get better chances to stay in 1361 * memory under moderate memory pressure. Anon pages 1362 * are not likely to be evicted by use-once streaming 1363 * IO, plus JVM can create lots of anon VM_EXEC pages, 1364 * so we ignore them here. 1365 */ 1366 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { 1367 list_add(&page->lru, &l_active); 1368 continue; 1369 } 1370 } 1371 1372 ClearPageActive(page); /* we are de-activating */ 1373 list_add(&page->lru, &l_inactive); 1374 } 1375 1376 /* 1377 * Move pages back to the lru list. 1378 */ 1379 spin_lock_irq(&zone->lru_lock); 1380 /* 1381 * Count referenced pages from currently used mappings as rotated, 1382 * even though only some of them are actually re-activated. This 1383 * helps balance scan pressure between file and anonymous pages in 1384 * get_scan_ratio. 
1385 */ 1386 reclaim_stat->recent_rotated[file] += nr_rotated; 1387 1388 move_active_pages_to_lru(zone, &l_active, 1389 LRU_ACTIVE + file * LRU_FILE); 1390 move_active_pages_to_lru(zone, &l_inactive, 1391 LRU_BASE + file * LRU_FILE); 1392 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); 1393 spin_unlock_irq(&zone->lru_lock); 1394 } 1395 1396 static int inactive_anon_is_low_global(struct zone *zone) 1397 { 1398 unsigned long active, inactive; 1399 1400 active = zone_page_state(zone, NR_ACTIVE_ANON); 1401 inactive = zone_page_state(zone, NR_INACTIVE_ANON); 1402 1403 if (inactive * zone->inactive_ratio < active) 1404 return 1; 1405 1406 return 0; 1407 } 1408 1409 /** 1410 * inactive_anon_is_low - check if anonymous pages need to be deactivated 1411 * @zone: zone to check 1412 * @sc: scan control of this context 1413 * 1414 * Returns true if the zone does not have enough inactive anon pages, 1415 * meaning some active anon pages need to be deactivated. 1416 */ 1417 static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc) 1418 { 1419 int low; 1420 1421 if (scanning_global_lru(sc)) 1422 low = inactive_anon_is_low_global(zone); 1423 else 1424 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup); 1425 return low; 1426 } 1427 1428 static int inactive_file_is_low_global(struct zone *zone) 1429 { 1430 unsigned long active, inactive; 1431 1432 active = zone_page_state(zone, NR_ACTIVE_FILE); 1433 inactive = zone_page_state(zone, NR_INACTIVE_FILE); 1434 1435 return (active > inactive); 1436 } 1437 1438 /** 1439 * inactive_file_is_low - check if file pages need to be deactivated 1440 * @zone: zone to check 1441 * @sc: scan control of this context 1442 * 1443 * When the system is doing streaming IO, memory pressure here 1444 * ensures that active file pages get deactivated, until more 1445 * than half of the file pages are on the inactive list. 1446 * 1447 * Once we get to that situation, protect the system's working 1448 * set from being evicted by disabling active file page aging. 1449 * 1450 * This uses a different ratio than the anonymous pages, because 1451 * the page cache uses a use-once replacement algorithm. 1452 */ 1453 static int inactive_file_is_low(struct zone *zone, struct scan_control *sc) 1454 { 1455 int low; 1456 1457 if (scanning_global_lru(sc)) 1458 low = inactive_file_is_low_global(zone); 1459 else 1460 low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup); 1461 return low; 1462 } 1463 1464 static int inactive_list_is_low(struct zone *zone, struct scan_control *sc, 1465 int file) 1466 { 1467 if (file) 1468 return inactive_file_is_low(zone, sc); 1469 else 1470 return inactive_anon_is_low(zone, sc); 1471 } 1472 1473 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 1474 struct zone *zone, struct scan_control *sc, int priority) 1475 { 1476 int file = is_file_lru(lru); 1477 1478 if (is_active_lru(lru)) { 1479 if (inactive_list_is_low(zone, sc, file)) 1480 shrink_active_list(nr_to_scan, zone, sc, priority, file); 1481 return 0; 1482 } 1483 1484 return shrink_inactive_list(nr_to_scan, zone, sc, priority, file); 1485 } 1486 1487 /* 1488 * Determine how aggressively the anon and file LRU lists should be 1489 * scanned. The relative value of each set of LRU lists is determined 1490 * by looking at the fraction of the pages scanned we did rotate back 1491 * onto the active list instead of evict. 
 *
 * percent[0] specifies how much pressure to put on ram/swap backed
 * memory, while percent[1] determines pressure on the file LRUs.
 */
static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
					unsigned long *percent)
{
	unsigned long anon, file, free;
	unsigned long anon_prio, file_prio;
	unsigned long ap, fp;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);

	anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
	file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);

	if (scanning_global_lru(sc)) {
		free  = zone_page_state(zone, NR_FREE_PAGES);
		/* If we have very few page cache pages,
		   force-scan anon pages. */
		if (unlikely(file + free <= high_wmark_pages(zone))) {
			percent[0] = 100;
			percent[1] = 0;
			return;
		}
	}

	/*
	 * OK, so we have swap space and a fair amount of page cache
	 * pages.  We use the recently rotated / recently scanned
	 * ratios to determine how valuable each cache is.
	 *
	 * Because workloads change over time (and to avoid overflow)
	 * we keep these statistics as a floating average, which ends
	 * up weighing recent references more than old ones.
	 *
	 * anon in [0], file in [1]
	 */
	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
		spin_lock_irq(&zone->lru_lock);
		reclaim_stat->recent_scanned[0] /= 2;
		reclaim_stat->recent_rotated[0] /= 2;
		spin_unlock_irq(&zone->lru_lock);
	}

	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
		spin_lock_irq(&zone->lru_lock);
		reclaim_stat->recent_scanned[1] /= 2;
		reclaim_stat->recent_rotated[1] /= 2;
		spin_unlock_irq(&zone->lru_lock);
	}

	/*
	 * With swappiness at 100, anonymous and file have the same priority.
	 * This scanning priority is essentially the inverse of IO cost.
	 */
	anon_prio = sc->swappiness;
	file_prio = 200 - sc->swappiness;

	/*
	 * The amount of pressure on anon vs file pages is inversely
	 * proportional to the fraction of recently scanned pages on
	 * each list that were recently referenced and in active use.
	 */
	ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
	ap /= reclaim_stat->recent_rotated[0] + 1;

	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
	fp /= reclaim_stat->recent_rotated[1] + 1;

	/* Normalize to percentages */
	percent[0] = 100 * ap / (ap + fp + 1);
	percent[1] = 100 - percent[0];
}

/*
 * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
 * until we have collected @swap_cluster_max pages to scan.
 */
static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
				       unsigned long *nr_saved_scan)
{
	unsigned long nr;

	*nr_saved_scan += nr_to_scan;
	nr = *nr_saved_scan;

	if (nr >= SWAP_CLUSTER_MAX)
		*nr_saved_scan = 0;
	else
		nr = 0;

	return nr;
}
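
/*
 * A worked, standalone example (userspace sketch, not kernel code, guarded
 * out of the build) of the anon/file pressure calculation performed by
 * get_scan_ratio() above.  The recent_scanned/recent_rotated figures are
 * invented sample values chosen only to show the arithmetic.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long swappiness = 60;
	unsigned long anon_prio = swappiness;		/* 60 */
	unsigned long file_prio = 200 - swappiness;	/* 140 */

	/* Sample floating-average statistics, anon in [0], file in [1]. */
	unsigned long recent_scanned[2] = { 1000, 4000 };
	unsigned long recent_rotated[2] = {  800,  400 };
	unsigned long ap, fp, percent[2];

	ap = (anon_prio + 1) * (recent_scanned[0] + 1);
	ap /= recent_rotated[0] + 1;			/* 61 * 1001 / 801 ~= 76 */

	fp = (file_prio + 1) * (recent_scanned[1] + 1);
	fp /= recent_rotated[1] + 1;			/* 141 * 4001 / 401 ~= 1406 */

	percent[0] = 100 * ap / (ap + fp + 1);		/* ~5% pressure on anon */
	percent[1] = 100 - percent[0];			/* ~95% pressure on file */

	printf("anon %lu%%, file %lu%%\n", percent[0], percent[1]);
	return 0;
}
#endif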

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static void shrink_zone(int priority, struct zone *zone,
				struct scan_control *sc)
{
	unsigned long nr[NR_LRU_LISTS];
	unsigned long nr_to_scan;
	unsigned long percent[2];	/* anon @ 0; file @ 1 */
	enum lru_list l;
	unsigned long nr_reclaimed = sc->nr_reclaimed;
	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
	int noswap = 0;

	/* If we have no swap space, do not bother scanning anon pages. */
	if (!sc->may_swap || (nr_swap_pages <= 0)) {
		noswap = 1;
		percent[0] = 0;
		percent[1] = 100;
	} else
		get_scan_ratio(zone, sc, percent);

	for_each_evictable_lru(l) {
		int file = is_file_lru(l);
		unsigned long scan;

		scan = zone_nr_lru_pages(zone, sc, l);
		if (priority || noswap) {
			scan >>= priority;
			scan = (scan * percent[file]) / 100;
		}
		nr[l] = nr_scan_try_batch(scan,
					  &reclaim_stat->nr_saved_scan[l]);
	}

	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
					nr[LRU_INACTIVE_FILE]) {
		for_each_evictable_lru(l) {
			if (nr[l]) {
				nr_to_scan = min_t(unsigned long,
						   nr[l], SWAP_CLUSTER_MAX);
				nr[l] -= nr_to_scan;

				nr_reclaimed += shrink_list(l, nr_to_scan,
							    zone, sc, priority);
			}
		}
		/*
		 * On large memory systems, scan >> priority can become
		 * really large.  This is fine for the starting priority;
		 * we want to put equal scanning pressure on each zone.
		 * However, if the VM has a harder time of freeing pages,
		 * with multiple processes reclaiming pages, the total
		 * freeing target can get unreasonably large.
		 */
		if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
			break;
	}

	sc->nr_reclaimed = nr_reclaimed;

	/*
	 * Even if we did not try to evict anon pages at all, we want to
	 * rebalance the anon lru active/inactive ratio.
	 */
	if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);

	throttle_vm_writeout(sc->gfp_mask);
}

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
 * Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
 *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
 *    zone defense algorithm.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static void shrink_zones(int priority, struct zonelist *zonelist,
					struct scan_control *sc)
{
	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
	struct zoneref *z;
	struct zone *zone;

	sc->all_unreclaimable = 1;
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
					sc->nodemask) {
		if (!populated_zone(zone))
			continue;
		/*
		 * Take care that memory controller reclaim has only a small
		 * influence on the global LRU.
1691 */ 1692 if (scanning_global_lru(sc)) { 1693 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1694 continue; 1695 note_zone_scanning_priority(zone, priority); 1696 1697 if (zone_is_all_unreclaimable(zone) && 1698 priority != DEF_PRIORITY) 1699 continue; /* Let kswapd poll it */ 1700 sc->all_unreclaimable = 0; 1701 } else { 1702 /* 1703 * Ignore cpuset limitation here. We just want to reduce 1704 * # of used pages by us regardless of memory shortage. 1705 */ 1706 sc->all_unreclaimable = 0; 1707 mem_cgroup_note_reclaim_priority(sc->mem_cgroup, 1708 priority); 1709 } 1710 1711 shrink_zone(priority, zone, sc); 1712 } 1713 } 1714 1715 /* 1716 * This is the main entry point to direct page reclaim. 1717 * 1718 * If a full scan of the inactive list fails to free enough memory then we 1719 * are "out of memory" and something needs to be killed. 1720 * 1721 * If the caller is !__GFP_FS then the probability of a failure is reasonably 1722 * high - the zone may be full of dirty or under-writeback pages, which this 1723 * caller can't do much about. We kick the writeback threads and take explicit 1724 * naps in the hope that some of these pages can be written. But if the 1725 * allocating task holds filesystem locks which prevent writeout this might not 1726 * work, and the allocation attempt will fail. 1727 * 1728 * returns: 0, if no pages reclaimed 1729 * else, the number of pages reclaimed 1730 */ 1731 static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 1732 struct scan_control *sc) 1733 { 1734 int priority; 1735 unsigned long ret = 0; 1736 unsigned long total_scanned = 0; 1737 struct reclaim_state *reclaim_state = current->reclaim_state; 1738 unsigned long lru_pages = 0; 1739 struct zoneref *z; 1740 struct zone *zone; 1741 enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); 1742 unsigned long writeback_threshold; 1743 1744 delayacct_freepages_start(); 1745 1746 if (scanning_global_lru(sc)) 1747 count_vm_event(ALLOCSTALL); 1748 /* 1749 * mem_cgroup will not do shrink_slab. 1750 */ 1751 if (scanning_global_lru(sc)) { 1752 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1753 1754 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1755 continue; 1756 1757 lru_pages += zone_reclaimable_pages(zone); 1758 } 1759 } 1760 1761 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 1762 sc->nr_scanned = 0; 1763 if (!priority) 1764 disable_swap_token(); 1765 shrink_zones(priority, zonelist, sc); 1766 /* 1767 * Don't shrink slabs when reclaiming memory from 1768 * over limit cgroups 1769 */ 1770 if (scanning_global_lru(sc)) { 1771 shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages); 1772 if (reclaim_state) { 1773 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 1774 reclaim_state->reclaimed_slab = 0; 1775 } 1776 } 1777 total_scanned += sc->nr_scanned; 1778 if (sc->nr_reclaimed >= sc->nr_to_reclaim) { 1779 ret = sc->nr_reclaimed; 1780 goto out; 1781 } 1782 1783 /* 1784 * Try to write back as many pages as we just scanned. This 1785 * tends to cause slow streaming writers to write data to the 1786 * disk smoothly, at the dirtying rate, which is nice. But 1787 * that's undesirable in laptop mode, where we *want* lumpy 1788 * writeout. So in laptop mode, write out the whole world. 1789 */ 1790 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; 1791 if (total_scanned > writeback_threshold) { 1792 wakeup_flusher_threads(laptop_mode ? 
0 : total_scanned); 1793 sc->may_writepage = 1; 1794 } 1795 1796 /* Take a nap, wait for some writeback to complete */ 1797 if (!sc->hibernation_mode && sc->nr_scanned && 1798 priority < DEF_PRIORITY - 2) 1799 congestion_wait(BLK_RW_ASYNC, HZ/10); 1800 } 1801 /* top priority shrink_zones still had more to do? don't OOM, then */ 1802 if (!sc->all_unreclaimable && scanning_global_lru(sc)) 1803 ret = sc->nr_reclaimed; 1804 out: 1805 /* 1806 * Now that we've scanned all the zones at this priority level, note 1807 * that level within the zone so that the next thread which performs 1808 * scanning of this zone will immediately start out at this priority 1809 * level. This affects only the decision whether or not to bring 1810 * mapped pages onto the inactive list. 1811 */ 1812 if (priority < 0) 1813 priority = 0; 1814 1815 if (scanning_global_lru(sc)) { 1816 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1817 1818 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1819 continue; 1820 1821 zone->prev_priority = priority; 1822 } 1823 } else 1824 mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority); 1825 1826 delayacct_freepages_end(); 1827 1828 return ret; 1829 } 1830 1831 unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 1832 gfp_t gfp_mask, nodemask_t *nodemask) 1833 { 1834 struct scan_control sc = { 1835 .gfp_mask = gfp_mask, 1836 .may_writepage = !laptop_mode, 1837 .nr_to_reclaim = SWAP_CLUSTER_MAX, 1838 .may_unmap = 1, 1839 .may_swap = 1, 1840 .swappiness = vm_swappiness, 1841 .order = order, 1842 .mem_cgroup = NULL, 1843 .isolate_pages = isolate_pages_global, 1844 .nodemask = nodemask, 1845 }; 1846 1847 return do_try_to_free_pages(zonelist, &sc); 1848 } 1849 1850 #ifdef CONFIG_CGROUP_MEM_RES_CTLR 1851 1852 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 1853 gfp_t gfp_mask, bool noswap, 1854 unsigned int swappiness, 1855 struct zone *zone, int nid) 1856 { 1857 struct scan_control sc = { 1858 .may_writepage = !laptop_mode, 1859 .may_unmap = 1, 1860 .may_swap = !noswap, 1861 .swappiness = swappiness, 1862 .order = 0, 1863 .mem_cgroup = mem, 1864 .isolate_pages = mem_cgroup_isolate_pages, 1865 }; 1866 nodemask_t nm = nodemask_of_node(nid); 1867 1868 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 1869 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 1870 sc.nodemask = &nm; 1871 sc.nr_reclaimed = 0; 1872 sc.nr_scanned = 0; 1873 /* 1874 * NOTE: Although we can get the priority field, using it 1875 * here is not a good idea, since it limits the pages we can scan. 1876 * if we don't reclaim here, the shrink_zone from balance_pgdat 1877 * will pick up pages from other mem cgroup's as well. We hack 1878 * the priority and make it zero. 

#ifdef CONFIG_CGROUP_MEM_RES_CTLR

unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						unsigned int swappiness,
						struct zone *zone, int nid)
{
	struct scan_control sc = {
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = !noswap,
		.swappiness = swappiness,
		.order = 0,
		.mem_cgroup = mem,
		.isolate_pages = mem_cgroup_isolate_pages,
	};
	nodemask_t nm = nodemask_of_node(nid);

	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
	sc.nodemask = &nm;
	sc.nr_reclaimed = 0;
	sc.nr_scanned = 0;
	/*
	 * NOTE: Although we can get the priority field, using it
	 * here is not a good idea, since it limits the pages we can scan.
	 * If we don't reclaim here, the shrink_zone from balance_pgdat
	 * will pick up pages from other mem cgroups as well. We hack
	 * the priority and make it zero.
	 */
	shrink_zone(0, zone, &sc);
	return sc.nr_reclaimed;
}

unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
					   gfp_t gfp_mask,
					   bool noswap,
					   unsigned int swappiness)
{
	struct zonelist *zonelist;
	struct scan_control sc = {
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = !noswap,
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.swappiness = swappiness,
		.order = 0,
		.mem_cgroup = mem_cont,
		.isolate_pages = mem_cgroup_isolate_pages,
		.nodemask = NULL,	/* we don't care about placement */
	};

	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
	return do_try_to_free_pages(zonelist, &sc);
}
#endif

/* is kswapd sleeping prematurely? */
static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
{
	int i;

	/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
	if (remaining)
		return 1;

	/* If after HZ/10, a zone is below the high mark, it's premature */
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		if (!populated_zone(zone))
			continue;

		if (zone_is_all_unreclaimable(zone))
			continue;

		if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
								0, 0))
			return 1;
	}

	return 0;
}
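
/*
 * Example (illustrative): if any populated, still-reclaimable zone on the
 * node has fewer free pages than high_wmark_pages(zone) at the requested
 * order, zone_watermark_ok() fails and the sleep is considered premature, so
 * kswapd keeps running.  A non-zero 'remaining' means the short HZ/10 sleep
 * was cut short by another wakeup, which is treated the same way.
 */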

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at high_wmark_pages(zone).
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone
 * as dead and from now on, only perform a short scan.  Basically we're
 * polling the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and
 * the lower zones regardless of the number of free pages in the lower zones.
 * This interoperates with the page allocator fallback scheme to ensure that
 * aging of pages is balanced across the zones.
 */
static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
{
	int all_zones_ok;
	int priority;
	int i;
	unsigned long total_scanned;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_unmap = 1,
		.may_swap = 1,
		/*
		 * kswapd doesn't want to be bailed out while reclaiming,
		 * because we want to put equal scanning pressure on each zone.
		 */
		.nr_to_reclaim = ULONG_MAX,
		.swappiness = vm_swappiness,
		.order = order,
		.mem_cgroup = NULL,
		.isolate_pages = isolate_pages_global,
	};
	/*
	 * temp_priority is used to remember the scanning priority at which
	 * this zone was successfully refilled to
	 * free_pages == high_wmark_pages(zone).
	 */
	int temp_priority[MAX_NR_ZONES];

loop_again:
	total_scanned = 0;
	sc.nr_reclaimed = 0;
	sc.may_writepage = !laptop_mode;
	count_vm_event(PAGEOUTRUN);

	for (i = 0; i < pgdat->nr_zones; i++)
		temp_priority[i] = DEF_PRIORITY;

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
		unsigned long lru_pages = 0;
		int has_under_min_watermark_zone = 0;

		/* The swap token gets in the way of swapout... */
		if (!priority)
			disable_swap_token();

		all_zones_ok = 1;

		/*
		 * Scan in the highmem->dma direction for the highest
		 * zone which needs scanning
		 */
		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
			struct zone *zone = pgdat->node_zones + i;

			if (!populated_zone(zone))
				continue;

			if (zone_is_all_unreclaimable(zone) &&
			    priority != DEF_PRIORITY)
				continue;

			/*
			 * Do some background aging of the anon list, to give
			 * pages a chance to be referenced before reclaiming.
			 */
			if (inactive_anon_is_low(zone, &sc))
				shrink_active_list(SWAP_CLUSTER_MAX, zone,
							&sc, priority, 0);

			if (!zone_watermark_ok(zone, order,
					high_wmark_pages(zone), 0, 0)) {
				end_zone = i;
				break;
			}
		}
		if (i < 0)
			goto out;

		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			lru_pages += zone_reclaimable_pages(zone);
		}

		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction.  This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;
			int nr_slab;
			int nid, zid;

			if (!populated_zone(zone))
				continue;

			if (zone_is_all_unreclaimable(zone) &&
					priority != DEF_PRIORITY)
				continue;

			if (!zone_watermark_ok(zone, order,
					high_wmark_pages(zone), end_zone, 0))
				all_zones_ok = 0;
			temp_priority[i] = priority;
			sc.nr_scanned = 0;
			note_zone_scanning_priority(zone, priority);

			nid = pgdat->node_id;
			zid = zone_idx(zone);
			/*
			 * Call soft limit reclaim before calling shrink_zone.
			 * For now we ignore the return value
			 */
			mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask,
							nid, zid);
			/*
			 * We put equal pressure on every zone, unless one
			 * zone has way too many pages free already.
			 */
			if (!zone_watermark_ok(zone, order,
					8*high_wmark_pages(zone), end_zone, 0))
				shrink_zone(priority, zone, &sc);
			reclaim_state->reclaimed_slab = 0;
			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
						lru_pages);
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			total_scanned += sc.nr_scanned;
			if (zone_is_all_unreclaimable(zone))
				continue;
			if (nr_slab == 0 && zone->pages_scanned >=
					(zone_reclaimable_pages(zone) * 6))
				zone_set_flag(zone,
					      ZONE_ALL_UNRECLAIMABLE);
			/*
			 * If we've done a decent amount of scanning and
			 * the reclaim ratio is low, start doing writepage
			 * even in laptop mode
			 */
			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
			    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
				sc.may_writepage = 1;

			/*
			 * We are still under the min watermark.  That means
			 * we risk GFP_ATOMIC allocation failure.  Hurry up!
			 */
			if (!zone_watermark_ok(zone, order, min_wmark_pages(zone),
					      end_zone, 0))
				has_under_min_watermark_zone = 1;

		}
		if (all_zones_ok)
			break;		/* kswapd: all done */
		/*
		 * OK, kswapd is getting into trouble.  Take a nap, then take
		 * another pass across the zones.
		 */
		if (total_scanned && (priority < DEF_PRIORITY - 2)) {
			if (has_under_min_watermark_zone)
				count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
			else
				congestion_wait(BLK_RW_ASYNC, HZ/10);
		}

		/*
		 * We do this so kswapd doesn't build up large priorities for
		 * example when it is freeing in parallel with allocators. It
		 * matches the direct reclaim path behaviour in terms of impact
		 * on zone->*_priority.
		 */
		if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
			break;
	}
out:
	/*
	 * Note within each zone the priority level at which this zone was
	 * brought into a happy state, so that the next thread which scans
	 * this zone will start out at that priority level.
	 */
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = temp_priority[i];
	}
	if (!all_zones_ok) {
		cond_resched();

		try_to_freeze();

		/*
		 * Fragmentation may mean that the system cannot be
		 * rebalanced for high-order allocations in all zones.
		 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
		 * it means the zones have been fully scanned and are still
		 * not balanced. For high-order allocations, there is
		 * little point trying all over again as kswapd may
		 * loop forever.
		 *
		 * Instead, recheck all watermarks at order-0 as they
		 * are the most important. If watermarks are ok, kswapd will go
		 * back to sleep. High-order users can still perform direct
		 * reclaim if they wish.
		 */
		if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
			order = sc.order = 0;

		goto loop_again;
	}

	return sc.nr_reclaimed;
}
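
/*
 * Worked example for the loop above (illustrative): a zone whose free pages
 * already satisfy eight times its high watermark passes the
 * 8*high_wmark_pages() check, so shrink_zone() is skipped for it (shrink_slab
 * still runs).  A zone where shrink_slab() freed nothing (nr_slab == 0) and
 * whose pages_scanned has reached six times its reclaimable pages is flagged
 * ZONE_ALL_UNRECLAIMABLE and, from then on, is only revisited at DEF_PRIORITY.
 */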

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned long order;
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	lockdep_set_current_reclaim_state(GFP_KERNEL);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
	set_freezable();

	order = 0;
	for ( ; ; ) {
		unsigned long new_order;
		int ret;

		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		new_order = pgdat->kswapd_max_order;
		pgdat->kswapd_max_order = 0;
		if (order < new_order) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation
			 */
			order = new_order;
		} else {
			if (!freezing(current) && !kthread_should_stop()) {
				long remaining = 0;

				/* Try to sleep for a short interval */
				if (!sleeping_prematurely(pgdat, order, remaining)) {
					remaining = schedule_timeout(HZ/10);
					finish_wait(&pgdat->kswapd_wait, &wait);
					prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
				}

				/*
				 * After a short sleep, check if it was a
				 * premature sleep. If not, then go fully
				 * to sleep until explicitly woken up
				 */
				if (!sleeping_prematurely(pgdat, order, remaining))
					schedule();
				else {
					if (remaining)
						count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
					else
						count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
				}
			}

			order = pgdat->kswapd_max_order;
		}
		finish_wait(&pgdat->kswapd_wait, &wait);

		ret = try_to_freeze();
		if (kthread_should_stop())
			break;

		/*
		 * We can speed up thawing tasks if we don't call balance_pgdat
		 * after returning from the refrigerator
		 */
		if (!ret)
			balance_pgdat(pgdat, order);
	}
	return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	pgdat = zone->zone_pgdat;
	if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
		return;
	if (pgdat->kswapd_max_order < order)
		pgdat->kswapd_max_order = order;
	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
		return;
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&pgdat->kswapd_wait);
}
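
/*
 * Illustrative fragment (hypothetical helper, not part of this file): an
 * allocator that has just failed a watermark check might wake kswapd for
 * every zone usable by the allocation roughly like this.
 */
#if 0
static void example_wake_all_kswapd(int order, struct zonelist *zonelist,
				    enum zone_type high_zoneidx)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		wakeup_kswapd(zone, order);
}
#endif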

/*
 * The reclaimable count should be mostly accurate.  The pages that are
 * harder to reclaim are
 * - mlocked pages, which will be moved to the unevictable list when
 *   encountered
 * - mapped pages, which may require several passes to be reclaimed
 * - dirty pages, which are not "instantly" reclaimable
 */
unsigned long global_reclaimable_pages(void)
{
	int nr;

	nr = global_page_state(NR_ACTIVE_FILE) +
	     global_page_state(NR_INACTIVE_FILE);

	if (nr_swap_pages > 0)
		nr += global_page_state(NR_ACTIVE_ANON) +
		      global_page_state(NR_INACTIVE_ANON);

	return nr;
}

unsigned long zone_reclaimable_pages(struct zone *zone)
{
	int nr;

	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
	     zone_page_state(zone, NR_INACTIVE_FILE);

	if (nr_swap_pages > 0)
		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
		      zone_page_state(zone, NR_INACTIVE_ANON);

	return nr;
}
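
/*
 * Example (illustrative): with no swap configured (nr_swap_pages == 0) only
 * file LRU pages are counted, since anonymous pages have nowhere to go.  A
 * zone with 10000 file LRU pages and 50000 anon LRU pages is reported as
 * having 10000 reclaimable pages; with swap available it would be 60000.
 */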

#ifdef CONFIG_HIBERNATION
/*
 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
 * freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_HIGHUSER_MOVABLE,
		.may_swap = 1,
		.may_unmap = 1,
		.may_writepage = 1,
		.nr_to_reclaim = nr_to_reclaim,
		.hibernation_mode = 1,
		.swappiness = vm_swappiness,
		.order = 0,
		.isolate_pages = isolate_pages_global,
	};
	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
	struct task_struct *p = current;
	unsigned long nr_reclaimed;

	p->flags |= PF_MEMALLOC;
	lockdep_set_current_reclaim_state(sc.gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	p->reclaim_state = NULL;
	lockdep_clear_current_reclaim_state();
	p->flags &= ~PF_MEMALLOC;

	return nr_reclaimed;
}
#endif /* CONFIG_HIBERNATION */

/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_HIGH_MEMORY) {
			pg_data_t *pgdat = NODE_DATA(nid);
			const struct cpumask *mask;

			mask = cpumask_of_node(pgdat->node_id);

			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed_ptr(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}

/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kswapd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kswapd)
		return 0;

	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
	if (IS_ERR(pgdat->kswapd)) {
		/* failure at boot is fatal */
		BUG_ON(system_state == SYSTEM_BOOTING);
		printk("Failed to start kswapd on node %d\n", nid);
		ret = -1;
	}
	return ret;
}

/*
 * Called by memory hotplug when all memory in a node is offlined.
 */
void kswapd_stop(int nid)
{
	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;

	if (kswapd)
		kthread_stop(kswapd);
}

static int __init kswapd_init(void)
{
	int nid;

	swap_setup();
	for_each_node_state(nid, N_HIGH_MEMORY)
		kswapd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero call zone_reclaim when the number of free pages falls below
 * the watermarks.
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */

/*
 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 * of a node considered for each zone_reclaim. 4 scans 1/16th of
 * a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4

/*
 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;

static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
{
	unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
	unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
		zone_page_state(zone, NR_ACTIVE_FILE);

	/*
	 * It's possible for there to be more file mapped pages than
	 * accounted for by the pages on the file LRU lists because
	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
	 */
	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}
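
/*
 * Worked example (illustrative): with 1000 pages on the zone's file LRU
 * lists and 1200 NR_FILE_MAPPED pages (possible because mapped tmpfs pages
 * are accounted as anon on the LRU), the helper above returns 0 instead of
 * underflowing; with 1000 on the LRU and 300 mapped it returns 700.
 */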

/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static long zone_pagecache_reclaimable(struct zone *zone)
{
	long nr_pagecache_reclaimable;
	long delta = 0;

	/*
	 * If RECLAIM_SWAP is set, then all file pages are considered
	 * potentially reclaimable.  Otherwise, we have to worry about
	 * pages like swapcache, and zone_unmapped_file_pages() provides
	 * a better estimate.
	 */
	if (zone_reclaim_mode & RECLAIM_SWAP)
		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
	else
		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);

	/* If we can't clean pages, remove dirty pages from consideration */
	if (!(zone_reclaim_mode & RECLAIM_WRITE))
		delta += zone_page_state(zone, NR_FILE_DIRTY);

	/* Watch for any possible underflows due to delta */
	if (unlikely(delta > nr_pagecache_reclaimable))
		delta = nr_pagecache_reclaimable;

	return nr_pagecache_reclaimable - delta;
}

/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	int priority;
	struct scan_control sc = {
		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
		.may_swap = 1,
		.nr_to_reclaim = max_t(unsigned long, nr_pages,
				       SWAP_CLUSTER_MAX),
		.gfp_mask = gfp_mask,
		.swappiness = vm_swappiness,
		.order = order,
		.isolate_pages = isolate_pages_global,
	};
	unsigned long slab_reclaimable;

	disable_swap_token();
	cond_resched();
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_SWAP.
	 */
	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
		/*
		 * Free memory by calling shrink_zone with increasing
		 * priorities until we have enough memory freed.
		 */
		priority = ZONE_RECLAIM_PRIORITY;
		do {
			note_zone_scanning_priority(zone, priority);
			shrink_zone(priority, zone, &sc);
			priority--;
		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
	}

	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	if (slab_reclaimable > zone->min_slab_pages) {
		/*
		 * shrink_slab() does not currently allow us to determine how
		 * many pages were freed in this zone. So we take the current
		 * number of slab pages and shake the slab until it is reduced
		 * by the same nr_pages that we used for reclaiming unmapped
		 * pages.
		 *
		 * Note that shrink_slab will free memory on all zones and may
		 * take a long time.
		 */
		while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
			zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
				slab_reclaimable - nr_pages)
			;

		/*
		 * Update nr_reclaimed by the number of slab pages we
		 * reclaimed from this zone.
		 */
		sc.nr_reclaimed += slab_reclaimable -
			zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	}

	p->reclaim_state = NULL;
	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
	return sc.nr_reclaimed >= nr_pages;
}

int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	int node_id;
	int ret;

	/*
	 * Zone reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the zone is overallocated. So we do not reclaim
	 * if less than a specified percentage of the zone is used by
	 * unmapped file backed pages.
	 */
	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
		return ZONE_RECLAIM_FULL;

	if (zone_is_all_unreclaimable(zone))
		return ZONE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
		return ZONE_RECLAIM_NOSCAN;

	/*
	 * Only run zone reclaim on the local zone or on zones that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as widely as possible.
	 */
	node_id = zone_to_nid(zone);
	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
		return ZONE_RECLAIM_NOSCAN;

	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
		return ZONE_RECLAIM_NOSCAN;

	ret = __zone_reclaim(zone, gfp_mask, order);
	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);

	if (!ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
#endif

/*
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 * @vma: the VMA in which the page is or will be mapped, may be NULL
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.  The vma argument is !NULL when called from the
 * fault path to determine how to instantiate a new page.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
int page_evictable(struct page *page, struct vm_area_struct *vma)
{

	if (mapping_unevictable(page_mapping(page)))
		return 0;

	if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
		return 0;

	return 1;
}
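
/*
 * Example (illustrative): a page whose mapping has been marked unevictable,
 * e.g. a shmem segment locked with SHM_LOCK, fails the first test above; an
 * anonymous page in an mlock()ed VMA fails the second.  Both stay off the
 * active/inactive lists and end up on the unevictable LRU list instead.
 */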

/**
 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
 * @page: page to check evictability and move to appropriate lru list
 * @zone: zone page is in
 *
 * Checks a page for evictability and moves the page to the appropriate
 * zone lru list.
 *
 * Restrictions: zone->lru_lock must be held, page must be on LRU and must
 * have PageUnevictable set.
 */
static void check_move_unevictable_page(struct page *page, struct zone *zone)
{
	VM_BUG_ON(PageActive(page));

retry:
	ClearPageUnevictable(page);
	if (page_evictable(page, NULL)) {
		enum lru_list l = page_lru_base_type(page);

		__dec_zone_state(zone, NR_UNEVICTABLE);
		list_move(&page->lru, &zone->lru[l].list);
		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
		__count_vm_event(UNEVICTABLE_PGRESCUED);
	} else {
		/*
		 * rotate unevictable list
		 */
		SetPageUnevictable(page);
		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
		if (page_evictable(page, NULL))
			goto retry;
	}
}

/**
 * scan_mapping_unevictable_pages - scan an address space for evictable pages
 * @mapping: struct address_space to scan for evictable pages
 *
 * Scan all pages in mapping.  Check unevictable pages for
 * evictability and move them to the appropriate zone lru list.
 */
void scan_mapping_unevictable_pages(struct address_space *mapping)
{
	pgoff_t next = 0;
	pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
			PAGE_CACHE_SHIFT;
	struct zone *zone;
	struct pagevec pvec;

	if (mapping->nrpages == 0)
		return;

	pagevec_init(&pvec, 0);
	while (next < end &&
		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		int i;
		int pg_scanned = 0;

		zone = NULL;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;
			struct zone *pagezone = page_zone(page);

			pg_scanned++;
			if (page_index > next)
				next = page_index;
			next++;

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irq(&zone->lru_lock);
				zone = pagezone;
				spin_lock_irq(&zone->lru_lock);
			}

			if (PageLRU(page) && PageUnevictable(page))
				check_move_unevictable_page(page, zone);
		}
		if (zone)
			spin_unlock_irq(&zone->lru_lock);
		pagevec_release(&pvec);

		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
	}

}
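
/*
 * Illustrative usage: a caller that has just made a whole mapping evictable
 * again (for example when an SHM_LOCK is dropped) can rescue pages already
 * stranded on the unevictable lists with something like
 *
 *	scan_mapping_unevictable_pages(inode->i_mapping);
 *
 * where 'inode' is assumed to be the backing inode of that mapping.
 */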

/**
 * scan_zone_unevictable_pages - check unevictable list for evictable pages
 * @zone: zone whose unevictable list is to be scanned
 *
 * Scan @zone's unevictable LRU lists to check for pages that have become
 * evictable.  Move those that have to @zone's inactive list where they
 * become candidates for reclaim, unless shrink_inactive_zone() decides
 * to reactivate them.  Pages that are still unevictable are rotated
 * back onto @zone's unevictable list.
 */
#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
static void scan_zone_unevictable_pages(struct zone *zone)
{
	struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
	unsigned long scan;
	unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);

	while (nr_to_scan > 0) {
		unsigned long batch_size = min(nr_to_scan,
						SCAN_UNEVICTABLE_BATCH_SIZE);

		spin_lock_irq(&zone->lru_lock);
		for (scan = 0; scan < batch_size; scan++) {
			struct page *page = lru_to_page(l_unevictable);

			if (!trylock_page(page))
				continue;

			prefetchw_prev_lru_page(page, l_unevictable, flags);

			if (likely(PageLRU(page) && PageUnevictable(page)))
				check_move_unevictable_page(page, zone);

			unlock_page(page);
		}
		spin_unlock_irq(&zone->lru_lock);

		nr_to_scan -= batch_size;
	}
}


/**
 * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
 *
 * A really big hammer:  scan all zones' unevictable LRU lists to check for
 * pages that have become evictable.  Move those back to the zones'
 * inactive list where they become candidates for reclaim.
 * This occurs when, e.g., we have unswappable pages on the unevictable lists,
 * and we add swap to the system.  As such, it runs in the context of a task
 * that has possibly/probably made some previously unevictable pages
 * evictable.
 */
static void scan_all_zones_unevictable_pages(void)
{
	struct zone *zone;

	for_each_zone(zone) {
		scan_zone_unevictable_pages(zone);
	}
}

/*
 * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
 * all nodes' unevictable lists for evictable pages
 */
unsigned long scan_unevictable_pages;

int scan_unevictable_handler(struct ctl_table *table, int write,
			     void __user *buffer,
			     size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, buffer, length, ppos);

	if (write && *(unsigned long *)table->data)
		scan_all_zones_unevictable_pages();

	scan_unevictable_pages = 0;
	return 0;
}

/*
 * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
 * a specified node's per zone unevictable lists for evictable pages.
 */

static ssize_t read_scan_unevictable_node(struct sys_device *dev,
					  struct sysdev_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "0\n");	/* always zero; should fit... */
}

static ssize_t write_scan_unevictable_node(struct sys_device *dev,
					   struct sysdev_attribute *attr,
					   const char *buf, size_t count)
{
	struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
	struct zone *zone;
	unsigned long res;
	int err;

	/* strict_strtoul() returns 0 on success and stores the value in res */
	err = strict_strtoul(buf, 10, &res);
	if (err || !res)
		return 1;	/* invalid input or zero is a no-op */

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;
		scan_zone_unevictable_pages(zone);
	}
	return 1;
}


static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
			read_scan_unevictable_node,
			write_scan_unevictable_node);

int scan_unevictable_register_node(struct node *node)
{
	return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
}

void scan_unevictable_unregister_node(struct node *node)
{
	sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
}
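
/*
 * Usage note (illustrative): the code above backs two manual knobs.  Writing
 * a non-zero value to the vm 'scan_unevictable_pages' sysctl rescans the
 * unevictable lists of every zone on every node, while writing a non-zero
 * value to the per-node 'scan_unevictable_pages' attribute (registered via
 * scan_unevictable_register_node()) rescans only that node's zones.  Reads
 * of the per-node attribute always return 0.
 */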