1 /* 2 * linux/mm/vmscan.c 3 * 4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 5 * 6 * Swap reorganised 29.12.95, Stephen Tweedie. 7 * kswapd added: 7.1.96 sct 8 * Removed kswapd_ctl limits, and swap out as many pages as needed 9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel. 10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). 11 * Multiqueue VM started 5.8.00, Rik van Riel. 12 */ 13 14 #include <linux/mm.h> 15 #include <linux/module.h> 16 #include <linux/gfp.h> 17 #include <linux/kernel_stat.h> 18 #include <linux/swap.h> 19 #include <linux/pagemap.h> 20 #include <linux/init.h> 21 #include <linux/highmem.h> 22 #include <linux/vmstat.h> 23 #include <linux/file.h> 24 #include <linux/writeback.h> 25 #include <linux/blkdev.h> 26 #include <linux/buffer_head.h> /* for try_to_release_page(), 27 buffer_heads_over_limit */ 28 #include <linux/mm_inline.h> 29 #include <linux/backing-dev.h> 30 #include <linux/rmap.h> 31 #include <linux/topology.h> 32 #include <linux/cpu.h> 33 #include <linux/cpuset.h> 34 #include <linux/compaction.h> 35 #include <linux/notifier.h> 36 #include <linux/rwsem.h> 37 #include <linux/delay.h> 38 #include <linux/kthread.h> 39 #include <linux/freezer.h> 40 #include <linux/memcontrol.h> 41 #include <linux/delayacct.h> 42 #include <linux/sysctl.h> 43 #include <linux/oom.h> 44 #include <linux/prefetch.h> 45 46 #include <asm/tlbflush.h> 47 #include <asm/div64.h> 48 49 #include <linux/swapops.h> 50 51 #include "internal.h" 52 53 #define CREATE_TRACE_POINTS 54 #include <trace/events/vmscan.h> 55 56 struct scan_control { 57 /* Incremented by the number of inactive pages that were scanned */ 58 unsigned long nr_scanned; 59 60 /* Number of pages freed so far during a call to shrink_zones() */ 61 unsigned long nr_reclaimed; 62 63 /* How many pages shrink_list() should reclaim */ 64 unsigned long nr_to_reclaim; 65 66 unsigned long hibernation_mode; 67 68 /* This context's GFP mask */ 69 gfp_t gfp_mask; 70 71 int may_writepage; 72 73 /* Can mapped pages be reclaimed? */ 74 int may_unmap; 75 76 /* Can pages be swapped as part of reclaim? */ 77 int may_swap; 78 79 int order; 80 81 /* Scan (total_size >> priority) pages at once */ 82 int priority; 83 84 /* 85 * The memory cgroup that hit its limit and as a result is the 86 * primary target of this reclaim invocation. 87 */ 88 struct mem_cgroup *target_mem_cgroup; 89 90 /* 91 * Nodemask of nodes allowed by the caller. If NULL, all nodes 92 * are scanned. 93 */ 94 nodemask_t *nodemask; 95 }; 96 97 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) 98 99 #ifdef ARCH_HAS_PREFETCH 100 #define prefetch_prev_lru_page(_page, _base, _field) \ 101 do { \ 102 if ((_page)->lru.prev != _base) { \ 103 struct page *prev; \ 104 \ 105 prev = lru_to_page(&(_page->lru)); \ 106 prefetch(&prev->_field); \ 107 } \ 108 } while (0) 109 #else 110 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) 111 #endif 112 113 #ifdef ARCH_HAS_PREFETCHW 114 #define prefetchw_prev_lru_page(_page, _base, _field) \ 115 do { \ 116 if ((_page)->lru.prev != _base) { \ 117 struct page *prev; \ 118 \ 119 prev = lru_to_page(&(_page->lru)); \ 120 prefetchw(&prev->_field); \ 121 } \ 122 } while (0) 123 #else 124 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) 125 #endif 126 127 /* 128 * From 0 .. 100. Higher means more swappy. 
129 */ 130 int vm_swappiness = 60; 131 long vm_total_pages; /* The total number of pages which the VM controls */ 132 133 static LIST_HEAD(shrinker_list); 134 static DECLARE_RWSEM(shrinker_rwsem); 135 136 #ifdef CONFIG_MEMCG 137 static bool global_reclaim(struct scan_control *sc) 138 { 139 return !sc->target_mem_cgroup; 140 } 141 #else 142 static bool global_reclaim(struct scan_control *sc) 143 { 144 return true; 145 } 146 #endif 147 148 static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru) 149 { 150 if (!mem_cgroup_disabled()) 151 return mem_cgroup_get_lru_size(lruvec, lru); 152 153 return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru); 154 } 155 156 /* 157 * Add a shrinker callback to be called from the vm 158 */ 159 void register_shrinker(struct shrinker *shrinker) 160 { 161 atomic_long_set(&shrinker->nr_in_batch, 0); 162 down_write(&shrinker_rwsem); 163 list_add_tail(&shrinker->list, &shrinker_list); 164 up_write(&shrinker_rwsem); 165 } 166 EXPORT_SYMBOL(register_shrinker); 167 168 /* 169 * Remove one 170 */ 171 void unregister_shrinker(struct shrinker *shrinker) 172 { 173 down_write(&shrinker_rwsem); 174 list_del(&shrinker->list); 175 up_write(&shrinker_rwsem); 176 } 177 EXPORT_SYMBOL(unregister_shrinker); 178 179 static inline int do_shrinker_shrink(struct shrinker *shrinker, 180 struct shrink_control *sc, 181 unsigned long nr_to_scan) 182 { 183 sc->nr_to_scan = nr_to_scan; 184 return (*shrinker->shrink)(shrinker, sc); 185 } 186 187 #define SHRINK_BATCH 128 188 /* 189 * Call the shrink functions to age shrinkable caches 190 * 191 * Here we assume it costs one seek to replace a lru page and that it also 192 * takes a seek to recreate a cache object. With this in mind we age equal 193 * percentages of the lru and ageable caches. This should balance the seeks 194 * generated by these structures. 195 * 196 * If the vm encountered mapped pages on the LRU it increase the pressure on 197 * slab to avoid swapping. 198 * 199 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits. 200 * 201 * `lru_pages' represents the number of on-LRU pages in all the zones which 202 * are eligible for the caller's allocation attempt. It is used for balancing 203 * slab reclaim versus page reclaim. 204 * 205 * Returns the number of slab objects which we shrunk. 206 */ 207 unsigned long shrink_slab(struct shrink_control *shrink, 208 unsigned long nr_pages_scanned, 209 unsigned long lru_pages) 210 { 211 struct shrinker *shrinker; 212 unsigned long ret = 0; 213 214 if (nr_pages_scanned == 0) 215 nr_pages_scanned = SWAP_CLUSTER_MAX; 216 217 if (!down_read_trylock(&shrinker_rwsem)) { 218 /* Assume we'll be able to shrink next time */ 219 ret = 1; 220 goto out; 221 } 222 223 list_for_each_entry(shrinker, &shrinker_list, list) { 224 unsigned long long delta; 225 long total_scan; 226 long max_pass; 227 int shrink_ret = 0; 228 long nr; 229 long new_nr; 230 long batch_size = shrinker->batch ? shrinker->batch 231 : SHRINK_BATCH; 232 233 max_pass = do_shrinker_shrink(shrinker, shrink, 0); 234 if (max_pass <= 0) 235 continue; 236 237 /* 238 * copy the current shrinker scan count into a local variable 239 * and zero it so that other concurrent shrinker invocations 240 * don't also do this scanning work. 
241 */ 242 nr = atomic_long_xchg(&shrinker->nr_in_batch, 0); 243 244 total_scan = nr; 245 delta = (4 * nr_pages_scanned) / shrinker->seeks; 246 delta *= max_pass; 247 do_div(delta, lru_pages + 1); 248 total_scan += delta; 249 if (total_scan < 0) { 250 printk(KERN_ERR "shrink_slab: %pF negative objects to " 251 "delete nr=%ld\n", 252 shrinker->shrink, total_scan); 253 total_scan = max_pass; 254 } 255 256 /* 257 * We need to avoid excessive windup on filesystem shrinkers 258 * due to large numbers of GFP_NOFS allocations causing the 259 * shrinkers to return -1 all the time. This results in a large 260 * nr being built up so when a shrink that can do some work 261 * comes along it empties the entire cache due to nr >>> 262 * max_pass. This is bad for sustaining a working set in 263 * memory. 264 * 265 * Hence only allow the shrinker to scan the entire cache when 266 * a large delta change is calculated directly. 267 */ 268 if (delta < max_pass / 4) 269 total_scan = min(total_scan, max_pass / 2); 270 271 /* 272 * Avoid risking looping forever due to too large nr value: 273 * never try to free more than twice the estimate number of 274 * freeable entries. 275 */ 276 if (total_scan > max_pass * 2) 277 total_scan = max_pass * 2; 278 279 trace_mm_shrink_slab_start(shrinker, shrink, nr, 280 nr_pages_scanned, lru_pages, 281 max_pass, delta, total_scan); 282 283 while (total_scan >= batch_size) { 284 int nr_before; 285 286 nr_before = do_shrinker_shrink(shrinker, shrink, 0); 287 shrink_ret = do_shrinker_shrink(shrinker, shrink, 288 batch_size); 289 if (shrink_ret == -1) 290 break; 291 if (shrink_ret < nr_before) 292 ret += nr_before - shrink_ret; 293 count_vm_events(SLABS_SCANNED, batch_size); 294 total_scan -= batch_size; 295 296 cond_resched(); 297 } 298 299 /* 300 * move the unused scan count back into the shrinker in a 301 * manner that handles concurrent updates. If we exhausted the 302 * scan, there is no need to do an update. 303 */ 304 if (total_scan > 0) 305 new_nr = atomic_long_add_return(total_scan, 306 &shrinker->nr_in_batch); 307 else 308 new_nr = atomic_long_read(&shrinker->nr_in_batch); 309 310 trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr); 311 } 312 up_read(&shrinker_rwsem); 313 out: 314 cond_resched(); 315 return ret; 316 } 317 318 static inline int is_page_cache_freeable(struct page *page) 319 { 320 /* 321 * A freeable page cache page is referenced only by the caller 322 * that isolated the page, the page cache radix tree and 323 * optional buffer heads at page->private. 324 */ 325 return page_count(page) - page_has_private(page) == 2; 326 } 327 328 static int may_write_to_queue(struct backing_dev_info *bdi, 329 struct scan_control *sc) 330 { 331 if (current->flags & PF_SWAPWRITE) 332 return 1; 333 if (!bdi_write_congested(bdi)) 334 return 1; 335 if (bdi == current->backing_dev_info) 336 return 1; 337 return 0; 338 } 339 340 /* 341 * We detected a synchronous write error writing a page out. Probably 342 * -ENOSPC. We need to propagate that into the address_space for a subsequent 343 * fsync(), msync() or close(). 344 * 345 * The tricky part is that after writepage we cannot touch the mapping: nothing 346 * prevents it from being freed up. But we have a ref on the page and once 347 * that page is locked, the mapping is pinned. 348 * 349 * We're allowed to run sleeping lock_page() here because we know the caller has 350 * __GFP_FS. 
351 */ 352 static void handle_write_error(struct address_space *mapping, 353 struct page *page, int error) 354 { 355 lock_page(page); 356 if (page_mapping(page) == mapping) 357 mapping_set_error(mapping, error); 358 unlock_page(page); 359 } 360 361 /* possible outcome of pageout() */ 362 typedef enum { 363 /* failed to write page out, page is locked */ 364 PAGE_KEEP, 365 /* move page to the active list, page is locked */ 366 PAGE_ACTIVATE, 367 /* page has been sent to the disk successfully, page is unlocked */ 368 PAGE_SUCCESS, 369 /* page is clean and locked */ 370 PAGE_CLEAN, 371 } pageout_t; 372 373 /* 374 * pageout is called by shrink_page_list() for each dirty page. 375 * Calls ->writepage(). 376 */ 377 static pageout_t pageout(struct page *page, struct address_space *mapping, 378 struct scan_control *sc) 379 { 380 /* 381 * If the page is dirty, only perform writeback if that write 382 * will be non-blocking. To prevent this allocation from being 383 * stalled by pagecache activity. But note that there may be 384 * stalls if we need to run get_block(). We could test 385 * PagePrivate for that. 386 * 387 * If this process is currently in __generic_file_aio_write() against 388 * this page's queue, we can perform writeback even if that 389 * will block. 390 * 391 * If the page is swapcache, write it back even if that would 392 * block, for some throttling. This happens by accident, because 393 * swap_backing_dev_info is bust: it doesn't reflect the 394 * congestion state of the swapdevs. Easy to fix, if needed. 395 */ 396 if (!is_page_cache_freeable(page)) 397 return PAGE_KEEP; 398 if (!mapping) { 399 /* 400 * Some data journaling orphaned pages can have 401 * page->mapping == NULL while being dirty with clean buffers. 402 */ 403 if (page_has_private(page)) { 404 if (try_to_free_buffers(page)) { 405 ClearPageDirty(page); 406 printk("%s: orphaned page\n", __func__); 407 return PAGE_CLEAN; 408 } 409 } 410 return PAGE_KEEP; 411 } 412 if (mapping->a_ops->writepage == NULL) 413 return PAGE_ACTIVATE; 414 if (!may_write_to_queue(mapping->backing_dev_info, sc)) 415 return PAGE_KEEP; 416 417 if (clear_page_dirty_for_io(page)) { 418 int res; 419 struct writeback_control wbc = { 420 .sync_mode = WB_SYNC_NONE, 421 .nr_to_write = SWAP_CLUSTER_MAX, 422 .range_start = 0, 423 .range_end = LLONG_MAX, 424 .for_reclaim = 1, 425 }; 426 427 SetPageReclaim(page); 428 res = mapping->a_ops->writepage(page, &wbc); 429 if (res < 0) 430 handle_write_error(mapping, page, res); 431 if (res == AOP_WRITEPAGE_ACTIVATE) { 432 ClearPageReclaim(page); 433 return PAGE_ACTIVATE; 434 } 435 436 if (!PageWriteback(page)) { 437 /* synchronous write or broken a_ops? */ 438 ClearPageReclaim(page); 439 } 440 trace_mm_vmscan_writepage(page, trace_reclaim_flags(page)); 441 inc_zone_page_state(page, NR_VMSCAN_WRITE); 442 return PAGE_SUCCESS; 443 } 444 445 return PAGE_CLEAN; 446 } 447 448 /* 449 * Same as remove_mapping, but if the page is removed from the mapping, it 450 * gets returned with a refcount of 0. 451 */ 452 static int __remove_mapping(struct address_space *mapping, struct page *page) 453 { 454 BUG_ON(!PageLocked(page)); 455 BUG_ON(mapping != page_mapping(page)); 456 457 spin_lock_irq(&mapping->tree_lock); 458 /* 459 * The non racy check for a busy page. 460 * 461 * Must be careful with the order of the tests. When someone has 462 * a ref to the page, it may be possible that they dirty it then 463 * drop the reference. 
So if PageDirty is tested before page_count 464 * here, then the following race may occur: 465 * 466 * get_user_pages(&page); 467 * [user mapping goes away] 468 * write_to(page); 469 * !PageDirty(page) [good] 470 * SetPageDirty(page); 471 * put_page(page); 472 * !page_count(page) [good, discard it] 473 * 474 * [oops, our write_to data is lost] 475 * 476 * Reversing the order of the tests ensures such a situation cannot 477 * escape unnoticed. The smp_rmb is needed to ensure the page->flags 478 * load is not satisfied before that of page->_count. 479 * 480 * Note that if SetPageDirty is always performed via set_page_dirty, 481 * and thus under tree_lock, then this ordering is not required. 482 */ 483 if (!page_freeze_refs(page, 2)) 484 goto cannot_free; 485 /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */ 486 if (unlikely(PageDirty(page))) { 487 page_unfreeze_refs(page, 2); 488 goto cannot_free; 489 } 490 491 if (PageSwapCache(page)) { 492 swp_entry_t swap = { .val = page_private(page) }; 493 __delete_from_swap_cache(page); 494 spin_unlock_irq(&mapping->tree_lock); 495 swapcache_free(swap, page); 496 } else { 497 void (*freepage)(struct page *); 498 499 freepage = mapping->a_ops->freepage; 500 501 __delete_from_page_cache(page); 502 spin_unlock_irq(&mapping->tree_lock); 503 mem_cgroup_uncharge_cache_page(page); 504 505 if (freepage != NULL) 506 freepage(page); 507 } 508 509 return 1; 510 511 cannot_free: 512 spin_unlock_irq(&mapping->tree_lock); 513 return 0; 514 } 515 516 /* 517 * Attempt to detach a locked page from its ->mapping. If it is dirty or if 518 * someone else has a ref on the page, abort and return 0. If it was 519 * successfully detached, return 1. Assumes the caller has a single ref on 520 * this page. 521 */ 522 int remove_mapping(struct address_space *mapping, struct page *page) 523 { 524 if (__remove_mapping(mapping, page)) { 525 /* 526 * Unfreezing the refcount with 1 rather than 2 effectively 527 * drops the pagecache ref for us without requiring another 528 * atomic operation. 529 */ 530 page_unfreeze_refs(page, 1); 531 return 1; 532 } 533 return 0; 534 } 535 536 /** 537 * putback_lru_page - put previously isolated page onto appropriate LRU list 538 * @page: page to be put back to appropriate lru list 539 * 540 * Add previously isolated @page to appropriate LRU list. 541 * Page may still be unevictable for other reasons. 542 * 543 * lru_lock must not be held, interrupts must be enabled. 544 */ 545 void putback_lru_page(struct page *page) 546 { 547 int lru; 548 int active = !!TestClearPageActive(page); 549 int was_unevictable = PageUnevictable(page); 550 551 VM_BUG_ON(PageLRU(page)); 552 553 redo: 554 ClearPageUnevictable(page); 555 556 if (page_evictable(page, NULL)) { 557 /* 558 * For evictable pages, we can use the cache. 559 * In event of a race, worst case is we end up with an 560 * unevictable page on [in]active list. 561 * We know how to handle that. 562 */ 563 lru = active + page_lru_base_type(page); 564 lru_cache_add_lru(page, lru); 565 } else { 566 /* 567 * Put unevictable pages directly on zone's unevictable 568 * list. 569 */ 570 lru = LRU_UNEVICTABLE; 571 add_page_to_unevictable_list(page); 572 /* 573 * When racing with an mlock or AS_UNEVICTABLE clearing 574 * (page is unlocked) make sure that if the other thread 575 * does not observe our setting of PG_lru and fails 576 * isolation/check_move_unevictable_pages, 577 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move 578 * the page back to the evictable list. 
579 * 580 * The other side is TestClearPageMlocked() or shmem_lock(). 581 */ 582 smp_mb(); 583 } 584 585 /* 586 * page's status can change while we move it among lru. If an evictable 587 * page is on unevictable list, it never be freed. To avoid that, 588 * check after we added it to the list, again. 589 */ 590 if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) { 591 if (!isolate_lru_page(page)) { 592 put_page(page); 593 goto redo; 594 } 595 /* This means someone else dropped this page from LRU 596 * So, it will be freed or putback to LRU again. There is 597 * nothing to do here. 598 */ 599 } 600 601 if (was_unevictable && lru != LRU_UNEVICTABLE) 602 count_vm_event(UNEVICTABLE_PGRESCUED); 603 else if (!was_unevictable && lru == LRU_UNEVICTABLE) 604 count_vm_event(UNEVICTABLE_PGCULLED); 605 606 put_page(page); /* drop ref from isolate */ 607 } 608 609 enum page_references { 610 PAGEREF_RECLAIM, 611 PAGEREF_RECLAIM_CLEAN, 612 PAGEREF_KEEP, 613 PAGEREF_ACTIVATE, 614 }; 615 616 static enum page_references page_check_references(struct page *page, 617 struct scan_control *sc) 618 { 619 int referenced_ptes, referenced_page; 620 unsigned long vm_flags; 621 622 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup, 623 &vm_flags); 624 referenced_page = TestClearPageReferenced(page); 625 626 /* 627 * Mlock lost the isolation race with us. Let try_to_unmap() 628 * move the page to the unevictable list. 629 */ 630 if (vm_flags & VM_LOCKED) 631 return PAGEREF_RECLAIM; 632 633 if (referenced_ptes) { 634 if (PageSwapBacked(page)) 635 return PAGEREF_ACTIVATE; 636 /* 637 * All mapped pages start out with page table 638 * references from the instantiating fault, so we need 639 * to look twice if a mapped file page is used more 640 * than once. 641 * 642 * Mark it and spare it for another trip around the 643 * inactive list. Another page table reference will 644 * lead to its activation. 645 * 646 * Note: the mark is set for activated pages as well 647 * so that recently deactivated but used pages are 648 * quickly recovered. 649 */ 650 SetPageReferenced(page); 651 652 if (referenced_page || referenced_ptes > 1) 653 return PAGEREF_ACTIVATE; 654 655 /* 656 * Activate file-backed executable pages after first usage. 
657 */ 658 if (vm_flags & VM_EXEC) 659 return PAGEREF_ACTIVATE; 660 661 return PAGEREF_KEEP; 662 } 663 664 /* Reclaim if clean, defer dirty pages to writeback */ 665 if (referenced_page && !PageSwapBacked(page)) 666 return PAGEREF_RECLAIM_CLEAN; 667 668 return PAGEREF_RECLAIM; 669 } 670 671 /* 672 * shrink_page_list() returns the number of reclaimed pages 673 */ 674 static unsigned long shrink_page_list(struct list_head *page_list, 675 struct zone *zone, 676 struct scan_control *sc, 677 unsigned long *ret_nr_dirty, 678 unsigned long *ret_nr_writeback) 679 { 680 LIST_HEAD(ret_pages); 681 LIST_HEAD(free_pages); 682 int pgactivate = 0; 683 unsigned long nr_dirty = 0; 684 unsigned long nr_congested = 0; 685 unsigned long nr_reclaimed = 0; 686 unsigned long nr_writeback = 0; 687 688 cond_resched(); 689 690 while (!list_empty(page_list)) { 691 enum page_references references; 692 struct address_space *mapping; 693 struct page *page; 694 int may_enter_fs; 695 696 cond_resched(); 697 698 page = lru_to_page(page_list); 699 list_del(&page->lru); 700 701 if (!trylock_page(page)) 702 goto keep; 703 704 VM_BUG_ON(PageActive(page)); 705 VM_BUG_ON(page_zone(page) != zone); 706 707 sc->nr_scanned++; 708 709 if (unlikely(!page_evictable(page, NULL))) 710 goto cull_mlocked; 711 712 if (!sc->may_unmap && page_mapped(page)) 713 goto keep_locked; 714 715 /* Double the slab pressure for mapped and swapcache pages */ 716 if (page_mapped(page) || PageSwapCache(page)) 717 sc->nr_scanned++; 718 719 may_enter_fs = (sc->gfp_mask & __GFP_FS) || 720 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); 721 722 if (PageWriteback(page)) { 723 nr_writeback++; 724 unlock_page(page); 725 goto keep; 726 } 727 728 references = page_check_references(page, sc); 729 switch (references) { 730 case PAGEREF_ACTIVATE: 731 goto activate_locked; 732 case PAGEREF_KEEP: 733 goto keep_locked; 734 case PAGEREF_RECLAIM: 735 case PAGEREF_RECLAIM_CLEAN: 736 ; /* try to reclaim the page below */ 737 } 738 739 /* 740 * Anonymous process memory has backing store? 741 * Try to allocate it some swap space here. 742 */ 743 if (PageAnon(page) && !PageSwapCache(page)) { 744 if (!(sc->gfp_mask & __GFP_IO)) 745 goto keep_locked; 746 if (!add_to_swap(page)) 747 goto activate_locked; 748 may_enter_fs = 1; 749 } 750 751 mapping = page_mapping(page); 752 753 /* 754 * The page is mapped into the page tables of one or more 755 * processes. Try to unmap it here. 756 */ 757 if (page_mapped(page) && mapping) { 758 switch (try_to_unmap(page, TTU_UNMAP)) { 759 case SWAP_FAIL: 760 goto activate_locked; 761 case SWAP_AGAIN: 762 goto keep_locked; 763 case SWAP_MLOCK: 764 goto cull_mlocked; 765 case SWAP_SUCCESS: 766 ; /* try to free the page below */ 767 } 768 } 769 770 if (PageDirty(page)) { 771 nr_dirty++; 772 773 /* 774 * Only kswapd can writeback filesystem pages to 775 * avoid risk of stack overflow but do not writeback 776 * unless under significant pressure. 777 */ 778 if (page_is_file_cache(page) && 779 (!current_is_kswapd() || 780 sc->priority >= DEF_PRIORITY - 2)) { 781 /* 782 * Immediately reclaim when written back. 
783 * Similar in principal to deactivate_page() 784 * except we already have the page isolated 785 * and know it's dirty 786 */ 787 inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE); 788 SetPageReclaim(page); 789 790 goto keep_locked; 791 } 792 793 if (references == PAGEREF_RECLAIM_CLEAN) 794 goto keep_locked; 795 if (!may_enter_fs) 796 goto keep_locked; 797 if (!sc->may_writepage) 798 goto keep_locked; 799 800 /* Page is dirty, try to write it out here */ 801 switch (pageout(page, mapping, sc)) { 802 case PAGE_KEEP: 803 nr_congested++; 804 goto keep_locked; 805 case PAGE_ACTIVATE: 806 goto activate_locked; 807 case PAGE_SUCCESS: 808 if (PageWriteback(page)) 809 goto keep; 810 if (PageDirty(page)) 811 goto keep; 812 813 /* 814 * A synchronous write - probably a ramdisk. Go 815 * ahead and try to reclaim the page. 816 */ 817 if (!trylock_page(page)) 818 goto keep; 819 if (PageDirty(page) || PageWriteback(page)) 820 goto keep_locked; 821 mapping = page_mapping(page); 822 case PAGE_CLEAN: 823 ; /* try to free the page below */ 824 } 825 } 826 827 /* 828 * If the page has buffers, try to free the buffer mappings 829 * associated with this page. If we succeed we try to free 830 * the page as well. 831 * 832 * We do this even if the page is PageDirty(). 833 * try_to_release_page() does not perform I/O, but it is 834 * possible for a page to have PageDirty set, but it is actually 835 * clean (all its buffers are clean). This happens if the 836 * buffers were written out directly, with submit_bh(). ext3 837 * will do this, as well as the blockdev mapping. 838 * try_to_release_page() will discover that cleanness and will 839 * drop the buffers and mark the page clean - it can be freed. 840 * 841 * Rarely, pages can have buffers and no ->mapping. These are 842 * the pages which were not successfully invalidated in 843 * truncate_complete_page(). We try to drop those buffers here 844 * and if that worked, and the page is no longer mapped into 845 * process address space (page_count == 1) it can be freed. 846 * Otherwise, leave the page on the LRU so it is swappable. 847 */ 848 if (page_has_private(page)) { 849 if (!try_to_release_page(page, sc->gfp_mask)) 850 goto activate_locked; 851 if (!mapping && page_count(page) == 1) { 852 unlock_page(page); 853 if (put_page_testzero(page)) 854 goto free_it; 855 else { 856 /* 857 * rare race with speculative reference. 858 * the speculative reference will free 859 * this page shortly, so we may 860 * increment nr_reclaimed here (and 861 * leave it off the LRU). 862 */ 863 nr_reclaimed++; 864 continue; 865 } 866 } 867 } 868 869 if (!mapping || !__remove_mapping(mapping, page)) 870 goto keep_locked; 871 872 /* 873 * At this point, we have no other references and there is 874 * no way to pick any more up (removed from LRU, removed 875 * from pagecache). Can use non-atomic bitops now (and 876 * we obviously don't have to worry about waking up a process 877 * waiting on the page lock, because there are no references. 878 */ 879 __clear_page_locked(page); 880 free_it: 881 nr_reclaimed++; 882 883 /* 884 * Is there need to periodically free_page_list? It would 885 * appear not as the counts should be low 886 */ 887 list_add(&page->lru, &free_pages); 888 continue; 889 890 cull_mlocked: 891 if (PageSwapCache(page)) 892 try_to_free_swap(page); 893 unlock_page(page); 894 putback_lru_page(page); 895 continue; 896 897 activate_locked: 898 /* Not a candidate for swapping, so reclaim swap space. 
*/ 899 if (PageSwapCache(page) && vm_swap_full()) 900 try_to_free_swap(page); 901 VM_BUG_ON(PageActive(page)); 902 SetPageActive(page); 903 pgactivate++; 904 keep_locked: 905 unlock_page(page); 906 keep: 907 list_add(&page->lru, &ret_pages); 908 VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); 909 } 910 911 /* 912 * Tag a zone as congested if all the dirty pages encountered were 913 * backed by a congested BDI. In this case, reclaimers should just 914 * back off and wait for congestion to clear because further reclaim 915 * will encounter the same problem 916 */ 917 if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc)) 918 zone_set_flag(zone, ZONE_CONGESTED); 919 920 free_hot_cold_page_list(&free_pages, 1); 921 922 list_splice(&ret_pages, page_list); 923 count_vm_events(PGACTIVATE, pgactivate); 924 *ret_nr_dirty += nr_dirty; 925 *ret_nr_writeback += nr_writeback; 926 return nr_reclaimed; 927 } 928 929 /* 930 * Attempt to remove the specified page from its LRU. Only take this page 931 * if it is of the appropriate PageActive status. Pages which are being 932 * freed elsewhere are also ignored. 933 * 934 * page: page to consider 935 * mode: one of the LRU isolation modes defined above 936 * 937 * returns 0 on success, -ve errno on failure. 938 */ 939 int __isolate_lru_page(struct page *page, isolate_mode_t mode) 940 { 941 int ret = -EINVAL; 942 943 /* Only take pages on the LRU. */ 944 if (!PageLRU(page)) 945 return ret; 946 947 /* Do not give back unevictable pages for compaction */ 948 if (PageUnevictable(page)) 949 return ret; 950 951 ret = -EBUSY; 952 953 /* 954 * To minimise LRU disruption, the caller can indicate that it only 955 * wants to isolate pages it will be able to operate on without 956 * blocking - clean pages for the most part. 957 * 958 * ISOLATE_CLEAN means that only clean pages should be isolated. This 959 * is used by reclaim when it is cannot write to backing storage 960 * 961 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages 962 * that it is possible to migrate without blocking 963 */ 964 if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) { 965 /* All the caller can do on PageWriteback is block */ 966 if (PageWriteback(page)) 967 return ret; 968 969 if (PageDirty(page)) { 970 struct address_space *mapping; 971 972 /* ISOLATE_CLEAN means only clean pages */ 973 if (mode & ISOLATE_CLEAN) 974 return ret; 975 976 /* 977 * Only pages without mappings or that have a 978 * ->migratepage callback are possible to migrate 979 * without blocking 980 */ 981 mapping = page_mapping(page); 982 if (mapping && !mapping->a_ops->migratepage) 983 return ret; 984 } 985 } 986 987 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page)) 988 return ret; 989 990 if (likely(get_page_unless_zero(page))) { 991 /* 992 * Be careful not to clear PageLRU until after we're 993 * sure the page is not being freed elsewhere -- the 994 * page release code relies on it. 995 */ 996 ClearPageLRU(page); 997 ret = 0; 998 } 999 1000 return ret; 1001 } 1002 1003 /* 1004 * zone->lru_lock is heavily contended. Some of the functions that 1005 * shrink the lists perform better by taking out a batch of pages 1006 * and working on them outside the LRU lock. 1007 * 1008 * For pagecache intensive workloads, this function is the hottest 1009 * spot in the kernel (apart from copy_*_user functions). 1010 * 1011 * Appropriate locks must be held before calling this function. 1012 * 1013 * @nr_to_scan: The number of pages to look through on the list. 
1014 * @lruvec: The LRU vector to pull pages from. 1015 * @dst: The temp list to put pages on to. 1016 * @nr_scanned: The number of pages that were scanned. 1017 * @sc: The scan_control struct for this reclaim session 1018 * @mode: One of the LRU isolation modes 1019 * @lru: LRU list id for isolating 1020 * 1021 * returns how many pages were moved onto *@dst. 1022 */ 1023 static unsigned long isolate_lru_pages(unsigned long nr_to_scan, 1024 struct lruvec *lruvec, struct list_head *dst, 1025 unsigned long *nr_scanned, struct scan_control *sc, 1026 isolate_mode_t mode, enum lru_list lru) 1027 { 1028 struct list_head *src = &lruvec->lists[lru]; 1029 unsigned long nr_taken = 0; 1030 unsigned long scan; 1031 1032 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { 1033 struct page *page; 1034 int nr_pages; 1035 1036 page = lru_to_page(src); 1037 prefetchw_prev_lru_page(page, src, flags); 1038 1039 VM_BUG_ON(!PageLRU(page)); 1040 1041 switch (__isolate_lru_page(page, mode)) { 1042 case 0: 1043 nr_pages = hpage_nr_pages(page); 1044 mem_cgroup_update_lru_size(lruvec, lru, -nr_pages); 1045 list_move(&page->lru, dst); 1046 nr_taken += nr_pages; 1047 break; 1048 1049 case -EBUSY: 1050 /* else it is being freed elsewhere */ 1051 list_move(&page->lru, src); 1052 continue; 1053 1054 default: 1055 BUG(); 1056 } 1057 } 1058 1059 *nr_scanned = scan; 1060 trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan, 1061 nr_taken, mode, is_file_lru(lru)); 1062 return nr_taken; 1063 } 1064 1065 /** 1066 * isolate_lru_page - tries to isolate a page from its LRU list 1067 * @page: page to isolate from its LRU list 1068 * 1069 * Isolates a @page from an LRU list, clears PageLRU and adjusts the 1070 * vmstat statistic corresponding to whatever LRU list the page was on. 1071 * 1072 * Returns 0 if the page was removed from an LRU list. 1073 * Returns -EBUSY if the page was not on an LRU list. 1074 * 1075 * The returned page will have PageLRU() cleared. If it was found on 1076 * the active list, it will have PageActive set. If it was found on 1077 * the unevictable list, it will have the PageUnevictable bit set. That flag 1078 * may need to be cleared by the caller before letting the page go. 1079 * 1080 * The vmstat statistic corresponding to the list on which the page was 1081 * found will be decremented. 1082 * 1083 * Restrictions: 1084 * (1) Must be called with an elevated refcount on the page. This is a 1085 * fundamentnal difference from isolate_lru_pages (which is called 1086 * without a stable reference). 1087 * (2) the lru_lock must not be held. 1088 * (3) interrupts must be enabled. 1089 */ 1090 int isolate_lru_page(struct page *page) 1091 { 1092 int ret = -EBUSY; 1093 1094 VM_BUG_ON(!page_count(page)); 1095 1096 if (PageLRU(page)) { 1097 struct zone *zone = page_zone(page); 1098 struct lruvec *lruvec; 1099 1100 spin_lock_irq(&zone->lru_lock); 1101 lruvec = mem_cgroup_page_lruvec(page, zone); 1102 if (PageLRU(page)) { 1103 int lru = page_lru(page); 1104 get_page(page); 1105 ClearPageLRU(page); 1106 del_page_from_lru_list(page, lruvec, lru); 1107 ret = 0; 1108 } 1109 spin_unlock_irq(&zone->lru_lock); 1110 } 1111 return ret; 1112 } 1113 1114 /* 1115 * Are there way too many processes in the direct reclaim path already? 
1116 */ 1117 static int too_many_isolated(struct zone *zone, int file, 1118 struct scan_control *sc) 1119 { 1120 unsigned long inactive, isolated; 1121 1122 if (current_is_kswapd()) 1123 return 0; 1124 1125 if (!global_reclaim(sc)) 1126 return 0; 1127 1128 if (file) { 1129 inactive = zone_page_state(zone, NR_INACTIVE_FILE); 1130 isolated = zone_page_state(zone, NR_ISOLATED_FILE); 1131 } else { 1132 inactive = zone_page_state(zone, NR_INACTIVE_ANON); 1133 isolated = zone_page_state(zone, NR_ISOLATED_ANON); 1134 } 1135 1136 return isolated > inactive; 1137 } 1138 1139 static noinline_for_stack void 1140 putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list) 1141 { 1142 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 1143 struct zone *zone = lruvec_zone(lruvec); 1144 LIST_HEAD(pages_to_free); 1145 1146 /* 1147 * Put back any unfreeable pages. 1148 */ 1149 while (!list_empty(page_list)) { 1150 struct page *page = lru_to_page(page_list); 1151 int lru; 1152 1153 VM_BUG_ON(PageLRU(page)); 1154 list_del(&page->lru); 1155 if (unlikely(!page_evictable(page, NULL))) { 1156 spin_unlock_irq(&zone->lru_lock); 1157 putback_lru_page(page); 1158 spin_lock_irq(&zone->lru_lock); 1159 continue; 1160 } 1161 1162 lruvec = mem_cgroup_page_lruvec(page, zone); 1163 1164 SetPageLRU(page); 1165 lru = page_lru(page); 1166 add_page_to_lru_list(page, lruvec, lru); 1167 1168 if (is_active_lru(lru)) { 1169 int file = is_file_lru(lru); 1170 int numpages = hpage_nr_pages(page); 1171 reclaim_stat->recent_rotated[file] += numpages; 1172 } 1173 if (put_page_testzero(page)) { 1174 __ClearPageLRU(page); 1175 __ClearPageActive(page); 1176 del_page_from_lru_list(page, lruvec, lru); 1177 1178 if (unlikely(PageCompound(page))) { 1179 spin_unlock_irq(&zone->lru_lock); 1180 (*get_compound_page_dtor(page))(page); 1181 spin_lock_irq(&zone->lru_lock); 1182 } else 1183 list_add(&page->lru, &pages_to_free); 1184 } 1185 } 1186 1187 /* 1188 * To save our caller's stack, now use input list for pages to free. 1189 */ 1190 list_splice(&pages_to_free, page_list); 1191 } 1192 1193 /* 1194 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number 1195 * of reclaimed pages 1196 */ 1197 static noinline_for_stack unsigned long 1198 shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, 1199 struct scan_control *sc, enum lru_list lru) 1200 { 1201 LIST_HEAD(page_list); 1202 unsigned long nr_scanned; 1203 unsigned long nr_reclaimed = 0; 1204 unsigned long nr_taken; 1205 unsigned long nr_dirty = 0; 1206 unsigned long nr_writeback = 0; 1207 isolate_mode_t isolate_mode = 0; 1208 int file = is_file_lru(lru); 1209 struct zone *zone = lruvec_zone(lruvec); 1210 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 1211 1212 while (unlikely(too_many_isolated(zone, file, sc))) { 1213 congestion_wait(BLK_RW_ASYNC, HZ/10); 1214 1215 /* We are about to die and free our memory. Return now. 
*/ 1216 if (fatal_signal_pending(current)) 1217 return SWAP_CLUSTER_MAX; 1218 } 1219 1220 lru_add_drain(); 1221 1222 if (!sc->may_unmap) 1223 isolate_mode |= ISOLATE_UNMAPPED; 1224 if (!sc->may_writepage) 1225 isolate_mode |= ISOLATE_CLEAN; 1226 1227 spin_lock_irq(&zone->lru_lock); 1228 1229 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list, 1230 &nr_scanned, sc, isolate_mode, lru); 1231 1232 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken); 1233 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); 1234 1235 if (global_reclaim(sc)) { 1236 zone->pages_scanned += nr_scanned; 1237 if (current_is_kswapd()) 1238 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned); 1239 else 1240 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned); 1241 } 1242 spin_unlock_irq(&zone->lru_lock); 1243 1244 if (nr_taken == 0) 1245 return 0; 1246 1247 nr_reclaimed = shrink_page_list(&page_list, zone, sc, 1248 &nr_dirty, &nr_writeback); 1249 1250 spin_lock_irq(&zone->lru_lock); 1251 1252 reclaim_stat->recent_scanned[file] += nr_taken; 1253 1254 if (global_reclaim(sc)) { 1255 if (current_is_kswapd()) 1256 __count_zone_vm_events(PGSTEAL_KSWAPD, zone, 1257 nr_reclaimed); 1258 else 1259 __count_zone_vm_events(PGSTEAL_DIRECT, zone, 1260 nr_reclaimed); 1261 } 1262 1263 putback_inactive_pages(lruvec, &page_list); 1264 1265 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); 1266 1267 spin_unlock_irq(&zone->lru_lock); 1268 1269 free_hot_cold_page_list(&page_list, 1); 1270 1271 /* 1272 * If reclaim is isolating dirty pages under writeback, it implies 1273 * that the long-lived page allocation rate is exceeding the page 1274 * laundering rate. Either the global limits are not being effective 1275 * at throttling processes due to the page distribution throughout 1276 * zones or there is heavy usage of a slow backing device. The 1277 * only option is to throttle from reclaim context which is not ideal 1278 * as there is no guarantee the dirtying process is throttled in the 1279 * same way balance_dirty_pages() manages. 1280 * 1281 * This scales the number of dirty pages that must be under writeback 1282 * before throttling depending on priority. It is a simple backoff 1283 * function that has the most effect in the range DEF_PRIORITY to 1284 * DEF_PRIORITY-2 which is the priority reclaim is considered to be 1285 * in trouble and reclaim is considered to be in trouble. 1286 * 1287 * DEF_PRIORITY 100% isolated pages must be PageWriteback to throttle 1288 * DEF_PRIORITY-1 50% must be PageWriteback 1289 * DEF_PRIORITY-2 25% must be PageWriteback, kswapd in trouble 1290 * ... 1291 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any 1292 * isolated page is PageWriteback 1293 */ 1294 if (nr_writeback && nr_writeback >= 1295 (nr_taken >> (DEF_PRIORITY - sc->priority))) 1296 wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10); 1297 1298 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id, 1299 zone_idx(zone), 1300 nr_scanned, nr_reclaimed, 1301 sc->priority, 1302 trace_shrink_flags(file)); 1303 return nr_reclaimed; 1304 } 1305 1306 /* 1307 * This moves pages from the active list to the inactive list. 1308 * 1309 * We move them the other way if the page is referenced by one or more 1310 * processes, from rmap. 1311 * 1312 * If the pages are mostly unmapped, the processing is fast and it is 1313 * appropriate to hold zone->lru_lock across the whole operation. 
But if 1314 * the pages are mapped, the processing is slow (page_referenced()) so we 1315 * should drop zone->lru_lock around each page. It's impossible to balance 1316 * this, so instead we remove the pages from the LRU while processing them. 1317 * It is safe to rely on PG_active against the non-LRU pages in here because 1318 * nobody will play with that bit on a non-LRU page. 1319 * 1320 * The downside is that we have to touch page->_count against each page. 1321 * But we had to alter page->flags anyway. 1322 */ 1323 1324 static void move_active_pages_to_lru(struct lruvec *lruvec, 1325 struct list_head *list, 1326 struct list_head *pages_to_free, 1327 enum lru_list lru) 1328 { 1329 struct zone *zone = lruvec_zone(lruvec); 1330 unsigned long pgmoved = 0; 1331 struct page *page; 1332 int nr_pages; 1333 1334 while (!list_empty(list)) { 1335 page = lru_to_page(list); 1336 lruvec = mem_cgroup_page_lruvec(page, zone); 1337 1338 VM_BUG_ON(PageLRU(page)); 1339 SetPageLRU(page); 1340 1341 nr_pages = hpage_nr_pages(page); 1342 mem_cgroup_update_lru_size(lruvec, lru, nr_pages); 1343 list_move(&page->lru, &lruvec->lists[lru]); 1344 pgmoved += nr_pages; 1345 1346 if (put_page_testzero(page)) { 1347 __ClearPageLRU(page); 1348 __ClearPageActive(page); 1349 del_page_from_lru_list(page, lruvec, lru); 1350 1351 if (unlikely(PageCompound(page))) { 1352 spin_unlock_irq(&zone->lru_lock); 1353 (*get_compound_page_dtor(page))(page); 1354 spin_lock_irq(&zone->lru_lock); 1355 } else 1356 list_add(&page->lru, pages_to_free); 1357 } 1358 } 1359 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); 1360 if (!is_active_lru(lru)) 1361 __count_vm_events(PGDEACTIVATE, pgmoved); 1362 } 1363 1364 static void shrink_active_list(unsigned long nr_to_scan, 1365 struct lruvec *lruvec, 1366 struct scan_control *sc, 1367 enum lru_list lru) 1368 { 1369 unsigned long nr_taken; 1370 unsigned long nr_scanned; 1371 unsigned long vm_flags; 1372 LIST_HEAD(l_hold); /* The pages which were snipped off */ 1373 LIST_HEAD(l_active); 1374 LIST_HEAD(l_inactive); 1375 struct page *page; 1376 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 1377 unsigned long nr_rotated = 0; 1378 isolate_mode_t isolate_mode = 0; 1379 int file = is_file_lru(lru); 1380 struct zone *zone = lruvec_zone(lruvec); 1381 1382 lru_add_drain(); 1383 1384 if (!sc->may_unmap) 1385 isolate_mode |= ISOLATE_UNMAPPED; 1386 if (!sc->may_writepage) 1387 isolate_mode |= ISOLATE_CLEAN; 1388 1389 spin_lock_irq(&zone->lru_lock); 1390 1391 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold, 1392 &nr_scanned, sc, isolate_mode, lru); 1393 if (global_reclaim(sc)) 1394 zone->pages_scanned += nr_scanned; 1395 1396 reclaim_stat->recent_scanned[file] += nr_taken; 1397 1398 __count_zone_vm_events(PGREFILL, zone, nr_scanned); 1399 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken); 1400 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); 1401 spin_unlock_irq(&zone->lru_lock); 1402 1403 while (!list_empty(&l_hold)) { 1404 cond_resched(); 1405 page = lru_to_page(&l_hold); 1406 list_del(&page->lru); 1407 1408 if (unlikely(!page_evictable(page, NULL))) { 1409 putback_lru_page(page); 1410 continue; 1411 } 1412 1413 if (unlikely(buffer_heads_over_limit)) { 1414 if (page_has_private(page) && trylock_page(page)) { 1415 if (page_has_private(page)) 1416 try_to_release_page(page, 0); 1417 unlock_page(page); 1418 } 1419 } 1420 1421 if (page_referenced(page, 0, sc->target_mem_cgroup, 1422 &vm_flags)) { 1423 nr_rotated += hpage_nr_pages(page); 1424 /* 1425 * 
Identify referenced, file-backed active pages and 1426 * give them one more trip around the active list. So 1427 * that executable code get better chances to stay in 1428 * memory under moderate memory pressure. Anon pages 1429 * are not likely to be evicted by use-once streaming 1430 * IO, plus JVM can create lots of anon VM_EXEC pages, 1431 * so we ignore them here. 1432 */ 1433 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { 1434 list_add(&page->lru, &l_active); 1435 continue; 1436 } 1437 } 1438 1439 ClearPageActive(page); /* we are de-activating */ 1440 list_add(&page->lru, &l_inactive); 1441 } 1442 1443 /* 1444 * Move pages back to the lru list. 1445 */ 1446 spin_lock_irq(&zone->lru_lock); 1447 /* 1448 * Count referenced pages from currently used mappings as rotated, 1449 * even though only some of them are actually re-activated. This 1450 * helps balance scan pressure between file and anonymous pages in 1451 * get_scan_ratio. 1452 */ 1453 reclaim_stat->recent_rotated[file] += nr_rotated; 1454 1455 move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru); 1456 move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE); 1457 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); 1458 spin_unlock_irq(&zone->lru_lock); 1459 1460 free_hot_cold_page_list(&l_hold, 1); 1461 } 1462 1463 #ifdef CONFIG_SWAP 1464 static int inactive_anon_is_low_global(struct zone *zone) 1465 { 1466 unsigned long active, inactive; 1467 1468 active = zone_page_state(zone, NR_ACTIVE_ANON); 1469 inactive = zone_page_state(zone, NR_INACTIVE_ANON); 1470 1471 if (inactive * zone->inactive_ratio < active) 1472 return 1; 1473 1474 return 0; 1475 } 1476 1477 /** 1478 * inactive_anon_is_low - check if anonymous pages need to be deactivated 1479 * @lruvec: LRU vector to check 1480 * 1481 * Returns true if the zone does not have enough inactive anon pages, 1482 * meaning some active anon pages need to be deactivated. 1483 */ 1484 static int inactive_anon_is_low(struct lruvec *lruvec) 1485 { 1486 /* 1487 * If we don't have swap space, anonymous page deactivation 1488 * is pointless. 1489 */ 1490 if (!total_swap_pages) 1491 return 0; 1492 1493 if (!mem_cgroup_disabled()) 1494 return mem_cgroup_inactive_anon_is_low(lruvec); 1495 1496 return inactive_anon_is_low_global(lruvec_zone(lruvec)); 1497 } 1498 #else 1499 static inline int inactive_anon_is_low(struct lruvec *lruvec) 1500 { 1501 return 0; 1502 } 1503 #endif 1504 1505 static int inactive_file_is_low_global(struct zone *zone) 1506 { 1507 unsigned long active, inactive; 1508 1509 active = zone_page_state(zone, NR_ACTIVE_FILE); 1510 inactive = zone_page_state(zone, NR_INACTIVE_FILE); 1511 1512 return (active > inactive); 1513 } 1514 1515 /** 1516 * inactive_file_is_low - check if file pages need to be deactivated 1517 * @lruvec: LRU vector to check 1518 * 1519 * When the system is doing streaming IO, memory pressure here 1520 * ensures that active file pages get deactivated, until more 1521 * than half of the file pages are on the inactive list. 1522 * 1523 * Once we get to that situation, protect the system's working 1524 * set from being evicted by disabling active file page aging. 1525 * 1526 * This uses a different ratio than the anonymous pages, because 1527 * the page cache uses a use-once replacement algorithm. 
1528 */ 1529 static int inactive_file_is_low(struct lruvec *lruvec) 1530 { 1531 if (!mem_cgroup_disabled()) 1532 return mem_cgroup_inactive_file_is_low(lruvec); 1533 1534 return inactive_file_is_low_global(lruvec_zone(lruvec)); 1535 } 1536 1537 static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru) 1538 { 1539 if (is_file_lru(lru)) 1540 return inactive_file_is_low(lruvec); 1541 else 1542 return inactive_anon_is_low(lruvec); 1543 } 1544 1545 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 1546 struct lruvec *lruvec, struct scan_control *sc) 1547 { 1548 if (is_active_lru(lru)) { 1549 if (inactive_list_is_low(lruvec, lru)) 1550 shrink_active_list(nr_to_scan, lruvec, sc, lru); 1551 return 0; 1552 } 1553 1554 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); 1555 } 1556 1557 static int vmscan_swappiness(struct scan_control *sc) 1558 { 1559 if (global_reclaim(sc)) 1560 return vm_swappiness; 1561 return mem_cgroup_swappiness(sc->target_mem_cgroup); 1562 } 1563 1564 /* 1565 * Determine how aggressively the anon and file LRU lists should be 1566 * scanned. The relative value of each set of LRU lists is determined 1567 * by looking at the fraction of the pages scanned we did rotate back 1568 * onto the active list instead of evict. 1569 * 1570 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan 1571 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan 1572 */ 1573 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, 1574 unsigned long *nr) 1575 { 1576 unsigned long anon, file, free; 1577 unsigned long anon_prio, file_prio; 1578 unsigned long ap, fp; 1579 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 1580 u64 fraction[2], denominator; 1581 enum lru_list lru; 1582 int noswap = 0; 1583 bool force_scan = false; 1584 struct zone *zone = lruvec_zone(lruvec); 1585 1586 /* 1587 * If the zone or memcg is small, nr[l] can be 0. This 1588 * results in no scanning on this priority and a potential 1589 * priority drop. Global direct reclaim can go to the next 1590 * zone and tends to have no problems. Global kswapd is for 1591 * zone balancing and it needs to scan a minimum amount. When 1592 * reclaiming for a memcg, a priority drop can cause high 1593 * latencies, so it's better to scan a minimum amount there as 1594 * well. 1595 */ 1596 if (current_is_kswapd() && zone->all_unreclaimable) 1597 force_scan = true; 1598 if (!global_reclaim(sc)) 1599 force_scan = true; 1600 1601 /* If we have no swap space, do not bother scanning anon pages. */ 1602 if (!sc->may_swap || (nr_swap_pages <= 0)) { 1603 noswap = 1; 1604 fraction[0] = 0; 1605 fraction[1] = 1; 1606 denominator = 1; 1607 goto out; 1608 } 1609 1610 anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) + 1611 get_lru_size(lruvec, LRU_INACTIVE_ANON); 1612 file = get_lru_size(lruvec, LRU_ACTIVE_FILE) + 1613 get_lru_size(lruvec, LRU_INACTIVE_FILE); 1614 1615 if (global_reclaim(sc)) { 1616 free = zone_page_state(zone, NR_FREE_PAGES); 1617 /* If we have very few page cache pages, 1618 force-scan anon pages. */ 1619 if (unlikely(file + free <= high_wmark_pages(zone))) { 1620 fraction[0] = 1; 1621 fraction[1] = 0; 1622 denominator = 1; 1623 goto out; 1624 } 1625 } 1626 1627 /* 1628 * With swappiness at 100, anonymous and file have the same priority. 1629 * This scanning priority is essentially the inverse of IO cost. 
1630 */ 1631 anon_prio = vmscan_swappiness(sc); 1632 file_prio = 200 - anon_prio; 1633 1634 /* 1635 * OK, so we have swap space and a fair amount of page cache 1636 * pages. We use the recently rotated / recently scanned 1637 * ratios to determine how valuable each cache is. 1638 * 1639 * Because workloads change over time (and to avoid overflow) 1640 * we keep these statistics as a floating average, which ends 1641 * up weighing recent references more than old ones. 1642 * 1643 * anon in [0], file in [1] 1644 */ 1645 spin_lock_irq(&zone->lru_lock); 1646 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { 1647 reclaim_stat->recent_scanned[0] /= 2; 1648 reclaim_stat->recent_rotated[0] /= 2; 1649 } 1650 1651 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { 1652 reclaim_stat->recent_scanned[1] /= 2; 1653 reclaim_stat->recent_rotated[1] /= 2; 1654 } 1655 1656 /* 1657 * The amount of pressure on anon vs file pages is inversely 1658 * proportional to the fraction of recently scanned pages on 1659 * each list that were recently referenced and in active use. 1660 */ 1661 ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1); 1662 ap /= reclaim_stat->recent_rotated[0] + 1; 1663 1664 fp = file_prio * (reclaim_stat->recent_scanned[1] + 1); 1665 fp /= reclaim_stat->recent_rotated[1] + 1; 1666 spin_unlock_irq(&zone->lru_lock); 1667 1668 fraction[0] = ap; 1669 fraction[1] = fp; 1670 denominator = ap + fp + 1; 1671 out: 1672 for_each_evictable_lru(lru) { 1673 int file = is_file_lru(lru); 1674 unsigned long scan; 1675 1676 scan = get_lru_size(lruvec, lru); 1677 if (sc->priority || noswap || !vmscan_swappiness(sc)) { 1678 scan >>= sc->priority; 1679 if (!scan && force_scan) 1680 scan = SWAP_CLUSTER_MAX; 1681 scan = div64_u64(scan * fraction[file], denominator); 1682 } 1683 nr[lru] = scan; 1684 } 1685 } 1686 1687 /* Use reclaim/compaction for costly allocs or under memory pressure */ 1688 static bool in_reclaim_compaction(struct scan_control *sc) 1689 { 1690 if (COMPACTION_BUILD && sc->order && 1691 (sc->order > PAGE_ALLOC_COSTLY_ORDER || 1692 sc->priority < DEF_PRIORITY - 2)) 1693 return true; 1694 1695 return false; 1696 } 1697 1698 /* 1699 * Reclaim/compaction is used for high-order allocation requests. It reclaims 1700 * order-0 pages before compacting the zone. should_continue_reclaim() returns 1701 * true if more pages should be reclaimed such that when the page allocator 1702 * calls try_to_compact_zone() that it will have enough free pages to succeed. 1703 * It will give up earlier than that if there is difficulty reclaiming pages. 1704 */ 1705 static inline bool should_continue_reclaim(struct lruvec *lruvec, 1706 unsigned long nr_reclaimed, 1707 unsigned long nr_scanned, 1708 struct scan_control *sc) 1709 { 1710 unsigned long pages_for_compaction; 1711 unsigned long inactive_lru_pages; 1712 1713 /* If not in reclaim/compaction mode, stop */ 1714 if (!in_reclaim_compaction(sc)) 1715 return false; 1716 1717 /* Consider stopping depending on scan and reclaim activity */ 1718 if (sc->gfp_mask & __GFP_REPEAT) { 1719 /* 1720 * For __GFP_REPEAT allocations, stop reclaiming if the 1721 * full LRU list has been scanned and we are still failing 1722 * to reclaim pages. 
This full LRU scan is potentially 1723 * expensive but a __GFP_REPEAT caller really wants to succeed 1724 */ 1725 if (!nr_reclaimed && !nr_scanned) 1726 return false; 1727 } else { 1728 /* 1729 * For non-__GFP_REPEAT allocations which can presumably 1730 * fail without consequence, stop if we failed to reclaim 1731 * any pages from the last SWAP_CLUSTER_MAX number of 1732 * pages that were scanned. This will return to the 1733 * caller faster at the risk reclaim/compaction and 1734 * the resulting allocation attempt fails 1735 */ 1736 if (!nr_reclaimed) 1737 return false; 1738 } 1739 1740 /* 1741 * If we have not reclaimed enough pages for compaction and the 1742 * inactive lists are large enough, continue reclaiming 1743 */ 1744 pages_for_compaction = (2UL << sc->order); 1745 inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE); 1746 if (nr_swap_pages > 0) 1747 inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON); 1748 if (sc->nr_reclaimed < pages_for_compaction && 1749 inactive_lru_pages > pages_for_compaction) 1750 return true; 1751 1752 /* If compaction would go ahead or the allocation would succeed, stop */ 1753 switch (compaction_suitable(lruvec_zone(lruvec), sc->order)) { 1754 case COMPACT_PARTIAL: 1755 case COMPACT_CONTINUE: 1756 return false; 1757 default: 1758 return true; 1759 } 1760 } 1761 1762 /* 1763 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 1764 */ 1765 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 1766 { 1767 unsigned long nr[NR_LRU_LISTS]; 1768 unsigned long nr_to_scan; 1769 enum lru_list lru; 1770 unsigned long nr_reclaimed, nr_scanned; 1771 unsigned long nr_to_reclaim = sc->nr_to_reclaim; 1772 struct blk_plug plug; 1773 1774 restart: 1775 nr_reclaimed = 0; 1776 nr_scanned = sc->nr_scanned; 1777 get_scan_count(lruvec, sc, nr); 1778 1779 blk_start_plug(&plug); 1780 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 1781 nr[LRU_INACTIVE_FILE]) { 1782 for_each_evictable_lru(lru) { 1783 if (nr[lru]) { 1784 nr_to_scan = min_t(unsigned long, 1785 nr[lru], SWAP_CLUSTER_MAX); 1786 nr[lru] -= nr_to_scan; 1787 1788 nr_reclaimed += shrink_list(lru, nr_to_scan, 1789 lruvec, sc); 1790 } 1791 } 1792 /* 1793 * On large memory systems, scan >> priority can become 1794 * really large. This is fine for the starting priority; 1795 * we want to put equal scanning pressure on each zone. 1796 * However, if the VM has a harder time of freeing pages, 1797 * with multiple processes reclaiming pages, the total 1798 * freeing target can get unreasonably large. 1799 */ 1800 if (nr_reclaimed >= nr_to_reclaim && 1801 sc->priority < DEF_PRIORITY) 1802 break; 1803 } 1804 blk_finish_plug(&plug); 1805 sc->nr_reclaimed += nr_reclaimed; 1806 1807 /* 1808 * Even if we did not try to evict anon pages at all, we want to 1809 * rebalance the anon lru active/inactive ratio. 
1810 */ 1811 if (inactive_anon_is_low(lruvec)) 1812 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 1813 sc, LRU_ACTIVE_ANON); 1814 1815 /* reclaim/compaction might need reclaim to continue */ 1816 if (should_continue_reclaim(lruvec, nr_reclaimed, 1817 sc->nr_scanned - nr_scanned, sc)) 1818 goto restart; 1819 1820 throttle_vm_writeout(sc->gfp_mask); 1821 } 1822 1823 static void shrink_zone(struct zone *zone, struct scan_control *sc) 1824 { 1825 struct mem_cgroup *root = sc->target_mem_cgroup; 1826 struct mem_cgroup_reclaim_cookie reclaim = { 1827 .zone = zone, 1828 .priority = sc->priority, 1829 }; 1830 struct mem_cgroup *memcg; 1831 1832 memcg = mem_cgroup_iter(root, NULL, &reclaim); 1833 do { 1834 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); 1835 1836 shrink_lruvec(lruvec, sc); 1837 1838 /* 1839 * Limit reclaim has historically picked one memcg and 1840 * scanned it with decreasing priority levels until 1841 * nr_to_reclaim had been reclaimed. This priority 1842 * cycle is thus over after a single memcg. 1843 * 1844 * Direct reclaim and kswapd, on the other hand, have 1845 * to scan all memory cgroups to fulfill the overall 1846 * scan target for the zone. 1847 */ 1848 if (!global_reclaim(sc)) { 1849 mem_cgroup_iter_break(root, memcg); 1850 break; 1851 } 1852 memcg = mem_cgroup_iter(root, memcg, &reclaim); 1853 } while (memcg); 1854 } 1855 1856 /* Returns true if compaction should go ahead for a high-order request */ 1857 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) 1858 { 1859 unsigned long balance_gap, watermark; 1860 bool watermark_ok; 1861 1862 /* Do not consider compaction for orders reclaim is meant to satisfy */ 1863 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER) 1864 return false; 1865 1866 /* 1867 * Compaction takes time to run and there are potentially other 1868 * callers using the pages just freed. Continue reclaiming until 1869 * there is a buffer of free pages available to give compaction 1870 * a reasonable chance of completing and allocating the page 1871 */ 1872 balance_gap = min(low_wmark_pages(zone), 1873 (zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / 1874 KSWAPD_ZONE_BALANCE_GAP_RATIO); 1875 watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order); 1876 watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0); 1877 1878 /* 1879 * If compaction is deferred, reclaim up to a point where 1880 * compaction will have a chance of success when re-enabled 1881 */ 1882 if (compaction_deferred(zone, sc->order)) 1883 return watermark_ok; 1884 1885 /* If compaction is not ready to start, keep reclaiming */ 1886 if (!compaction_suitable(zone, sc->order)) 1887 return false; 1888 1889 return watermark_ok; 1890 } 1891 1892 /* 1893 * This is the direct reclaim path, for page-allocating processes. We only 1894 * try to reclaim pages from zones which will satisfy the caller's allocation 1895 * request. 1896 * 1897 * We reclaim from a zone even if that zone is over high_wmark_pages(zone). 1898 * Because: 1899 * a) The caller may be trying to free *extra* pages to satisfy a higher-order 1900 * allocation or 1901 * b) The target zone may be at high_wmark_pages(zone) but the lower zones 1902 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min' 1903 * zone defense algorithm. 1904 * 1905 * If a zone is deemed to be full of pinned pages then just give it a light 1906 * scan then give up on it. 
1907 * 1908 * This function returns true if a zone is being reclaimed for a costly 1909 * high-order allocation and compaction is ready to begin. This indicates to 1910 * the caller that it should consider retrying the allocation instead of 1911 * further reclaim. 1912 */ 1913 static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) 1914 { 1915 struct zoneref *z; 1916 struct zone *zone; 1917 unsigned long nr_soft_reclaimed; 1918 unsigned long nr_soft_scanned; 1919 bool aborted_reclaim = false; 1920 1921 /* 1922 * If the number of buffer_heads in the machine exceeds the maximum 1923 * allowed level, force direct reclaim to scan the highmem zone as 1924 * highmem pages could be pinning lowmem pages storing buffer_heads 1925 */ 1926 if (buffer_heads_over_limit) 1927 sc->gfp_mask |= __GFP_HIGHMEM; 1928 1929 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1930 gfp_zone(sc->gfp_mask), sc->nodemask) { 1931 if (!populated_zone(zone)) 1932 continue; 1933 /* 1934 * Take care memory controller reclaiming has small influence 1935 * to global LRU. 1936 */ 1937 if (global_reclaim(sc)) { 1938 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1939 continue; 1940 if (zone->all_unreclaimable && 1941 sc->priority != DEF_PRIORITY) 1942 continue; /* Let kswapd poll it */ 1943 if (COMPACTION_BUILD) { 1944 /* 1945 * If we already have plenty of memory free for 1946 * compaction in this zone, don't free any more. 1947 * Even though compaction is invoked for any 1948 * non-zero order, only frequent costly order 1949 * reclamation is disruptive enough to become a 1950 * noticeable problem, like transparent huge 1951 * page allocations. 1952 */ 1953 if (compaction_ready(zone, sc)) { 1954 aborted_reclaim = true; 1955 continue; 1956 } 1957 } 1958 /* 1959 * This steals pages from memory cgroups over softlimit 1960 * and returns the number of reclaimed pages and 1961 * scanned pages. This works for global memory pressure 1962 * and balancing, not for a memcg's limit. 1963 */ 1964 nr_soft_scanned = 0; 1965 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, 1966 sc->order, sc->gfp_mask, 1967 &nr_soft_scanned); 1968 sc->nr_reclaimed += nr_soft_reclaimed; 1969 sc->nr_scanned += nr_soft_scanned; 1970 /* need some check for avoid more shrink_zone() */ 1971 } 1972 1973 shrink_zone(zone, sc); 1974 } 1975 1976 return aborted_reclaim; 1977 } 1978 1979 static bool zone_reclaimable(struct zone *zone) 1980 { 1981 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; 1982 } 1983 1984 /* All zones in zonelist are unreclaimable? */ 1985 static bool all_unreclaimable(struct zonelist *zonelist, 1986 struct scan_control *sc) 1987 { 1988 struct zoneref *z; 1989 struct zone *zone; 1990 1991 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1992 gfp_zone(sc->gfp_mask), sc->nodemask) { 1993 if (!populated_zone(zone)) 1994 continue; 1995 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1996 continue; 1997 if (!zone->all_unreclaimable) 1998 return false; 1999 } 2000 2001 return true; 2002 } 2003 2004 /* 2005 * This is the main entry point to direct page reclaim. 2006 * 2007 * If a full scan of the inactive list fails to free enough memory then we 2008 * are "out of memory" and something needs to be killed. 2009 * 2010 * If the caller is !__GFP_FS then the probability of a failure is reasonably 2011 * high - the zone may be full of dirty or under-writeback pages, which this 2012 * caller can't do much about. 
We kick the writeback threads and take explicit 2013 * naps in the hope that some of these pages can be written. But if the 2014 * allocating task holds filesystem locks which prevent writeout this might not 2015 * work, and the allocation attempt will fail. 2016 * 2017 * returns: 0, if no pages reclaimed 2018 * else, the number of pages reclaimed 2019 */ 2020 static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 2021 struct scan_control *sc, 2022 struct shrink_control *shrink) 2023 { 2024 unsigned long total_scanned = 0; 2025 struct reclaim_state *reclaim_state = current->reclaim_state; 2026 struct zoneref *z; 2027 struct zone *zone; 2028 unsigned long writeback_threshold; 2029 bool aborted_reclaim; 2030 2031 delayacct_freepages_start(); 2032 2033 if (global_reclaim(sc)) 2034 count_vm_event(ALLOCSTALL); 2035 2036 do { 2037 sc->nr_scanned = 0; 2038 aborted_reclaim = shrink_zones(zonelist, sc); 2039 2040 /* 2041 * Don't shrink slabs when reclaiming memory from 2042 * over limit cgroups 2043 */ 2044 if (global_reclaim(sc)) { 2045 unsigned long lru_pages = 0; 2046 for_each_zone_zonelist(zone, z, zonelist, 2047 gfp_zone(sc->gfp_mask)) { 2048 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2049 continue; 2050 2051 lru_pages += zone_reclaimable_pages(zone); 2052 } 2053 2054 shrink_slab(shrink, sc->nr_scanned, lru_pages); 2055 if (reclaim_state) { 2056 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 2057 reclaim_state->reclaimed_slab = 0; 2058 } 2059 } 2060 total_scanned += sc->nr_scanned; 2061 if (sc->nr_reclaimed >= sc->nr_to_reclaim) 2062 goto out; 2063 2064 /* 2065 * Try to write back as many pages as we just scanned. This 2066 * tends to cause slow streaming writers to write data to the 2067 * disk smoothly, at the dirtying rate, which is nice. But 2068 * that's undesirable in laptop mode, where we *want* lumpy 2069 * writeout. So in laptop mode, write out the whole world. 2070 */ 2071 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; 2072 if (total_scanned > writeback_threshold) { 2073 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned, 2074 WB_REASON_TRY_TO_FREE_PAGES); 2075 sc->may_writepage = 1; 2076 } 2077 2078 /* Take a nap, wait for some writeback to complete */ 2079 if (!sc->hibernation_mode && sc->nr_scanned && 2080 sc->priority < DEF_PRIORITY - 2) { 2081 struct zone *preferred_zone; 2082 2083 first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask), 2084 &cpuset_current_mems_allowed, 2085 &preferred_zone); 2086 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10); 2087 } 2088 } while (--sc->priority >= 0); 2089 2090 out: 2091 delayacct_freepages_end(); 2092 2093 if (sc->nr_reclaimed) 2094 return sc->nr_reclaimed; 2095 2096 /* 2097 * As hibernation is going on, kswapd is freezed so that it can't mark 2098 * the zone into all_unreclaimable. Thus bypassing all_unreclaimable 2099 * check. 2100 */ 2101 if (oom_killer_disabled) 2102 return 0; 2103 2104 /* Aborted reclaim to try compaction? don't OOM, then */ 2105 if (aborted_reclaim) 2106 return 1; 2107 2108 /* top priority shrink_zones still had more to do? 
don't OOM, then */ 2109 if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc)) 2110 return 1; 2111 2112 return 0; 2113 } 2114 2115 static bool pfmemalloc_watermark_ok(pg_data_t *pgdat) 2116 { 2117 struct zone *zone; 2118 unsigned long pfmemalloc_reserve = 0; 2119 unsigned long free_pages = 0; 2120 int i; 2121 bool wmark_ok; 2122 2123 for (i = 0; i <= ZONE_NORMAL; i++) { 2124 zone = &pgdat->node_zones[i]; 2125 pfmemalloc_reserve += min_wmark_pages(zone); 2126 free_pages += zone_page_state(zone, NR_FREE_PAGES); 2127 } 2128 2129 wmark_ok = free_pages > pfmemalloc_reserve / 2; 2130 2131 /* kswapd must be awake if processes are being throttled */ 2132 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { 2133 pgdat->classzone_idx = min(pgdat->classzone_idx, 2134 (enum zone_type)ZONE_NORMAL); 2135 wake_up_interruptible(&pgdat->kswapd_wait); 2136 } 2137 2138 return wmark_ok; 2139 } 2140 2141 /* 2142 * Throttle direct reclaimers if backing storage is backed by the network 2143 * and the PFMEMALLOC reserve for the preferred node is getting dangerously 2144 * depleted. kswapd will continue to make progress and wake the processes 2145 * when the low watermark is reached 2146 */ 2147 static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, 2148 nodemask_t *nodemask) 2149 { 2150 struct zone *zone; 2151 int high_zoneidx = gfp_zone(gfp_mask); 2152 pg_data_t *pgdat; 2153 2154 /* 2155 * Kernel threads should not be throttled as they may be indirectly 2156 * responsible for cleaning pages necessary for reclaim to make forward 2157 * progress. kjournald for example may enter direct reclaim while 2158 * committing a transaction where throttling it could forcing other 2159 * processes to block on log_wait_commit(). 2160 */ 2161 if (current->flags & PF_KTHREAD) 2162 return; 2163 2164 /* Check if the pfmemalloc reserves are ok */ 2165 first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone); 2166 pgdat = zone->zone_pgdat; 2167 if (pfmemalloc_watermark_ok(pgdat)) 2168 return; 2169 2170 /* Account for the throttling */ 2171 count_vm_event(PGSCAN_DIRECT_THROTTLE); 2172 2173 /* 2174 * If the caller cannot enter the filesystem, it's possible that it 2175 * is due to the caller holding an FS lock or performing a journal 2176 * transaction in the case of a filesystem like ext[3|4]. In this case, 2177 * it is not safe to block on pfmemalloc_wait as kswapd could be 2178 * blocked waiting on the same lock. Instead, throttle for up to a 2179 * second before continuing. 2180 */ 2181 if (!(gfp_mask & __GFP_FS)) { 2182 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, 2183 pfmemalloc_watermark_ok(pgdat), HZ); 2184 return; 2185 } 2186 2187 /* Throttle until kswapd wakes the process */ 2188 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, 2189 pfmemalloc_watermark_ok(pgdat)); 2190 } 2191 2192 unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 2193 gfp_t gfp_mask, nodemask_t *nodemask) 2194 { 2195 unsigned long nr_reclaimed; 2196 struct scan_control sc = { 2197 .gfp_mask = gfp_mask, 2198 .may_writepage = !laptop_mode, 2199 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2200 .may_unmap = 1, 2201 .may_swap = 1, 2202 .order = order, 2203 .priority = DEF_PRIORITY, 2204 .target_mem_cgroup = NULL, 2205 .nodemask = nodemask, 2206 }; 2207 struct shrink_control shrink = { 2208 .gfp_mask = sc.gfp_mask, 2209 }; 2210 2211 throttle_direct_reclaim(gfp_mask, zonelist, nodemask); 2212 2213 /* 2214 * Do not enter reclaim if fatal signal is pending. 
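 * (throttle_direct_reclaim() above may have slept waiting for the
 * pfmemalloc reserves to be refilled, so re-check for a fatal signal
 * before starting any work.)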
1 is returned so 2215 * that the page allocator does not consider triggering OOM 2216 */ 2217 if (fatal_signal_pending(current)) 2218 return 1; 2219 2220 trace_mm_vmscan_direct_reclaim_begin(order, 2221 sc.may_writepage, 2222 gfp_mask); 2223 2224 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2225 2226 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 2227 2228 return nr_reclaimed; 2229 } 2230 2231 #ifdef CONFIG_MEMCG 2232 2233 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg, 2234 gfp_t gfp_mask, bool noswap, 2235 struct zone *zone, 2236 unsigned long *nr_scanned) 2237 { 2238 struct scan_control sc = { 2239 .nr_scanned = 0, 2240 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2241 .may_writepage = !laptop_mode, 2242 .may_unmap = 1, 2243 .may_swap = !noswap, 2244 .order = 0, 2245 .priority = 0, 2246 .target_mem_cgroup = memcg, 2247 }; 2248 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); 2249 2250 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2251 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 2252 2253 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, 2254 sc.may_writepage, 2255 sc.gfp_mask); 2256 2257 /* 2258 * NOTE: Although we can get the priority field, using it 2259 * here is not a good idea, since it limits the pages we can scan. 2260 * if we don't reclaim here, the shrink_zone from balance_pgdat 2261 * will pick up pages from other mem cgroup's as well. We hack 2262 * the priority and make it zero. 2263 */ 2264 shrink_lruvec(lruvec, &sc); 2265 2266 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 2267 2268 *nr_scanned = sc.nr_scanned; 2269 return sc.nr_reclaimed; 2270 } 2271 2272 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, 2273 gfp_t gfp_mask, 2274 bool noswap) 2275 { 2276 struct zonelist *zonelist; 2277 unsigned long nr_reclaimed; 2278 int nid; 2279 struct scan_control sc = { 2280 .may_writepage = !laptop_mode, 2281 .may_unmap = 1, 2282 .may_swap = !noswap, 2283 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2284 .order = 0, 2285 .priority = DEF_PRIORITY, 2286 .target_mem_cgroup = memcg, 2287 .nodemask = NULL, /* we don't care the placement */ 2288 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2289 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), 2290 }; 2291 struct shrink_control shrink = { 2292 .gfp_mask = sc.gfp_mask, 2293 }; 2294 2295 /* 2296 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't 2297 * take care of from where we get pages. So the node where we start the 2298 * scan does not need to be the current node. 2299 */ 2300 nid = mem_cgroup_select_victim_node(memcg); 2301 2302 zonelist = NODE_DATA(nid)->node_zonelists; 2303 2304 trace_mm_vmscan_memcg_reclaim_begin(0, 2305 sc.may_writepage, 2306 sc.gfp_mask); 2307 2308 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2309 2310 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 2311 2312 return nr_reclaimed; 2313 } 2314 #endif 2315 2316 static void age_active_anon(struct zone *zone, struct scan_control *sc) 2317 { 2318 struct mem_cgroup *memcg; 2319 2320 if (!total_swap_pages) 2321 return; 2322 2323 memcg = mem_cgroup_iter(NULL, NULL, NULL); 2324 do { 2325 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); 2326 2327 if (inactive_anon_is_low(lruvec)) 2328 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 2329 sc, LRU_ACTIVE_ANON); 2330 2331 memcg = mem_cgroup_iter(NULL, memcg, NULL); 2332 } while (memcg); 2333 } 2334 2335 /* 2336 * pgdat_balanced is used when checking if a node is balanced for high-order 2337 * allocations. 
Only zones that meet watermarks and are in a zone allowed 2338 * by the caller's classzone_idx are added to balanced_pages. The total of 2339 * balanced pages must be at least 25% of the zones allowed by classzone_idx 2340 * for the node to be considered balanced. Forcing all zones to be balanced 2341 * for high orders can cause excessive reclaim when there are imbalanced zones. 2342 * The choice of 25% is due to 2343 * o a 16M DMA zone that is balanced will not balance a zone on any 2344 * reasonably sized machine 2345 * o On all other machines, the top zone must be at least a reasonable 2346 * percentage of the middle zones. For example, on 32-bit x86, highmem 2347 * would need to be at least 256M for it to balance a whole node. 2348 * Similarly, on x86-64 the Normal zone would need to be at least 1G 2349 * to balance a node on its own. These seemed like reasonable ratios. 2350 */ 2351 static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages, 2352 int classzone_idx) 2353 { 2354 unsigned long present_pages = 0; 2355 int i; 2356 2357 for (i = 0; i <= classzone_idx; i++) 2358 present_pages += pgdat->node_zones[i].present_pages; 2359 2360 /* A special case here: if the zones have no pages, we consider the node balanced */ 2361 return balanced_pages >= (present_pages >> 2); 2362 } 2363 2364 /* 2365 * Prepare kswapd for sleeping. This verifies that there are no processes 2366 * waiting in throttle_direct_reclaim() and that watermarks have been met. 2367 * 2368 * Returns true if kswapd is ready to sleep 2369 */ 2370 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining, 2371 int classzone_idx) 2372 { 2373 int i; 2374 unsigned long balanced = 0; 2375 bool all_zones_ok = true; 2376 2377 /* If a direct reclaimer woke kswapd within HZ/10, it's premature */ 2378 if (remaining) 2379 return false; 2380 2381 /* 2382 * There is a potential race between when kswapd checks its watermarks 2383 * and a process gets throttled. There is also a potential race if 2384 * processes get throttled, kswapd wakes, a large process exits, thereby 2385 * balancing the zones, which causes kswapd to miss a wakeup. If kswapd 2386 * is going to sleep, no process should be sleeping on pfmemalloc_wait 2387 * so wake them now if necessary. If necessary, processes will wake 2388 * kswapd and get throttled again 2389 */ 2390 if (waitqueue_active(&pgdat->pfmemalloc_wait)) { 2391 wake_up(&pgdat->pfmemalloc_wait); 2392 return false; 2393 } 2394 2395 /* Check the watermark levels */ 2396 for (i = 0; i <= classzone_idx; i++) { 2397 struct zone *zone = pgdat->node_zones + i; 2398 2399 if (!populated_zone(zone)) 2400 continue; 2401 2402 /* 2403 * balance_pgdat() skips over all_unreclaimable after 2404 * DEF_PRIORITY. Effectively, it considers them balanced so 2405 * they must be considered balanced here as well if kswapd 2406 * is to sleep 2407 */ 2408 if (zone->all_unreclaimable) { 2409 balanced += zone->present_pages; 2410 continue; 2411 } 2412 2413 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), 2414 i, 0)) 2415 all_zones_ok = false; 2416 else 2417 balanced += zone->present_pages; 2418 } 2419 2420 /* 2421 * For high-order requests, the balanced zones must contain at least 2422 * 25% of the node's pages for kswapd to sleep.
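 * (Illustration: if the zones up to classzone_idx hold 1,000,000 pages
 * in total, kswapd may sleep for a high-order wakeup once zones holding
 * at least 250,000 of those pages meet their high watermark, i.e.
 * balanced_pages >= present_pages >> 2 in pgdat_balanced().)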
For order-0, all zones 2423 * must be balanced 2424 */ 2425 if (order) 2426 return pgdat_balanced(pgdat, balanced, classzone_idx); 2427 else 2428 return all_zones_ok; 2429 } 2430 2431 /* 2432 * For kswapd, balance_pgdat() will work across all this node's zones until 2433 * they are all at high_wmark_pages(zone). 2434 * 2435 * Returns the final order kswapd was reclaiming at 2436 * 2437 * There is special handling here for zones which are full of pinned pages. 2438 * This can happen if the pages are all mlocked, or if they are all used by 2439 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. 2440 * What we do is to detect the case where all pages in the zone have been 2441 * scanned twice and there has been zero successful reclaim. Mark the zone as 2442 * dead and from now on, only perform a short scan. Basically we're polling 2443 * the zone for when the problem goes away. 2444 * 2445 * kswapd scans the zones in the highmem->normal->dma direction. It skips 2446 * zones which have free_pages > high_wmark_pages(zone), but once a zone is 2447 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the 2448 * lower zones regardless of the number of free pages in the lower zones. This 2449 * interoperates with the page allocator fallback scheme to ensure that aging 2450 * of pages is balanced across the zones. 2451 */ 2452 static unsigned long balance_pgdat(pg_data_t *pgdat, int order, 2453 int *classzone_idx) 2454 { 2455 int all_zones_ok; 2456 unsigned long balanced; 2457 int i; 2458 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 2459 unsigned long total_scanned; 2460 struct reclaim_state *reclaim_state = current->reclaim_state; 2461 unsigned long nr_soft_reclaimed; 2462 unsigned long nr_soft_scanned; 2463 struct scan_control sc = { 2464 .gfp_mask = GFP_KERNEL, 2465 .may_unmap = 1, 2466 .may_swap = 1, 2467 /* 2468 * kswapd doesn't want to be bailed out while reclaim. because 2469 * we want to put equal scanning pressure on each zone. 2470 */ 2471 .nr_to_reclaim = ULONG_MAX, 2472 .order = order, 2473 .target_mem_cgroup = NULL, 2474 }; 2475 struct shrink_control shrink = { 2476 .gfp_mask = sc.gfp_mask, 2477 }; 2478 loop_again: 2479 total_scanned = 0; 2480 sc.priority = DEF_PRIORITY; 2481 sc.nr_reclaimed = 0; 2482 sc.may_writepage = !laptop_mode; 2483 count_vm_event(PAGEOUTRUN); 2484 2485 do { 2486 unsigned long lru_pages = 0; 2487 int has_under_min_watermark_zone = 0; 2488 2489 all_zones_ok = 1; 2490 balanced = 0; 2491 2492 /* 2493 * Scan in the highmem->dma direction for the highest 2494 * zone which needs scanning 2495 */ 2496 for (i = pgdat->nr_zones - 1; i >= 0; i--) { 2497 struct zone *zone = pgdat->node_zones + i; 2498 2499 if (!populated_zone(zone)) 2500 continue; 2501 2502 if (zone->all_unreclaimable && 2503 sc.priority != DEF_PRIORITY) 2504 continue; 2505 2506 /* 2507 * Do some background aging of the anon list, to give 2508 * pages a chance to be referenced before reclaiming. 2509 */ 2510 age_active_anon(zone, &sc); 2511 2512 /* 2513 * If the number of buffer_heads in the machine 2514 * exceeds the maximum allowed level and this node 2515 * has a highmem zone, force kswapd to reclaim from 2516 * it to relieve lowmem pressure. 
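 * (This mirrors the buffer_heads_over_limit handling in shrink_zones():
 * highmem pages can pin lowmem pages that hold their buffer_heads, so
 * lowmem pressure is best relieved by scanning the highmem zone too.)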
2517 */ 2518 if (buffer_heads_over_limit && is_highmem_idx(i)) { 2519 end_zone = i; 2520 break; 2521 } 2522 2523 if (!zone_watermark_ok_safe(zone, order, 2524 high_wmark_pages(zone), 0, 0)) { 2525 end_zone = i; 2526 break; 2527 } else { 2528 /* If balanced, clear the congested flag */ 2529 zone_clear_flag(zone, ZONE_CONGESTED); 2530 } 2531 } 2532 if (i < 0) 2533 goto out; 2534 2535 for (i = 0; i <= end_zone; i++) { 2536 struct zone *zone = pgdat->node_zones + i; 2537 2538 lru_pages += zone_reclaimable_pages(zone); 2539 } 2540 2541 /* 2542 * Now scan the zone in the dma->highmem direction, stopping 2543 * at the last zone which needs scanning. 2544 * 2545 * We do this because the page allocator works in the opposite 2546 * direction. This prevents the page allocator from allocating 2547 * pages behind kswapd's direction of progress, which would 2548 * cause too much scanning of the lower zones. 2549 */ 2550 for (i = 0; i <= end_zone; i++) { 2551 struct zone *zone = pgdat->node_zones + i; 2552 int nr_slab, testorder; 2553 unsigned long balance_gap; 2554 2555 if (!populated_zone(zone)) 2556 continue; 2557 2558 if (zone->all_unreclaimable && 2559 sc.priority != DEF_PRIORITY) 2560 continue; 2561 2562 sc.nr_scanned = 0; 2563 2564 nr_soft_scanned = 0; 2565 /* 2566 * Call soft limit reclaim before calling shrink_zone. 2567 */ 2568 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, 2569 order, sc.gfp_mask, 2570 &nr_soft_scanned); 2571 sc.nr_reclaimed += nr_soft_reclaimed; 2572 total_scanned += nr_soft_scanned; 2573 2574 /* 2575 * We put equal pressure on every zone, unless 2576 * one zone has way too many pages free 2577 * already. The "too many pages" is defined 2578 * as the high wmark plus a "gap" where the 2579 * gap is either the low watermark or 1% 2580 * of the zone, whichever is smaller. 2581 */ 2582 balance_gap = min(low_wmark_pages(zone), 2583 (zone->present_pages + 2584 KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / 2585 KSWAPD_ZONE_BALANCE_GAP_RATIO); 2586 /* 2587 * Kswapd reclaims only single pages with compaction 2588 * enabled. Trying too hard to reclaim until contiguous 2589 * free pages have become available can hurt performance 2590 * by evicting too much useful data from memory. 2591 * Do not reclaim more than needed for compaction. 2592 */ 2593 testorder = order; 2594 if (COMPACTION_BUILD && order && 2595 compaction_suitable(zone, order) != 2596 COMPACT_SKIPPED) 2597 testorder = 0; 2598 2599 if ((buffer_heads_over_limit && is_highmem_idx(i)) || 2600 !zone_watermark_ok_safe(zone, testorder, 2601 high_wmark_pages(zone) + balance_gap, 2602 end_zone, 0)) { 2603 shrink_zone(zone, &sc); 2604 2605 reclaim_state->reclaimed_slab = 0; 2606 nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages); 2607 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 2608 total_scanned += sc.nr_scanned; 2609 2610 if (nr_slab == 0 && !zone_reclaimable(zone)) 2611 zone->all_unreclaimable = 1; 2612 } 2613 2614 /* 2615 * If we've done a decent amount of scanning and 2616 * the reclaim ratio is low, start doing writepage 2617 * even in laptop mode 2618 */ 2619 if (total_scanned > SWAP_CLUSTER_MAX * 2 && 2620 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2) 2621 sc.may_writepage = 1; 2622 2623 if (zone->all_unreclaimable) { 2624 if (end_zone && end_zone == i) 2625 end_zone--; 2626 continue; 2627 } 2628 2629 if (!zone_watermark_ok_safe(zone, testorder, 2630 high_wmark_pages(zone), end_zone, 0)) { 2631 all_zones_ok = 0; 2632 /* 2633 * We are still under min water mark. 
This 2634 * means that we have a GFP_ATOMIC allocation 2635 * failure risk. Hurry up! 2636 */ 2637 if (!zone_watermark_ok_safe(zone, order, 2638 min_wmark_pages(zone), end_zone, 0)) 2639 has_under_min_watermark_zone = 1; 2640 } else { 2641 /* 2642 * If a zone reaches its high watermark, 2643 * consider it to be no longer congested. It's 2644 * possible there are dirty pages backed by 2645 * congested BDIs but as pressure is relieved, 2646 * speculatively avoid congestion waits 2647 */ 2648 zone_clear_flag(zone, ZONE_CONGESTED); 2649 if (i <= *classzone_idx) 2650 balanced += zone->present_pages; 2651 } 2652 2653 } 2654 2655 /* 2656 * If the low watermark is met there is no need for processes 2657 * to be throttled on pfmemalloc_wait as they should now be 2658 * able to safely make forward progress. Wake them 2659 */ 2660 if (waitqueue_active(&pgdat->pfmemalloc_wait) && 2661 pfmemalloc_watermark_ok(pgdat)) 2662 wake_up(&pgdat->pfmemalloc_wait); 2663 2664 if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx))) 2665 break; /* kswapd: all done */ 2666 /* 2667 * OK, kswapd is getting into trouble. Take a nap, then take 2668 * another pass across the zones. 2669 */ 2670 if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) { 2671 if (has_under_min_watermark_zone) 2672 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT); 2673 else 2674 congestion_wait(BLK_RW_ASYNC, HZ/10); 2675 } 2676 2677 /* 2678 * We do this so kswapd doesn't build up large priorities for 2679 * example when it is freeing in parallel with allocators. It 2680 * matches the direct reclaim path behaviour in terms of impact 2681 * on zone->*_priority. 2682 */ 2683 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) 2684 break; 2685 } while (--sc.priority >= 0); 2686 out: 2687 2688 /* 2689 * order-0: All zones must meet high watermark for a balanced node 2690 * high-order: Balanced zones must make up at least 25% of the node 2691 * for the node to be balanced 2692 */ 2693 if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) { 2694 cond_resched(); 2695 2696 try_to_freeze(); 2697 2698 /* 2699 * Fragmentation may mean that the system cannot be 2700 * rebalanced for high-order allocations in all zones. 2701 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX, 2702 * it means the zones have been fully scanned and are still 2703 * not balanced. For high-order allocations, there is 2704 * little point trying all over again as kswapd may 2705 * loop indefinitely. 2706 * 2707 * Instead, recheck all watermarks at order-0 as they 2708 * are the most important. If watermarks are ok, kswapd will go 2709 * back to sleep. High-order users can still perform direct 2710 * reclaim if they wish. 2711 */ 2712 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX) 2713 order = sc.order = 0; 2714 2715 goto loop_again; 2716 } 2717 2718 /* 2719 * If kswapd was reclaiming at a higher order, it has the option of 2720 * sleeping without all zones being balanced. Before it does, it must 2721 * ensure that the watermarks for order-0 on *all* zones are met and 2722 * that the congestion flags are cleared. The congestion flag must 2723 * be cleared as kswapd is the only mechanism that clears the flag 2724 * and it is potentially going to sleep here.
2725 */ 2726 if (order) { 2727 int zones_need_compaction = 1; 2728 2729 for (i = 0; i <= end_zone; i++) { 2730 struct zone *zone = pgdat->node_zones + i; 2731 2732 if (!populated_zone(zone)) 2733 continue; 2734 2735 if (zone->all_unreclaimable && 2736 sc.priority != DEF_PRIORITY) 2737 continue; 2738 2739 /* Would compaction fail due to lack of free memory? */ 2740 if (COMPACTION_BUILD && 2741 compaction_suitable(zone, order) == COMPACT_SKIPPED) 2742 goto loop_again; 2743 2744 /* Confirm the zone is balanced for order-0 */ 2745 if (!zone_watermark_ok(zone, 0, 2746 high_wmark_pages(zone), 0, 0)) { 2747 order = sc.order = 0; 2748 goto loop_again; 2749 } 2750 2751 /* Check if the memory needs to be defragmented. */ 2752 if (zone_watermark_ok(zone, order, 2753 low_wmark_pages(zone), *classzone_idx, 0)) 2754 zones_need_compaction = 0; 2755 2756 /* If balanced, clear the congested flag */ 2757 zone_clear_flag(zone, ZONE_CONGESTED); 2758 } 2759 2760 if (zones_need_compaction) 2761 compact_pgdat(pgdat, order); 2762 } 2763 2764 /* 2765 * Return the order we were reclaiming at so prepare_kswapd_sleep() 2766 * makes a decision on the order we were last reclaiming at. However, 2767 * if another caller entered the allocator slow path while kswapd 2768 * was awake, order will remain at the higher level 2769 */ 2770 *classzone_idx = end_zone; 2771 return order; 2772 } 2773 2774 static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx) 2775 { 2776 long remaining = 0; 2777 DEFINE_WAIT(wait); 2778 2779 if (freezing(current) || kthread_should_stop()) 2780 return; 2781 2782 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2783 2784 /* Try to sleep for a short interval */ 2785 if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) { 2786 remaining = schedule_timeout(HZ/10); 2787 finish_wait(&pgdat->kswapd_wait, &wait); 2788 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2789 } 2790 2791 /* 2792 * After a short sleep, check if it was a premature sleep. If not, then 2793 * go fully to sleep until explicitly woken up. 2794 */ 2795 if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) { 2796 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 2797 2798 /* 2799 * vmstat counters are not perfectly accurate and the estimated 2800 * value for counters such as NR_FREE_PAGES can deviate from the 2801 * true value by nr_online_cpus * threshold. To avoid the zone 2802 * watermarks being breached while under pressure, we reduce the 2803 * per-cpu vmstat threshold while kswapd is awake and restore 2804 * them before going back to sleep. 2805 */ 2806 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 2807 2808 if (!kthread_should_stop()) 2809 schedule(); 2810 2811 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); 2812 } else { 2813 if (remaining) 2814 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 2815 else 2816 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 2817 } 2818 finish_wait(&pgdat->kswapd_wait, &wait); 2819 } 2820 2821 /* 2822 * The background pageout daemon, started as a kernel thread 2823 * from the init process. 2824 * 2825 * This basically trickles out pages so that we have _some_ 2826 * free memory available even if there is no other activity 2827 * that frees anything up. This is needed for things like routing 2828 * etc, where we otherwise might have all activity going on in 2829 * asynchronous contexts that cannot page things out. 
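 * (Network receive and routing largely run in softirq context, for
 * example, which may allocate memory but can never sleep to write a
 * dirty page out; kswapd keeps a small pool of free pages available for
 * such paths.)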
2830 * 2831 * If there are applications that are active memory-allocators 2832 * (most normal use), this basically shouldn't matter. 2833 */ 2834 static int kswapd(void *p) 2835 { 2836 unsigned long order, new_order; 2837 unsigned balanced_order; 2838 int classzone_idx, new_classzone_idx; 2839 int balanced_classzone_idx; 2840 pg_data_t *pgdat = (pg_data_t*)p; 2841 struct task_struct *tsk = current; 2842 2843 struct reclaim_state reclaim_state = { 2844 .reclaimed_slab = 0, 2845 }; 2846 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2847 2848 lockdep_set_current_reclaim_state(GFP_KERNEL); 2849 2850 if (!cpumask_empty(cpumask)) 2851 set_cpus_allowed_ptr(tsk, cpumask); 2852 current->reclaim_state = &reclaim_state; 2853 2854 /* 2855 * Tell the memory management that we're a "memory allocator", 2856 * and that if we need more memory we should get access to it 2857 * regardless (see "__alloc_pages()"). "kswapd" should 2858 * never get caught in the normal page freeing logic. 2859 * 2860 * (Kswapd normally doesn't need memory anyway, but sometimes 2861 * you need a small amount of memory in order to be able to 2862 * page out something else, and this flag essentially protects 2863 * us from recursively trying to free more memory as we're 2864 * trying to free the first piece of memory in the first place). 2865 */ 2866 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 2867 set_freezable(); 2868 2869 order = new_order = 0; 2870 balanced_order = 0; 2871 classzone_idx = new_classzone_idx = pgdat->nr_zones - 1; 2872 balanced_classzone_idx = classzone_idx; 2873 for ( ; ; ) { 2874 int ret; 2875 2876 /* 2877 * If the last balance_pgdat was unsuccessful it's unlikely a 2878 * new request of a similar or harder type will succeed soon 2879 * so consider going to sleep on the basis we reclaimed at 2880 */ 2881 if (balanced_classzone_idx >= new_classzone_idx && 2882 balanced_order == new_order) { 2883 new_order = pgdat->kswapd_max_order; 2884 new_classzone_idx = pgdat->classzone_idx; 2885 pgdat->kswapd_max_order = 0; 2886 pgdat->classzone_idx = pgdat->nr_zones - 1; 2887 } 2888 2889 if (order < new_order || classzone_idx > new_classzone_idx) { 2890 /* 2891 * Don't sleep if someone wants a larger 'order' 2892 * allocation or has tigher zone constraints 2893 */ 2894 order = new_order; 2895 classzone_idx = new_classzone_idx; 2896 } else { 2897 kswapd_try_to_sleep(pgdat, balanced_order, 2898 balanced_classzone_idx); 2899 order = pgdat->kswapd_max_order; 2900 classzone_idx = pgdat->classzone_idx; 2901 new_order = order; 2902 new_classzone_idx = classzone_idx; 2903 pgdat->kswapd_max_order = 0; 2904 pgdat->classzone_idx = pgdat->nr_zones - 1; 2905 } 2906 2907 ret = try_to_freeze(); 2908 if (kthread_should_stop()) 2909 break; 2910 2911 /* 2912 * We can speed up thawing tasks if we don't call balance_pgdat 2913 * after returning from the refrigerator 2914 */ 2915 if (!ret) { 2916 trace_mm_vmscan_kswapd_wake(pgdat->node_id, order); 2917 balanced_classzone_idx = classzone_idx; 2918 balanced_order = balance_pgdat(pgdat, order, 2919 &balanced_classzone_idx); 2920 } 2921 } 2922 return 0; 2923 } 2924 2925 /* 2926 * A zone is low on free memory, so wake its kswapd task to service it. 
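 *
 * A rough sketch of a call site (illustrative only; the real callers sit
 * in the page allocator slow path and pass the classzone of the failing
 * allocation):
 *
 *	if (!zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
 *		wakeup_kswapd(zone, order, zone_idx(zone));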
2927 */ 2928 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) 2929 { 2930 pg_data_t *pgdat; 2931 2932 if (!populated_zone(zone)) 2933 return; 2934 2935 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2936 return; 2937 pgdat = zone->zone_pgdat; 2938 if (pgdat->kswapd_max_order < order) { 2939 pgdat->kswapd_max_order = order; 2940 pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx); 2941 } 2942 if (!waitqueue_active(&pgdat->kswapd_wait)) 2943 return; 2944 if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0)) 2945 return; 2946 2947 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); 2948 wake_up_interruptible(&pgdat->kswapd_wait); 2949 } 2950 2951 /* 2952 * The reclaimable count would be mostly accurate. 2953 * The less reclaimable pages may be 2954 * - mlocked pages, which will be moved to unevictable list when encountered 2955 * - mapped pages, which may require several travels to be reclaimed 2956 * - dirty pages, which is not "instantly" reclaimable 2957 */ 2958 unsigned long global_reclaimable_pages(void) 2959 { 2960 int nr; 2961 2962 nr = global_page_state(NR_ACTIVE_FILE) + 2963 global_page_state(NR_INACTIVE_FILE); 2964 2965 if (nr_swap_pages > 0) 2966 nr += global_page_state(NR_ACTIVE_ANON) + 2967 global_page_state(NR_INACTIVE_ANON); 2968 2969 return nr; 2970 } 2971 2972 unsigned long zone_reclaimable_pages(struct zone *zone) 2973 { 2974 int nr; 2975 2976 nr = zone_page_state(zone, NR_ACTIVE_FILE) + 2977 zone_page_state(zone, NR_INACTIVE_FILE); 2978 2979 if (nr_swap_pages > 0) 2980 nr += zone_page_state(zone, NR_ACTIVE_ANON) + 2981 zone_page_state(zone, NR_INACTIVE_ANON); 2982 2983 return nr; 2984 } 2985 2986 #ifdef CONFIG_HIBERNATION 2987 /* 2988 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 2989 * freed pages. 2990 * 2991 * Rather than trying to age LRUs the aim is to preserve the overall 2992 * LRU order by reclaiming preferentially 2993 * inactive > active > active referenced > active mapped 2994 */ 2995 unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 2996 { 2997 struct reclaim_state reclaim_state; 2998 struct scan_control sc = { 2999 .gfp_mask = GFP_HIGHUSER_MOVABLE, 3000 .may_swap = 1, 3001 .may_unmap = 1, 3002 .may_writepage = 1, 3003 .nr_to_reclaim = nr_to_reclaim, 3004 .hibernation_mode = 1, 3005 .order = 0, 3006 .priority = DEF_PRIORITY, 3007 }; 3008 struct shrink_control shrink = { 3009 .gfp_mask = sc.gfp_mask, 3010 }; 3011 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 3012 struct task_struct *p = current; 3013 unsigned long nr_reclaimed; 3014 3015 p->flags |= PF_MEMALLOC; 3016 lockdep_set_current_reclaim_state(sc.gfp_mask); 3017 reclaim_state.reclaimed_slab = 0; 3018 p->reclaim_state = &reclaim_state; 3019 3020 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 3021 3022 p->reclaim_state = NULL; 3023 lockdep_clear_current_reclaim_state(); 3024 p->flags &= ~PF_MEMALLOC; 3025 3026 return nr_reclaimed; 3027 } 3028 #endif /* CONFIG_HIBERNATION */ 3029 3030 /* It's optimal to keep kswapds on the same CPUs as their memory, but 3031 not required for correctness. So if the last cpu in a node goes 3032 away, we get changed to run anywhere: as the first one comes back, 3033 restore their cpu bindings. 
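   (For example, if the last CPU of node 1 goes offline, node 1's kswapd
   may run on any CPU; when a CPU of node 1 later comes back online, the
   notifier below re-applies cpumask_of_node(1) to that kswapd.)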
*/ 3034 static int __devinit cpu_callback(struct notifier_block *nfb, 3035 unsigned long action, void *hcpu) 3036 { 3037 int nid; 3038 3039 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { 3040 for_each_node_state(nid, N_HIGH_MEMORY) { 3041 pg_data_t *pgdat = NODE_DATA(nid); 3042 const struct cpumask *mask; 3043 3044 mask = cpumask_of_node(pgdat->node_id); 3045 3046 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 3047 /* One of our CPUs online: restore mask */ 3048 set_cpus_allowed_ptr(pgdat->kswapd, mask); 3049 } 3050 } 3051 return NOTIFY_OK; 3052 } 3053 3054 /* 3055 * This kswapd start function will be called by init and node-hot-add. 3056 * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added. 3057 */ 3058 int kswapd_run(int nid) 3059 { 3060 pg_data_t *pgdat = NODE_DATA(nid); 3061 int ret = 0; 3062 3063 if (pgdat->kswapd) 3064 return 0; 3065 3066 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 3067 if (IS_ERR(pgdat->kswapd)) { 3068 /* failure at boot is fatal */ 3069 BUG_ON(system_state == SYSTEM_BOOTING); 3070 printk("Failed to start kswapd on node %d\n",nid); 3071 ret = -1; 3072 } 3073 return ret; 3074 } 3075 3076 /* 3077 * Called by memory hotplug when all memory in a node is offlined. Caller must 3078 * hold lock_memory_hotplug(). 3079 */ 3080 void kswapd_stop(int nid) 3081 { 3082 struct task_struct *kswapd = NODE_DATA(nid)->kswapd; 3083 3084 if (kswapd) { 3085 kthread_stop(kswapd); 3086 NODE_DATA(nid)->kswapd = NULL; 3087 } 3088 } 3089 3090 static int __init kswapd_init(void) 3091 { 3092 int nid; 3093 3094 swap_setup(); 3095 for_each_node_state(nid, N_HIGH_MEMORY) 3096 kswapd_run(nid); 3097 hotcpu_notifier(cpu_callback, 0); 3098 return 0; 3099 } 3100 3101 module_init(kswapd_init) 3102 3103 #ifdef CONFIG_NUMA 3104 /* 3105 * Zone reclaim mode 3106 * 3107 * If non-zero call zone_reclaim when the number of free pages falls below 3108 * the watermarks. 3109 */ 3110 int zone_reclaim_mode __read_mostly; 3111 3112 #define RECLAIM_OFF 0 3113 #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ 3114 #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ 3115 #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */ 3116 3117 /* 3118 * Priority for ZONE_RECLAIM. This determines the fraction of pages 3119 * of a node considered for each zone_reclaim. 4 scans 1/16th of 3120 * a zone. 3121 */ 3122 #define ZONE_RECLAIM_PRIORITY 4 3123 3124 /* 3125 * Percentage of pages in a zone that must be unmapped for zone_reclaim to 3126 * occur. 3127 */ 3128 int sysctl_min_unmapped_ratio = 1; 3129 3130 /* 3131 * If the number of slab pages in a zone grows beyond this percentage then 3132 * slab reclaim needs to occur. 3133 */ 3134 int sysctl_min_slab_ratio = 5; 3135 3136 static inline unsigned long zone_unmapped_file_pages(struct zone *zone) 3137 { 3138 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED); 3139 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) + 3140 zone_page_state(zone, NR_ACTIVE_FILE); 3141 3142 /* 3143 * It's possible for there to be more file mapped pages than 3144 * accounted for by the pages on the file LRU lists because 3145 * tmpfs pages accounted for as ANON can also be FILE_MAPPED 3146 */ 3147 return (file_lru > file_mapped) ? 
(file_lru - file_mapped) : 0; 3148 } 3149 3150 /* Work out how many page cache pages we can reclaim in this reclaim_mode */ 3151 static long zone_pagecache_reclaimable(struct zone *zone) 3152 { 3153 long nr_pagecache_reclaimable; 3154 long delta = 0; 3155 3156 /* 3157 * If RECLAIM_SWAP is set, then all file pages are considered 3158 * potentially reclaimable. Otherwise, we have to worry about 3159 * pages like swapcache and zone_unmapped_file_pages() provides 3160 * a better estimate 3161 */ 3162 if (zone_reclaim_mode & RECLAIM_SWAP) 3163 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES); 3164 else 3165 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone); 3166 3167 /* If we can't clean pages, remove dirty pages from consideration */ 3168 if (!(zone_reclaim_mode & RECLAIM_WRITE)) 3169 delta += zone_page_state(zone, NR_FILE_DIRTY); 3170 3171 /* Watch for any possible underflows due to delta */ 3172 if (unlikely(delta > nr_pagecache_reclaimable)) 3173 delta = nr_pagecache_reclaimable; 3174 3175 return nr_pagecache_reclaimable - delta; 3176 } 3177 3178 /* 3179 * Try to free up some pages from this zone through reclaim. 3180 */ 3181 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 3182 { 3183 /* Minimum pages needed in order to stay on node */ 3184 const unsigned long nr_pages = 1 << order; 3185 struct task_struct *p = current; 3186 struct reclaim_state reclaim_state; 3187 struct scan_control sc = { 3188 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), 3189 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP), 3190 .may_swap = 1, 3191 .nr_to_reclaim = max_t(unsigned long, nr_pages, 3192 SWAP_CLUSTER_MAX), 3193 .gfp_mask = gfp_mask, 3194 .order = order, 3195 .priority = ZONE_RECLAIM_PRIORITY, 3196 }; 3197 struct shrink_control shrink = { 3198 .gfp_mask = sc.gfp_mask, 3199 }; 3200 unsigned long nr_slab_pages0, nr_slab_pages1; 3201 3202 cond_resched(); 3203 /* 3204 * We need to be able to allocate from the reserves for RECLAIM_SWAP 3205 * and we also need to be able to write out pages for RECLAIM_WRITE 3206 * and RECLAIM_SWAP. 3207 */ 3208 p->flags |= PF_MEMALLOC | PF_SWAPWRITE; 3209 lockdep_set_current_reclaim_state(gfp_mask); 3210 reclaim_state.reclaimed_slab = 0; 3211 p->reclaim_state = &reclaim_state; 3212 3213 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) { 3214 /* 3215 * Free memory by calling shrink zone with increasing 3216 * priorities until we have enough memory freed. 3217 */ 3218 do { 3219 shrink_zone(zone, &sc); 3220 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); 3221 } 3222 3223 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); 3224 if (nr_slab_pages0 > zone->min_slab_pages) { 3225 /* 3226 * shrink_slab() does not currently allow us to determine how 3227 * many pages were freed in this zone. So we take the current 3228 * number of slab pages and shake the slab until it is reduced 3229 * by the same nr_pages that we used for reclaiming unmapped 3230 * pages. 3231 * 3232 * Note that shrink_slab will free memory on all zones and may 3233 * take a long time. 
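 *
 * (Illustration: for an order-3 request nr_pages is 8, so the loop
 * keeps calling shrink_slab() until NR_SLAB_RECLAIMABLE has dropped by
 * at least 8 pages, or until shrink_slab() reports that nothing further
 * can be freed.)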
3234 */ 3235 for (;;) { 3236 unsigned long lru_pages = zone_reclaimable_pages(zone); 3237 3238 /* No reclaimable slab or very low memory pressure */ 3239 if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages)) 3240 break; 3241 3242 /* Freed enough memory */ 3243 nr_slab_pages1 = zone_page_state(zone, 3244 NR_SLAB_RECLAIMABLE); 3245 if (nr_slab_pages1 + nr_pages <= nr_slab_pages0) 3246 break; 3247 } 3248 3249 /* 3250 * Update nr_reclaimed by the number of slab pages we 3251 * reclaimed from this zone. 3252 */ 3253 nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); 3254 if (nr_slab_pages1 < nr_slab_pages0) 3255 sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1; 3256 } 3257 3258 p->reclaim_state = NULL; 3259 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); 3260 lockdep_clear_current_reclaim_state(); 3261 return sc.nr_reclaimed >= nr_pages; 3262 } 3263 3264 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 3265 { 3266 int node_id; 3267 int ret; 3268 3269 /* 3270 * Zone reclaim reclaims unmapped file backed pages and 3271 * slab pages if we are over the defined limits. 3272 * 3273 * A small portion of unmapped file backed pages is needed for 3274 * file I/O otherwise pages read by file I/O will be immediately 3275 * thrown out if the zone is overallocated. So we do not reclaim 3276 * if less than a specified percentage of the zone is used by 3277 * unmapped file backed pages. 3278 */ 3279 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages && 3280 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) 3281 return ZONE_RECLAIM_FULL; 3282 3283 if (zone->all_unreclaimable) 3284 return ZONE_RECLAIM_FULL; 3285 3286 /* 3287 * Do not scan if the allocation should not be delayed. 3288 */ 3289 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC)) 3290 return ZONE_RECLAIM_NOSCAN; 3291 3292 /* 3293 * Only run zone reclaim on the local zone or on zones that do not 3294 * have associated processors. This will favor the local processor 3295 * over remote processors and spread off node memory allocations 3296 * as wide as possible. 3297 */ 3298 node_id = zone_to_nid(zone); 3299 if (node_state(node_id, N_CPU) && node_id != numa_node_id()) 3300 return ZONE_RECLAIM_NOSCAN; 3301 3302 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) 3303 return ZONE_RECLAIM_NOSCAN; 3304 3305 ret = __zone_reclaim(zone, gfp_mask, order); 3306 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); 3307 3308 if (!ret) 3309 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); 3310 3311 return ret; 3312 } 3313 #endif 3314 3315 /* 3316 * page_evictable - test whether a page is evictable 3317 * @page: the page to test 3318 * @vma: the VMA in which the page is or will be mapped, may be NULL 3319 * 3320 * Test whether page is evictable--i.e., should be placed on active/inactive 3321 * lists vs unevictable list. The vma argument is !NULL when called from the 3322 * fault path to determine how to instantate a new page. 
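 *
 * Reclaim-side callers pass a NULL vma; a minimal sketch of such a use
 * (illustrative only, not a verbatim call site):
 *
 *	if (!page_evictable(page, NULL))
 *		add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);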
3323 * 3324 * Reasons page might not be evictable: 3325 * (1) page's mapping marked unevictable 3326 * (2) page is part of an mlocked VMA 3327 * 3328 */ 3329 int page_evictable(struct page *page, struct vm_area_struct *vma) 3330 { 3331 3332 if (mapping_unevictable(page_mapping(page))) 3333 return 0; 3334 3335 if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page))) 3336 return 0; 3337 3338 return 1; 3339 } 3340 3341 #ifdef CONFIG_SHMEM 3342 /** 3343 * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list 3344 * @pages: array of pages to check 3345 * @nr_pages: number of pages to check 3346 * 3347 * Checks pages for evictability and moves them to the appropriate lru list. 3348 * 3349 * This function is only used for SysV IPC SHM_UNLOCK. 3350 */ 3351 void check_move_unevictable_pages(struct page **pages, int nr_pages) 3352 { 3353 struct lruvec *lruvec; 3354 struct zone *zone = NULL; 3355 int pgscanned = 0; 3356 int pgrescued = 0; 3357 int i; 3358 3359 for (i = 0; i < nr_pages; i++) { 3360 struct page *page = pages[i]; 3361 struct zone *pagezone; 3362 3363 pgscanned++; 3364 pagezone = page_zone(page); 3365 if (pagezone != zone) { 3366 if (zone) 3367 spin_unlock_irq(&zone->lru_lock); 3368 zone = pagezone; 3369 spin_lock_irq(&zone->lru_lock); 3370 } 3371 lruvec = mem_cgroup_page_lruvec(page, zone); 3372 3373 if (!PageLRU(page) || !PageUnevictable(page)) 3374 continue; 3375 3376 if (page_evictable(page, NULL)) { 3377 enum lru_list lru = page_lru_base_type(page); 3378 3379 VM_BUG_ON(PageActive(page)); 3380 ClearPageUnevictable(page); 3381 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE); 3382 add_page_to_lru_list(page, lruvec, lru); 3383 pgrescued++; 3384 } 3385 } 3386 3387 if (zone) { 3388 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); 3389 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); 3390 spin_unlock_irq(&zone->lru_lock); 3391 } 3392 } 3393 #endif /* CONFIG_SHMEM */ 3394 3395 static void warn_scan_unevictable_pages(void) 3396 { 3397 printk_once(KERN_WARNING 3398 "%s: The scan_unevictable_pages sysctl/node-interface has been " 3399 "disabled for lack of a legitimate use case. If you have " 3400 "one, please send an email to linux-mm@kvack.org.\n", 3401 current->comm); 3402 } 3403 3404 /* 3405 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of 3406 * all nodes' unevictable lists for evictable pages 3407 */ 3408 unsigned long scan_unevictable_pages; 3409 3410 int scan_unevictable_handler(struct ctl_table *table, int write, 3411 void __user *buffer, 3412 size_t *length, loff_t *ppos) 3413 { 3414 warn_scan_unevictable_pages(); 3415 proc_doulongvec_minmax(table, write, buffer, length, ppos); 3416 scan_unevictable_pages = 0; 3417 return 0; 3418 } 3419 3420 #ifdef CONFIG_NUMA 3421 /* 3422 * per node 'scan_unevictable_pages' attribute. On demand re-scan of 3423 * a specified node's per zone unevictable lists for evictable pages. 3424 */ 3425 3426 static ssize_t read_scan_unevictable_node(struct device *dev, 3427 struct device_attribute *attr, 3428 char *buf) 3429 { 3430 warn_scan_unevictable_pages(); 3431 return sprintf(buf, "0\n"); /* always zero; should fit... 
*/ 3432 } 3433 3434 static ssize_t write_scan_unevictable_node(struct device *dev, 3435 struct device_attribute *attr, 3436 const char *buf, size_t count) 3437 { 3438 warn_scan_unevictable_pages(); 3439 return 1; 3440 } 3441 3442 3443 static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR, 3444 read_scan_unevictable_node, 3445 write_scan_unevictable_node); 3446 3447 int scan_unevictable_register_node(struct node *node) 3448 { 3449 return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages); 3450 } 3451 3452 void scan_unevictable_unregister_node(struct node *node) 3453 { 3454 device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages); 3455 } 3456 #endif 3457