/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

/*
 * reclaim_mode determines how the inactive list is shrunk
 * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
 * RECLAIM_MODE_ASYNC:  Do not block
 * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
 * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
 *			page from the LRU and reclaim all pages within a
 *			naturally aligned range
 * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
 *			order-0 pages and then compact the zone
 */
typedef unsigned __bitwise__ reclaim_mode_t;
#define RECLAIM_MODE_SINGLE		((__force reclaim_mode_t)0x01u)
#define RECLAIM_MODE_ASYNC		((__force reclaim_mode_t)0x02u)
#define RECLAIM_MODE_SYNC		((__force reclaim_mode_t)0x04u)
#define RECLAIM_MODE_LUMPYRECLAIM	((__force reclaim_mode_t)0x08u)
#define RECLAIM_MODE_COMPACTION		((__force reclaim_mode_t)0x10u)

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	unsigned long hibernation_mode;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can mapped pages be reclaimed? */
	int may_unmap;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	int order;

	/*
	 * Intend to reclaim enough continuous memory rather than reclaim
	 * enough amount of memory, i.e. the mode for high-order allocation.
	 */
	reclaim_mode_t reclaim_mode;

	/* Which cgroup do we reclaim from */
	struct mem_cgroup *mem_cgroup;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#define scanning_global_lru(sc)	(!(sc)->mem_cgroup)
#else
#define scanning_global_lru(sc)	(1)
#endif

static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
						  struct scan_control *sc)
{
	if (!scanning_global_lru(sc))
		return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);

	return &zone->reclaim_stat;
}

static unsigned long zone_nr_lru_pages(struct zone *zone,
				struct scan_control *sc, enum lru_list lru)
{
	if (!scanning_global_lru(sc))
		return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup,
				zone_to_nid(zone), zone_idx(zone), BIT(lru));

	return zone_page_state(zone, NR_LRU_BASE + lru);
}


/*
 * Add a shrinker callback to be called from the vm
 */
void register_shrinker(struct shrinker *shrinker)
{
	shrinker->nr = 0;
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);
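
/*
 * Illustrative sketch only (not part of this file): how a cache might hook
 * into the shrinker interface registered above.  The helpers
 * my_cache_count() and my_cache_prune() are hypothetical stand-ins for
 * whatever bookkeeping the cache actually does; the ->shrink callback
 * returns the number of remaining objects, or -1 if nothing can be done
 * under the given gfp_mask.
 */
#if 0
static int my_cache_shrink(struct shrinker *s, struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return -1;			/* cannot recurse into the FS */
	if (sc->nr_to_scan)
		my_cache_prune(sc->nr_to_scan);	/* free this many objects */
	return my_cache_count();		/* objects left in the cache */
}

static struct shrinker my_cache_shrinker = {
	.shrink	= my_cache_shrink,
	.seeks	= DEFAULT_SEEKS,	/* one seek to recreate an object */
};

	/* at init time: */
	register_shrinker(&my_cache_shrinker);
	/* at teardown: */
	unregister_shrinker(&my_cache_shrinker);
#endif
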
static inline int do_shrinker_shrink(struct shrinker *shrinker,
				     struct shrink_control *sc,
				     unsigned long nr_to_scan)
{
	sc->nr_to_scan = nr_to_scan;
	return (*shrinker->shrink)(shrinker, sc);
}

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object. With this in mind we age equal
 * percentages of the lru and ageable caches. This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (nr_pages_scanned == 0)
		nr_pages_scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem)) {
		/* Assume we'll be able to shrink next time */
		ret = 1;
		goto out;
	}

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass;
		int shrink_ret = 0;
		long nr;
		long new_nr;
		long batch_size = shrinker->batch ? shrinker->batch
						  : SHRINK_BATCH;

		/*
		 * copy the current shrinker scan count into a local variable
		 * and zero it so that other concurrent shrinker invocations
		 * don't also do this scanning work.
		 */
		do {
			nr = shrinker->nr;
		} while (cmpxchg(&shrinker->nr, nr, 0) != nr);

		total_scan = nr;
		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
		delta = (4 * nr_pages_scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		total_scan += delta;
		if (total_scan < 0) {
			printk(KERN_ERR "shrink_slab: %pF negative objects to "
			       "delete nr=%ld\n",
			       shrinker->shrink, total_scan);
			total_scan = max_pass;
		}

		/*
		 * We need to avoid excessive windup on filesystem shrinkers
		 * due to large numbers of GFP_NOFS allocations causing the
		 * shrinkers to return -1 all the time. This results in a large
		 * nr being built up so when a shrink that can do some work
		 * comes along it empties the entire cache due to nr >>>
		 * max_pass.  This is bad for sustaining a working set in
		 * memory.
		 *
		 * Hence only allow the shrinker to scan the entire cache when
		 * a large delta change is calculated directly.
		 */
		if (delta < max_pass / 4)
			total_scan = min(total_scan, max_pass / 2);

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (total_scan > max_pass * 2)
			total_scan = max_pass * 2;

		trace_mm_shrink_slab_start(shrinker, shrink, nr,
					nr_pages_scanned, lru_pages,
					max_pass, delta, total_scan);

		while (total_scan >= batch_size) {
			int nr_before;

			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
			shrink_ret = do_shrinker_shrink(shrinker, shrink,
							batch_size);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, batch_size);
			total_scan -= batch_size;

			cond_resched();
		}

		/*
		 * move the unused scan count back into the shrinker in a
		 * manner that handles concurrent updates. If we exhausted the
		 * scan, there is no need to do an update.
		 */
		do {
			nr = shrinker->nr;
			new_nr = total_scan + nr;
			if (total_scan <= 0)
				break;
		} while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);

		trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
	}
	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return ret;
}
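
/*
 * Rough worked example of the proportioning above (illustrative numbers
 * only): with nr_pages_scanned = 1000 LRU pages scanned, a shrinker using
 * DEFAULT_SEEKS (2), max_pass = 10000 freeable objects reported by the
 * shrinker, and lru_pages = 100000 eligible LRU pages:
 *
 *	delta = (4 * 1000) / 2		= 2000
 *	delta = 2000 * 10000		= 20000000
 *	delta /= (100000 + 1)		~= 199
 *
 * i.e. scanning 1% of the LRU asks the cache to scan roughly 2% of its
 * objects (the factor of two reflects the seek-to-recreate assumption),
 * subject to the max_pass/2 and max_pass*2 clamps applied above.
 */
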
static void set_reclaim_mode(int priority, struct scan_control *sc,
				   bool sync)
{
	reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;

	/*
	 * Initially assume we are entering either lumpy reclaim or
	 * reclaim/compaction.  Depending on the order, we will either set the
	 * sync mode or just reclaim order-0 pages later.
	 */
	if (COMPACTION_BUILD)
		sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
	else
		sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;

	/*
	 * Avoid using lumpy reclaim or reclaim/compaction if possible by
	 * restricting when it's set to either costly allocations or when
	 * under memory pressure.
	 */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		sc->reclaim_mode |= syncmode;
	else if (sc->order && priority < DEF_PRIORITY - 2)
		sc->reclaim_mode |= syncmode;
	else
		sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
}

static void reset_reclaim_mode(struct scan_control *sc)
{
	sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
}
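
/*
 * Illustrative only (not part of this file): what set_reclaim_mode() is
 * expected to produce for a few sample inputs on a COMPACTION_BUILD kernel,
 * assuming DEF_PRIORITY == 12 and PAGE_ALLOC_COSTLY_ORDER == 3.
 */
#if 0
	struct scan_control sc = { .order = 9 };	/* THP-sized request */

	set_reclaim_mode(DEF_PRIORITY, &sc, false);
	/* costly order: RECLAIM_MODE_COMPACTION | RECLAIM_MODE_ASYNC */

	sc.order = 2;
	set_reclaim_mode(DEF_PRIORITY, &sc, false);
	/* small order, little pressure so far:
	 * RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC */

	set_reclaim_mode(DEF_PRIORITY - 3, &sc, true);
	/* small order but priority has dropped:
	 * RECLAIM_MODE_COMPACTION | RECLAIM_MODE_SYNC */
#endif
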
static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.
	 */
	return page_count(page) - page_has_private(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi,
			      struct scan_control *sc)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;

	/* lumpy reclaim for hugepage often needs a lot of writes */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller
 * has __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 struct scan_control *sc)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_aio_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info, sc))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page,
			trace_reclaim_flags(page, sc->reclaim_mode));
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_count.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under tree_lock, then this ordering is not required.
	 */
	if (!page_freeze_refs(page, 2))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_unfreeze_refs(page, 2);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		swapcache_free(swap, page);
	} else {
		void (*freepage)(struct page *);

		freepage = mapping->a_ops->freepage;

		__delete_from_page_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		mem_cgroup_uncharge_cache_page(page);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_unfreeze_refs(page, 1);
		return 1;
	}
	return 0;
}

/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	int lru;
	int active = !!TestClearPageActive(page);
	int was_unevictable = PageUnevictable(page);

	VM_BUG_ON(PageLRU(page));

redo:
	ClearPageUnevictable(page);

	if (page_evictable(page, NULL)) {
		/*
		 * For evictable pages, we can use the cache.
		 * In event of a race, worst case is we end up with an
		 * unevictable page on [in]active list.
		 * We know how to handle that.
		 */
		lru = active + page_lru_base_type(page);
		lru_cache_add_lru(page, lru);
	} else {
		/*
		 * Put unevictable pages directly on zone's unevictable
		 * list.
		 */
		lru = LRU_UNEVICTABLE;
		add_page_to_unevictable_list(page);
		/*
		 * When racing with an mlock or AS_UNEVICTABLE clearing
		 * (page is unlocked) make sure that if the other thread
		 * does not observe our setting of PG_lru and fails
		 * isolation/check_move_unevictable_page,
		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
		 * the page back to the evictable list.
		 *
		 * The other side is TestClearPageMlocked() or shmem_lock().
		 */
		smp_mb();
	}

	/*
	 * page's status can change while we move it among lru.  If an
	 * evictable page is on the unevictable list, it will never be freed.
	 * To avoid that, check again after we added it to the list.
	 */
	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
		if (!isolate_lru_page(page)) {
			put_page(page);
			goto redo;
		}
		/* This means someone else dropped this page from LRU
		 * So, it will be freed or putback to LRU again. There is
		 * nothing to do here.
		 */
	}

	if (was_unevictable && lru != LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGRESCUED);
	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGCULLED);

	put_page(page);		/* drop ref from isolate */
}

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/* Lumpy reclaim - ignore references */
	if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
		return PAGEREF_RECLAIM;

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageAnon(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}

static noinline_for_stack void free_page_list(struct list_head *free_pages)
{
	struct pagevec freed_pvec;
	struct page *page, *tmp;

	pagevec_init(&freed_pvec, 1);

	list_for_each_entry_safe(page, tmp, free_pages, lru) {
		list_del(&page->lru);
		if (!pagevec_add(&freed_pvec, page)) {
			__pagevec_free(&freed_pvec);
			pagevec_reinit(&freed_pvec);
		}
	}

	pagevec_free(&freed_pvec);
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
				      struct zone *zone,
				      struct scan_control *sc,
				      int priority,
				      unsigned long *ret_nr_dirty,
				      unsigned long *ret_nr_writeback)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	int pgactivate = 0;
	unsigned long nr_dirty = 0;
	unsigned long nr_congested = 0;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_writeback = 0;

	cond_resched();

	while (!list_empty(page_list)) {
		enum page_references references;
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(page_zone(page) != zone);

		sc->nr_scanned++;

		if (unlikely(!page_evictable(page, NULL)))
			goto cull_mlocked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			nr_writeback++;
			/*
			 * Synchronous reclaim cannot queue pages for
			 * writeback due to the possibility of stack overflow
			 * but if it encounters a page under writeback, wait
			 * for the IO to complete.
			 */
			if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
			    may_enter_fs)
				wait_on_page_writeback(page);
			else {
				unlock_page(page);
				goto keep_lumpy;
			}
		}

		references = page_check_references(page, sc);
		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!(sc->gfp_mask & __GFP_IO))
				goto keep_locked;
			if (!add_to_swap(page))
				goto activate_locked;
			may_enter_fs = 1;
		}

		mapping = page_mapping(page);

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, TTU_UNMAP)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_MLOCK:
				goto cull_mlocked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			nr_dirty++;

			/*
			 * Only kswapd can writeback filesystem pages to
			 * avoid risk of stack overflow but do not writeback
			 * unless under significant pressure.
			 */
			if (page_is_file_cache(page) &&
			    (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty.
				 */
				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto keep_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sc)) {
			case PAGE_KEEP:
				nr_congested++;
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page))
					goto keep_lumpy;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (!mapping || !__remove_mapping(mapping, page))
			goto keep_locked;

		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache). Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references.
		 */
		__clear_page_locked(page);
free_it:
		nr_reclaimed++;

		/*
		 * Is there need to periodically free_page_list? It would
		 * appear not as the counts should be low
		 */
		list_add(&page->lru, &free_pages);
		continue;

cull_mlocked:
		if (PageSwapCache(page))
			try_to_free_swap(page);
		unlock_page(page);
		putback_lru_page(page);
		reset_reclaim_mode(sc);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && vm_swap_full())
			try_to_free_swap(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		reset_reclaim_mode(sc);
keep_lumpy:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
	}

	/*
	 * Tag a zone as congested if all the dirty pages encountered were
	 * backed by a congested BDI. In this case, reclaimers should just
	 * back off and wait for congestion to clear because further reclaim
	 * will encounter the same problem
	 */
	if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
		zone_set_flag(zone, ZONE_CONGESTED);

	free_page_list(&free_pages);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);
	*ret_nr_dirty += nr_dirty;
	*ret_nr_writeback += nr_writeback;
	return nr_reclaimed;
}

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
{
	bool all_lru_mode;
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
		(ISOLATE_ACTIVE|ISOLATE_INACTIVE);

	/*
	 * When checking the active state, we need to be sure we are
	 * dealing with comparable boolean values.  Take the logical not
	 * of each.
	 */
	if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
		return ret;

	if (!all_lru_mode && !!page_is_file_cache(page) != file)
		return ret;

	/*
	 * When this function is being called for lumpy reclaim, we
	 * initially look into all LRU pages, active, inactive and
	 * unevictable; only give shrink_page_list evictable pages.
	 */
	if (PageUnevictable(page))
		return ret;

	ret = -EBUSY;

	if ((mode & ISOLATE_CLEAN) && (PageDirty(page) || PageWriteback(page)))
		return ret;

	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
		return ret;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 * @order:	The caller's attempted allocation order
 * @mode:	One of the LRU isolation modes
 * @file:	True [1] if isolating file [!anon] pages
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct list_head *src, struct list_head *dst,
		unsigned long *scanned, int order, isolate_mode_t mode,
		int file)
{
	unsigned long nr_taken = 0;
	unsigned long nr_lumpy_taken = 0;
	unsigned long nr_lumpy_dirty = 0;
	unsigned long nr_lumpy_failed = 0;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct page *page;
		unsigned long pfn;
		unsigned long end_pfn;
		unsigned long page_pfn;
		int zone_id;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		switch (__isolate_lru_page(page, mode, file)) {
		case 0:
			list_move(&page->lru, dst);
			mem_cgroup_del_lru(page);
			nr_taken += hpage_nr_pages(page);
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			mem_cgroup_rotate_lru_list(page, page_lru(page));
			continue;

		default:
			BUG();
		}

		if (!order)
			continue;

		/*
		 * Attempt to take all pages in the order aligned region
		 * surrounding the tag page.  Only take those pages of
		 * the same active state as that tag page.
		 * We may safely round the target page pfn down to the
		 * requested order as the mem_map is guaranteed valid out
		 * to MAX_ORDER; where a page is in a different zone we will
		 * detect it from its zone id and abort this block scan.
		 */
		zone_id = page_zone_id(page);
		page_pfn = page_to_pfn(page);
		pfn = page_pfn & ~((1 << order) - 1);
		end_pfn = pfn + (1 << order);
		for (; pfn < end_pfn; pfn++) {
			struct page *cursor_page;

			/* The target page is in the block, ignore it. */
			if (unlikely(pfn == page_pfn))
				continue;

			/* Avoid holes within the zone. */
			if (unlikely(!pfn_valid_within(pfn)))
				break;

			cursor_page = pfn_to_page(pfn);

			/* Check that we have not crossed a zone boundary. */
			if (unlikely(page_zone_id(cursor_page) != zone_id))
				break;

			/*
			 * If we don't have enough swap space, reclaiming
			 * anon pages which don't already have a swap slot is
			 * pointless.
			 */
			if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
			    !PageSwapCache(cursor_page))
				break;

			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
				list_move(&cursor_page->lru, dst);
				mem_cgroup_del_lru(cursor_page);
				nr_taken += hpage_nr_pages(page);
				nr_lumpy_taken++;
				if (PageDirty(cursor_page))
					nr_lumpy_dirty++;
				scan++;
			} else {
				/*
				 * Check if the page is freed already.
				 *
				 * We can't use page_count() as that
				 * requires compound_head and we don't
				 * have a pin on the page here. If a
				 * page is tail, we may or may not
				 * have isolated the head, so assume
				 * it's not free, it'd be tricky to
				 * track the head status without a
				 * page pin.
				 */
				if (!PageTail(cursor_page) &&
				    !atomic_read(&cursor_page->_count))
					continue;
				break;
			}
		}

		/* If we break out of the loop above, lumpy reclaim failed */
		if (pfn < end_pfn)
			nr_lumpy_failed++;
	}

	*scanned = scan;

	trace_mm_vmscan_lru_isolate(order,
			nr_to_scan, scan,
			nr_taken,
			nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
			mode);
	return nr_taken;
}

static unsigned long isolate_pages_global(unsigned long nr,
					struct list_head *dst,
					unsigned long *scanned, int order,
					isolate_mode_t mode,
					struct zone *z, int active, int file)
{
	int lru = LRU_BASE;
	if (active)
		lru += LRU_ACTIVE;
	if (file)
		lru += LRU_FILE;
	return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
							mode, file);
}

/*
 * clear_active_flags() is a helper for shrink_active_list(), clearing
 * any active bits from the pages in the list.
 */
static unsigned long clear_active_flags(struct list_head *page_list,
					unsigned int *count)
{
	int nr_active = 0;
	int lru;
	struct page *page;

	list_for_each_entry(page, page_list, lru) {
		int numpages = hpage_nr_pages(page);
		lru = page_lru_base_type(page);
		if (PageActive(page)) {
			lru += LRU_ACTIVE;
			ClearPageActive(page);
			nr_active += numpages;
		}
		if (count)
			count[lru] += numpages;
	}

	return nr_active;
}

/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
 * vmstat statistic corresponding to whatever LRU list the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared.  If it was found on
 * the active list, it will have PageActive set.  If it was found on
 * the unevictable list, it will have the PageUnevictable bit set. That flag
 * may need to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page. This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	VM_BUG_ON(!page_count(page));

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			int lru = page_lru(page);
			ret = 0;
			get_page(page);
			ClearPageLRU(page);

			del_page_from_lru_list(zone, page, lru);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}
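
/*
 * Illustrative sketch only (not part of this file): the usual pairing of
 * isolate_lru_page() with putback_lru_page() for a caller that already
 * holds its own reference to the page and wants to operate on it while it
 * is off the LRU.
 */
#if 0
	if (!isolate_lru_page(page)) {		/* 0 means success */
		/* ... work on the page while it is not on any LRU ... */
		putback_lru_page(page);		/* drops the isolation ref */
	}
#endif
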
/*
 * Are there way too many processes in the direct reclaim path already?
 */
static int too_many_isolated(struct zone *zone, int file,
		struct scan_control *sc)
{
	unsigned long inactive, isolated;

	if (current_is_kswapd())
		return 0;

	if (!scanning_global_lru(sc))
		return 0;

	if (file) {
		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
	} else {
		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
	}

	return isolated > inactive;
}

/*
 * TODO: Try merging with migrations version of putback_lru_pages
 */
static noinline_for_stack void
putback_lru_pages(struct zone *zone, struct scan_control *sc,
				unsigned long nr_anon, unsigned long nr_file,
				struct list_head *page_list)
{
	struct page *page;
	struct pagevec pvec;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);

	pagevec_init(&pvec, 1);

	/*
	 * Put back any unfreeable pages.
	 */
	spin_lock(&zone->lru_lock);
	while (!list_empty(page_list)) {
		int lru;
		page = lru_to_page(page_list);
		VM_BUG_ON(PageLRU(page));
		list_del(&page->lru);
		if (unlikely(!page_evictable(page, NULL))) {
			spin_unlock_irq(&zone->lru_lock);
			putback_lru_page(page);
			spin_lock_irq(&zone->lru_lock);
			continue;
		}
		SetPageLRU(page);
		lru = page_lru(page);
		add_page_to_lru_list(zone, page, lru);
		if (is_active_lru(lru)) {
			int file = is_file_lru(lru);
			int numpages = hpage_nr_pages(page);
			reclaim_stat->recent_rotated[file] += numpages;
		}
		if (!pagevec_add(&pvec, page)) {
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);

	spin_unlock_irq(&zone->lru_lock);
	pagevec_release(&pvec);
}

static noinline_for_stack void update_isolated_counts(struct zone *zone,
					struct scan_control *sc,
					unsigned long *nr_anon,
					unsigned long *nr_file,
					struct list_head *isolated_list)
{
	unsigned long nr_active;
	unsigned int count[NR_LRU_LISTS] = { 0, };
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);

	nr_active = clear_active_flags(isolated_list, count);
	__count_vm_events(PGDEACTIVATE, nr_active);

	__mod_zone_page_state(zone, NR_ACTIVE_FILE,
			      -count[LRU_ACTIVE_FILE]);
	__mod_zone_page_state(zone, NR_INACTIVE_FILE,
			      -count[LRU_INACTIVE_FILE]);
	__mod_zone_page_state(zone, NR_ACTIVE_ANON,
			      -count[LRU_ACTIVE_ANON]);
	__mod_zone_page_state(zone, NR_INACTIVE_ANON,
			      -count[LRU_INACTIVE_ANON]);

	*nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
	*nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);

	reclaim_stat->recent_scanned[0] += *nr_anon;
	reclaim_stat->recent_scanned[1] += *nr_file;
}

/*
 * Returns true if a direct reclaim should wait on pages under writeback.
 *
 * If we are direct reclaiming for contiguous pages and we do not reclaim
 * everything in the list, try again and wait for writeback IO to complete.
 * This will stall high-order allocations noticeably.  Only do that when we
 * really need to free the pages under high memory pressure.
 */
static inline bool should_reclaim_stall(unsigned long nr_taken,
					unsigned long nr_freed,
					int priority,
					struct scan_control *sc)
{
	int lumpy_stall_priority;

	/* kswapd should not stall on sync IO */
	if (current_is_kswapd())
		return false;

	/* Only stall on lumpy reclaim */
	if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
		return false;

	/* If we have reclaimed everything on the isolated list, no stall */
	if (nr_freed == nr_taken)
		return false;

	/*
	 * For high-order allocations, there are two stall thresholds.
	 * High-cost allocations stall immediately whereas lower
	 * order allocations such as stacks require the scanning
	 * priority to be much higher before stalling.
	 */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		lumpy_stall_priority = DEF_PRIORITY;
	else
		lumpy_stall_priority = DEF_PRIORITY / 3;

	return priority <= lumpy_stall_priority;
}

/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */
static noinline_for_stack unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
		     struct scan_control *sc, int priority, int file)
{
	LIST_HEAD(page_list);
	unsigned long nr_scanned;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_taken;
	unsigned long nr_anon;
	unsigned long nr_file;
	unsigned long nr_dirty = 0;
	unsigned long nr_writeback = 0;
	isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;

	while (unlikely(too_many_isolated(zone, file, sc))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/* We are about to die and free our memory. Return now. */
		if (fatal_signal_pending(current))
			return SWAP_CLUSTER_MAX;
	}

	set_reclaim_mode(priority, sc, false);
	if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
		reclaim_mode |= ISOLATE_ACTIVE;

	lru_add_drain();

	if (!sc->may_unmap)
		reclaim_mode |= ISOLATE_UNMAPPED;
	if (!sc->may_writepage)
		reclaim_mode |= ISOLATE_CLEAN;

	spin_lock_irq(&zone->lru_lock);

	if (scanning_global_lru(sc)) {
		nr_taken = isolate_pages_global(nr_to_scan, &page_list,
			&nr_scanned, sc->order, reclaim_mode, zone, 0, file);
		zone->pages_scanned += nr_scanned;
		if (current_is_kswapd())
			__count_zone_vm_events(PGSCAN_KSWAPD, zone,
					       nr_scanned);
		else
			__count_zone_vm_events(PGSCAN_DIRECT, zone,
					       nr_scanned);
	} else {
		nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
			&nr_scanned, sc->order, reclaim_mode, zone,
			sc->mem_cgroup, 0, file);
		/*
		 * mem_cgroup_isolate_pages() keeps track of
		 * scanned pages on its own.
		 */
	}

	if (nr_taken == 0) {
		spin_unlock_irq(&zone->lru_lock);
		return 0;
	}

	update_isolated_counts(zone, sc, &nr_anon, &nr_file, &page_list);

	spin_unlock_irq(&zone->lru_lock);

	nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority,
						&nr_dirty, &nr_writeback);

	/* Check if we should synchronously wait for writeback */
	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
		set_reclaim_mode(priority, sc, true);
		nr_reclaimed += shrink_page_list(&page_list, zone, sc,
					priority, &nr_dirty, &nr_writeback);
	}

	local_irq_disable();
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);

	putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);

	/*
	 * If reclaim is isolating dirty pages under writeback, it implies
	 * that the long-lived page allocation rate is exceeding the page
	 * laundering rate. Either the global limits are not being effective
	 * at throttling processes due to the page distribution throughout
	 * zones or there is heavy usage of a slow backing device. The
	 * only option is to throttle from reclaim context which is not ideal
	 * as there is no guarantee the dirtying process is throttled in the
	 * same way balance_dirty_pages() manages.
	 *
	 * This scales the number of dirty pages that must be under writeback
	 * before throttling depending on priority. It is a simple backoff
	 * function that has the most effect in the range DEF_PRIORITY to
	 * DEF_PRIORITY-2, the priority range at which reclaim is considered
	 * to be in trouble.
	 *
	 * DEF_PRIORITY   100% isolated pages must be PageWriteback to throttle
	 * DEF_PRIORITY-1  50% must be PageWriteback
	 * DEF_PRIORITY-2  25% must be PageWriteback, kswapd in trouble
	 * ...
	 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
	 *                     isolated page is PageWriteback
	 */
	if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);

	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
		zone_idx(zone),
		nr_scanned, nr_reclaimed,
		priority,
		trace_shrink_flags(file, sc->reclaim_mode));
	return nr_reclaimed;
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */

static void move_active_pages_to_lru(struct zone *zone,
				     struct list_head *list,
				     enum lru_list lru)
{
	unsigned long pgmoved = 0;
	struct pagevec pvec;
	struct page *page;

	pagevec_init(&pvec, 1);

	while (!list_empty(list)) {
		page = lru_to_page(list);

		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);

		list_move(&page->lru, &zone->lru[lru].list);
		mem_cgroup_add_lru_list(page, lru);
		pgmoved += hpage_nr_pages(page);

		if (!pagevec_add(&pvec, page) || list_empty(list)) {
			spin_unlock_irq(&zone->lru_lock);
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
	if (!is_active_lru(lru))
		__count_vm_events(PGDEACTIVATE, pgmoved);
}

static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
			       struct scan_control *sc, int priority, int file)
{
	unsigned long nr_taken;
	unsigned long pgscanned;
	unsigned long vm_flags;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_active);
	LIST_HEAD(l_inactive);
	struct page *page;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
	unsigned long nr_rotated = 0;
	isolate_mode_t reclaim_mode = ISOLATE_ACTIVE;

	lru_add_drain();

	if (!sc->may_unmap)
		reclaim_mode |= ISOLATE_UNMAPPED;
	if (!sc->may_writepage)
		reclaim_mode |= ISOLATE_CLEAN;

	spin_lock_irq(&zone->lru_lock);
	if (scanning_global_lru(sc)) {
		nr_taken = isolate_pages_global(nr_pages, &l_hold,
						&pgscanned, sc->order,
						reclaim_mode, zone,
						1, file);
		zone->pages_scanned += pgscanned;
	} else {
		nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
						&pgscanned, sc->order,
						reclaim_mode, zone,
						sc->mem_cgroup, 1, file);
		/*
		 * mem_cgroup_isolate_pages() keeps track of
		 * scanned pages on its own.
		 */
	}

	reclaim_stat->recent_scanned[file] += nr_taken;

	__count_zone_vm_events(PGREFILL, zone, pgscanned);
	if (file)
		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
	else
		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
	spin_unlock_irq(&zone->lru_lock);

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);

		if (unlikely(!page_evictable(page, NULL))) {
			putback_lru_page(page);
			continue;
		}

		if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
			nr_rotated += hpage_nr_pages(page);
			/*
			 * Identify referenced, file-backed active pages and
			 * give them one more trip around the active list, so
			 * that executable code gets better chances to stay in
			 * memory under moderate memory pressure.  Anon pages
			 * are not likely to be evicted by use-once streaming
			 * IO, plus JVM can create lots of anon VM_EXEC pages,
			 * so we ignore them here.
			 */
			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}

		ClearPageActive(page);	/* we are de-activating */
		list_add(&page->lru, &l_inactive);
	}

	/*
	 * Move pages back to the lru list.
	 */
	spin_lock_irq(&zone->lru_lock);
	/*
	 * Count referenced pages from currently used mappings as rotated,
	 * even though only some of them are actually re-activated.  This
	 * helps balance scan pressure between file and anonymous pages in
	 * get_scan_ratio.
	 */
	reclaim_stat->recent_rotated[file] += nr_rotated;

	move_active_pages_to_lru(zone, &l_active,
						LRU_ACTIVE + file * LRU_FILE);
	move_active_pages_to_lru(zone, &l_inactive,
						LRU_BASE   + file * LRU_FILE);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
	spin_unlock_irq(&zone->lru_lock);
}

#ifdef CONFIG_SWAP
static int inactive_anon_is_low_global(struct zone *zone)
{
	unsigned long active, inactive;

	active = zone_page_state(zone, NR_ACTIVE_ANON);
	inactive = zone_page_state(zone, NR_INACTIVE_ANON);

	if (inactive * zone->inactive_ratio < active)
		return 1;

	return 0;
}

/**
 * inactive_anon_is_low - check if anonymous pages need to be deactivated
 * @zone: zone to check
 * @sc:   scan control of this context
 *
 * Returns true if the zone does not have enough inactive anon pages,
 * meaning some active anon pages need to be deactivated.
 */
static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
{
	int low;

	/*
	 * If we don't have swap space, anonymous page deactivation
	 * is pointless.
	 */
	if (!total_swap_pages)
		return 0;

	if (scanning_global_lru(sc))
		low = inactive_anon_is_low_global(zone);
	else
		low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup, zone);
	return low;
}
#else
static inline int inactive_anon_is_low(struct zone *zone,
					struct scan_control *sc)
{
	return 0;
}
#endif
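
/*
 * Worked example for the check above (illustrative numbers only): with an
 * inactive_ratio of 3, a zone holding 300000 active and 90000 inactive
 * anonymous pages gives 90000 * 3 = 270000 < 300000, so
 * inactive_anon_is_low() returns 1 and shrink_zone() will deactivate some
 * active anon pages even when it is not otherwise scanning the anon lists.
 */
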
static int inactive_file_is_low_global(struct zone *zone)
{
	unsigned long active, inactive;

	active = zone_page_state(zone, NR_ACTIVE_FILE);
	inactive = zone_page_state(zone, NR_INACTIVE_FILE);

	return (active > inactive);
}

/**
 * inactive_file_is_low - check if file pages need to be deactivated
 * @zone: zone to check
 * @sc:   scan control of this context
 *
 * When the system is doing streaming IO, memory pressure here
 * ensures that active file pages get deactivated, until more
 * than half of the file pages are on the inactive list.
 *
 * Once we get to that situation, protect the system's working
 * set from being evicted by disabling active file page aging.
 *
 * This uses a different ratio than the anonymous pages, because
 * the page cache uses a use-once replacement algorithm.
 */
static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
{
	int low;

	if (scanning_global_lru(sc))
		low = inactive_file_is_low_global(zone);
	else
		low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup, zone);
	return low;
}

static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
				int file)
{
	if (file)
		return inactive_file_is_low(zone, sc);
	else
		return inactive_anon_is_low(zone, sc);
}

static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
	struct zone *zone, struct scan_control *sc, int priority)
{
	int file = is_file_lru(lru);

	if (is_active_lru(lru)) {
		if (inactive_list_is_low(zone, sc, file))
			shrink_active_list(nr_to_scan, zone, sc, priority, file);
		return 0;
	}

	return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
}

static int vmscan_swappiness(struct scan_control *sc)
{
	if (scanning_global_lru(sc))
		return vm_swappiness;
	return mem_cgroup_swappiness(sc->mem_cgroup);
}

/*
 * Determine how aggressively the anon and file LRU lists should be
 * scanned.  The relative value of each set of LRU lists is determined
 * by looking at the fraction of the pages scanned we did rotate back
 * onto the active list instead of evict.
 *
 * nr[0] = anon pages to scan; nr[1] = file pages to scan
 */
static void get_scan_count(struct zone *zone, struct scan_control *sc,
					unsigned long *nr, int priority)
{
	unsigned long anon, file, free;
	unsigned long anon_prio, file_prio;
	unsigned long ap, fp;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
	u64 fraction[2], denominator;
	enum lru_list l;
	int noswap = 0;
	bool force_scan = false;

	/*
	 * If the zone or memcg is small, nr[l] can be 0.  This
	 * results in no scanning on this priority and a potential
	 * priority drop.  Global direct reclaim can go to the next
	 * zone and tends to have no problems.  Global kswapd is for
	 * zone balancing and it needs to scan a minimum amount.  When
	 * reclaiming for a memcg, a priority drop can cause high
	 * latencies, so it's better to scan a minimum amount there as
	 * well.
	 */
	if (scanning_global_lru(sc) && current_is_kswapd())
		force_scan = true;
	if (!scanning_global_lru(sc))
		force_scan = true;

	/* If we have no swap space, do not bother scanning anon pages. */
	if (!sc->may_swap || (nr_swap_pages <= 0)) {
		noswap = 1;
		fraction[0] = 0;
		fraction[1] = 1;
		denominator = 1;
		goto out;
	}

	anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
	file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);

	if (scanning_global_lru(sc)) {
		free  = zone_page_state(zone, NR_FREE_PAGES);
		/* If we have very few page cache pages,
		   force-scan anon pages. */
		if (unlikely(file + free <= high_wmark_pages(zone))) {
			fraction[0] = 1;
			fraction[1] = 0;
			denominator = 1;
			goto out;
		}
	}

	/*
	 * With swappiness at 100, anonymous and file have the same priority.
	 * This scanning priority is essentially the inverse of IO cost.
1911 */ 1912 anon_prio = vmscan_swappiness(sc); 1913 file_prio = 200 - vmscan_swappiness(sc); 1914 1915 /* 1916 * OK, so we have swap space and a fair amount of page cache 1917 * pages. We use the recently rotated / recently scanned 1918 * ratios to determine how valuable each cache is. 1919 * 1920 * Because workloads change over time (and to avoid overflow) 1921 * we keep these statistics as a floating average, which ends 1922 * up weighing recent references more than old ones. 1923 * 1924 * anon in [0], file in [1] 1925 */ 1926 spin_lock_irq(&zone->lru_lock); 1927 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { 1928 reclaim_stat->recent_scanned[0] /= 2; 1929 reclaim_stat->recent_rotated[0] /= 2; 1930 } 1931 1932 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { 1933 reclaim_stat->recent_scanned[1] /= 2; 1934 reclaim_stat->recent_rotated[1] /= 2; 1935 } 1936 1937 /* 1938 * The amount of pressure on anon vs file pages is inversely 1939 * proportional to the fraction of recently scanned pages on 1940 * each list that were recently referenced and in active use. 1941 */ 1942 ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1); 1943 ap /= reclaim_stat->recent_rotated[0] + 1; 1944 1945 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1); 1946 fp /= reclaim_stat->recent_rotated[1] + 1; 1947 spin_unlock_irq(&zone->lru_lock); 1948 1949 fraction[0] = ap; 1950 fraction[1] = fp; 1951 denominator = ap + fp + 1; 1952 out: 1953 for_each_evictable_lru(l) { 1954 int file = is_file_lru(l); 1955 unsigned long scan; 1956 1957 scan = zone_nr_lru_pages(zone, sc, l); 1958 if (priority || noswap) { 1959 scan >>= priority; 1960 if (!scan && force_scan) 1961 scan = SWAP_CLUSTER_MAX; 1962 scan = div64_u64(scan * fraction[file], denominator); 1963 } 1964 nr[l] = scan; 1965 } 1966 } 1967 1968 /* 1969 * Reclaim/compaction depends on a number of pages being freed. To avoid 1970 * disruption to the system, a small number of order-0 pages continue to be 1971 * rotated and reclaimed in the normal fashion. However, by the time we get 1972 * back to the allocator and call try_to_compact_zone(), we ensure that 1973 * there are enough free pages for it to be likely successful 1974 */ 1975 static inline bool should_continue_reclaim(struct zone *zone, 1976 unsigned long nr_reclaimed, 1977 unsigned long nr_scanned, 1978 struct scan_control *sc) 1979 { 1980 unsigned long pages_for_compaction; 1981 unsigned long inactive_lru_pages; 1982 1983 /* If not in reclaim/compaction mode, stop */ 1984 if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) 1985 return false; 1986 1987 /* Consider stopping depending on scan and reclaim activity */ 1988 if (sc->gfp_mask & __GFP_REPEAT) { 1989 /* 1990 * For __GFP_REPEAT allocations, stop reclaiming if the 1991 * full LRU list has been scanned and we are still failing 1992 * to reclaim pages. This full LRU scan is potentially 1993 * expensive but a __GFP_REPEAT caller really wants to succeed 1994 */ 1995 if (!nr_reclaimed && !nr_scanned) 1996 return false; 1997 } else { 1998 /* 1999 * For non-__GFP_REPEAT allocations which can presumably 2000 * fail without consequence, stop if we failed to reclaim 2001 * any pages from the last SWAP_CLUSTER_MAX number of 2002 * pages that were scanned. 
This will return to the 2003 * caller faster at the risk reclaim/compaction and 2004 * the resulting allocation attempt fails 2005 */ 2006 if (!nr_reclaimed) 2007 return false; 2008 } 2009 2010 /* 2011 * If we have not reclaimed enough pages for compaction and the 2012 * inactive lists are large enough, continue reclaiming 2013 */ 2014 pages_for_compaction = (2UL << sc->order); 2015 inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) + 2016 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); 2017 if (sc->nr_reclaimed < pages_for_compaction && 2018 inactive_lru_pages > pages_for_compaction) 2019 return true; 2020 2021 /* If compaction would go ahead or the allocation would succeed, stop */ 2022 switch (compaction_suitable(zone, sc->order)) { 2023 case COMPACT_PARTIAL: 2024 case COMPACT_CONTINUE: 2025 return false; 2026 default: 2027 return true; 2028 } 2029 } 2030 2031 /* 2032 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 2033 */ 2034 static void shrink_zone(int priority, struct zone *zone, 2035 struct scan_control *sc) 2036 { 2037 unsigned long nr[NR_LRU_LISTS]; 2038 unsigned long nr_to_scan; 2039 enum lru_list l; 2040 unsigned long nr_reclaimed, nr_scanned; 2041 unsigned long nr_to_reclaim = sc->nr_to_reclaim; 2042 struct blk_plug plug; 2043 2044 restart: 2045 nr_reclaimed = 0; 2046 nr_scanned = sc->nr_scanned; 2047 get_scan_count(zone, sc, nr, priority); 2048 2049 blk_start_plug(&plug); 2050 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 2051 nr[LRU_INACTIVE_FILE]) { 2052 for_each_evictable_lru(l) { 2053 if (nr[l]) { 2054 nr_to_scan = min_t(unsigned long, 2055 nr[l], SWAP_CLUSTER_MAX); 2056 nr[l] -= nr_to_scan; 2057 2058 nr_reclaimed += shrink_list(l, nr_to_scan, 2059 zone, sc, priority); 2060 } 2061 } 2062 /* 2063 * On large memory systems, scan >> priority can become 2064 * really large. This is fine for the starting priority; 2065 * we want to put equal scanning pressure on each zone. 2066 * However, if the VM has a harder time of freeing pages, 2067 * with multiple processes reclaiming pages, the total 2068 * freeing target can get unreasonably large. 2069 */ 2070 if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY) 2071 break; 2072 } 2073 blk_finish_plug(&plug); 2074 sc->nr_reclaimed += nr_reclaimed; 2075 2076 /* 2077 * Even if we did not try to evict anon pages at all, we want to 2078 * rebalance the anon lru active/inactive ratio. 2079 */ 2080 if (inactive_anon_is_low(zone, sc)) 2081 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); 2082 2083 /* reclaim/compaction might need reclaim to continue */ 2084 if (should_continue_reclaim(zone, nr_reclaimed, 2085 sc->nr_scanned - nr_scanned, sc)) 2086 goto restart; 2087 2088 throttle_vm_writeout(sc->gfp_mask); 2089 } 2090 2091 /* 2092 * This is the direct reclaim path, for page-allocating processes. We only 2093 * try to reclaim pages from zones which will satisfy the caller's allocation 2094 * request. 2095 * 2096 * We reclaim from a zone even if that zone is over high_wmark_pages(zone). 2097 * Because: 2098 * a) The caller may be trying to free *extra* pages to satisfy a higher-order 2099 * allocation or 2100 * b) The target zone may be at high_wmark_pages(zone) but the lower zones 2101 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min' 2102 * zone defense algorithm. 2103 * 2104 * If a zone is deemed to be full of pinned pages then just give it a light 2105 * scan then give up on it. 
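 *
 * (Editorial note, not part of the original source: when the kernel is
 * built with compaction, reclaim on behalf of an allocation above
 * PAGE_ALLOC_COSTLY_ORDER (order 3, i.e. more than eight contiguous
 * pages) skips a zone once compaction_suitable() or compaction_deferred()
 * reports that compaction can take over there, and the caller is told to
 * abort reclaim; smaller orders keep reclaiming normally.)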
2106 * 2107 * This function returns true if a zone is being reclaimed for a costly 2108 * high-order allocation and compaction is either ready to begin or deferred. 2109 * This indicates to the caller that it should retry the allocation or fail. 2110 */ 2111 static bool shrink_zones(int priority, struct zonelist *zonelist, 2112 struct scan_control *sc) 2113 { 2114 struct zoneref *z; 2115 struct zone *zone; 2116 unsigned long nr_soft_reclaimed; 2117 unsigned long nr_soft_scanned; 2118 bool should_abort_reclaim = false; 2119 2120 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2121 gfp_zone(sc->gfp_mask), sc->nodemask) { 2122 if (!populated_zone(zone)) 2123 continue; 2124 /* 2125 * Take care that memory controller reclaiming has only a small 2126 * influence on the global LRU. 2127 */ 2128 if (scanning_global_lru(sc)) { 2129 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2130 continue; 2131 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2132 continue; /* Let kswapd poll it */ 2133 if (COMPACTION_BUILD) { 2134 /* 2135 * If we already have plenty of memory free for 2136 * compaction in this zone, don't free any more. 2137 * Even though compaction is invoked for any 2138 * non-zero order, only frequent costly order 2139 * reclamation is disruptive enough to become a 2140 * noticeable problem, like transparent huge page 2141 * allocations. 2142 */ 2143 if (sc->order > PAGE_ALLOC_COSTLY_ORDER && 2144 (compaction_suitable(zone, sc->order) || 2145 compaction_deferred(zone))) { 2146 should_abort_reclaim = true; 2147 continue; 2148 } 2149 } 2150 /* 2151 * This steals pages from memory cgroups over softlimit 2152 * and returns the number of reclaimed pages and 2153 * scanned pages. This works for global memory pressure 2154 * and balancing, not for a memcg's limit. 2155 */ 2156 nr_soft_scanned = 0; 2157 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, 2158 sc->order, sc->gfp_mask, 2159 &nr_soft_scanned); 2160 sc->nr_reclaimed += nr_soft_reclaimed; 2161 sc->nr_scanned += nr_soft_scanned; 2162 /* may need a check here to avoid a further shrink_zone() call */ 2163 } 2164 2165 shrink_zone(priority, zone, sc); 2166 } 2167 2168 return should_abort_reclaim; 2169 } 2170 2171 static bool zone_reclaimable(struct zone *zone) 2172 { 2173 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; 2174 } 2175 2176 /* All zones in zonelist are unreclaimable? */ 2177 static bool all_unreclaimable(struct zonelist *zonelist, 2178 struct scan_control *sc) 2179 { 2180 struct zoneref *z; 2181 struct zone *zone; 2182 2183 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2184 gfp_zone(sc->gfp_mask), sc->nodemask) { 2185 if (!populated_zone(zone)) 2186 continue; 2187 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2188 continue; 2189 if (!zone->all_unreclaimable) 2190 return false; 2191 } 2192 2193 return true; 2194 } 2195 2196 /* 2197 * This is the main entry point to direct page reclaim. 2198 * 2199 * If a full scan of the inactive list fails to free enough memory then we 2200 * are "out of memory" and something needs to be killed. 2201 * 2202 * If the caller is !__GFP_FS then the probability of a failure is reasonably 2203 * high - the zone may be full of dirty or under-writeback pages, which this 2204 * caller can't do much about. We kick the writeback threads and take explicit 2205 * naps in the hope that some of these pages can be written. But if the 2206 * allocating task holds filesystem locks which prevent writeout this might not 2207 * work, and the allocation attempt will fail.
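 *
 * (Editorial note, not part of the original source: the priority loop
 * below counts down from DEF_PRIORITY (12) to 0, and get_scan_count()
 * shifts each LRU list size right by the current priority, so the first
 * pass considers roughly 1/4096th of every list and each later pass
 * doubles the scan target until, at priority 0, whole lists are
 * eligible.)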
2208 * 2209 * returns: 0, if no pages reclaimed 2210 * else, the number of pages reclaimed 2211 */ 2212 static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 2213 struct scan_control *sc, 2214 struct shrink_control *shrink) 2215 { 2216 int priority; 2217 unsigned long total_scanned = 0; 2218 struct reclaim_state *reclaim_state = current->reclaim_state; 2219 struct zoneref *z; 2220 struct zone *zone; 2221 unsigned long writeback_threshold; 2222 2223 get_mems_allowed(); 2224 delayacct_freepages_start(); 2225 2226 if (scanning_global_lru(sc)) 2227 count_vm_event(ALLOCSTALL); 2228 2229 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 2230 sc->nr_scanned = 0; 2231 if (!priority) 2232 disable_swap_token(sc->mem_cgroup); 2233 if (shrink_zones(priority, zonelist, sc)) 2234 break; 2235 2236 /* 2237 * Don't shrink slabs when reclaiming memory from 2238 * over limit cgroups 2239 */ 2240 if (scanning_global_lru(sc)) { 2241 unsigned long lru_pages = 0; 2242 for_each_zone_zonelist(zone, z, zonelist, 2243 gfp_zone(sc->gfp_mask)) { 2244 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2245 continue; 2246 2247 lru_pages += zone_reclaimable_pages(zone); 2248 } 2249 2250 shrink_slab(shrink, sc->nr_scanned, lru_pages); 2251 if (reclaim_state) { 2252 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 2253 reclaim_state->reclaimed_slab = 0; 2254 } 2255 } 2256 total_scanned += sc->nr_scanned; 2257 if (sc->nr_reclaimed >= sc->nr_to_reclaim) 2258 goto out; 2259 2260 /* 2261 * Try to write back as many pages as we just scanned. This 2262 * tends to cause slow streaming writers to write data to the 2263 * disk smoothly, at the dirtying rate, which is nice. But 2264 * that's undesirable in laptop mode, where we *want* lumpy 2265 * writeout. So in laptop mode, write out the whole world. 2266 */ 2267 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; 2268 if (total_scanned > writeback_threshold) { 2269 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned, 2270 WB_REASON_TRY_TO_FREE_PAGES); 2271 sc->may_writepage = 1; 2272 } 2273 2274 /* Take a nap, wait for some writeback to complete */ 2275 if (!sc->hibernation_mode && sc->nr_scanned && 2276 priority < DEF_PRIORITY - 2) { 2277 struct zone *preferred_zone; 2278 2279 first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask), 2280 &cpuset_current_mems_allowed, 2281 &preferred_zone); 2282 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10); 2283 } 2284 } 2285 2286 out: 2287 delayacct_freepages_end(); 2288 put_mems_allowed(); 2289 2290 if (sc->nr_reclaimed) 2291 return sc->nr_reclaimed; 2292 2293 /* 2294 * As hibernation is going on, kswapd is freezed so that it can't mark 2295 * the zone into all_unreclaimable. Thus bypassing all_unreclaimable 2296 * check. 2297 */ 2298 if (oom_killer_disabled) 2299 return 0; 2300 2301 /* top priority shrink_zones still had more to do? 
don't OOM, then */ 2302 if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc)) 2303 return 1; 2304 2305 return 0; 2306 } 2307 2308 unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 2309 gfp_t gfp_mask, nodemask_t *nodemask) 2310 { 2311 unsigned long nr_reclaimed; 2312 struct scan_control sc = { 2313 .gfp_mask = gfp_mask, 2314 .may_writepage = !laptop_mode, 2315 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2316 .may_unmap = 1, 2317 .may_swap = 1, 2318 .order = order, 2319 .mem_cgroup = NULL, 2320 .nodemask = nodemask, 2321 }; 2322 struct shrink_control shrink = { 2323 .gfp_mask = sc.gfp_mask, 2324 }; 2325 2326 trace_mm_vmscan_direct_reclaim_begin(order, 2327 sc.may_writepage, 2328 gfp_mask); 2329 2330 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2331 2332 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 2333 2334 return nr_reclaimed; 2335 } 2336 2337 #ifdef CONFIG_CGROUP_MEM_RES_CTLR 2338 2339 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 2340 gfp_t gfp_mask, bool noswap, 2341 struct zone *zone, 2342 unsigned long *nr_scanned) 2343 { 2344 struct scan_control sc = { 2345 .nr_scanned = 0, 2346 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2347 .may_writepage = !laptop_mode, 2348 .may_unmap = 1, 2349 .may_swap = !noswap, 2350 .order = 0, 2351 .mem_cgroup = mem, 2352 }; 2353 2354 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2355 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 2356 2357 trace_mm_vmscan_memcg_softlimit_reclaim_begin(0, 2358 sc.may_writepage, 2359 sc.gfp_mask); 2360 2361 /* 2362 * NOTE: Although we can get the priority field, using it 2363 * here is not a good idea, since it limits the pages we can scan. 2364 * if we don't reclaim here, the shrink_zone from balance_pgdat 2365 * will pick up pages from other mem cgroup's as well. We hack 2366 * the priority and make it zero. 2367 */ 2368 shrink_zone(0, zone, &sc); 2369 2370 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 2371 2372 *nr_scanned = sc.nr_scanned; 2373 return sc.nr_reclaimed; 2374 } 2375 2376 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, 2377 gfp_t gfp_mask, 2378 bool noswap) 2379 { 2380 struct zonelist *zonelist; 2381 unsigned long nr_reclaimed; 2382 int nid; 2383 struct scan_control sc = { 2384 .may_writepage = !laptop_mode, 2385 .may_unmap = 1, 2386 .may_swap = !noswap, 2387 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2388 .order = 0, 2389 .mem_cgroup = mem_cont, 2390 .nodemask = NULL, /* we don't care the placement */ 2391 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2392 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), 2393 }; 2394 struct shrink_control shrink = { 2395 .gfp_mask = sc.gfp_mask, 2396 }; 2397 2398 /* 2399 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't 2400 * take care of from where we get pages. So the node where we start the 2401 * scan does not need to be the current node. 2402 */ 2403 nid = mem_cgroup_select_victim_node(mem_cont); 2404 2405 zonelist = NODE_DATA(nid)->node_zonelists; 2406 2407 trace_mm_vmscan_memcg_reclaim_begin(0, 2408 sc.may_writepage, 2409 sc.gfp_mask); 2410 2411 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2412 2413 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 2414 2415 return nr_reclaimed; 2416 } 2417 #endif 2418 2419 /* 2420 * pgdat_balanced is used when checking if a node is balanced for high-order 2421 * allocations. Only zones that meet watermarks and are in a zone allowed 2422 * by the callers classzone_idx are added to balanced_pages. 
The total of 2423 * balanced pages must be at least 25% of the pages in the zones allowed by 2424 * classzone_idx for the node to be considered balanced. Forcing all zones to 2425 * be balanced for high orders can cause excessive reclaim when there are 2426 * imbalanced zones. The choice of 25% is due to 2427 * o a 16M DMA zone that is balanced will not balance a zone on any 2428 * reasonably sized machine 2429 * o On all other machines, the top zone must be at least a reasonable 2430 * percentage of the middle zones. For example, on 32-bit x86, highmem 2431 * would need to be at least 256M for it to balance a whole node. 2432 * Similarly, on x86-64 the Normal zone would need to be at least 1G 2433 * to balance a node on its own. These seemed like reasonable ratios. 2434 */ 2435 static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages, 2436 int classzone_idx) 2437 { 2438 unsigned long present_pages = 0; 2439 int i; 2440 2441 for (i = 0; i <= classzone_idx; i++) 2442 present_pages += pgdat->node_zones[i].present_pages; 2443 2444 /* A special case here: if zone has no page, we think it's balanced */ 2445 return balanced_pages >= (present_pages >> 2); 2446 } 2447 2448 /* is kswapd sleeping prematurely? */ 2449 static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining, 2450 int classzone_idx) 2451 { 2452 int i; 2453 unsigned long balanced = 0; 2454 bool all_zones_ok = true; 2455 2456 /* If a direct reclaimer woke kswapd within HZ/10, it's premature */ 2457 if (remaining) 2458 return true; 2459 2460 /* Check the watermark levels */ 2461 for (i = 0; i <= classzone_idx; i++) { 2462 struct zone *zone = pgdat->node_zones + i; 2463 2464 if (!populated_zone(zone)) 2465 continue; 2466 2467 /* 2468 * balance_pgdat() skips over all_unreclaimable after 2469 * DEF_PRIORITY. Effectively, it considers them balanced so 2470 * they must be considered balanced here as well if kswapd 2471 * is to sleep 2472 */ 2473 if (zone->all_unreclaimable) { 2474 balanced += zone->present_pages; 2475 continue; 2476 } 2477 2478 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), 2479 i, 0)) 2480 all_zones_ok = false; 2481 else 2482 balanced += zone->present_pages; 2483 } 2484 2485 /* 2486 * For high-order requests, the balanced zones must contain at least 2487 * 25% of the node's pages for kswapd to sleep. For order-0, all zones 2488 * must be balanced 2489 */ 2490 if (order) 2491 return !pgdat_balanced(pgdat, balanced, classzone_idx); 2492 else 2493 return !all_zones_ok; 2494 } 2495 2496 /* 2497 * For kswapd, balance_pgdat() will work across all this node's zones until 2498 * they are all at high_wmark_pages(zone). 2499 * 2500 * Returns the final order kswapd was reclaiming at 2501 * 2502 * There is special handling here for zones which are full of pinned pages. 2503 * This can happen if the pages are all mlocked, or if they are all used by 2504 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. 2505 * What we do is to detect the case where all pages in the zone have been 2506 * scanned twice and there has been zero successful reclaim. Mark the zone as 2507 * dead and from now on, only perform a short scan. Basically we're polling 2508 * the zone for when the problem goes away. 2509 * 2510 * kswapd scans the zones in the highmem->normal->dma direction.
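 *
 * (Editorial illustration of the 25% rule above, not part of the original
 * source: on a node with a 16MB DMA zone, an ~880MB Normal zone and a
 * ~3GB HighMem zone, present_pages up to a HighMem classzone_idx is
 * roughly 4GB, so zones totalling about 1GB must sit at their high
 * watermark before pgdat_balanced() reports the node as balanced for a
 * high-order wakeup.)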
It skips 2511 * zones which have free_pages > high_wmark_pages(zone), but once a zone is 2512 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the 2513 * lower zones regardless of the number of free pages in the lower zones. This 2514 * interoperates with the page allocator fallback scheme to ensure that aging 2515 * of pages is balanced across the zones. 2516 */ 2517 static unsigned long balance_pgdat(pg_data_t *pgdat, int order, 2518 int *classzone_idx) 2519 { 2520 int all_zones_ok; 2521 unsigned long balanced; 2522 int priority; 2523 int i; 2524 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 2525 unsigned long total_scanned; 2526 struct reclaim_state *reclaim_state = current->reclaim_state; 2527 unsigned long nr_soft_reclaimed; 2528 unsigned long nr_soft_scanned; 2529 struct scan_control sc = { 2530 .gfp_mask = GFP_KERNEL, 2531 .may_unmap = 1, 2532 .may_swap = 1, 2533 /* 2534 * kswapd doesn't want to be bailed out while reclaim. because 2535 * we want to put equal scanning pressure on each zone. 2536 */ 2537 .nr_to_reclaim = ULONG_MAX, 2538 .order = order, 2539 .mem_cgroup = NULL, 2540 }; 2541 struct shrink_control shrink = { 2542 .gfp_mask = sc.gfp_mask, 2543 }; 2544 loop_again: 2545 total_scanned = 0; 2546 sc.nr_reclaimed = 0; 2547 sc.may_writepage = !laptop_mode; 2548 count_vm_event(PAGEOUTRUN); 2549 2550 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 2551 unsigned long lru_pages = 0; 2552 int has_under_min_watermark_zone = 0; 2553 2554 /* The swap token gets in the way of swapout... */ 2555 if (!priority) 2556 disable_swap_token(NULL); 2557 2558 all_zones_ok = 1; 2559 balanced = 0; 2560 2561 /* 2562 * Scan in the highmem->dma direction for the highest 2563 * zone which needs scanning 2564 */ 2565 for (i = pgdat->nr_zones - 1; i >= 0; i--) { 2566 struct zone *zone = pgdat->node_zones + i; 2567 2568 if (!populated_zone(zone)) 2569 continue; 2570 2571 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2572 continue; 2573 2574 /* 2575 * Do some background aging of the anon list, to give 2576 * pages a chance to be referenced before reclaiming. 2577 */ 2578 if (inactive_anon_is_low(zone, &sc)) 2579 shrink_active_list(SWAP_CLUSTER_MAX, zone, 2580 &sc, priority, 0); 2581 2582 if (!zone_watermark_ok_safe(zone, order, 2583 high_wmark_pages(zone), 0, 0)) { 2584 end_zone = i; 2585 break; 2586 } else { 2587 /* If balanced, clear the congested flag */ 2588 zone_clear_flag(zone, ZONE_CONGESTED); 2589 } 2590 } 2591 if (i < 0) 2592 goto out; 2593 2594 for (i = 0; i <= end_zone; i++) { 2595 struct zone *zone = pgdat->node_zones + i; 2596 2597 lru_pages += zone_reclaimable_pages(zone); 2598 } 2599 2600 /* 2601 * Now scan the zone in the dma->highmem direction, stopping 2602 * at the last zone which needs scanning. 2603 * 2604 * We do this because the page allocator works in the opposite 2605 * direction. This prevents the page allocator from allocating 2606 * pages behind kswapd's direction of progress, which would 2607 * cause too much scanning of the lower zones. 2608 */ 2609 for (i = 0; i <= end_zone; i++) { 2610 struct zone *zone = pgdat->node_zones + i; 2611 int nr_slab; 2612 unsigned long balance_gap; 2613 2614 if (!populated_zone(zone)) 2615 continue; 2616 2617 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2618 continue; 2619 2620 sc.nr_scanned = 0; 2621 2622 nr_soft_scanned = 0; 2623 /* 2624 * Call soft limit reclaim before calling shrink_zone. 
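 *
 * (Editorial note, not part of the original source: soft limit reclaim
 * targets memory cgroups that are currently above their soft limit,
 * preferring those with the largest excess, so that global pressure falls
 * on the worst offenders before this zone's general LRU lists are
 * shrunk.)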
2625 */ 2626 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, 2627 order, sc.gfp_mask, 2628 &nr_soft_scanned); 2629 sc.nr_reclaimed += nr_soft_reclaimed; 2630 total_scanned += nr_soft_scanned; 2631 2632 /* 2633 * We put equal pressure on every zone, unless 2634 * one zone has way too many pages free 2635 * already. The "too many pages" is defined 2636 * as the high wmark plus a "gap" where the 2637 * gap is either the low watermark or 1% 2638 * of the zone, whichever is smaller. 2639 */ 2640 balance_gap = min(low_wmark_pages(zone), 2641 (zone->present_pages + 2642 KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / 2643 KSWAPD_ZONE_BALANCE_GAP_RATIO); 2644 if (!zone_watermark_ok_safe(zone, order, 2645 high_wmark_pages(zone) + balance_gap, 2646 end_zone, 0)) { 2647 shrink_zone(priority, zone, &sc); 2648 2649 reclaim_state->reclaimed_slab = 0; 2650 nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages); 2651 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 2652 total_scanned += sc.nr_scanned; 2653 2654 if (nr_slab == 0 && !zone_reclaimable(zone)) 2655 zone->all_unreclaimable = 1; 2656 } 2657 2658 /* 2659 * If we've done a decent amount of scanning and 2660 * the reclaim ratio is low, start doing writepage 2661 * even in laptop mode 2662 */ 2663 if (total_scanned > SWAP_CLUSTER_MAX * 2 && 2664 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2) 2665 sc.may_writepage = 1; 2666 2667 if (zone->all_unreclaimable) { 2668 if (end_zone && end_zone == i) 2669 end_zone--; 2670 continue; 2671 } 2672 2673 if (!zone_watermark_ok_safe(zone, order, 2674 high_wmark_pages(zone), end_zone, 0)) { 2675 all_zones_ok = 0; 2676 /* 2677 * We are still under min water mark. This 2678 * means that we have a GFP_ATOMIC allocation 2679 * failure risk. Hurry up! 2680 */ 2681 if (!zone_watermark_ok_safe(zone, order, 2682 min_wmark_pages(zone), end_zone, 0)) 2683 has_under_min_watermark_zone = 1; 2684 } else { 2685 /* 2686 * If a zone reaches its high watermark, 2687 * consider it to be no longer congested. It's 2688 * possible there are dirty pages backed by 2689 * congested BDIs but as pressure is relieved, 2690 * spectulatively avoid congestion waits 2691 */ 2692 zone_clear_flag(zone, ZONE_CONGESTED); 2693 if (i <= *classzone_idx) 2694 balanced += zone->present_pages; 2695 } 2696 2697 } 2698 if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx))) 2699 break; /* kswapd: all done */ 2700 /* 2701 * OK, kswapd is getting into trouble. Take a nap, then take 2702 * another pass across the zones. 2703 */ 2704 if (total_scanned && (priority < DEF_PRIORITY - 2)) { 2705 if (has_under_min_watermark_zone) 2706 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT); 2707 else 2708 congestion_wait(BLK_RW_ASYNC, HZ/10); 2709 } 2710 2711 /* 2712 * We do this so kswapd doesn't build up large priorities for 2713 * example when it is freeing in parallel with allocators. It 2714 * matches the direct reclaim path behaviour in terms of impact 2715 * on zone->*_priority. 2716 */ 2717 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) 2718 break; 2719 } 2720 out: 2721 2722 /* 2723 * order-0: All zones must meet high watermark for a balanced node 2724 * high-order: Balanced zones must make up at least 25% of the node 2725 * for the node to be balanced 2726 */ 2727 if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) { 2728 cond_resched(); 2729 2730 try_to_freeze(); 2731 2732 /* 2733 * Fragmentation may mean that the system cannot be 2734 * rebalanced for high-order allocations in all zones. 
2735 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX, 2736 * it means the zones have been fully scanned and are still 2737 * not balanced. For high-order allocations, there is 2738 * little point trying all over again as kswapd may 2739 * infinite loop. 2740 * 2741 * Instead, recheck all watermarks at order-0 as they 2742 * are the most important. If watermarks are ok, kswapd will go 2743 * back to sleep. High-order users can still perform direct 2744 * reclaim if they wish. 2745 */ 2746 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX) 2747 order = sc.order = 0; 2748 2749 goto loop_again; 2750 } 2751 2752 /* 2753 * If kswapd was reclaiming at a higher order, it has the option of 2754 * sleeping without all zones being balanced. Before it does, it must 2755 * ensure that the watermarks for order-0 on *all* zones are met and 2756 * that the congestion flags are cleared. The congestion flag must 2757 * be cleared as kswapd is the only mechanism that clears the flag 2758 * and it is potentially going to sleep here. 2759 */ 2760 if (order) { 2761 for (i = 0; i <= end_zone; i++) { 2762 struct zone *zone = pgdat->node_zones + i; 2763 2764 if (!populated_zone(zone)) 2765 continue; 2766 2767 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2768 continue; 2769 2770 /* Confirm the zone is balanced for order-0 */ 2771 if (!zone_watermark_ok(zone, 0, 2772 high_wmark_pages(zone), 0, 0)) { 2773 order = sc.order = 0; 2774 goto loop_again; 2775 } 2776 2777 /* If balanced, clear the congested flag */ 2778 zone_clear_flag(zone, ZONE_CONGESTED); 2779 if (i <= *classzone_idx) 2780 balanced += zone->present_pages; 2781 } 2782 } 2783 2784 /* 2785 * Return the order we were reclaiming at so sleeping_prematurely() 2786 * makes a decision on the order we were last reclaiming at. However, 2787 * if another caller entered the allocator slow path while kswapd 2788 * was awake, order will remain at the higher level 2789 */ 2790 *classzone_idx = end_zone; 2791 return order; 2792 } 2793 2794 static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx) 2795 { 2796 long remaining = 0; 2797 DEFINE_WAIT(wait); 2798 2799 if (freezing(current) || kthread_should_stop()) 2800 return; 2801 2802 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2803 2804 /* Try to sleep for a short interval */ 2805 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) { 2806 remaining = schedule_timeout(HZ/10); 2807 finish_wait(&pgdat->kswapd_wait, &wait); 2808 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2809 } 2810 2811 /* 2812 * After a short sleep, check if it was a premature sleep. If not, then 2813 * go fully to sleep until explicitly woken up. 2814 */ 2815 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) { 2816 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 2817 2818 /* 2819 * vmstat counters are not perfectly accurate and the estimated 2820 * value for counters such as NR_FREE_PAGES can deviate from the 2821 * true value by nr_online_cpus * threshold. To avoid the zone 2822 * watermarks being breached while under pressure, we reduce the 2823 * per-cpu vmstat threshold while kswapd is awake and restore 2824 * them before going back to sleep. 
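 *
 * (Editorial arithmetic, not part of the original source: with 16 online
 * CPUs and a per-cpu stat threshold on the order of 100 pages, the
 * NR_FREE_PAGES estimate can be off by roughly 16 * 100 = 1600 pages,
 * about 6MB with 4KB pages, which is easily enough to matter right at a
 * watermark; hence the tighter threshold while kswapd is awake.)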
2825 */ 2826 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 2827 schedule(); 2828 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); 2829 } else { 2830 if (remaining) 2831 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 2832 else 2833 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 2834 } 2835 finish_wait(&pgdat->kswapd_wait, &wait); 2836 } 2837 2838 /* 2839 * The background pageout daemon, started as a kernel thread 2840 * from the init process. 2841 * 2842 * This basically trickles out pages so that we have _some_ 2843 * free memory available even if there is no other activity 2844 * that frees anything up. This is needed for things like routing 2845 * etc, where we otherwise might have all activity going on in 2846 * asynchronous contexts that cannot page things out. 2847 * 2848 * If there are applications that are active memory-allocators 2849 * (most normal use), this basically shouldn't matter. 2850 */ 2851 static int kswapd(void *p) 2852 { 2853 unsigned long order, new_order; 2854 unsigned balanced_order; 2855 int classzone_idx, new_classzone_idx; 2856 int balanced_classzone_idx; 2857 pg_data_t *pgdat = (pg_data_t*)p; 2858 struct task_struct *tsk = current; 2859 2860 struct reclaim_state reclaim_state = { 2861 .reclaimed_slab = 0, 2862 }; 2863 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2864 2865 lockdep_set_current_reclaim_state(GFP_KERNEL); 2866 2867 if (!cpumask_empty(cpumask)) 2868 set_cpus_allowed_ptr(tsk, cpumask); 2869 current->reclaim_state = &reclaim_state; 2870 2871 /* 2872 * Tell the memory management that we're a "memory allocator", 2873 * and that if we need more memory we should get access to it 2874 * regardless (see "__alloc_pages()"). "kswapd" should 2875 * never get caught in the normal page freeing logic. 2876 * 2877 * (Kswapd normally doesn't need memory anyway, but sometimes 2878 * you need a small amount of memory in order to be able to 2879 * page out something else, and this flag essentially protects 2880 * us from recursively trying to free more memory as we're 2881 * trying to free the first piece of memory in the first place). 
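 *
 * (Editorial note, not part of the original source: of the flags set
 * below, PF_MEMALLOC lets kswapd dip into the emergency reserves,
 * PF_SWAPWRITE allows it to write pages to swap, and PF_KSWAPD marks the
 * task so that helpers such as current_is_kswapd() can recognise it.)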
2882 */ 2883 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 2884 set_freezable(); 2885 2886 order = new_order = 0; 2887 balanced_order = 0; 2888 classzone_idx = new_classzone_idx = pgdat->nr_zones - 1; 2889 balanced_classzone_idx = classzone_idx; 2890 for ( ; ; ) { 2891 int ret; 2892 2893 /* 2894 * If the last balance_pgdat was unsuccessful it's unlikely a 2895 * new request of a similar or harder type will succeed soon 2896 * so consider going to sleep on the basis we reclaimed at 2897 */ 2898 if (balanced_classzone_idx >= new_classzone_idx && 2899 balanced_order == new_order) { 2900 new_order = pgdat->kswapd_max_order; 2901 new_classzone_idx = pgdat->classzone_idx; 2902 pgdat->kswapd_max_order = 0; 2903 pgdat->classzone_idx = pgdat->nr_zones - 1; 2904 } 2905 2906 if (order < new_order || classzone_idx > new_classzone_idx) { 2907 /* 2908 * Don't sleep if someone wants a larger 'order' 2909 * allocation or has tigher zone constraints 2910 */ 2911 order = new_order; 2912 classzone_idx = new_classzone_idx; 2913 } else { 2914 kswapd_try_to_sleep(pgdat, balanced_order, 2915 balanced_classzone_idx); 2916 order = pgdat->kswapd_max_order; 2917 classzone_idx = pgdat->classzone_idx; 2918 new_order = order; 2919 new_classzone_idx = classzone_idx; 2920 pgdat->kswapd_max_order = 0; 2921 pgdat->classzone_idx = pgdat->nr_zones - 1; 2922 } 2923 2924 ret = try_to_freeze(); 2925 if (kthread_should_stop()) 2926 break; 2927 2928 /* 2929 * We can speed up thawing tasks if we don't call balance_pgdat 2930 * after returning from the refrigerator 2931 */ 2932 if (!ret) { 2933 trace_mm_vmscan_kswapd_wake(pgdat->node_id, order); 2934 balanced_classzone_idx = classzone_idx; 2935 balanced_order = balance_pgdat(pgdat, order, 2936 &balanced_classzone_idx); 2937 } 2938 } 2939 return 0; 2940 } 2941 2942 /* 2943 * A zone is low on free memory, so wake its kswapd task to service it. 2944 */ 2945 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) 2946 { 2947 pg_data_t *pgdat; 2948 2949 if (!populated_zone(zone)) 2950 return; 2951 2952 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2953 return; 2954 pgdat = zone->zone_pgdat; 2955 if (pgdat->kswapd_max_order < order) { 2956 pgdat->kswapd_max_order = order; 2957 pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx); 2958 } 2959 if (!waitqueue_active(&pgdat->kswapd_wait)) 2960 return; 2961 if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0)) 2962 return; 2963 2964 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); 2965 wake_up_interruptible(&pgdat->kswapd_wait); 2966 } 2967 2968 /* 2969 * The reclaimable count would be mostly accurate. 
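 *
 * (Editorial note, not part of the original source: "reclaimable" here
 * means the file LRU pages, plus the anon LRU pages only while swap space
 * is available, i.e. nr_swap_pages > 0; without swap, anonymous pages are
 * not counted at all.)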
2970 * Pages that are less readily reclaimable include 2971 * - mlocked pages, which will be moved to the unevictable list when encountered 2972 * - mapped pages, which may require several passes over the LRU to be reclaimed 2973 * - dirty pages, which are not "instantly" reclaimable 2974 */ 2975 unsigned long global_reclaimable_pages(void) 2976 { 2977 int nr; 2978 2979 nr = global_page_state(NR_ACTIVE_FILE) + 2980 global_page_state(NR_INACTIVE_FILE); 2981 2982 if (nr_swap_pages > 0) 2983 nr += global_page_state(NR_ACTIVE_ANON) + 2984 global_page_state(NR_INACTIVE_ANON); 2985 2986 return nr; 2987 } 2988 2989 unsigned long zone_reclaimable_pages(struct zone *zone) 2990 { 2991 int nr; 2992 2993 nr = zone_page_state(zone, NR_ACTIVE_FILE) + 2994 zone_page_state(zone, NR_INACTIVE_FILE); 2995 2996 if (nr_swap_pages > 0) 2997 nr += zone_page_state(zone, NR_ACTIVE_ANON) + 2998 zone_page_state(zone, NR_INACTIVE_ANON); 2999 3000 return nr; 3001 } 3002 3003 #ifdef CONFIG_HIBERNATION 3004 /* 3005 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 3006 * freed pages. 3007 * 3008 * Rather than trying to age LRUs the aim is to preserve the overall 3009 * LRU order by reclaiming preferentially 3010 * inactive > active > active referenced > active mapped 3011 */ 3012 unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 3013 { 3014 struct reclaim_state reclaim_state; 3015 struct scan_control sc = { 3016 .gfp_mask = GFP_HIGHUSER_MOVABLE, 3017 .may_swap = 1, 3018 .may_unmap = 1, 3019 .may_writepage = 1, 3020 .nr_to_reclaim = nr_to_reclaim, 3021 .hibernation_mode = 1, 3022 .order = 0, 3023 }; 3024 struct shrink_control shrink = { 3025 .gfp_mask = sc.gfp_mask, 3026 }; 3027 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 3028 struct task_struct *p = current; 3029 unsigned long nr_reclaimed; 3030 3031 p->flags |= PF_MEMALLOC; 3032 lockdep_set_current_reclaim_state(sc.gfp_mask); 3033 reclaim_state.reclaimed_slab = 0; 3034 p->reclaim_state = &reclaim_state; 3035 3036 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 3037 3038 p->reclaim_state = NULL; 3039 lockdep_clear_current_reclaim_state(); 3040 p->flags &= ~PF_MEMALLOC; 3041 3042 return nr_reclaimed; 3043 } 3044 #endif /* CONFIG_HIBERNATION */ 3045 3046 /* It's optimal to keep kswapds on the same CPUs as their memory, but 3047 not required for correctness. So if the last cpu in a node goes 3048 away, we get changed to run anywhere: as the first one comes back, 3049 restore their cpu bindings. */ 3050 static int __devinit cpu_callback(struct notifier_block *nfb, 3051 unsigned long action, void *hcpu) 3052 { 3053 int nid; 3054 3055 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { 3056 for_each_node_state(nid, N_HIGH_MEMORY) { 3057 pg_data_t *pgdat = NODE_DATA(nid); 3058 const struct cpumask *mask; 3059 3060 mask = cpumask_of_node(pgdat->node_id); 3061 3062 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 3063 /* One of our CPUs online: restore mask */ 3064 set_cpus_allowed_ptr(pgdat->kswapd, mask); 3065 } 3066 } 3067 return NOTIFY_OK; 3068 } 3069 3070 /* 3071 * This kswapd start function will be called by init and node-hot-add. 3072 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
3073 */ 3074 int kswapd_run(int nid) 3075 { 3076 pg_data_t *pgdat = NODE_DATA(nid); 3077 int ret = 0; 3078 3079 if (pgdat->kswapd) 3080 return 0; 3081 3082 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 3083 if (IS_ERR(pgdat->kswapd)) { 3084 /* failure at boot is fatal */ 3085 BUG_ON(system_state == SYSTEM_BOOTING); 3086 printk("Failed to start kswapd on node %d\n",nid); 3087 ret = -1; 3088 } 3089 return ret; 3090 } 3091 3092 /* 3093 * Called by memory hotplug when all memory in a node is offlined. 3094 */ 3095 void kswapd_stop(int nid) 3096 { 3097 struct task_struct *kswapd = NODE_DATA(nid)->kswapd; 3098 3099 if (kswapd) 3100 kthread_stop(kswapd); 3101 } 3102 3103 static int __init kswapd_init(void) 3104 { 3105 int nid; 3106 3107 swap_setup(); 3108 for_each_node_state(nid, N_HIGH_MEMORY) 3109 kswapd_run(nid); 3110 hotcpu_notifier(cpu_callback, 0); 3111 return 0; 3112 } 3113 3114 module_init(kswapd_init) 3115 3116 #ifdef CONFIG_NUMA 3117 /* 3118 * Zone reclaim mode 3119 * 3120 * If non-zero call zone_reclaim when the number of free pages falls below 3121 * the watermarks. 3122 */ 3123 int zone_reclaim_mode __read_mostly; 3124 3125 #define RECLAIM_OFF 0 3126 #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ 3127 #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ 3128 #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */ 3129 3130 /* 3131 * Priority for ZONE_RECLAIM. This determines the fraction of pages 3132 * of a node considered for each zone_reclaim. 4 scans 1/16th of 3133 * a zone. 3134 */ 3135 #define ZONE_RECLAIM_PRIORITY 4 3136 3137 /* 3138 * Percentage of pages in a zone that must be unmapped for zone_reclaim to 3139 * occur. 3140 */ 3141 int sysctl_min_unmapped_ratio = 1; 3142 3143 /* 3144 * If the number of slab pages in a zone grows beyond this percentage then 3145 * slab reclaim needs to occur. 3146 */ 3147 int sysctl_min_slab_ratio = 5; 3148 3149 static inline unsigned long zone_unmapped_file_pages(struct zone *zone) 3150 { 3151 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED); 3152 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) + 3153 zone_page_state(zone, NR_ACTIVE_FILE); 3154 3155 /* 3156 * It's possible for there to be more file mapped pages than 3157 * accounted for by the pages on the file LRU lists because 3158 * tmpfs pages accounted for as ANON can also be FILE_MAPPED 3159 */ 3160 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; 3161 } 3162 3163 /* Work out how many page cache pages we can reclaim in this reclaim_mode */ 3164 static long zone_pagecache_reclaimable(struct zone *zone) 3165 { 3166 long nr_pagecache_reclaimable; 3167 long delta = 0; 3168 3169 /* 3170 * If RECLAIM_SWAP is set, then all file pages are considered 3171 * potentially reclaimable. 
Otherwise, we have to worry about 3172 * pages like swapcache and zone_unmapped_file_pages() provides 3173 * a better estimate 3174 */ 3175 if (zone_reclaim_mode & RECLAIM_SWAP) 3176 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES); 3177 else 3178 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone); 3179 3180 /* If we can't clean pages, remove dirty pages from consideration */ 3181 if (!(zone_reclaim_mode & RECLAIM_WRITE)) 3182 delta += zone_page_state(zone, NR_FILE_DIRTY); 3183 3184 /* Watch for any possible underflows due to delta */ 3185 if (unlikely(delta > nr_pagecache_reclaimable)) 3186 delta = nr_pagecache_reclaimable; 3187 3188 return nr_pagecache_reclaimable - delta; 3189 } 3190 3191 /* 3192 * Try to free up some pages from this zone through reclaim. 3193 */ 3194 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 3195 { 3196 /* Minimum pages needed in order to stay on node */ 3197 const unsigned long nr_pages = 1 << order; 3198 struct task_struct *p = current; 3199 struct reclaim_state reclaim_state; 3200 int priority; 3201 struct scan_control sc = { 3202 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), 3203 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP), 3204 .may_swap = 1, 3205 .nr_to_reclaim = max_t(unsigned long, nr_pages, 3206 SWAP_CLUSTER_MAX), 3207 .gfp_mask = gfp_mask, 3208 .order = order, 3209 }; 3210 struct shrink_control shrink = { 3211 .gfp_mask = sc.gfp_mask, 3212 }; 3213 unsigned long nr_slab_pages0, nr_slab_pages1; 3214 3215 cond_resched(); 3216 /* 3217 * We need to be able to allocate from the reserves for RECLAIM_SWAP 3218 * and we also need to be able to write out pages for RECLAIM_WRITE 3219 * and RECLAIM_SWAP. 3220 */ 3221 p->flags |= PF_MEMALLOC | PF_SWAPWRITE; 3222 lockdep_set_current_reclaim_state(gfp_mask); 3223 reclaim_state.reclaimed_slab = 0; 3224 p->reclaim_state = &reclaim_state; 3225 3226 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) { 3227 /* 3228 * Free memory by calling shrink zone with increasing 3229 * priorities until we have enough memory freed. 3230 */ 3231 priority = ZONE_RECLAIM_PRIORITY; 3232 do { 3233 shrink_zone(priority, zone, &sc); 3234 priority--; 3235 } while (priority >= 0 && sc.nr_reclaimed < nr_pages); 3236 } 3237 3238 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); 3239 if (nr_slab_pages0 > zone->min_slab_pages) { 3240 /* 3241 * shrink_slab() does not currently allow us to determine how 3242 * many pages were freed in this zone. So we take the current 3243 * number of slab pages and shake the slab until it is reduced 3244 * by the same nr_pages that we used for reclaiming unmapped 3245 * pages. 3246 * 3247 * Note that shrink_slab will free memory on all zones and may 3248 * take a long time. 3249 */ 3250 for (;;) { 3251 unsigned long lru_pages = zone_reclaimable_pages(zone); 3252 3253 /* No reclaimable slab or very low memory pressure */ 3254 if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages)) 3255 break; 3256 3257 /* Freed enough memory */ 3258 nr_slab_pages1 = zone_page_state(zone, 3259 NR_SLAB_RECLAIMABLE); 3260 if (nr_slab_pages1 + nr_pages <= nr_slab_pages0) 3261 break; 3262 } 3263 3264 /* 3265 * Update nr_reclaimed by the number of slab pages we 3266 * reclaimed from this zone. 
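 *
 * (Editorial note, not part of the original source: because shrink_slab()
 * cannot report per-zone numbers, the before/after difference in this
 * zone's NR_SLAB_RECLAIMABLE count is used as the approximation below.)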
3267 */ 3268 nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); 3269 if (nr_slab_pages1 < nr_slab_pages0) 3270 sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1; 3271 } 3272 3273 p->reclaim_state = NULL; 3274 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); 3275 lockdep_clear_current_reclaim_state(); 3276 return sc.nr_reclaimed >= nr_pages; 3277 } 3278 3279 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 3280 { 3281 int node_id; 3282 int ret; 3283 3284 /* 3285 * Zone reclaim reclaims unmapped file backed pages and 3286 * slab pages if we are over the defined limits. 3287 * 3288 * A small portion of unmapped file backed pages is needed for 3289 * file I/O otherwise pages read by file I/O will be immediately 3290 * thrown out if the zone is overallocated. So we do not reclaim 3291 * if less than a specified percentage of the zone is used by 3292 * unmapped file backed pages. 3293 */ 3294 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages && 3295 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) 3296 return ZONE_RECLAIM_FULL; 3297 3298 if (zone->all_unreclaimable) 3299 return ZONE_RECLAIM_FULL; 3300 3301 /* 3302 * Do not scan if the allocation should not be delayed. 3303 */ 3304 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC)) 3305 return ZONE_RECLAIM_NOSCAN; 3306 3307 /* 3308 * Only run zone reclaim on the local zone or on zones that do not 3309 * have associated processors. This will favor the local processor 3310 * over remote processors and spread off node memory allocations 3311 * as wide as possible. 3312 */ 3313 node_id = zone_to_nid(zone); 3314 if (node_state(node_id, N_CPU) && node_id != numa_node_id()) 3315 return ZONE_RECLAIM_NOSCAN; 3316 3317 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) 3318 return ZONE_RECLAIM_NOSCAN; 3319 3320 ret = __zone_reclaim(zone, gfp_mask, order); 3321 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); 3322 3323 if (!ret) 3324 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); 3325 3326 return ret; 3327 } 3328 #endif 3329 3330 /* 3331 * page_evictable - test whether a page is evictable 3332 * @page: the page to test 3333 * @vma: the VMA in which the page is or will be mapped, may be NULL 3334 * 3335 * Test whether page is evictable--i.e., should be placed on active/inactive 3336 * lists vs unevictable list. The vma argument is !NULL when called from the 3337 * fault path to determine how to instantate a new page. 3338 * 3339 * Reasons page might not be evictable: 3340 * (1) page's mapping marked unevictable 3341 * (2) page is part of an mlocked VMA 3342 * 3343 */ 3344 int page_evictable(struct page *page, struct vm_area_struct *vma) 3345 { 3346 3347 if (mapping_unevictable(page_mapping(page))) 3348 return 0; 3349 3350 if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page))) 3351 return 0; 3352 3353 return 1; 3354 } 3355 3356 /** 3357 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list 3358 * @page: page to check evictability and move to appropriate lru list 3359 * @zone: zone page is in 3360 * 3361 * Checks a page for evictability and moves the page to the appropriate 3362 * zone lru list. 3363 * 3364 * Restrictions: zone->lru_lock must be held, page must be on LRU and must 3365 * have PageUnevictable set. 
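 *
 * (Editorial note, not part of the original source: if the page turns out
 * to be evictable it is moved back onto the appropriate LRU list and
 * UNEVICTABLE_PGRESCUED is counted; otherwise it is rotated back onto the
 * unevictable list.)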
3366 */ 3367 static void check_move_unevictable_page(struct page *page, struct zone *zone) 3368 { 3369 VM_BUG_ON(PageActive(page)); 3370 3371 retry: 3372 ClearPageUnevictable(page); 3373 if (page_evictable(page, NULL)) { 3374 enum lru_list l = page_lru_base_type(page); 3375 3376 __dec_zone_state(zone, NR_UNEVICTABLE); 3377 list_move(&page->lru, &zone->lru[l].list); 3378 mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l); 3379 __inc_zone_state(zone, NR_INACTIVE_ANON + l); 3380 __count_vm_event(UNEVICTABLE_PGRESCUED); 3381 } else { 3382 /* 3383 * rotate unevictable list 3384 */ 3385 SetPageUnevictable(page); 3386 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list); 3387 mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE); 3388 if (page_evictable(page, NULL)) 3389 goto retry; 3390 } 3391 } 3392 3393 /** 3394 * scan_mapping_unevictable_pages - scan an address space for evictable pages 3395 * @mapping: struct address_space to scan for evictable pages 3396 * 3397 * Scan all pages in mapping. Check unevictable pages for 3398 * evictability and move them to the appropriate zone lru list. 3399 */ 3400 void scan_mapping_unevictable_pages(struct address_space *mapping) 3401 { 3402 pgoff_t next = 0; 3403 pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >> 3404 PAGE_CACHE_SHIFT; 3405 struct zone *zone; 3406 struct pagevec pvec; 3407 3408 if (mapping->nrpages == 0) 3409 return; 3410 3411 pagevec_init(&pvec, 0); 3412 while (next < end && 3413 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { 3414 int i; 3415 int pg_scanned = 0; 3416 3417 zone = NULL; 3418 3419 for (i = 0; i < pagevec_count(&pvec); i++) { 3420 struct page *page = pvec.pages[i]; 3421 pgoff_t page_index = page->index; 3422 struct zone *pagezone = page_zone(page); 3423 3424 pg_scanned++; 3425 if (page_index > next) 3426 next = page_index; 3427 next++; 3428 3429 if (pagezone != zone) { 3430 if (zone) 3431 spin_unlock_irq(&zone->lru_lock); 3432 zone = pagezone; 3433 spin_lock_irq(&zone->lru_lock); 3434 } 3435 3436 if (PageLRU(page) && PageUnevictable(page)) 3437 check_move_unevictable_page(page, zone); 3438 } 3439 if (zone) 3440 spin_unlock_irq(&zone->lru_lock); 3441 pagevec_release(&pvec); 3442 3443 count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned); 3444 } 3445 3446 } 3447 3448 static void warn_scan_unevictable_pages(void) 3449 { 3450 printk_once(KERN_WARNING 3451 "The scan_unevictable_pages sysctl/node-interface has been " 3452 "disabled for lack of a legitimate use case. If you have " 3453 "one, please send an email to linux-mm@kvack.org.\n"); 3454 } 3455 3456 /* 3457 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of 3458 * all nodes' unevictable lists for evictable pages 3459 */ 3460 unsigned long scan_unevictable_pages; 3461 3462 int scan_unevictable_handler(struct ctl_table *table, int write, 3463 void __user *buffer, 3464 size_t *length, loff_t *ppos) 3465 { 3466 warn_scan_unevictable_pages(); 3467 proc_doulongvec_minmax(table, write, buffer, length, ppos); 3468 scan_unevictable_pages = 0; 3469 return 0; 3470 } 3471 3472 #ifdef CONFIG_NUMA 3473 /* 3474 * per node 'scan_unevictable_pages' attribute. On demand re-scan of 3475 * a specified node's per zone unevictable lists for evictable pages. 3476 */ 3477 3478 static ssize_t read_scan_unevictable_node(struct sys_device *dev, 3479 struct sysdev_attribute *attr, 3480 char *buf) 3481 { 3482 warn_scan_unevictable_pages(); 3483 return sprintf(buf, "0\n"); /* always zero; should fit... 
*/ 3484 } 3485 3486 static ssize_t write_scan_unevictable_node(struct sys_device *dev, 3487 struct sysdev_attribute *attr, 3488 const char *buf, size_t count) 3489 { 3490 warn_scan_unevictable_pages(); 3491 return 1; 3492 } 3493 3494 3495 static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR, 3496 read_scan_unevictable_node, 3497 write_scan_unevictable_node); 3498 3499 int scan_unevictable_register_node(struct node *node) 3500 { 3501 return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages); 3502 } 3503 3504 void scan_unevictable_unregister_node(struct node *node) 3505 { 3506 sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages); 3507 } 3508 #endif 3509