// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmpressure.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/migrate.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>
#include <linux/psi.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>
#include <linux/balloon_compaction.h>
#include <linux/sched/sysctl.h>

#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

struct scan_control {
	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Scan pressure balancing between anon and file LRUs
	 */
	unsigned long anon_cost;
	unsigned long file_cost;

	/* Can active pages be deactivated as part of reclaim? */
#define DEACTIVATE_ANON 1
#define DEACTIVATE_FILE 2
	unsigned int may_deactivate:2;
	unsigned int force_deactivate:1;
	unsigned int skipped_deactivate:1;

	/* Writepage batching in laptop mode; RECLAIM_WRITE */
	unsigned int may_writepage:1;

	/* Can mapped pages be reclaimed? */
	unsigned int may_unmap:1;

	/* Can pages be swapped as part of reclaim? */
	unsigned int may_swap:1;

	/*
	 * Cgroup memory below memory.low is protected as long as we
	 * don't threaten to OOM. If any cgroup is reclaimed at
	 * reduced force or passed over entirely due to its memory.low
	 * setting (memcg_low_skipped), and nothing is reclaimed as a
	 * result, then go back for one more cycle that reclaims the protected
	 * memory (memcg_low_reclaim) to avert OOM.
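	 *
	 * (Illustrative example, not part of the original comment: with
	 * memory.low = 1G and usage at 800M, a first pass with
	 * memcg_low_reclaim == 0 skips the group and sets memcg_low_skipped;
	 * only if that pass reclaims nothing is reclaim retried with
	 * memcg_low_reclaim == 1, so the protected memory can be scanned.)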
112 */ 113 unsigned int memcg_low_reclaim:1; 114 unsigned int memcg_low_skipped:1; 115 116 unsigned int hibernation_mode:1; 117 118 /* One of the zones is ready for compaction */ 119 unsigned int compaction_ready:1; 120 121 /* There is easily reclaimable cold cache in the current node */ 122 unsigned int cache_trim_mode:1; 123 124 /* The file pages on the current node are dangerously low */ 125 unsigned int file_is_tiny:1; 126 127 /* Always discard instead of demoting to lower tier memory */ 128 unsigned int no_demotion:1; 129 130 /* Allocation order */ 131 s8 order; 132 133 /* Scan (total_size >> priority) pages at once */ 134 s8 priority; 135 136 /* The highest zone to isolate pages for reclaim from */ 137 s8 reclaim_idx; 138 139 /* This context's GFP mask */ 140 gfp_t gfp_mask; 141 142 /* Incremented by the number of inactive pages that were scanned */ 143 unsigned long nr_scanned; 144 145 /* Number of pages freed so far during a call to shrink_zones() */ 146 unsigned long nr_reclaimed; 147 148 struct { 149 unsigned int dirty; 150 unsigned int unqueued_dirty; 151 unsigned int congested; 152 unsigned int writeback; 153 unsigned int immediate; 154 unsigned int file_taken; 155 unsigned int taken; 156 } nr; 157 158 /* for recording the reclaimed slab by now */ 159 struct reclaim_state reclaim_state; 160 }; 161 162 #ifdef ARCH_HAS_PREFETCHW 163 #define prefetchw_prev_lru_page(_page, _base, _field) \ 164 do { \ 165 if ((_page)->lru.prev != _base) { \ 166 struct page *prev; \ 167 \ 168 prev = lru_to_page(&(_page->lru)); \ 169 prefetchw(&prev->_field); \ 170 } \ 171 } while (0) 172 #else 173 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) 174 #endif 175 176 /* 177 * From 0 .. 200. Higher means more swappy. 178 */ 179 int vm_swappiness = 60; 180 181 static void set_task_reclaim_state(struct task_struct *task, 182 struct reclaim_state *rs) 183 { 184 /* Check for an overwrite */ 185 WARN_ON_ONCE(rs && task->reclaim_state); 186 187 /* Check for the nulling of an already-nulled member */ 188 WARN_ON_ONCE(!rs && !task->reclaim_state); 189 190 task->reclaim_state = rs; 191 } 192 193 LIST_HEAD(shrinker_list); 194 DECLARE_RWSEM(shrinker_rwsem); 195 196 #ifdef CONFIG_MEMCG 197 static int shrinker_nr_max; 198 199 /* The shrinker_info is expanded in a batch of BITS_PER_LONG */ 200 static inline int shrinker_map_size(int nr_items) 201 { 202 return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long)); 203 } 204 205 static inline int shrinker_defer_size(int nr_items) 206 { 207 return (round_up(nr_items, BITS_PER_LONG) * sizeof(atomic_long_t)); 208 } 209 210 static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg, 211 int nid) 212 { 213 return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info, 214 lockdep_is_held(&shrinker_rwsem)); 215 } 216 217 static int expand_one_shrinker_info(struct mem_cgroup *memcg, 218 int map_size, int defer_size, 219 int old_map_size, int old_defer_size) 220 { 221 struct shrinker_info *new, *old; 222 struct mem_cgroup_per_node *pn; 223 int nid; 224 int size = map_size + defer_size; 225 226 for_each_node(nid) { 227 pn = memcg->nodeinfo[nid]; 228 old = shrinker_info_protected(memcg, nid); 229 /* Not yet online memcg */ 230 if (!old) 231 return 0; 232 233 new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid); 234 if (!new) 235 return -ENOMEM; 236 237 new->nr_deferred = (atomic_long_t *)(new + 1); 238 new->map = (void *)new->nr_deferred + defer_size; 239 240 /* map: set all old bits, clear all new bits */ 241 
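		/*
		 * Layout sketch (added for illustration, not literal code):
		 * each shrinker_info is a single allocation,
		 *
		 *   [struct shrinker_info][nr_deferred[]: defer_size bytes][map: map_size bytes]
		 *
		 * so nr_deferred starts right after the struct and the bitmap
		 * starts defer_size bytes later. Only the newly added tail of
		 * each array is cleared; the old contents are copied below.
		 */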
memset(new->map, (int)0xff, old_map_size); 242 memset((void *)new->map + old_map_size, 0, map_size - old_map_size); 243 /* nr_deferred: copy old values, clear all new values */ 244 memcpy(new->nr_deferred, old->nr_deferred, old_defer_size); 245 memset((void *)new->nr_deferred + old_defer_size, 0, 246 defer_size - old_defer_size); 247 248 rcu_assign_pointer(pn->shrinker_info, new); 249 kvfree_rcu(old, rcu); 250 } 251 252 return 0; 253 } 254 255 void free_shrinker_info(struct mem_cgroup *memcg) 256 { 257 struct mem_cgroup_per_node *pn; 258 struct shrinker_info *info; 259 int nid; 260 261 for_each_node(nid) { 262 pn = memcg->nodeinfo[nid]; 263 info = rcu_dereference_protected(pn->shrinker_info, true); 264 kvfree(info); 265 rcu_assign_pointer(pn->shrinker_info, NULL); 266 } 267 } 268 269 int alloc_shrinker_info(struct mem_cgroup *memcg) 270 { 271 struct shrinker_info *info; 272 int nid, size, ret = 0; 273 int map_size, defer_size = 0; 274 275 down_write(&shrinker_rwsem); 276 map_size = shrinker_map_size(shrinker_nr_max); 277 defer_size = shrinker_defer_size(shrinker_nr_max); 278 size = map_size + defer_size; 279 for_each_node(nid) { 280 info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid); 281 if (!info) { 282 free_shrinker_info(memcg); 283 ret = -ENOMEM; 284 break; 285 } 286 info->nr_deferred = (atomic_long_t *)(info + 1); 287 info->map = (void *)info->nr_deferred + defer_size; 288 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info); 289 } 290 up_write(&shrinker_rwsem); 291 292 return ret; 293 } 294 295 static inline bool need_expand(int nr_max) 296 { 297 return round_up(nr_max, BITS_PER_LONG) > 298 round_up(shrinker_nr_max, BITS_PER_LONG); 299 } 300 301 static int expand_shrinker_info(int new_id) 302 { 303 int ret = 0; 304 int new_nr_max = new_id + 1; 305 int map_size, defer_size = 0; 306 int old_map_size, old_defer_size = 0; 307 struct mem_cgroup *memcg; 308 309 if (!need_expand(new_nr_max)) 310 goto out; 311 312 if (!root_mem_cgroup) 313 goto out; 314 315 lockdep_assert_held(&shrinker_rwsem); 316 317 map_size = shrinker_map_size(new_nr_max); 318 defer_size = shrinker_defer_size(new_nr_max); 319 old_map_size = shrinker_map_size(shrinker_nr_max); 320 old_defer_size = shrinker_defer_size(shrinker_nr_max); 321 322 memcg = mem_cgroup_iter(NULL, NULL, NULL); 323 do { 324 ret = expand_one_shrinker_info(memcg, map_size, defer_size, 325 old_map_size, old_defer_size); 326 if (ret) { 327 mem_cgroup_iter_break(NULL, memcg); 328 goto out; 329 } 330 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); 331 out: 332 if (!ret) 333 shrinker_nr_max = new_nr_max; 334 335 return ret; 336 } 337 338 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) 339 { 340 if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { 341 struct shrinker_info *info; 342 343 rcu_read_lock(); 344 info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info); 345 /* Pairs with smp mb in shrink_slab() */ 346 smp_mb__before_atomic(); 347 set_bit(shrinker_id, info->map); 348 rcu_read_unlock(); 349 } 350 } 351 352 static DEFINE_IDR(shrinker_idr); 353 354 static int prealloc_memcg_shrinker(struct shrinker *shrinker) 355 { 356 int id, ret = -ENOMEM; 357 358 if (mem_cgroup_disabled()) 359 return -ENOSYS; 360 361 down_write(&shrinker_rwsem); 362 /* This may call shrinker, so it must use down_read_trylock() */ 363 id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL); 364 if (id < 0) 365 goto unlock; 366 367 if (id >= shrinker_nr_max) { 368 if (expand_shrinker_info(id)) { 369 
idr_remove(&shrinker_idr, id); 370 goto unlock; 371 } 372 } 373 shrinker->id = id; 374 ret = 0; 375 unlock: 376 up_write(&shrinker_rwsem); 377 return ret; 378 } 379 380 static void unregister_memcg_shrinker(struct shrinker *shrinker) 381 { 382 int id = shrinker->id; 383 384 BUG_ON(id < 0); 385 386 lockdep_assert_held(&shrinker_rwsem); 387 388 idr_remove(&shrinker_idr, id); 389 } 390 391 static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker, 392 struct mem_cgroup *memcg) 393 { 394 struct shrinker_info *info; 395 396 info = shrinker_info_protected(memcg, nid); 397 return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0); 398 } 399 400 static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker, 401 struct mem_cgroup *memcg) 402 { 403 struct shrinker_info *info; 404 405 info = shrinker_info_protected(memcg, nid); 406 return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]); 407 } 408 409 void reparent_shrinker_deferred(struct mem_cgroup *memcg) 410 { 411 int i, nid; 412 long nr; 413 struct mem_cgroup *parent; 414 struct shrinker_info *child_info, *parent_info; 415 416 parent = parent_mem_cgroup(memcg); 417 if (!parent) 418 parent = root_mem_cgroup; 419 420 /* Prevent from concurrent shrinker_info expand */ 421 down_read(&shrinker_rwsem); 422 for_each_node(nid) { 423 child_info = shrinker_info_protected(memcg, nid); 424 parent_info = shrinker_info_protected(parent, nid); 425 for (i = 0; i < shrinker_nr_max; i++) { 426 nr = atomic_long_read(&child_info->nr_deferred[i]); 427 atomic_long_add(nr, &parent_info->nr_deferred[i]); 428 } 429 } 430 up_read(&shrinker_rwsem); 431 } 432 433 static bool cgroup_reclaim(struct scan_control *sc) 434 { 435 return sc->target_mem_cgroup; 436 } 437 438 /** 439 * writeback_throttling_sane - is the usual dirty throttling mechanism available? 440 * @sc: scan_control in question 441 * 442 * The normal page dirty throttling mechanism in balance_dirty_pages() is 443 * completely broken with the legacy memcg and direct stalling in 444 * shrink_page_list() is used for throttling instead, which lacks all the 445 * niceties such as fairness, adaptive pausing, bandwidth proportional 446 * allocation and configurability. 447 * 448 * This function tests whether the vmscan currently in progress can assume 449 * that the normal dirty throttling mechanism is operational. 
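 *
 * Return: true if reclaim can rely on balance_dirty_pages() (global reclaim,
 * or cgroup v2 reclaim with CONFIG_CGROUP_WRITEBACK); false for legacy (v1)
 * memcg reclaim, which falls back to direct stalling in shrink_page_list().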
450 */ 451 static bool writeback_throttling_sane(struct scan_control *sc) 452 { 453 if (!cgroup_reclaim(sc)) 454 return true; 455 #ifdef CONFIG_CGROUP_WRITEBACK 456 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 457 return true; 458 #endif 459 return false; 460 } 461 #else 462 static int prealloc_memcg_shrinker(struct shrinker *shrinker) 463 { 464 return -ENOSYS; 465 } 466 467 static void unregister_memcg_shrinker(struct shrinker *shrinker) 468 { 469 } 470 471 static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker, 472 struct mem_cgroup *memcg) 473 { 474 return 0; 475 } 476 477 static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker, 478 struct mem_cgroup *memcg) 479 { 480 return 0; 481 } 482 483 static bool cgroup_reclaim(struct scan_control *sc) 484 { 485 return false; 486 } 487 488 static bool writeback_throttling_sane(struct scan_control *sc) 489 { 490 return true; 491 } 492 #endif 493 494 static long xchg_nr_deferred(struct shrinker *shrinker, 495 struct shrink_control *sc) 496 { 497 int nid = sc->nid; 498 499 if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) 500 nid = 0; 501 502 if (sc->memcg && 503 (shrinker->flags & SHRINKER_MEMCG_AWARE)) 504 return xchg_nr_deferred_memcg(nid, shrinker, 505 sc->memcg); 506 507 return atomic_long_xchg(&shrinker->nr_deferred[nid], 0); 508 } 509 510 511 static long add_nr_deferred(long nr, struct shrinker *shrinker, 512 struct shrink_control *sc) 513 { 514 int nid = sc->nid; 515 516 if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) 517 nid = 0; 518 519 if (sc->memcg && 520 (shrinker->flags & SHRINKER_MEMCG_AWARE)) 521 return add_nr_deferred_memcg(nr, nid, shrinker, 522 sc->memcg); 523 524 return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]); 525 } 526 527 static bool can_demote(int nid, struct scan_control *sc) 528 { 529 if (!numa_demotion_enabled) 530 return false; 531 if (sc && sc->no_demotion) 532 return false; 533 if (next_demotion_node(nid) == NUMA_NO_NODE) 534 return false; 535 536 return true; 537 } 538 539 static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg, 540 int nid, 541 struct scan_control *sc) 542 { 543 if (memcg == NULL) { 544 /* 545 * For non-memcg reclaim, is there 546 * space in any swap device? 547 */ 548 if (get_nr_swap_pages() > 0) 549 return true; 550 } else { 551 /* Is the memcg below its swap limit? */ 552 if (mem_cgroup_get_nr_swap_pages(memcg) > 0) 553 return true; 554 } 555 556 /* 557 * The page can not be swapped. 558 * 559 * Can it be reclaimed from this node via demotion? 560 */ 561 return can_demote(nid, sc); 562 } 563 564 /* 565 * This misses isolated pages which are not accounted for to save counters. 566 * As the data only determines if reclaim or compaction continues, it is 567 * not expected that isolated pages will be a dominating factor. 568 */ 569 unsigned long zone_reclaimable_pages(struct zone *zone) 570 { 571 unsigned long nr; 572 573 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) + 574 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE); 575 if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL)) 576 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) + 577 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON); 578 579 return nr; 580 } 581 582 /** 583 * lruvec_lru_size - Returns the number of pages on the given LRU list. 
584 * @lruvec: lru vector 585 * @lru: lru to use 586 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list) 587 */ 588 static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, 589 int zone_idx) 590 { 591 unsigned long size = 0; 592 int zid; 593 594 for (zid = 0; zid <= zone_idx; zid++) { 595 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; 596 597 if (!managed_zone(zone)) 598 continue; 599 600 if (!mem_cgroup_disabled()) 601 size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid); 602 else 603 size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru); 604 } 605 return size; 606 } 607 608 /* 609 * Add a shrinker callback to be called from the vm. 610 */ 611 static int __prealloc_shrinker(struct shrinker *shrinker) 612 { 613 unsigned int size; 614 int err; 615 616 if (shrinker->flags & SHRINKER_MEMCG_AWARE) { 617 err = prealloc_memcg_shrinker(shrinker); 618 if (err != -ENOSYS) 619 return err; 620 621 shrinker->flags &= ~SHRINKER_MEMCG_AWARE; 622 } 623 624 size = sizeof(*shrinker->nr_deferred); 625 if (shrinker->flags & SHRINKER_NUMA_AWARE) 626 size *= nr_node_ids; 627 628 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); 629 if (!shrinker->nr_deferred) 630 return -ENOMEM; 631 632 return 0; 633 } 634 635 #ifdef CONFIG_SHRINKER_DEBUG 636 int prealloc_shrinker(struct shrinker *shrinker, const char *fmt, ...) 637 { 638 va_list ap; 639 int err; 640 641 va_start(ap, fmt); 642 shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap); 643 va_end(ap); 644 if (!shrinker->name) 645 return -ENOMEM; 646 647 err = __prealloc_shrinker(shrinker); 648 if (err) 649 kfree_const(shrinker->name); 650 651 return err; 652 } 653 #else 654 int prealloc_shrinker(struct shrinker *shrinker, const char *fmt, ...) 655 { 656 return __prealloc_shrinker(shrinker); 657 } 658 #endif 659 660 void free_prealloced_shrinker(struct shrinker *shrinker) 661 { 662 #ifdef CONFIG_SHRINKER_DEBUG 663 kfree_const(shrinker->name); 664 #endif 665 if (shrinker->flags & SHRINKER_MEMCG_AWARE) { 666 down_write(&shrinker_rwsem); 667 unregister_memcg_shrinker(shrinker); 668 up_write(&shrinker_rwsem); 669 return; 670 } 671 672 kfree(shrinker->nr_deferred); 673 shrinker->nr_deferred = NULL; 674 } 675 676 void register_shrinker_prepared(struct shrinker *shrinker) 677 { 678 down_write(&shrinker_rwsem); 679 list_add_tail(&shrinker->list, &shrinker_list); 680 shrinker->flags |= SHRINKER_REGISTERED; 681 shrinker_debugfs_add(shrinker); 682 up_write(&shrinker_rwsem); 683 } 684 685 static int __register_shrinker(struct shrinker *shrinker) 686 { 687 int err = __prealloc_shrinker(shrinker); 688 689 if (err) 690 return err; 691 register_shrinker_prepared(shrinker); 692 return 0; 693 } 694 695 #ifdef CONFIG_SHRINKER_DEBUG 696 int register_shrinker(struct shrinker *shrinker, const char *fmt, ...) 697 { 698 va_list ap; 699 int err; 700 701 va_start(ap, fmt); 702 shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap); 703 va_end(ap); 704 if (!shrinker->name) 705 return -ENOMEM; 706 707 err = __register_shrinker(shrinker); 708 if (err) 709 kfree_const(shrinker->name); 710 return err; 711 } 712 #else 713 int register_shrinker(struct shrinker *shrinker, const char *fmt, ...) 
{
	return __register_shrinker(shrinker);
}
#endif
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one shrinker.
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	if (!(shrinker->flags & SHRINKER_REGISTERED))
		return;

	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	shrinker->flags &= ~SHRINKER_REGISTERED;
	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
		unregister_memcg_shrinker(shrinker);
	shrinker_debugfs_remove(shrinker);
	up_write(&shrinker_rwsem);

	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}
EXPORT_SYMBOL(unregister_shrinker);

/**
 * synchronize_shrinkers - Wait for all running shrinkers to complete.
 *
 * This is equivalent to calling unregister_shrinker() and register_shrinker(),
 * but atomically and with less overhead. This is useful to guarantee that all
 * shrinker invocations have seen an update, before freeing memory, similar to
 * RCU.
 */
void synchronize_shrinkers(void)
{
	down_write(&shrinker_rwsem);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(synchronize_shrinkers);

#define SHRINK_BATCH 128

static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
				    struct shrinker *shrinker, int priority)
{
	unsigned long freed = 0;
	unsigned long long delta;
	long total_scan;
	long freeable;
	long nr;
	long new_nr;
	long batch_size = shrinker->batch ? shrinker->batch
					  : SHRINK_BATCH;
	long scanned = 0, next_deferred;

	freeable = shrinker->count_objects(shrinker, shrinkctl);
	if (freeable == 0 || freeable == SHRINK_EMPTY)
		return freeable;

	/*
	 * copy the current shrinker scan count into a local variable
	 * and zero it so that other concurrent shrinker invocations
	 * don't also do this scanning work.
	 */
	nr = xchg_nr_deferred(shrinker, shrinkctl);

	if (shrinker->seeks) {
		delta = freeable >> priority;
		delta *= 4;
		do_div(delta, shrinker->seeks);
	} else {
		/*
		 * These objects don't require any IO to create. Trim
		 * them aggressively under memory pressure to keep
		 * them from causing refetches in the IO caches.
		 */
		delta = freeable / 2;
	}

	total_scan = nr >> priority;
	total_scan += delta;
	total_scan = min(total_scan, (2 * freeable));

	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
				   freeable, delta, total_scan, priority);

	/*
	 * Normally, we should not scan less than batch_size objects in one
	 * pass to avoid too frequent shrinker calls, but if the slab has less
	 * than batch_size objects in total and we are really tight on memory,
	 * we will try to reclaim all available objects, otherwise we can end
	 * up failing allocations although there are plenty of reclaimable
	 * objects spread over several slabs with usage less than the
	 * batch_size.
	 *
	 * We detect the "tight on memory" situations by looking at the total
	 * number of objects we want to scan (total_scan). If it is greater
	 * than the total number of objects on slab (freeable), we must be
	 * scanning at high prio and therefore should try to reclaim as much as
	 * possible.
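	 *
	 * Illustrative numbers (an added example, not part of the original
	 * comment): with freeable = 10000, priority = 4 and the default
	 * seeks = 2, delta = (10000 >> 4) * 4 / 2 = 1250; if nr = 3000 was
	 * deferred from earlier calls, total_scan = min((3000 >> 4) + 1250,
	 * 2 * 10000) = 1437, which the loop below then works through in
	 * batch_size chunks.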
816 */ 817 while (total_scan >= batch_size || 818 total_scan >= freeable) { 819 unsigned long ret; 820 unsigned long nr_to_scan = min(batch_size, total_scan); 821 822 shrinkctl->nr_to_scan = nr_to_scan; 823 shrinkctl->nr_scanned = nr_to_scan; 824 ret = shrinker->scan_objects(shrinker, shrinkctl); 825 if (ret == SHRINK_STOP) 826 break; 827 freed += ret; 828 829 count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned); 830 total_scan -= shrinkctl->nr_scanned; 831 scanned += shrinkctl->nr_scanned; 832 833 cond_resched(); 834 } 835 836 /* 837 * The deferred work is increased by any new work (delta) that wasn't 838 * done, decreased by old deferred work that was done now. 839 * 840 * And it is capped to two times of the freeable items. 841 */ 842 next_deferred = max_t(long, (nr + delta - scanned), 0); 843 next_deferred = min(next_deferred, (2 * freeable)); 844 845 /* 846 * move the unused scan count back into the shrinker in a 847 * manner that handles concurrent updates. 848 */ 849 new_nr = add_nr_deferred(next_deferred, shrinker, shrinkctl); 850 851 trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan); 852 return freed; 853 } 854 855 #ifdef CONFIG_MEMCG 856 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, 857 struct mem_cgroup *memcg, int priority) 858 { 859 struct shrinker_info *info; 860 unsigned long ret, freed = 0; 861 int i; 862 863 if (!mem_cgroup_online(memcg)) 864 return 0; 865 866 if (!down_read_trylock(&shrinker_rwsem)) 867 return 0; 868 869 info = shrinker_info_protected(memcg, nid); 870 if (unlikely(!info)) 871 goto unlock; 872 873 for_each_set_bit(i, info->map, shrinker_nr_max) { 874 struct shrink_control sc = { 875 .gfp_mask = gfp_mask, 876 .nid = nid, 877 .memcg = memcg, 878 }; 879 struct shrinker *shrinker; 880 881 shrinker = idr_find(&shrinker_idr, i); 882 if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED))) { 883 if (!shrinker) 884 clear_bit(i, info->map); 885 continue; 886 } 887 888 /* Call non-slab shrinkers even though kmem is disabled */ 889 if (!memcg_kmem_enabled() && 890 !(shrinker->flags & SHRINKER_NONSLAB)) 891 continue; 892 893 ret = do_shrink_slab(&sc, shrinker, priority); 894 if (ret == SHRINK_EMPTY) { 895 clear_bit(i, info->map); 896 /* 897 * After the shrinker reported that it had no objects to 898 * free, but before we cleared the corresponding bit in 899 * the memcg shrinker map, a new object might have been 900 * added. To make sure, we have the bit set in this 901 * case, we invoke the shrinker one more time and reset 902 * the bit if it reports that it is not empty anymore. 903 * The memory barrier here pairs with the barrier in 904 * set_shrinker_bit(): 905 * 906 * list_lru_add() shrink_slab_memcg() 907 * list_add_tail() clear_bit() 908 * <MB> <MB> 909 * set_bit() do_shrink_slab() 910 */ 911 smp_mb__after_atomic(); 912 ret = do_shrink_slab(&sc, shrinker, priority); 913 if (ret == SHRINK_EMPTY) 914 ret = 0; 915 else 916 set_shrinker_bit(memcg, nid, i); 917 } 918 freed += ret; 919 920 if (rwsem_is_contended(&shrinker_rwsem)) { 921 freed = freed ? 
: 1; 922 break; 923 } 924 } 925 unlock: 926 up_read(&shrinker_rwsem); 927 return freed; 928 } 929 #else /* CONFIG_MEMCG */ 930 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, 931 struct mem_cgroup *memcg, int priority) 932 { 933 return 0; 934 } 935 #endif /* CONFIG_MEMCG */ 936 937 /** 938 * shrink_slab - shrink slab caches 939 * @gfp_mask: allocation context 940 * @nid: node whose slab caches to target 941 * @memcg: memory cgroup whose slab caches to target 942 * @priority: the reclaim priority 943 * 944 * Call the shrink functions to age shrinkable caches. 945 * 946 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set, 947 * unaware shrinkers will receive a node id of 0 instead. 948 * 949 * @memcg specifies the memory cgroup to target. Unaware shrinkers 950 * are called only if it is the root cgroup. 951 * 952 * @priority is sc->priority, we take the number of objects and >> by priority 953 * in order to get the scan target. 954 * 955 * Returns the number of reclaimed slab objects. 956 */ 957 static unsigned long shrink_slab(gfp_t gfp_mask, int nid, 958 struct mem_cgroup *memcg, 959 int priority) 960 { 961 unsigned long ret, freed = 0; 962 struct shrinker *shrinker; 963 964 /* 965 * The root memcg might be allocated even though memcg is disabled 966 * via "cgroup_disable=memory" boot parameter. This could make 967 * mem_cgroup_is_root() return false, then just run memcg slab 968 * shrink, but skip global shrink. This may result in premature 969 * oom. 970 */ 971 if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg)) 972 return shrink_slab_memcg(gfp_mask, nid, memcg, priority); 973 974 if (!down_read_trylock(&shrinker_rwsem)) 975 goto out; 976 977 list_for_each_entry(shrinker, &shrinker_list, list) { 978 struct shrink_control sc = { 979 .gfp_mask = gfp_mask, 980 .nid = nid, 981 .memcg = memcg, 982 }; 983 984 ret = do_shrink_slab(&sc, shrinker, priority); 985 if (ret == SHRINK_EMPTY) 986 ret = 0; 987 freed += ret; 988 /* 989 * Bail out if someone want to register a new shrinker to 990 * prevent the registration from being stalled for long periods 991 * by parallel ongoing shrinking. 992 */ 993 if (rwsem_is_contended(&shrinker_rwsem)) { 994 freed = freed ? : 1; 995 break; 996 } 997 } 998 999 up_read(&shrinker_rwsem); 1000 out: 1001 cond_resched(); 1002 return freed; 1003 } 1004 1005 static void drop_slab_node(int nid) 1006 { 1007 unsigned long freed; 1008 int shift = 0; 1009 1010 do { 1011 struct mem_cgroup *memcg = NULL; 1012 1013 if (fatal_signal_pending(current)) 1014 return; 1015 1016 freed = 0; 1017 memcg = mem_cgroup_iter(NULL, NULL, NULL); 1018 do { 1019 freed += shrink_slab(GFP_KERNEL, nid, memcg, 0); 1020 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); 1021 } while ((freed >> shift++) > 1); 1022 } 1023 1024 void drop_slab(void) 1025 { 1026 int nid; 1027 1028 for_each_online_node(nid) 1029 drop_slab_node(nid); 1030 } 1031 1032 static inline int is_page_cache_freeable(struct folio *folio) 1033 { 1034 /* 1035 * A freeable page cache page is referenced only by the caller 1036 * that isolated the page, the page cache and optional buffer 1037 * heads at page->private. 1038 */ 1039 return folio_ref_count(folio) - folio_test_private(folio) == 1040 1 + folio_nr_pages(folio); 1041 } 1042 1043 /* 1044 * We detected a synchronous write error writing a folio out. Probably 1045 * -ENOSPC. We need to propagate that into the address_space for a subsequent 1046 * fsync(), msync() or close(). 
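 *
 * (Added note: mapping_set_error() below records the error in the mapping's
 * writeback error state, so a later fsync() or msync() on the file can still
 * report the -ENOSPC that reclaim ran into here.)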
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up. But we have a ref on the folio and once
 * that folio is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping folio_lock() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
			       struct folio *folio, int error)
{
	folio_lock(folio);
	if (folio_mapping(folio) == mapping)
		mapping_set_error(mapping, error);
	folio_unlock(folio);
}

static bool skip_throttle_noprogress(pg_data_t *pgdat)
{
	int reclaimable = 0, write_pending = 0;
	int i;

	/*
	 * If kswapd is disabled, reschedule if necessary but do not
	 * throttle as the system is likely near OOM.
	 */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
		return true;

	/*
	 * If there are a lot of dirty/writeback pages then do not
	 * throttle as throttling will occur when the pages cycle
	 * towards the end of the LRU if still under writeback.
	 */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;

		if (!managed_zone(zone))
			continue;

		reclaimable += zone_reclaimable_pages(zone);
		write_pending += zone_page_state_snapshot(zone,
							  NR_ZONE_WRITE_PENDING);
	}
	if (2 * write_pending <= reclaimable)
		return true;

	return false;
}

void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
{
	wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
	long timeout, ret;
	DEFINE_WAIT(wait);

	/*
	 * Do not throttle IO workers, kthreads other than kswapd or
	 * workqueues. They may be required for reclaim to make
	 * forward progress (e.g. journalling workqueues or kthreads).
	 */
	if (!current_is_kswapd() &&
	    current->flags & (PF_IO_WORKER|PF_KTHREAD)) {
		cond_resched();
		return;
	}

	/*
	 * These figures are pulled out of thin air.
	 * VMSCAN_THROTTLE_ISOLATED is a transient condition based on too many
	 * parallel reclaimers which is a short-lived event so the timeout is
	 * short. Failing to make progress or waiting on writeback are
	 * potentially long-lived events so use a longer timeout. This is shaky
	 * logic as a failure to make progress could be due to anything from
	 * writeback to a slow device to excessively referenced pages at the tail
	 * of the inactive LRU.
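	 *
	 * (Added note: these timeouts are only upper bounds. For the
	 * writeback case, __acct_reclaim_writeback() wakes the waitqueue
	 * early once enough pages have been cleaned, so throttled tasks
	 * need not sleep for the full period.)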
1123 */ 1124 switch(reason) { 1125 case VMSCAN_THROTTLE_WRITEBACK: 1126 timeout = HZ/10; 1127 1128 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { 1129 WRITE_ONCE(pgdat->nr_reclaim_start, 1130 node_page_state(pgdat, NR_THROTTLED_WRITTEN)); 1131 } 1132 1133 break; 1134 case VMSCAN_THROTTLE_CONGESTED: 1135 fallthrough; 1136 case VMSCAN_THROTTLE_NOPROGRESS: 1137 if (skip_throttle_noprogress(pgdat)) { 1138 cond_resched(); 1139 return; 1140 } 1141 1142 timeout = 1; 1143 1144 break; 1145 case VMSCAN_THROTTLE_ISOLATED: 1146 timeout = HZ/50; 1147 break; 1148 default: 1149 WARN_ON_ONCE(1); 1150 timeout = HZ; 1151 break; 1152 } 1153 1154 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); 1155 ret = schedule_timeout(timeout); 1156 finish_wait(wqh, &wait); 1157 1158 if (reason == VMSCAN_THROTTLE_WRITEBACK) 1159 atomic_dec(&pgdat->nr_writeback_throttled); 1160 1161 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout), 1162 jiffies_to_usecs(timeout - ret), 1163 reason); 1164 } 1165 1166 /* 1167 * Account for pages written if tasks are throttled waiting on dirty 1168 * pages to clean. If enough pages have been cleaned since throttling 1169 * started then wakeup the throttled tasks. 1170 */ 1171 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, 1172 int nr_throttled) 1173 { 1174 unsigned long nr_written; 1175 1176 node_stat_add_folio(folio, NR_THROTTLED_WRITTEN); 1177 1178 /* 1179 * This is an inaccurate read as the per-cpu deltas may not 1180 * be synchronised. However, given that the system is 1181 * writeback throttled, it is not worth taking the penalty 1182 * of getting an accurate count. At worst, the throttle 1183 * timeout guarantees forward progress. 1184 */ 1185 nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) - 1186 READ_ONCE(pgdat->nr_reclaim_start); 1187 1188 if (nr_written > SWAP_CLUSTER_MAX * nr_throttled) 1189 wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]); 1190 } 1191 1192 /* possible outcome of pageout() */ 1193 typedef enum { 1194 /* failed to write page out, page is locked */ 1195 PAGE_KEEP, 1196 /* move page to the active list, page is locked */ 1197 PAGE_ACTIVATE, 1198 /* page has been sent to the disk successfully, page is unlocked */ 1199 PAGE_SUCCESS, 1200 /* page is clean and locked */ 1201 PAGE_CLEAN, 1202 } pageout_t; 1203 1204 /* 1205 * pageout is called by shrink_page_list() for each dirty page. 1206 * Calls ->writepage(). 1207 */ 1208 static pageout_t pageout(struct folio *folio, struct address_space *mapping, 1209 struct swap_iocb **plug) 1210 { 1211 /* 1212 * If the folio is dirty, only perform writeback if that write 1213 * will be non-blocking. To prevent this allocation from being 1214 * stalled by pagecache activity. But note that there may be 1215 * stalls if we need to run get_block(). We could test 1216 * PagePrivate for that. 1217 * 1218 * If this process is currently in __generic_file_write_iter() against 1219 * this folio's queue, we can perform writeback even if that 1220 * will block. 1221 * 1222 * If the folio is swapcache, write it back even if that would 1223 * block, for some throttling. This happens by accident, because 1224 * swap_backing_dev_info is bust: it doesn't reflect the 1225 * congestion state of the swapdevs. Easy to fix, if needed. 1226 */ 1227 if (!is_page_cache_freeable(folio)) 1228 return PAGE_KEEP; 1229 if (!mapping) { 1230 /* 1231 * Some data journaling orphaned folios can have 1232 * folio->mapping == NULL while being dirty with clean buffers. 
1233 */ 1234 if (folio_test_private(folio)) { 1235 if (try_to_free_buffers(folio)) { 1236 folio_clear_dirty(folio); 1237 pr_info("%s: orphaned folio\n", __func__); 1238 return PAGE_CLEAN; 1239 } 1240 } 1241 return PAGE_KEEP; 1242 } 1243 if (mapping->a_ops->writepage == NULL) 1244 return PAGE_ACTIVATE; 1245 1246 if (folio_clear_dirty_for_io(folio)) { 1247 int res; 1248 struct writeback_control wbc = { 1249 .sync_mode = WB_SYNC_NONE, 1250 .nr_to_write = SWAP_CLUSTER_MAX, 1251 .range_start = 0, 1252 .range_end = LLONG_MAX, 1253 .for_reclaim = 1, 1254 .swap_plug = plug, 1255 }; 1256 1257 folio_set_reclaim(folio); 1258 res = mapping->a_ops->writepage(&folio->page, &wbc); 1259 if (res < 0) 1260 handle_write_error(mapping, folio, res); 1261 if (res == AOP_WRITEPAGE_ACTIVATE) { 1262 folio_clear_reclaim(folio); 1263 return PAGE_ACTIVATE; 1264 } 1265 1266 if (!folio_test_writeback(folio)) { 1267 /* synchronous write or broken a_ops? */ 1268 folio_clear_reclaim(folio); 1269 } 1270 trace_mm_vmscan_write_folio(folio); 1271 node_stat_add_folio(folio, NR_VMSCAN_WRITE); 1272 return PAGE_SUCCESS; 1273 } 1274 1275 return PAGE_CLEAN; 1276 } 1277 1278 /* 1279 * Same as remove_mapping, but if the page is removed from the mapping, it 1280 * gets returned with a refcount of 0. 1281 */ 1282 static int __remove_mapping(struct address_space *mapping, struct folio *folio, 1283 bool reclaimed, struct mem_cgroup *target_memcg) 1284 { 1285 int refcount; 1286 void *shadow = NULL; 1287 1288 BUG_ON(!folio_test_locked(folio)); 1289 BUG_ON(mapping != folio_mapping(folio)); 1290 1291 if (!folio_test_swapcache(folio)) 1292 spin_lock(&mapping->host->i_lock); 1293 xa_lock_irq(&mapping->i_pages); 1294 /* 1295 * The non racy check for a busy page. 1296 * 1297 * Must be careful with the order of the tests. When someone has 1298 * a ref to the page, it may be possible that they dirty it then 1299 * drop the reference. So if PageDirty is tested before page_count 1300 * here, then the following race may occur: 1301 * 1302 * get_user_pages(&page); 1303 * [user mapping goes away] 1304 * write_to(page); 1305 * !PageDirty(page) [good] 1306 * SetPageDirty(page); 1307 * put_page(page); 1308 * !page_count(page) [good, discard it] 1309 * 1310 * [oops, our write_to data is lost] 1311 * 1312 * Reversing the order of the tests ensures such a situation cannot 1313 * escape unnoticed. The smp_rmb is needed to ensure the page->flags 1314 * load is not satisfied before that of page->_refcount. 1315 * 1316 * Note that if SetPageDirty is always performed via set_page_dirty, 1317 * and thus under the i_pages lock, then this ordering is not required. 1318 */ 1319 refcount = 1 + folio_nr_pages(folio); 1320 if (!folio_ref_freeze(folio, refcount)) 1321 goto cannot_free; 1322 /* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */ 1323 if (unlikely(folio_test_dirty(folio))) { 1324 folio_ref_unfreeze(folio, refcount); 1325 goto cannot_free; 1326 } 1327 1328 if (folio_test_swapcache(folio)) { 1329 swp_entry_t swap = folio_swap_entry(folio); 1330 mem_cgroup_swapout(folio, swap); 1331 if (reclaimed && !mapping_exiting(mapping)) 1332 shadow = workingset_eviction(folio, target_memcg); 1333 __delete_from_swap_cache(&folio->page, swap, shadow); 1334 xa_unlock_irq(&mapping->i_pages); 1335 put_swap_page(&folio->page, swap); 1336 } else { 1337 void (*free_folio)(struct folio *); 1338 1339 free_folio = mapping->a_ops->free_folio; 1340 /* 1341 * Remember a shadow entry for reclaimed file cache in 1342 * order to detect refaults, thus thrashing, later on. 
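 * (Added note: roughly speaking, the shadow is a value entry left in the
 * page cache xarray by workingset_eviction(), encoding when and where the
 * folio was evicted; workingset_refault() later compares it against the
 * current state to detect thrashing.)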
1343 * 1344 * But don't store shadows in an address space that is 1345 * already exiting. This is not just an optimization, 1346 * inode reclaim needs to empty out the radix tree or 1347 * the nodes are lost. Don't plant shadows behind its 1348 * back. 1349 * 1350 * We also don't store shadows for DAX mappings because the 1351 * only page cache pages found in these are zero pages 1352 * covering holes, and because we don't want to mix DAX 1353 * exceptional entries and shadow exceptional entries in the 1354 * same address_space. 1355 */ 1356 if (reclaimed && folio_is_file_lru(folio) && 1357 !mapping_exiting(mapping) && !dax_mapping(mapping)) 1358 shadow = workingset_eviction(folio, target_memcg); 1359 __filemap_remove_folio(folio, shadow); 1360 xa_unlock_irq(&mapping->i_pages); 1361 if (mapping_shrinkable(mapping)) 1362 inode_add_lru(mapping->host); 1363 spin_unlock(&mapping->host->i_lock); 1364 1365 if (free_folio) 1366 free_folio(folio); 1367 } 1368 1369 return 1; 1370 1371 cannot_free: 1372 xa_unlock_irq(&mapping->i_pages); 1373 if (!folio_test_swapcache(folio)) 1374 spin_unlock(&mapping->host->i_lock); 1375 return 0; 1376 } 1377 1378 /** 1379 * remove_mapping() - Attempt to remove a folio from its mapping. 1380 * @mapping: The address space. 1381 * @folio: The folio to remove. 1382 * 1383 * If the folio is dirty, under writeback or if someone else has a ref 1384 * on it, removal will fail. 1385 * Return: The number of pages removed from the mapping. 0 if the folio 1386 * could not be removed. 1387 * Context: The caller should have a single refcount on the folio and 1388 * hold its lock. 1389 */ 1390 long remove_mapping(struct address_space *mapping, struct folio *folio) 1391 { 1392 if (__remove_mapping(mapping, folio, false, NULL)) { 1393 /* 1394 * Unfreezing the refcount with 1 effectively 1395 * drops the pagecache ref for us without requiring another 1396 * atomic operation. 1397 */ 1398 folio_ref_unfreeze(folio, 1); 1399 return folio_nr_pages(folio); 1400 } 1401 return 0; 1402 } 1403 1404 /** 1405 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list. 1406 * @folio: Folio to be returned to an LRU list. 1407 * 1408 * Add previously isolated @folio to appropriate LRU list. 1409 * The folio may still be unevictable for other reasons. 1410 * 1411 * Context: lru_lock must not be held, interrupts must be enabled. 1412 */ 1413 void folio_putback_lru(struct folio *folio) 1414 { 1415 folio_add_lru(folio); 1416 folio_put(folio); /* drop ref from isolate */ 1417 } 1418 1419 enum page_references { 1420 PAGEREF_RECLAIM, 1421 PAGEREF_RECLAIM_CLEAN, 1422 PAGEREF_KEEP, 1423 PAGEREF_ACTIVATE, 1424 }; 1425 1426 static enum page_references folio_check_references(struct folio *folio, 1427 struct scan_control *sc) 1428 { 1429 int referenced_ptes, referenced_folio; 1430 unsigned long vm_flags; 1431 1432 referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, 1433 &vm_flags); 1434 referenced_folio = folio_test_clear_referenced(folio); 1435 1436 /* 1437 * The supposedly reclaimable folio was found to be in a VM_LOCKED vma. 1438 * Let the folio, now marked Mlocked, be moved to the unevictable list. 
1439 */ 1440 if (vm_flags & VM_LOCKED) 1441 return PAGEREF_ACTIVATE; 1442 1443 /* rmap lock contention: rotate */ 1444 if (referenced_ptes == -1) 1445 return PAGEREF_KEEP; 1446 1447 if (referenced_ptes) { 1448 /* 1449 * All mapped folios start out with page table 1450 * references from the instantiating fault, so we need 1451 * to look twice if a mapped file/anon folio is used more 1452 * than once. 1453 * 1454 * Mark it and spare it for another trip around the 1455 * inactive list. Another page table reference will 1456 * lead to its activation. 1457 * 1458 * Note: the mark is set for activated folios as well 1459 * so that recently deactivated but used folios are 1460 * quickly recovered. 1461 */ 1462 folio_set_referenced(folio); 1463 1464 if (referenced_folio || referenced_ptes > 1) 1465 return PAGEREF_ACTIVATE; 1466 1467 /* 1468 * Activate file-backed executable folios after first usage. 1469 */ 1470 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) 1471 return PAGEREF_ACTIVATE; 1472 1473 return PAGEREF_KEEP; 1474 } 1475 1476 /* Reclaim if clean, defer dirty folios to writeback */ 1477 if (referenced_folio && folio_is_file_lru(folio)) 1478 return PAGEREF_RECLAIM_CLEAN; 1479 1480 return PAGEREF_RECLAIM; 1481 } 1482 1483 /* Check if a page is dirty or under writeback */ 1484 static void folio_check_dirty_writeback(struct folio *folio, 1485 bool *dirty, bool *writeback) 1486 { 1487 struct address_space *mapping; 1488 1489 /* 1490 * Anonymous pages are not handled by flushers and must be written 1491 * from reclaim context. Do not stall reclaim based on them. 1492 * MADV_FREE anonymous pages are put into inactive file list too. 1493 * They could be mistakenly treated as file lru. So further anon 1494 * test is needed. 1495 */ 1496 if (!folio_is_file_lru(folio) || 1497 (folio_test_anon(folio) && !folio_test_swapbacked(folio))) { 1498 *dirty = false; 1499 *writeback = false; 1500 return; 1501 } 1502 1503 /* By default assume that the folio flags are accurate */ 1504 *dirty = folio_test_dirty(folio); 1505 *writeback = folio_test_writeback(folio); 1506 1507 /* Verify dirty/writeback state if the filesystem supports it */ 1508 if (!folio_test_private(folio)) 1509 return; 1510 1511 mapping = folio_mapping(folio); 1512 if (mapping && mapping->a_ops->is_dirty_writeback) 1513 mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); 1514 } 1515 1516 static struct page *alloc_demote_page(struct page *page, unsigned long node) 1517 { 1518 struct migration_target_control mtc = { 1519 /* 1520 * Allocate from 'node', or fail quickly and quietly. 1521 * When this happens, 'page' will likely just be discarded 1522 * instead of migrated. 1523 */ 1524 .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | 1525 __GFP_THISNODE | __GFP_NOWARN | 1526 __GFP_NOMEMALLOC | GFP_NOWAIT, 1527 .nid = node 1528 }; 1529 1530 return alloc_migration_target(page, (unsigned long)&mtc); 1531 } 1532 1533 /* 1534 * Take pages on @demote_list and attempt to demote them to 1535 * another node. Pages which are not demoted are left on 1536 * @demote_pages. 
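 *
 * (Illustrative example, not from the original comment: on a system where
 * next_demotion_node() maps a DRAM node to a slower memory-only node, cold
 * pages are migrated there instead of being discarded, and only pages that
 * fail to migrate remain on the list for regular reclaim.)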
1537 */ 1538 static unsigned int demote_page_list(struct list_head *demote_pages, 1539 struct pglist_data *pgdat) 1540 { 1541 int target_nid = next_demotion_node(pgdat->node_id); 1542 unsigned int nr_succeeded; 1543 1544 if (list_empty(demote_pages)) 1545 return 0; 1546 1547 if (target_nid == NUMA_NO_NODE) 1548 return 0; 1549 1550 /* Demotion ignores all cpuset and mempolicy settings */ 1551 migrate_pages(demote_pages, alloc_demote_page, NULL, 1552 target_nid, MIGRATE_ASYNC, MR_DEMOTION, 1553 &nr_succeeded); 1554 1555 if (current_is_kswapd()) 1556 __count_vm_events(PGDEMOTE_KSWAPD, nr_succeeded); 1557 else 1558 __count_vm_events(PGDEMOTE_DIRECT, nr_succeeded); 1559 1560 return nr_succeeded; 1561 } 1562 1563 static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask) 1564 { 1565 if (gfp_mask & __GFP_FS) 1566 return true; 1567 if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO)) 1568 return false; 1569 /* 1570 * We can "enter_fs" for swap-cache with only __GFP_IO 1571 * providing this isn't SWP_FS_OPS. 1572 * ->flags can be updated non-atomicially (scan_swap_map_slots), 1573 * but that will never affect SWP_FS_OPS, so the data_race 1574 * is safe. 1575 */ 1576 return !data_race(page_swap_flags(&folio->page) & SWP_FS_OPS); 1577 } 1578 1579 /* 1580 * shrink_page_list() returns the number of reclaimed pages 1581 */ 1582 static unsigned int shrink_page_list(struct list_head *page_list, 1583 struct pglist_data *pgdat, 1584 struct scan_control *sc, 1585 struct reclaim_stat *stat, 1586 bool ignore_references) 1587 { 1588 LIST_HEAD(ret_pages); 1589 LIST_HEAD(free_pages); 1590 LIST_HEAD(demote_pages); 1591 unsigned int nr_reclaimed = 0; 1592 unsigned int pgactivate = 0; 1593 bool do_demote_pass; 1594 struct swap_iocb *plug = NULL; 1595 1596 memset(stat, 0, sizeof(*stat)); 1597 cond_resched(); 1598 do_demote_pass = can_demote(pgdat->node_id, sc); 1599 1600 retry: 1601 while (!list_empty(page_list)) { 1602 struct address_space *mapping; 1603 struct folio *folio; 1604 enum page_references references = PAGEREF_RECLAIM; 1605 bool dirty, writeback; 1606 unsigned int nr_pages; 1607 1608 cond_resched(); 1609 1610 folio = lru_to_folio(page_list); 1611 list_del(&folio->lru); 1612 1613 if (!folio_trylock(folio)) 1614 goto keep; 1615 1616 VM_BUG_ON_FOLIO(folio_test_active(folio), folio); 1617 1618 nr_pages = folio_nr_pages(folio); 1619 1620 /* Account the number of base pages */ 1621 sc->nr_scanned += nr_pages; 1622 1623 if (unlikely(!folio_evictable(folio))) 1624 goto activate_locked; 1625 1626 if (!sc->may_unmap && folio_mapped(folio)) 1627 goto keep_locked; 1628 1629 /* 1630 * The number of dirty pages determines if a node is marked 1631 * reclaim_congested. kswapd will stall and start writing 1632 * folios if the tail of the LRU is all dirty unqueued folios. 1633 */ 1634 folio_check_dirty_writeback(folio, &dirty, &writeback); 1635 if (dirty || writeback) 1636 stat->nr_dirty += nr_pages; 1637 1638 if (dirty && !writeback) 1639 stat->nr_unqueued_dirty += nr_pages; 1640 1641 /* 1642 * Treat this folio as congested if folios are cycling 1643 * through the LRU so quickly that the folios marked 1644 * for immediate reclaim are making it to the end of 1645 * the LRU a second time. 1646 */ 1647 if (writeback && folio_test_reclaim(folio)) 1648 stat->nr_congested += nr_pages; 1649 1650 /* 1651 * If a folio at the tail of the LRU is under writeback, there 1652 * are three cases to consider. 
1653 * 1654 * 1) If reclaim is encountering an excessive number 1655 * of folios under writeback and this folio has both 1656 * the writeback and reclaim flags set, then it 1657 * indicates that folios are being queued for I/O but 1658 * are being recycled through the LRU before the I/O 1659 * can complete. Waiting on the folio itself risks an 1660 * indefinite stall if it is impossible to writeback 1661 * the folio due to I/O error or disconnected storage 1662 * so instead note that the LRU is being scanned too 1663 * quickly and the caller can stall after the folio 1664 * list has been processed. 1665 * 1666 * 2) Global or new memcg reclaim encounters a folio that is 1667 * not marked for immediate reclaim, or the caller does not 1668 * have __GFP_FS (or __GFP_IO if it's simply going to swap, 1669 * not to fs). In this case mark the folio for immediate 1670 * reclaim and continue scanning. 1671 * 1672 * Require may_enter_fs() because we would wait on fs, which 1673 * may not have submitted I/O yet. And the loop driver might 1674 * enter reclaim, and deadlock if it waits on a folio for 1675 * which it is needed to do the write (loop masks off 1676 * __GFP_IO|__GFP_FS for this reason); but more thought 1677 * would probably show more reasons. 1678 * 1679 * 3) Legacy memcg encounters a folio that already has the 1680 * reclaim flag set. memcg does not have any dirty folio 1681 * throttling so we could easily OOM just because too many 1682 * folios are in writeback and there is nothing else to 1683 * reclaim. Wait for the writeback to complete. 1684 * 1685 * In cases 1) and 2) we activate the folios to get them out of 1686 * the way while we continue scanning for clean folios on the 1687 * inactive list and refilling from the active list. The 1688 * observation here is that waiting for disk writes is more 1689 * expensive than potentially causing reloads down the line. 1690 * Since they're marked for immediate reclaim, they won't put 1691 * memory pressure on the cache working set any longer than it 1692 * takes to write them to disk. 1693 */ 1694 if (folio_test_writeback(folio)) { 1695 /* Case 1 above */ 1696 if (current_is_kswapd() && 1697 folio_test_reclaim(folio) && 1698 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { 1699 stat->nr_immediate += nr_pages; 1700 goto activate_locked; 1701 1702 /* Case 2 above */ 1703 } else if (writeback_throttling_sane(sc) || 1704 !folio_test_reclaim(folio) || 1705 !may_enter_fs(folio, sc->gfp_mask)) { 1706 /* 1707 * This is slightly racy - 1708 * folio_end_writeback() might have 1709 * just cleared the reclaim flag, then 1710 * setting the reclaim flag here ends up 1711 * interpreted as the readahead flag - but 1712 * that does not matter enough to care. 1713 * What we do want is for this folio to 1714 * have the reclaim flag set next time 1715 * memcg reclaim reaches the tests above, 1716 * so it will then wait for writeback to 1717 * avoid OOM; and it's also appropriate 1718 * in global reclaim. 
1719 */ 1720 folio_set_reclaim(folio); 1721 stat->nr_writeback += nr_pages; 1722 goto activate_locked; 1723 1724 /* Case 3 above */ 1725 } else { 1726 folio_unlock(folio); 1727 folio_wait_writeback(folio); 1728 /* then go back and try same folio again */ 1729 list_add_tail(&folio->lru, page_list); 1730 continue; 1731 } 1732 } 1733 1734 if (!ignore_references) 1735 references = folio_check_references(folio, sc); 1736 1737 switch (references) { 1738 case PAGEREF_ACTIVATE: 1739 goto activate_locked; 1740 case PAGEREF_KEEP: 1741 stat->nr_ref_keep += nr_pages; 1742 goto keep_locked; 1743 case PAGEREF_RECLAIM: 1744 case PAGEREF_RECLAIM_CLEAN: 1745 ; /* try to reclaim the folio below */ 1746 } 1747 1748 /* 1749 * Before reclaiming the folio, try to relocate 1750 * its contents to another node. 1751 */ 1752 if (do_demote_pass && 1753 (thp_migration_supported() || !folio_test_large(folio))) { 1754 list_add(&folio->lru, &demote_pages); 1755 folio_unlock(folio); 1756 continue; 1757 } 1758 1759 /* 1760 * Anonymous process memory has backing store? 1761 * Try to allocate it some swap space here. 1762 * Lazyfree folio could be freed directly 1763 */ 1764 if (folio_test_anon(folio) && folio_test_swapbacked(folio)) { 1765 if (!folio_test_swapcache(folio)) { 1766 if (!(sc->gfp_mask & __GFP_IO)) 1767 goto keep_locked; 1768 if (folio_maybe_dma_pinned(folio)) 1769 goto keep_locked; 1770 if (folio_test_large(folio)) { 1771 /* cannot split folio, skip it */ 1772 if (!can_split_folio(folio, NULL)) 1773 goto activate_locked; 1774 /* 1775 * Split folios without a PMD map right 1776 * away. Chances are some or all of the 1777 * tail pages can be freed without IO. 1778 */ 1779 if (!folio_entire_mapcount(folio) && 1780 split_folio_to_list(folio, 1781 page_list)) 1782 goto activate_locked; 1783 } 1784 if (!add_to_swap(folio)) { 1785 if (!folio_test_large(folio)) 1786 goto activate_locked_split; 1787 /* Fallback to swap normal pages */ 1788 if (split_folio_to_list(folio, 1789 page_list)) 1790 goto activate_locked; 1791 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1792 count_vm_event(THP_SWPOUT_FALLBACK); 1793 #endif 1794 if (!add_to_swap(folio)) 1795 goto activate_locked_split; 1796 } 1797 } 1798 } else if (folio_test_swapbacked(folio) && 1799 folio_test_large(folio)) { 1800 /* Split shmem folio */ 1801 if (split_folio_to_list(folio, page_list)) 1802 goto keep_locked; 1803 } 1804 1805 /* 1806 * If the folio was split above, the tail pages will make 1807 * their own pass through this function and be accounted 1808 * then. 1809 */ 1810 if ((nr_pages > 1) && !folio_test_large(folio)) { 1811 sc->nr_scanned -= (nr_pages - 1); 1812 nr_pages = 1; 1813 } 1814 1815 /* 1816 * The folio is mapped into the page tables of one or more 1817 * processes. Try to unmap it here. 1818 */ 1819 if (folio_mapped(folio)) { 1820 enum ttu_flags flags = TTU_BATCH_FLUSH; 1821 bool was_swapbacked = folio_test_swapbacked(folio); 1822 1823 if (folio_test_pmd_mappable(folio)) 1824 flags |= TTU_SPLIT_HUGE_PMD; 1825 1826 try_to_unmap(folio, flags); 1827 if (folio_mapped(folio)) { 1828 stat->nr_unmap_fail += nr_pages; 1829 if (!was_swapbacked && 1830 folio_test_swapbacked(folio)) 1831 stat->nr_lazyfree_fail += nr_pages; 1832 goto activate_locked; 1833 } 1834 } 1835 1836 mapping = folio_mapping(folio); 1837 if (folio_test_dirty(folio)) { 1838 /* 1839 * Only kswapd can writeback filesystem folios 1840 * to avoid risk of stack overflow. 
But avoid 1841 * injecting inefficient single-folio I/O into 1842 * flusher writeback as much as possible: only 1843 * write folios when we've encountered many 1844 * dirty folios, and when we've already scanned 1845 * the rest of the LRU for clean folios and see 1846 * the same dirty folios again (with the reclaim 1847 * flag set). 1848 */ 1849 if (folio_is_file_lru(folio) && 1850 (!current_is_kswapd() || 1851 !folio_test_reclaim(folio) || 1852 !test_bit(PGDAT_DIRTY, &pgdat->flags))) { 1853 /* 1854 * Immediately reclaim when written back. 1855 * Similar in principle to deactivate_page() 1856 * except we already have the folio isolated 1857 * and know it's dirty 1858 */ 1859 node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE, 1860 nr_pages); 1861 folio_set_reclaim(folio); 1862 1863 goto activate_locked; 1864 } 1865 1866 if (references == PAGEREF_RECLAIM_CLEAN) 1867 goto keep_locked; 1868 if (!may_enter_fs(folio, sc->gfp_mask)) 1869 goto keep_locked; 1870 if (!sc->may_writepage) 1871 goto keep_locked; 1872 1873 /* 1874 * Folio is dirty. Flush the TLB if a writable entry 1875 * potentially exists to avoid CPU writes after I/O 1876 * starts and then write it out here. 1877 */ 1878 try_to_unmap_flush_dirty(); 1879 switch (pageout(folio, mapping, &plug)) { 1880 case PAGE_KEEP: 1881 goto keep_locked; 1882 case PAGE_ACTIVATE: 1883 goto activate_locked; 1884 case PAGE_SUCCESS: 1885 stat->nr_pageout += nr_pages; 1886 1887 if (folio_test_writeback(folio)) 1888 goto keep; 1889 if (folio_test_dirty(folio)) 1890 goto keep; 1891 1892 /* 1893 * A synchronous write - probably a ramdisk. Go 1894 * ahead and try to reclaim the folio. 1895 */ 1896 if (!folio_trylock(folio)) 1897 goto keep; 1898 if (folio_test_dirty(folio) || 1899 folio_test_writeback(folio)) 1900 goto keep_locked; 1901 mapping = folio_mapping(folio); 1902 fallthrough; 1903 case PAGE_CLEAN: 1904 ; /* try to free the folio below */ 1905 } 1906 } 1907 1908 /* 1909 * If the folio has buffers, try to free the buffer 1910 * mappings associated with this folio. If we succeed 1911 * we try to free the folio as well. 1912 * 1913 * We do this even if the folio is dirty. 1914 * filemap_release_folio() does not perform I/O, but it 1915 * is possible for a folio to have the dirty flag set, 1916 * but it is actually clean (all its buffers are clean). 1917 * This happens if the buffers were written out directly, 1918 * with submit_bh(). ext3 will do this, as well as 1919 * the blockdev mapping. filemap_release_folio() will 1920 * discover that cleanness and will drop the buffers 1921 * and mark the folio clean - it can be freed. 1922 * 1923 * Rarely, folios can have buffers and no ->mapping. 1924 * These are the folios which were not successfully 1925 * invalidated in truncate_cleanup_folio(). We try to 1926 * drop those buffers here and if that worked, and the 1927 * folio is no longer mapped into process address space 1928 * (refcount == 1) it can be freed. Otherwise, leave 1929 * the folio on the LRU so it is swappable. 1930 */ 1931 if (folio_has_private(folio)) { 1932 if (!filemap_release_folio(folio, sc->gfp_mask)) 1933 goto activate_locked; 1934 if (!mapping && folio_ref_count(folio) == 1) { 1935 folio_unlock(folio); 1936 if (folio_put_testzero(folio)) 1937 goto free_it; 1938 else { 1939 /* 1940 * rare race with speculative reference. 1941 * the speculative reference will free 1942 * this folio shortly, so we may 1943 * increment nr_reclaimed here (and 1944 * leave it off the LRU). 
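 * (Added note: such a speculative reference typically comes from a
 * lockless page cache lookup that elevated the refcount just before we
 * checked it; that racing lookup will drop its reference and free the
 * folio shortly.)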
1945 */ 1946 nr_reclaimed += nr_pages; 1947 continue; 1948 } 1949 } 1950 } 1951 1952 if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) { 1953 /* follow __remove_mapping for reference */ 1954 if (!folio_ref_freeze(folio, 1)) 1955 goto keep_locked; 1956 /* 1957 * The folio has only one reference left, which is 1958 * from the isolation. After the caller puts the 1959 * folio back on the lru and drops the reference, the 1960 * folio will be freed anyway. It doesn't matter 1961 * which lru it goes on. So we don't bother checking 1962 * the dirty flag here. 1963 */ 1964 count_vm_events(PGLAZYFREED, nr_pages); 1965 count_memcg_folio_events(folio, PGLAZYFREED, nr_pages); 1966 } else if (!mapping || !__remove_mapping(mapping, folio, true, 1967 sc->target_mem_cgroup)) 1968 goto keep_locked; 1969 1970 folio_unlock(folio); 1971 free_it: 1972 /* 1973 * Folio may get swapped out as a whole, need to account 1974 * all pages in it. 1975 */ 1976 nr_reclaimed += nr_pages; 1977 1978 /* 1979 * Is there need to periodically free_page_list? It would 1980 * appear not as the counts should be low 1981 */ 1982 if (unlikely(folio_test_large(folio))) 1983 destroy_compound_page(&folio->page); 1984 else 1985 list_add(&folio->lru, &free_pages); 1986 continue; 1987 1988 activate_locked_split: 1989 /* 1990 * The tail pages that are failed to add into swap cache 1991 * reach here. Fixup nr_scanned and nr_pages. 1992 */ 1993 if (nr_pages > 1) { 1994 sc->nr_scanned -= (nr_pages - 1); 1995 nr_pages = 1; 1996 } 1997 activate_locked: 1998 /* Not a candidate for swapping, so reclaim swap space. */ 1999 if (folio_test_swapcache(folio) && 2000 (mem_cgroup_swap_full(&folio->page) || 2001 folio_test_mlocked(folio))) 2002 try_to_free_swap(&folio->page); 2003 VM_BUG_ON_FOLIO(folio_test_active(folio), folio); 2004 if (!folio_test_mlocked(folio)) { 2005 int type = folio_is_file_lru(folio); 2006 folio_set_active(folio); 2007 stat->nr_activate[type] += nr_pages; 2008 count_memcg_folio_events(folio, PGACTIVATE, nr_pages); 2009 } 2010 keep_locked: 2011 folio_unlock(folio); 2012 keep: 2013 list_add(&folio->lru, &ret_pages); 2014 VM_BUG_ON_FOLIO(folio_test_lru(folio) || 2015 folio_test_unevictable(folio), folio); 2016 } 2017 /* 'page_list' is always empty here */ 2018 2019 /* Migrate folios selected for demotion */ 2020 nr_reclaimed += demote_page_list(&demote_pages, pgdat); 2021 /* Folios that could not be demoted are still in @demote_pages */ 2022 if (!list_empty(&demote_pages)) { 2023 /* Folios which weren't demoted go back on @page_list for retry: */ 2024 list_splice_init(&demote_pages, page_list); 2025 do_demote_pass = false; 2026 goto retry; 2027 } 2028 2029 pgactivate = stat->nr_activate[0] + stat->nr_activate[1]; 2030 2031 mem_cgroup_uncharge_list(&free_pages); 2032 try_to_unmap_flush(); 2033 free_unref_page_list(&free_pages); 2034 2035 list_splice(&ret_pages, page_list); 2036 count_vm_events(PGACTIVATE, pgactivate); 2037 2038 if (plug) 2039 swap_write_unplug(plug); 2040 return nr_reclaimed; 2041 } 2042 2043 unsigned int reclaim_clean_pages_from_list(struct zone *zone, 2044 struct list_head *page_list) 2045 { 2046 struct scan_control sc = { 2047 .gfp_mask = GFP_KERNEL, 2048 .may_unmap = 1, 2049 }; 2050 struct reclaim_stat stat; 2051 unsigned int nr_reclaimed; 2052 struct page *page, *next; 2053 LIST_HEAD(clean_pages); 2054 unsigned int noreclaim_flag; 2055 2056 list_for_each_entry_safe(page, next, page_list, lru) { 2057 if (!PageHuge(page) && page_is_file_lru(page) && 2058 !PageDirty(page) && !__PageMovable(page) && 2059 
!PageUnevictable(page)) { 2060 ClearPageActive(page); 2061 list_move(&page->lru, &clean_pages); 2062 } 2063 } 2064 2065 /* 2066 * We should be safe here since we are only dealing with file pages and 2067 * we are not kswapd and therefore cannot write dirty file pages. But 2068 * call memalloc_noreclaim_save() anyway, just in case these conditions 2069 * change in the future. 2070 */ 2071 noreclaim_flag = memalloc_noreclaim_save(); 2072 nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc, 2073 &stat, true); 2074 memalloc_noreclaim_restore(noreclaim_flag); 2075 2076 list_splice(&clean_pages, page_list); 2077 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, 2078 -(long)nr_reclaimed); 2079 /* 2080 * Since lazyfree pages are isolated from the file LRU from the beginning, 2081 * they will rotate back to the anonymous LRU in the end if they could not 2082 * be discarded, so the isolated counts would be mismatched. 2083 * Compensate the isolated counts for both LRU lists. 2084 */ 2085 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, 2086 stat.nr_lazyfree_fail); 2087 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, 2088 -(long)stat.nr_lazyfree_fail); 2089 return nr_reclaimed; 2090 } 2091 2092 /* 2093 * Update LRU sizes after isolating pages. The LRU size updates must 2094 * be complete before mem_cgroup_update_lru_size due to a sanity check. 2095 */ 2096 static __always_inline void update_lru_sizes(struct lruvec *lruvec, 2097 enum lru_list lru, unsigned long *nr_zone_taken) 2098 { 2099 int zid; 2100 2101 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 2102 if (!nr_zone_taken[zid]) 2103 continue; 2104 2105 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); 2106 } 2107 2108 } 2109 2110 /* 2111 * Isolate pages from the lruvec to fill the @dst list, scanning up to nr_to_scan entries. 2112 * 2113 * lruvec->lru_lock is heavily contended. Some of the functions that 2114 * shrink the lists perform better by taking out a batch of pages 2115 * and working on them outside the LRU lock. 2116 * 2117 * For pagecache-intensive workloads, this function is the hottest 2118 * spot in the kernel (apart from copy_*_user functions). 2119 * 2120 * lruvec->lru_lock must be held before calling this function. 2121 * 2122 * @nr_to_scan: The number of eligible pages to look through on the list. 2123 * @lruvec: The LRU vector to pull pages from. 2124 * @dst: The temporary list to put pages on. 2125 * @nr_scanned: The number of pages that were scanned. 2126 * @sc: The scan_control struct for this reclaim session. 2127 * @lru: LRU list id for isolating 2128 * 2129 * Returns how many pages were moved onto *@dst.
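 *
 * Typical call site (as in shrink_inactive_list() further down, with
 * lruvec->lru_lock held):
 *
 *	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
 *				     &nr_scanned, sc, lru);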
2130 */ 2131 static unsigned long isolate_lru_pages(unsigned long nr_to_scan, 2132 struct lruvec *lruvec, struct list_head *dst, 2133 unsigned long *nr_scanned, struct scan_control *sc, 2134 enum lru_list lru) 2135 { 2136 struct list_head *src = &lruvec->lists[lru]; 2137 unsigned long nr_taken = 0; 2138 unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 }; 2139 unsigned long nr_skipped[MAX_NR_ZONES] = { 0, }; 2140 unsigned long skipped = 0; 2141 unsigned long scan, total_scan, nr_pages; 2142 LIST_HEAD(pages_skipped); 2143 2144 total_scan = 0; 2145 scan = 0; 2146 while (scan < nr_to_scan && !list_empty(src)) { 2147 struct list_head *move_to = src; 2148 struct page *page; 2149 2150 page = lru_to_page(src); 2151 prefetchw_prev_lru_page(page, src, flags); 2152 2153 nr_pages = compound_nr(page); 2154 total_scan += nr_pages; 2155 2156 if (page_zonenum(page) > sc->reclaim_idx) { 2157 nr_skipped[page_zonenum(page)] += nr_pages; 2158 move_to = &pages_skipped; 2159 goto move; 2160 } 2161 2162 /* 2163 * Do not count skipped pages because that makes the function 2164 * return with no isolated pages if the LRU mostly contains 2165 * ineligible pages. This causes the VM to not reclaim any 2166 * pages, triggering a premature OOM. 2167 * Account all tail pages of THP. 2168 */ 2169 scan += nr_pages; 2170 2171 if (!PageLRU(page)) 2172 goto move; 2173 if (!sc->may_unmap && page_mapped(page)) 2174 goto move; 2175 2176 /* 2177 * Be careful not to clear PageLRU until after we're 2178 * sure the page is not being freed elsewhere -- the 2179 * page release code relies on it. 2180 */ 2181 if (unlikely(!get_page_unless_zero(page))) 2182 goto move; 2183 2184 if (!TestClearPageLRU(page)) { 2185 /* Another thread is already isolating this page */ 2186 put_page(page); 2187 goto move; 2188 } 2189 2190 nr_taken += nr_pages; 2191 nr_zone_taken[page_zonenum(page)] += nr_pages; 2192 move_to = dst; 2193 move: 2194 list_move(&page->lru, move_to); 2195 } 2196 2197 /* 2198 * Splice any skipped pages to the start of the LRU list. Note that 2199 * this disrupts the LRU order when reclaiming for lower zones but 2200 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX 2201 * scanning would soon rescan the same pages to skip and waste lots 2202 * of cpu cycles. 2203 */ 2204 if (!list_empty(&pages_skipped)) { 2205 int zid; 2206 2207 list_splice(&pages_skipped, src); 2208 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 2209 if (!nr_skipped[zid]) 2210 continue; 2211 2212 __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]); 2213 skipped += nr_skipped[zid]; 2214 } 2215 } 2216 *nr_scanned = total_scan; 2217 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, 2218 total_scan, skipped, nr_taken, 2219 sc->may_unmap ? 0 : ISOLATE_UNMAPPED, lru); 2220 update_lru_sizes(lruvec, lru, nr_zone_taken); 2221 return nr_taken; 2222 } 2223 2224 /** 2225 * folio_isolate_lru() - Try to isolate a folio from its LRU list. 2226 * @folio: Folio to isolate from its LRU list. 2227 * 2228 * Isolate a @folio from an LRU list and adjust the vmstat statistic 2229 * corresponding to whatever LRU list the folio was on. 2230 * 2231 * The folio will have its LRU flag cleared. If it was found on the 2232 * active list, it will have the Active flag set. If it was found on the 2233 * unevictable list, it will have the Unevictable flag set. These flags 2234 * may need to be cleared by the caller before letting the page go. 2235 * 2236 * Context: 2237 * 2238 * (1) Must be called with an elevated refcount on the page. 
This is a 2239 * fundamental difference from isolate_lru_pages() (which is called 2240 * without a stable reference). 2241 * (2) The lru_lock must not be held. 2242 * (3) Interrupts must be enabled. 2243 * 2244 * Return: 0 if the folio was removed from an LRU list. 2245 * -EBUSY if the folio was not on an LRU list. 2246 */ 2247 int folio_isolate_lru(struct folio *folio) 2248 { 2249 int ret = -EBUSY; 2250 2251 VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio); 2252 2253 if (folio_test_clear_lru(folio)) { 2254 struct lruvec *lruvec; 2255 2256 folio_get(folio); 2257 lruvec = folio_lruvec_lock_irq(folio); 2258 lruvec_del_folio(lruvec, folio); 2259 unlock_page_lruvec_irq(lruvec); 2260 ret = 0; 2261 } 2262 2263 return ret; 2264 } 2265 2266 /* 2267 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and 2268 * then get rescheduled. When there are massive number of tasks doing page 2269 * allocation, such sleeping direct reclaimers may keep piling up on each CPU, 2270 * the LRU list will go small and be scanned faster than necessary, leading to 2271 * unnecessary swapping, thrashing and OOM. 2272 */ 2273 static int too_many_isolated(struct pglist_data *pgdat, int file, 2274 struct scan_control *sc) 2275 { 2276 unsigned long inactive, isolated; 2277 bool too_many; 2278 2279 if (current_is_kswapd()) 2280 return 0; 2281 2282 if (!writeback_throttling_sane(sc)) 2283 return 0; 2284 2285 if (file) { 2286 inactive = node_page_state(pgdat, NR_INACTIVE_FILE); 2287 isolated = node_page_state(pgdat, NR_ISOLATED_FILE); 2288 } else { 2289 inactive = node_page_state(pgdat, NR_INACTIVE_ANON); 2290 isolated = node_page_state(pgdat, NR_ISOLATED_ANON); 2291 } 2292 2293 /* 2294 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they 2295 * won't get blocked by normal direct-reclaimers, forming a circular 2296 * deadlock. 2297 */ 2298 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) 2299 inactive >>= 3; 2300 2301 too_many = isolated > inactive; 2302 2303 /* Wake up tasks throttled due to too_many_isolated. */ 2304 if (!too_many) 2305 wake_throttle_isolated(pgdat); 2306 2307 return too_many; 2308 } 2309 2310 /* 2311 * move_pages_to_lru() moves pages from private @list to appropriate LRU list. 2312 * On return, @list is reused as a list of pages to be freed by the caller. 2313 * 2314 * Returns the number of pages moved to the given lruvec. 2315 */ 2316 static unsigned int move_pages_to_lru(struct lruvec *lruvec, 2317 struct list_head *list) 2318 { 2319 int nr_pages, nr_moved = 0; 2320 LIST_HEAD(pages_to_free); 2321 struct page *page; 2322 2323 while (!list_empty(list)) { 2324 page = lru_to_page(list); 2325 VM_BUG_ON_PAGE(PageLRU(page), page); 2326 list_del(&page->lru); 2327 if (unlikely(!page_evictable(page))) { 2328 spin_unlock_irq(&lruvec->lru_lock); 2329 putback_lru_page(page); 2330 spin_lock_irq(&lruvec->lru_lock); 2331 continue; 2332 } 2333 2334 /* 2335 * The SetPageLRU needs to be kept here for list integrity. 
2336 * Otherwise: 2337 * #0 move_pages_to_lru #1 release_pages 2338 * if !put_page_testzero 2339 * if (put_page_testzero()) 2340 * !PageLRU //skip lru_lock 2341 * SetPageLRU() 2342 * list_add(&page->lru,) 2343 * list_add(&page->lru,) 2344 */ 2345 SetPageLRU(page); 2346 2347 if (unlikely(put_page_testzero(page))) { 2348 __clear_page_lru_flags(page); 2349 2350 if (unlikely(PageCompound(page))) { 2351 spin_unlock_irq(&lruvec->lru_lock); 2352 destroy_compound_page(page); 2353 spin_lock_irq(&lruvec->lru_lock); 2354 } else 2355 list_add(&page->lru, &pages_to_free); 2356 2357 continue; 2358 } 2359 2360 /* 2361 * All pages were isolated from the same lruvec (and isolation 2362 * inhibits memcg migration). 2363 */ 2364 VM_BUG_ON_PAGE(!folio_matches_lruvec(page_folio(page), lruvec), page); 2365 add_page_to_lru_list(page, lruvec); 2366 nr_pages = thp_nr_pages(page); 2367 nr_moved += nr_pages; 2368 if (PageActive(page)) 2369 workingset_age_nonresident(lruvec, nr_pages); 2370 } 2371 2372 /* 2373 * To save our caller's stack, now use input list for pages to free. 2374 */ 2375 list_splice(&pages_to_free, list); 2376 2377 return nr_moved; 2378 } 2379 2380 /* 2381 * If a kernel thread (such as nfsd for loop-back mounts) services a backing 2382 * device by writing to the page cache it sets PF_LOCAL_THROTTLE. In this case 2383 * we should not throttle. Otherwise it is safe to do so. 2384 */ 2385 static int current_may_throttle(void) 2386 { 2387 return !(current->flags & PF_LOCAL_THROTTLE); 2388 } 2389 2390 /* 2391 * shrink_inactive_list() is a helper for shrink_node(). It returns the number 2392 * of reclaimed pages 2393 */ 2394 static unsigned long 2395 shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, 2396 struct scan_control *sc, enum lru_list lru) 2397 { 2398 LIST_HEAD(page_list); 2399 unsigned long nr_scanned; 2400 unsigned int nr_reclaimed = 0; 2401 unsigned long nr_taken; 2402 struct reclaim_stat stat; 2403 bool file = is_file_lru(lru); 2404 enum vm_event_item item; 2405 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 2406 bool stalled = false; 2407 2408 while (unlikely(too_many_isolated(pgdat, file, sc))) { 2409 if (stalled) 2410 return 0; 2411 2412 /* wait a bit for the reclaimer. */ 2413 stalled = true; 2414 reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED); 2415 2416 /* We are about to die and free our memory. Return now. */ 2417 if (fatal_signal_pending(current)) 2418 return SWAP_CLUSTER_MAX; 2419 } 2420 2421 lru_add_drain(); 2422 2423 spin_lock_irq(&lruvec->lru_lock); 2424 2425 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list, 2426 &nr_scanned, sc, lru); 2427 2428 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 2429 item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT; 2430 if (!cgroup_reclaim(sc)) 2431 __count_vm_events(item, nr_scanned); 2432 __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned); 2433 __count_vm_events(PGSCAN_ANON + file, nr_scanned); 2434 2435 spin_unlock_irq(&lruvec->lru_lock); 2436 2437 if (nr_taken == 0) 2438 return 0; 2439 2440 nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false); 2441 2442 spin_lock_irq(&lruvec->lru_lock); 2443 move_pages_to_lru(lruvec, &page_list); 2444 2445 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 2446 item = current_is_kswapd() ? 
PGSTEAL_KSWAPD : PGSTEAL_DIRECT; 2447 if (!cgroup_reclaim(sc)) 2448 __count_vm_events(item, nr_reclaimed); 2449 __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); 2450 __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed); 2451 spin_unlock_irq(&lruvec->lru_lock); 2452 2453 lru_note_cost(lruvec, file, stat.nr_pageout); 2454 mem_cgroup_uncharge_list(&page_list); 2455 free_unref_page_list(&page_list); 2456 2457 /* 2458 * If dirty pages are scanned that are not queued for IO, it 2459 * implies that flushers are not doing their job. This can 2460 * happen when memory pressure pushes dirty pages to the end of 2461 * the LRU before the dirty limits are breached and the dirty 2462 * data has expired. It can also happen when the proportion of 2463 * dirty pages grows not through writes but through memory 2464 * pressure reclaiming all the clean cache. And in some cases, 2465 * the flushers simply cannot keep up with the allocation 2466 * rate. Nudge the flusher threads in case they are asleep. 2467 */ 2468 if (stat.nr_unqueued_dirty == nr_taken) 2469 wakeup_flusher_threads(WB_REASON_VMSCAN); 2470 2471 sc->nr.dirty += stat.nr_dirty; 2472 sc->nr.congested += stat.nr_congested; 2473 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; 2474 sc->nr.writeback += stat.nr_writeback; 2475 sc->nr.immediate += stat.nr_immediate; 2476 sc->nr.taken += nr_taken; 2477 if (file) 2478 sc->nr.file_taken += nr_taken; 2479 2480 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, 2481 nr_scanned, nr_reclaimed, &stat, sc->priority, file); 2482 return nr_reclaimed; 2483 } 2484 2485 /* 2486 * shrink_active_list() moves pages from the active LRU to the inactive LRU. 2487 * 2488 * We move them the other way if the page is referenced by one or more 2489 * processes. 2490 * 2491 * If the pages are mostly unmapped, the processing is fast and it is 2492 * appropriate to hold lru_lock across the whole operation. But if 2493 * the pages are mapped, the processing is slow (folio_referenced()), so 2494 * we should drop lru_lock around each page. It's impossible to balance 2495 * this, so instead we remove the pages from the LRU while processing them. 2496 * It is safe to rely on PG_active against the non-LRU pages in here because 2497 * nobody will play with that bit on a non-LRU page. 2498 * 2499 * The downside is that we have to touch page->_refcount against each page. 2500 * But we had to alter page->flags anyway. 
2501 */ 2502 static void shrink_active_list(unsigned long nr_to_scan, 2503 struct lruvec *lruvec, 2504 struct scan_control *sc, 2505 enum lru_list lru) 2506 { 2507 unsigned long nr_taken; 2508 unsigned long nr_scanned; 2509 unsigned long vm_flags; 2510 LIST_HEAD(l_hold); /* The pages which were snipped off */ 2511 LIST_HEAD(l_active); 2512 LIST_HEAD(l_inactive); 2513 unsigned nr_deactivate, nr_activate; 2514 unsigned nr_rotated = 0; 2515 int file = is_file_lru(lru); 2516 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 2517 2518 lru_add_drain(); 2519 2520 spin_lock_irq(&lruvec->lru_lock); 2521 2522 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold, 2523 &nr_scanned, sc, lru); 2524 2525 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 2526 2527 if (!cgroup_reclaim(sc)) 2528 __count_vm_events(PGREFILL, nr_scanned); 2529 __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned); 2530 2531 spin_unlock_irq(&lruvec->lru_lock); 2532 2533 while (!list_empty(&l_hold)) { 2534 struct folio *folio; 2535 struct page *page; 2536 2537 cond_resched(); 2538 folio = lru_to_folio(&l_hold); 2539 list_del(&folio->lru); 2540 page = &folio->page; 2541 2542 if (unlikely(!page_evictable(page))) { 2543 putback_lru_page(page); 2544 continue; 2545 } 2546 2547 if (unlikely(buffer_heads_over_limit)) { 2548 if (page_has_private(page) && trylock_page(page)) { 2549 if (page_has_private(page)) 2550 try_to_release_page(page, 0); 2551 unlock_page(page); 2552 } 2553 } 2554 2555 /* Referenced or rmap lock contention: rotate */ 2556 if (folio_referenced(folio, 0, sc->target_mem_cgroup, 2557 &vm_flags) != 0) { 2558 /* 2559 * Identify referenced, file-backed active pages and 2560 * give them one more trip around the active list. So 2561 * that executable code get better chances to stay in 2562 * memory under moderate memory pressure. Anon pages 2563 * are not likely to be evicted by use-once streaming 2564 * IO, plus JVM can create lots of anon VM_EXEC pages, 2565 * so we ignore them here. 2566 */ 2567 if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) { 2568 nr_rotated += thp_nr_pages(page); 2569 list_add(&page->lru, &l_active); 2570 continue; 2571 } 2572 } 2573 2574 ClearPageActive(page); /* we are de-activating */ 2575 SetPageWorkingset(page); 2576 list_add(&page->lru, &l_inactive); 2577 } 2578 2579 /* 2580 * Move pages back to the lru list. 
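 * move_pages_to_lru() reuses its input lists for pages whose refcount
 * dropped to zero; those are spliced into l_active below and released
 * via free_unref_page_list().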
2581 */ 2582 spin_lock_irq(&lruvec->lru_lock); 2583 2584 nr_activate = move_pages_to_lru(lruvec, &l_active); 2585 nr_deactivate = move_pages_to_lru(lruvec, &l_inactive); 2586 /* Keep all free pages in l_active list */ 2587 list_splice(&l_inactive, &l_active); 2588 2589 __count_vm_events(PGDEACTIVATE, nr_deactivate); 2590 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate); 2591 2592 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 2593 spin_unlock_irq(&lruvec->lru_lock); 2594 2595 mem_cgroup_uncharge_list(&l_active); 2596 free_unref_page_list(&l_active); 2597 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, 2598 nr_deactivate, nr_rotated, sc->priority, file); 2599 } 2600 2601 static unsigned int reclaim_page_list(struct list_head *page_list, 2602 struct pglist_data *pgdat) 2603 { 2604 struct reclaim_stat dummy_stat; 2605 unsigned int nr_reclaimed; 2606 struct folio *folio; 2607 struct scan_control sc = { 2608 .gfp_mask = GFP_KERNEL, 2609 .may_writepage = 1, 2610 .may_unmap = 1, 2611 .may_swap = 1, 2612 .no_demotion = 1, 2613 }; 2614 2615 nr_reclaimed = shrink_page_list(page_list, pgdat, &sc, &dummy_stat, false); 2616 while (!list_empty(page_list)) { 2617 folio = lru_to_folio(page_list); 2618 list_del(&folio->lru); 2619 folio_putback_lru(folio); 2620 } 2621 2622 return nr_reclaimed; 2623 } 2624 2625 unsigned long reclaim_pages(struct list_head *page_list) 2626 { 2627 int nid; 2628 unsigned int nr_reclaimed = 0; 2629 LIST_HEAD(node_page_list); 2630 struct page *page; 2631 unsigned int noreclaim_flag; 2632 2633 if (list_empty(page_list)) 2634 return nr_reclaimed; 2635 2636 noreclaim_flag = memalloc_noreclaim_save(); 2637 2638 nid = page_to_nid(lru_to_page(page_list)); 2639 do { 2640 page = lru_to_page(page_list); 2641 2642 if (nid == page_to_nid(page)) { 2643 ClearPageActive(page); 2644 list_move(&page->lru, &node_page_list); 2645 continue; 2646 } 2647 2648 nr_reclaimed += reclaim_page_list(&node_page_list, NODE_DATA(nid)); 2649 nid = page_to_nid(lru_to_page(page_list)); 2650 } while (!list_empty(page_list)); 2651 2652 nr_reclaimed += reclaim_page_list(&node_page_list, NODE_DATA(nid)); 2653 2654 memalloc_noreclaim_restore(noreclaim_flag); 2655 2656 return nr_reclaimed; 2657 } 2658 2659 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 2660 struct lruvec *lruvec, struct scan_control *sc) 2661 { 2662 if (is_active_lru(lru)) { 2663 if (sc->may_deactivate & (1 << is_file_lru(lru))) 2664 shrink_active_list(nr_to_scan, lruvec, sc, lru); 2665 else 2666 sc->skipped_deactivate = 1; 2667 return 0; 2668 } 2669 2670 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); 2671 } 2672 2673 /* 2674 * The inactive anon list should be small enough that the VM never has 2675 * to do too much work. 2676 * 2677 * The inactive file list should be small enough to leave most memory 2678 * to the established workingset on the scan-resistant active list, 2679 * but large enough to avoid thrashing the aggregate readahead window. 2680 * 2681 * Both inactive lists should also be large enough that each inactive 2682 * page has a chance to be referenced again before it is reclaimed. 2683 * 2684 * If that fails and refaulting is observed, the inactive list grows. 2685 * 2686 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages 2687 * on this LRU, maintained by the pageout code. An inactive_ratio 2688 * of 3 means 3:1 or 25% of the pages are kept on the inactive list. 
2689 * 2690 * total target max 2691 * memory ratio inactive 2692 * ------------------------------------- 2693 * 10MB 1 5MB 2694 * 100MB 1 50MB 2695 * 1GB 3 250MB 2696 * 10GB 10 0.9GB 2697 * 100GB 31 3GB 2698 * 1TB 101 10GB 2699 * 10TB 320 32GB 2700 */ 2701 static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru) 2702 { 2703 enum lru_list active_lru = inactive_lru + LRU_ACTIVE; 2704 unsigned long inactive, active; 2705 unsigned long inactive_ratio; 2706 unsigned long gb; 2707 2708 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru); 2709 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru); 2710 2711 gb = (inactive + active) >> (30 - PAGE_SHIFT); 2712 if (gb) 2713 inactive_ratio = int_sqrt(10 * gb); 2714 else 2715 inactive_ratio = 1; 2716 2717 return inactive * inactive_ratio < active; 2718 } 2719 2720 enum scan_balance { 2721 SCAN_EQUAL, 2722 SCAN_FRACT, 2723 SCAN_ANON, 2724 SCAN_FILE, 2725 }; 2726 2727 /* 2728 * Determine how aggressively the anon and file LRU lists should be 2729 * scanned. 2730 * 2731 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan 2732 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan 2733 */ 2734 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, 2735 unsigned long *nr) 2736 { 2737 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 2738 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 2739 unsigned long anon_cost, file_cost, total_cost; 2740 int swappiness = mem_cgroup_swappiness(memcg); 2741 u64 fraction[ANON_AND_FILE]; 2742 u64 denominator = 0; /* gcc */ 2743 enum scan_balance scan_balance; 2744 unsigned long ap, fp; 2745 enum lru_list lru; 2746 2747 /* If we have no swap space, do not bother scanning anon pages. */ 2748 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { 2749 scan_balance = SCAN_FILE; 2750 goto out; 2751 } 2752 2753 /* 2754 * Global reclaim will swap to prevent OOM even with no 2755 * swappiness, but memcg users want to use this knob to 2756 * disable swapping for individual groups completely when 2757 * using the memory controller's swap limit feature would be 2758 * too expensive. 2759 */ 2760 if (cgroup_reclaim(sc) && !swappiness) { 2761 scan_balance = SCAN_FILE; 2762 goto out; 2763 } 2764 2765 /* 2766 * Do not apply any pressure balancing cleverness when the 2767 * system is close to OOM, scan both anon and file equally 2768 * (unless the swappiness setting disagrees with swapping). 2769 */ 2770 if (!sc->priority && swappiness) { 2771 scan_balance = SCAN_EQUAL; 2772 goto out; 2773 } 2774 2775 /* 2776 * If the system is almost out of file pages, force-scan anon. 2777 */ 2778 if (sc->file_is_tiny) { 2779 scan_balance = SCAN_ANON; 2780 goto out; 2781 } 2782 2783 /* 2784 * If there is enough inactive page cache, we do not reclaim 2785 * anything from the anonymous working right now. 2786 */ 2787 if (sc->cache_trim_mode) { 2788 scan_balance = SCAN_FILE; 2789 goto out; 2790 } 2791 2792 scan_balance = SCAN_FRACT; 2793 /* 2794 * Calculate the pressure balance between anon and file pages. 2795 * 2796 * The amount of pressure we put on each LRU is inversely 2797 * proportional to the cost of reclaiming each list, as 2798 * determined by the share of pages that are refaulting, times 2799 * the relative IO cost of bringing back a swapped out 2800 * anonymous page vs reloading a filesystem page (swappiness). 
2801 * 2802 * Although we limit that influence to ensure no list gets 2803 * left behind completely: at least a third of the pressure is 2804 * applied, before swappiness. 2805 * 2806 * With swappiness at 100, anon and file have equal IO cost. 2807 */ 2808 total_cost = sc->anon_cost + sc->file_cost; 2809 anon_cost = total_cost + sc->anon_cost; 2810 file_cost = total_cost + sc->file_cost; 2811 total_cost = anon_cost + file_cost; 2812 2813 ap = swappiness * (total_cost + 1); 2814 ap /= anon_cost + 1; 2815 2816 fp = (200 - swappiness) * (total_cost + 1); 2817 fp /= file_cost + 1; 2818 2819 fraction[0] = ap; 2820 fraction[1] = fp; 2821 denominator = ap + fp; 2822 out: 2823 for_each_evictable_lru(lru) { 2824 int file = is_file_lru(lru); 2825 unsigned long lruvec_size; 2826 unsigned long low, min; 2827 unsigned long scan; 2828 2829 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); 2830 mem_cgroup_protection(sc->target_mem_cgroup, memcg, 2831 &min, &low); 2832 2833 if (min || low) { 2834 /* 2835 * Scale a cgroup's reclaim pressure by proportioning 2836 * its current usage to its memory.low or memory.min 2837 * setting. 2838 * 2839 * This is important, as otherwise scanning aggression 2840 * becomes extremely binary -- from nothing as we 2841 * approach the memory protection threshold, to totally 2842 * nominal as we exceed it. This results in requiring 2843 * setting extremely liberal protection thresholds. It 2844 * also means we simply get no protection at all if we 2845 * set it too low, which is not ideal. 2846 * 2847 * If there is any protection in place, we reduce scan 2848 * pressure by how much of the total memory used is 2849 * within protection thresholds. 2850 * 2851 * There is one special case: in the first reclaim pass, 2852 * we skip over all groups that are within their low 2853 * protection. If that fails to reclaim enough pages to 2854 * satisfy the reclaim goal, we come back and override 2855 * the best-effort low protection. However, we still 2856 * ideally want to honor how well-behaved groups are in 2857 * that case instead of simply punishing them all 2858 * equally. As such, we reclaim them based on how much 2859 * memory they are using, reducing the scan pressure 2860 * again by how much of the total memory used is under 2861 * hard protection. 2862 */ 2863 unsigned long cgroup_size = mem_cgroup_size(memcg); 2864 unsigned long protection; 2865 2866 /* memory.low scaling, make sure we retry before OOM */ 2867 if (!sc->memcg_low_reclaim && low > min) { 2868 protection = low; 2869 sc->memcg_low_skipped = 1; 2870 } else { 2871 protection = min; 2872 } 2873 2874 /* Avoid TOCTOU with earlier protection check */ 2875 cgroup_size = max(cgroup_size, protection); 2876 2877 scan = lruvec_size - lruvec_size * protection / 2878 (cgroup_size + 1); 2879 2880 /* 2881 * Minimally target SWAP_CLUSTER_MAX pages to keep 2882 * reclaim moving forwards, avoiding decrementing 2883 * sc->priority further than desirable. 2884 */ 2885 scan = max(scan, SWAP_CLUSTER_MAX); 2886 } else { 2887 scan = lruvec_size; 2888 } 2889 2890 scan >>= sc->priority; 2891 2892 /* 2893 * If the cgroup's already been deleted, make sure to 2894 * scrape out the remaining cache. 2895 */ 2896 if (!scan && !mem_cgroup_online(memcg)) 2897 scan = min(lruvec_size, SWAP_CLUSTER_MAX); 2898 2899 switch (scan_balance) { 2900 case SCAN_EQUAL: 2901 /* Scan lists relative to size */ 2902 break; 2903 case SCAN_FRACT: 2904 /* 2905 * Scan types proportional to swappiness and 2906 * their relative recent reclaim efficiency. 
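 * (Illustrative numbers only: with swappiness == 60, sc->anon_cost == 100
 * and sc->file_cost == 300, the arithmetic above yields ap ~= 143 and
 * fp ~= 239, so roughly 37% of the scan pressure goes to the anon LRUs
 * and 63% to the file LRUs.)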
2907 * Make sure we don't miss the last page on 2908 * the offlined memory cgroups because of a 2909 * round-off error. 2910 */ 2911 scan = mem_cgroup_online(memcg) ? 2912 div64_u64(scan * fraction[file], denominator) : 2913 DIV64_U64_ROUND_UP(scan * fraction[file], 2914 denominator); 2915 break; 2916 case SCAN_FILE: 2917 case SCAN_ANON: 2918 /* Scan one type exclusively */ 2919 if ((scan_balance == SCAN_FILE) != file) 2920 scan = 0; 2921 break; 2922 default: 2923 /* Look ma, no brain */ 2924 BUG(); 2925 } 2926 2927 nr[lru] = scan; 2928 } 2929 } 2930 2931 /* 2932 * Anonymous LRU management is a waste if there is 2933 * ultimately no way to reclaim the memory. 2934 */ 2935 static bool can_age_anon_pages(struct pglist_data *pgdat, 2936 struct scan_control *sc) 2937 { 2938 /* Aging the anon LRU is valuable if swap is present: */ 2939 if (total_swap_pages > 0) 2940 return true; 2941 2942 /* Also valuable if anon pages can be demoted: */ 2943 return can_demote(pgdat->node_id, sc); 2944 } 2945 2946 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 2947 { 2948 unsigned long nr[NR_LRU_LISTS]; 2949 unsigned long targets[NR_LRU_LISTS]; 2950 unsigned long nr_to_scan; 2951 enum lru_list lru; 2952 unsigned long nr_reclaimed = 0; 2953 unsigned long nr_to_reclaim = sc->nr_to_reclaim; 2954 struct blk_plug plug; 2955 bool scan_adjusted; 2956 2957 get_scan_count(lruvec, sc, nr); 2958 2959 /* Record the original scan target for proportional adjustments later */ 2960 memcpy(targets, nr, sizeof(nr)); 2961 2962 /* 2963 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal 2964 * event that can occur when there is little memory pressure e.g. 2965 * multiple streaming readers/writers. Hence, we do not abort scanning 2966 * when the requested number of pages are reclaimed when scanning at 2967 * DEF_PRIORITY on the assumption that the fact we are direct 2968 * reclaiming implies that kswapd is not keeping up and it is best to 2969 * do a batch of work at once. For memcg reclaim one check is made to 2970 * abort proportional reclaim if either the file or anon lru has already 2971 * dropped to zero at the first pass. 2972 */ 2973 scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() && 2974 sc->priority == DEF_PRIORITY); 2975 2976 blk_start_plug(&plug); 2977 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 2978 nr[LRU_INACTIVE_FILE]) { 2979 unsigned long nr_anon, nr_file, percentage; 2980 unsigned long nr_scanned; 2981 2982 for_each_evictable_lru(lru) { 2983 if (nr[lru]) { 2984 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); 2985 nr[lru] -= nr_to_scan; 2986 2987 nr_reclaimed += shrink_list(lru, nr_to_scan, 2988 lruvec, sc); 2989 } 2990 } 2991 2992 cond_resched(); 2993 2994 if (nr_reclaimed < nr_to_reclaim || scan_adjusted) 2995 continue; 2996 2997 /* 2998 * For kswapd and memcg, reclaim at least the number of pages 2999 * requested. Ensure that the anon and file LRUs are scanned 3000 * proportionally what was requested by get_scan_count(). We 3001 * stop reclaiming one LRU and reduce the amount scanning 3002 * proportional to the original scan target. 3003 */ 3004 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE]; 3005 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON]; 3006 3007 /* 3008 * It's just vindictive to attack the larger once the smaller 3009 * has gone to zero. And given the way we stop scanning the 3010 * smaller below, this makes sure that we only make one nudge 3011 * towards proportionality once we've got nr_to_reclaim. 
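 *
 * Illustrative example of the adjustment below: suppose the original
 * targets were 200 anon and 800 file pages, and once nr_to_reclaim has
 * been met 100 anon and 700 file pages remain. Anon is the smaller
 * list, so its remaining scan count is zeroed, and the file target is
 * rescaled so that only about 51% of it ends up scanned in total,
 * approximately the same fraction of its target that anon reached.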
3012 */ 3013 if (!nr_file || !nr_anon) 3014 break; 3015 3016 if (nr_file > nr_anon) { 3017 unsigned long scan_target = targets[LRU_INACTIVE_ANON] + 3018 targets[LRU_ACTIVE_ANON] + 1; 3019 lru = LRU_BASE; 3020 percentage = nr_anon * 100 / scan_target; 3021 } else { 3022 unsigned long scan_target = targets[LRU_INACTIVE_FILE] + 3023 targets[LRU_ACTIVE_FILE] + 1; 3024 lru = LRU_FILE; 3025 percentage = nr_file * 100 / scan_target; 3026 } 3027 3028 /* Stop scanning the smaller of the LRU */ 3029 nr[lru] = 0; 3030 nr[lru + LRU_ACTIVE] = 0; 3031 3032 /* 3033 * Recalculate the other LRU scan count based on its original 3034 * scan target and the percentage scanning already complete 3035 */ 3036 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE; 3037 nr_scanned = targets[lru] - nr[lru]; 3038 nr[lru] = targets[lru] * (100 - percentage) / 100; 3039 nr[lru] -= min(nr[lru], nr_scanned); 3040 3041 lru += LRU_ACTIVE; 3042 nr_scanned = targets[lru] - nr[lru]; 3043 nr[lru] = targets[lru] * (100 - percentage) / 100; 3044 nr[lru] -= min(nr[lru], nr_scanned); 3045 3046 scan_adjusted = true; 3047 } 3048 blk_finish_plug(&plug); 3049 sc->nr_reclaimed += nr_reclaimed; 3050 3051 /* 3052 * Even if we did not try to evict anon pages at all, we want to 3053 * rebalance the anon lru active/inactive ratio. 3054 */ 3055 if (can_age_anon_pages(lruvec_pgdat(lruvec), sc) && 3056 inactive_is_low(lruvec, LRU_INACTIVE_ANON)) 3057 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 3058 sc, LRU_ACTIVE_ANON); 3059 } 3060 3061 /* Use reclaim/compaction for costly allocs or under memory pressure */ 3062 static bool in_reclaim_compaction(struct scan_control *sc) 3063 { 3064 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && 3065 (sc->order > PAGE_ALLOC_COSTLY_ORDER || 3066 sc->priority < DEF_PRIORITY - 2)) 3067 return true; 3068 3069 return false; 3070 } 3071 3072 /* 3073 * Reclaim/compaction is used for high-order allocation requests. It reclaims 3074 * order-0 pages before compacting the zone. should_continue_reclaim() returns 3075 * true if more pages should be reclaimed such that when the page allocator 3076 * calls try_to_compact_pages() that it will have enough free pages to succeed. 3077 * It will give up earlier than that if there is difficulty reclaiming pages. 3078 */ 3079 static inline bool should_continue_reclaim(struct pglist_data *pgdat, 3080 unsigned long nr_reclaimed, 3081 struct scan_control *sc) 3082 { 3083 unsigned long pages_for_compaction; 3084 unsigned long inactive_lru_pages; 3085 int z; 3086 3087 /* If not in reclaim/compaction mode, stop */ 3088 if (!in_reclaim_compaction(sc)) 3089 return false; 3090 3091 /* 3092 * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX 3093 * number of pages that were scanned. This will return to the caller 3094 * with the risk reclaim/compaction and the resulting allocation attempt 3095 * fails. In the past we have tried harder for __GFP_RETRY_MAYFAIL 3096 * allocations through requiring that the full LRU list has been scanned 3097 * first, by assuming that zero delta of sc->nr_scanned means full LRU 3098 * scan, but that approximation was wrong, and there were corner cases 3099 * where always a non-zero amount of pages were scanned. 
3100 */ 3101 if (!nr_reclaimed) 3102 return false; 3103 3104 /* If compaction would go ahead or the allocation would succeed, stop */ 3105 for (z = 0; z <= sc->reclaim_idx; z++) { 3106 struct zone *zone = &pgdat->node_zones[z]; 3107 if (!managed_zone(zone)) 3108 continue; 3109 3110 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) { 3111 case COMPACT_SUCCESS: 3112 case COMPACT_CONTINUE: 3113 return false; 3114 default: 3115 /* check next zone */ 3116 ; 3117 } 3118 } 3119 3120 /* 3121 * If we have not reclaimed enough pages for compaction and the 3122 * inactive lists are large enough, continue reclaiming 3123 */ 3124 pages_for_compaction = compact_gap(sc->order); 3125 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); 3126 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) 3127 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); 3128 3129 return inactive_lru_pages > pages_for_compaction; 3130 } 3131 3132 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) 3133 { 3134 struct mem_cgroup *target_memcg = sc->target_mem_cgroup; 3135 struct mem_cgroup *memcg; 3136 3137 memcg = mem_cgroup_iter(target_memcg, NULL, NULL); 3138 do { 3139 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 3140 unsigned long reclaimed; 3141 unsigned long scanned; 3142 3143 /* 3144 * This loop can become CPU-bound when target memcgs 3145 * aren't eligible for reclaim - either because they 3146 * don't have any reclaimable pages, or because their 3147 * memory is explicitly protected. Avoid soft lockups. 3148 */ 3149 cond_resched(); 3150 3151 mem_cgroup_calculate_protection(target_memcg, memcg); 3152 3153 if (mem_cgroup_below_min(memcg)) { 3154 /* 3155 * Hard protection. 3156 * If there is no reclaimable memory, OOM. 3157 */ 3158 continue; 3159 } else if (mem_cgroup_below_low(memcg)) { 3160 /* 3161 * Soft protection. 3162 * Respect the protection only as long as 3163 * there is an unprotected supply 3164 * of reclaimable memory from other cgroups. 3165 */ 3166 if (!sc->memcg_low_reclaim) { 3167 sc->memcg_low_skipped = 1; 3168 continue; 3169 } 3170 memcg_memory_event(memcg, MEMCG_LOW); 3171 } 3172 3173 reclaimed = sc->nr_reclaimed; 3174 scanned = sc->nr_scanned; 3175 3176 shrink_lruvec(lruvec, sc); 3177 3178 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, 3179 sc->priority); 3180 3181 /* Record the group's reclaim efficiency */ 3182 vmpressure(sc->gfp_mask, memcg, false, 3183 sc->nr_scanned - scanned, 3184 sc->nr_reclaimed - reclaimed); 3185 3186 } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL))); 3187 } 3188 3189 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) 3190 { 3191 struct reclaim_state *reclaim_state = current->reclaim_state; 3192 unsigned long nr_reclaimed, nr_scanned; 3193 struct lruvec *target_lruvec; 3194 bool reclaimable = false; 3195 unsigned long file; 3196 3197 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); 3198 3199 again: 3200 /* 3201 * Flush the memory cgroup stats, so that we read accurate per-memcg 3202 * lruvec stats for heuristics. 3203 */ 3204 mem_cgroup_flush_stats(); 3205 3206 memset(&sc->nr, 0, sizeof(sc->nr)); 3207 3208 nr_reclaimed = sc->nr_reclaimed; 3209 nr_scanned = sc->nr_scanned; 3210 3211 /* 3212 * Determine the scan balance between anon and file LRUs. 
3213 */ 3214 spin_lock_irq(&target_lruvec->lru_lock); 3215 sc->anon_cost = target_lruvec->anon_cost; 3216 sc->file_cost = target_lruvec->file_cost; 3217 spin_unlock_irq(&target_lruvec->lru_lock); 3218 3219 /* 3220 * Target desirable inactive:active list ratios for the anon 3221 * and file LRU lists. 3222 */ 3223 if (!sc->force_deactivate) { 3224 unsigned long refaults; 3225 3226 refaults = lruvec_page_state(target_lruvec, 3227 WORKINGSET_ACTIVATE_ANON); 3228 if (refaults != target_lruvec->refaults[0] || 3229 inactive_is_low(target_lruvec, LRU_INACTIVE_ANON)) 3230 sc->may_deactivate |= DEACTIVATE_ANON; 3231 else 3232 sc->may_deactivate &= ~DEACTIVATE_ANON; 3233 3234 /* 3235 * When refaults are being observed, it means a new 3236 * workingset is being established. Deactivate to get 3237 * rid of any stale active pages quickly. 3238 */ 3239 refaults = lruvec_page_state(target_lruvec, 3240 WORKINGSET_ACTIVATE_FILE); 3241 if (refaults != target_lruvec->refaults[1] || 3242 inactive_is_low(target_lruvec, LRU_INACTIVE_FILE)) 3243 sc->may_deactivate |= DEACTIVATE_FILE; 3244 else 3245 sc->may_deactivate &= ~DEACTIVATE_FILE; 3246 } else 3247 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; 3248 3249 /* 3250 * If we have plenty of inactive file pages that aren't 3251 * thrashing, try to reclaim those first before touching 3252 * anonymous pages. 3253 */ 3254 file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE); 3255 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE)) 3256 sc->cache_trim_mode = 1; 3257 else 3258 sc->cache_trim_mode = 0; 3259 3260 /* 3261 * Prevent the reclaimer from falling into the cache trap: as 3262 * cache pages start out inactive, every cache fault will tip 3263 * the scan balance towards the file LRU. And as the file LRU 3264 * shrinks, so does the window for rotation from references. 3265 * This means we have a runaway feedback loop where a tiny 3266 * thrashing file LRU becomes infinitely more attractive than 3267 * anon pages. Try to detect this based on file LRU size. 3268 */ 3269 if (!cgroup_reclaim(sc)) { 3270 unsigned long total_high_wmark = 0; 3271 unsigned long free, anon; 3272 int z; 3273 3274 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); 3275 file = node_page_state(pgdat, NR_ACTIVE_FILE) + 3276 node_page_state(pgdat, NR_INACTIVE_FILE); 3277 3278 for (z = 0; z < MAX_NR_ZONES; z++) { 3279 struct zone *zone = &pgdat->node_zones[z]; 3280 if (!managed_zone(zone)) 3281 continue; 3282 3283 total_high_wmark += high_wmark_pages(zone); 3284 } 3285 3286 /* 3287 * Consider anon: if that's low too, this isn't a 3288 * runaway file reclaim problem, but rather just 3289 * extreme pressure. Reclaim as per usual then. 
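 *
 * (The "anon >> sc->priority" term below is non-zero only when the
 * node has at least 2^sc->priority inactive anon pages, i.e. there is
 * a meaningful amount of anon memory to switch to.)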
3290 */ 3291 anon = node_page_state(pgdat, NR_INACTIVE_ANON); 3292 3293 sc->file_is_tiny = 3294 file + free <= total_high_wmark && 3295 !(sc->may_deactivate & DEACTIVATE_ANON) && 3296 anon >> sc->priority; 3297 } 3298 3299 shrink_node_memcgs(pgdat, sc); 3300 3301 if (reclaim_state) { 3302 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 3303 reclaim_state->reclaimed_slab = 0; 3304 } 3305 3306 /* Record the subtree's reclaim efficiency */ 3307 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, 3308 sc->nr_scanned - nr_scanned, 3309 sc->nr_reclaimed - nr_reclaimed); 3310 3311 if (sc->nr_reclaimed - nr_reclaimed) 3312 reclaimable = true; 3313 3314 if (current_is_kswapd()) { 3315 /* 3316 * If reclaim is isolating dirty pages under writeback, 3317 * it implies that the long-lived page allocation rate 3318 * is exceeding the page laundering rate. Either the 3319 * global limits are not being effective at throttling 3320 * processes due to the page distribution throughout 3321 * zones or there is heavy usage of a slow backing 3322 * device. The only option is to throttle from reclaim 3323 * context which is not ideal as there is no guarantee 3324 * the dirtying process is throttled in the same way 3325 * balance_dirty_pages() manages. 3326 * 3327 * Once a node is flagged PGDAT_WRITEBACK, kswapd will 3328 * count the number of pages under pages flagged for 3329 * immediate reclaim and stall if any are encountered 3330 * in the nr_immediate check below. 3331 */ 3332 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) 3333 set_bit(PGDAT_WRITEBACK, &pgdat->flags); 3334 3335 /* Allow kswapd to start writing pages during reclaim.*/ 3336 if (sc->nr.unqueued_dirty == sc->nr.file_taken) 3337 set_bit(PGDAT_DIRTY, &pgdat->flags); 3338 3339 /* 3340 * If kswapd scans pages marked for immediate 3341 * reclaim and under writeback (nr_immediate), it 3342 * implies that pages are cycling through the LRU 3343 * faster than they are written so forcibly stall 3344 * until some pages complete writeback. 3345 */ 3346 if (sc->nr.immediate) 3347 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); 3348 } 3349 3350 /* 3351 * Tag a node/memcg as congested if all the dirty pages were marked 3352 * for writeback and immediate reclaim (counted in nr.congested). 3353 * 3354 * Legacy memcg will stall in page writeback so avoid forcibly 3355 * stalling in reclaim_throttle(). 3356 */ 3357 if ((current_is_kswapd() || 3358 (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) && 3359 sc->nr.dirty && sc->nr.dirty == sc->nr.congested) 3360 set_bit(LRUVEC_CONGESTED, &target_lruvec->flags); 3361 3362 /* 3363 * Stall direct reclaim for IO completions if the lruvec is 3364 * node is congested. Allow kswapd to continue until it 3365 * starts encountering unqueued dirty pages or cycling through 3366 * the LRU too quickly. 3367 */ 3368 if (!current_is_kswapd() && current_may_throttle() && 3369 !sc->hibernation_mode && 3370 test_bit(LRUVEC_CONGESTED, &target_lruvec->flags)) 3371 reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED); 3372 3373 if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed, 3374 sc)) 3375 goto again; 3376 3377 /* 3378 * Kswapd gives up on balancing particular nodes after too 3379 * many failures to reclaim anything from them and goes to 3380 * sleep. On reclaim progress, reset the failure counter. A 3381 * successful direct reclaim run will revive a dormant kswapd. 
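 *
 * pgdat->kswapd_failures is also consulted by allow_direct_reclaim()
 * further down: once it reaches MAX_RECLAIM_RETRIES, direct reclaimers
 * are no longer throttled against the pfmemalloc reserves on that node.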
3382 */ 3383 if (reclaimable) 3384 pgdat->kswapd_failures = 0; 3385 } 3386 3387 /* 3388 * Returns true if compaction should go ahead for a costly-order request, or 3389 * the allocation would already succeed without compaction. Returns false if we 3390 * should reclaim first. 3391 */ 3392 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) 3393 { 3394 unsigned long watermark; 3395 enum compact_result suitable; 3396 3397 suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx); 3398 if (suitable == COMPACT_SUCCESS) 3399 /* Allocation should succeed already. Don't reclaim. */ 3400 return true; 3401 if (suitable == COMPACT_SKIPPED) 3402 /* Compaction cannot yet proceed. Do reclaim. */ 3403 return false; 3404 3405 /* 3406 * Compaction is already possible, but it takes time to run and there 3407 * are potentially other callers using the pages just freed. So proceed 3408 * with reclaim to make a buffer of free pages available to give 3409 * compaction a reasonable chance of completing and allocating the page. 3410 * Note that we won't actually reclaim the whole buffer in one attempt 3411 * as the target watermark in should_continue_reclaim() is lower. But if 3412 * we are already above the high+gap watermark, don't reclaim at all. 3413 */ 3414 watermark = high_wmark_pages(zone) + compact_gap(sc->order); 3415 3416 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); 3417 } 3418 3419 static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc) 3420 { 3421 /* 3422 * If reclaim is making progress at better than 12% efficiency (more than 3423 * one page reclaimed per eight scanned), wake all the NOPROGRESS throttled tasks. 3424 */ 3425 if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) { 3426 wait_queue_head_t *wqh; 3427 3428 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS]; 3429 if (waitqueue_active(wqh)) 3430 wake_up(wqh); 3431 3432 return; 3433 } 3434 3435 /* 3436 * Do not throttle kswapd or cgroup reclaim on NOPROGRESS as it will 3437 * throttle on VMSCAN_THROTTLE_WRITEBACK if there are too many pages 3438 * under writeback and marked for immediate reclaim at the tail of the 3439 * LRU. 3440 */ 3441 if (current_is_kswapd() || cgroup_reclaim(sc)) 3442 return; 3443 3444 /* Throttle if making no progress at high priorities. */ 3445 if (sc->priority == 1 && !sc->nr_reclaimed) 3446 reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS); 3447 } 3448 3449 /* 3450 * This is the direct reclaim path, for page-allocating processes. We only 3451 * try to reclaim pages from zones which will satisfy the caller's allocation 3452 * request. 3453 * 3454 * If a zone is deemed to be full of pinned pages then just give it a light 3455 * scan and then give up on it.
3456 */ 3457 static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc) 3458 { 3459 struct zoneref *z; 3460 struct zone *zone; 3461 unsigned long nr_soft_reclaimed; 3462 unsigned long nr_soft_scanned; 3463 gfp_t orig_mask; 3464 pg_data_t *last_pgdat = NULL; 3465 pg_data_t *first_pgdat = NULL; 3466 3467 /* 3468 * If the number of buffer_heads in the machine exceeds the maximum 3469 * allowed level, force direct reclaim to scan the highmem zone as 3470 * highmem pages could be pinning lowmem pages storing buffer_heads 3471 */ 3472 orig_mask = sc->gfp_mask; 3473 if (buffer_heads_over_limit) { 3474 sc->gfp_mask |= __GFP_HIGHMEM; 3475 sc->reclaim_idx = gfp_zone(sc->gfp_mask); 3476 } 3477 3478 for_each_zone_zonelist_nodemask(zone, z, zonelist, 3479 sc->reclaim_idx, sc->nodemask) { 3480 /* 3481 * Take care memory controller reclaiming has small influence 3482 * to global LRU. 3483 */ 3484 if (!cgroup_reclaim(sc)) { 3485 if (!cpuset_zone_allowed(zone, 3486 GFP_KERNEL | __GFP_HARDWALL)) 3487 continue; 3488 3489 /* 3490 * If we already have plenty of memory free for 3491 * compaction in this zone, don't free any more. 3492 * Even though compaction is invoked for any 3493 * non-zero order, only frequent costly order 3494 * reclamation is disruptive enough to become a 3495 * noticeable problem, like transparent huge 3496 * page allocations. 3497 */ 3498 if (IS_ENABLED(CONFIG_COMPACTION) && 3499 sc->order > PAGE_ALLOC_COSTLY_ORDER && 3500 compaction_ready(zone, sc)) { 3501 sc->compaction_ready = true; 3502 continue; 3503 } 3504 3505 /* 3506 * Shrink each node in the zonelist once. If the 3507 * zonelist is ordered by zone (not the default) then a 3508 * node may be shrunk multiple times but in that case 3509 * the user prefers lower zones being preserved. 3510 */ 3511 if (zone->zone_pgdat == last_pgdat) 3512 continue; 3513 3514 /* 3515 * This steals pages from memory cgroups over softlimit 3516 * and returns the number of reclaimed pages and 3517 * scanned pages. This works for global memory pressure 3518 * and balancing, not for a memcg's limit. 3519 */ 3520 nr_soft_scanned = 0; 3521 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat, 3522 sc->order, sc->gfp_mask, 3523 &nr_soft_scanned); 3524 sc->nr_reclaimed += nr_soft_reclaimed; 3525 sc->nr_scanned += nr_soft_scanned; 3526 /* need some check for avoid more shrink_zone() */ 3527 } 3528 3529 if (!first_pgdat) 3530 first_pgdat = zone->zone_pgdat; 3531 3532 /* See comment about same check for global reclaim above */ 3533 if (zone->zone_pgdat == last_pgdat) 3534 continue; 3535 last_pgdat = zone->zone_pgdat; 3536 shrink_node(zone->zone_pgdat, sc); 3537 } 3538 3539 if (first_pgdat) 3540 consider_reclaim_throttle(first_pgdat, sc); 3541 3542 /* 3543 * Restore to original mask to avoid the impact on the caller if we 3544 * promoted it to __GFP_HIGHMEM. 3545 */ 3546 sc->gfp_mask = orig_mask; 3547 } 3548 3549 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) 3550 { 3551 struct lruvec *target_lruvec; 3552 unsigned long refaults; 3553 3554 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); 3555 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON); 3556 target_lruvec->refaults[0] = refaults; 3557 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE); 3558 target_lruvec->refaults[1] = refaults; 3559 } 3560 3561 /* 3562 * This is the main entry point to direct page reclaim. 
3563 * 3564 * If a full scan of the inactive list fails to free enough memory then we 3565 * are "out of memory" and something needs to be killed. 3566 * 3567 * If the caller is !__GFP_FS then the probability of a failure is reasonably 3568 * high - the zone may be full of dirty or under-writeback pages, which this 3569 * caller can't do much about. We kick the writeback threads and take explicit 3570 * naps in the hope that some of these pages can be written. But if the 3571 * allocating task holds filesystem locks which prevent writeout this might not 3572 * work, and the allocation attempt will fail. 3573 * 3574 * returns: 0, if no pages reclaimed 3575 * else, the number of pages reclaimed 3576 */ 3577 static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 3578 struct scan_control *sc) 3579 { 3580 int initial_priority = sc->priority; 3581 pg_data_t *last_pgdat; 3582 struct zoneref *z; 3583 struct zone *zone; 3584 retry: 3585 delayacct_freepages_start(); 3586 3587 if (!cgroup_reclaim(sc)) 3588 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); 3589 3590 do { 3591 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, 3592 sc->priority); 3593 sc->nr_scanned = 0; 3594 shrink_zones(zonelist, sc); 3595 3596 if (sc->nr_reclaimed >= sc->nr_to_reclaim) 3597 break; 3598 3599 if (sc->compaction_ready) 3600 break; 3601 3602 /* 3603 * If we're getting trouble reclaiming, start doing 3604 * writepage even in laptop mode. 3605 */ 3606 if (sc->priority < DEF_PRIORITY - 2) 3607 sc->may_writepage = 1; 3608 } while (--sc->priority >= 0); 3609 3610 last_pgdat = NULL; 3611 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, 3612 sc->nodemask) { 3613 if (zone->zone_pgdat == last_pgdat) 3614 continue; 3615 last_pgdat = zone->zone_pgdat; 3616 3617 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); 3618 3619 if (cgroup_reclaim(sc)) { 3620 struct lruvec *lruvec; 3621 3622 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, 3623 zone->zone_pgdat); 3624 clear_bit(LRUVEC_CONGESTED, &lruvec->flags); 3625 } 3626 } 3627 3628 delayacct_freepages_end(); 3629 3630 if (sc->nr_reclaimed) 3631 return sc->nr_reclaimed; 3632 3633 /* Aborted reclaim to try compaction? don't OOM, then */ 3634 if (sc->compaction_ready) 3635 return 1; 3636 3637 /* 3638 * We make inactive:active ratio decisions based on the node's 3639 * composition of memory, but a restrictive reclaim_idx or a 3640 * memory.low cgroup setting can exempt large amounts of 3641 * memory from reclaim. Neither of which are very common, so 3642 * instead of doing costly eligibility calculations of the 3643 * entire cgroup subtree up front, we assume the estimates are 3644 * good, and retry with forcible deactivation if that fails. 3645 */ 3646 if (sc->skipped_deactivate) { 3647 sc->priority = initial_priority; 3648 sc->force_deactivate = 1; 3649 sc->skipped_deactivate = 0; 3650 goto retry; 3651 } 3652 3653 /* Untapped cgroup reserves? Don't OOM, retry. 
*/ 3654 if (sc->memcg_low_skipped) { 3655 sc->priority = initial_priority; 3656 sc->force_deactivate = 0; 3657 sc->memcg_low_reclaim = 1; 3658 sc->memcg_low_skipped = 0; 3659 goto retry; 3660 } 3661 3662 return 0; 3663 } 3664 3665 static bool allow_direct_reclaim(pg_data_t *pgdat) 3666 { 3667 struct zone *zone; 3668 unsigned long pfmemalloc_reserve = 0; 3669 unsigned long free_pages = 0; 3670 int i; 3671 bool wmark_ok; 3672 3673 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 3674 return true; 3675 3676 for (i = 0; i <= ZONE_NORMAL; i++) { 3677 zone = &pgdat->node_zones[i]; 3678 if (!managed_zone(zone)) 3679 continue; 3680 3681 if (!zone_reclaimable_pages(zone)) 3682 continue; 3683 3684 pfmemalloc_reserve += min_wmark_pages(zone); 3685 free_pages += zone_page_state(zone, NR_FREE_PAGES); 3686 } 3687 3688 /* If there are no reserves (unexpected config) then do not throttle */ 3689 if (!pfmemalloc_reserve) 3690 return true; 3691 3692 wmark_ok = free_pages > pfmemalloc_reserve / 2; 3693 3694 /* kswapd must be awake if processes are being throttled */ 3695 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { 3696 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) 3697 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); 3698 3699 wake_up_interruptible(&pgdat->kswapd_wait); 3700 } 3701 3702 return wmark_ok; 3703 } 3704 3705 /* 3706 * Throttle direct reclaimers if backing storage is backed by the network 3707 * and the PFMEMALLOC reserve for the preferred node is getting dangerously 3708 * depleted. kswapd will continue to make progress and wake the processes 3709 * when the low watermark is reached. 3710 * 3711 * Returns true if a fatal signal was delivered during throttling. If this 3712 * happens, the page allocator should not consider triggering the OOM killer. 3713 */ 3714 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, 3715 nodemask_t *nodemask) 3716 { 3717 struct zoneref *z; 3718 struct zone *zone; 3719 pg_data_t *pgdat = NULL; 3720 3721 /* 3722 * Kernel threads should not be throttled as they may be indirectly 3723 * responsible for cleaning pages necessary for reclaim to make forward 3724 * progress. kjournald for example may enter direct reclaim while 3725 * committing a transaction where throttling it could forcing other 3726 * processes to block on log_wait_commit(). 3727 */ 3728 if (current->flags & PF_KTHREAD) 3729 goto out; 3730 3731 /* 3732 * If a fatal signal is pending, this process should not throttle. 3733 * It should return quickly so it can exit and free its memory 3734 */ 3735 if (fatal_signal_pending(current)) 3736 goto out; 3737 3738 /* 3739 * Check if the pfmemalloc reserves are ok by finding the first node 3740 * with a usable ZONE_NORMAL or lower zone. The expectation is that 3741 * GFP_KERNEL will be required for allocating network buffers when 3742 * swapping over the network so ZONE_HIGHMEM is unusable. 3743 * 3744 * Throttling is based on the first usable node and throttled processes 3745 * wait on a queue until kswapd makes progress and wakes them. There 3746 * is an affinity then between processes waking up and where reclaim 3747 * progress has been made assuming the process wakes on the same node. 3748 * More importantly, processes running on remote nodes will not compete 3749 * for remote pfmemalloc reserves and processes on different nodes 3750 * should make reasonable progress. 
3751 */ 3752 for_each_zone_zonelist_nodemask(zone, z, zonelist, 3753 gfp_zone(gfp_mask), nodemask) { 3754 if (zone_idx(zone) > ZONE_NORMAL) 3755 continue; 3756 3757 /* Throttle based on the first usable node */ 3758 pgdat = zone->zone_pgdat; 3759 if (allow_direct_reclaim(pgdat)) 3760 goto out; 3761 break; 3762 } 3763 3764 /* If no zone was usable by the allocation flags then do not throttle */ 3765 if (!pgdat) 3766 goto out; 3767 3768 /* Account for the throttling */ 3769 count_vm_event(PGSCAN_DIRECT_THROTTLE); 3770 3771 /* 3772 * If the caller cannot enter the filesystem, it's possible that it 3773 * is due to the caller holding an FS lock or performing a journal 3774 * transaction in the case of a filesystem like ext[3|4]. In this case, 3775 * it is not safe to block on pfmemalloc_wait as kswapd could be 3776 * blocked waiting on the same lock. Instead, throttle for up to a 3777 * second before continuing. 3778 */ 3779 if (!(gfp_mask & __GFP_FS)) 3780 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, 3781 allow_direct_reclaim(pgdat), HZ); 3782 else 3783 /* Throttle until kswapd wakes the process */ 3784 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, 3785 allow_direct_reclaim(pgdat)); 3786 3787 if (fatal_signal_pending(current)) 3788 return true; 3789 3790 out: 3791 return false; 3792 } 3793 3794 unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 3795 gfp_t gfp_mask, nodemask_t *nodemask) 3796 { 3797 unsigned long nr_reclaimed; 3798 struct scan_control sc = { 3799 .nr_to_reclaim = SWAP_CLUSTER_MAX, 3800 .gfp_mask = current_gfp_context(gfp_mask), 3801 .reclaim_idx = gfp_zone(gfp_mask), 3802 .order = order, 3803 .nodemask = nodemask, 3804 .priority = DEF_PRIORITY, 3805 .may_writepage = !laptop_mode, 3806 .may_unmap = 1, 3807 .may_swap = 1, 3808 }; 3809 3810 /* 3811 * scan_control uses s8 fields for order, priority, and reclaim_idx. 3812 * Confirm they are large enough for max values. 3813 */ 3814 BUILD_BUG_ON(MAX_ORDER > S8_MAX); 3815 BUILD_BUG_ON(DEF_PRIORITY > S8_MAX); 3816 BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX); 3817 3818 /* 3819 * Do not enter reclaim if fatal signal was delivered while throttled. 3820 * 1 is returned so that the page allocator does not OOM kill at this 3821 * point. 3822 */ 3823 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) 3824 return 1; 3825 3826 set_task_reclaim_state(current, &sc.reclaim_state); 3827 trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask); 3828 3829 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 3830 3831 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 3832 set_task_reclaim_state(current, NULL); 3833 3834 return nr_reclaimed; 3835 } 3836 3837 #ifdef CONFIG_MEMCG 3838 3839 /* Only used by soft limit reclaim. Do not reuse for anything else. 
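It scans only the target memcg's lruvec on @pgdat (effectively at priority 0, as noted in the function body) and reports the pages scanned back through @nr_scanned.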
*/ 3840 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, 3841 gfp_t gfp_mask, bool noswap, 3842 pg_data_t *pgdat, 3843 unsigned long *nr_scanned) 3844 { 3845 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 3846 struct scan_control sc = { 3847 .nr_to_reclaim = SWAP_CLUSTER_MAX, 3848 .target_mem_cgroup = memcg, 3849 .may_writepage = !laptop_mode, 3850 .may_unmap = 1, 3851 .reclaim_idx = MAX_NR_ZONES - 1, 3852 .may_swap = !noswap, 3853 }; 3854 3855 WARN_ON_ONCE(!current->reclaim_state); 3856 3857 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 3858 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 3859 3860 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, 3861 sc.gfp_mask); 3862 3863 /* 3864 * NOTE: Although we can get the priority field, using it 3865 * here is not a good idea, since it limits the pages we can scan. 3866 * if we don't reclaim here, the shrink_node from balance_pgdat 3867 * will pick up pages from other mem cgroup's as well. We hack 3868 * the priority and make it zero. 3869 */ 3870 shrink_lruvec(lruvec, &sc); 3871 3872 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 3873 3874 *nr_scanned = sc.nr_scanned; 3875 3876 return sc.nr_reclaimed; 3877 } 3878 3879 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, 3880 unsigned long nr_pages, 3881 gfp_t gfp_mask, 3882 bool may_swap) 3883 { 3884 unsigned long nr_reclaimed; 3885 unsigned int noreclaim_flag; 3886 struct scan_control sc = { 3887 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), 3888 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) | 3889 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), 3890 .reclaim_idx = MAX_NR_ZONES - 1, 3891 .target_mem_cgroup = memcg, 3892 .priority = DEF_PRIORITY, 3893 .may_writepage = !laptop_mode, 3894 .may_unmap = 1, 3895 .may_swap = may_swap, 3896 }; 3897 /* 3898 * Traverse the ZONELIST_FALLBACK zonelist of the current node to put 3899 * equal pressure on all the nodes. This is based on the assumption that 3900 * the reclaim does not bail out early. 3901 */ 3902 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 3903 3904 set_task_reclaim_state(current, &sc.reclaim_state); 3905 trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask); 3906 noreclaim_flag = memalloc_noreclaim_save(); 3907 3908 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 3909 3910 memalloc_noreclaim_restore(noreclaim_flag); 3911 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 3912 set_task_reclaim_state(current, NULL); 3913 3914 return nr_reclaimed; 3915 } 3916 #endif 3917 3918 static void age_active_anon(struct pglist_data *pgdat, 3919 struct scan_control *sc) 3920 { 3921 struct mem_cgroup *memcg; 3922 struct lruvec *lruvec; 3923 3924 if (!can_age_anon_pages(pgdat, sc)) 3925 return; 3926 3927 lruvec = mem_cgroup_lruvec(NULL, pgdat); 3928 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON)) 3929 return; 3930 3931 memcg = mem_cgroup_iter(NULL, NULL, NULL); 3932 do { 3933 lruvec = mem_cgroup_lruvec(memcg, pgdat); 3934 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 3935 sc, LRU_ACTIVE_ANON); 3936 memcg = mem_cgroup_iter(NULL, memcg, NULL); 3937 } while (memcg); 3938 } 3939 3940 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx) 3941 { 3942 int i; 3943 struct zone *zone; 3944 3945 /* 3946 * Check for watermark boosts top-down as the higher zones 3947 * are more likely to be boosted. 
Both watermarks and boosts 3948 * should not be checked at the same time as reclaim would 3949 * start prematurely when there is no boosting and a lower 3950 * zone is balanced. 3951 */ 3952 for (i = highest_zoneidx; i >= 0; i--) { 3953 zone = pgdat->node_zones + i; 3954 if (!managed_zone(zone)) 3955 continue; 3956 3957 if (zone->watermark_boost) 3958 return true; 3959 } 3960 3961 return false; 3962 } 3963 3964 /* 3965 * Returns true if there is an eligible zone balanced for the request order 3966 * and highest_zoneidx 3967 */ 3968 static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) 3969 { 3970 int i; 3971 unsigned long mark = -1; 3972 struct zone *zone; 3973 3974 /* 3975 * Check watermarks bottom-up as lower zones are more likely to 3976 * meet watermarks. 3977 */ 3978 for (i = 0; i <= highest_zoneidx; i++) { 3979 zone = pgdat->node_zones + i; 3980 3981 if (!managed_zone(zone)) 3982 continue; 3983 3984 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) 3985 mark = wmark_pages(zone, WMARK_PROMO); 3986 else 3987 mark = high_wmark_pages(zone); 3988 if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx)) 3989 return true; 3990 } 3991 3992 /* 3993 * If a node has no managed zone within highest_zoneidx, it does not 3994 * need balancing by definition. This can happen if a zone-restricted 3995 * allocation tries to wake a remote kswapd. 3996 */ 3997 if (mark == -1) 3998 return true; 3999 4000 return false; 4001 } 4002 4003 /* Clear pgdat state for congested, dirty or under writeback. */ 4004 static void clear_pgdat_congested(pg_data_t *pgdat) 4005 { 4006 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); 4007 4008 clear_bit(LRUVEC_CONGESTED, &lruvec->flags); 4009 clear_bit(PGDAT_DIRTY, &pgdat->flags); 4010 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); 4011 } 4012 4013 /* 4014 * Prepare kswapd for sleeping. This verifies that there are no processes 4015 * waiting in throttle_direct_reclaim() and that watermarks have been met. 4016 * 4017 * Returns true if kswapd is ready to sleep 4018 */ 4019 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, 4020 int highest_zoneidx) 4021 { 4022 /* 4023 * The throttled processes are normally woken up in balance_pgdat() as 4024 * soon as allow_direct_reclaim() is true. But there is a potential 4025 * race between when kswapd checks the watermarks and a process gets 4026 * throttled. There is also a potential race if processes get 4027 * throttled, kswapd wakes, a large process exits thereby balancing the 4028 * zones, which causes kswapd to exit balance_pgdat() before reaching 4029 * the wake up checks. If kswapd is going to sleep, no process should 4030 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If 4031 * the wake up is premature, processes will wake kswapd and get 4032 * throttled again. The difference from wake ups in balance_pgdat() is 4033 * that here we are under prepare_to_wait(). 4034 */ 4035 if (waitqueue_active(&pgdat->pfmemalloc_wait)) 4036 wake_up_all(&pgdat->pfmemalloc_wait); 4037 4038 /* Hopeless node, leave it to direct reclaim */ 4039 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 4040 return true; 4041 4042 if (pgdat_balanced(pgdat, order, highest_zoneidx)) { 4043 clear_pgdat_congested(pgdat); 4044 return true; 4045 } 4046 4047 return false; 4048 } 4049 4050 /* 4051 * kswapd shrinks a node of pages that are at or below the highest usable 4052 * zone that is currently unbalanced. 
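* The reclaim target is the sum over all eligible zones of max(high watermark, SWAP_CLUSTER_MAX); see the sc->nr_to_reclaim calculation below.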
4053 *
4054 * Returns true if kswapd scanned at least the requested number of pages to
4055 * reclaim or if the lack of progress was due to pages under writeback.
4056 * This is used to determine if the scanning priority needs to be raised.
4057 */
4058 static bool kswapd_shrink_node(pg_data_t *pgdat,
4059 struct scan_control *sc)
4060 {
4061 struct zone *zone;
4062 int z;
4063
4064 /* Reclaim a number of pages proportional to the number of zones */
4065 sc->nr_to_reclaim = 0;
4066 for (z = 0; z <= sc->reclaim_idx; z++) {
4067 zone = pgdat->node_zones + z;
4068 if (!managed_zone(zone))
4069 continue;
4070
4071 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
4072 }
4073
4074 /*
4075 * Historically care was taken to put equal pressure on all zones but
4076 * now pressure is applied based on node LRU order.
4077 */
4078 shrink_node(pgdat, sc);
4079
4080 /*
4081 * Fragmentation may mean that the system cannot be rebalanced for
4082 * high-order allocations. If twice the allocation size has been
4083 * reclaimed then recheck watermarks only at order-0 to prevent
4084 * excessive reclaim. Assume that a process that requested a high-order
4085 * allocation can direct reclaim/compact.
4086 */
4087 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
4088 sc->order = 0;
4089
4090 return sc->nr_scanned >= sc->nr_to_reclaim;
4091 }
4092
4093 /* Page allocator PCP high watermark is lowered if reclaim is active. */
4094 static inline void
4095 update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active)
4096 {
4097 int i;
4098 struct zone *zone;
4099
4100 for (i = 0; i <= highest_zoneidx; i++) {
4101 zone = pgdat->node_zones + i;
4102
4103 if (!managed_zone(zone))
4104 continue;
4105
4106 if (active)
4107 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
4108 else
4109 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
4110 }
4111 }
4112
4113 static inline void
4114 set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
4115 {
4116 update_reclaim_active(pgdat, highest_zoneidx, true);
4117 }
4118
4119 static inline void
4120 clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
4121 {
4122 update_reclaim_active(pgdat, highest_zoneidx, false);
4123 }
4124
4125 /*
4126 * For kswapd, balance_pgdat() will reclaim pages across a node from zones
4127 * that are eligible for use by the caller until at least one zone is
4128 * balanced.
4129 *
4130 * Returns the order kswapd finished reclaiming at.
4131 *
4132 * kswapd scans the zones in the highmem->normal->dma direction. It skips
4133 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
4134 * found to have free_pages <= high_wmark_pages(zone), any page in that zone
4135 * or lower is eligible for reclaim until at least one usable zone is
4136 * balanced.
4137 */
4138 static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
4139 {
4140 int i;
4141 unsigned long nr_soft_reclaimed;
4142 unsigned long nr_soft_scanned;
4143 unsigned long pflags;
4144 unsigned long nr_boost_reclaim;
4145 unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
4146 bool boosted;
4147 struct zone *zone;
4148 struct scan_control sc = {
4149 .gfp_mask = GFP_KERNEL,
4150 .order = order,
4151 .may_unmap = 1,
4152 };
4153
4154 set_task_reclaim_state(current, &sc.reclaim_state);
4155 psi_memstall_enter(&pflags);
4156 __fs_reclaim_acquire(_THIS_IP_);
4157
4158 count_vm_event(PAGEOUTRUN);
4159
4160 /*
4161 * Account for the reclaim boost.
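The per-zone boosts are summed into nr_boost_reclaim, which acts as the budget for boosted reclaim in the loop below.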
Note that the zone boost is left in 4162 * place so that parallel allocations that are near the watermark will 4163 * stall or direct reclaim until kswapd is finished. 4164 */ 4165 nr_boost_reclaim = 0; 4166 for (i = 0; i <= highest_zoneidx; i++) { 4167 zone = pgdat->node_zones + i; 4168 if (!managed_zone(zone)) 4169 continue; 4170 4171 nr_boost_reclaim += zone->watermark_boost; 4172 zone_boosts[i] = zone->watermark_boost; 4173 } 4174 boosted = nr_boost_reclaim; 4175 4176 restart: 4177 set_reclaim_active(pgdat, highest_zoneidx); 4178 sc.priority = DEF_PRIORITY; 4179 do { 4180 unsigned long nr_reclaimed = sc.nr_reclaimed; 4181 bool raise_priority = true; 4182 bool balanced; 4183 bool ret; 4184 4185 sc.reclaim_idx = highest_zoneidx; 4186 4187 /* 4188 * If the number of buffer_heads exceeds the maximum allowed 4189 * then consider reclaiming from all zones. This has a dual 4190 * purpose -- on 64-bit systems it is expected that 4191 * buffer_heads are stripped during active rotation. On 32-bit 4192 * systems, highmem pages can pin lowmem memory and shrinking 4193 * buffers can relieve lowmem pressure. Reclaim may still not 4194 * go ahead if all eligible zones for the original allocation 4195 * request are balanced to avoid excessive reclaim from kswapd. 4196 */ 4197 if (buffer_heads_over_limit) { 4198 for (i = MAX_NR_ZONES - 1; i >= 0; i--) { 4199 zone = pgdat->node_zones + i; 4200 if (!managed_zone(zone)) 4201 continue; 4202 4203 sc.reclaim_idx = i; 4204 break; 4205 } 4206 } 4207 4208 /* 4209 * If the pgdat is imbalanced then ignore boosting and preserve 4210 * the watermarks for a later time and restart. Note that the 4211 * zone watermarks will be still reset at the end of balancing 4212 * on the grounds that the normal reclaim should be enough to 4213 * re-evaluate if boosting is required when kswapd next wakes. 4214 */ 4215 balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx); 4216 if (!balanced && nr_boost_reclaim) { 4217 nr_boost_reclaim = 0; 4218 goto restart; 4219 } 4220 4221 /* 4222 * If boosting is not active then only reclaim if there are no 4223 * eligible zones. Note that sc.reclaim_idx is not used as 4224 * buffer_heads_over_limit may have adjusted it. 4225 */ 4226 if (!nr_boost_reclaim && balanced) 4227 goto out; 4228 4229 /* Limit the priority of boosting to avoid reclaim writeback */ 4230 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) 4231 raise_priority = false; 4232 4233 /* 4234 * Do not writeback or swap pages for boosted reclaim. The 4235 * intent is to relieve pressure not issue sub-optimal IO 4236 * from reclaim context. If no pages are reclaimed, the 4237 * reclaim will be aborted. 4238 */ 4239 sc.may_writepage = !laptop_mode && !nr_boost_reclaim; 4240 sc.may_swap = !nr_boost_reclaim; 4241 4242 /* 4243 * Do some background aging of the anon list, to give 4244 * pages a chance to be referenced before reclaiming. All 4245 * pages are rotated regardless of classzone as this is 4246 * about consistent aging. 4247 */ 4248 age_active_anon(pgdat, &sc); 4249 4250 /* 4251 * If we're getting trouble reclaiming, start doing writepage 4252 * even in laptop mode. 4253 */ 4254 if (sc.priority < DEF_PRIORITY - 2) 4255 sc.may_writepage = 1; 4256 4257 /* Call soft limit reclaim before calling shrink_node. 
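Any pages it frees are credited to sc.nr_reclaimed before kswapd_shrink_node() runs.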
*/
4258 sc.nr_scanned = 0;
4259 nr_soft_scanned = 0;
4260 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
4261 sc.gfp_mask, &nr_soft_scanned);
4262 sc.nr_reclaimed += nr_soft_reclaimed;
4263
4264 /*
4265 * There should be no need to raise the scanning priority if
4266 * enough pages are already being scanned that the high
4267 * watermark would be met at 100% efficiency.
4268 */
4269 if (kswapd_shrink_node(pgdat, &sc))
4270 raise_priority = false;
4271
4272 /*
4273 * If the low watermark is met there is no need for processes
4274 * to be throttled on pfmemalloc_wait as they should now be
4275 * able to safely make forward progress. Wake them.
4276 */
4277 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
4278 allow_direct_reclaim(pgdat))
4279 wake_up_all(&pgdat->pfmemalloc_wait);
4280
4281 /* Check if kswapd should be suspending */
4282 __fs_reclaim_release(_THIS_IP_);
4283 ret = try_to_freeze();
4284 __fs_reclaim_acquire(_THIS_IP_);
4285 if (ret || kthread_should_stop())
4286 break;
4287
4288 /*
4289 * Raise priority if scanning rate is too low or there was no
4290 * progress in reclaiming pages.
4291 */
4292 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
4293 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
4294
4295 /*
4296 * If reclaim made no progress for a boost, stop reclaim as
4297 * IO cannot be queued and it could be an infinite loop in
4298 * extreme circumstances.
4299 */
4300 if (nr_boost_reclaim && !nr_reclaimed)
4301 break;
4302
4303 if (raise_priority || !nr_reclaimed)
4304 sc.priority--;
4305 } while (sc.priority >= 1);
4306
4307 if (!sc.nr_reclaimed)
4308 pgdat->kswapd_failures++;
4309
4310 out:
4311 clear_reclaim_active(pgdat, highest_zoneidx);
4312
4313 /* If reclaim was boosted, account for the reclaim done in this pass */
4314 if (boosted) {
4315 unsigned long flags;
4316
4317 for (i = 0; i <= highest_zoneidx; i++) {
4318 if (!zone_boosts[i])
4319 continue;
4320
4321 /* Increments are under the zone lock */
4322 zone = pgdat->node_zones + i;
4323 spin_lock_irqsave(&zone->lock, flags);
4324 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
4325 spin_unlock_irqrestore(&zone->lock, flags);
4326 }
4327
4328 /*
4329 * As there is now likely space, wakeup kcompactd to defragment
4330 * pageblocks.
4331 */
4332 wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx);
4333 }
4334
4335 snapshot_refaults(NULL, pgdat);
4336 __fs_reclaim_release(_THIS_IP_);
4337 psi_memstall_leave(&pflags);
4338 set_task_reclaim_state(current, NULL);
4339
4340 /*
4341 * Return the order kswapd stopped reclaiming at as
4342 * prepare_kswapd_sleep() takes it into account. If another caller
4343 * entered the allocator slow path while kswapd was awake, order will
4344 * remain at the higher level.
4345 */
4346 return sc.order;
4347 }
4348
4349 /*
4350 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
4351 * be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is
4352 * not a valid index then either kswapd runs for the first time or kswapd
4353 * couldn't sleep after the previous reclaim attempt (node is still unbalanced).
4354 * In that case return the zone index of the previous kswapd reclaim cycle.
4355 */
4356 static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat,
4357 enum zone_type prev_highest_zoneidx)
4358 {
4359 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
4360
4361 return curr_idx == MAX_NR_ZONES ?
prev_highest_zoneidx : curr_idx; 4362 } 4363 4364 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, 4365 unsigned int highest_zoneidx) 4366 { 4367 long remaining = 0; 4368 DEFINE_WAIT(wait); 4369 4370 if (freezing(current) || kthread_should_stop()) 4371 return; 4372 4373 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 4374 4375 /* 4376 * Try to sleep for a short interval. Note that kcompactd will only be 4377 * woken if it is possible to sleep for a short interval. This is 4378 * deliberate on the assumption that if reclaim cannot keep an 4379 * eligible zone balanced that it's also unlikely that compaction will 4380 * succeed. 4381 */ 4382 if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { 4383 /* 4384 * Compaction records what page blocks it recently failed to 4385 * isolate pages from and skips them in the future scanning. 4386 * When kswapd is going to sleep, it is reasonable to assume 4387 * that pages and compaction may succeed so reset the cache. 4388 */ 4389 reset_isolation_suitable(pgdat); 4390 4391 /* 4392 * We have freed the memory, now we should compact it to make 4393 * allocation of the requested order possible. 4394 */ 4395 wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx); 4396 4397 remaining = schedule_timeout(HZ/10); 4398 4399 /* 4400 * If woken prematurely then reset kswapd_highest_zoneidx and 4401 * order. The values will either be from a wakeup request or 4402 * the previous request that slept prematurely. 4403 */ 4404 if (remaining) { 4405 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, 4406 kswapd_highest_zoneidx(pgdat, 4407 highest_zoneidx)); 4408 4409 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) 4410 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); 4411 } 4412 4413 finish_wait(&pgdat->kswapd_wait, &wait); 4414 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 4415 } 4416 4417 /* 4418 * After a short sleep, check if it was a premature sleep. If not, then 4419 * go fully to sleep until explicitly woken up. 4420 */ 4421 if (!remaining && 4422 prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { 4423 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 4424 4425 /* 4426 * vmstat counters are not perfectly accurate and the estimated 4427 * value for counters such as NR_FREE_PAGES can deviate from the 4428 * true value by nr_online_cpus * threshold. To avoid the zone 4429 * watermarks being breached while under pressure, we reduce the 4430 * per-cpu vmstat threshold while kswapd is awake and restore 4431 * them before going back to sleep. 4432 */ 4433 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 4434 4435 if (!kthread_should_stop()) 4436 schedule(); 4437 4438 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); 4439 } else { 4440 if (remaining) 4441 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 4442 else 4443 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 4444 } 4445 finish_wait(&pgdat->kswapd_wait, &wait); 4446 } 4447 4448 /* 4449 * The background pageout daemon, started as a kernel thread 4450 * from the init process. 4451 * 4452 * This basically trickles out pages so that we have _some_ 4453 * free memory available even if there is no other activity 4454 * that frees anything up. This is needed for things like routing 4455 * etc, where we otherwise might have all activity going on in 4456 * asynchronous contexts that cannot page things out. 
4457 * 4458 * If there are applications that are active memory-allocators 4459 * (most normal use), this basically shouldn't matter. 4460 */ 4461 static int kswapd(void *p) 4462 { 4463 unsigned int alloc_order, reclaim_order; 4464 unsigned int highest_zoneidx = MAX_NR_ZONES - 1; 4465 pg_data_t *pgdat = (pg_data_t *)p; 4466 struct task_struct *tsk = current; 4467 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 4468 4469 if (!cpumask_empty(cpumask)) 4470 set_cpus_allowed_ptr(tsk, cpumask); 4471 4472 /* 4473 * Tell the memory management that we're a "memory allocator", 4474 * and that if we need more memory we should get access to it 4475 * regardless (see "__alloc_pages()"). "kswapd" should 4476 * never get caught in the normal page freeing logic. 4477 * 4478 * (Kswapd normally doesn't need memory anyway, but sometimes 4479 * you need a small amount of memory in order to be able to 4480 * page out something else, and this flag essentially protects 4481 * us from recursively trying to free more memory as we're 4482 * trying to free the first piece of memory in the first place). 4483 */ 4484 tsk->flags |= PF_MEMALLOC | PF_KSWAPD; 4485 set_freezable(); 4486 4487 WRITE_ONCE(pgdat->kswapd_order, 0); 4488 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); 4489 atomic_set(&pgdat->nr_writeback_throttled, 0); 4490 for ( ; ; ) { 4491 bool ret; 4492 4493 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); 4494 highest_zoneidx = kswapd_highest_zoneidx(pgdat, 4495 highest_zoneidx); 4496 4497 kswapd_try_sleep: 4498 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, 4499 highest_zoneidx); 4500 4501 /* Read the new order and highest_zoneidx */ 4502 alloc_order = READ_ONCE(pgdat->kswapd_order); 4503 highest_zoneidx = kswapd_highest_zoneidx(pgdat, 4504 highest_zoneidx); 4505 WRITE_ONCE(pgdat->kswapd_order, 0); 4506 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); 4507 4508 ret = try_to_freeze(); 4509 if (kthread_should_stop()) 4510 break; 4511 4512 /* 4513 * We can speed up thawing tasks if we don't call balance_pgdat 4514 * after returning from the refrigerator 4515 */ 4516 if (ret) 4517 continue; 4518 4519 /* 4520 * Reclaim begins at the requested order but if a high-order 4521 * reclaim fails then kswapd falls back to reclaiming for 4522 * order-0. If that happens, kswapd will consider sleeping 4523 * for the order it finished reclaiming at (reclaim_order) 4524 * but kcompactd is woken to compact for the original 4525 * request (alloc_order). 4526 */ 4527 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, 4528 alloc_order); 4529 reclaim_order = balance_pgdat(pgdat, alloc_order, 4530 highest_zoneidx); 4531 if (reclaim_order < alloc_order) 4532 goto kswapd_try_sleep; 4533 } 4534 4535 tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD); 4536 4537 return 0; 4538 } 4539 4540 /* 4541 * A zone is low on free memory or too fragmented for high-order memory. If 4542 * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's 4543 * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim 4544 * has failed or is not needed, still wake up kcompactd if only compaction is 4545 * needed. 
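* The requested order and highest zone index are recorded in pgdat so that kswapd picks them up when it wakes; if kswapd is not currently waiting on kswapd_wait, no wakeup is sent.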
4546 */ 4547 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, 4548 enum zone_type highest_zoneidx) 4549 { 4550 pg_data_t *pgdat; 4551 enum zone_type curr_idx; 4552 4553 if (!managed_zone(zone)) 4554 return; 4555 4556 if (!cpuset_zone_allowed(zone, gfp_flags)) 4557 return; 4558 4559 pgdat = zone->zone_pgdat; 4560 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); 4561 4562 if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx) 4563 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); 4564 4565 if (READ_ONCE(pgdat->kswapd_order) < order) 4566 WRITE_ONCE(pgdat->kswapd_order, order); 4567 4568 if (!waitqueue_active(&pgdat->kswapd_wait)) 4569 return; 4570 4571 /* Hopeless node, leave it to direct reclaim if possible */ 4572 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || 4573 (pgdat_balanced(pgdat, order, highest_zoneidx) && 4574 !pgdat_watermark_boosted(pgdat, highest_zoneidx))) { 4575 /* 4576 * There may be plenty of free memory available, but it's too 4577 * fragmented for high-order allocations. Wake up kcompactd 4578 * and rely on compaction_suitable() to determine if it's 4579 * needed. If it fails, it will defer subsequent attempts to 4580 * ratelimit its work. 4581 */ 4582 if (!(gfp_flags & __GFP_DIRECT_RECLAIM)) 4583 wakeup_kcompactd(pgdat, order, highest_zoneidx); 4584 return; 4585 } 4586 4587 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, 4588 gfp_flags); 4589 wake_up_interruptible(&pgdat->kswapd_wait); 4590 } 4591 4592 #ifdef CONFIG_HIBERNATION 4593 /* 4594 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 4595 * freed pages. 4596 * 4597 * Rather than trying to age LRUs the aim is to preserve the overall 4598 * LRU order by reclaiming preferentially 4599 * inactive > active > active referenced > active mapped 4600 */ 4601 unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 4602 { 4603 struct scan_control sc = { 4604 .nr_to_reclaim = nr_to_reclaim, 4605 .gfp_mask = GFP_HIGHUSER_MOVABLE, 4606 .reclaim_idx = MAX_NR_ZONES - 1, 4607 .priority = DEF_PRIORITY, 4608 .may_writepage = 1, 4609 .may_unmap = 1, 4610 .may_swap = 1, 4611 .hibernation_mode = 1, 4612 }; 4613 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 4614 unsigned long nr_reclaimed; 4615 unsigned int noreclaim_flag; 4616 4617 fs_reclaim_acquire(sc.gfp_mask); 4618 noreclaim_flag = memalloc_noreclaim_save(); 4619 set_task_reclaim_state(current, &sc.reclaim_state); 4620 4621 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 4622 4623 set_task_reclaim_state(current, NULL); 4624 memalloc_noreclaim_restore(noreclaim_flag); 4625 fs_reclaim_release(sc.gfp_mask); 4626 4627 return nr_reclaimed; 4628 } 4629 #endif /* CONFIG_HIBERNATION */ 4630 4631 /* 4632 * This kswapd start function will be called by init and node-hot-add. 4633 */ 4634 void kswapd_run(int nid) 4635 { 4636 pg_data_t *pgdat = NODE_DATA(nid); 4637 4638 if (pgdat->kswapd) 4639 return; 4640 4641 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 4642 if (IS_ERR(pgdat->kswapd)) { 4643 /* failure at boot is fatal */ 4644 BUG_ON(system_state < SYSTEM_RUNNING); 4645 pr_err("Failed to start kswapd on node %d\n", nid); 4646 pgdat->kswapd = NULL; 4647 } 4648 } 4649 4650 /* 4651 * Called by memory hotplug when all memory in a node is offlined. Caller must 4652 * hold mem_hotplug_begin/end(). 
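* The kswapd task is stopped with kthread_stop() and pgdat->kswapd is cleared so that kswapd_run() can start a fresh thread if the node is onlined again.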
4653 */ 4654 void kswapd_stop(int nid) 4655 { 4656 struct task_struct *kswapd = NODE_DATA(nid)->kswapd; 4657 4658 if (kswapd) { 4659 kthread_stop(kswapd); 4660 NODE_DATA(nid)->kswapd = NULL; 4661 } 4662 } 4663 4664 static int __init kswapd_init(void) 4665 { 4666 int nid; 4667 4668 swap_setup(); 4669 for_each_node_state(nid, N_MEMORY) 4670 kswapd_run(nid); 4671 return 0; 4672 } 4673 4674 module_init(kswapd_init) 4675 4676 #ifdef CONFIG_NUMA 4677 /* 4678 * Node reclaim mode 4679 * 4680 * If non-zero call node_reclaim when the number of free pages falls below 4681 * the watermarks. 4682 */ 4683 int node_reclaim_mode __read_mostly; 4684 4685 /* 4686 * Priority for NODE_RECLAIM. This determines the fraction of pages 4687 * of a node considered for each zone_reclaim. 4 scans 1/16th of 4688 * a zone. 4689 */ 4690 #define NODE_RECLAIM_PRIORITY 4 4691 4692 /* 4693 * Percentage of pages in a zone that must be unmapped for node_reclaim to 4694 * occur. 4695 */ 4696 int sysctl_min_unmapped_ratio = 1; 4697 4698 /* 4699 * If the number of slab pages in a zone grows beyond this percentage then 4700 * slab reclaim needs to occur. 4701 */ 4702 int sysctl_min_slab_ratio = 5; 4703 4704 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat) 4705 { 4706 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED); 4707 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) + 4708 node_page_state(pgdat, NR_ACTIVE_FILE); 4709 4710 /* 4711 * It's possible for there to be more file mapped pages than 4712 * accounted for by the pages on the file LRU lists because 4713 * tmpfs pages accounted for as ANON can also be FILE_MAPPED 4714 */ 4715 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; 4716 } 4717 4718 /* Work out how many page cache pages we can reclaim in this reclaim_mode */ 4719 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat) 4720 { 4721 unsigned long nr_pagecache_reclaimable; 4722 unsigned long delta = 0; 4723 4724 /* 4725 * If RECLAIM_UNMAP is set, then all file pages are considered 4726 * potentially reclaimable. Otherwise, we have to worry about 4727 * pages like swapcache and node_unmapped_file_pages() provides 4728 * a better estimate 4729 */ 4730 if (node_reclaim_mode & RECLAIM_UNMAP) 4731 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES); 4732 else 4733 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat); 4734 4735 /* If we can't clean pages, remove dirty pages from consideration */ 4736 if (!(node_reclaim_mode & RECLAIM_WRITE)) 4737 delta += node_page_state(pgdat, NR_FILE_DIRTY); 4738 4739 /* Watch for any possible underflows due to delta */ 4740 if (unlikely(delta > nr_pagecache_reclaimable)) 4741 delta = nr_pagecache_reclaimable; 4742 4743 return nr_pagecache_reclaimable - delta; 4744 } 4745 4746 /* 4747 * Try to free up some pages from this node through reclaim. 
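* Reclaim runs at NODE_RECLAIM_PRIORITY, with writeback and unmapping gated by the RECLAIM_WRITE and RECLAIM_UNMAP bits of node_reclaim_mode; the return value reports whether at least 1 << order pages were reclaimed.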
4748 */ 4749 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) 4750 { 4751 /* Minimum pages needed in order to stay on node */ 4752 const unsigned long nr_pages = 1 << order; 4753 struct task_struct *p = current; 4754 unsigned int noreclaim_flag; 4755 struct scan_control sc = { 4756 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), 4757 .gfp_mask = current_gfp_context(gfp_mask), 4758 .order = order, 4759 .priority = NODE_RECLAIM_PRIORITY, 4760 .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE), 4761 .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP), 4762 .may_swap = 1, 4763 .reclaim_idx = gfp_zone(gfp_mask), 4764 }; 4765 unsigned long pflags; 4766 4767 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, 4768 sc.gfp_mask); 4769 4770 cond_resched(); 4771 psi_memstall_enter(&pflags); 4772 fs_reclaim_acquire(sc.gfp_mask); 4773 /* 4774 * We need to be able to allocate from the reserves for RECLAIM_UNMAP 4775 */ 4776 noreclaim_flag = memalloc_noreclaim_save(); 4777 set_task_reclaim_state(p, &sc.reclaim_state); 4778 4779 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages || 4780 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) { 4781 /* 4782 * Free memory by calling shrink node with increasing 4783 * priorities until we have enough memory freed. 4784 */ 4785 do { 4786 shrink_node(pgdat, &sc); 4787 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); 4788 } 4789 4790 set_task_reclaim_state(p, NULL); 4791 memalloc_noreclaim_restore(noreclaim_flag); 4792 fs_reclaim_release(sc.gfp_mask); 4793 psi_memstall_leave(&pflags); 4794 4795 trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed); 4796 4797 return sc.nr_reclaimed >= nr_pages; 4798 } 4799 4800 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) 4801 { 4802 int ret; 4803 4804 /* 4805 * Node reclaim reclaims unmapped file backed pages and 4806 * slab pages if we are over the defined limits. 4807 * 4808 * A small portion of unmapped file backed pages is needed for 4809 * file I/O otherwise pages read by file I/O will be immediately 4810 * thrown out if the node is overallocated. So we do not reclaim 4811 * if less than a specified percentage of the node is used by 4812 * unmapped file backed pages. 4813 */ 4814 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && 4815 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <= 4816 pgdat->min_slab_pages) 4817 return NODE_RECLAIM_FULL; 4818 4819 /* 4820 * Do not scan if the allocation should not be delayed. 4821 */ 4822 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) 4823 return NODE_RECLAIM_NOSCAN; 4824 4825 /* 4826 * Only run node reclaim on the local node or on nodes that do not 4827 * have associated processors. This will favor the local processor 4828 * over remote processors and spread off node memory allocations 4829 * as wide as possible. 
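* The PGDAT_RECLAIM_LOCKED bit taken below additionally ensures that only one task runs node reclaim on a given pgdat at a time.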
4830 */ 4831 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) 4832 return NODE_RECLAIM_NOSCAN; 4833 4834 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) 4835 return NODE_RECLAIM_NOSCAN; 4836 4837 ret = __node_reclaim(pgdat, gfp_mask, order); 4838 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); 4839 4840 if (!ret) 4841 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); 4842 4843 return ret; 4844 } 4845 #endif 4846 4847 /** 4848 * check_move_unevictable_pages - check pages for evictability and move to 4849 * appropriate zone lru list 4850 * @pvec: pagevec with lru pages to check 4851 * 4852 * Checks pages for evictability, if an evictable page is in the unevictable 4853 * lru list, moves it to the appropriate evictable lru list. This function 4854 * should be only used for lru pages. 4855 */ 4856 void check_move_unevictable_pages(struct pagevec *pvec) 4857 { 4858 struct lruvec *lruvec = NULL; 4859 int pgscanned = 0; 4860 int pgrescued = 0; 4861 int i; 4862 4863 for (i = 0; i < pvec->nr; i++) { 4864 struct page *page = pvec->pages[i]; 4865 struct folio *folio = page_folio(page); 4866 int nr_pages; 4867 4868 if (PageTransTail(page)) 4869 continue; 4870 4871 nr_pages = thp_nr_pages(page); 4872 pgscanned += nr_pages; 4873 4874 /* block memcg migration during page moving between lru */ 4875 if (!TestClearPageLRU(page)) 4876 continue; 4877 4878 lruvec = folio_lruvec_relock_irq(folio, lruvec); 4879 if (page_evictable(page) && PageUnevictable(page)) { 4880 del_page_from_lru_list(page, lruvec); 4881 ClearPageUnevictable(page); 4882 add_page_to_lru_list(page, lruvec); 4883 pgrescued += nr_pages; 4884 } 4885 SetPageLRU(page); 4886 } 4887 4888 if (lruvec) { 4889 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); 4890 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); 4891 unlock_page_lruvec_irq(lruvec); 4892 } else if (pgscanned) { 4893 count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); 4894 } 4895 } 4896 EXPORT_SYMBOL_GPL(check_move_unevictable_pages); 4897