vmscan.c, compared between revisions e0b72c14d8dcc9477e580c261041dae86d4906fe (old) and 7a704474b3022dabbb68f72bf18a3d89ec1c0a24 (new).

The change renames global_reclaim() to root_reclaim(), adds documentation comments to cgroup_reclaim() and the renamed helper, and updates the call sites; the hunks shown contain no behavioral change. Unchanged regions are elided, with the number of hidden lines noted between hunks.
Lines 1-8 (identical in both revisions):

```diff
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  *
  * Swap reorganised 29.12.95, Stephen Tweedie.
  * kswapd added: 7.1.96 sct
  * Removed kswapd_ctl limits, and swap out as many pages as needed
  * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
```

--- 415 unchanged lines hidden ---

Old lines 424-445 / new lines 424-450:

```diff
 		for (i = 0; i < child_info->map_nr_max; i++) {
 			nr = atomic_long_read(&child_info->nr_deferred[i]);
 			atomic_long_add(nr, &parent_info->nr_deferred[i]);
 		}
 	}
 	up_read(&shrinker_rwsem);
 }
 
+/* Returns true for reclaim through cgroup limits or cgroup interfaces. */
 static bool cgroup_reclaim(struct scan_control *sc)
 {
 	return sc->target_mem_cgroup;
 }
 
-static bool global_reclaim(struct scan_control *sc)
+/*
+ * Returns true for reclaim on the root cgroup. This is true for direct
+ * allocator reclaim and reclaim through cgroup interfaces on the root cgroup.
+ */
+static bool root_reclaim(struct scan_control *sc)
 {
 	return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
 }
 
 /**
  * writeback_throttling_sane - is the usual dirty throttling mechanism available?
  * @sc: scan_control in question
  *
```

--- 38 unchanged lines hidden ---
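The hunk above is where the rename and the new comments land; the rest of the diff is mechanical. As a quick illustration of how the two documented predicates classify reclaim contexts, here is a standalone sketch; scan_control, mem_cgroup and mem_cgroup_is_root() below are simplified stand-ins for illustration, not the kernel types, while the two predicate bodies are copied from the new revision:

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types, for illustration only. */
struct mem_cgroup { bool is_root; };
struct scan_control { struct mem_cgroup *target_mem_cgroup; };

static bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return memcg->is_root;
}

/* Predicate bodies as in the new revision (CONFIG_MEMCG variants). */
static bool cgroup_reclaim(struct scan_control *sc)
{
	return sc->target_mem_cgroup;
}

static bool root_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
}

int main(void)
{
	struct mem_cgroup root = { .is_root = true };
	struct mem_cgroup child = { .is_root = false };
	struct scan_control global_sc = { NULL };  /* direct/kswapd reclaim */
	struct scan_control root_sc = { &root };   /* cgroup interface on the root cgroup */
	struct scan_control child_sc = { &child }; /* limit/proactive reclaim on a memcg */

	printf("global: cgroup=%d root=%d\n", cgroup_reclaim(&global_sc), root_reclaim(&global_sc)); /* 0 1 */
	printf("root:   cgroup=%d root=%d\n", cgroup_reclaim(&root_sc), root_reclaim(&root_sc));     /* 1 1 */
	printf("child:  cgroup=%d root=%d\n", cgroup_reclaim(&child_sc), root_reclaim(&child_sc));   /* 1 0 */
	return 0;
}
```

So the two predicates overlap exactly for reclaim driven through a cgroup interface on the root cgroup, which is what the new comments spell out.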
Old lines 484-500 / new lines 489-505 (the !CONFIG_MEMCG stubs):

```diff
 	return 0;
 }
 
 static bool cgroup_reclaim(struct scan_control *sc)
 {
 	return false;
 }
 
-static bool global_reclaim(struct scan_control *sc)
+static bool root_reclaim(struct scan_control *sc)
 {
 	return true;
 }
 
 static bool writeback_throttling_sane(struct scan_control *sc)
 {
 	return true;
 }
```

--- 40 unchanged lines hidden ---

Old lines 541-557 / new lines 546-562:

```diff
 	 * anyway, even if they are not counted here properly, and we will be
 	 * able to make forward progress in charging (which is usually in a
 	 * retry loop).
 	 *
 	 * We can go one step further, and report the uncharged objcg pages in
 	 * memcg reclaim, to make reporting more accurate and reduce
 	 * underestimation, but it's probably not worth the complexity for now.
 	 */
-	if (current->reclaim_state && global_reclaim(sc)) {
+	if (current->reclaim_state && root_reclaim(sc)) {
 		sc->nr_reclaimed += current->reclaim_state->reclaimed;
 		current->reclaim_state->reclaimed = 0;
 	}
 }
 
 static long xchg_nr_deferred(struct shrinker *shrinker,
 			     struct shrink_control *sc)
 {
```

--- 4762 unchanged lines hidden ---
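In practice the one-line change above means pages recorded in current->reclaim_state (e.g. slab freed by shrinkers) are credited to sc->nr_reclaimed only for root reclaim; for memcg reclaim the fold is skipped, as the (partly elided) comment explains. A minimal sketch of that behavior, with simplified structs in place of the kernel's scan_control and task-attached reclaim_state; the helper name mirrors flush_reclaim_state(), the function this hunk sits in:

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins: the kernel's reclaim_state hangs off task_struct. */
struct reclaim_state { unsigned long reclaimed; };
struct scan_control { unsigned long nr_reclaimed; bool is_root_reclaim; };

/* Mirrors the guarded fold shown in the hunk above. */
static void flush_reclaim_state(struct scan_control *sc, struct reclaim_state *rs)
{
	if (rs && sc->is_root_reclaim) {
		sc->nr_reclaimed += rs->reclaimed;
		rs->reclaimed = 0;
	}
}

int main(void)
{
	struct reclaim_state rs = { .reclaimed = 32 };  /* e.g. slab pages freed by shrinkers */
	struct scan_control memcg_sc = { 0, false };
	struct scan_control root_sc = { 0, true };

	flush_reclaim_state(&memcg_sc, &rs); /* skipped: freed pages may not belong to this memcg */
	flush_reclaim_state(&root_sc, &rs);  /* folded into nr_reclaimed and reset */
	printf("memcg=%lu root=%lu rs=%lu\n",
	       memcg_sc.nr_reclaimed, root_sc.nr_reclaimed, rs.reclaimed); /* memcg=0 root=32 rs=0 */
	return 0;
}
```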
Old lines 5320-5336 / new lines 5325-5341:

```diff
 
 	/* skip this lruvec as it's low on cold folios */
 	return try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false) ? -1 : 0;
 }
 
 static unsigned long get_nr_to_reclaim(struct scan_control *sc)
 {
 	/* don't abort memcg reclaim to ensure fairness */
-	if (!global_reclaim(sc))
+	if (!root_reclaim(sc))
 		return -1;
 
 	return max(sc->nr_to_reclaim, compact_gap(sc->order));
 }
 
 static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
 	long nr_to_scan;
```

--- 135 unchanged lines hidden ---
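A note on the -1 in the hunk above: get_nr_to_reclaim() returns unsigned long, so -1 converts to ULONG_MAX, and an "enough reclaimed, stop early" cutoff can never be reached; that is how "don't abort memcg reclaim to ensure fairness" is implemented. The tiny standalone check below demonstrates the conversion; the caller-side comparison is an assumed shape for illustration, not copied from this diff:

```c
#include <limits.h>
#include <stdio.h>

/* Same idiom as the hunk: -1 returned through an unsigned long. */
static unsigned long get_nr_to_reclaim_for_memcg(void)
{
	return -1;
}

int main(void)
{
	unsigned long nr_to_reclaim = get_nr_to_reclaim_for_memcg();
	unsigned long nr_reclaimed = 123456;

	printf("%d\n", nr_to_reclaim == ULONG_MAX);    /* 1: -1 wraps to ULONG_MAX */
	/* an assumed "reclaimed enough, stop" check of this shape can never fire */
	printf("%d\n", nr_reclaimed >= nr_to_reclaim); /* 0 */
	return 0;
}
```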
Old lines 5472-5488 / new lines 5477-5493:

```diff
 	if (bin != first_bin)
 		goto restart;
 }
 
 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
 	struct blk_plug plug;
 
-	VM_WARN_ON_ONCE(global_reclaim(sc));
+	VM_WARN_ON_ONCE(root_reclaim(sc));
 	VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap);
 
 	lru_add_drain();
 
 	blk_start_plug(&plug);
 
 	set_mm_walk(NULL, sc->proactive);
 
```

--- 44 unchanged lines hidden ---

Old lines 5533-5549 / new lines 5538-5554:

```diff
 	sc->priority = clamp(priority, 0, DEF_PRIORITY);
 }
 
 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
 {
 	struct blk_plug plug;
 	unsigned long reclaimed = sc->nr_reclaimed;
 
-	VM_WARN_ON_ONCE(!global_reclaim(sc));
+	VM_WARN_ON_ONCE(!root_reclaim(sc));
 
 	/*
 	 * Unmapped clean folios are already prioritized. Scanning for more of
 	 * them is likely futile and can cause high reclaim latency when there
 	 * is a large number of memcgs.
 	 */
 	if (!sc->may_writepage || !sc->may_unmap)
 		goto done;
```

--- 705 unchanged lines hidden ---
Old lines 6255-6271 / new lines 6260-6276:

```diff
 	unsigned long targets[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
 	enum lru_list lru;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
 	bool proportional_reclaim;
 	struct blk_plug plug;
 
-	if (lru_gen_enabled() && !global_reclaim(sc)) {
+	if (lru_gen_enabled() && !root_reclaim(sc)) {
 		lru_gen_shrink_lruvec(lruvec, sc);
 		return;
 	}
 
 	get_scan_count(lruvec, sc, nr);
 
 	/* Record the original scan target for proportional adjustments later */
 	memcpy(targets, nr, sizeof(nr));
```

--- 224 unchanged lines hidden ---

Old lines 6496-6512 / new lines 6501-6517:

```diff
 }
 
 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 {
 	unsigned long nr_reclaimed, nr_scanned, nr_node_reclaimed;
 	struct lruvec *target_lruvec;
 	bool reclaimable = false;
 
-	if (lru_gen_enabled() && global_reclaim(sc)) {
+	if (lru_gen_enabled() && root_reclaim(sc)) {
 		lru_gen_shrink_node(pgdat, sc);
 		return;
 	}
 
 	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
 
 again:
 	memset(&sc->nr, 0, sizeof(sc->nr));
```

--- 1610 unchanged lines hidden ---
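Taken together, the last two hunks keep the MGLRU dispatch split intact after the rename: shrink_node() short-circuits into lru_gen_shrink_node() for root reclaim, while memcg reclaim reaches shrink_lruvec(), which short-circuits into lru_gen_shrink_lruvec(), matching the VM_WARN_ON_ONCE() assertions in the earlier hunks. Below is a condensed, runnable model of that dispatch; the real call chain passes through shrink_node_memcgs() and per-memcg lruvecs, which this collapses for illustration:

```c
#include <stdbool.h>
#include <stdio.h>

/* Condensed model of the dispatch in the last two hunks; not the kernel code. */
struct scan_control { bool targets_non_root_memcg; };

static bool lru_gen_enabled(void) { return true; } /* assume MGLRU is on */
static bool root_reclaim(struct scan_control *sc) { return !sc->targets_non_root_memcg; }

static void shrink_lruvec(struct scan_control *sc)
{
	if (lru_gen_enabled() && !root_reclaim(sc)) {
		puts("  lru_gen_shrink_lruvec(): per-memcg MGLRU reclaim");
		return;
	}
	puts("  classic per-lruvec reclaim");
}

static void shrink_node(struct scan_control *sc)
{
	if (lru_gen_enabled() && root_reclaim(sc)) {
		puts("  lru_gen_shrink_node(): whole-node MGLRU reclaim");
		return;
	}
	/* the kernel reaches this via shrink_node_memcgs() and per-memcg lruvecs */
	shrink_lruvec(sc);
}

int main(void)
{
	struct scan_control root_sc = { .targets_non_root_memcg = false };
	struct scan_control memcg_sc = { .targets_non_root_memcg = true };

	puts("root reclaim:");
	shrink_node(&root_sc);
	puts("memcg reclaim:");
	shrink_node(&memcg_sc);
	return 0;
}
```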