vmscan.c: 1ba6fc9af35bf97c84567d9b3eeb26629d1e3af0 (old) → 1c6c15971e4709953f75082a5d44212536b1c2b7 (new)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/mm/vmscan.c
4 *
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 *
7 * Swap reorganised 29.12.95, Stephen Tweedie.
8 * kswapd added: 7.1.96 sct

--- 2606 unchanged lines hidden ---

2615 * pages that were scanned. This will return to the
2616 * caller faster at the risk that reclaim/compaction and
2617 * the resulting allocation attempt fail
2618 */
2619 if (!nr_reclaimed)
2620 return false;
2621 }
2622

--- 1ba6fc9af35b (old) ---

2623 /*
2624 * If we have not reclaimed enough pages for compaction and the
2625 * inactive lists are large enough, continue reclaiming
2626 */
2627 pages_for_compaction = compact_gap(sc->order);
2628 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
2629 if (get_nr_swap_pages() > 0)
2630 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
2631 if (sc->nr_reclaimed < pages_for_compaction &&
2632 inactive_lru_pages > pages_for_compaction)
2633 return true;
2634
2635 /* If compaction would go ahead or the allocation would succeed, stop */
2636 for (z = 0; z <= sc->reclaim_idx; z++) {
2637 struct zone *zone = &pgdat->node_zones[z];
2638 if (!managed_zone(zone))
2639 continue;
2640
2641 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
2642 case COMPACT_SUCCESS:
2643 case COMPACT_CONTINUE:
2644 return false;
2645 default:
2646 /* check next zone */
2647 ;
2648 }
2649 }
2650 return true;

--- 1c6c15971e47 (new) ---

2623 /* If compaction would go ahead or the allocation would succeed, stop */
2624 for (z = 0; z <= sc->reclaim_idx; z++) {
2625 struct zone *zone = &pgdat->node_zones[z];
2626 if (!managed_zone(zone))
2627 continue;
2628
2629 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
2630 case COMPACT_SUCCESS:
2631 case COMPACT_CONTINUE:
2632 return false;
2633 default:
2634 /* check next zone */
2635 ;
2636 }
2637 }
2638
2639 /*
2640 * If we have not reclaimed enough pages for compaction and the
2641 * inactive lists are large enough, continue reclaiming
2642 */
2643 pages_for_compaction = compact_gap(sc->order);
2644 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
2645 if (get_nr_swap_pages() > 0)
2646 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
2647
2648 return inactive_lru_pages > pages_for_compaction &&
2649 /*
2650 * avoid dryrun with plenty of inactive pages
2651 */
2652 nr_scanned && nr_reclaimed;
2653}
2654
2655static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
2656{
2657 return test_bit(PGDAT_CONGESTED, &pgdat->flags) ||
2658 (memcg && memcg_congested(pgdat, memcg));
2659}
2660

--- 1617 unchanged lines hidden ---
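
Taken together, the two hunks reorder should_continue_reclaim(): the new version tests compaction readiness before the inactive-list size, and it keeps reclaiming only if the last cycle actually scanned and reclaimed something. The user-space sketch below is illustrative, not kernel code: reclaim_state, compaction_ready and the sample numbers are stand-ins for scan_control, the per-zone compaction_suitable() loop and compact_gap(sc->order), and the earlier bail-outs visible at lines 2615-2621 are omitted. It contrasts the two orderings on a dry-run cycle, where the inactive list is large but nothing could be reclaimed.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel state consulted above. */
struct reclaim_state {
	unsigned long total_reclaimed;      /* sc->nr_reclaimed (cumulative) */
	unsigned long nr_reclaimed;         /* reclaimed in the last cycle */
	unsigned long nr_scanned;           /* scanned in the last cycle */
	unsigned long inactive_lru_pages;
	unsigned long pages_for_compaction; /* compact_gap(sc->order) */
	bool compaction_ready;              /* a zone was COMPACT_SUCCESS/CONTINUE */
};

/* Old ordering (1ba6fc9af35b): size test first, compaction test second. */
static bool should_continue_old(const struct reclaim_state *s)
{
	if (s->total_reclaimed < s->pages_for_compaction &&
	    s->inactive_lru_pages > s->pages_for_compaction)
		return true; /* keeps looping even with zero progress */
	if (s->compaction_ready)
		return false;
	return true;
}

/* New ordering (1c6c15971e47): compaction test first, then demand progress. */
static bool should_continue_new(const struct reclaim_state *s)
{
	if (s->compaction_ready)
		return false;
	return s->inactive_lru_pages > s->pages_for_compaction &&
	       /* avoid dryrun with plenty of inactive pages */
	       s->nr_scanned && s->nr_reclaimed;
}

int main(void)
{
	/* Dry run: large inactive list, pages scanned, nothing reclaimed. */
	struct reclaim_state dryrun = {
		.total_reclaimed = 0,
		.nr_reclaimed = 0,
		.nr_scanned = 1000,
		.inactive_lru_pages = 1 << 16,
		.pages_for_compaction = 1 << 9,
		.compaction_ready = false,
	};
	printf("dry run: old=%d new=%d\n",
	       should_continue_old(&dryrun),
	       should_continue_new(&dryrun));
	return 0;
}

Compiled with any C99 compiler this prints "dry run: old=1 new=0": the old ordering keeps reclaim running although the cycle made no progress, while the new ordering detects the dry run and stops.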