--- mm/vmscan.c (27eb2c4b3d3e13f376a359e293c212a2e9407af5)
+++ mm/vmscan.c (75485363ce8552698bfb9970d901f755d5713cca)
/*
 * linux/mm/vmscan.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Swap reorganised 29.12.95, Stephen Tweedie.
 * kswapd added: 7.1.96  sct
 * Removed kswapd_ctl limits, and swap out as many pages as needed

--- 2587 unchanged lines hidden ---

                wake_up(&pgdat->pfmemalloc_wait);
                return false;
        }

        return pgdat_balanced(pgdat, order, classzone_idx);
}

/*
+ * kswapd shrinks the zone by the number of pages required to reach
+ * the high watermark.
+ */
+static void kswapd_shrink_zone(struct zone *zone,
+                               struct scan_control *sc,
+                               unsigned long lru_pages)
+{
+        unsigned long nr_slab;
+        struct reclaim_state *reclaim_state = current->reclaim_state;
+        struct shrink_control shrink = {
+                .gfp_mask = sc->gfp_mask,
+        };
+
+        /* Reclaim above the high watermark. */
+        sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
+        shrink_zone(zone, sc);
+
+        reclaim_state->reclaimed_slab = 0;
+        nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages);
+        sc->nr_reclaimed += reclaim_state->reclaimed_slab;
+
+        if (nr_slab == 0 && !zone_reclaimable(zone))
+                zone->all_unreclaimable = 1;
+}
+
+/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at high_wmark_pages(zone).
 *
 * Returns the final order kswapd was reclaiming at
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.

--- 10 unchanged lines hidden ---

 * of pages is balanced across the zones.
 */
static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                                                        int *classzone_idx)
{
        bool pgdat_is_balanced = false;
        int i;
        int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
-        struct reclaim_state *reclaim_state = current->reclaim_state;
        unsigned long nr_soft_reclaimed;
        unsigned long nr_soft_scanned;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_unmap = 1,
                .may_swap = 1,
-                /*
-                 * kswapd doesn't want to be bailed out while reclaim. because
-                 * we want to put equal scanning pressure on each zone.
-                 */
-                .nr_to_reclaim = ULONG_MAX,
                .order = order,
                .target_mem_cgroup = NULL,
        };
-        struct shrink_control shrink = {
-                .gfp_mask = sc.gfp_mask,
-        };
loop_again:
        sc.priority = DEF_PRIORITY;
        sc.nr_reclaimed = 0;
        sc.may_writepage = !laptop_mode;
        count_vm_event(PAGEOUTRUN);

        do {
                unsigned long lru_pages = 0;

--- 55 unchanged lines hidden ---

                 *
                 * We do this because the page allocator works in the opposite
                 * direction. This prevents the page allocator from allocating
                 * pages behind kswapd's direction of progress, which would
                 * cause too much scanning of the lower zones.
                 */
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
-                        int nr_slab, testorder;
+                        int testorder;
                        unsigned long balance_gap;

                        if (!populated_zone(zone))
                                continue;

                        if (zone->all_unreclaimable &&
                            sc.priority != DEF_PRIORITY)
                                continue;

--- 31 unchanged lines hidden ---

                        testorder = order;
                        if (IS_ENABLED(CONFIG_COMPACTION) && order &&
                                        compaction_suitable(zone, order) !=
                                                COMPACT_SKIPPED)
                                testorder = 0;

                        if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
                            !zone_balanced(zone, testorder,
-                                           balance_gap, end_zone)) {
-                                shrink_zone(zone, &sc);
+                                           balance_gap, end_zone))
+                                kswapd_shrink_zone(zone, &sc, lru_pages);

-                                reclaim_state->reclaimed_slab = 0;
-                                nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
-                                sc.nr_reclaimed += reclaim_state->reclaimed_slab;
-
-                                if (nr_slab == 0 && !zone_reclaimable(zone))
-                                        zone->all_unreclaimable = 1;
-                        }
-
                        /*
                         * If we're getting trouble reclaiming, start doing
                         * writepage even in laptop mode.
                         */
                        if (sc.priority < DEF_PRIORITY - 2)
                                sc.may_writepage = 1;

                        if (zone->all_unreclaimable) {

--- 787 unchanged lines hidden ---