Lines Matching +full:non +full:- +full:disruptive
1 // SPDX-License-Identifier: GPL-2.0
31 #include <linux/backing-dev.h>
46 #include <linux/memory-tiers.h>
175 if ((_folio)->lru.prev != _base) { \
178 prev = lru_to_folio(&(_folio->lru)); \
179 prefetchw(&prev->_field); \
211 return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info, in shrinker_info_protected()
226 pn = memcg->nodeinfo[nid]; in expand_one_shrinker_info()
233 if (new_nr_max <= old->map_nr_max) in expand_one_shrinker_info()
238 return -ENOMEM; in expand_one_shrinker_info()
240 new->nr_deferred = (atomic_long_t *)(new + 1); in expand_one_shrinker_info()
241 new->map = (void *)new->nr_deferred + defer_size; in expand_one_shrinker_info()
242 new->map_nr_max = new_nr_max; in expand_one_shrinker_info()
245 memset(new->map, (int)0xff, old_map_size); in expand_one_shrinker_info()
246 memset((void *)new->map + old_map_size, 0, map_size - old_map_size); in expand_one_shrinker_info()
248 memcpy(new->nr_deferred, old->nr_deferred, old_defer_size); in expand_one_shrinker_info()
249 memset((void *)new->nr_deferred + old_defer_size, 0, in expand_one_shrinker_info()
250 defer_size - old_defer_size); in expand_one_shrinker_info()
252 rcu_assign_pointer(pn->shrinker_info, new); in expand_one_shrinker_info()
266 pn = memcg->nodeinfo[nid]; in free_shrinker_info()
267 info = rcu_dereference_protected(pn->shrinker_info, true); in free_shrinker_info()
269 rcu_assign_pointer(pn->shrinker_info, NULL); in free_shrinker_info()
287 ret = -ENOMEM; in alloc_shrinker_info()
290 info->nr_deferred = (atomic_long_t *)(info + 1); in alloc_shrinker_info()
291 info->map = (void *)info->nr_deferred + defer_size; in alloc_shrinker_info()
292 info->map_nr_max = shrinker_nr_max; in alloc_shrinker_info()
293 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info); in alloc_shrinker_info()
341 info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info); in set_shrinker_bit()
342 if (!WARN_ON_ONCE(shrinker_id >= info->map_nr_max)) { in set_shrinker_bit()
345 set_bit(shrinker_id, info->map); in set_shrinker_bit()
355 int id, ret = -ENOMEM; in prealloc_memcg_shrinker()
358 return -ENOSYS; in prealloc_memcg_shrinker()
372 shrinker->id = id; in prealloc_memcg_shrinker()
381 int id = shrinker->id; in unregister_memcg_shrinker()
396 return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0); in xchg_nr_deferred_memcg()
405 return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]); in add_nr_deferred_memcg()
424 for (i = 0; i < child_info->map_nr_max; i++) { in reparent_shrinker_deferred()
425 nr = atomic_long_read(&child_info->nr_deferred[i]); in reparent_shrinker_deferred()
426 atomic_long_add(nr, &parent_info->nr_deferred[i]); in reparent_shrinker_deferred()
435 return sc->target_mem_cgroup; in cgroup_reclaim()
444 return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup); in root_reclaim()
448 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
473 return -ENOSYS; in prealloc_memcg_shrinker()
512 WARN_ON_ONCE(rs && task->reclaim_state); in set_task_reclaim_state()
514 /* Check for the nulling of an already-nulled member */ in set_task_reclaim_state()
515 WARN_ON_ONCE(!rs && !task->reclaim_state); in set_task_reclaim_state()
517 task->reclaim_state = rs; in set_task_reclaim_state()
521 * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to
522 * scan_control->nr_reclaimed.
527 * Currently, reclaim_state->reclaimed includes three types of pages in flush_reclaim_state()
534 * single memcg. For example, a memcg-aware shrinker can free one object in flush_reclaim_state()
537 * overestimating the reclaimed amount (potentially under-reclaiming). in flush_reclaim_state()
539 * Only count such pages for global reclaim to prevent under-reclaiming in flush_reclaim_state()
554 if (current->reclaim_state && root_reclaim(sc)) { in flush_reclaim_state()
555 sc->nr_reclaimed += current->reclaim_state->reclaimed; in flush_reclaim_state()
556 current->reclaim_state->reclaimed = 0; in flush_reclaim_state()
563 int nid = sc->nid; in xchg_nr_deferred()
565 if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) in xchg_nr_deferred()
568 if (sc->memcg && in xchg_nr_deferred()
569 (shrinker->flags & SHRINKER_MEMCG_AWARE)) in xchg_nr_deferred()
571 sc->memcg); in xchg_nr_deferred()
573 return atomic_long_xchg(&shrinker->nr_deferred[nid], 0); in xchg_nr_deferred()
580 int nid = sc->nid; in add_nr_deferred()
582 if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) in add_nr_deferred()
585 if (sc->memcg && in add_nr_deferred()
586 (shrinker->flags & SHRINKER_MEMCG_AWARE)) in add_nr_deferred()
588 sc->memcg); in add_nr_deferred()
590 return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]); in add_nr_deferred()
597 if (sc && sc->no_demotion) in can_demote()
611 * For non-memcg reclaim, is there in can_reclaim_anon_pages()
645 * If there are no reclaimable file-backed or anonymous pages, in zone_reclaimable_pages()
656 * lruvec_lru_size - Returns the number of pages on the given LRU list.
659 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
668 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; in lruvec_lru_size()
689 if (shrinker->flags & SHRINKER_MEMCG_AWARE) { in __prealloc_shrinker()
691 if (err != -ENOSYS) in __prealloc_shrinker()
694 shrinker->flags &= ~SHRINKER_MEMCG_AWARE; in __prealloc_shrinker()
697 size = sizeof(*shrinker->nr_deferred); in __prealloc_shrinker()
698 if (shrinker->flags & SHRINKER_NUMA_AWARE) in __prealloc_shrinker()
701 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); in __prealloc_shrinker()
702 if (!shrinker->nr_deferred) in __prealloc_shrinker()
703 return -ENOMEM; in __prealloc_shrinker()
715 shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap); in prealloc_shrinker()
717 if (!shrinker->name) in prealloc_shrinker()
718 return -ENOMEM; in prealloc_shrinker()
722 kfree_const(shrinker->name); in prealloc_shrinker()
723 shrinker->name = NULL; in prealloc_shrinker()
738 kfree_const(shrinker->name); in free_prealloced_shrinker()
739 shrinker->name = NULL; in free_prealloced_shrinker()
741 if (shrinker->flags & SHRINKER_MEMCG_AWARE) { in free_prealloced_shrinker()
748 kfree(shrinker->nr_deferred); in free_prealloced_shrinker()
749 shrinker->nr_deferred = NULL; in free_prealloced_shrinker()
755 list_add_tail(&shrinker->list, &shrinker_list); in register_shrinker_prepared()
756 shrinker->flags |= SHRINKER_REGISTERED; in register_shrinker_prepared()
778 shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap); in register_shrinker()
780 if (!shrinker->name) in register_shrinker()
781 return -ENOMEM; in register_shrinker()
785 kfree_const(shrinker->name); in register_shrinker()
786 shrinker->name = NULL; in register_shrinker()
806 if (!(shrinker->flags & SHRINKER_REGISTERED)) in unregister_shrinker()
810 list_del(&shrinker->list); in unregister_shrinker()
811 shrinker->flags &= ~SHRINKER_REGISTERED; in unregister_shrinker()
812 if (shrinker->flags & SHRINKER_MEMCG_AWARE) in unregister_shrinker()
819 kfree(shrinker->nr_deferred); in unregister_shrinker()
820 shrinker->nr_deferred = NULL; in unregister_shrinker()
825 * synchronize_shrinkers - Wait for all running shrinkers to complete.
850 long batch_size = shrinker->batch ? shrinker->batch in do_shrink_slab()
854 freeable = shrinker->count_objects(shrinker, shrinkctl); in do_shrink_slab()
865 if (shrinker->seeks) { in do_shrink_slab()
868 do_div(delta, shrinker->seeks); in do_shrink_slab()
905 shrinkctl->nr_to_scan = nr_to_scan; in do_shrink_slab()
906 shrinkctl->nr_scanned = nr_to_scan; in do_shrink_slab()
907 ret = shrinker->scan_objects(shrinker, shrinkctl); in do_shrink_slab()
912 count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned); in do_shrink_slab()
913 total_scan -= shrinkctl->nr_scanned; in do_shrink_slab()
914 scanned += shrinkctl->nr_scanned; in do_shrink_slab()
925 next_deferred = max_t(long, (nr + delta - scanned), 0); in do_shrink_slab()
934 trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan); in do_shrink_slab()
956 for_each_set_bit(i, info->map, info->map_nr_max) { in shrink_slab_memcg()
965 if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED))) { in shrink_slab_memcg()
967 clear_bit(i, info->map); in shrink_slab_memcg()
971 /* Call non-slab shrinkers even though kmem is disabled */ in shrink_slab_memcg()
973 !(shrinker->flags & SHRINKER_NONSLAB)) in shrink_slab_memcg()
978 clear_bit(i, info->map); in shrink_slab_memcg()
1021 * shrink_slab - shrink slab caches
1035 * @priority is sc->priority, we take the number of objects and >> by priority
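The do_shrink_slab() fragments further up and the comment above describe how the scan target is derived from the freeable object count and the reclaim priority. The standalone sketch below models that arithmetic; the 4/seeks scaling, the handling of deferred work and the 2x cap are recalled from the upstream code rather than quoted from this listing, so treat it as approximate.

#include <stdio.h>

/* Illustrative defaults; in the kernel these are DEF_PRIORITY and DEFAULT_SEEKS. */
#define DEF_PRIORITY    12
#define DEFAULT_SEEKS   2

static unsigned long scan_target(unsigned long freeable, unsigned long deferred,
                                 int priority, unsigned int seeks)
{
        unsigned long delta;

        if (seeks) {
                /* scan a priority-dependent slice, scaled by how costly a miss is */
                delta = freeable >> priority;
                delta *= 4;
                delta /= seeks;
        } else {
                /* shrinkers advertising zero seeks are asked for half of what is freeable */
                delta = freeable / 2;
        }

        delta += deferred;              /* pick up work skipped by earlier calls */
        if (delta > 2 * freeable)       /* never wind up beyond twice the cache size */
                delta = 2 * freeable;
        return delta;
}

int main(void)
{
        /* 1M freeable objects, nothing deferred: (1M >> 12) * 4 / 2 = 512 objects */
        printf("%lu\n", scan_target(1UL << 20, 0, DEF_PRIORITY, DEFAULT_SEEKS));
        return 0;
}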
1120 BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != in reclaimer_offset()
1121 PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD); in reclaimer_offset()
1122 BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != in reclaimer_offset()
1123 PGSCAN_DIRECT - PGSCAN_KSWAPD); in reclaimer_offset()
1124 BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != in reclaimer_offset()
1125 PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD); in reclaimer_offset()
1126 BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != in reclaimer_offset()
1127 PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD); in reclaimer_offset()
1132 return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD; in reclaimer_offset()
1133 return PGSTEAL_DIRECT - PGSTEAL_KSWAPD; in reclaimer_offset()
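The BUILD_BUG_ON()s above pin the layout of the reclaim counters so that a single offset selects the kswapd, direct or khugepaged variant of each event. A minimal model of that trick, using an illustrative enum rather than the kernel's real vm_event_item list:

#include <stdio.h>

enum vm_event {
        PGSTEAL_KSWAPD, PGSTEAL_DIRECT, PGSTEAL_KHUGEPAGED,
        PGSCAN_KSWAPD, PGSCAN_DIRECT, PGSCAN_KHUGEPAGED,
        NR_VM_EVENTS,
};

int main(void)
{
        unsigned long events[NR_VM_EVENTS] = { 0 };
        int offset = PGSTEAL_DIRECT - PGSTEAL_KSWAPD;   /* "direct reclaim" offset */

        events[PGSTEAL_KSWAPD + offset] += 32;  /* lands on PGSTEAL_DIRECT */
        events[PGSCAN_KSWAPD + offset] += 64;   /* lands on PGSCAN_DIRECT */
        printf("%lu %lu\n", events[PGSTEAL_DIRECT], events[PGSCAN_DIRECT]);
        return 0;
}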
1141 * private data at folio->private. in is_page_cache_freeable()
1143 return folio_ref_count(folio) - folio_test_private(folio) == in is_page_cache_freeable()
1149 * -ENOSPC. We need to propagate that into the address_space for a subsequent
1177 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in skip_throttle_noprogress()
1186 struct zone *zone = pgdat->node_zones + i; in skip_throttle_noprogress()
1203 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; in reclaim_throttle()
1213 current->flags & (PF_USER_WORKER|PF_KTHREAD)) { in reclaim_throttle()
1221 * parallel reclaimers which is a short-lived event so the timeout is in reclaim_throttle()
1223 * potentially long-lived events so use a longer timeout. This is shaky in reclaim_throttle()
1232 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { in reclaim_throttle()
1233 WRITE_ONCE(pgdat->nr_reclaim_start, in reclaim_throttle()
1263 atomic_dec(&pgdat->nr_writeback_throttled); in reclaim_throttle()
1265 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout), in reclaim_throttle()
1266 jiffies_to_usecs(timeout - ret), in reclaim_throttle()
1283 * This is an inaccurate read as the per-cpu deltas may not in __acct_reclaim_writeback()
1289 nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) - in __acct_reclaim_writeback()
1290 READ_ONCE(pgdat->nr_reclaim_start); in __acct_reclaim_writeback()
1293 wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]); in __acct_reclaim_writeback()
1310 * Calls ->writepage().
1317 * will be non-blocking. To prevent this allocation from being in pageout()
1336 * folio->mapping == NULL while being dirty with clean buffers. in pageout()
1347 if (mapping->a_ops->writepage == NULL) in pageout()
1362 res = mapping->a_ops->writepage(&folio->page, &wbc); in pageout()
1396 spin_lock(&mapping->host->i_lock); in __remove_mapping()
1397 xa_lock_irq(&mapping->i_pages); in __remove_mapping()
1399 * The non racy check for a busy folio. in __remove_mapping()
1417 * escape unnoticed. The smp_rmb is needed to ensure the folio->flags in __remove_mapping()
1418 * load is not satisfied before that of folio->_refcount. in __remove_mapping()
1433 swp_entry_t swap = folio->swap; in __remove_mapping()
1439 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
1444 free_folio = mapping->a_ops->free_folio; in __remove_mapping()
1465 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
1467 inode_add_lru(mapping->host); in __remove_mapping()
1468 spin_unlock(&mapping->host->i_lock); in __remove_mapping()
1477 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
1479 spin_unlock(&mapping->host->i_lock); in __remove_mapping()
1484 * remove_mapping() - Attempt to remove a folio from its mapping.
1510 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
1537 referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, in folio_check_references()
1549 if (referenced_ptes == -1) in folio_check_references()
1573 * Activate file-backed executable folios after first usage. in folio_check_references()
1617 if (mapping && mapping->a_ops->is_dirty_writeback) in folio_check_dirty_writeback()
1618 mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); in folio_check_dirty_writeback()
1630 allowed_mask = mtc->nmask; in alloc_demote_folio()
1640 mtc->nmask = NULL; in alloc_demote_folio()
1641 mtc->gfp_mask |= __GFP_THISNODE; in alloc_demote_folio()
1646 mtc->gfp_mask &= ~__GFP_THISNODE; in alloc_demote_folio()
1647 mtc->nmask = allowed_mask; in alloc_demote_folio()
1659 int target_nid = next_demotion_node(pgdat->node_id); in demote_folio_list()
1700 * We can "enter_fs" for swap-cache with only __GFP_IO in may_enter_fs()
1702 * ->flags can be updated non-atomically (scan_swap_map_slots), in may_enter_fs()
1726 do_demote_pass = can_demote(pgdat->node_id, sc); in shrink_folio_list()
1739 list_del(&folio->lru); in shrink_folio_list()
1749 sc->nr_scanned += nr_pages; in shrink_folio_list()
1754 if (!sc->may_unmap && folio_mapped(folio)) in shrink_folio_list()
1769 stat->nr_dirty += nr_pages; in shrink_folio_list()
1772 stat->nr_unqueued_dirty += nr_pages; in shrink_folio_list()
1781 stat->nr_congested += nr_pages; in shrink_folio_list()
1831 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { in shrink_folio_list()
1832 stat->nr_immediate += nr_pages; in shrink_folio_list()
1838 !may_enter_fs(folio, sc->gfp_mask)) { in shrink_folio_list()
1840 * This is slightly racy - in shrink_folio_list()
1844 * interpreted as the readahead flag - but in shrink_folio_list()
1854 stat->nr_writeback += nr_pages; in shrink_folio_list()
1862 list_add_tail(&folio->lru, folio_list); in shrink_folio_list()
1874 stat->nr_ref_keep += nr_pages; in shrink_folio_list()
1887 list_add(&folio->lru, &demote_folios); in shrink_folio_list()
1899 if (!(sc->gfp_mask & __GFP_IO)) in shrink_folio_list()
1944 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1961 stat->nr_unmap_fail += nr_pages; in shrink_folio_list()
1964 stat->nr_lazyfree_fail += nr_pages; in shrink_folio_list()
1984 * injecting inefficient single-folio I/O into in shrink_folio_list()
1995 !test_bit(PGDAT_DIRTY, &pgdat->flags))) { in shrink_folio_list()
2011 if (!may_enter_fs(folio, sc->gfp_mask)) in shrink_folio_list()
2013 if (!sc->may_writepage) in shrink_folio_list()
2028 stat->nr_pageout += nr_pages; in shrink_folio_list()
2036 * A synchronous write - probably a ramdisk. Go in shrink_folio_list()
2064 * and mark the folio clean - it can be freed. in shrink_folio_list()
2066 * Rarely, folios can have buffers and no ->mapping. in shrink_folio_list()
2075 if (!filemap_release_folio(folio, sc->gfp_mask)) in shrink_folio_list()
2110 sc->target_mem_cgroup)) in shrink_folio_list()
2128 list_add(&folio->lru, &free_folios); in shrink_folio_list()
2137 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
2149 stat->nr_activate[type] += nr_pages; in shrink_folio_list()
2155 list_add(&folio->lru, &ret_folios); in shrink_folio_list()
2184 if (!sc->proactive) { in shrink_folio_list()
2190 pgactivate = stat->nr_activate[0] + stat->nr_activate[1]; in shrink_folio_list()
2222 list_move(&folio->lru, &clean_folios); in reclaim_clean_pages_from_list()
2233 nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc, in reclaim_clean_pages_from_list()
2238 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, in reclaim_clean_pages_from_list()
2239 -(long)nr_reclaimed); in reclaim_clean_pages_from_list()
2246 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, in reclaim_clean_pages_from_list()
2248 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, in reclaim_clean_pages_from_list()
2249 -(long)stat.nr_lazyfree_fail); in reclaim_clean_pages_from_list()
2266 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); in update_lru_sizes()
2274 * lruvec->lru_lock is heavily contended. Some of the functions that
2297 struct list_head *src = &lruvec->lists[lru]; in isolate_lru_folios()
2317 if (folio_zonenum(folio) > sc->reclaim_idx) { in isolate_lru_folios()
2334 if (!sc->may_unmap && folio_mapped(folio)) in isolate_lru_folios()
2339 * sure the folio is not being freed elsewhere -- the in isolate_lru_folios()
2355 list_move(&folio->lru, move_to); in isolate_lru_folios()
2378 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, in isolate_lru_folios()
2380 sc->may_unmap ? 0 : ISOLATE_UNMAPPED, lru); in isolate_lru_folios()
2386 * folio_isolate_lru() - Try to isolate a folio from its LRU list.
2456 * won't get blocked by normal direct-reclaimers, forming a circular in too_many_isolated()
2459 if (gfp_has_io_fs(sc->gfp_mask)) in too_many_isolated()
2487 list_del(&folio->lru); in move_folios_to_lru()
2489 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
2491 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
2503 * list_add(&folio->lru,) in move_folios_to_lru()
2504 * list_add(&folio->lru,) in move_folios_to_lru()
2512 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
2514 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
2516 list_add(&folio->lru, &folios_to_free); in move_folios_to_lru()
2542 * If a kernel thread (such as nfsd for loop-back mounts) services a backing
2548 return !(current->flags & PF_LOCAL_THROTTLE); in current_may_throttle()
2584 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2596 spin_unlock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2603 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2606 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_inactive_list()
2612 spin_unlock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2614 lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed); in shrink_inactive_list()
2644 sc->nr.dirty += stat.nr_dirty; in shrink_inactive_list()
2645 sc->nr.congested += stat.nr_congested; in shrink_inactive_list()
2646 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; in shrink_inactive_list()
2647 sc->nr.writeback += stat.nr_writeback; in shrink_inactive_list()
2648 sc->nr.immediate += stat.nr_immediate; in shrink_inactive_list()
2649 sc->nr.taken += nr_taken; in shrink_inactive_list()
2651 sc->nr.file_taken += nr_taken; in shrink_inactive_list()
2653 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in shrink_inactive_list()
2654 nr_scanned, nr_reclaimed, &stat, sc->priority, file); in shrink_inactive_list()
2669 * It is safe to rely on the active flag against the non-LRU folios in here
2670 * because nobody will play with that bit on a non-LRU folio.
2672 * The downside is that we have to touch folio->_refcount against each folio.
2673 * But we had to alter folio->flags anyway.
2693 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2704 spin_unlock_irq(&lruvec->lru_lock); in shrink_active_list()
2711 list_del(&folio->lru); in shrink_active_list()
2727 if (folio_referenced(folio, 0, sc->target_mem_cgroup, in shrink_active_list()
2730 * Identify referenced, file-backed active folios and in shrink_active_list()
2734 * are not likely to be evicted by use-once streaming in shrink_active_list()
2740 list_add(&folio->lru, &l_active); in shrink_active_list()
2745 folio_clear_active(folio); /* we are de-activating */ in shrink_active_list()
2747 list_add(&folio->lru, &l_inactive); in shrink_active_list()
2753 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2763 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_active_list()
2764 spin_unlock_irq(&lruvec->lru_lock); in shrink_active_list()
2770 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, in shrink_active_list()
2771 nr_deactivate, nr_rotated, sc->priority, file); in shrink_active_list()
2791 list_del(&folio->lru); in reclaim_folio_list()
2816 list_move(&folio->lru, &node_folio_list); in reclaim_pages()
2835 if (sc->may_deactivate & (1 << is_file_lru(lru))) in shrink_list()
2838 sc->skipped_deactivate = 1; in shrink_list()
2850 * to the established workingset on the scan-resistant active list,
2864 * -------------------------------------
2883 gb = (inactive + active) >> (30 - PAGE_SHIFT); in inactive_is_low()
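The shift above converts the combined list size to gigabytes; from what I recall of the upstream heuristic, the target active:inactive ratio then grows with the square root of that size (roughly 3 at 1 GB and 10 at 10 GB, which is what the truncated table in this comment belongs to). A small self-contained sketch of that curve, under the assumption that the ratio is int_sqrt(10 * gb) with a floor of 1:

#include <stdio.h>

static unsigned long int_sqrt(unsigned long x)
{
        unsigned long r = 0;

        while ((r + 1) * (r + 1) <= x)
                r++;
        return r;
}

/* Assumed target active:inactive ratio as a function of total list size in GB. */
static unsigned long inactive_ratio(unsigned long gb)
{
        return gb ? int_sqrt(10 * gb) : 1;
}

int main(void)
{
        unsigned long gb;

        for (gb = 1; gb <= 1000; gb *= 10)
                printf("%5lu GB -> ratio %lu\n", gb, inactive_ratio(gb));
        return 0;
}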
2907 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in prepare_scan_count()
2910 * Flush the memory cgroup stats, so that we read accurate per-memcg in prepare_scan_count()
2918 spin_lock_irq(&target_lruvec->lru_lock); in prepare_scan_count()
2919 sc->anon_cost = target_lruvec->anon_cost; in prepare_scan_count()
2920 sc->file_cost = target_lruvec->file_cost; in prepare_scan_count()
2921 spin_unlock_irq(&target_lruvec->lru_lock); in prepare_scan_count()
2927 if (!sc->force_deactivate) { in prepare_scan_count()
2937 if (refaults != target_lruvec->refaults[WORKINGSET_ANON] || in prepare_scan_count()
2939 sc->may_deactivate |= DEACTIVATE_ANON; in prepare_scan_count()
2941 sc->may_deactivate &= ~DEACTIVATE_ANON; in prepare_scan_count()
2945 if (refaults != target_lruvec->refaults[WORKINGSET_FILE] || in prepare_scan_count()
2947 sc->may_deactivate |= DEACTIVATE_FILE; in prepare_scan_count()
2949 sc->may_deactivate &= ~DEACTIVATE_FILE; in prepare_scan_count()
2951 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; in prepare_scan_count()
2959 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE)) in prepare_scan_count()
2960 sc->cache_trim_mode = 1; in prepare_scan_count()
2962 sc->cache_trim_mode = 0; in prepare_scan_count()
2978 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); in prepare_scan_count()
2983 struct zone *zone = &pgdat->node_zones[z]; in prepare_scan_count()
2998 sc->file_is_tiny = in prepare_scan_count()
3000 !(sc->may_deactivate & DEACTIVATE_ANON) && in prepare_scan_count()
3001 anon >> sc->priority; in prepare_scan_count()
3026 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { in get_scan_count()
3048 if (!sc->priority && swappiness) { in get_scan_count()
3054 * If the system is almost out of file pages, force-scan anon. in get_scan_count()
3056 if (sc->file_is_tiny) { in get_scan_count()
3065 if (sc->cache_trim_mode) { in get_scan_count()
3086 total_cost = sc->anon_cost + sc->file_cost; in get_scan_count()
3087 anon_cost = total_cost + sc->anon_cost; in get_scan_count()
3088 file_cost = total_cost + sc->file_cost; in get_scan_count()
3094 fp = (200 - swappiness) * (total_cost + 1); in get_scan_count()
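The fp line above is one half of the anon/file balancing in get_scan_count(): swappiness splits 200 points between the two types, and each share is then divided by that type's recent relative reclaim cost. The division step is not part of this listing, so the sketch below fills it in from memory and should be read as approximate.

#include <stdio.h>

int main(void)
{
        /* made-up recent reclaim costs: anon has been expensive, file cheap */
        unsigned long anon_cost = 300, file_cost = 100;
        unsigned long total_cost = anon_cost + file_cost;
        int swappiness = 60;

        unsigned long ap = swappiness * (total_cost + 1);
        unsigned long fp = (200 - swappiness) * (total_cost + 1);

        ap /= total_cost + anon_cost + 1;       /* anon share shrinks with its cost */
        fp /= total_cost + file_cost + 1;       /* file share shrinks with its cost */

        printf("anon weight %lu, file weight %lu\n", ap, fp);
        return 0;
}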
3107 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); in get_scan_count()
3108 mem_cgroup_protection(sc->target_mem_cgroup, memcg, in get_scan_count()
3118 * becomes extremely binary -- from nothing as we in get_scan_count()
3133 * the best-effort low protection. However, we still in get_scan_count()
3134 * ideally want to honor how well-behaved groups are in in get_scan_count()
3145 if (!sc->memcg_low_reclaim && low > min) { in get_scan_count()
3147 sc->memcg_low_skipped = 1; in get_scan_count()
3155 scan = lruvec_size - lruvec_size * protection / in get_scan_count()
3161 * sc->priority further than desirable. in get_scan_count()
3168 scan >>= sc->priority; in get_scan_count()
3187 * round-off error. in get_scan_count()
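The truncated scan formula a few lines up applies memcg protection: only the part of the lruvec that corresponds to usage above the protected amount stays eligible, and the result is then shifted down by the priority. A worked example, under the assumption that the divisor is the cgroup size plus one:

#include <stdio.h>

int main(void)
{
        unsigned long lruvec_size = 100000;     /* pages on this LRU list */
        unsigned long cgroup_size = 200000;     /* pages charged to the cgroup */
        unsigned long protection  = 120000;     /* effective memory.min/low */
        int priority = 12;                      /* DEF_PRIORITY */

        unsigned long scan = lruvec_size -
                lruvec_size * protection / (cgroup_size + 1);

        printf("%lu of %lu pages eligible, ~%lu scanned at priority %d\n",
               scan, lruvec_size, scan >> priority, priority);
        return 0;
}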
3221 return can_demote(pgdat->node_id, sc); in can_age_anon_pages()
3251 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
3255 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
3256 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
3273 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; in get_lruvec()
3276 if (!lruvec->pgdat) in get_lruvec()
3277 lruvec->pgdat = pgdat; in get_lruvec()
3284 return &pgdat->__lruvec; in get_lruvec()
3292 if (!sc->may_swap) in get_swappiness()
3295 if (!can_demote(pgdat->node_id, sc) && in get_swappiness()
3304 return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; in get_nr_gens()
3326 * To get rid of non-leaf entries that no longer have enough leaf entries, the
3327 * aging uses the double-buffering technique to flip to the other filter each
3328 * time it produces a new generation. For non-leaf entries that have enough
3354 key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1); in get_item_key()
3364 filter = READ_ONCE(lruvec->mm_state.filters[gen]); in test_bloom_filter()
3379 filter = READ_ONCE(lruvec->mm_state.filters[gen]); in update_bloom_filter()
3396 filter = lruvec->mm_state.filters[gen]; in reset_bloom_filter()
3404 WRITE_ONCE(lruvec->mm_state.filters[gen], filter); in reset_bloom_filter()
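The lines above are from the MGLRU walk's Bloom filters: two hashed keys per non-leaf entry, and one filter per generation parity so the next generation always starts from an empty filter (the double buffering the comment describes). Below is a minimal userspace sketch of that idea; the sizes, hash and names are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOOM_SHIFT     15
#define BLOOM_BITS      (1UL << BLOOM_SHIFT)

static uint8_t filters[2][BLOOM_BITS / 8];      /* one bitmap per generation parity */

static void get_keys(const void *item, uint32_t key[2])
{
        uint64_t h = (uintptr_t)item * 0x9E3779B97F4A7C15ULL;   /* cheap hash */

        key[0] = h & (BLOOM_BITS - 1);
        key[1] = (h >> BLOOM_SHIFT) & (BLOOM_BITS - 1);
}

static void bloom_set(unsigned long seq, const void *item)
{
        uint32_t key[2];

        get_keys(item, key);
        filters[seq & 1][key[0] / 8] |= 1 << (key[0] % 8);
        filters[seq & 1][key[1] / 8] |= 1 << (key[1] % 8);
}

static bool bloom_test(unsigned long seq, const void *item)
{
        uint32_t key[2];
        const uint8_t *f = filters[seq & 1];

        get_keys(item, key);
        return (f[key[0] / 8] & (1 << (key[0] % 8))) &&
               (f[key[1] / 8] & (1 << (key[1] % 8)));
}

int main(void)
{
        int pmd;        /* stand-in for the address of a non-leaf entry */

        bloom_set(3, &pmd);                             /* remembered during generation 3 */
        memset(filters[4 & 1], 0, sizeof(filters[0]));  /* flip to generation 4: starts empty */
        printf("%d %d\n", bloom_test(3, &pmd), bloom_test(4, &pmd));    /* prints "1 0" */
        return 0;
}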
3420 return &memcg->mm_list; in get_mm_list()
3433 VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list)); in lru_gen_add_mm()
3435 VM_WARN_ON_ONCE(mm->lru_gen.memcg); in lru_gen_add_mm()
3436 mm->lru_gen.memcg = memcg; in lru_gen_add_mm()
3438 spin_lock(&mm_list->lock); in lru_gen_add_mm()
3444 if (lruvec->mm_state.tail == &mm_list->fifo) in lru_gen_add_mm()
3445 lruvec->mm_state.tail = &mm->lru_gen.list; in lru_gen_add_mm()
3448 list_add_tail(&mm->lru_gen.list, &mm_list->fifo); in lru_gen_add_mm()
3450 spin_unlock(&mm_list->lock); in lru_gen_add_mm()
3459 if (list_empty(&mm->lru_gen.list)) in lru_gen_del_mm()
3463 memcg = mm->lru_gen.memcg; in lru_gen_del_mm()
3467 spin_lock(&mm_list->lock); in lru_gen_del_mm()
3473 if (lruvec->mm_state.head == &mm->lru_gen.list) in lru_gen_del_mm()
3474 lruvec->mm_state.head = lruvec->mm_state.head->prev; in lru_gen_del_mm()
3477 if (lruvec->mm_state.tail == &mm->lru_gen.list) in lru_gen_del_mm()
3478 lruvec->mm_state.tail = lruvec->mm_state.tail->next; in lru_gen_del_mm()
3481 list_del_init(&mm->lru_gen.list); in lru_gen_del_mm()
3483 spin_unlock(&mm_list->lock); in lru_gen_del_mm()
3486 mem_cgroup_put(mm->lru_gen.memcg); in lru_gen_del_mm()
3487 mm->lru_gen.memcg = NULL; in lru_gen_del_mm()
3495 struct task_struct *task = rcu_dereference_protected(mm->owner, true); in lru_gen_migrate_mm()
3497 VM_WARN_ON_ONCE(task->mm != mm); in lru_gen_migrate_mm()
3498 lockdep_assert_held(&task->alloc_lock); in lru_gen_migrate_mm()
3505 if (!mm->lru_gen.memcg) in lru_gen_migrate_mm()
3511 if (memcg == mm->lru_gen.memcg) in lru_gen_migrate_mm()
3514 VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); in lru_gen_migrate_mm()
3526 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); in reset_mm_stats()
3529 hist = lru_hist_from_seq(walk->max_seq); in reset_mm_stats()
3532 WRITE_ONCE(lruvec->mm_state.stats[hist][i], in reset_mm_stats()
3533 lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]); in reset_mm_stats()
3534 walk->mm_stats[i] = 0; in reset_mm_stats()
3539 hist = lru_hist_from_seq(lruvec->mm_state.seq + 1); in reset_mm_stats()
3542 WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0); in reset_mm_stats()
3550 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in should_skip_mm()
3551 int key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); in should_skip_mm()
3553 if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap)) in should_skip_mm()
3556 clear_bit(key, &mm->lru_gen.bitmap); in should_skip_mm()
3558 for (type = !walk->can_swap; type < ANON_AND_FILE; type++) { in should_skip_mm()
3578 struct lru_gen_mm_state *mm_state = &lruvec->mm_state; in iterate_mm_list()
3581 * mm_state->seq is incremented after each iteration of mm_list. There in iterate_mm_list()
3590 spin_lock(&mm_list->lock); in iterate_mm_list()
3592 VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq); in iterate_mm_list()
3594 if (walk->max_seq <= mm_state->seq) in iterate_mm_list()
3597 if (!mm_state->head) in iterate_mm_list()
3598 mm_state->head = &mm_list->fifo; in iterate_mm_list()
3600 if (mm_state->head == &mm_list->fifo) in iterate_mm_list()
3604 mm_state->head = mm_state->head->next; in iterate_mm_list()
3605 if (mm_state->head == &mm_list->fifo) { in iterate_mm_list()
3606 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); in iterate_mm_list()
3612 if (!mm_state->tail || mm_state->tail == mm_state->head) { in iterate_mm_list()
3613 mm_state->tail = mm_state->head->next; in iterate_mm_list()
3614 walk->force_scan = true; in iterate_mm_list()
3617 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); in iterate_mm_list()
3625 spin_unlock(&mm_list->lock); in iterate_mm_list()
3628 reset_bloom_filter(lruvec, walk->max_seq + 1); in iterate_mm_list()
3643 struct lru_gen_mm_state *mm_state = &lruvec->mm_state; in iterate_mm_list_nowalk()
3645 spin_lock(&mm_list->lock); in iterate_mm_list_nowalk()
3647 VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq); in iterate_mm_list_nowalk()
3649 if (max_seq > mm_state->seq) { in iterate_mm_list_nowalk()
3650 mm_state->head = NULL; in iterate_mm_list_nowalk()
3651 mm_state->tail = NULL; in iterate_mm_list_nowalk()
3652 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); in iterate_mm_list_nowalk()
3657 spin_unlock(&mm_list->lock); in iterate_mm_list_nowalk()
3667 * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
3682 * 1. The D term may discount the other two terms over time so that long-lived
3694 struct lru_gen_folio *lrugen = &lruvec->lrugen; in read_ctrl_pos()
3695 int hist = lru_hist_from_seq(lrugen->min_seq[type]); in read_ctrl_pos()
3697 pos->refaulted = lrugen->avg_refaulted[type][tier] + in read_ctrl_pos()
3698 atomic_long_read(&lrugen->refaulted[hist][type][tier]); in read_ctrl_pos()
3699 pos->total = lrugen->avg_total[type][tier] + in read_ctrl_pos()
3700 atomic_long_read(&lrugen->evicted[hist][type][tier]); in read_ctrl_pos()
3702 pos->total += lrugen->protected[hist][type][tier - 1]; in read_ctrl_pos()
3703 pos->gain = gain; in read_ctrl_pos()
3709 struct lru_gen_folio *lrugen = &lruvec->lrugen; in reset_ctrl_pos()
3711 unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1; in reset_ctrl_pos()
3713 lockdep_assert_held(&lruvec->lru_lock); in reset_ctrl_pos()
3724 sum = lrugen->avg_refaulted[type][tier] + in reset_ctrl_pos()
3725 atomic_long_read(&lrugen->refaulted[hist][type][tier]); in reset_ctrl_pos()
3726 WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2); in reset_ctrl_pos()
3728 sum = lrugen->avg_total[type][tier] + in reset_ctrl_pos()
3729 atomic_long_read(&lrugen->evicted[hist][type][tier]); in reset_ctrl_pos()
3731 sum += lrugen->protected[hist][type][tier - 1]; in reset_ctrl_pos()
3732 WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2); in reset_ctrl_pos()
3736 atomic_long_set(&lrugen->refaulted[hist][type][tier], 0); in reset_ctrl_pos()
3737 atomic_long_set(&lrugen->evicted[hist][type][tier], 0); in reset_ctrl_pos()
3739 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0); in reset_ctrl_pos()
3750 return pv->refaulted < MIN_LRU_BATCH || in positive_ctrl_err()
3751 pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <= in positive_ctrl_err()
3752 (sp->refaulted + 1) * pv->total * pv->gain; in positive_ctrl_err()
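positive_ctrl_err() above compares two refault rates without dividing: the pending position pv is acceptable if it refaulted fewer than MIN_LRU_BATCH times, or if its refaulted/total ratio, weighted by gain, is no worse than the sampled position sp. A standalone worked example, assuming MIN_LRU_BATCH is BITS_PER_LONG (64 here):

#include <stdbool.h>
#include <stdio.h>

#define MIN_LRU_BATCH   64      /* assumed value of BITS_PER_LONG on this build */

struct ctrl_pos {
        unsigned long refaulted;
        unsigned long total;
        int gain;
};

static bool positive_ctrl_err(const struct ctrl_pos *sp, const struct ctrl_pos *pv)
{
        return pv->refaulted < MIN_LRU_BATCH ||
               pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <=
               (sp->refaulted + 1) * pv->total * pv->gain;
}

int main(void)
{
        struct ctrl_pos sp = { .refaulted = 100, .total = 10000, .gain = 1 };
        struct ctrl_pos pv = { .refaulted = 500, .total = 10000, .gain = 1 };

        /* pv refaults five times as often as sp, so the check fails (prints 0) */
        printf("%d\n", positive_ctrl_err(&sp, &pv));
        return 0;
}

Read against the tier-selection and eviction fragments further down, a false result here appears to be what keeps a tier that refaults more than the base tier protected for another generation.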
3762 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_update_gen()
3777 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_update_gen()
3779 return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; in folio_update_gen()
3786 struct lru_gen_folio *lrugen = &lruvec->lrugen; in folio_inc_gen()
3787 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); in folio_inc_gen()
3788 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_inc_gen()
3793 new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; in folio_inc_gen()
3805 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_inc_gen()
3822 walk->batched++; in update_batch_size()
3824 walk->nr_pages[old_gen][type][zone] -= delta; in update_batch_size()
3825 walk->nr_pages[new_gen][type][zone] += delta; in update_batch_size()
3831 struct lru_gen_folio *lrugen = &lruvec->lrugen; in reset_batch_size()
3833 walk->batched = 0; in reset_batch_size()
3837 int delta = walk->nr_pages[gen][type][zone]; in reset_batch_size()
3842 walk->nr_pages[gen][type][zone] = 0; in reset_batch_size()
3843 WRITE_ONCE(lrugen->nr_pages[gen][type][zone], in reset_batch_size()
3844 lrugen->nr_pages[gen][type][zone] + delta); in reset_batch_size()
3855 struct vm_area_struct *vma = args->vma; in should_skip_vma()
3856 struct lru_gen_mm_walk *walk = args->private; in should_skip_vma()
3867 if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) in should_skip_vma()
3870 if (vma == get_gate_vma(vma->vm_mm)) in should_skip_vma()
3874 return !walk->can_swap; in should_skip_vma()
3876 if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping)) in should_skip_vma()
3879 mapping = vma->vm_file->f_mapping; in should_skip_vma()
3884 return !walk->can_swap; in should_skip_vma()
3887 return !mapping->a_ops->read_folio; in should_skip_vma()
3891 * Some userspace memory allocators map many single-page VMAs. Instead of
3900 VMA_ITERATOR(vmi, args->mm, start); in get_next_vma()
3905 for_each_vma(vmi, args->vma) { in get_next_vma()
3906 if (end && end <= args->vma->vm_start) in get_next_vma()
3909 if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) in get_next_vma()
3912 *vm_start = max(start, args->vma->vm_start); in get_next_vma()
3913 *vm_end = min(end - 1, args->vma->vm_end - 1) + 1; in get_next_vma()
3925 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); in get_pte_pfn()
3928 return -1; in get_pte_pfn()
3931 return -1; in get_pte_pfn()
3934 return -1; in get_pte_pfn()
3944 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); in get_pmd_pfn()
3947 return -1; in get_pmd_pfn()
3950 return -1; in get_pmd_pfn()
3953 return -1; in get_pmd_pfn()
3965 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in get_pfn_folio()
3969 if (folio_nid(folio) != pgdat->node_id) in get_pfn_folio()
3999 struct lru_gen_mm_walk *walk = args->private; in walk_pte_range()
4000 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pte_range()
4001 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pte_range()
4002 int old_gen, new_gen = lru_gen_from_seq(walk->max_seq); in walk_pte_range()
4004 pte = pte_offset_map_nolock(args->mm, pmd, start & PMD_MASK, &ptl); in walk_pte_range()
4020 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pte_range()
4022 pfn = get_pte_pfn(ptent, args->vma, addr); in walk_pte_range()
4023 if (pfn == -1) in walk_pte_range()
4027 walk->mm_stats[MM_LEAF_OLD]++; in walk_pte_range()
4031 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); in walk_pte_range()
4035 if (!ptep_test_and_clear_young(args->vma, addr, pte + i)) in walk_pte_range()
4039 walk->mm_stats[MM_LEAF_YOUNG]++; in walk_pte_range()
4067 struct lru_gen_mm_walk *walk = args->private; in walk_pmd_range_locked()
4068 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pmd_range_locked()
4069 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range_locked()
4070 int old_gen, new_gen = lru_gen_from_seq(walk->max_seq); in walk_pmd_range_locked()
4075 if (*first == -1) { in walk_pmd_range_locked()
4081 i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first); in walk_pmd_range_locked()
4083 __set_bit(i - 1, bitmap); in walk_pmd_range_locked()
4089 ptl = pmd_lockptr(args->mm, pmd); in walk_pmd_range_locked()
4103 if (pfn == -1) in walk_pmd_range_locked()
4112 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); in walk_pmd_range_locked()
4119 walk->mm_stats[MM_LEAF_YOUNG]++; in walk_pmd_range_locked()
4136 *first = -1; in walk_pmd_range_locked()
4154 unsigned long first = -1; in walk_pmd_range()
4155 struct lru_gen_mm_walk *walk = args->private; in walk_pmd_range()
4167 vma = args->vma; in walk_pmd_range()
4174 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pmd_range()
4181 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range()
4183 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pmd_range()
4186 walk->mm_stats[MM_LEAF_OLD]++; in walk_pmd_range()
4191 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in walk_pmd_range()
4198 walk->mm_stats[MM_NONLEAF_TOTAL]++; in walk_pmd_range()
4207 if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i)) in walk_pmd_range()
4210 walk->mm_stats[MM_NONLEAF_FOUND]++; in walk_pmd_range()
4215 walk->mm_stats[MM_NONLEAF_ADDED]++; in walk_pmd_range()
4218 update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i); in walk_pmd_range()
4221 walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first); in walk_pmd_range()
4234 struct lru_gen_mm_walk *walk = args->private; in walk_pud_range()
4250 if (need_resched() || walk->batched >= MAX_LRU_BATCH) { in walk_pud_range()
4261 if (!end || !args->vma) in walk_pud_range()
4264 walk->next_addr = max(end, args->vma->vm_start); in walk_pud_range()
4266 return -EAGAIN; in walk_pud_range()
4280 walk->next_addr = FIRST_USER_ADDRESS; in walk_mm()
4285 err = -EBUSY; in walk_mm()
4288 if (walk->max_seq != max_seq) in walk_mm()
4297 err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk); in walk_mm()
4304 if (walk->batched) { in walk_mm()
4305 spin_lock_irq(&lruvec->lru_lock); in walk_mm()
4307 spin_unlock_irq(&lruvec->lru_lock); in walk_mm()
4311 } while (err == -EAGAIN); in walk_mm()
4316 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; in set_mm_walk()
4321 walk = &pgdat->mm_walk; in set_mm_walk()
4328 current->reclaim_state->mm_walk = walk; in set_mm_walk()
4335 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; in clear_mm_walk()
4337 VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages))); in clear_mm_walk()
4338 VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats))); in clear_mm_walk()
4340 current->reclaim_state->mm_walk = NULL; in clear_mm_walk()
4350 struct lru_gen_folio *lrugen = &lruvec->lrugen; in inc_min_seq()
4351 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); in inc_min_seq()
4358 struct list_head *head = &lrugen->folios[old_gen][type][zone]; in inc_min_seq()
4369 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); in inc_min_seq()
4371 if (!--remaining) in inc_min_seq()
4377 WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1); in inc_min_seq()
4386 struct lru_gen_folio *lrugen = &lruvec->lrugen; in try_to_inc_min_seq()
4393 while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) { in try_to_inc_min_seq()
4397 if (!list_empty(&lrugen->folios[gen][type][zone])) in try_to_inc_min_seq()
4410 min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]); in try_to_inc_min_seq()
4414 if (min_seq[type] == lrugen->min_seq[type]) in try_to_inc_min_seq()
4418 WRITE_ONCE(lrugen->min_seq[type], min_seq[type]); in try_to_inc_min_seq()
4429 struct lru_gen_folio *lrugen = &lruvec->lrugen; in inc_max_seq()
4431 spin_lock_irq(&lruvec->lru_lock); in inc_max_seq()
4435 for (type = ANON_AND_FILE - 1; type >= 0; type--) { in inc_max_seq()
4444 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
4455 prev = lru_gen_from_seq(lrugen->max_seq - 1); in inc_max_seq()
4456 next = lru_gen_from_seq(lrugen->max_seq + 1); in inc_max_seq()
4461 long delta = lrugen->nr_pages[prev][type][zone] - in inc_max_seq()
4462 lrugen->nr_pages[next][type][zone]; in inc_max_seq()
4468 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); in inc_max_seq()
4475 WRITE_ONCE(lrugen->timestamps[next], jiffies); in inc_max_seq()
4477 smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); in inc_max_seq()
4479 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
4488 struct lru_gen_folio *lrugen = &lruvec->lrugen; in try_to_inc_max_seq()
4490 VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq)); in try_to_inc_max_seq()
4493 if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) { in try_to_inc_max_seq()
4515 walk->lruvec = lruvec; in try_to_inc_max_seq()
4516 walk->max_seq = max_seq; in try_to_inc_max_seq()
4517 walk->can_swap = can_swap; in try_to_inc_max_seq()
4518 walk->force_scan = force_scan; in try_to_inc_max_seq()
4541 if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH) in set_initial_priority()
4549 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in set_initial_priority()
4552 /* round down reclaimable and round up sc->nr_to_reclaim */ in set_initial_priority()
4553 priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1); in set_initial_priority()
4559 sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY); in set_initial_priority()
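The fls_long() expression above estimates a starting priority so that reclaimable >> priority lands near nr_to_reclaim before being clamped to the normal range. A worked example of that estimate, with DEF_PRIORITY assumed to be 12 and fls_long() reimplemented for the sketch:

#include <stdio.h>

#define DEF_PRIORITY    12

static int fls_long(unsigned long x)    /* 1-based index of the highest set bit */
{
        int i = 0;

        while (x) {
                x >>= 1;
                i++;
        }
        return i;
}

int main(void)
{
        unsigned long reclaimable = 1UL << 20;  /* ~1M reclaimable pages */
        unsigned long nr_to_reclaim = 32;
        int priority = fls_long(reclaimable) - 1 - fls_long(nr_to_reclaim - 1);

        if (priority < DEF_PRIORITY / 2)
                priority = DEF_PRIORITY / 2;
        if (priority > DEF_PRIORITY)
                priority = DEF_PRIORITY;

        /* 20 - 5 = 15, clamped to 12: the first pass targets ~256 pages */
        printf("priority %d, first-pass target ~%lu pages\n",
               priority, reclaimable >> priority);
        return 0;
}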
4567 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lruvec_is_sizable()
4579 total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in lruvec_is_sizable()
4584 return mem_cgroup_online(memcg) ? (total >> sc->priority) : total; in lruvec_is_sizable()
4603 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lruvec_is_reclaimable()
4638 .gfp_mask = sc->gfp_mask, in lru_gen_age_node()
4665 pte_t *pte = pvmw->pte; in lru_gen_look_around()
4666 unsigned long addr = pvmw->address; in lru_gen_look_around()
4667 struct vm_area_struct *vma = pvmw->vma; in lru_gen_look_around()
4668 struct folio *folio = pfn_folio(pvmw->pfn); in lru_gen_look_around()
4676 lockdep_assert_held(pvmw->ptl); in lru_gen_look_around()
4679 if (spin_is_contended(pvmw->ptl)) in lru_gen_look_around()
4683 if (vma->vm_flags & VM_SPECIAL) in lru_gen_look_around()
4687 walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL; in lru_gen_look_around()
4689 start = max(addr & PMD_MASK, vma->vm_start); in lru_gen_look_around()
4690 end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1; in lru_gen_look_around()
4692 if (end - start > MIN_LRU_BATCH * PAGE_SIZE) { in lru_gen_look_around()
4693 if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2) in lru_gen_look_around()
4695 else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2) in lru_gen_look_around()
4696 start = end - MIN_LRU_BATCH * PAGE_SIZE; in lru_gen_look_around()
4698 start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2; in lru_gen_look_around()
4709 pte -= (addr - start) / PAGE_SIZE; in lru_gen_look_around()
4716 if (pfn == -1) in lru_gen_look_around()
4756 update_bloom_filter(lruvec, max_seq, pvmw->pmd); in lru_gen_look_around()
4776 return READ_ONCE(lruvec->lrugen.seg); in lru_gen_memcg_seg()
4787 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4789 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); in lru_gen_rotate_memcg()
4792 new = old = lruvec->lrugen.gen; in lru_gen_rotate_memcg()
4800 new = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_rotate_memcg()
4802 new = get_memcg_gen(pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4806 WRITE_ONCE(lruvec->lrugen.seg, seg); in lru_gen_rotate_memcg()
4807 WRITE_ONCE(lruvec->lrugen.gen, new); in lru_gen_rotate_memcg()
4809 hlist_nulls_del_rcu(&lruvec->lrugen.list); in lru_gen_rotate_memcg()
4812 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4814 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4816 pgdat->memcg_lru.nr_memcgs[old]--; in lru_gen_rotate_memcg()
4817 pgdat->memcg_lru.nr_memcgs[new]++; in lru_gen_rotate_memcg()
4819 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_rotate_memcg()
4820 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4822 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4835 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4837 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list)); in lru_gen_online_memcg()
4839 gen = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_online_memcg()
4841 lruvec->lrugen.gen = gen; in lru_gen_online_memcg()
4843 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]); in lru_gen_online_memcg()
4844 pgdat->memcg_lru.nr_memcgs[gen]++; in lru_gen_online_memcg()
4846 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4870 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
4872 if (hlist_nulls_unhashed(&lruvec->lrugen.list)) in lru_gen_release_memcg()
4875 gen = lruvec->lrugen.gen; in lru_gen_release_memcg()
4877 hlist_nulls_del_init_rcu(&lruvec->lrugen.list); in lru_gen_release_memcg()
4878 pgdat->memcg_lru.nr_memcgs[gen]--; in lru_gen_release_memcg()
4880 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_release_memcg()
4881 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_release_memcg()
4883 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
4919 struct lru_gen_folio *lrugen = &lruvec->lrugen; in sort_folio()
4943 if (gen != lru_gen_from_seq(lrugen->min_seq[type])) { in sort_folio()
4944 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4950 int hist = lru_hist_from_seq(lrugen->min_seq[type]); in sort_folio()
4953 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4955 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], in sort_folio()
4956 lrugen->protected[hist][type][tier - 1] + delta); in sort_folio()
4961 if (zone > sc->reclaim_idx) { in sort_folio()
4963 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4971 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4983 if (!(sc->gfp_mask & __GFP_IO) && in isolate_folio()
5000 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); in isolate_folio()
5022 struct lru_gen_folio *lrugen = &lruvec->lrugen; in scan_folios()
5030 gen = lru_gen_from_seq(lrugen->min_seq[type]); in scan_folios()
5032 for (i = MAX_NR_ZONES; i > 0; i--) { in scan_folios()
5035 int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES; in scan_folios()
5036 struct list_head *head = &lrugen->folios[gen][type][zone]; in scan_folios()
5052 list_add(&folio->lru, list); in scan_folios()
5055 list_move(&folio->lru, &moved); in scan_folios()
5059 if (!--remaining || max(isolated, skipped) >= MIN_LRU_BATCH) in scan_folios()
5105 return tier - 1; in get_tier_idx()
5112 int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness }; in get_type_to_scan()
5131 *tier_idx = tier - 1; in get_type_to_scan()
5142 int tier = -1; in isolate_folios()
5170 tier = -1; in isolate_folios()
5194 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
5203 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
5209 sc->nr_reclaimed += reclaimed; in evict_folios()
5213 list_del(&folio->lru); in evict_folios()
5230 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, in evict_folios()
5236 list_move(&folio->lru, &clean); in evict_folios()
5239 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
5243 walk = current->reclaim_state->mm_walk; in evict_folios()
5244 if (walk && walk->batched) in evict_folios()
5253 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
5276 struct lru_gen_folio *lrugen = &lruvec->lrugen; in should_run_aging()
5295 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in should_run_aging()
5311 *nr_to_scan = total >> sc->priority; in should_run_aging()
5347 if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg)) in get_nr_to_scan()
5348 return -1; in get_nr_to_scan()
5354 if (sc->priority == DEF_PRIORITY) in get_nr_to_scan()
5358 return try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false) ? -1 : 0; in get_nr_to_scan()
5370 if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order))) in should_abort_scan()
5373 /* check the order to exclude compaction-induced reclaim */ in should_abort_scan()
5374 if (!current_is_kswapd() || sc->order) in should_abort_scan()
5380 for (i = 0; i <= sc->reclaim_idx; i++) { in should_abort_scan()
5381 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i; in should_abort_scan()
5384 if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0)) in should_abort_scan()
5399 if (swappiness && !(sc->gfp_mask & __GFP_IO)) in try_to_shrink_lruvec()
5430 unsigned long scanned = sc->nr_scanned; in shrink_one()
5431 unsigned long reclaimed = sc->nr_reclaimed; in shrink_one()
5449 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); in shrink_one()
5451 if (!sc->proactive) in shrink_one()
5452 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned, in shrink_one()
5453 sc->nr_reclaimed - reclaimed); in shrink_one()
5481 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); in shrink_many()
5489 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { in shrink_many()
5498 if (gen != READ_ONCE(lrugen->gen)) in shrink_many()
5545 VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap); in lru_gen_shrink_lruvec()
5551 set_mm_walk(NULL, sc->proactive); in lru_gen_shrink_lruvec()
5578 unsigned long reclaimed = sc->nr_reclaimed; in lru_gen_shrink_node()
5587 if (!sc->may_writepage || !sc->may_unmap) in lru_gen_shrink_node()
5594 set_mm_walk(pgdat, sc->proactive); in lru_gen_shrink_node()
5599 sc->nr_reclaimed = 0; in lru_gen_shrink_node()
5602 shrink_one(&pgdat->__lruvec, sc); in lru_gen_shrink_node()
5607 sc->nr_reclaimed += reclaimed; in lru_gen_shrink_node()
5613 if (sc->nr_reclaimed > reclaimed) in lru_gen_shrink_node()
5614 pgdat->kswapd_failures = 0; in lru_gen_shrink_node()
5623 struct lru_gen_folio *lrugen = &lruvec->lrugen; in state_is_valid()
5625 if (lrugen->enabled) { in state_is_valid()
5629 if (!list_empty(&lruvec->lists[lru])) in state_is_valid()
5636 if (!list_empty(&lrugen->folios[gen][type][zone])) in state_is_valid()
5652 struct list_head *head = &lruvec->lists[lru]; in fill_evictable()
5661 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio); in fill_evictable()
5667 if (!--remaining) in fill_evictable()
5681 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone]; in drain_evictable()
5696 if (!--remaining) in drain_evictable()
5730 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5735 lruvec->lrugen.enabled = enabled; in lru_gen_change_state()
5738 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5740 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5743 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5764 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5771 return -EINVAL; in min_ttl_ms_store()
5796 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5806 caps = -1; in enabled_store()
5808 return -EINVAL; in enabled_store()
5846 m->private = kvmalloc(PATH_MAX, GFP_KERNEL); in lru_gen_seq_start()
5847 if (!m->private) in lru_gen_seq_start()
5848 return ERR_PTR(-ENOMEM); in lru_gen_seq_start()
5855 if (!nr_to_skip--) in lru_gen_seq_start()
5868 kvfree(m->private); in lru_gen_seq_stop()
5869 m->private = NULL; in lru_gen_seq_stop()
5874 int nid = lruvec_pgdat(v)->node_id; in lru_gen_seq_next()
5898 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_seq_show_full()
5908 n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]); in lru_gen_seq_show_full()
5909 n[1] = READ_ONCE(lrugen->avg_total[type][tier]); in lru_gen_seq_show_full()
5912 n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]); in lru_gen_seq_show_full()
5913 n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]); in lru_gen_seq_show_full()
5915 n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]); in lru_gen_seq_show_full()
5931 n = READ_ONCE(lruvec->mm_state.stats[hist][i]); in lru_gen_seq_show_full()
5934 n = READ_ONCE(lruvec->mm_state.stats[hist][i]); in lru_gen_seq_show_full()
5942 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5946 bool full = !debugfs_real_fops(m->file)->write; in lru_gen_seq_show()
5948 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_seq_show()
5949 int nid = lruvec_pgdat(lruvec)->node_id; in lru_gen_seq_show()
5955 const char *path = memcg ? m->private : ""; in lru_gen_seq_show()
5959 cgroup_path(memcg->css.cgroup, m->private, PATH_MAX); in lru_gen_seq_show()
5969 seq = max_seq - MAX_NR_GENS + 1; in lru_gen_seq_show()
5976 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lru_gen_seq_show()
5978 seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth)); in lru_gen_seq_show()
5985 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in lru_gen_seq_show()
6016 return -EINVAL; in run_aging()
6018 if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq) in run_aging()
6019 return -ERANGE; in run_aging()
6032 return -EINVAL; in run_eviction()
6034 sc->nr_reclaimed = 0; in run_eviction()
6042 if (sc->nr_reclaimed >= nr_to_reclaim) in run_eviction()
6051 return -EINTR; in run_eviction()
6058 int err = -EINVAL; in run_cmd()
6062 return -EINVAL; in run_cmd()
6074 return -EINVAL; in run_cmd()
6091 case '-': in run_cmd()
6101 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
6109 int err = -EINVAL; in lru_gen_seq_write()
6114 .reclaim_idx = MAX_NR_ZONES - 1, in lru_gen_seq_write()
6120 return -ENOMEM; in lru_gen_seq_write()
6124 return -EFAULT; in lru_gen_seq_write()
6131 err = -ENOMEM; in lru_gen_seq_write()
6145 unsigned int swappiness = -1; in lru_gen_seq_write()
6146 unsigned long opt = -1; in lru_gen_seq_write()
6155 err = -EINVAL; in lru_gen_seq_write()
6202 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_init_lruvec()
6204 lrugen->max_seq = MIN_NR_GENS + 1; in lru_gen_init_lruvec()
6205 lrugen->enabled = lru_gen_enabled(); in lru_gen_init_lruvec()
6208 lrugen->timestamps[i] = jiffies; in lru_gen_init_lruvec()
6211 INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]); in lru_gen_init_lruvec()
6213 lruvec->mm_state.seq = MIN_NR_GENS; in lru_gen_init_lruvec()
6222 spin_lock_init(&pgdat->memcg_lru.lock); in lru_gen_init_pgdat()
6226 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i); in lru_gen_init_pgdat()
6232 INIT_LIST_HEAD(&memcg->mm_list.fifo); in lru_gen_init_memcg()
6233 spin_lock_init(&memcg->mm_list.lock); in lru_gen_init_memcg()
6241 VM_WARN_ON_ONCE(!list_empty(&memcg->mm_list.fifo)); in lru_gen_exit_memcg()
6246 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, in lru_gen_exit_memcg()
6247 sizeof(lruvec->lrugen.nr_pages))); in lru_gen_exit_memcg()
6249 lruvec->lrugen.list.next = LIST_POISON1; in lru_gen_exit_memcg()
6252 bitmap_free(lruvec->mm_state.filters[i]); in lru_gen_exit_memcg()
6253 lruvec->mm_state.filters[i] = NULL; in lru_gen_exit_memcg()
6298 unsigned long nr_to_reclaim = sc->nr_to_reclaim; in shrink_lruvec()
6324 sc->priority == DEF_PRIORITY); in shrink_lruvec()
6335 nr[lru] -= nr_to_scan; in shrink_lruvec()
6387 nr_scanned = targets[lru] - nr[lru]; in shrink_lruvec()
6388 nr[lru] = targets[lru] * (100 - percentage) / 100; in shrink_lruvec()
6389 nr[lru] -= min(nr[lru], nr_scanned); in shrink_lruvec()
6392 nr_scanned = targets[lru] - nr[lru]; in shrink_lruvec()
6393 nr[lru] = targets[lru] * (100 - percentage) / 100; in shrink_lruvec()
6394 nr[lru] -= min(nr[lru], nr_scanned); in shrink_lruvec()
6397 sc->nr_reclaimed += nr_reclaimed; in shrink_lruvec()
6412 if (gfp_compaction_allowed(sc->gfp_mask) && sc->order && in in_reclaim_compaction()
6413 (sc->order > PAGE_ALLOC_COSTLY_ORDER || in in_reclaim_compaction()
6414 sc->priority < DEF_PRIORITY - 2)) in in_reclaim_compaction()
6421 * Reclaim/compaction is used for high-order allocation requests. It reclaims
6422 * order-0 pages before compacting the zone. should_continue_reclaim() returns
6445 * first, by assuming that zero delta of sc->nr_scanned means full LRU in should_continue_reclaim()
6447 * where always a non-zero amount of pages were scanned. in should_continue_reclaim()
6453 for (z = 0; z <= sc->reclaim_idx; z++) { in should_continue_reclaim()
6454 struct zone *zone = &pgdat->node_zones[z]; in should_continue_reclaim()
6459 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), in should_continue_reclaim()
6460 sc->reclaim_idx, 0)) in should_continue_reclaim()
6463 if (compaction_suitable(zone, sc->order, sc->reclaim_idx)) in should_continue_reclaim()
6471 pages_for_compaction = compact_gap(sc->order); in should_continue_reclaim()
6473 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in should_continue_reclaim()
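pages_for_compaction above comes from compact_gap(); as far as I recall the upstream helper, that is simply twice the requested block size in base pages, giving the migration and free scanners room to work. A one-line sketch of that headroom calculation:

#include <stdio.h>

static unsigned long compact_gap(unsigned int order)
{
        return 2UL << order;    /* assumed form of the upstream helper */
}

int main(void)
{
        /* order-9 (a 2 MiB THP with 4 KiB pages) wants ~1024 free base pages */
        printf("%lu\n", compact_gap(9));
        return 0;
}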
6481 struct mem_cgroup *target_memcg = sc->target_mem_cgroup; in shrink_node_memcgs()
6491 * This loop can become CPU-bound when target memcgs in shrink_node_memcgs()
6492 * aren't eligible for reclaim - either because they in shrink_node_memcgs()
6513 if (!sc->memcg_low_reclaim) { in shrink_node_memcgs()
6514 sc->memcg_low_skipped = 1; in shrink_node_memcgs()
6520 reclaimed = sc->nr_reclaimed; in shrink_node_memcgs()
6521 scanned = sc->nr_scanned; in shrink_node_memcgs()
6525 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, in shrink_node_memcgs()
6526 sc->priority); in shrink_node_memcgs()
6529 if (!sc->proactive) in shrink_node_memcgs()
6530 vmpressure(sc->gfp_mask, memcg, false, in shrink_node_memcgs()
6531 sc->nr_scanned - scanned, in shrink_node_memcgs()
6532 sc->nr_reclaimed - reclaimed); in shrink_node_memcgs()
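shrink_node_memcgs() snapshots the running totals before shrinking each memcg so that only the per-memcg deltas are fed to vmpressure. The same snapshot-and-delta pattern in a standalone sketch; the structure and numbers are invented.

/* sketch-vmpressure-delta.c: illustrative only. */
#include <stdio.h>

struct sc_sketch {
	unsigned long nr_scanned;
	unsigned long nr_reclaimed;
};

/* Stand-in for shrinking one memcg's LRU lists and slab. */
static void shrink_one_memcg(struct sc_sketch *sc)
{
	sc->nr_scanned += 128;
	sc->nr_reclaimed += 32;
}

int main(void)
{
	struct sc_sketch sc = { 0, 0 };
	unsigned long scanned = sc.nr_scanned;		/* snapshot before */
	unsigned long reclaimed = sc.nr_reclaimed;

	shrink_one_memcg(&sc);

	/* Report only this memcg's contribution, not the running totals. */
	printf("delta scanned=%lu reclaimed=%lu\n",
	       sc.nr_scanned - scanned, sc.nr_reclaimed - reclaimed);
	return 0;
}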
6548 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in shrink_node()
6551 memset(&sc->nr, 0, sizeof(sc->nr)); in shrink_node()
6553 nr_reclaimed = sc->nr_reclaimed; in shrink_node()
6554 nr_scanned = sc->nr_scanned; in shrink_node()
6562 nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed; in shrink_node()
6565 if (!sc->proactive) in shrink_node()
6566 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, in shrink_node()
6567 sc->nr_scanned - nr_scanned, nr_node_reclaimed); in shrink_node()
6575 * it implies that the long-lived page allocation rate in shrink_node()
6590 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) in shrink_node()
6591 set_bit(PGDAT_WRITEBACK, &pgdat->flags); in shrink_node()
6594 if (sc->nr.unqueued_dirty == sc->nr.file_taken) in shrink_node()
6595 set_bit(PGDAT_DIRTY, &pgdat->flags); in shrink_node()
6604 if (sc->nr.immediate) in shrink_node()
6615 if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) { in shrink_node()
6617 set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags); in shrink_node()
6620 set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); in shrink_node()
6630 !sc->hibernation_mode && in shrink_node()
6631 (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) || in shrink_node()
6632 test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags))) in shrink_node()
6645 pgdat->kswapd_failures = 0; in shrink_node()
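The shrink_node() lines above turn the per-pass counters into node-wide hints: stalled on writeback, dirty but not queued for IO, or congested. A sketch of those threshold checks with invented counter values; the flag names are local stand-ins for the kernel's pgdat/lruvec bits.

/* sketch-congestion-flags.c: illustrative only. */
#include <stdio.h>

#define F_WRITEBACK	(1u << 0)	/* everything isolated was under writeback */
#define F_DIRTY		(1u << 1)	/* file pages dirty but not yet queued for IO */
#define F_CONGESTED	(1u << 2)	/* all dirty pages sat on congested devices */

struct pass_counters {
	unsigned long taken, writeback;
	unsigned long file_taken, unqueued_dirty;
	unsigned long dirty, congested;
};

static unsigned int classify(const struct pass_counters *nr)
{
	unsigned int flags = 0;

	if (nr->writeback && nr->writeback == nr->taken)
		flags |= F_WRITEBACK;
	if (nr->unqueued_dirty == nr->file_taken)
		flags |= F_DIRTY;
	if (nr->dirty && nr->dirty == nr->congested)
		flags |= F_CONGESTED;
	return flags;
}

int main(void)
{
	struct pass_counters nr = {
		.taken = 64, .writeback = 64,
		.file_taken = 32, .unqueued_dirty = 0,
		.dirty = 16, .congested = 16,
	};

	printf("flags=%#x\n", classify(&nr));	/* 0x5: writeback + congested */
	return 0;
}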
6649 * Returns true if compaction should go ahead for a costly-order request, or
6657 if (!gfp_compaction_allowed(sc->gfp_mask)) in compaction_ready()
6661 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), in compaction_ready()
6662 sc->reclaim_idx, 0)) in compaction_ready()
6666 if (!compaction_suitable(zone, sc->order, sc->reclaim_idx)) in compaction_ready()
6678 watermark = high_wmark_pages(zone) + compact_gap(sc->order); in compaction_ready()
6680 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); in compaction_ready()
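compaction_ready() above treats a costly-order request as satisfiable once free pages clear the high watermark plus compaction's working headroom. A simplified standalone version of that check, assuming the gap is about 2UL << order and ignoring the lowmem-reserve handling done by zone_watermark_ok_safe().

/* sketch-compaction-ready.c: illustrative only. */
#include <stdbool.h>
#include <stdio.h>

static bool compaction_ready_sketch(unsigned long free_pages,
				    unsigned long high_wmark,
				    unsigned int order)
{
	unsigned long watermark = high_wmark + (2UL << order);

	/* Enough free memory for compaction to work with? Then skip
	 * further reclaim in this zone and let compaction run. */
	return free_pages >= watermark;
}

int main(void)
{
	printf("%d\n", compaction_ready_sketch(6000, 4096, 9));	/* 1 */
	printf("%d\n", compaction_ready_sketch(4200, 4096, 9));	/* 0 */
	return 0;
}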
6689 if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) { in consider_reclaim_throttle()
6692 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS]; in consider_reclaim_throttle()
6709 if (sc->priority == 1 && !sc->nr_reclaimed) in consider_reclaim_throttle()
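consider_reclaim_throttle() above counts the pass as productive when more than one page in eight scanned was reclaimed, and only throttles at the last priority with nothing reclaimed. The ratio check, as a tiny sketch:

/* sketch-reclaim-progress.c: illustrative only. */
#include <stdbool.h>
#include <stdio.h>

static bool making_progress(unsigned long nr_reclaimed, unsigned long nr_scanned)
{
	return nr_reclaimed > (nr_scanned >> 3);	/* > 1 reclaimed per 8 scanned */
}

int main(void)
{
	printf("%d\n", making_progress(10, 64));	/* 1: wake throttled waiters */
	printf("%d\n", making_progress(1, 64));		/* 0: no progress yet */
	return 0;
}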
6714 * This is the direct reclaim path, for page-allocating processes. We only
6736 orig_mask = sc->gfp_mask; in shrink_zones()
6738 sc->gfp_mask |= __GFP_HIGHMEM; in shrink_zones()
6739 sc->reclaim_idx = gfp_zone(sc->gfp_mask); in shrink_zones()
6743 sc->reclaim_idx, sc->nodemask) { in shrink_zones()
6757 * non-zero order, only frequent costly order in shrink_zones()
6758 * reclamation is disruptive enough to become a in shrink_zones()
6763 sc->order > PAGE_ALLOC_COSTLY_ORDER && in shrink_zones()
6765 sc->compaction_ready = true; in shrink_zones()
6775 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
6785 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat, in shrink_zones()
6786 sc->order, sc->gfp_mask, in shrink_zones()
6788 sc->nr_reclaimed += nr_soft_reclaimed; in shrink_zones()
6789 sc->nr_scanned += nr_soft_scanned; in shrink_zones()
6794 first_pgdat = zone->zone_pgdat; in shrink_zones()
6797 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
6799 last_pgdat = zone->zone_pgdat; in shrink_zones()
6800 shrink_node(zone->zone_pgdat, sc); in shrink_zones()
6810 sc->gfp_mask = orig_mask; in shrink_zones()
6823 target_lruvec->refaults[WORKINGSET_ANON] = refaults; in snapshot_refaults()
6825 target_lruvec->refaults[WORKINGSET_FILE] = refaults; in snapshot_refaults()
6835 * high - the zone may be full of dirty or under-writeback pages, which this
6847 int initial_priority = sc->priority; in do_try_to_free_pages()
6855 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); in do_try_to_free_pages()
6858 if (!sc->proactive) in do_try_to_free_pages()
6859 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, in do_try_to_free_pages()
6860 sc->priority); in do_try_to_free_pages()
6861 sc->nr_scanned = 0; in do_try_to_free_pages()
6864 if (sc->nr_reclaimed >= sc->nr_to_reclaim) in do_try_to_free_pages()
6867 if (sc->compaction_ready) in do_try_to_free_pages()
6874 if (sc->priority < DEF_PRIORITY - 2) in do_try_to_free_pages()
6875 sc->may_writepage = 1; in do_try_to_free_pages()
6876 } while (--sc->priority >= 0); in do_try_to_free_pages()
6879 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, in do_try_to_free_pages()
6880 sc->nodemask) { in do_try_to_free_pages()
6881 if (zone->zone_pgdat == last_pgdat) in do_try_to_free_pages()
6883 last_pgdat = zone->zone_pgdat; in do_try_to_free_pages()
6885 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); in do_try_to_free_pages()
6890 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, in do_try_to_free_pages()
6891 zone->zone_pgdat); in do_try_to_free_pages()
6892 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); in do_try_to_free_pages()
6898 if (sc->nr_reclaimed) in do_try_to_free_pages()
6899 return sc->nr_reclaimed; in do_try_to_free_pages()
6902 if (sc->compaction_ready) in do_try_to_free_pages()
6914 if (sc->skipped_deactivate) { in do_try_to_free_pages()
6915 sc->priority = initial_priority; in do_try_to_free_pages()
6916 sc->force_deactivate = 1; in do_try_to_free_pages()
6917 sc->skipped_deactivate = 0; in do_try_to_free_pages()
6922 if (sc->memcg_low_skipped) { in do_try_to_free_pages()
6923 sc->priority = initial_priority; in do_try_to_free_pages()
6924 sc->force_deactivate = 0; in do_try_to_free_pages()
6925 sc->memcg_low_reclaim = 1; in do_try_to_free_pages()
6926 sc->memcg_low_skipped = 0; in do_try_to_free_pages()
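do_try_to_free_pages() above descends through priorities and, if nothing was reclaimed, restarts once with deactivation forced and once more with memory.low protection relaxed. A structural sketch of that control flow; shrink_pass() and the field values are stand-ins, not kernel API.

/* sketch-priority-loop.c: control-flow sketch only. */
#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY 12

struct sc_sketch {
	int priority;
	bool force_deactivate, skipped_deactivate;
	bool memcg_low_reclaim, memcg_low_skipped;
	unsigned long nr_reclaimed, nr_to_reclaim;
};

/* Stand-in for one full shrink pass at the current priority. */
static void shrink_pass(struct sc_sketch *sc)
{
	sc->nr_reclaimed += 8;
}

static unsigned long reclaim_sketch(struct sc_sketch *sc)
{
	int initial_priority = sc->priority;

retry:
	do {
		shrink_pass(sc);
		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
			break;
	} while (--sc->priority >= 0);

	if (sc->nr_reclaimed)
		return sc->nr_reclaimed;

	/* Retry with deactivation forced if it was skipped earlier. */
	if (sc->skipped_deactivate) {
		sc->priority = initial_priority;
		sc->force_deactivate = true;
		sc->skipped_deactivate = false;
		goto retry;
	}

	/* Last resort: allow reclaim below memory.low protection. */
	if (sc->memcg_low_skipped) {
		sc->priority = initial_priority;
		sc->force_deactivate = false;
		sc->memcg_low_reclaim = true;
		sc->memcg_low_skipped = false;
		goto retry;
	}
	return 0;
}

int main(void)
{
	struct sc_sketch sc = { .priority = DEF_PRIORITY, .nr_to_reclaim = 32 };

	printf("reclaimed %lu pages\n", reclaim_sketch(&sc));	/* 32 */
	return 0;
}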
6941 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in allow_direct_reclaim()
6945 zone = &pgdat->node_zones[i]; in allow_direct_reclaim()
6963 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { in allow_direct_reclaim()
6964 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) in allow_direct_reclaim()
6965 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); in allow_direct_reclaim()
6967 wake_up_interruptible(&pgdat->kswapd_wait); in allow_direct_reclaim()
6996 if (current->flags & PF_KTHREAD) in throttle_direct_reclaim()
7026 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
7048 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
7052 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
7119 .reclaim_idx = MAX_NR_ZONES - 1, in mem_cgroup_shrink_node()
7123 WARN_ON_ONCE(!current->reclaim_state); in mem_cgroup_shrink_node()
7158 .reclaim_idx = MAX_NR_ZONES - 1, in try_to_free_mem_cgroup_pages()
7219 * Check for watermark boosts top-down as the higher zones in pgdat_watermark_boosted()
7225 for (i = highest_zoneidx; i >= 0; i--) { in pgdat_watermark_boosted()
7226 zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
7230 if (zone->watermark_boost) in pgdat_watermark_boosted()
7244 unsigned long mark = -1; in pgdat_balanced()
7248 * Check watermarks bottom-up as lower zones are more likely to in pgdat_balanced()
7252 zone = pgdat->node_zones + i; in pgdat_balanced()
7267 * need balancing by definition. This can happen if a zone-restricted in pgdat_balanced()
7270 if (mark == -1) in pgdat_balanced()
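pgdat_balanced() walks zones bottom-up and uses mark == -1 as a sentinel meaning no eligible zone was found, in which case the node counts as balanced by definition. A simplified standalone version, with watermark handling reduced to a single high watermark per zone.

/* sketch-pgdat-balanced.c: illustrative only. */
#include <stdbool.h>
#include <stdio.h>

struct zone_sketch {
	unsigned long managed_pages;
	unsigned long free_pages;
	unsigned long high_wmark;
};

static bool pgdat_balanced_sketch(const struct zone_sketch *zones,
				  int highest_zoneidx)
{
	unsigned long mark = (unsigned long)-1;	/* "no eligible zone" sentinel */
	int i;

	/* Bottom-up: lower zones are more likely to be short on memory. */
	for (i = 0; i <= highest_zoneidx; i++) {
		const struct zone_sketch *z = &zones[i];

		if (!z->managed_pages)
			continue;
		mark = z->high_wmark;
		if (z->free_pages >= mark)
			return true;
	}

	/* No zone was eligible at all: nothing to balance. */
	return mark == (unsigned long)-1;
}

int main(void)
{
	struct zone_sketch zones[] = {
		{ 1024, 100, 256 },	/* small low zone, below its watermark */
		{ 65536, 9000, 8192 },	/* normal zone, above its watermark */
	};

	printf("balanced=%d\n", pgdat_balanced_sketch(zones, 1));	/* 1 */
	return 0;
}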
7281 clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
7282 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
7283 clear_bit(PGDAT_DIRTY, &pgdat->flags); in clear_pgdat_congested()
7284 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); in clear_pgdat_congested()
7309 if (waitqueue_active(&pgdat->pfmemalloc_wait)) in prepare_kswapd_sleep()
7310 wake_up_all(&pgdat->pfmemalloc_wait); in prepare_kswapd_sleep()
7313 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in prepare_kswapd_sleep()
7337 unsigned long nr_reclaimed = sc->nr_reclaimed; in kswapd_shrink_node()
7340 sc->nr_to_reclaim = 0; in kswapd_shrink_node()
7341 for (z = 0; z <= sc->reclaim_idx; z++) { in kswapd_shrink_node()
7342 zone = pgdat->node_zones + z; in kswapd_shrink_node()
7346 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); in kswapd_shrink_node()
7357 * high-order allocations. If twice the allocation size has been in kswapd_shrink_node()
7358 * reclaimed then recheck watermarks only at order-0 to prevent in kswapd_shrink_node()
7359 * excessive reclaim. Assume that a process requested a high-order in kswapd_shrink_node()
7362 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) in kswapd_shrink_node()
7363 sc->order = 0; in kswapd_shrink_node()
7366 return max(sc->nr_scanned, sc->nr_reclaimed - nr_reclaimed) >= sc->nr_to_reclaim; in kswapd_shrink_node()
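kswapd_shrink_node() above sums a per-zone target of at least one SWAP_CLUSTER_MAX or the high watermark, and drops back to order-0 once roughly twice the requested allocation size has been reclaimed. The same accounting in a sketch with invented watermarks; the 2UL << order gap mirrors the "twice the allocation size" rule described in the comment above.

/* sketch-kswapd-target.c: illustrative only. */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned long high_wmark[] = { 128, 4096, 0 };	/* 0 = unmanaged zone */
	unsigned long nr_to_reclaim = 0;
	unsigned long nr_reclaimed = 1200;	/* pretend result of one pass */
	unsigned int order = 5;
	int z;

	/* Aim for at least the high watermark (or one cluster) per zone. */
	for (z = 0; z < 3; z++) {
		if (!high_wmark[z])
			continue;
		nr_to_reclaim += max_ul(high_wmark[z], SWAP_CLUSTER_MAX);
	}

	/* Twice the allocation size reclaimed: recheck at order-0 only,
	 * so one high-order caller cannot drive excessive reclaim. */
	if (order && nr_reclaimed >= (2UL << order))
		order = 0;

	printf("target=%lu pages, order now %u\n", nr_to_reclaim, order);
	return 0;
}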
7377 zone = pgdat->node_zones + i; in update_reclaim_active()
7383 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); in update_reclaim_active()
7385 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); in update_reclaim_active()
7408 * kswapd scans the zones in the highmem->normal->dma direction. It skips
7443 zone = pgdat->node_zones + i; in balance_pgdat()
7447 nr_boost_reclaim += zone->watermark_boost; in balance_pgdat()
7448 zone_boosts[i] = zone->watermark_boost; in balance_pgdat()
7466 * purpose -- on 64-bit systems it is expected that in balance_pgdat()
7467 * buffer_heads are stripped during active rotation. On 32-bit in balance_pgdat()
7474 for (i = MAX_NR_ZONES - 1; i >= 0; i--) { in balance_pgdat()
7475 zone = pgdat->node_zones + i; in balance_pgdat()
7489 * re-evaluate if boosting is required when kswapd next wakes. in balance_pgdat()
7506 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) in balance_pgdat()
7511 * intent is to relieve pressure not issue sub-optimal IO in balance_pgdat()
7529 if (sc.priority < DEF_PRIORITY - 2) in balance_pgdat()
7552 if (waitqueue_active(&pgdat->pfmemalloc_wait) && in balance_pgdat()
7554 wake_up_all(&pgdat->pfmemalloc_wait); in balance_pgdat()
7567 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; in balance_pgdat()
7568 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed); in balance_pgdat()
7579 sc.priority--; in balance_pgdat()
7583 pgdat->kswapd_failures++; in balance_pgdat()
7597 zone = pgdat->node_zones + i; in balance_pgdat()
7598 spin_lock_irqsave(&zone->lock, flags); in balance_pgdat()
7599 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); in balance_pgdat()
7600 spin_unlock_irqrestore(&zone->lock, flags); in balance_pgdat()
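balance_pgdat() snapshots each zone's watermark boost at the start of the run and, when it finishes, subtracts only that snapshot under the zone lock, so a boost raised concurrently survives for the next wakeup. The accounting pattern in a sketch (single-threaded, so no locking here):

/* sketch-watermark-boost.c: illustrative only. */
#include <stdio.h>

#define NR_ZONES 3

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long watermark_boost[NR_ZONES] = { 0, 512, 128 };
	unsigned long zone_boosts[NR_ZONES];
	unsigned long nr_boost_reclaim = 0;
	int i;

	/* Snapshot the boosts this balancing run is responsible for. */
	for (i = 0; i < NR_ZONES; i++) {
		zone_boosts[i] = watermark_boost[i];
		nr_boost_reclaim += watermark_boost[i];
	}

	/* ... reclaim would run here ... */

	/* Drop only what was snapshotted; later boosts survive. */
	for (i = 0; i < NR_ZONES; i++)
		watermark_boost[i] -= min_ul(watermark_boost[i], zone_boosts[i]);

	printf("boost-driven target was %lu pages\n", nr_boost_reclaim);
	return 0;
}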
7625 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
7634 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in kswapd_highest_zoneidx()
7648 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7680 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, in kswapd_try_to_sleep()
7684 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) in kswapd_try_to_sleep()
7685 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); in kswapd_try_to_sleep()
7688 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
7689 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7698 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); in kswapd_try_to_sleep()
7705 * per-cpu vmstat threshold while kswapd is awake and restore in kswapd_try_to_sleep()
7720 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
7733 * If there are applications that are active memory-allocators
7739 unsigned int highest_zoneidx = MAX_NR_ZONES - 1; in kswapd()
7742 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in kswapd()
7759 tsk->flags |= PF_MEMALLOC | PF_KSWAPD; in kswapd()
7762 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7763 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7764 atomic_set(&pgdat->nr_writeback_throttled, 0); in kswapd()
7768 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7777 alloc_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7780 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7781 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7795 * Reclaim begins at the requested order but if a high-order in kswapd()
7797 * order-0. If that happens, kswapd will consider sleeping in kswapd()
7802 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, in kswapd()
7810 tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD); in kswapd()
7816 * A zone is low on free memory or too fragmented for high-order memory. If
7834 pgdat = zone->zone_pgdat; in wakeup_kswapd()
7835 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in wakeup_kswapd()
7838 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); in wakeup_kswapd()
7840 if (READ_ONCE(pgdat->kswapd_order) < order) in wakeup_kswapd()
7841 WRITE_ONCE(pgdat->kswapd_order, order); in wakeup_kswapd()
7843 if (!waitqueue_active(&pgdat->kswapd_wait)) in wakeup_kswapd()
7847 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || in wakeup_kswapd()
7852 * fragmented for high-order allocations. Wake up kcompactd in wakeup_kswapd()
7862 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, in wakeup_kswapd()
7864 wake_up_interruptible(&pgdat->kswapd_wait); in wakeup_kswapd()
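wakeup_kswapd() and the kswapd() loop above hand requests over through two shared fields: callers widen the pending order and highest zone index, and kswapd consumes them, restoring MAX_NR_ZONES as a no-request sentinel. A single-threaded sketch of that handshake; the kernel uses READ_ONCE/WRITE_ONCE and a wait queue, both omitted here.

/* sketch-kswapd-handshake.c: illustrative only. */
#include <stdio.h>

#define MAX_NR_ZONES 4	/* doubles as the "no pending request" sentinel */

static unsigned int kswapd_order;
static unsigned int kswapd_highest_zoneidx = MAX_NR_ZONES;

/* Producer: merge a new request into whatever is already pending. */
static void wakeup_sketch(unsigned int order, unsigned int highest_zoneidx)
{
	if (kswapd_highest_zoneidx == MAX_NR_ZONES ||
	    kswapd_highest_zoneidx < highest_zoneidx)
		kswapd_highest_zoneidx = highest_zoneidx;
	if (kswapd_order < order)
		kswapd_order = order;
	/* ...then wake kswapd if it is asleep and not failing repeatedly. */
}

/* Consumer: take the pending request and restore the sentinels. */
static void kswapd_iteration_sketch(void)
{
	unsigned int order = kswapd_order;
	unsigned int zoneidx = kswapd_highest_zoneidx == MAX_NR_ZONES ?
			       MAX_NR_ZONES - 1 : kswapd_highest_zoneidx;

	kswapd_order = 0;
	kswapd_highest_zoneidx = MAX_NR_ZONES;

	printf("balance up to zone %u for order %u\n", zoneidx, order);
}

int main(void)
{
	wakeup_sketch(3, 1);
	wakeup_sketch(0, 2);		/* a later caller widens the zone range */
	kswapd_iteration_sketch();	/* "balance up to zone 2 for order 3" */
	return 0;
}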
7869 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
7881 .reclaim_idx = MAX_NR_ZONES - 1, in shrink_all_memory()
7907 * This kswapd start function will be called by init and node-hot-add.
7914 if (!pgdat->kswapd) { in kswapd_run()
7915 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); in kswapd_run()
7916 if (IS_ERR(pgdat->kswapd)) { in kswapd_run()
7920 pgdat->kswapd = NULL; in kswapd_run()
7936 kswapd = pgdat->kswapd; in kswapd_stop()
7939 pgdat->kswapd = NULL; in kswapd_stop()
7960 * If non-zero call node_reclaim when the number of free pages falls below
7995 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; in node_unmapped_file_pages()
8023 return nr_pagecache_reclaimable - delta; in node_pagecache_reclaimable()
8047 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, in __node_reclaim()
8059 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages || in __node_reclaim()
8060 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) { in __node_reclaim()
8067 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); in __node_reclaim()
8094 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && in node_reclaim()
8096 pgdat->min_slab_pages) in node_reclaim()
8102 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) in node_reclaim()
8111 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) in node_reclaim()
8114 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) in node_reclaim()
8118 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); in node_reclaim()
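node_reclaim() above lets only one task reclaim a node at a time: test_and_set_bit(PGDAT_RECLAIM_LOCKED, ...) makes latecomers back off instead of queueing. The same back-off gate sketched with a C11 atomic flag rather than the kernel's bitops:

/* sketch-node-reclaim-gate.c: illustrative only. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag node_reclaim_locked = ATOMIC_FLAG_INIT;

static int node_reclaim_sketch(void)
{
	int ret;

	if (atomic_flag_test_and_set(&node_reclaim_locked))
		return 0;	/* someone else is already reclaiming this node */

	ret = 1;		/* stand-in for __node_reclaim() making progress */

	atomic_flag_clear(&node_reclaim_locked);
	return ret;
}

int main(void)
{
	printf("reclaimed: %d\n", node_reclaim_sketch());
	return 0;
}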
8128 * check_move_unevictable_folios - Move evictable folios to appropriate zone
8143 for (i = 0; i < fbatch->nr; i++) { in check_move_unevictable_folios()
8144 struct folio *folio = fbatch->folios[i]; in check_move_unevictable_folios()