// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmpressure.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>
#include <linux/psi.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>
#include <linux/balloon_compaction.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

struct scan_control {
	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t	*nodemask;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/* Writepage batching in laptop mode; RECLAIM_WRITE */
	unsigned int may_writepage:1;

	/* Can mapped pages be reclaimed? */
	unsigned int may_unmap:1;

	/* Can pages be swapped as part of reclaim? */
	unsigned int may_swap:1;

	/* e.g. boosted watermark reclaim leaves slabs alone */
	unsigned int may_shrinkslab:1;

	/*
	 * Cgroups are not reclaimed below their configured memory.low,
	 * unless we threaten to OOM. If any cgroups are skipped due to
	 * memory.low and nothing was reclaimed, go back for memory.low.
	 */
	unsigned int memcg_low_reclaim:1;
	unsigned int memcg_low_skipped:1;

	unsigned int hibernation_mode:1;

	/* One of the zones is ready for compaction */
	unsigned int compaction_ready:1;

	/* Allocation order */
	s8 order;

	/* Scan (total_size >> priority) pages at once */
	s8 priority;

	/* The highest zone to isolate pages for reclaim from */
	s8 reclaim_idx;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	struct {
		unsigned int dirty;
		unsigned int unqueued_dirty;
		unsigned int congested;
		unsigned int writeback;
		unsigned int immediate;
		unsigned int file_taken;
		unsigned int taken;
	} nr;

	/* for recording the reclaimed slab by now */
	struct reclaim_state reclaim_state;
};

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
/*
 * The total number of pages which are beyond the high watermark within all
 * zones.
 */
unsigned long vm_total_pages;

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_MEMCG_KMEM

/*
 * We allow subsystems to populate their shrinker-related
 * LRU lists before register_shrinker_prepared() is called
 * for the shrinker, since we don't want to impose
 * restrictions on their internal registration order.
 * In this case shrink_slab_memcg() may find corresponding
 * bit is set in the shrinkers map.
 *
 * This value is used by the function to detect registering
 * shrinkers and to skip do_shrink_slab() calls for them.
 */
#define SHRINKER_REGISTERING ((struct shrinker *)~0UL)

static DEFINE_IDR(shrinker_idr);
static int shrinker_nr_max;

static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
	int id, ret = -ENOMEM;

	down_write(&shrinker_rwsem);
	/* This may call shrinker, so it must use down_read_trylock() */
	id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
	if (id < 0)
		goto unlock;

	if (id >= shrinker_nr_max) {
		if (memcg_expand_shrinker_maps(id)) {
			idr_remove(&shrinker_idr, id);
			goto unlock;
		}

		shrinker_nr_max = id + 1;
	}
	shrinker->id = id;
	ret = 0;
unlock:
	up_write(&shrinker_rwsem);
	return ret;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
	int id = shrinker->id;

	BUG_ON(id < 0);

	down_write(&shrinker_rwsem);
	idr_remove(&shrinker_idr, id);
	up_write(&shrinker_rwsem);
}
#else /* CONFIG_MEMCG_KMEM */
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
	return 0;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static void set_task_reclaim_state(struct task_struct *task,
				   struct reclaim_state *rs)
{
	/* Check for an overwrite */
	WARN_ON_ONCE(rs && task->reclaim_state);

	/* Check for the nulling of an already-nulled member */
	WARN_ON_ONCE(!rs && !task->reclaim_state);

	task->reclaim_state = rs;
}
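/*
 * Usage sketch (illustrative, not part of the file): the reclaim entry
 * points pair a set with a clear around the actual reclaim work, so the
 * WARN_ON_ONCEs above catch nesting and double-clear bugs, e.g.:
 *
 *	set_task_reclaim_state(current, &sc.reclaim_state);
 *	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 *	set_task_reclaim_state(current, NULL);
 */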
#ifdef CONFIG_MEMCG
static bool global_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup;
}

/**
 * sane_reclaim - is the usual dirty throttling mechanism operational?
 * @sc: scan_control in question
 *
 * The normal page dirty throttling mechanism in balance_dirty_pages() is
 * completely broken with the legacy memcg and direct stalling in
 * shrink_page_list() is used for throttling instead, which lacks all the
 * niceties such as fairness, adaptive pausing, bandwidth proportional
 * allocation and configurability.
 *
 * This function tests whether the vmscan currently in progress can assume
 * that the normal dirty throttling mechanism is operational.
 */
static bool sane_reclaim(struct scan_control *sc)
{
	struct mem_cgroup *memcg = sc->target_mem_cgroup;

	if (!memcg)
		return true;
#ifdef CONFIG_CGROUP_WRITEBACK
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return true;
#endif
	return false;
}

static void set_memcg_congestion(pg_data_t *pgdat,
				struct mem_cgroup *memcg,
				bool congested)
{
	struct mem_cgroup_per_node *mn;

	if (!memcg)
		return;

	mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	WRITE_ONCE(mn->congested, congested);
}

static bool memcg_congested(pg_data_t *pgdat,
			struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mn;

	mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	return READ_ONCE(mn->congested);
}
#else
static bool global_reclaim(struct scan_control *sc)
{
	return true;
}

static bool sane_reclaim(struct scan_control *sc)
{
	return true;
}

static inline void set_memcg_congestion(struct pglist_data *pgdat,
				struct mem_cgroup *memcg, bool congested)
{
}

static inline bool memcg_congested(struct pglist_data *pgdat,
			struct mem_cgroup *memcg)
{
	return false;
}
#endif

/*
 * This misses isolated pages which are not accounted for to save counters.
 * As the data only determines if reclaim or compaction continues, it is
 * not expected that isolated pages will be a dominating factor.
 */
unsigned long zone_reclaimable_pages(struct zone *zone)
{
	unsigned long nr;

	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
	if (get_nr_swap_pages() > 0)
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);

	return nr;
}

/**
 * lruvec_lru_size -  Returns the number of pages on the given LRU list.
 * @lruvec: lru vector
 * @lru: lru to use
 * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
 */
unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
{
	unsigned long lru_size;
	int zid;

	if (!mem_cgroup_disabled())
		lru_size = lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
	else
		lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);

	for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
		unsigned long size;

		if (!managed_zone(zone))
			continue;

		if (!mem_cgroup_disabled())
			size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
		else
			size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
				       NR_ZONE_LRU_BASE + lru);
		lru_size -= min(size, lru_size);
	}

	return lru_size;
}
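/*
 * Example (illustrative only): a call like
 *
 *	lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, ZONE_NORMAL);
 *
 * starts from the full inactive-file count and, via the loop above,
 * subtracts the pages sitting in zones higher than ZONE_NORMAL, so the
 * caller sees only the part of the list usable by an allocation that is
 * constrained to ZONE_NORMAL and below.
 */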
/*
 * Add a shrinker callback to be called from the vm.
 */
int prealloc_shrinker(struct shrinker *shrinker)
{
	unsigned int size = sizeof(*shrinker->nr_deferred);

	if (shrinker->flags & SHRINKER_NUMA_AWARE)
		size *= nr_node_ids;

	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
	if (!shrinker->nr_deferred)
		return -ENOMEM;

	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
		if (prealloc_memcg_shrinker(shrinker))
			goto free_deferred;
	}

	return 0;

free_deferred:
	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
	return -ENOMEM;
}

void free_prealloced_shrinker(struct shrinker *shrinker)
{
	if (!shrinker->nr_deferred)
		return;

	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
		unregister_memcg_shrinker(shrinker);

	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}

void register_shrinker_prepared(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
#ifdef CONFIG_MEMCG_KMEM
	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
		idr_replace(&shrinker_idr, shrinker, shrinker->id);
#endif
	up_write(&shrinker_rwsem);
}

int register_shrinker(struct shrinker *shrinker)
{
	int err = prealloc_shrinker(shrinker);

	if (err)
		return err;
	register_shrinker_prepared(shrinker);
	return 0;
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	if (!shrinker->nr_deferred)
		return;
	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
		unregister_memcg_shrinker(shrinker);
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}
EXPORT_SYMBOL(unregister_shrinker);
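/*
 * Minimal usage sketch (illustrative, not part of this file): a cache
 * owner provides count/scan callbacks and registers them. The
 * my_cache_* names are hypothetical stand-ins for the owner's code.
 *
 *	static unsigned long my_cache_count(struct shrinker *s,
 *					    struct shrink_control *sc)
 *	{
 *		return my_cache_nr_objects();	// 0: nothing to reclaim
 *	}
 *
 *	static unsigned long my_cache_scan(struct shrinker *s,
 *					   struct shrink_control *sc)
 *	{
 *		// free up to sc->nr_to_scan objects, return the number
 *		// actually freed (or SHRINK_STOP to bail out early)
 *		return my_cache_free(sc->nr_to_scan);
 *	}
 *
 *	static struct shrinker my_shrinker = {
 *		.count_objects	= my_cache_count,
 *		.scan_objects	= my_cache_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 *	err = register_shrinker(&my_shrinker);
 *	...
 *	unregister_shrinker(&my_shrinker);
 */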
#define SHRINK_BATCH 128

static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
				    struct shrinker *shrinker, int priority)
{
	unsigned long freed = 0;
	unsigned long long delta;
	long total_scan;
	long freeable;
	long nr;
	long new_nr;
	int nid = shrinkctl->nid;
	long batch_size = shrinker->batch ? shrinker->batch
					  : SHRINK_BATCH;
	long scanned = 0, next_deferred;

	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
		nid = 0;

	freeable = shrinker->count_objects(shrinker, shrinkctl);
	if (freeable == 0 || freeable == SHRINK_EMPTY)
		return freeable;

	/*
	 * copy the current shrinker scan count into a local variable
	 * and zero it so that other concurrent shrinker invocations
	 * don't also do this scanning work.
	 */
	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);

	total_scan = nr;
	if (shrinker->seeks) {
		delta = freeable >> priority;
		delta *= 4;
		do_div(delta, shrinker->seeks);
	} else {
		/*
		 * These objects don't require any IO to create. Trim
		 * them aggressively under memory pressure to keep
		 * them from causing refetches in the IO caches.
		 */
		delta = freeable / 2;
	}

	total_scan += delta;
	if (total_scan < 0) {
		pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
		       shrinker->scan_objects, total_scan);
		total_scan = freeable;
		next_deferred = nr;
	} else
		next_deferred = total_scan;

	/*
	 * We need to avoid excessive windup on filesystem shrinkers
	 * due to large numbers of GFP_NOFS allocations causing the
	 * shrinkers to return -1 all the time. This results in a large
	 * nr being built up so when a shrink that can do some work
	 * comes along it empties the entire cache due to nr >>>
	 * freeable. This is bad for sustaining a working set in
	 * memory.
	 *
	 * Hence only allow the shrinker to scan the entire cache when
	 * a large delta change is calculated directly.
	 */
	if (delta < freeable / 4)
		total_scan = min(total_scan, freeable / 2);

	/*
	 * Avoid risking looping forever due to too large nr value:
	 * never try to free more than twice the estimated number of
	 * freeable entries.
	 */
	if (total_scan > freeable * 2)
		total_scan = freeable * 2;

	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
				   freeable, delta, total_scan, priority);

	/*
	 * Normally, we should not scan less than batch_size objects in one
	 * pass to avoid too frequent shrinker calls, but if the slab has less
	 * than batch_size objects in total and we are really tight on memory,
	 * we will try to reclaim all available objects, otherwise we can end
	 * up failing allocations although there are plenty of reclaimable
	 * objects spread over several slabs with usage less than the
	 * batch_size.
	 *
	 * We detect the "tight on memory" situations by looking at the total
	 * number of objects we want to scan (total_scan). If it is greater
	 * than the total number of objects on slab (freeable), we must be
	 * scanning at high prio and therefore should try to reclaim as much as
	 * possible.
	 */
	while (total_scan >= batch_size ||
	       total_scan >= freeable) {
		unsigned long ret;
		unsigned long nr_to_scan = min(batch_size, total_scan);

		shrinkctl->nr_to_scan = nr_to_scan;
		shrinkctl->nr_scanned = nr_to_scan;
		ret = shrinker->scan_objects(shrinker, shrinkctl);
		if (ret == SHRINK_STOP)
			break;
		freed += ret;

		count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
		total_scan -= shrinkctl->nr_scanned;
		scanned += shrinkctl->nr_scanned;

		cond_resched();
	}

	if (next_deferred >= scanned)
		next_deferred -= scanned;
	else
		next_deferred = 0;
	/*
	 * move the unused scan count back into the shrinker in a
	 * manner that handles concurrent updates. If we exhausted the
	 * scan, there is no need to do an update.
	 */
	if (next_deferred > 0)
		new_nr = atomic_long_add_return(next_deferred,
						&shrinker->nr_deferred[nid]);
	else
		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);

	trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
	return freed;
}
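/*
 * Worked example (illustrative): with priority == DEF_PRIORITY (12),
 * shrinker->seeks == DEFAULT_SEEKS (2) and freeable == 8192, the new
 * pressure computed above is
 *
 *	delta = (8192 >> 12) * 4 / 2 = 4 objects,
 *
 * which is added on top of whatever nr_deferred carried over from
 * earlier calls that could not scan (e.g. GFP_NOFS contexts).
 */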
#ifdef CONFIG_MEMCG_KMEM
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
			struct mem_cgroup *memcg, int priority)
{
	struct memcg_shrinker_map *map;
	unsigned long ret, freed = 0;
	int i;

	if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
		return 0;

	if (!down_read_trylock(&shrinker_rwsem))
		return 0;

	map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
					true);
	if (unlikely(!map))
		goto unlock;

	for_each_set_bit(i, map->map, shrinker_nr_max) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};
		struct shrinker *shrinker;

		shrinker = idr_find(&shrinker_idr, i);
		if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
			if (!shrinker)
				clear_bit(i, map->map);
			continue;
		}

		ret = do_shrink_slab(&sc, shrinker, priority);
		if (ret == SHRINK_EMPTY) {
			clear_bit(i, map->map);
			/*
			 * After the shrinker reported that it had no objects to
			 * free, but before we cleared the corresponding bit in
			 * the memcg shrinker map, a new object might have been
			 * added. To make sure, we have the bit set in this
			 * case, we invoke the shrinker one more time and reset
			 * the bit if it reports that it is not empty anymore.
			 * The memory barrier here pairs with the barrier in
			 * memcg_set_shrinker_bit():
			 *
			 * list_lru_add()     shrink_slab_memcg()
			 *   list_add_tail()    clear_bit()
			 *   <MB>               <MB>
			 *   set_bit()          do_shrink_slab()
			 */
			smp_mb__after_atomic();
			ret = do_shrink_slab(&sc, shrinker, priority);
			if (ret == SHRINK_EMPTY)
				ret = 0;
			else
				memcg_set_shrinker_bit(memcg, nid, i);
		}
		freed += ret;

		if (rwsem_is_contended(&shrinker_rwsem)) {
			freed = freed ? : 1;
			break;
		}
	}
unlock:
	up_read(&shrinker_rwsem);
	return freed;
}
#else /* CONFIG_MEMCG_KMEM */
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
			struct mem_cgroup *memcg, int priority)
{
	return 0;
}
#endif /* CONFIG_MEMCG_KMEM */
/**
 * shrink_slab - shrink slab caches
 * @gfp_mask: allocation context
 * @nid: node whose slab caches to target
 * @memcg: memory cgroup whose slab caches to target
 * @priority: the reclaim priority
 *
 * Call the shrink functions to age shrinkable caches.
 *
 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
 * unaware shrinkers will receive a node id of 0 instead.
 *
 * @memcg specifies the memory cgroup to target. Unaware shrinkers
 * are called only if it is the root cgroup.
 *
 * @priority is sc->priority, we take the number of objects and >> by priority
 * in order to get the scan target.
 *
 * Returns the number of reclaimed slab objects.
 */
static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
				 struct mem_cgroup *memcg,
				 int priority)
{
	unsigned long ret, freed = 0;
	struct shrinker *shrinker;

	if (!mem_cgroup_is_root(memcg))
		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);

	if (!down_read_trylock(&shrinker_rwsem))
		goto out;

	list_for_each_entry(shrinker, &shrinker_list, list) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};

		ret = do_shrink_slab(&sc, shrinker, priority);
		if (ret == SHRINK_EMPTY)
			ret = 0;
		freed += ret;
		/*
		 * Bail out if someone wants to register a new shrinker to
		 * prevent the registration from being stalled for long periods
		 * by parallel ongoing shrinking.
		 */
		if (rwsem_is_contended(&shrinker_rwsem)) {
			freed = freed ? : 1;
			break;
		}
	}

	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return freed;
}

void drop_slab_node(int nid)
{
	unsigned long freed;

	do {
		struct mem_cgroup *memcg = NULL;

		freed = 0;
		memcg = mem_cgroup_iter(NULL, NULL, NULL);
		do {
			freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
	} while (freed > 10);
}

void drop_slab(void)
{
	int nid;

	for_each_online_node(nid)
		drop_slab_node(nid);
}
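/*
 * Note (illustrative): drop_slab() is what ultimately runs when userspace
 * asks for the slab caches to be dropped, e.g.
 *
 *	echo 2 > /proc/sys/vm/drop_caches
 *
 * via the drop_caches sysctl handler in fs/drop_caches.c; priority 0 in
 * drop_slab_node() means "scan everything freeable".
 */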
static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache and optional buffer
	 * heads at page->private.
	 */
	int page_cache_pins = PageTransHuge(page) && PageSwapCache(page) ?
		HPAGE_PMD_NR : 1;
	return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
}

static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!inode_write_congested(inode))
		return 1;
	if (inode_to_bdi(inode) == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 struct scan_control *sc)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking.  To prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_write_iter() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				pr_info("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_inode(mapping->host, sc))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page);
		inc_node_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}
/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page,
			    bool reclaimed)
{
	unsigned long flags;
	int refcount;

	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	xa_lock_irqsave(&mapping->i_pages, flags);
	/*
	 * The non-racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_refcount.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under the i_pages lock, then this ordering is not required.
	 */
	if (unlikely(PageTransHuge(page)) && PageSwapCache(page))
		refcount = 1 + HPAGE_PMD_NR;
	else
		refcount = 2;
	if (!page_ref_freeze(page, refcount))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_ref_unfreeze(page, refcount);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		mem_cgroup_swapout(page, swap);
		__delete_from_swap_cache(page, swap);
		xa_unlock_irqrestore(&mapping->i_pages, flags);
		put_swap_page(page, swap);
	} else {
		void (*freepage)(struct page *);
		void *shadow = NULL;

		freepage = mapping->a_ops->freepage;
		/*
		 * Remember a shadow entry for reclaimed file cache in
		 * order to detect refaults, thus thrashing, later on.
		 *
		 * But don't store shadows in an address space that is
		 * already exiting.  This is not just an optimization,
		 * inode reclaim needs to empty out the radix tree or
		 * the nodes are lost.  Don't plant shadows behind its
		 * back.
		 *
		 * We also don't store shadows for DAX mappings because the
		 * only page cache pages found in these are zero pages
		 * covering holes, and because we don't want to mix DAX
		 * exceptional entries and shadow exceptional entries in the
		 * same address_space.
		 */
		if (reclaimed && page_is_file_cache(page) &&
		    !mapping_exiting(mapping) && !dax_mapping(mapping))
			shadow = workingset_eviction(page);
		__delete_from_page_cache(page, shadow);
		xa_unlock_irqrestore(&mapping->i_pages, flags);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page, false)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_ref_unfreeze(page, 1);
		return 1;
	}
	return 0;
}
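/*
 * Refcount sketch (illustrative): for a regular page the isolating
 * caller holds one reference and the page cache holds the other, so
 * page_ref_freeze(page, 2) in __remove_mapping() succeeds only when
 * nobody else can still reach the page; remove_mapping() then unfreezes
 * to 1, leaving just the caller's reference.
 */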
/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	lru_cache_add(page);
	put_page(page);		/* drop ref from isolate */
}

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
					  &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageSwapBacked(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list. Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if (vm_flags & VM_EXEC)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}
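/*
 * Summary of the decision above (illustrative):
 *
 *	VM_LOCKED                       -> PAGEREF_RECLAIM (try_to_unmap fixes it up)
 *	referenced PTE + anon/swap      -> PAGEREF_ACTIVATE
 *	referenced PTE + used twice     -> PAGEREF_ACTIVATE
 *	referenced PTE + VM_EXEC file   -> PAGEREF_ACTIVATE
 *	referenced PTE (first file use) -> PAGEREF_KEEP
 *	PG_referenced only, clean file  -> PAGEREF_RECLAIM_CLEAN
 *	otherwise                       -> PAGEREF_RECLAIM
 */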
1068c909e993SKonstantin Khlebnikov */ 1069c909e993SKonstantin Khlebnikov if (vm_flags & VM_EXEC) 1070c909e993SKonstantin Khlebnikov return PAGEREF_ACTIVATE; 1071c909e993SKonstantin Khlebnikov 107264574746SJohannes Weiner return PAGEREF_KEEP; 107364574746SJohannes Weiner } 107464574746SJohannes Weiner 1075dfc8d636SJohannes Weiner /* Reclaim if clean, defer dirty pages to writeback */ 10762e30244aSKOSAKI Motohiro if (referenced_page && !PageSwapBacked(page)) 1077dfc8d636SJohannes Weiner return PAGEREF_RECLAIM_CLEAN; 107864574746SJohannes Weiner 107964574746SJohannes Weiner return PAGEREF_RECLAIM; 1080dfc8d636SJohannes Weiner } 1081dfc8d636SJohannes Weiner 1082e2be15f6SMel Gorman /* Check if a page is dirty or under writeback */ 1083e2be15f6SMel Gorman static void page_check_dirty_writeback(struct page *page, 1084e2be15f6SMel Gorman bool *dirty, bool *writeback) 1085e2be15f6SMel Gorman { 1086b4597226SMel Gorman struct address_space *mapping; 1087b4597226SMel Gorman 1088e2be15f6SMel Gorman /* 1089e2be15f6SMel Gorman * Anonymous pages are not handled by flushers and must be written 1090e2be15f6SMel Gorman * from reclaim context. Do not stall reclaim based on them 1091e2be15f6SMel Gorman */ 1092802a3a92SShaohua Li if (!page_is_file_cache(page) || 1093802a3a92SShaohua Li (PageAnon(page) && !PageSwapBacked(page))) { 1094e2be15f6SMel Gorman *dirty = false; 1095e2be15f6SMel Gorman *writeback = false; 1096e2be15f6SMel Gorman return; 1097e2be15f6SMel Gorman } 1098e2be15f6SMel Gorman 1099e2be15f6SMel Gorman /* By default assume that the page flags are accurate */ 1100e2be15f6SMel Gorman *dirty = PageDirty(page); 1101e2be15f6SMel Gorman *writeback = PageWriteback(page); 1102b4597226SMel Gorman 1103b4597226SMel Gorman /* Verify dirty/writeback state if the filesystem supports it */ 1104b4597226SMel Gorman if (!page_has_private(page)) 1105b4597226SMel Gorman return; 1106b4597226SMel Gorman 1107b4597226SMel Gorman mapping = page_mapping(page); 1108b4597226SMel Gorman if (mapping && mapping->a_ops->is_dirty_writeback) 1109b4597226SMel Gorman mapping->a_ops->is_dirty_writeback(page, dirty, writeback); 1110e2be15f6SMel Gorman } 1111e2be15f6SMel Gorman 1112e286781dSNick Piggin /* 11131742f19fSAndrew Morton * shrink_page_list() returns the number of reclaimed pages 11141da177e4SLinus Torvalds */ 11151742f19fSAndrew Morton static unsigned long shrink_page_list(struct list_head *page_list, 1116599d0c95SMel Gorman struct pglist_data *pgdat, 1117f84f6e2bSMel Gorman struct scan_control *sc, 111802c6de8dSMinchan Kim enum ttu_flags ttu_flags, 11193c710c1aSMichal Hocko struct reclaim_stat *stat, 112002c6de8dSMinchan Kim bool force_reclaim) 11211da177e4SLinus Torvalds { 11221da177e4SLinus Torvalds LIST_HEAD(ret_pages); 1123abe4c3b5SMel Gorman LIST_HEAD(free_pages); 11243c710c1aSMichal Hocko unsigned nr_reclaimed = 0; 1125886cf190SKirill Tkhai unsigned pgactivate = 0; 11261da177e4SLinus Torvalds 1127060f005fSKirill Tkhai memset(stat, 0, sizeof(*stat)); 11281da177e4SLinus Torvalds cond_resched(); 11291da177e4SLinus Torvalds 11301da177e4SLinus Torvalds while (!list_empty(page_list)) { 11311da177e4SLinus Torvalds struct address_space *mapping; 11321da177e4SLinus Torvalds struct page *page; 11331da177e4SLinus Torvalds int may_enter_fs; 113402c6de8dSMinchan Kim enum page_references references = PAGEREF_RECLAIM_CLEAN; 1135e2be15f6SMel Gorman bool dirty, writeback; 113698879b3bSYang Shi unsigned int nr_pages; 11371da177e4SLinus Torvalds 11381da177e4SLinus Torvalds cond_resched(); 11391da177e4SLinus Torvalds 11401da177e4SLinus 
Torvalds page = lru_to_page(page_list); 11411da177e4SLinus Torvalds list_del(&page->lru); 11421da177e4SLinus Torvalds 1143529ae9aaSNick Piggin if (!trylock_page(page)) 11441da177e4SLinus Torvalds goto keep; 11451da177e4SLinus Torvalds 1146309381feSSasha Levin VM_BUG_ON_PAGE(PageActive(page), page); 11471da177e4SLinus Torvalds 114898879b3bSYang Shi nr_pages = 1 << compound_order(page); 114998879b3bSYang Shi 115098879b3bSYang Shi /* Account the number of base pages even though THP */ 115198879b3bSYang Shi sc->nr_scanned += nr_pages; 115280e43426SChristoph Lameter 115339b5f29aSHugh Dickins if (unlikely(!page_evictable(page))) 1154ad6b6704SMinchan Kim goto activate_locked; 1155894bc310SLee Schermerhorn 1156a6dc60f8SJohannes Weiner if (!sc->may_unmap && page_mapped(page)) 115780e43426SChristoph Lameter goto keep_locked; 115880e43426SChristoph Lameter 1159c661b078SAndy Whitcroft may_enter_fs = (sc->gfp_mask & __GFP_FS) || 1160c661b078SAndy Whitcroft (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); 1161c661b078SAndy Whitcroft 1162e62e384eSMichal Hocko /* 1163894befecSAndrey Ryabinin * The number of dirty pages determines if a node is marked 1164e2be15f6SMel Gorman * reclaim_congested which affects wait_iff_congested. kswapd 1165e2be15f6SMel Gorman * will stall and start writing pages if the tail of the LRU 1166e2be15f6SMel Gorman * is all dirty unqueued pages. 1167e2be15f6SMel Gorman */ 1168e2be15f6SMel Gorman page_check_dirty_writeback(page, &dirty, &writeback); 1169e2be15f6SMel Gorman if (dirty || writeback) 1170060f005fSKirill Tkhai stat->nr_dirty++; 1171e2be15f6SMel Gorman 1172e2be15f6SMel Gorman if (dirty && !writeback) 1173060f005fSKirill Tkhai stat->nr_unqueued_dirty++; 1174e2be15f6SMel Gorman 1175d04e8acdSMel Gorman /* 1176d04e8acdSMel Gorman * Treat this page as congested if the underlying BDI is or if 1177d04e8acdSMel Gorman * pages are cycling through the LRU so quickly that the 1178d04e8acdSMel Gorman * pages marked for immediate reclaim are making it to the 1179d04e8acdSMel Gorman * end of the LRU a second time. 1180d04e8acdSMel Gorman */ 1181e2be15f6SMel Gorman mapping = page_mapping(page); 11821da58ee2SJamie Liu if (((dirty || writeback) && mapping && 1183703c2708STejun Heo inode_write_congested(mapping->host)) || 1184d04e8acdSMel Gorman (writeback && PageReclaim(page))) 1185060f005fSKirill Tkhai stat->nr_congested++; 1186e2be15f6SMel Gorman 1187e2be15f6SMel Gorman /* 1188283aba9fSMel Gorman * If a page at the tail of the LRU is under writeback, there 1189283aba9fSMel Gorman * are three cases to consider. 1190e62e384eSMichal Hocko * 1191283aba9fSMel Gorman * 1) If reclaim is encountering an excessive number of pages 1192283aba9fSMel Gorman * under writeback and this page is both under writeback and 1193283aba9fSMel Gorman * PageReclaim then it indicates that pages are being queued 1194283aba9fSMel Gorman * for IO but are being recycled through the LRU before the 1195283aba9fSMel Gorman * IO can complete. Waiting on the page itself risks an 1196283aba9fSMel Gorman * indefinite stall if it is impossible to writeback the 1197283aba9fSMel Gorman * page due to IO error or disconnected storage so instead 1198b1a6f21eSMel Gorman * note that the LRU is being scanned too quickly and the 1199b1a6f21eSMel Gorman * caller can stall after page list has been processed. 
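 * (For exposition: case 1 corresponds to the stat->nr_immediate
 * branch in the code below, which activates the page instead of
 * waiting on its writeback.)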
1200c3b94f44SHugh Dickins * 120197c9341fSTejun Heo * 2) Global or new memcg reclaim encounters a page that is 1202ecf5fc6eSMichal Hocko * not marked for immediate reclaim, or the caller does not 1203ecf5fc6eSMichal Hocko * have __GFP_FS (or __GFP_IO if it's simply going to swap, 1204ecf5fc6eSMichal Hocko * not to fs). In this case mark the page for immediate 120597c9341fSTejun Heo * reclaim and continue scanning. 1206283aba9fSMel Gorman * 1207ecf5fc6eSMichal Hocko * Require may_enter_fs because we would wait on fs, which 1208ecf5fc6eSMichal Hocko * may not have submitted IO yet. And the loop driver might 1209283aba9fSMel Gorman * enter reclaim, and deadlock if it waits on a page for 1210283aba9fSMel Gorman * which it is needed to do the write (loop masks off 1211283aba9fSMel Gorman * __GFP_IO|__GFP_FS for this reason); but more thought 1212283aba9fSMel Gorman * would probably show more reasons. 1213283aba9fSMel Gorman * 12147fadc820SHugh Dickins * 3) Legacy memcg encounters a page that is already marked 1215283aba9fSMel Gorman * PageReclaim. memcg does not have any dirty pages 1216283aba9fSMel Gorman * throttling so we could easily OOM just because too many 1217283aba9fSMel Gorman * pages are in writeback and there is nothing else to 1218283aba9fSMel Gorman * reclaim. Wait for the writeback to complete. 1219c55e8d03SJohannes Weiner * 1220c55e8d03SJohannes Weiner * In cases 1) and 2) we activate the pages to get them out of 1221c55e8d03SJohannes Weiner * the way while we continue scanning for clean pages on the 1222c55e8d03SJohannes Weiner * inactive list and refilling from the active list. The 1223c55e8d03SJohannes Weiner * observation here is that waiting for disk writes is more 1224c55e8d03SJohannes Weiner * expensive than potentially causing reloads down the line. 1225c55e8d03SJohannes Weiner * Since they're marked for immediate reclaim, they won't put 1226c55e8d03SJohannes Weiner * memory pressure on the cache working set any longer than it 1227c55e8d03SJohannes Weiner * takes to write them to disk. 1228e62e384eSMichal Hocko */ 1229283aba9fSMel Gorman if (PageWriteback(page)) { 1230283aba9fSMel Gorman /* Case 1 above */ 1231283aba9fSMel Gorman if (current_is_kswapd() && 1232283aba9fSMel Gorman PageReclaim(page) && 1233599d0c95SMel Gorman test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { 1234060f005fSKirill Tkhai stat->nr_immediate++; 1235c55e8d03SJohannes Weiner goto activate_locked; 1236283aba9fSMel Gorman 1237283aba9fSMel Gorman /* Case 2 above */ 123897c9341fSTejun Heo } else if (sane_reclaim(sc) || 1239ecf5fc6eSMichal Hocko !PageReclaim(page) || !may_enter_fs) { 1240c3b94f44SHugh Dickins /* 1241c3b94f44SHugh Dickins * This is slightly racy - end_page_writeback() 1242c3b94f44SHugh Dickins * might have just cleared PageReclaim, then 1243c3b94f44SHugh Dickins * setting PageReclaim here may end up being interpreted 1244c3b94f44SHugh Dickins * as PageReadahead - but that does not matter 1245c3b94f44SHugh Dickins * enough to care. What we do want is for this 1246c3b94f44SHugh Dickins * page to have PageReclaim set next time memcg 1247c3b94f44SHugh Dickins * reclaim reaches the tests above, so it will 1248c3b94f44SHugh Dickins * then wait_on_page_writeback() to avoid OOM; 1249c3b94f44SHugh Dickins * and it's also appropriate in global reclaim.
1250c3b94f44SHugh Dickins */ 1251c3b94f44SHugh Dickins SetPageReclaim(page); 1252060f005fSKirill Tkhai stat->nr_writeback++; 1253c55e8d03SJohannes Weiner goto activate_locked; 1254283aba9fSMel Gorman 1255283aba9fSMel Gorman /* Case 3 above */ 1256283aba9fSMel Gorman } else { 12577fadc820SHugh Dickins unlock_page(page); 1258c3b94f44SHugh Dickins wait_on_page_writeback(page); 12597fadc820SHugh Dickins /* then go back and try same page again */ 12607fadc820SHugh Dickins list_add_tail(&page->lru, page_list); 12617fadc820SHugh Dickins continue; 1262e62e384eSMichal Hocko } 1263283aba9fSMel Gorman } 12641da177e4SLinus Torvalds 126502c6de8dSMinchan Kim if (!force_reclaim) 12666a18adb3SKonstantin Khlebnikov references = page_check_references(page, sc); 126702c6de8dSMinchan Kim 1268dfc8d636SJohannes Weiner switch (references) { 1269dfc8d636SJohannes Weiner case PAGEREF_ACTIVATE: 12701da177e4SLinus Torvalds goto activate_locked; 127164574746SJohannes Weiner case PAGEREF_KEEP: 127298879b3bSYang Shi stat->nr_ref_keep += nr_pages; 127364574746SJohannes Weiner goto keep_locked; 1274dfc8d636SJohannes Weiner case PAGEREF_RECLAIM: 1275dfc8d636SJohannes Weiner case PAGEREF_RECLAIM_CLEAN: 1276dfc8d636SJohannes Weiner ; /* try to reclaim the page below */ 1277dfc8d636SJohannes Weiner } 12781da177e4SLinus Torvalds 12791da177e4SLinus Torvalds /* 12801da177e4SLinus Torvalds * Anonymous process memory has backing store? 12811da177e4SLinus Torvalds * Try to allocate it some swap space here. 1282802a3a92SShaohua Li * Lazyfree page could be freed directly 12831da177e4SLinus Torvalds */ 1284bd4c82c2SHuang Ying if (PageAnon(page) && PageSwapBacked(page)) { 1285bd4c82c2SHuang Ying if (!PageSwapCache(page)) { 128663eb6b93SHugh Dickins if (!(sc->gfp_mask & __GFP_IO)) 128763eb6b93SHugh Dickins goto keep_locked; 1288747552b1SHuang Ying if (PageTransHuge(page)) { 1289b8f593cdSHuang Ying /* cannot split THP, skip it */ 1290747552b1SHuang Ying if (!can_split_huge_page(page, NULL)) 1291b8f593cdSHuang Ying goto activate_locked; 1292747552b1SHuang Ying /* 1293747552b1SHuang Ying * Split pages without a PMD map right 1294747552b1SHuang Ying * away. Chances are some or all of the 1295747552b1SHuang Ying * tail pages can be freed without IO. 1296747552b1SHuang Ying */ 1297747552b1SHuang Ying if (!compound_mapcount(page) && 1298bd4c82c2SHuang Ying split_huge_page_to_list(page, 1299bd4c82c2SHuang Ying page_list)) 1300747552b1SHuang Ying goto activate_locked; 1301747552b1SHuang Ying } 13020f074658SMinchan Kim if (!add_to_swap(page)) { 13030f074658SMinchan Kim if (!PageTransHuge(page)) 130498879b3bSYang Shi goto activate_locked_split; 1305bd4c82c2SHuang Ying /* Fallback to swap normal pages */ 1306bd4c82c2SHuang Ying if (split_huge_page_to_list(page, 1307bd4c82c2SHuang Ying page_list)) 13080f074658SMinchan Kim goto activate_locked; 1309fe490cc0SHuang Ying #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1310fe490cc0SHuang Ying count_vm_event(THP_SWPOUT_FALLBACK); 1311fe490cc0SHuang Ying #endif 13120f074658SMinchan Kim if (!add_to_swap(page)) 131398879b3bSYang Shi goto activate_locked_split; 13140f074658SMinchan Kim } 13150f074658SMinchan Kim 131663eb6b93SHugh Dickins may_enter_fs = 1; 13171da177e4SLinus Torvalds 1318e2be15f6SMel Gorman /* Adding to swap updated mapping */ 13191da177e4SLinus Torvalds mapping = page_mapping(page); 1320bd4c82c2SHuang Ying } 13217751b2daSKirill A. Shutemov } else if (unlikely(PageTransHuge(page))) { 13227751b2daSKirill A. Shutemov /* Split file THP */ 13237751b2daSKirill A. 
Shutemov if (split_huge_page_to_list(page, page_list)) 13247751b2daSKirill A. Shutemov goto keep_locked; 1325e2be15f6SMel Gorman } 13261da177e4SLinus Torvalds 13271da177e4SLinus Torvalds /* 132898879b3bSYang Shi * The THP may get split above, so we need to subtract the tail 132998879b3bSYang Shi * pages and update nr_pages to avoid accounting tail pages twice. 133098879b3bSYang Shi * 133198879b3bSYang Shi * The tail pages that were successfully added to the swap cache 133298879b3bSYang Shi * reach here. 133398879b3bSYang Shi */ 133498879b3bSYang Shi if ((nr_pages > 1) && !PageTransHuge(page)) { 133598879b3bSYang Shi sc->nr_scanned -= (nr_pages - 1); 133698879b3bSYang Shi nr_pages = 1; 133798879b3bSYang Shi } 133898879b3bSYang Shi 133998879b3bSYang Shi /* 13401da177e4SLinus Torvalds * The page is mapped into the page tables of one or more 13411da177e4SLinus Torvalds * processes. Try to unmap it here. 13421da177e4SLinus Torvalds */ 1343802a3a92SShaohua Li if (page_mapped(page)) { 1344bd4c82c2SHuang Ying enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH; 1345bd4c82c2SHuang Ying 1346bd4c82c2SHuang Ying if (unlikely(PageTransHuge(page))) 1347bd4c82c2SHuang Ying flags |= TTU_SPLIT_HUGE_PMD; 1348bd4c82c2SHuang Ying if (!try_to_unmap(page, flags)) { 134998879b3bSYang Shi stat->nr_unmap_fail += nr_pages; 13501da177e4SLinus Torvalds goto activate_locked; 13511da177e4SLinus Torvalds } 13521da177e4SLinus Torvalds } 13531da177e4SLinus Torvalds 13541da177e4SLinus Torvalds if (PageDirty(page)) { 1355ee72886dSMel Gorman /* 13564eda4823SJohannes Weiner * Only kswapd can write back filesystem pages 13574eda4823SJohannes Weiner * to avoid risk of stack overflow. But avoid 13584eda4823SJohannes Weiner * injecting inefficient single-page IO into 13594eda4823SJohannes Weiner * flusher writeback as much as possible: only 13604eda4823SJohannes Weiner * write pages when we've encountered many 13614eda4823SJohannes Weiner * dirty pages, and when we've already scanned 13624eda4823SJohannes Weiner * the rest of the LRU for clean pages and see 13634eda4823SJohannes Weiner * the same dirty pages again (PageReclaim). 1364ee72886dSMel Gorman */ 1365f84f6e2bSMel Gorman if (page_is_file_cache(page) && 13664eda4823SJohannes Weiner (!current_is_kswapd() || !PageReclaim(page) || 1367599d0c95SMel Gorman !test_bit(PGDAT_DIRTY, &pgdat->flags))) { 136849ea7eb6SMel Gorman /* 136949ea7eb6SMel Gorman * Immediately reclaim when written back. 137049ea7eb6SMel Gorman * Similar in principle to deactivate_page() 137149ea7eb6SMel Gorman * except we already have the page isolated 137249ea7eb6SMel Gorman * and know it's dirty. 137349ea7eb6SMel Gorman */ 1374c4a25635SMel Gorman inc_node_page_state(page, NR_VMSCAN_IMMEDIATE); 137549ea7eb6SMel Gorman SetPageReclaim(page); 137649ea7eb6SMel Gorman 1377c55e8d03SJohannes Weiner goto activate_locked; 1378ee72886dSMel Gorman } 1379ee72886dSMel Gorman 1380dfc8d636SJohannes Weiner if (references == PAGEREF_RECLAIM_CLEAN) 13811da177e4SLinus Torvalds goto keep_locked; 13824dd4b920SAndrew Morton if (!may_enter_fs) 13831da177e4SLinus Torvalds goto keep_locked; 138452a8363eSChristoph Lameter if (!sc->may_writepage) 13851da177e4SLinus Torvalds goto keep_locked; 13861da177e4SLinus Torvalds 1387d950c947SMel Gorman /* 1388d950c947SMel Gorman * Page is dirty. Flush the TLB if a writable entry 1389d950c947SMel Gorman * potentially exists to avoid CPU writes after IO 1390d950c947SMel Gorman * starts and then write it out here.
1391d950c947SMel Gorman */ 1392d950c947SMel Gorman try_to_unmap_flush_dirty(); 13937d3579e8SKOSAKI Motohiro switch (pageout(page, mapping, sc)) { 13941da177e4SLinus Torvalds case PAGE_KEEP: 13951da177e4SLinus Torvalds goto keep_locked; 13961da177e4SLinus Torvalds case PAGE_ACTIVATE: 13971da177e4SLinus Torvalds goto activate_locked; 13981da177e4SLinus Torvalds case PAGE_SUCCESS: 13997d3579e8SKOSAKI Motohiro if (PageWriteback(page)) 140041ac1999SMel Gorman goto keep; 14017d3579e8SKOSAKI Motohiro if (PageDirty(page)) 14021da177e4SLinus Torvalds goto keep; 14037d3579e8SKOSAKI Motohiro 14041da177e4SLinus Torvalds /* 14051da177e4SLinus Torvalds * A synchronous write - probably a ramdisk. Go 14061da177e4SLinus Torvalds * ahead and try to reclaim the page. 14071da177e4SLinus Torvalds */ 1408529ae9aaSNick Piggin if (!trylock_page(page)) 14091da177e4SLinus Torvalds goto keep; 14101da177e4SLinus Torvalds if (PageDirty(page) || PageWriteback(page)) 14111da177e4SLinus Torvalds goto keep_locked; 14121da177e4SLinus Torvalds mapping = page_mapping(page); 14131da177e4SLinus Torvalds case PAGE_CLEAN: 14141da177e4SLinus Torvalds ; /* try to free the page below */ 14151da177e4SLinus Torvalds } 14161da177e4SLinus Torvalds } 14171da177e4SLinus Torvalds 14181da177e4SLinus Torvalds /* 14191da177e4SLinus Torvalds * If the page has buffers, try to free the buffer mappings 14201da177e4SLinus Torvalds * associated with this page. If we succeed we try to free 14211da177e4SLinus Torvalds * the page as well. 14221da177e4SLinus Torvalds * 14231da177e4SLinus Torvalds * We do this even if the page is PageDirty(). 14241da177e4SLinus Torvalds * try_to_release_page() does not perform I/O, but it is 14251da177e4SLinus Torvalds * possible for a page to have PageDirty set, but it is actually 14261da177e4SLinus Torvalds * clean (all its buffers are clean). This happens if the 14271da177e4SLinus Torvalds * buffers were written out directly, with submit_bh(). ext3 14281da177e4SLinus Torvalds * will do this, as well as the blockdev mapping. 14291da177e4SLinus Torvalds * try_to_release_page() will discover that cleanness and will 14301da177e4SLinus Torvalds * drop the buffers and mark the page clean - it can be freed. 14311da177e4SLinus Torvalds * 14321da177e4SLinus Torvalds * Rarely, pages can have buffers and no ->mapping. These are 14331da177e4SLinus Torvalds * the pages which were not successfully invalidated in 14341da177e4SLinus Torvalds * truncate_complete_page(). We try to drop those buffers here 14351da177e4SLinus Torvalds * and if that worked, and the page is no longer mapped into 14361da177e4SLinus Torvalds * process address space (page_count == 1) it can be freed. 14371da177e4SLinus Torvalds * Otherwise, leave the page on the LRU so it is swappable. 14381da177e4SLinus Torvalds */ 1439266cf658SDavid Howells if (page_has_private(page)) { 14401da177e4SLinus Torvalds if (!try_to_release_page(page, sc->gfp_mask)) 14411da177e4SLinus Torvalds goto activate_locked; 1442e286781dSNick Piggin if (!mapping && page_count(page) == 1) { 1443e286781dSNick Piggin unlock_page(page); 1444e286781dSNick Piggin if (put_page_testzero(page)) 14451da177e4SLinus Torvalds goto free_it; 1446e286781dSNick Piggin else { 1447e286781dSNick Piggin /* 1448e286781dSNick Piggin * rare race with speculative reference. 1449e286781dSNick Piggin * the speculative reference will free 1450e286781dSNick Piggin * this page shortly, so we may 1451e286781dSNick Piggin * increment nr_reclaimed here (and 1452e286781dSNick Piggin * leave it off the LRU). 
1453e286781dSNick Piggin */ 1454e286781dSNick Piggin nr_reclaimed++; 1455e286781dSNick Piggin continue; 1456e286781dSNick Piggin } 1457e286781dSNick Piggin } 14581da177e4SLinus Torvalds } 14591da177e4SLinus Torvalds 1460802a3a92SShaohua Li if (PageAnon(page) && !PageSwapBacked(page)) { 1461802a3a92SShaohua Li /* follow __remove_mapping for reference */ 1462802a3a92SShaohua Li if (!page_ref_freeze(page, 1)) 146349d2e9ccSChristoph Lameter goto keep_locked; 1464802a3a92SShaohua Li if (PageDirty(page)) { 1465802a3a92SShaohua Li page_ref_unfreeze(page, 1); 1466802a3a92SShaohua Li goto keep_locked; 1467802a3a92SShaohua Li } 14681da177e4SLinus Torvalds 1469802a3a92SShaohua Li count_vm_event(PGLAZYFREED); 14702262185cSRoman Gushchin count_memcg_page_event(page, PGLAZYFREED); 1471802a3a92SShaohua Li } else if (!mapping || !__remove_mapping(mapping, page, true)) 1472802a3a92SShaohua Li goto keep_locked; 14739a1ea439SHugh Dickins 14749a1ea439SHugh Dickins unlock_page(page); 1475e286781dSNick Piggin free_it: 147698879b3bSYang Shi /* 147798879b3bSYang Shi * A THP may get swapped out as a whole, so account 147898879b3bSYang Shi * all of its base pages. 147998879b3bSYang Shi */ 148098879b3bSYang Shi nr_reclaimed += nr_pages; 1481abe4c3b5SMel Gorman 1482abe4c3b5SMel Gorman /* 1483abe4c3b5SMel Gorman * Is there a need to periodically free_page_list? It would 1484abe4c3b5SMel Gorman * appear not, as the counts should be low. 1485abe4c3b5SMel Gorman */ 1486bd4c82c2SHuang Ying if (unlikely(PageTransHuge(page))) { 1487bd4c82c2SHuang Ying mem_cgroup_uncharge(page); 1488bd4c82c2SHuang Ying (*get_compound_page_dtor(page))(page); 1489bd4c82c2SHuang Ying } else 1490abe4c3b5SMel Gorman list_add(&page->lru, &free_pages); 14911da177e4SLinus Torvalds continue; 14921da177e4SLinus Torvalds 149398879b3bSYang Shi activate_locked_split: 149498879b3bSYang Shi /* 149598879b3bSYang Shi * The tail pages that failed to be added to the swap cache 149698879b3bSYang Shi * reach here. Fix up nr_scanned and nr_pages. 149798879b3bSYang Shi */ 149898879b3bSYang Shi if (nr_pages > 1) { 149998879b3bSYang Shi sc->nr_scanned -= (nr_pages - 1); 150098879b3bSYang Shi nr_pages = 1; 150198879b3bSYang Shi } 15021da177e4SLinus Torvalds activate_locked: 150368a22394SRik van Riel /* Not a candidate for swapping, so reclaim swap space.
*/ 1504ad6b6704SMinchan Kim if (PageSwapCache(page) && (mem_cgroup_swap_full(page) || 1505ad6b6704SMinchan Kim PageMlocked(page))) 1506a2c43eedSHugh Dickins try_to_free_swap(page); 1507309381feSSasha Levin VM_BUG_ON_PAGE(PageActive(page), page); 1508ad6b6704SMinchan Kim if (!PageMlocked(page)) { 1509886cf190SKirill Tkhai int type = page_is_file_cache(page); 15101da177e4SLinus Torvalds SetPageActive(page); 151198879b3bSYang Shi stat->nr_activate[type] += nr_pages; 15122262185cSRoman Gushchin count_memcg_page_event(page, PGACTIVATE); 1513ad6b6704SMinchan Kim } 15141da177e4SLinus Torvalds keep_locked: 15151da177e4SLinus Torvalds unlock_page(page); 15161da177e4SLinus Torvalds keep: 15171da177e4SLinus Torvalds list_add(&page->lru, &ret_pages); 1518309381feSSasha Levin VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page); 15191da177e4SLinus Torvalds } 1520abe4c3b5SMel Gorman 152198879b3bSYang Shi pgactivate = stat->nr_activate[0] + stat->nr_activate[1]; 152298879b3bSYang Shi 1523747db954SJohannes Weiner mem_cgroup_uncharge_list(&free_pages); 152472b252aeSMel Gorman try_to_unmap_flush(); 15252d4894b5SMel Gorman free_unref_page_list(&free_pages); 1526abe4c3b5SMel Gorman 15271da177e4SLinus Torvalds list_splice(&ret_pages, page_list); 1528886cf190SKirill Tkhai count_vm_events(PGACTIVATE, pgactivate); 15290a31bc97SJohannes Weiner 153005ff5137SAndrew Morton return nr_reclaimed; 15311da177e4SLinus Torvalds } 15321da177e4SLinus Torvalds 153302c6de8dSMinchan Kim unsigned long reclaim_clean_pages_from_list(struct zone *zone, 153402c6de8dSMinchan Kim struct list_head *page_list) 153502c6de8dSMinchan Kim { 153602c6de8dSMinchan Kim struct scan_control sc = { 153702c6de8dSMinchan Kim .gfp_mask = GFP_KERNEL, 153802c6de8dSMinchan Kim .priority = DEF_PRIORITY, 153902c6de8dSMinchan Kim .may_unmap = 1, 154002c6de8dSMinchan Kim }; 1541060f005fSKirill Tkhai struct reclaim_stat dummy_stat; 15423c710c1aSMichal Hocko unsigned long ret; 154302c6de8dSMinchan Kim struct page *page, *next; 154402c6de8dSMinchan Kim LIST_HEAD(clean_pages); 154502c6de8dSMinchan Kim 154602c6de8dSMinchan Kim list_for_each_entry_safe(page, next, page_list, lru) { 1547117aad1eSRafael Aquini if (page_is_file_cache(page) && !PageDirty(page) && 1548a58f2cefSMinchan Kim !__PageMovable(page) && !PageUnevictable(page)) { 154902c6de8dSMinchan Kim ClearPageActive(page); 155002c6de8dSMinchan Kim list_move(&page->lru, &clean_pages); 155102c6de8dSMinchan Kim } 155202c6de8dSMinchan Kim } 155302c6de8dSMinchan Kim 1554599d0c95SMel Gorman ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc, 1555060f005fSKirill Tkhai TTU_IGNORE_ACCESS, &dummy_stat, true); 155602c6de8dSMinchan Kim list_splice(&clean_pages, page_list); 1557599d0c95SMel Gorman mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret); 155802c6de8dSMinchan Kim return ret; 155902c6de8dSMinchan Kim } 156002c6de8dSMinchan Kim 15615ad333ebSAndy Whitcroft /* 15625ad333ebSAndy Whitcroft * Attempt to remove the specified page from its LRU. Only take this page 15635ad333ebSAndy Whitcroft * if it is of the appropriate PageActive status. Pages which are being 15645ad333ebSAndy Whitcroft * freed elsewhere are also ignored. 15655ad333ebSAndy Whitcroft * 15665ad333ebSAndy Whitcroft * page: page to consider 15675ad333ebSAndy Whitcroft * mode: one of the LRU isolation modes defined above 15685ad333ebSAndy Whitcroft * 15695ad333ebSAndy Whitcroft * returns 0 on success, -ve errno on failure. 
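 *
 * (Assumed usage sketch, for exposition only; callers hold the LRU
 * lock and move the page off the list themselves on success, e.g.:
 *
 *	if (__isolate_lru_page(page, mode) == 0)
 *		list_move(&page->lru, dst);
 *
 * which is what isolate_lru_pages() below does.)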
15705ad333ebSAndy Whitcroft */ 1571f3fd4a61SKonstantin Khlebnikov int __isolate_lru_page(struct page *page, isolate_mode_t mode) 15725ad333ebSAndy Whitcroft { 15735ad333ebSAndy Whitcroft int ret = -EINVAL; 15745ad333ebSAndy Whitcroft 15755ad333ebSAndy Whitcroft /* Only take pages on the LRU. */ 15765ad333ebSAndy Whitcroft if (!PageLRU(page)) 15775ad333ebSAndy Whitcroft return ret; 15785ad333ebSAndy Whitcroft 1579e46a2879SMinchan Kim /* Compaction should not handle unevictable pages but CMA can do so */ 1580e46a2879SMinchan Kim if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE)) 1581894bc310SLee Schermerhorn return ret; 1582894bc310SLee Schermerhorn 15835ad333ebSAndy Whitcroft ret = -EBUSY; 158408e552c6SKAMEZAWA Hiroyuki 1585c8244935SMel Gorman /* 1586c8244935SMel Gorman * To minimise LRU disruption, the caller can indicate that it only 1587c8244935SMel Gorman * wants to isolate pages it will be able to operate on without 1588c8244935SMel Gorman * blocking - clean pages for the most part. 1589c8244935SMel Gorman * 1590c8244935SMel Gorman * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages 1591c8244935SMel Gorman * that it is possible to migrate without blocking. 1592c8244935SMel Gorman */ 15931276ad68SJohannes Weiner if (mode & ISOLATE_ASYNC_MIGRATE) { 1594c8244935SMel Gorman /* All the caller can do on PageWriteback is block */ 1595c8244935SMel Gorman if (PageWriteback(page)) 159639deaf85SMinchan Kim return ret; 159739deaf85SMinchan Kim 1598c8244935SMel Gorman if (PageDirty(page)) { 1599c8244935SMel Gorman struct address_space *mapping; 160069d763fcSMel Gorman bool migrate_dirty; 1601c8244935SMel Gorman 1602c8244935SMel Gorman /* 1603c8244935SMel Gorman * Only pages without mappings or that have a 1604c8244935SMel Gorman * ->migratepage callback are possible to migrate 160569d763fcSMel Gorman * without blocking. However, we can be racing with 160669d763fcSMel Gorman * truncation so it's necessary to lock the page 160769d763fcSMel Gorman * to stabilise the mapping as truncation holds 160869d763fcSMel Gorman * the page lock until after the page is removed 160969d763fcSMel Gorman * from the page cache. 1610c8244935SMel Gorman */ 161169d763fcSMel Gorman if (!trylock_page(page)) 161269d763fcSMel Gorman return ret; 161369d763fcSMel Gorman 1614c8244935SMel Gorman mapping = page_mapping(page); 1615145e1a71SHugh Dickins migrate_dirty = !mapping || mapping->a_ops->migratepage; 161669d763fcSMel Gorman unlock_page(page); 161769d763fcSMel Gorman if (!migrate_dirty) 1618c8244935SMel Gorman return ret; 1619c8244935SMel Gorman } 1620c8244935SMel Gorman } 1621c8244935SMel Gorman 1622f80c0673SMinchan Kim if ((mode & ISOLATE_UNMAPPED) && page_mapped(page)) 1623f80c0673SMinchan Kim return ret; 1624f80c0673SMinchan Kim 16255ad333ebSAndy Whitcroft if (likely(get_page_unless_zero(page))) { 16265ad333ebSAndy Whitcroft /* 16275ad333ebSAndy Whitcroft * Be careful not to clear PageLRU until after we're 16285ad333ebSAndy Whitcroft * sure the page is not being freed elsewhere -- the 16295ad333ebSAndy Whitcroft * page release code relies on it. 16305ad333ebSAndy Whitcroft */ 16315ad333ebSAndy Whitcroft ClearPageLRU(page); 16325ad333ebSAndy Whitcroft ret = 0; 16335ad333ebSAndy Whitcroft } 16345ad333ebSAndy Whitcroft 16355ad333ebSAndy Whitcroft return ret; 16365ad333ebSAndy Whitcroft } 16375ad333ebSAndy Whitcroft 16387ee36a14SMel Gorman 16397ee36a14SMel Gorman /* 16407ee36a14SMel Gorman * Update LRU sizes after isolating pages.
The LRU size updates must 16417ee36a14SMel Gorman * be complete before mem_cgroup_update_lru_size due to a sanity check. 16427ee36a14SMel Gorman */ 16437ee36a14SMel Gorman static __always_inline void update_lru_sizes(struct lruvec *lruvec, 1644b4536f0cSMichal Hocko enum lru_list lru, unsigned long *nr_zone_taken) 16457ee36a14SMel Gorman { 16467ee36a14SMel Gorman int zid; 16477ee36a14SMel Gorman 16487ee36a14SMel Gorman for (zid = 0; zid < MAX_NR_ZONES; zid++) { 16497ee36a14SMel Gorman if (!nr_zone_taken[zid]) 16507ee36a14SMel Gorman continue; 16517ee36a14SMel Gorman 16527ee36a14SMel Gorman __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); 1653b4536f0cSMichal Hocko #ifdef CONFIG_MEMCG 1654b4536f0cSMichal Hocko mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); 1655b4536f0cSMichal Hocko #endif 16567ee36a14SMel Gorman } 16577ee36a14SMel Gorman 16587ee36a14SMel Gorman } 16597ee36a14SMel Gorman 1660f4b7e272SAndrey Ryabinin /** 1661f4b7e272SAndrey Ryabinin * pgdat->lru_lock is heavily contended. Some of the functions that 16621da177e4SLinus Torvalds * shrink the lists perform better by taking out a batch of pages 16631da177e4SLinus Torvalds * and working on them outside the LRU lock. 16641da177e4SLinus Torvalds * 16651da177e4SLinus Torvalds * For pagecache intensive workloads, this function is the hottest 16661da177e4SLinus Torvalds * spot in the kernel (apart from copy_*_user functions). 16671da177e4SLinus Torvalds * 16681da177e4SLinus Torvalds * Appropriate locks must be held before calling this function. 16691da177e4SLinus Torvalds * 1670791b48b6SMinchan Kim * @nr_to_scan: The number of eligible pages to look through on the list. 16715dc35979SKonstantin Khlebnikov * @lruvec: The LRU vector to pull pages from. 16721da177e4SLinus Torvalds * @dst: The temp list to put pages on to. 1673f626012dSHugh Dickins * @nr_scanned: The number of pages that were scanned. 1674fe2c2a10SRik van Riel * @sc: The scan_control struct for this reclaim session 16755ad333ebSAndy Whitcroft * @mode: One of the LRU isolation modes 16763cb99451SKonstantin Khlebnikov * @lru: LRU list id for isolating 16771da177e4SLinus Torvalds * 16781da177e4SLinus Torvalds * returns how many pages were moved onto *@dst. 16791da177e4SLinus Torvalds */ 168069e05944SAndrew Morton static unsigned long isolate_lru_pages(unsigned long nr_to_scan, 16815dc35979SKonstantin Khlebnikov struct lruvec *lruvec, struct list_head *dst, 1682fe2c2a10SRik van Riel unsigned long *nr_scanned, struct scan_control *sc, 1683a9e7c39fSKirill Tkhai enum lru_list lru) 16841da177e4SLinus Torvalds { 168575b00af7SHugh Dickins struct list_head *src = &lruvec->lists[lru]; 168669e05944SAndrew Morton unsigned long nr_taken = 0; 1687599d0c95SMel Gorman unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 }; 16887cc30fcfSMel Gorman unsigned long nr_skipped[MAX_NR_ZONES] = { 0, }; 16893db65812SJohannes Weiner unsigned long skipped = 0; 1690791b48b6SMinchan Kim unsigned long scan, total_scan, nr_pages; 1691b2e18757SMel Gorman LIST_HEAD(pages_skipped); 1692a9e7c39fSKirill Tkhai isolate_mode_t mode = (sc->may_unmap ?
0 : ISOLATE_UNMAPPED); 16931da177e4SLinus Torvalds 169498879b3bSYang Shi total_scan = 0; 1695791b48b6SMinchan Kim scan = 0; 169698879b3bSYang Shi while (scan < nr_to_scan && !list_empty(src)) { 16975ad333ebSAndy Whitcroft struct page *page; 16985ad333ebSAndy Whitcroft 16991da177e4SLinus Torvalds page = lru_to_page(src); 17001da177e4SLinus Torvalds prefetchw_prev_lru_page(page, src, flags); 17011da177e4SLinus Torvalds 1702309381feSSasha Levin VM_BUG_ON_PAGE(!PageLRU(page), page); 17038d438f96SNick Piggin 170498879b3bSYang Shi nr_pages = 1 << compound_order(page); 170598879b3bSYang Shi total_scan += nr_pages; 170698879b3bSYang Shi 1707b2e18757SMel Gorman if (page_zonenum(page) > sc->reclaim_idx) { 1708b2e18757SMel Gorman list_move(&page->lru, &pages_skipped); 170998879b3bSYang Shi nr_skipped[page_zonenum(page)] += nr_pages; 1710b2e18757SMel Gorman continue; 1711b2e18757SMel Gorman } 1712b2e18757SMel Gorman 1713791b48b6SMinchan Kim /* 1714791b48b6SMinchan Kim * Do not count skipped pages because that makes the function 1715791b48b6SMinchan Kim * return with no isolated pages if the LRU mostly contains 1716791b48b6SMinchan Kim * ineligible pages. This causes the VM to not reclaim any 1717791b48b6SMinchan Kim * pages, triggering a premature OOM. 171898879b3bSYang Shi * 171998879b3bSYang Shi * Account all tail pages of THP. This would not cause 172098879b3bSYang Shi * premature OOM since __isolate_lru_page() returns -EBUSY 172198879b3bSYang Shi * only when the page is being freed somewhere else. 1722791b48b6SMinchan Kim */ 172398879b3bSYang Shi scan += nr_pages; 1724f3fd4a61SKonstantin Khlebnikov switch (__isolate_lru_page(page, mode)) { 17255ad333ebSAndy Whitcroft case 0: 1726599d0c95SMel Gorman nr_taken += nr_pages; 1727599d0c95SMel Gorman nr_zone_taken[page_zonenum(page)] += nr_pages; 17285ad333ebSAndy Whitcroft list_move(&page->lru, dst); 17295ad333ebSAndy Whitcroft break; 17307c8ee9a8SNick Piggin 17315ad333ebSAndy Whitcroft case -EBUSY: 17325ad333ebSAndy Whitcroft /* else it is being freed elsewhere */ 17335ad333ebSAndy Whitcroft list_move(&page->lru, src); 17345ad333ebSAndy Whitcroft continue; 17355ad333ebSAndy Whitcroft 17365ad333ebSAndy Whitcroft default: 17375ad333ebSAndy Whitcroft BUG(); 17385ad333ebSAndy Whitcroft } 17395ad333ebSAndy Whitcroft } 17401da177e4SLinus Torvalds 1741b2e18757SMel Gorman /* 1742b2e18757SMel Gorman * Splice any skipped pages to the start of the LRU list. Note that 1743b2e18757SMel Gorman * this disrupts the LRU order when reclaiming for lower zones but 1744b2e18757SMel Gorman * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX 1745b2e18757SMel Gorman * scanning would soon rescan the same pages to skip and put the 1746b2e18757SMel Gorman * system at risk of premature OOM. 
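 * (Hence the list_splice() of pages_skipped back onto the head of
 * src below; the skipped pages are only accounted via the
 * PGSCAN_SKIP counters, not as scan progress.)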
1747b2e18757SMel Gorman */ 17487cc30fcfSMel Gorman if (!list_empty(&pages_skipped)) { 17497cc30fcfSMel Gorman int zid; 17507cc30fcfSMel Gorman 17513db65812SJohannes Weiner list_splice(&pages_skipped, src); 17527cc30fcfSMel Gorman for (zid = 0; zid < MAX_NR_ZONES; zid++) { 17537cc30fcfSMel Gorman if (!nr_skipped[zid]) 17547cc30fcfSMel Gorman continue; 17557cc30fcfSMel Gorman 17567cc30fcfSMel Gorman __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]); 17571265e3a6SMichal Hocko skipped += nr_skipped[zid]; 17587cc30fcfSMel Gorman } 17597cc30fcfSMel Gorman } 1760791b48b6SMinchan Kim *nr_scanned = total_scan; 17611265e3a6SMichal Hocko trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, 1762791b48b6SMinchan Kim total_scan, skipped, nr_taken, mode, lru); 1763b4536f0cSMichal Hocko update_lru_sizes(lruvec, lru, nr_zone_taken); 17641da177e4SLinus Torvalds return nr_taken; 17651da177e4SLinus Torvalds } 17661da177e4SLinus Torvalds 176762695a84SNick Piggin /** 176862695a84SNick Piggin * isolate_lru_page - tries to isolate a page from its LRU list 176962695a84SNick Piggin * @page: page to isolate from its LRU list 177062695a84SNick Piggin * 177162695a84SNick Piggin * Isolates a @page from an LRU list, clears PageLRU and adjusts the 177262695a84SNick Piggin * vmstat statistic corresponding to whatever LRU list the page was on. 177362695a84SNick Piggin * 177462695a84SNick Piggin * Returns 0 if the page was removed from an LRU list. 177562695a84SNick Piggin * Returns -EBUSY if the page was not on an LRU list. 177662695a84SNick Piggin * 177762695a84SNick Piggin * The returned page will have PageLRU() cleared. If it was found on 1778894bc310SLee Schermerhorn * the active list, it will have PageActive set. If it was found on 1779894bc310SLee Schermerhorn * the unevictable list, it will have the PageUnevictable bit set. That flag 1780894bc310SLee Schermerhorn * may need to be cleared by the caller before letting the page go. 178162695a84SNick Piggin * 178262695a84SNick Piggin * The vmstat statistic corresponding to the list on which the page was 178362695a84SNick Piggin * found will be decremented. 178462695a84SNick Piggin * 178562695a84SNick Piggin * Restrictions: 1786a5d09bedSMike Rapoport * 178762695a84SNick Piggin * (1) Must be called with an elevated refcount on the page. This is a 178862695a84SNick Piggin * fundamental difference from isolate_lru_pages (which is called 178962695a84SNick Piggin * without a stable reference). 179062695a84SNick Piggin * (2) the lru_lock must not be held. 179162695a84SNick Piggin * (3) interrupts must be enabled. 179262695a84SNick Piggin */ 179362695a84SNick Piggin int isolate_lru_page(struct page *page) 179462695a84SNick Piggin { 179562695a84SNick Piggin int ret = -EBUSY; 179662695a84SNick Piggin 1797309381feSSasha Levin VM_BUG_ON_PAGE(!page_count(page), page); 1798cf2a82eeSKirill A.
Shutemov WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"); 17990c917313SKonstantin Khlebnikov 180062695a84SNick Piggin if (PageLRU(page)) { 1801f4b7e272SAndrey Ryabinin pg_data_t *pgdat = page_pgdat(page); 1802fa9add64SHugh Dickins struct lruvec *lruvec; 180362695a84SNick Piggin 1804f4b7e272SAndrey Ryabinin spin_lock_irq(&pgdat->lru_lock); 1805f4b7e272SAndrey Ryabinin lruvec = mem_cgroup_page_lruvec(page, pgdat); 18060c917313SKonstantin Khlebnikov if (PageLRU(page)) { 1807894bc310SLee Schermerhorn int lru = page_lru(page); 18080c917313SKonstantin Khlebnikov get_page(page); 180962695a84SNick Piggin ClearPageLRU(page); 1810fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, lru); 1811fa9add64SHugh Dickins ret = 0; 181262695a84SNick Piggin } 1813f4b7e272SAndrey Ryabinin spin_unlock_irq(&pgdat->lru_lock); 181462695a84SNick Piggin } 181562695a84SNick Piggin return ret; 181662695a84SNick Piggin } 181762695a84SNick Piggin 18185ad333ebSAndy Whitcroft /* 1819d37dd5dcSFengguang Wu * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and 1820d37dd5dcSFengguang Wu * then get rescheduled. When there is a massive number of tasks doing page 1821d37dd5dcSFengguang Wu * allocation, such sleeping direct reclaimers may keep piling up on each CPU; 1822d37dd5dcSFengguang Wu * the LRU list will go small and be scanned faster than necessary, leading to 1823d37dd5dcSFengguang Wu * unnecessary swapping, thrashing and OOM. 182435cd7815SRik van Riel */ 1825599d0c95SMel Gorman static int too_many_isolated(struct pglist_data *pgdat, int file, 182635cd7815SRik van Riel struct scan_control *sc) 182735cd7815SRik van Riel { 182835cd7815SRik van Riel unsigned long inactive, isolated; 182935cd7815SRik van Riel 183035cd7815SRik van Riel if (current_is_kswapd()) 183135cd7815SRik van Riel return 0; 183235cd7815SRik van Riel 183397c9341fSTejun Heo if (!sane_reclaim(sc)) 183435cd7815SRik van Riel return 0; 183535cd7815SRik van Riel 183635cd7815SRik van Riel if (file) { 1837599d0c95SMel Gorman inactive = node_page_state(pgdat, NR_INACTIVE_FILE); 1838599d0c95SMel Gorman isolated = node_page_state(pgdat, NR_ISOLATED_FILE); 183935cd7815SRik van Riel } else { 1840599d0c95SMel Gorman inactive = node_page_state(pgdat, NR_INACTIVE_ANON); 1841599d0c95SMel Gorman isolated = node_page_state(pgdat, NR_ISOLATED_ANON); 184235cd7815SRik van Riel } 184335cd7815SRik van Riel 18443cf23841SFengguang Wu /* 18453cf23841SFengguang Wu * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they 18463cf23841SFengguang Wu * won't get blocked by normal direct-reclaimers, forming a circular 18473cf23841SFengguang Wu * deadlock. 18483cf23841SFengguang Wu */ 1849d0164adcSMel Gorman if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) 18503cf23841SFengguang Wu inactive >>= 3; 18513cf23841SFengguang Wu 185235cd7815SRik van Riel return isolated > inactive; 185335cd7815SRik van Riel } 185435cd7815SRik van Riel 1855a222f341SKirill Tkhai /* 1856a222f341SKirill Tkhai * This moves pages from @list to the corresponding LRU list. 1857a222f341SKirill Tkhai * 1858a222f341SKirill Tkhai * We move them the other way if the page is referenced by one or more 1859a222f341SKirill Tkhai * processes, from rmap. 1860a222f341SKirill Tkhai * 1861a222f341SKirill Tkhai * If the pages are mostly unmapped, the processing is fast and it is 1862a222f341SKirill Tkhai * appropriate to hold zone_lru_lock across the whole operation.
But if 1863a222f341SKirill Tkhai * the pages are mapped, the processing is slow (page_referenced()) so we 1864a222f341SKirill Tkhai * should drop zone_lru_lock around each page. It's impossible to balance 1865a222f341SKirill Tkhai * this, so instead we remove the pages from the LRU while processing them. 1866a222f341SKirill Tkhai * It is safe to rely on PG_active against the non-LRU pages in here because 1867a222f341SKirill Tkhai * nobody will play with that bit on a non-LRU page. 1868a222f341SKirill Tkhai * 1869a222f341SKirill Tkhai * The downside is that we have to touch page->_refcount against each page. 1870a222f341SKirill Tkhai * But we had to alter page->flags anyway. 1871a222f341SKirill Tkhai * 1872a222f341SKirill Tkhai * Returns the number of pages moved to the given lruvec. 1873a222f341SKirill Tkhai */ 1874a222f341SKirill Tkhai 1875a222f341SKirill Tkhai static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec, 1876a222f341SKirill Tkhai struct list_head *list) 187766635629SMel Gorman { 1878599d0c95SMel Gorman struct pglist_data *pgdat = lruvec_pgdat(lruvec); 1879a222f341SKirill Tkhai int nr_pages, nr_moved = 0; 18803f79768fSHugh Dickins LIST_HEAD(pages_to_free); 1881a222f341SKirill Tkhai struct page *page; 1882a222f341SKirill Tkhai enum lru_list lru; 188366635629SMel Gorman 1884a222f341SKirill Tkhai while (!list_empty(list)) { 1885a222f341SKirill Tkhai page = lru_to_page(list); 1886309381feSSasha Levin VM_BUG_ON_PAGE(PageLRU(page), page); 188739b5f29aSHugh Dickins if (unlikely(!page_evictable(page))) { 1888a222f341SKirill Tkhai list_del(&page->lru); 1889599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 189066635629SMel Gorman putback_lru_page(page); 1891599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 189266635629SMel Gorman continue; 189366635629SMel Gorman } 1894599d0c95SMel Gorman lruvec = mem_cgroup_page_lruvec(page, pgdat); 1895fa9add64SHugh Dickins 18967a608572SLinus Torvalds SetPageLRU(page); 189766635629SMel Gorman lru = page_lru(page); 1898a222f341SKirill Tkhai 1899a222f341SKirill Tkhai nr_pages = hpage_nr_pages(page); 1900a222f341SKirill Tkhai update_lru_size(lruvec, lru, page_zonenum(page), nr_pages); 1901a222f341SKirill Tkhai list_move(&page->lru, &lruvec->lists[lru]); 1902fa9add64SHugh Dickins 19032bcf8879SHugh Dickins if (put_page_testzero(page)) { 19042bcf8879SHugh Dickins __ClearPageLRU(page); 19052bcf8879SHugh Dickins __ClearPageActive(page); 1906fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, lru); 19072bcf8879SHugh Dickins 19082bcf8879SHugh Dickins if (unlikely(PageCompound(page))) { 1909599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 1910747db954SJohannes Weiner mem_cgroup_uncharge(page); 19112bcf8879SHugh Dickins (*get_compound_page_dtor(page))(page); 1912599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 19132bcf8879SHugh Dickins } else 19142bcf8879SHugh Dickins list_add(&page->lru, &pages_to_free); 1915a222f341SKirill Tkhai } else { 1916a222f341SKirill Tkhai nr_moved += nr_pages; 191766635629SMel Gorman } 191866635629SMel Gorman } 191966635629SMel Gorman 19203f79768fSHugh Dickins /* 19213f79768fSHugh Dickins * To save our caller's stack, now use input list for pages to free. 
19223f79768fSHugh Dickins */ 1923a222f341SKirill Tkhai list_splice(&pages_to_free, list); 1924a222f341SKirill Tkhai 1925a222f341SKirill Tkhai return nr_moved; 192666635629SMel Gorman } 192766635629SMel Gorman 192866635629SMel Gorman /* 1929399ba0b9SNeilBrown * If a kernel thread (such as nfsd for loop-back mounts) services 1930399ba0b9SNeilBrown * a backing device by writing to the page cache it sets PF_LESS_THROTTLE. 1931399ba0b9SNeilBrown * In that case we should only throttle if the backing device it is 1932399ba0b9SNeilBrown * writing to is congested. In other cases it is safe to throttle. 1933399ba0b9SNeilBrown */ 1934399ba0b9SNeilBrown static int current_may_throttle(void) 1935399ba0b9SNeilBrown { 1936399ba0b9SNeilBrown return !(current->flags & PF_LESS_THROTTLE) || 1937399ba0b9SNeilBrown current->backing_dev_info == NULL || 1938399ba0b9SNeilBrown bdi_write_congested(current->backing_dev_info); 1939399ba0b9SNeilBrown } 1940399ba0b9SNeilBrown 1941399ba0b9SNeilBrown /* 1942b2e18757SMel Gorman * shrink_inactive_list() is a helper for shrink_node(). It returns the number 19431742f19fSAndrew Morton * of reclaimed pages 19441da177e4SLinus Torvalds */ 194566635629SMel Gorman static noinline_for_stack unsigned long 19461a93be0eSKonstantin Khlebnikov shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, 19479e3b2f8cSKonstantin Khlebnikov struct scan_control *sc, enum lru_list lru) 19481da177e4SLinus Torvalds { 19491da177e4SLinus Torvalds LIST_HEAD(page_list); 1950e247dbceSKOSAKI Motohiro unsigned long nr_scanned; 195105ff5137SAndrew Morton unsigned long nr_reclaimed = 0; 1952e247dbceSKOSAKI Motohiro unsigned long nr_taken; 1953060f005fSKirill Tkhai struct reclaim_stat stat; 19543cb99451SKonstantin Khlebnikov int file = is_file_lru(lru); 1955f46b7912SKirill Tkhai enum vm_event_item item; 1956599d0c95SMel Gorman struct pglist_data *pgdat = lruvec_pgdat(lruvec); 19571a93be0eSKonstantin Khlebnikov struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 1958db73ee0dSMichal Hocko bool stalled = false; 195978dc583dSKOSAKI Motohiro 1960599d0c95SMel Gorman while (unlikely(too_many_isolated(pgdat, file, sc))) { 1961db73ee0dSMichal Hocko if (stalled) 1962db73ee0dSMichal Hocko return 0; 1963db73ee0dSMichal Hocko 1964db73ee0dSMichal Hocko /* wait a bit for the reclaimer. */ 1965db73ee0dSMichal Hocko msleep(100); 1966db73ee0dSMichal Hocko stalled = true; 196735cd7815SRik van Riel 196835cd7815SRik van Riel /* We are about to die and free our memory. Return now. */ 196935cd7815SRik van Riel if (fatal_signal_pending(current)) 197035cd7815SRik van Riel return SWAP_CLUSTER_MAX; 197135cd7815SRik van Riel } 197235cd7815SRik van Riel 19731da177e4SLinus Torvalds lru_add_drain(); 1974f80c0673SMinchan Kim 1975599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 19761da177e4SLinus Torvalds 19775dc35979SKonstantin Khlebnikov nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list, 1978a9e7c39fSKirill Tkhai &nr_scanned, sc, lru); 197995d918fcSKonstantin Khlebnikov 1980599d0c95SMel Gorman __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 19819d5e6a9fSHugh Dickins reclaim_stat->recent_scanned[file] += nr_taken; 198295d918fcSKonstantin Khlebnikov 1983f46b7912SKirill Tkhai item = current_is_kswapd() ? 
PGSCAN_KSWAPD : PGSCAN_DIRECT; 19842262185cSRoman Gushchin if (global_reclaim(sc)) 1985f46b7912SKirill Tkhai __count_vm_events(item, nr_scanned); 1986f46b7912SKirill Tkhai __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned); 1987599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 1988d563c050SHillf Danton 1989d563c050SHillf Danton if (nr_taken == 0) 199066635629SMel Gorman return 0; 1991b35ea17bSKOSAKI Motohiro 1992a128ca71SShaohua Li nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0, 19933c710c1aSMichal Hocko &stat, false); 1994c661b078SAndy Whitcroft 1995599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 19963f79768fSHugh Dickins 1997f46b7912SKirill Tkhai item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT; 19982262185cSRoman Gushchin if (global_reclaim(sc)) 1999f46b7912SKirill Tkhai __count_vm_events(item, nr_reclaimed); 2000f46b7912SKirill Tkhai __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); 2001b17f18afSKirill Tkhai reclaim_stat->recent_rotated[0] += stat.nr_activate[0]; 2002b17f18afSKirill Tkhai reclaim_stat->recent_rotated[1] += stat.nr_activate[1]; 2003a74609faSNick Piggin 2004a222f341SKirill Tkhai move_pages_to_lru(lruvec, &page_list); 20053f79768fSHugh Dickins 2006599d0c95SMel Gorman __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 20073f79768fSHugh Dickins 2008599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 20093f79768fSHugh Dickins 2010747db954SJohannes Weiner mem_cgroup_uncharge_list(&page_list); 20112d4894b5SMel Gorman free_unref_page_list(&page_list); 2012e11da5b4SMel Gorman 201392df3a72SMel Gorman /* 20141c610d5fSAndrey Ryabinin * If dirty pages are scanned that are not queued for IO, it 20151c610d5fSAndrey Ryabinin * implies that flushers are not doing their job. This can 20161c610d5fSAndrey Ryabinin * happen when memory pressure pushes dirty pages to the end of 20171c610d5fSAndrey Ryabinin * the LRU before the dirty limits are breached and the dirty 20181c610d5fSAndrey Ryabinin * data has expired. It can also happen when the proportion of 20191c610d5fSAndrey Ryabinin * dirty pages grows not through writes but through memory 20201c610d5fSAndrey Ryabinin * pressure reclaiming all the clean cache. And in some cases, 20211c610d5fSAndrey Ryabinin * the flushers simply cannot keep up with the allocation 20221c610d5fSAndrey Ryabinin * rate. Nudge the flusher threads in case they are asleep. 
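 * (The nudge is the wakeup_flusher_threads(WB_REASON_VMSCAN) call
 * below, issued when every page taken off the LRU in this batch
 * turned out to be dirty but not yet queued for writeback.)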
20231c610d5fSAndrey Ryabinin */ 20241c610d5fSAndrey Ryabinin if (stat.nr_unqueued_dirty == nr_taken) 20251c610d5fSAndrey Ryabinin wakeup_flusher_threads(WB_REASON_VMSCAN); 20261c610d5fSAndrey Ryabinin 2027d108c772SAndrey Ryabinin sc->nr.dirty += stat.nr_dirty; 2028d108c772SAndrey Ryabinin sc->nr.congested += stat.nr_congested; 2029d108c772SAndrey Ryabinin sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; 2030d108c772SAndrey Ryabinin sc->nr.writeback += stat.nr_writeback; 2031d108c772SAndrey Ryabinin sc->nr.immediate += stat.nr_immediate; 2032d108c772SAndrey Ryabinin sc->nr.taken += nr_taken; 2033d108c772SAndrey Ryabinin if (file) 2034d108c772SAndrey Ryabinin sc->nr.file_taken += nr_taken; 20358e950282SMel Gorman 2036599d0c95SMel Gorman trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, 2037d51d1e64SSteven Rostedt nr_scanned, nr_reclaimed, &stat, sc->priority, file); 203805ff5137SAndrew Morton return nr_reclaimed; 20391da177e4SLinus Torvalds } 20401da177e4SLinus Torvalds 2041f626012dSHugh Dickins static void shrink_active_list(unsigned long nr_to_scan, 20421a93be0eSKonstantin Khlebnikov struct lruvec *lruvec, 2043f16015fbSJohannes Weiner struct scan_control *sc, 20449e3b2f8cSKonstantin Khlebnikov enum lru_list lru) 20451cfb419bSKAMEZAWA Hiroyuki { 204644c241f1SKOSAKI Motohiro unsigned long nr_taken; 2047f626012dSHugh Dickins unsigned long nr_scanned; 20486fe6b7e3SWu Fengguang unsigned long vm_flags; 20491cfb419bSKAMEZAWA Hiroyuki LIST_HEAD(l_hold); /* The pages which were snipped off */ 20508cab4754SWu Fengguang LIST_HEAD(l_active); 2051b69408e8SChristoph Lameter LIST_HEAD(l_inactive); 20521cfb419bSKAMEZAWA Hiroyuki struct page *page; 20531a93be0eSKonstantin Khlebnikov struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 20549d998b4fSMichal Hocko unsigned nr_deactivate, nr_activate; 20559d998b4fSMichal Hocko unsigned nr_rotated = 0; 20563cb99451SKonstantin Khlebnikov int file = is_file_lru(lru); 2057599d0c95SMel Gorman struct pglist_data *pgdat = lruvec_pgdat(lruvec); 20581cfb419bSKAMEZAWA Hiroyuki 20591da177e4SLinus Torvalds lru_add_drain(); 2060f80c0673SMinchan Kim 2061599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 2062925b7673SJohannes Weiner 20635dc35979SKonstantin Khlebnikov nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold, 2064a9e7c39fSKirill Tkhai &nr_scanned, sc, lru); 206589b5fae5SJohannes Weiner 2066599d0c95SMel Gorman __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 2067b7c46d15SJohannes Weiner reclaim_stat->recent_scanned[file] += nr_taken; 20681cfb419bSKAMEZAWA Hiroyuki 2069599d0c95SMel Gorman __count_vm_events(PGREFILL, nr_scanned); 20702fa2690cSYafang Shao __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned); 20719d5e6a9fSHugh Dickins 2072599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 20731da177e4SLinus Torvalds 20741da177e4SLinus Torvalds while (!list_empty(&l_hold)) { 20751da177e4SLinus Torvalds cond_resched(); 20761da177e4SLinus Torvalds page = lru_to_page(&l_hold); 20771da177e4SLinus Torvalds list_del(&page->lru); 20787e9cd484SRik van Riel 207939b5f29aSHugh Dickins if (unlikely(!page_evictable(page))) { 2080894bc310SLee Schermerhorn putback_lru_page(page); 2081894bc310SLee Schermerhorn continue; 2082894bc310SLee Schermerhorn } 2083894bc310SLee Schermerhorn 2084cc715d99SMel Gorman if (unlikely(buffer_heads_over_limit)) { 2085cc715d99SMel Gorman if (page_has_private(page) && trylock_page(page)) { 2086cc715d99SMel Gorman if (page_has_private(page)) 2087cc715d99SMel Gorman try_to_release_page(page, 0); 
2088cc715d99SMel Gorman unlock_page(page); 2089cc715d99SMel Gorman } 2090cc715d99SMel Gorman } 2091cc715d99SMel Gorman 2092c3ac9a8aSJohannes Weiner if (page_referenced(page, 0, sc->target_mem_cgroup, 2093c3ac9a8aSJohannes Weiner &vm_flags)) { 20949992af10SRik van Riel nr_rotated += hpage_nr_pages(page); 20958cab4754SWu Fengguang /* 20968cab4754SWu Fengguang * Identify referenced, file-backed active pages and 20978cab4754SWu Fengguang * give them one more trip around the active list, so 20988cab4754SWu Fengguang * that executable code gets a better chance to stay in 20998cab4754SWu Fengguang * memory under moderate memory pressure. Anon pages 21008cab4754SWu Fengguang * are not likely to be evicted by use-once streaming 21018cab4754SWu Fengguang * IO, plus JVM can create lots of anon VM_EXEC pages, 21028cab4754SWu Fengguang * so we ignore them here. 21038cab4754SWu Fengguang */ 210441e20983SWu Fengguang if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { 21058cab4754SWu Fengguang list_add(&page->lru, &l_active); 21068cab4754SWu Fengguang continue; 21078cab4754SWu Fengguang } 21088cab4754SWu Fengguang } 21097e9cd484SRik van Riel 21105205e56eSKOSAKI Motohiro ClearPageActive(page); /* we are de-activating */ 21111899ad18SJohannes Weiner SetPageWorkingset(page); 21121da177e4SLinus Torvalds list_add(&page->lru, &l_inactive); 21131da177e4SLinus Torvalds } 21141da177e4SLinus Torvalds 2115b555749aSAndrew Morton /* 21168cab4754SWu Fengguang * Move pages back to the lru list. 2117b555749aSAndrew Morton */ 2118599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 21194f98a2feSRik van Riel /* 21208cab4754SWu Fengguang * Count referenced pages from currently used mappings as rotated, 21218cab4754SWu Fengguang * even though only some of them are actually re-activated. This 21228cab4754SWu Fengguang * helps balance scan pressure between file and anonymous pages in 21237c0db9e9SJerome Marchand * get_scan_count. 2124556adecbSRik van Riel */ 2125b7c46d15SJohannes Weiner reclaim_stat->recent_rotated[file] += nr_rotated; 2126556adecbSRik van Riel 2127a222f341SKirill Tkhai nr_activate = move_pages_to_lru(lruvec, &l_active); 2128a222f341SKirill Tkhai nr_deactivate = move_pages_to_lru(lruvec, &l_inactive); 2129f372d89eSKirill Tkhai /* Keep all free pages in l_active list */ 2130f372d89eSKirill Tkhai list_splice(&l_inactive, &l_active); 21319851ac13SKirill Tkhai 21329851ac13SKirill Tkhai __count_vm_events(PGDEACTIVATE, nr_deactivate); 21339851ac13SKirill Tkhai __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate); 21349851ac13SKirill Tkhai 2135599d0c95SMel Gorman __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 2136599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 21372bcf8879SHugh Dickins 2138f372d89eSKirill Tkhai mem_cgroup_uncharge_list(&l_active); 2139f372d89eSKirill Tkhai free_unref_page_list(&l_active); 21409d998b4fSMichal Hocko trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, 21419d998b4fSMichal Hocko nr_deactivate, nr_rotated, sc->priority, file); 21421da177e4SLinus Torvalds } 21431da177e4SLinus Torvalds 214459dc76b0SRik van Riel /* 214559dc76b0SRik van Riel * The inactive anon list should be small enough that the VM never has 214659dc76b0SRik van Riel * to do too much work.
214714797e23SKOSAKI Motohiro * 214859dc76b0SRik van Riel * The inactive file list should be small enough to leave most memory 214959dc76b0SRik van Riel * to the established workingset on the scan-resistant active list, 215059dc76b0SRik van Riel * but large enough to avoid thrashing the aggregate readahead window. 215159dc76b0SRik van Riel * 215259dc76b0SRik van Riel * Both inactive lists should also be large enough that each inactive 215359dc76b0SRik van Riel * page has a chance to be referenced again before it is reclaimed. 215459dc76b0SRik van Riel * 21552a2e4885SJohannes Weiner * If that fails and refaulting is observed, the inactive list grows. 21562a2e4885SJohannes Weiner * 215759dc76b0SRik van Riel * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages 21583a50d14dSAndrey Ryabinin * on this LRU, maintained by the pageout code. An inactive_ratio 215959dc76b0SRik van Riel * of 3 means 3:1 or 25% of the pages are kept on the inactive list. 216059dc76b0SRik van Riel * 216159dc76b0SRik van Riel * total target max 216259dc76b0SRik van Riel * memory ratio inactive 216359dc76b0SRik van Riel * ------------------------------------- 216459dc76b0SRik van Riel * 10MB 1 5MB 216559dc76b0SRik van Riel * 100MB 1 50MB 216659dc76b0SRik van Riel * 1GB 3 250MB 216759dc76b0SRik van Riel * 10GB 10 0.9GB 216859dc76b0SRik van Riel * 100GB 31 3GB 216959dc76b0SRik van Riel * 1TB 101 10GB 217059dc76b0SRik van Riel * 10TB 320 32GB 217114797e23SKOSAKI Motohiro */ 2172f8d1a311SMel Gorman static bool inactive_list_is_low(struct lruvec *lruvec, bool file, 21732c012a4aSKuo-Hsin Yang struct scan_control *sc, bool trace) 217414797e23SKOSAKI Motohiro { 2175fd538803SMichal Hocko enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE; 21762a2e4885SJohannes Weiner struct pglist_data *pgdat = lruvec_pgdat(lruvec); 21772a2e4885SJohannes Weiner enum lru_list inactive_lru = file * LRU_FILE; 21782a2e4885SJohannes Weiner unsigned long inactive, active; 21792a2e4885SJohannes Weiner unsigned long inactive_ratio; 21802a2e4885SJohannes Weiner unsigned long refaults; 218159dc76b0SRik van Riel unsigned long gb; 218259dc76b0SRik van Riel 218374e3f3c3SMinchan Kim /* 218474e3f3c3SMinchan Kim * If we don't have swap space, anonymous page deactivation 218574e3f3c3SMinchan Kim * is pointless. 218674e3f3c3SMinchan Kim */ 218759dc76b0SRik van Riel if (!file && !total_swap_pages) 218842e2e457SYaowei Bai return false; 218974e3f3c3SMinchan Kim 2190fd538803SMichal Hocko inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx); 2191fd538803SMichal Hocko active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx); 2192f8d1a311SMel Gorman 21932a2e4885SJohannes Weiner /* 21942a2e4885SJohannes Weiner * When refaults are being observed, it means a new workingset 21952a2e4885SJohannes Weiner * is being established. Disable active list protection to get 21962a2e4885SJohannes Weiner * rid of the stale workingset quickly. 
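 *
 * (Worked example of the else branch below, for exposition: with
 * 4GB of pages on this LRU, gb = 4 and inactive_ratio =
 * int_sqrt(10 * 4) = 6, so the inactive list is considered low
 * once inactive * 6 < active.)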
21972a2e4885SJohannes Weiner */ 2198205b20ccSJohannes Weiner refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE); 21992c012a4aSKuo-Hsin Yang if (file && lruvec->refaults != refaults) { 22002a2e4885SJohannes Weiner inactive_ratio = 0; 22012a2e4885SJohannes Weiner } else { 220259dc76b0SRik van Riel gb = (inactive + active) >> (30 - PAGE_SHIFT); 220359dc76b0SRik van Riel if (gb) 220459dc76b0SRik van Riel inactive_ratio = int_sqrt(10 * gb); 2205b39415b2SRik van Riel else 220659dc76b0SRik van Riel inactive_ratio = 1; 22072a2e4885SJohannes Weiner } 220859dc76b0SRik van Riel 22092c012a4aSKuo-Hsin Yang if (trace) 22102a2e4885SJohannes Weiner trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx, 2211fd538803SMichal Hocko lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive, 2212fd538803SMichal Hocko lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active, 2213fd538803SMichal Hocko inactive_ratio, file); 2214fd538803SMichal Hocko 221559dc76b0SRik van Riel return inactive * inactive_ratio < active; 2216b39415b2SRik van Riel } 2217b39415b2SRik van Riel 22184f98a2feSRik van Riel static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 22193b991208SJohannes Weiner struct lruvec *lruvec, struct scan_control *sc) 2220b69408e8SChristoph Lameter { 2221b39415b2SRik van Riel if (is_active_lru(lru)) { 22223b991208SJohannes Weiner if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true)) 22231a93be0eSKonstantin Khlebnikov shrink_active_list(nr_to_scan, lruvec, sc, lru); 2224556adecbSRik van Riel return 0; 2225556adecbSRik van Riel } 2226556adecbSRik van Riel 22271a93be0eSKonstantin Khlebnikov return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); 2228b69408e8SChristoph Lameter } 2229b69408e8SChristoph Lameter 22309a265114SJohannes Weiner enum scan_balance { 22319a265114SJohannes Weiner SCAN_EQUAL, 22329a265114SJohannes Weiner SCAN_FRACT, 22339a265114SJohannes Weiner SCAN_ANON, 22349a265114SJohannes Weiner SCAN_FILE, 22359a265114SJohannes Weiner }; 22369a265114SJohannes Weiner 22371da177e4SLinus Torvalds /* 22384f98a2feSRik van Riel * Determine how aggressively the anon and file LRU lists should be 22394f98a2feSRik van Riel * scanned. The relative value of each set of LRU lists is determined 22404f98a2feSRik van Riel * by looking at the fraction of the pages scanned that we rotated back 22414f98a2feSRik van Riel * onto the active list instead of evicting.
22424f98a2feSRik van Riel * 2243be7bd59dSWanpeng Li * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan 2244be7bd59dSWanpeng Li * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan 22454f98a2feSRik van Riel */ 224633377678SVladimir Davydov static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, 22476b4f7799SJohannes Weiner struct scan_control *sc, unsigned long *nr, 22486b4f7799SJohannes Weiner unsigned long *lru_pages) 22494f98a2feSRik van Riel { 225033377678SVladimir Davydov int swappiness = mem_cgroup_swappiness(memcg); 225190126375SKonstantin Khlebnikov struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 22529a265114SJohannes Weiner u64 fraction[2]; 22539a265114SJohannes Weiner u64 denominator = 0; /* gcc */ 2254599d0c95SMel Gorman struct pglist_data *pgdat = lruvec_pgdat(lruvec); 22559a265114SJohannes Weiner unsigned long anon_prio, file_prio; 22569a265114SJohannes Weiner enum scan_balance scan_balance; 22570bf1457fSJohannes Weiner unsigned long anon, file; 22589a265114SJohannes Weiner unsigned long ap, fp; 22599a265114SJohannes Weiner enum lru_list lru; 226076a33fc3SShaohua Li 226176a33fc3SShaohua Li /* If we have no swap space, do not bother scanning anon pages. */ 2262d8b38438SVladimir Davydov if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) { 22639a265114SJohannes Weiner scan_balance = SCAN_FILE; 226476a33fc3SShaohua Li goto out; 226576a33fc3SShaohua Li } 22664f98a2feSRik van Riel 226710316b31SJohannes Weiner /* 226810316b31SJohannes Weiner * Global reclaim will swap to prevent OOM even with no 226910316b31SJohannes Weiner * swappiness, but memcg users want to use this knob to 227010316b31SJohannes Weiner * disable swapping for individual groups completely when 227110316b31SJohannes Weiner * using the memory controller's swap limit feature would be 227210316b31SJohannes Weiner * too expensive. 227310316b31SJohannes Weiner */ 227402695175SJohannes Weiner if (!global_reclaim(sc) && !swappiness) { 22759a265114SJohannes Weiner scan_balance = SCAN_FILE; 227610316b31SJohannes Weiner goto out; 227710316b31SJohannes Weiner } 227810316b31SJohannes Weiner 227910316b31SJohannes Weiner /* 228010316b31SJohannes Weiner * Do not apply any pressure balancing cleverness when the 228110316b31SJohannes Weiner * system is close to OOM, scan both anon and file equally 228210316b31SJohannes Weiner * (unless the swappiness setting disagrees with swapping). 228310316b31SJohannes Weiner */ 228402695175SJohannes Weiner if (!sc->priority && swappiness) { 22859a265114SJohannes Weiner scan_balance = SCAN_EQUAL; 228610316b31SJohannes Weiner goto out; 228710316b31SJohannes Weiner } 228810316b31SJohannes Weiner 228911d16c25SJohannes Weiner /* 229062376251SJohannes Weiner * Prevent the reclaimer from falling into the cache trap: as 229162376251SJohannes Weiner * cache pages start out inactive, every cache fault will tip 229262376251SJohannes Weiner * the scan balance towards the file LRU. And as the file LRU 229362376251SJohannes Weiner * shrinks, so does the window for rotation from references. 229462376251SJohannes Weiner * This means we have a runaway feedback loop where a tiny 229562376251SJohannes Weiner * thrashing file LRU becomes infinitely more attractive than 229662376251SJohannes Weiner * anon pages. Try to detect this based on file LRU size. 
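 *
 * (Concretely, in the global-reclaim branch below: if pgdatfile +
 * pgdatfree no longer covers the sum of the eligible zones' high
 * watermarks, the file LRU is judged too small to be useful, and the
 * balance is forced to SCAN_ANON, provided enough inactive anon pages
 * are eligible at this priority.)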
229762376251SJohannes Weiner */ 229862376251SJohannes Weiner if (global_reclaim(sc)) { 2299599d0c95SMel Gorman unsigned long pgdatfile; 2300599d0c95SMel Gorman unsigned long pgdatfree; 2301599d0c95SMel Gorman int z; 2302599d0c95SMel Gorman unsigned long total_high_wmark = 0; 230362376251SJohannes Weiner 2304599d0c95SMel Gorman pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); 2305599d0c95SMel Gorman pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) + 2306599d0c95SMel Gorman node_page_state(pgdat, NR_INACTIVE_FILE); 23072ab051e1SJerome Marchand 2308599d0c95SMel Gorman for (z = 0; z < MAX_NR_ZONES; z++) { 2309599d0c95SMel Gorman struct zone *zone = &pgdat->node_zones[z]; 23106aa303deSMel Gorman if (!managed_zone(zone)) 2311599d0c95SMel Gorman continue; 2312599d0c95SMel Gorman 2313599d0c95SMel Gorman total_high_wmark += high_wmark_pages(zone); 2314599d0c95SMel Gorman } 2315599d0c95SMel Gorman 2316599d0c95SMel Gorman if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) { 231706226226SDavid Rientjes /* 231806226226SDavid Rientjes * Force SCAN_ANON if there are enough inactive 231906226226SDavid Rientjes * anonymous pages on the LRU in eligible zones. 232006226226SDavid Rientjes * Otherwise, the small LRU gets thrashed. 232106226226SDavid Rientjes */ 23223b991208SJohannes Weiner if (!inactive_list_is_low(lruvec, false, sc, false) && 232306226226SDavid Rientjes lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx) 232406226226SDavid Rientjes >> sc->priority) { 232562376251SJohannes Weiner scan_balance = SCAN_ANON; 232662376251SJohannes Weiner goto out; 232762376251SJohannes Weiner } 232862376251SJohannes Weiner } 232906226226SDavid Rientjes } 233062376251SJohannes Weiner 233162376251SJohannes Weiner /* 2332316bda0eSVladimir Davydov * If there is enough inactive page cache, i.e. if the size of the 2333316bda0eSVladimir Davydov * inactive list is greater than that of the active list *and* the 2334316bda0eSVladimir Davydov * inactive list actually has some pages to scan on this priority, we 2335316bda0eSVladimir Davydov * do not reclaim anything from the anonymous working set right now. 2336316bda0eSVladimir Davydov * Without the second condition we could end up never scanning an 2337316bda0eSVladimir Davydov * lruvec even if it has plenty of old anonymous pages unless the 2338316bda0eSVladimir Davydov * system is under heavy pressure. 2339e9868505SRik van Riel */ 23403b991208SJohannes Weiner if (!inactive_list_is_low(lruvec, true, sc, false) && 234171ab6cfeSMichal Hocko lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) { 23429a265114SJohannes Weiner scan_balance = SCAN_FILE; 2343e9868505SRik van Riel goto out; 23444f98a2feSRik van Riel } 23454f98a2feSRik van Riel 23469a265114SJohannes Weiner scan_balance = SCAN_FRACT; 23479a265114SJohannes Weiner 23484f98a2feSRik van Riel /* 234958c37f6eSKOSAKI Motohiro * With swappiness at 100, anonymous and file have the same priority. 235058c37f6eSKOSAKI Motohiro * This scanning priority is essentially the inverse of IO cost. 235158c37f6eSKOSAKI Motohiro */ 235202695175SJohannes Weiner anon_prio = swappiness; 235375b00af7SHugh Dickins file_prio = 200 - anon_prio; 235458c37f6eSKOSAKI Motohiro 235558c37f6eSKOSAKI Motohiro /* 23564f98a2feSRik van Riel * OK, so we have swap space and a fair amount of page cache 23574f98a2feSRik van Riel * pages. We use the recently rotated / recently scanned 23584f98a2feSRik van Riel * ratios to determine how valuable each cache is. 
23594f98a2feSRik van Riel * 23604f98a2feSRik van Riel * Because workloads change over time (and to avoid overflow) 23614f98a2feSRik van Riel * we keep these statistics as a floating average, which ends 23624f98a2feSRik van Riel * up weighing recent references more than old ones. 23634f98a2feSRik van Riel * 23644f98a2feSRik van Riel * anon in [0], file in [1] 23654f98a2feSRik van Riel */ 23662ab051e1SJerome Marchand 2367fd538803SMichal Hocko anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) + 2368fd538803SMichal Hocko lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES); 2369fd538803SMichal Hocko file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) + 2370fd538803SMichal Hocko lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES); 23712ab051e1SJerome Marchand 2372599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 237358c37f6eSKOSAKI Motohiro if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { 23746e901571SKOSAKI Motohiro reclaim_stat->recent_scanned[0] /= 2; 23756e901571SKOSAKI Motohiro reclaim_stat->recent_rotated[0] /= 2; 23764f98a2feSRik van Riel } 23774f98a2feSRik van Riel 23786e901571SKOSAKI Motohiro if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { 23796e901571SKOSAKI Motohiro reclaim_stat->recent_scanned[1] /= 2; 23806e901571SKOSAKI Motohiro reclaim_stat->recent_rotated[1] /= 2; 23814f98a2feSRik van Riel } 23824f98a2feSRik van Riel 23834f98a2feSRik van Riel /* 238400d8089cSRik van Riel * The amount of pressure on anon vs file pages is inversely 238500d8089cSRik van Riel * proportional to the fraction of recently scanned pages on 238600d8089cSRik van Riel * each list that were recently referenced and in active use. 23874f98a2feSRik van Riel */ 2388fe35004fSSatoru Moriya ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1); 23896e901571SKOSAKI Motohiro ap /= reclaim_stat->recent_rotated[0] + 1; 23904f98a2feSRik van Riel 2391fe35004fSSatoru Moriya fp = file_prio * (reclaim_stat->recent_scanned[1] + 1); 23926e901571SKOSAKI Motohiro fp /= reclaim_stat->recent_rotated[1] + 1; 2393599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 23944f98a2feSRik van Riel 239576a33fc3SShaohua Li fraction[0] = ap; 239676a33fc3SShaohua Li fraction[1] = fp; 239776a33fc3SShaohua Li denominator = ap + fp + 1; 239876a33fc3SShaohua Li out: 23996b4f7799SJohannes Weiner *lru_pages = 0; 24004111304dSHugh Dickins for_each_evictable_lru(lru) { 24014111304dSHugh Dickins int file = is_file_lru(lru); 2402d778df51SJohannes Weiner unsigned long size; 240376a33fc3SShaohua Li unsigned long scan; 240476a33fc3SShaohua Li 240571ab6cfeSMichal Hocko size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); 2406d778df51SJohannes Weiner scan = size >> sc->priority; 2407688035f7SJohannes Weiner /* 2408688035f7SJohannes Weiner * If the cgroup's already been deleted, make sure to 2409688035f7SJohannes Weiner * scrape out the remaining cache. 2410688035f7SJohannes Weiner */ 2411688035f7SJohannes Weiner if (!scan && !mem_cgroup_online(memcg)) 2412d778df51SJohannes Weiner scan = min(size, SWAP_CLUSTER_MAX); 24139a265114SJohannes Weiner 24149a265114SJohannes Weiner switch (scan_balance) { 24159a265114SJohannes Weiner case SCAN_EQUAL: 24169a265114SJohannes Weiner /* Scan lists relative to size */ 24179a265114SJohannes Weiner break; 24189a265114SJohannes Weiner case SCAN_FRACT: 24199a265114SJohannes Weiner /* 24209a265114SJohannes Weiner * Scan types proportional to swappiness and 24219a265114SJohannes Weiner * their relative recent reclaim efficiency. 
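 * An illustrative calculation with made-up numbers: with
 * swappiness 60, anon_prio == 60 and file_prio == 140; if
 * recent_scanned/recent_rotated is 1000/500 for anon but
 * 1000/100 for file, then ap ~= 60 * 1001 / 501 ~= 120 and
 * fp ~= 140 * 1001 / 101 ~= 1387, so file gets roughly 92%
 * of the scan pressure. The halving of recent_scanned and
 * recent_rotated above is what keeps these counters a
 * floating average rather than an all-time sum.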
242268600f62SRoman Gushchin * Make sure we don't miss the last page 242368600f62SRoman Gushchin * because of a round-off error. 24249a265114SJohannes Weiner */ 242568600f62SRoman Gushchin scan = DIV64_U64_ROUND_UP(scan * fraction[file], 24266f04f48dSSuleiman Souhlal denominator); 24279a265114SJohannes Weiner break; 24289a265114SJohannes Weiner case SCAN_FILE: 24299a265114SJohannes Weiner case SCAN_ANON: 24309a265114SJohannes Weiner /* Scan one type exclusively */ 24316b4f7799SJohannes Weiner if ((scan_balance == SCAN_FILE) != file) { 24326b4f7799SJohannes Weiner size = 0; 24339a265114SJohannes Weiner scan = 0; 24346b4f7799SJohannes Weiner } 24359a265114SJohannes Weiner break; 24369a265114SJohannes Weiner default: 24379a265114SJohannes Weiner /* Look ma, no brain */ 24389a265114SJohannes Weiner BUG(); 24399a265114SJohannes Weiner } 24406b4f7799SJohannes Weiner 24416b4f7799SJohannes Weiner *lru_pages += size; 24424111304dSHugh Dickins nr[lru] = scan; 244376a33fc3SShaohua Li } 24446e08a369SWu Fengguang } 24454f98a2feSRik van Riel 24469b4f98cdSJohannes Weiner /* 2447a9dd0a83SMel Gorman * This is a basic per-node page freer. Used by both kswapd and direct reclaim. 24489b4f98cdSJohannes Weiner */ 2449a9dd0a83SMel Gorman static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg, 24506b4f7799SJohannes Weiner struct scan_control *sc, unsigned long *lru_pages) 24519b4f98cdSJohannes Weiner { 2452ef8f2327SMel Gorman struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg); 24539b4f98cdSJohannes Weiner unsigned long nr[NR_LRU_LISTS]; 2454e82e0561SMel Gorman unsigned long targets[NR_LRU_LISTS]; 24559b4f98cdSJohannes Weiner unsigned long nr_to_scan; 24569b4f98cdSJohannes Weiner enum lru_list lru; 24579b4f98cdSJohannes Weiner unsigned long nr_reclaimed = 0; 24589b4f98cdSJohannes Weiner unsigned long nr_to_reclaim = sc->nr_to_reclaim; 24599b4f98cdSJohannes Weiner struct blk_plug plug; 24601a501907SMel Gorman bool scan_adjusted; 24619b4f98cdSJohannes Weiner 246233377678SVladimir Davydov get_scan_count(lruvec, memcg, sc, nr, lru_pages); 24639b4f98cdSJohannes Weiner 2464e82e0561SMel Gorman /* Record the original scan target for proportional adjustments later */ 2465e82e0561SMel Gorman memcpy(targets, nr, sizeof(nr)); 2466e82e0561SMel Gorman 24671a501907SMel Gorman /* 24681a501907SMel Gorman * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal 24691a501907SMel Gorman * event that can occur when there is little memory pressure e.g. 24701a501907SMel Gorman * multiple streaming readers/writers. Hence, we do not abort scanning 24711a501907SMel Gorman * when the requested number of pages are reclaimed when scanning at 24721a501907SMel Gorman * DEF_PRIORITY on the assumption that the fact we are direct 24731a501907SMel Gorman * reclaiming implies that kswapd is not keeping up and it is best to 24741a501907SMel Gorman * do a batch of work at once. For memcg reclaim one check is made to 24751a501907SMel Gorman * abort proportional reclaim if either the file or anon lru has already 24761a501907SMel Gorman * dropped to zero at the first pass. 
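 *
 * (Illustration with hypothetical numbers: if nr_to_reclaim is met
 * while anon still has 49% of its original target left, anon scanning
 * is stopped and each file list's remaining target is clipped so that
 * file also ends at roughly the 51% mark, minus whatever it has
 * already scanned.)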
24771a501907SMel Gorman */ 24781a501907SMel Gorman scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() && 24791a501907SMel Gorman sc->priority == DEF_PRIORITY); 24801a501907SMel Gorman 24819b4f98cdSJohannes Weiner blk_start_plug(&plug); 24829b4f98cdSJohannes Weiner while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 24839b4f98cdSJohannes Weiner nr[LRU_INACTIVE_FILE]) { 2484e82e0561SMel Gorman unsigned long nr_anon, nr_file, percentage; 2485e82e0561SMel Gorman unsigned long nr_scanned; 2486e82e0561SMel Gorman 24879b4f98cdSJohannes Weiner for_each_evictable_lru(lru) { 24889b4f98cdSJohannes Weiner if (nr[lru]) { 24899b4f98cdSJohannes Weiner nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); 24909b4f98cdSJohannes Weiner nr[lru] -= nr_to_scan; 24919b4f98cdSJohannes Weiner 24929b4f98cdSJohannes Weiner nr_reclaimed += shrink_list(lru, nr_to_scan, 24933b991208SJohannes Weiner lruvec, sc); 24949b4f98cdSJohannes Weiner } 24959b4f98cdSJohannes Weiner } 2496e82e0561SMel Gorman 2497bd041733SMichal Hocko cond_resched(); 2498bd041733SMichal Hocko 2499e82e0561SMel Gorman if (nr_reclaimed < nr_to_reclaim || scan_adjusted) 2500e82e0561SMel Gorman continue; 2501e82e0561SMel Gorman 25029b4f98cdSJohannes Weiner /* 2503e82e0561SMel Gorman * For kswapd and memcg, reclaim at least the number of pages 25041a501907SMel Gorman * requested. Ensure that the anon and file LRUs are scanned 2505e82e0561SMel Gorman * proportionally to what was requested by get_scan_count(). We 2506e82e0561SMel Gorman * stop reclaiming one LRU and reduce the amount of scanning 2507e82e0561SMel Gorman * proportional to the original scan target. 2508e82e0561SMel Gorman */ 2509e82e0561SMel Gorman nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE]; 2510e82e0561SMel Gorman nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON]; 2511e82e0561SMel Gorman 25121a501907SMel Gorman /* 25131a501907SMel Gorman * It's just vindictive to attack the larger once the smaller 25141a501907SMel Gorman * has gone to zero. And given the way we stop scanning the 25151a501907SMel Gorman * smaller below, this makes sure that we only make one nudge 25161a501907SMel Gorman * towards proportionality once we've got nr_to_reclaim. 25171a501907SMel Gorman */ 25181a501907SMel Gorman if (!nr_file || !nr_anon) 25191a501907SMel Gorman break; 25201a501907SMel Gorman 2521e82e0561SMel Gorman if (nr_file > nr_anon) { 2522e82e0561SMel Gorman unsigned long scan_target = targets[LRU_INACTIVE_ANON] + 2523e82e0561SMel Gorman targets[LRU_ACTIVE_ANON] + 1; 2524e82e0561SMel Gorman lru = LRU_BASE; 2525e82e0561SMel Gorman percentage = nr_anon * 100 / scan_target; 2526e82e0561SMel Gorman } else { 2527e82e0561SMel Gorman unsigned long scan_target = targets[LRU_INACTIVE_FILE] + 2528e82e0561SMel Gorman targets[LRU_ACTIVE_FILE] + 1; 2529e82e0561SMel Gorman lru = LRU_FILE; 2530e82e0561SMel Gorman percentage = nr_file * 100 / scan_target; 2531e82e0561SMel Gorman } 2532e82e0561SMel Gorman 2533e82e0561SMel Gorman /* Stop scanning the smaller of the LRU */ 2534e82e0561SMel Gorman nr[lru] = 0; 2535e82e0561SMel Gorman nr[lru + LRU_ACTIVE] = 0; 2536e82e0561SMel Gorman 2537e82e0561SMel Gorman /* 2538e82e0561SMel Gorman * Recalculate the other LRU scan count based on its original 2539e82e0561SMel Gorman * scan target and the percentage scanning already complete 2540e82e0561SMel Gorman */ 2541e82e0561SMel Gorman lru = (lru == LRU_FILE) ?
LRU_BASE : LRU_FILE; 2542e82e0561SMel Gorman nr_scanned = targets[lru] - nr[lru]; 2543e82e0561SMel Gorman nr[lru] = targets[lru] * (100 - percentage) / 100; 2544e82e0561SMel Gorman nr[lru] -= min(nr[lru], nr_scanned); 2545e82e0561SMel Gorman 2546e82e0561SMel Gorman lru += LRU_ACTIVE; 2547e82e0561SMel Gorman nr_scanned = targets[lru] - nr[lru]; 2548e82e0561SMel Gorman nr[lru] = targets[lru] * (100 - percentage) / 100; 2549e82e0561SMel Gorman nr[lru] -= min(nr[lru], nr_scanned); 2550e82e0561SMel Gorman 2551e82e0561SMel Gorman scan_adjusted = true; 25529b4f98cdSJohannes Weiner } 25539b4f98cdSJohannes Weiner blk_finish_plug(&plug); 25549b4f98cdSJohannes Weiner sc->nr_reclaimed += nr_reclaimed; 25559b4f98cdSJohannes Weiner 25569b4f98cdSJohannes Weiner /* 25579b4f98cdSJohannes Weiner * Even if we did not try to evict anon pages at all, we want to 25589b4f98cdSJohannes Weiner * rebalance the anon lru active/inactive ratio. 25599b4f98cdSJohannes Weiner */ 25603b991208SJohannes Weiner if (inactive_list_is_low(lruvec, false, sc, true)) 25619b4f98cdSJohannes Weiner shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 25629b4f98cdSJohannes Weiner sc, LRU_ACTIVE_ANON); 25639b4f98cdSJohannes Weiner } 25649b4f98cdSJohannes Weiner 256523b9da55SMel Gorman /* Use reclaim/compaction for costly allocs or under memory pressure */ 25669e3b2f8cSKonstantin Khlebnikov static bool in_reclaim_compaction(struct scan_control *sc) 256723b9da55SMel Gorman { 2568d84da3f9SKirill A. Shutemov if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && 256923b9da55SMel Gorman (sc->order > PAGE_ALLOC_COSTLY_ORDER || 25709e3b2f8cSKonstantin Khlebnikov sc->priority < DEF_PRIORITY - 2)) 257123b9da55SMel Gorman return true; 257223b9da55SMel Gorman 257323b9da55SMel Gorman return false; 257423b9da55SMel Gorman } 257523b9da55SMel Gorman 25764f98a2feSRik van Riel /* 257723b9da55SMel Gorman * Reclaim/compaction is used for high-order allocation requests. It reclaims 257823b9da55SMel Gorman * order-0 pages before compacting the zone. should_continue_reclaim() returns 257923b9da55SMel Gorman * true if more pages should be reclaimed so that, when the page allocator 258023b9da55SMel Gorman * calls try_to_compact_zone(), it will have enough free pages to succeed. 258123b9da55SMel Gorman * It will give up earlier than that if there is difficulty reclaiming pages. 25823e7d3449SMel Gorman */ 2583a9dd0a83SMel Gorman static inline bool should_continue_reclaim(struct pglist_data *pgdat, 25843e7d3449SMel Gorman unsigned long nr_reclaimed, 25853e7d3449SMel Gorman unsigned long nr_scanned, 25863e7d3449SMel Gorman struct scan_control *sc) 25873e7d3449SMel Gorman { 25883e7d3449SMel Gorman unsigned long pages_for_compaction; 25893e7d3449SMel Gorman unsigned long inactive_lru_pages; 2590a9dd0a83SMel Gorman int z; 25913e7d3449SMel Gorman 25923e7d3449SMel Gorman /* If not in reclaim/compaction mode, stop */ 25939e3b2f8cSKonstantin Khlebnikov if (!in_reclaim_compaction(sc)) 25943e7d3449SMel Gorman return false; 25953e7d3449SMel Gorman 25962876592fSMel Gorman /* Consider stopping depending on scan and reclaim activity */ 2597dcda9b04SMichal Hocko if (sc->gfp_mask & __GFP_RETRY_MAYFAIL) { 25983e7d3449SMel Gorman /* 2599dcda9b04SMichal Hocko * For __GFP_RETRY_MAYFAIL allocations, stop reclaiming if the 26002876592fSMel Gorman * full LRU list has been scanned and we are still failing 26012876592fSMel Gorman * to reclaim pages.
This full LRU scan is potentially 2602dcda9b04SMichal Hocko * expensive but a __GFP_RETRY_MAYFAIL caller really wants to succeed. 26033e7d3449SMel Gorman */ 26043e7d3449SMel Gorman if (!nr_reclaimed && !nr_scanned) 26053e7d3449SMel Gorman return false; 26062876592fSMel Gorman } else { 26072876592fSMel Gorman /* 2608dcda9b04SMichal Hocko * For non-__GFP_RETRY_MAYFAIL allocations which can presumably 26092876592fSMel Gorman * fail without consequence, stop if we failed to reclaim 26102876592fSMel Gorman * any pages from the last SWAP_CLUSTER_MAX number of 26112876592fSMel Gorman * pages that were scanned. This will return to the 26122876592fSMel Gorman * caller faster at the risk that reclaim/compaction and 26132876592fSMel Gorman * the resulting allocation attempt fail. 26142876592fSMel Gorman */ 26152876592fSMel Gorman if (!nr_reclaimed) 26162876592fSMel Gorman return false; 26172876592fSMel Gorman } 26183e7d3449SMel Gorman 26193e7d3449SMel Gorman /* 26203e7d3449SMel Gorman * If we have not reclaimed enough pages for compaction and the 26213e7d3449SMel Gorman * inactive lists are large enough, continue reclaiming 26223e7d3449SMel Gorman */ 26239861a62cSVlastimil Babka pages_for_compaction = compact_gap(sc->order); 2624a9dd0a83SMel Gorman inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); 2625ec8acf20SShaohua Li if (get_nr_swap_pages() > 0) 2626a9dd0a83SMel Gorman inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); 26273e7d3449SMel Gorman if (sc->nr_reclaimed < pages_for_compaction && 26283e7d3449SMel Gorman inactive_lru_pages > pages_for_compaction) 26293e7d3449SMel Gorman return true; 26303e7d3449SMel Gorman 26313e7d3449SMel Gorman /* If compaction would go ahead or the allocation would succeed, stop */ 2632a9dd0a83SMel Gorman for (z = 0; z <= sc->reclaim_idx; z++) { 2633a9dd0a83SMel Gorman struct zone *zone = &pgdat->node_zones[z]; 26346aa303deSMel Gorman if (!managed_zone(zone)) 2635a9dd0a83SMel Gorman continue; 2636a9dd0a83SMel Gorman 2637a9dd0a83SMel Gorman switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) { 2638cf378319SVlastimil Babka case COMPACT_SUCCESS: 26393e7d3449SMel Gorman case COMPACT_CONTINUE: 26403e7d3449SMel Gorman return false; 26413e7d3449SMel Gorman default: 2642a9dd0a83SMel Gorman /* check next zone */ 2643a9dd0a83SMel Gorman ; 26443e7d3449SMel Gorman } 26453e7d3449SMel Gorman } 2646a9dd0a83SMel Gorman return true; 2647a9dd0a83SMel Gorman } 26483e7d3449SMel Gorman 2649e3c1ac58SAndrey Ryabinin static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg) 2650e3c1ac58SAndrey Ryabinin { 2651e3c1ac58SAndrey Ryabinin return test_bit(PGDAT_CONGESTED, &pgdat->flags) || 2652e3c1ac58SAndrey Ryabinin (memcg && memcg_congested(pgdat, memcg)); 2653e3c1ac58SAndrey Ryabinin } 2654e3c1ac58SAndrey Ryabinin 2655970a39a3SMel Gorman static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) 2656f16015fbSJohannes Weiner { 2657cb731d6cSVladimir Davydov struct reclaim_state *reclaim_state = current->reclaim_state; 26589b4f98cdSJohannes Weiner unsigned long nr_reclaimed, nr_scanned; 26592344d7e4SJohannes Weiner bool reclaimable = false; 26609b4f98cdSJohannes Weiner 26619b4f98cdSJohannes Weiner do { 26625660048cSJohannes Weiner struct mem_cgroup *root = sc->target_mem_cgroup; 26635660048cSJohannes Weiner struct mem_cgroup_reclaim_cookie reclaim = { 2664ef8f2327SMel Gorman .pgdat = pgdat, 26659e3b2f8cSKonstantin Khlebnikov .priority = sc->priority, 26665660048cSJohannes Weiner }; 2667a9dd0a83SMel Gorman unsigned long node_lru_pages =
0; 2668694fbc0fSAndrew Morton struct mem_cgroup *memcg; 26695660048cSJohannes Weiner 2670d108c772SAndrey Ryabinin memset(&sc->nr, 0, sizeof(sc->nr)); 2671d108c772SAndrey Ryabinin 26729b4f98cdSJohannes Weiner nr_reclaimed = sc->nr_reclaimed; 26739b4f98cdSJohannes Weiner nr_scanned = sc->nr_scanned; 26749b4f98cdSJohannes Weiner 2675694fbc0fSAndrew Morton memcg = mem_cgroup_iter(root, NULL, &reclaim); 2676694fbc0fSAndrew Morton do { 26776b4f7799SJohannes Weiner unsigned long lru_pages; 26788e8ae645SJohannes Weiner unsigned long reclaimed; 2679cb731d6cSVladimir Davydov unsigned long scanned; 26809b4f98cdSJohannes Weiner 2681bf8d5d52SRoman Gushchin switch (mem_cgroup_protected(root, memcg)) { 2682bf8d5d52SRoman Gushchin case MEMCG_PROT_MIN: 2683bf8d5d52SRoman Gushchin /* 2684bf8d5d52SRoman Gushchin * Hard protection. 2685bf8d5d52SRoman Gushchin * If there is no reclaimable memory, OOM. 2686bf8d5d52SRoman Gushchin */ 2687bf8d5d52SRoman Gushchin continue; 2688bf8d5d52SRoman Gushchin case MEMCG_PROT_LOW: 2689bf8d5d52SRoman Gushchin /* 2690bf8d5d52SRoman Gushchin * Soft protection. 2691bf8d5d52SRoman Gushchin * Respect the protection only as long as 2692bf8d5d52SRoman Gushchin * there is an unprotected supply 2693bf8d5d52SRoman Gushchin * of reclaimable memory from other cgroups. 2694bf8d5d52SRoman Gushchin */ 2695d6622f63SYisheng Xie if (!sc->memcg_low_reclaim) { 2696d6622f63SYisheng Xie sc->memcg_low_skipped = 1; 2697241994edSJohannes Weiner continue; 2698d6622f63SYisheng Xie } 2699e27be240SJohannes Weiner memcg_memory_event(memcg, MEMCG_LOW); 2700bf8d5d52SRoman Gushchin break; 2701bf8d5d52SRoman Gushchin case MEMCG_PROT_NONE: 2702bf8d5d52SRoman Gushchin break; 2703241994edSJohannes Weiner } 2704241994edSJohannes Weiner 27058e8ae645SJohannes Weiner reclaimed = sc->nr_reclaimed; 2706cb731d6cSVladimir Davydov scanned = sc->nr_scanned; 2707a9dd0a83SMel Gorman shrink_node_memcg(pgdat, memcg, sc, &lru_pages); 2708a9dd0a83SMel Gorman node_lru_pages += lru_pages; 2709f9be23d6SKonstantin Khlebnikov 27101c30844dSMel Gorman if (sc->may_shrinkslab) { 2711a9dd0a83SMel Gorman shrink_slab(sc->gfp_mask, pgdat->node_id, 27129092c71bSJosef Bacik memcg, sc->priority); 27131c30844dSMel Gorman } 2714cb731d6cSVladimir Davydov 27158e8ae645SJohannes Weiner /* Record the group's reclaim efficiency */ 27168e8ae645SJohannes Weiner vmpressure(sc->gfp_mask, memcg, false, 27178e8ae645SJohannes Weiner sc->nr_scanned - scanned, 27188e8ae645SJohannes Weiner sc->nr_reclaimed - reclaimed); 27198e8ae645SJohannes Weiner 27205660048cSJohannes Weiner /* 27212bb0f34fSYang Shi * Kswapd has to scan all memory cgroups to fulfill 27222bb0f34fSYang Shi * the overall scan target for the node. 2723a394cb8eSMichal Hocko * 2724a394cb8eSMichal Hocko * Limit reclaim, on the other hand, only cares about 2725a394cb8eSMichal Hocko * nr_to_reclaim pages to be reclaimed and it will 2726a394cb8eSMichal Hocko * retry with decreasing priority if one round over the 2727a394cb8eSMichal Hocko * whole hierarchy is not sufficient.
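 *
 * (Concretely: a direct reclaimer that has already met
 * sc->nr_to_reclaim breaks out of the hierarchy walk below via
 * mem_cgroup_iter_break(), while kswapd keeps iterating so that
 * every cgroup on the node gets aged.)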
27285660048cSJohannes Weiner */ 27292bb0f34fSYang Shi if (!current_is_kswapd() && 2730a394cb8eSMichal Hocko sc->nr_reclaimed >= sc->nr_to_reclaim) { 27315660048cSJohannes Weiner mem_cgroup_iter_break(root, memcg); 27325660048cSJohannes Weiner break; 27335660048cSJohannes Weiner } 2734241994edSJohannes Weiner } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim))); 273570ddf637SAnton Vorontsov 27366b4f7799SJohannes Weiner if (reclaim_state) { 2737cb731d6cSVladimir Davydov sc->nr_reclaimed += reclaim_state->reclaimed_slab; 27386b4f7799SJohannes Weiner reclaim_state->reclaimed_slab = 0; 27396b4f7799SJohannes Weiner } 27406b4f7799SJohannes Weiner 27418e8ae645SJohannes Weiner /* Record the subtree's reclaim efficiency */ 27428e8ae645SJohannes Weiner vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, 274370ddf637SAnton Vorontsov sc->nr_scanned - nr_scanned, 274470ddf637SAnton Vorontsov sc->nr_reclaimed - nr_reclaimed); 274570ddf637SAnton Vorontsov 27462344d7e4SJohannes Weiner if (sc->nr_reclaimed - nr_reclaimed) 27472344d7e4SJohannes Weiner reclaimable = true; 27482344d7e4SJohannes Weiner 2749e3c1ac58SAndrey Ryabinin if (current_is_kswapd()) { 2750d108c772SAndrey Ryabinin /* 2751e3c1ac58SAndrey Ryabinin * If reclaim is isolating dirty pages under writeback, 2752e3c1ac58SAndrey Ryabinin * it implies that the long-lived page allocation rate 2753e3c1ac58SAndrey Ryabinin * is exceeding the page laundering rate. Either the 2754e3c1ac58SAndrey Ryabinin * global limits are not being effective at throttling 2755e3c1ac58SAndrey Ryabinin * processes due to the page distribution throughout 2756e3c1ac58SAndrey Ryabinin * zones or there is heavy usage of a slow backing 2757e3c1ac58SAndrey Ryabinin * device. The only option is to throttle from reclaim 2758e3c1ac58SAndrey Ryabinin * context which is not ideal as there is no guarantee 2759d108c772SAndrey Ryabinin * the dirtying process is throttled in the same way 2760d108c772SAndrey Ryabinin * balance_dirty_pages() manages. 2761d108c772SAndrey Ryabinin * 2762e3c1ac58SAndrey Ryabinin * Once a node is flagged PGDAT_WRITEBACK, kswapd will 2763e3c1ac58SAndrey Ryabinin * count the number of pages under writeback flagged for 2764e3c1ac58SAndrey Ryabinin * immediate reclaim and stall if any are encountered 2765e3c1ac58SAndrey Ryabinin * in the nr_immediate check below. 2766d108c772SAndrey Ryabinin */ 2767d108c772SAndrey Ryabinin if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) 2768d108c772SAndrey Ryabinin set_bit(PGDAT_WRITEBACK, &pgdat->flags); 2769d108c772SAndrey Ryabinin 2770d108c772SAndrey Ryabinin /* 2771d108c772SAndrey Ryabinin * Tag a node as congested if all the dirty pages 2772d108c772SAndrey Ryabinin * scanned were backed by a congested BDI and 2773d108c772SAndrey Ryabinin * wait_iff_congested will stall.
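 * (The test below fires only when every dirty page seen in this
 * round was also counted as congested, i.e. sc->nr.dirty ==
 * sc->nr.congested.)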
2774d108c772SAndrey Ryabinin */ 2775d108c772SAndrey Ryabinin if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) 2776d108c772SAndrey Ryabinin set_bit(PGDAT_CONGESTED, &pgdat->flags); 2777d108c772SAndrey Ryabinin 2778d108c772SAndrey Ryabinin /* Allow kswapd to start writing pages during reclaim. */ 2779d108c772SAndrey Ryabinin if (sc->nr.unqueued_dirty == sc->nr.file_taken) 2780d108c772SAndrey Ryabinin set_bit(PGDAT_DIRTY, &pgdat->flags); 2781d108c772SAndrey Ryabinin 2782d108c772SAndrey Ryabinin /* 2783d108c772SAndrey Ryabinin * If kswapd scans pages marked for immediate 2784d108c772SAndrey Ryabinin * reclaim and under writeback (nr_immediate), it 2785d108c772SAndrey Ryabinin * implies that pages are cycling through the LRU 2786d108c772SAndrey Ryabinin * faster than they are written so also forcibly stall. 2787d108c772SAndrey Ryabinin */ 2788d108c772SAndrey Ryabinin if (sc->nr.immediate) 2789d108c772SAndrey Ryabinin congestion_wait(BLK_RW_ASYNC, HZ/10); 2790d108c772SAndrey Ryabinin } 2791d108c772SAndrey Ryabinin 2792d108c772SAndrey Ryabinin /* 2793e3c1ac58SAndrey Ryabinin * Legacy memcg will stall in page writeback so avoid forcibly 2794e3c1ac58SAndrey Ryabinin * stalling in wait_iff_congested(). 2795e3c1ac58SAndrey Ryabinin */ 2796e3c1ac58SAndrey Ryabinin if (!global_reclaim(sc) && sane_reclaim(sc) && 2797e3c1ac58SAndrey Ryabinin sc->nr.dirty && sc->nr.dirty == sc->nr.congested) 2798e3c1ac58SAndrey Ryabinin set_memcg_congestion(pgdat, root, true); 2799e3c1ac58SAndrey Ryabinin 2800e3c1ac58SAndrey Ryabinin /* 2801d108c772SAndrey Ryabinin * Stall direct reclaim for IO completions if the underlying 2802d108c772SAndrey Ryabinin * BDIs and the node are congested. Allow kswapd to continue until it 2803d108c772SAndrey Ryabinin * starts encountering unqueued dirty pages or cycling through 2804d108c772SAndrey Ryabinin * the LRU too quickly. 2805d108c772SAndrey Ryabinin */ 2806d108c772SAndrey Ryabinin if (!sc->hibernation_mode && !current_is_kswapd() && 2807e3c1ac58SAndrey Ryabinin current_may_throttle() && pgdat_memcg_congested(pgdat, root)) 2808e3c1ac58SAndrey Ryabinin wait_iff_congested(BLK_RW_ASYNC, HZ/10); 2809d108c772SAndrey Ryabinin 2810a9dd0a83SMel Gorman } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed, 28119b4f98cdSJohannes Weiner sc->nr_scanned - nr_scanned, sc)); 28122344d7e4SJohannes Weiner 2813c73322d0SJohannes Weiner /* 2814c73322d0SJohannes Weiner * Kswapd gives up on balancing particular nodes after too 2815c73322d0SJohannes Weiner * many failures to reclaim anything from them and goes to 2816c73322d0SJohannes Weiner * sleep. On reclaim progress, reset the failure counter. A 2817c73322d0SJohannes Weiner * successful direct reclaim run will revive a dormant kswapd. 2818c73322d0SJohannes Weiner */ 2819c73322d0SJohannes Weiner if (reclaimable) 2820c73322d0SJohannes Weiner pgdat->kswapd_failures = 0; 2821c73322d0SJohannes Weiner 28222344d7e4SJohannes Weiner return reclaimable; 2823f16015fbSJohannes Weiner } 2824f16015fbSJohannes Weiner 282553853e2dSVlastimil Babka /* 2826fdd4c614SVlastimil Babka * Returns true if compaction should go ahead for a costly-order request, or 2827fdd4c614SVlastimil Babka * the allocation would already succeed without compaction. Return false if we 2828fdd4c614SVlastimil Babka * should reclaim first.
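 *
 * Worked example, assuming compact_gap(order) evaluates to 2UL << order
 * as in include/linux/compaction.h: for an order-9 THP request against
 * a zone with high watermark W, reclaim keeps going until roughly
 * W + (2 << 9) == W + 1024 free pages (4MB with 4K pages) are
 * available, after which compaction is trusted to do the rest.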
282953853e2dSVlastimil Babka */ 28304f588331SMel Gorman static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) 2831fe4b1b24SMel Gorman { 283231483b6aSMel Gorman unsigned long watermark; 2833fdd4c614SVlastimil Babka enum compact_result suitable; 2834fe4b1b24SMel Gorman 2835fdd4c614SVlastimil Babka suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx); 2836fdd4c614SVlastimil Babka if (suitable == COMPACT_SUCCESS) 2837fdd4c614SVlastimil Babka /* Allocation should succeed already. Don't reclaim. */ 2838fdd4c614SVlastimil Babka return true; 2839fdd4c614SVlastimil Babka if (suitable == COMPACT_SKIPPED) 2840fdd4c614SVlastimil Babka /* Compaction cannot yet proceed. Do reclaim. */ 2841fe4b1b24SMel Gorman return false; 2842fe4b1b24SMel Gorman 2843fdd4c614SVlastimil Babka /* 2844fdd4c614SVlastimil Babka * Compaction is already possible, but it takes time to run and there 2845fdd4c614SVlastimil Babka * are potentially other callers using the pages just freed. So proceed 2846fdd4c614SVlastimil Babka * with reclaim to make a buffer of free pages available to give 2847fdd4c614SVlastimil Babka * compaction a reasonable chance of completing and allocating the page. 2848fdd4c614SVlastimil Babka * Note that we won't actually reclaim the whole buffer in one attempt 2849fdd4c614SVlastimil Babka * as the target watermark in should_continue_reclaim() is lower. But if 2850fdd4c614SVlastimil Babka * we are already above the high+gap watermark, don't reclaim at all. 2851fdd4c614SVlastimil Babka */ 2852fdd4c614SVlastimil Babka watermark = high_wmark_pages(zone) + compact_gap(sc->order); 2853fdd4c614SVlastimil Babka 2854fdd4c614SVlastimil Babka return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); 2855fe4b1b24SMel Gorman } 2856fe4b1b24SMel Gorman 28571da177e4SLinus Torvalds /* 28581da177e4SLinus Torvalds * This is the direct reclaim path, for page-allocating processes. We only 28591da177e4SLinus Torvalds * try to reclaim pages from zones which will satisfy the caller's allocation 28601da177e4SLinus Torvalds * request. 28611da177e4SLinus Torvalds * 28621da177e4SLinus Torvalds * If a zone is deemed to be full of pinned pages then just give it a light 28631da177e4SLinus Torvalds * scan then give up on it. 
28641da177e4SLinus Torvalds */ 28650a0337e0SMichal Hocko static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc) 28661da177e4SLinus Torvalds { 2867dd1a239fSMel Gorman struct zoneref *z; 286854a6eb5cSMel Gorman struct zone *zone; 28690608f43dSAndrew Morton unsigned long nr_soft_reclaimed; 28700608f43dSAndrew Morton unsigned long nr_soft_scanned; 2871619d0d76SWeijie Yang gfp_t orig_mask; 287279dafcdcSMel Gorman pg_data_t *last_pgdat = NULL; 28731cfb419bSKAMEZAWA Hiroyuki 2874cc715d99SMel Gorman /* 2875cc715d99SMel Gorman * If the number of buffer_heads in the machine exceeds the maximum 2876cc715d99SMel Gorman * allowed level, force direct reclaim to scan the highmem zone as 2877cc715d99SMel Gorman * highmem pages could be pinning lowmem pages storing buffer_heads 2878cc715d99SMel Gorman */ 2879619d0d76SWeijie Yang orig_mask = sc->gfp_mask; 2880b2e18757SMel Gorman if (buffer_heads_over_limit) { 2881cc715d99SMel Gorman sc->gfp_mask |= __GFP_HIGHMEM; 28824f588331SMel Gorman sc->reclaim_idx = gfp_zone(sc->gfp_mask); 2883b2e18757SMel Gorman } 2884cc715d99SMel Gorman 2885d4debc66SMel Gorman for_each_zone_zonelist_nodemask(zone, z, zonelist, 2886b2e18757SMel Gorman sc->reclaim_idx, sc->nodemask) { 2887b2e18757SMel Gorman /* 28881cfb419bSKAMEZAWA Hiroyuki * Take care: memory controller reclaiming has only a small 28891cfb419bSKAMEZAWA Hiroyuki * influence on the global LRU. 28901cfb419bSKAMEZAWA Hiroyuki */ 289189b5fae5SJohannes Weiner if (global_reclaim(sc)) { 2892344736f2SVladimir Davydov if (!cpuset_zone_allowed(zone, 2893344736f2SVladimir Davydov GFP_KERNEL | __GFP_HARDWALL)) 28941da177e4SLinus Torvalds continue; 289565ec02cbSVladimir Davydov 2896e0887c19SRik van Riel /* 2897e0c23279SMel Gorman * If we already have plenty of memory free for 2898e0c23279SMel Gorman * compaction in this zone, don't free any more. 2899e0c23279SMel Gorman * Even though compaction is invoked for any 2900e0c23279SMel Gorman * non-zero order, only frequent costly order 2901e0c23279SMel Gorman * reclamation is disruptive enough to become a 2902c7cfa37bSCopot Alexandru * noticeable problem, like transparent huge 2903c7cfa37bSCopot Alexandru * page allocations. 2904e0887c19SRik van Riel */ 29050b06496aSJohannes Weiner if (IS_ENABLED(CONFIG_COMPACTION) && 29060b06496aSJohannes Weiner sc->order > PAGE_ALLOC_COSTLY_ORDER && 29074f588331SMel Gorman compaction_ready(zone, sc)) { 29080b06496aSJohannes Weiner sc->compaction_ready = true; 2909e0887c19SRik van Riel continue; 2910e0887c19SRik van Riel } 29110b06496aSJohannes Weiner 29120608f43dSAndrew Morton /* 291379dafcdcSMel Gorman * Shrink each node in the zonelist once. If the 291479dafcdcSMel Gorman * zonelist is ordered by zone (not the default) then a 291579dafcdcSMel Gorman * node may be shrunk multiple times but in that case 291679dafcdcSMel Gorman * the user prefers lower zones being preserved. 291779dafcdcSMel Gorman */ 291879dafcdcSMel Gorman if (zone->zone_pgdat == last_pgdat) 291979dafcdcSMel Gorman continue; 292079dafcdcSMel Gorman 292179dafcdcSMel Gorman /* 29220608f43dSAndrew Morton * This steals pages from memory cgroups over softlimit 29230608f43dSAndrew Morton * and returns the number of reclaimed pages and 29240608f43dSAndrew Morton * scanned pages. This works for global memory pressure 29250608f43dSAndrew Morton * and balancing, not for a memcg's limit.
29260608f43dSAndrew Morton */ 29270608f43dSAndrew Morton nr_soft_scanned = 0; 2928ef8f2327SMel Gorman nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat, 29290608f43dSAndrew Morton sc->order, sc->gfp_mask, 29300608f43dSAndrew Morton &nr_soft_scanned); 29310608f43dSAndrew Morton sc->nr_reclaimed += nr_soft_reclaimed; 29320608f43dSAndrew Morton sc->nr_scanned += nr_soft_scanned; 2933ac34a1a3SKAMEZAWA Hiroyuki /* need some check to avoid more shrink_zone() calls */ 2934ac34a1a3SKAMEZAWA Hiroyuki } 2935d149e3b2SYing Han 293679dafcdcSMel Gorman /* See comment about same check for global reclaim above */ 293779dafcdcSMel Gorman if (zone->zone_pgdat == last_pgdat) 293879dafcdcSMel Gorman continue; 293979dafcdcSMel Gorman last_pgdat = zone->zone_pgdat; 2940970a39a3SMel Gorman shrink_node(zone->zone_pgdat, sc); 29411da177e4SLinus Torvalds } 2942e0c23279SMel Gorman 294365ec02cbSVladimir Davydov /* 2944619d0d76SWeijie Yang * Restore to original mask to avoid the impact on the caller if we 2945619d0d76SWeijie Yang * promoted it to __GFP_HIGHMEM. 2946619d0d76SWeijie Yang */ 2947619d0d76SWeijie Yang sc->gfp_mask = orig_mask; 29481da177e4SLinus Torvalds } 29491da177e4SLinus Torvalds 29502a2e4885SJohannes Weiner static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat) 29512a2e4885SJohannes Weiner { 29522a2e4885SJohannes Weiner struct mem_cgroup *memcg; 29532a2e4885SJohannes Weiner 29542a2e4885SJohannes Weiner memcg = mem_cgroup_iter(root_memcg, NULL, NULL); 29552a2e4885SJohannes Weiner do { 29562a2e4885SJohannes Weiner unsigned long refaults; 29572a2e4885SJohannes Weiner struct lruvec *lruvec; 29582a2e4885SJohannes Weiner 29592a2e4885SJohannes Weiner lruvec = mem_cgroup_lruvec(pgdat, memcg); 2960205b20ccSJohannes Weiner refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE); 29612a2e4885SJohannes Weiner lruvec->refaults = refaults; 29622a2e4885SJohannes Weiner } while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL))); 29632a2e4885SJohannes Weiner } 29642a2e4885SJohannes Weiner 29651da177e4SLinus Torvalds /* 29661da177e4SLinus Torvalds * This is the main entry point to direct page reclaim. 29671da177e4SLinus Torvalds * 29681da177e4SLinus Torvalds * If a full scan of the inactive list fails to free enough memory then we 29691da177e4SLinus Torvalds * are "out of memory" and something needs to be killed. 29701da177e4SLinus Torvalds * 29711da177e4SLinus Torvalds * If the caller is !__GFP_FS then the probability of a failure is reasonably 29721da177e4SLinus Torvalds * high - the zone may be full of dirty or under-writeback pages, which this 29735b0830cbSJens Axboe * caller can't do much about. We kick the writeback threads and take explicit 29745b0830cbSJens Axboe * naps in the hope that some of these pages can be written. But if the 29755b0830cbSJens Axboe * allocating task holds filesystem locks which prevent writeout this might not 29765b0830cbSJens Axboe * work, and the allocation attempt will fail.
2977a41f24eaSNishanth Aravamudan * 2978a41f24eaSNishanth Aravamudan * returns: 0, if no pages reclaimed 2979a41f24eaSNishanth Aravamudan * else, the number of pages reclaimed 29801da177e4SLinus Torvalds */ 2981dac1d27bSMel Gorman static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 29823115cd91SVladimir Davydov struct scan_control *sc) 29831da177e4SLinus Torvalds { 2984241994edSJohannes Weiner int initial_priority = sc->priority; 29852a2e4885SJohannes Weiner pg_data_t *last_pgdat; 29862a2e4885SJohannes Weiner struct zoneref *z; 29872a2e4885SJohannes Weiner struct zone *zone; 2988241994edSJohannes Weiner retry: 2989873b4771SKeika Kobayashi delayacct_freepages_start(); 2990873b4771SKeika Kobayashi 299189b5fae5SJohannes Weiner if (global_reclaim(sc)) 29927cc30fcfSMel Gorman __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); 29931da177e4SLinus Torvalds 29949e3b2f8cSKonstantin Khlebnikov do { 299570ddf637SAnton Vorontsov vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, 299670ddf637SAnton Vorontsov sc->priority); 299766e1707bSBalbir Singh sc->nr_scanned = 0; 29980a0337e0SMichal Hocko shrink_zones(zonelist, sc); 2999e0c23279SMel Gorman 3000bb21c7ceSKOSAKI Motohiro if (sc->nr_reclaimed >= sc->nr_to_reclaim) 30010b06496aSJohannes Weiner break; 30020b06496aSJohannes Weiner 30030b06496aSJohannes Weiner if (sc->compaction_ready) 30040b06496aSJohannes Weiner break; 30051da177e4SLinus Torvalds 30061da177e4SLinus Torvalds /* 30070e50ce3bSMinchan Kim * If we're getting trouble reclaiming, start doing 30080e50ce3bSMinchan Kim * writepage even in laptop mode. 30090e50ce3bSMinchan Kim */ 30100e50ce3bSMinchan Kim if (sc->priority < DEF_PRIORITY - 2) 30110e50ce3bSMinchan Kim sc->may_writepage = 1; 30120b06496aSJohannes Weiner } while (--sc->priority >= 0); 3013bb21c7ceSKOSAKI Motohiro 30142a2e4885SJohannes Weiner last_pgdat = NULL; 30152a2e4885SJohannes Weiner for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, 30162a2e4885SJohannes Weiner sc->nodemask) { 30172a2e4885SJohannes Weiner if (zone->zone_pgdat == last_pgdat) 30182a2e4885SJohannes Weiner continue; 30192a2e4885SJohannes Weiner last_pgdat = zone->zone_pgdat; 30202a2e4885SJohannes Weiner snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); 3021e3c1ac58SAndrey Ryabinin set_memcg_congestion(last_pgdat, sc->target_mem_cgroup, false); 30222a2e4885SJohannes Weiner } 30232a2e4885SJohannes Weiner 3024873b4771SKeika Kobayashi delayacct_freepages_end(); 3025873b4771SKeika Kobayashi 3026bb21c7ceSKOSAKI Motohiro if (sc->nr_reclaimed) 3027bb21c7ceSKOSAKI Motohiro return sc->nr_reclaimed; 3028bb21c7ceSKOSAKI Motohiro 30290cee34fdSMel Gorman /* Aborted reclaim to try compaction? don't OOM, then */ 30300b06496aSJohannes Weiner if (sc->compaction_ready) 30317335084dSMel Gorman return 1; 30327335084dSMel Gorman 3033241994edSJohannes Weiner /* Untapped cgroup reserves? Don't OOM, retry. 
*/ 3034d6622f63SYisheng Xie if (sc->memcg_low_skipped) { 3035241994edSJohannes Weiner sc->priority = initial_priority; 3036d6622f63SYisheng Xie sc->memcg_low_reclaim = 1; 3037d6622f63SYisheng Xie sc->memcg_low_skipped = 0; 3038241994edSJohannes Weiner goto retry; 3039241994edSJohannes Weiner } 3040241994edSJohannes Weiner 3041bb21c7ceSKOSAKI Motohiro return 0; 30421da177e4SLinus Torvalds } 30431da177e4SLinus Torvalds 3044c73322d0SJohannes Weiner static bool allow_direct_reclaim(pg_data_t *pgdat) 30455515061dSMel Gorman { 30465515061dSMel Gorman struct zone *zone; 30475515061dSMel Gorman unsigned long pfmemalloc_reserve = 0; 30485515061dSMel Gorman unsigned long free_pages = 0; 30495515061dSMel Gorman int i; 30505515061dSMel Gorman bool wmark_ok; 30515515061dSMel Gorman 3052c73322d0SJohannes Weiner if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 3053c73322d0SJohannes Weiner return true; 3054c73322d0SJohannes Weiner 30555515061dSMel Gorman for (i = 0; i <= ZONE_NORMAL; i++) { 30565515061dSMel Gorman zone = &pgdat->node_zones[i]; 3057d450abd8SJohannes Weiner if (!managed_zone(zone)) 3058d450abd8SJohannes Weiner continue; 3059d450abd8SJohannes Weiner 3060d450abd8SJohannes Weiner if (!zone_reclaimable_pages(zone)) 3061675becceSMel Gorman continue; 3062675becceSMel Gorman 30635515061dSMel Gorman pfmemalloc_reserve += min_wmark_pages(zone); 30645515061dSMel Gorman free_pages += zone_page_state(zone, NR_FREE_PAGES); 30655515061dSMel Gorman } 30665515061dSMel Gorman 3067675becceSMel Gorman /* If there are no reserves (unexpected config) then do not throttle */ 3068675becceSMel Gorman if (!pfmemalloc_reserve) 3069675becceSMel Gorman return true; 3070675becceSMel Gorman 30715515061dSMel Gorman wmark_ok = free_pages > pfmemalloc_reserve / 2; 30725515061dSMel Gorman 30735515061dSMel Gorman /* kswapd must be awake if processes are being throttled */ 30745515061dSMel Gorman if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { 307538087d9bSMel Gorman pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx, 30765515061dSMel Gorman (enum zone_type)ZONE_NORMAL); 30775515061dSMel Gorman wake_up_interruptible(&pgdat->kswapd_wait); 30785515061dSMel Gorman } 30795515061dSMel Gorman 30805515061dSMel Gorman return wmark_ok; 30815515061dSMel Gorman } 30825515061dSMel Gorman 30835515061dSMel Gorman /* 30845515061dSMel Gorman * Throttle direct reclaimers if backing storage is backed by the network 30855515061dSMel Gorman * and the PFMEMALLOC reserve for the preferred node is getting dangerously 30865515061dSMel Gorman * depleted. kswapd will continue to make progress and wake the processes 308750694c28SMel Gorman * when the low watermark is reached. 308850694c28SMel Gorman * 308950694c28SMel Gorman * Returns true if a fatal signal was delivered during throttling. If this 309050694c28SMel Gorman * happens, the page allocator should not consider triggering the OOM killer. 30915515061dSMel Gorman */ 309250694c28SMel Gorman static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, 30935515061dSMel Gorman nodemask_t *nodemask) 30945515061dSMel Gorman { 3095675becceSMel Gorman struct zoneref *z; 30965515061dSMel Gorman struct zone *zone; 3097675becceSMel Gorman pg_data_t *pgdat = NULL; 30985515061dSMel Gorman 30995515061dSMel Gorman /* 31005515061dSMel Gorman * Kernel threads should not be throttled as they may be indirectly 31015515061dSMel Gorman * responsible for cleaning pages necessary for reclaim to make forward 31025515061dSMel Gorman * progress. 
kjournald for example may enter direct reclaim while 31035515061dSMel Gorman * committing a transaction, where throttling it could force other 31045515061dSMel Gorman * processes to block on log_wait_commit(). 31055515061dSMel Gorman */ 31065515061dSMel Gorman if (current->flags & PF_KTHREAD) 310750694c28SMel Gorman goto out; 310850694c28SMel Gorman 310950694c28SMel Gorman /* 311050694c28SMel Gorman * If a fatal signal is pending, this process should not throttle. 311150694c28SMel Gorman * It should return quickly so it can exit and free its memory. 311250694c28SMel Gorman */ 311350694c28SMel Gorman if (fatal_signal_pending(current)) 311450694c28SMel Gorman goto out; 31155515061dSMel Gorman 3116675becceSMel Gorman /* 3117675becceSMel Gorman * Check if the pfmemalloc reserves are ok by finding the first node 3118675becceSMel Gorman * with a usable ZONE_NORMAL or lower zone. The expectation is that 3119675becceSMel Gorman * GFP_KERNEL will be required for allocating network buffers when 3120675becceSMel Gorman * swapping over the network so ZONE_HIGHMEM is unusable. 3121675becceSMel Gorman * 3122675becceSMel Gorman * Throttling is based on the first usable node and throttled processes 3123675becceSMel Gorman * wait on a queue until kswapd makes progress and wakes them. There 3124675becceSMel Gorman * is an affinity then between processes waking up and where reclaim 3125675becceSMel Gorman * progress has been made assuming the process wakes on the same node. 3126675becceSMel Gorman * More importantly, processes running on remote nodes will not compete 3127675becceSMel Gorman * for remote pfmemalloc reserves and processes on different nodes 3128675becceSMel Gorman * should make reasonable progress. 3129675becceSMel Gorman */ 3130675becceSMel Gorman for_each_zone_zonelist_nodemask(zone, z, zonelist, 313117636faaSMichael S. Tsirkin gfp_zone(gfp_mask), nodemask) { 3132675becceSMel Gorman if (zone_idx(zone) > ZONE_NORMAL) 3133675becceSMel Gorman continue; 3134675becceSMel Gorman 3135675becceSMel Gorman /* Throttle based on the first usable node */ 31365515061dSMel Gorman pgdat = zone->zone_pgdat; 3137c73322d0SJohannes Weiner if (allow_direct_reclaim(pgdat)) 313850694c28SMel Gorman goto out; 3139675becceSMel Gorman break; 3140675becceSMel Gorman } 3141675becceSMel Gorman 3142675becceSMel Gorman /* If no zone was usable by the allocation flags then do not throttle */ 3143675becceSMel Gorman if (!pgdat) 3144675becceSMel Gorman goto out; 31455515061dSMel Gorman 314668243e76SMel Gorman /* Account for the throttling */ 314768243e76SMel Gorman count_vm_event(PGSCAN_DIRECT_THROTTLE); 314868243e76SMel Gorman 31495515061dSMel Gorman /* 31505515061dSMel Gorman * If the caller cannot enter the filesystem, it's possible that it 31515515061dSMel Gorman * is due to the caller holding an FS lock or performing a journal 31525515061dSMel Gorman * transaction in the case of a filesystem like ext[3|4]. In this case, 31535515061dSMel Gorman * it is not safe to block on pfmemalloc_wait as kswapd could be 31545515061dSMel Gorman * blocked waiting on the same lock. Instead, throttle for up to a 31555515061dSMel Gorman * second before continuing.
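 * (The wait_event_interruptible_timeout(..., HZ) below provides that
 * one-second bound; being interruptible, it also ends early when a
 * signal arrives, with the fatal-signal case handled at check_pending.)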
31565515061dSMel Gorman */ 31575515061dSMel Gorman if (!(gfp_mask & __GFP_FS)) { 31585515061dSMel Gorman wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, 3159c73322d0SJohannes Weiner allow_direct_reclaim(pgdat), HZ); 316050694c28SMel Gorman 316150694c28SMel Gorman goto check_pending; 31625515061dSMel Gorman } 31635515061dSMel Gorman 31645515061dSMel Gorman /* Throttle until kswapd wakes the process */ 31655515061dSMel Gorman wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, 3166c73322d0SJohannes Weiner allow_direct_reclaim(pgdat)); 316750694c28SMel Gorman 316850694c28SMel Gorman check_pending: 316950694c28SMel Gorman if (fatal_signal_pending(current)) 317050694c28SMel Gorman return true; 317150694c28SMel Gorman 317250694c28SMel Gorman out: 317350694c28SMel Gorman return false; 31745515061dSMel Gorman } 31755515061dSMel Gorman 3176dac1d27bSMel Gorman unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 3177327c0e96SKAMEZAWA Hiroyuki gfp_t gfp_mask, nodemask_t *nodemask) 317866e1707bSBalbir Singh { 317933906bc5SMel Gorman unsigned long nr_reclaimed; 318066e1707bSBalbir Singh struct scan_control sc = { 318122fba335SKOSAKI Motohiro .nr_to_reclaim = SWAP_CLUSTER_MAX, 3182f2f43e56SNick Desaulniers .gfp_mask = current_gfp_context(gfp_mask), 3183b2e18757SMel Gorman .reclaim_idx = gfp_zone(gfp_mask), 3184ee814fe2SJohannes Weiner .order = order, 3185ee814fe2SJohannes Weiner .nodemask = nodemask, 3186ee814fe2SJohannes Weiner .priority = DEF_PRIORITY, 3187ee814fe2SJohannes Weiner .may_writepage = !laptop_mode, 3188a6dc60f8SJohannes Weiner .may_unmap = 1, 31892e2e4259SKOSAKI Motohiro .may_swap = 1, 31901c30844dSMel Gorman .may_shrinkslab = 1, 319166e1707bSBalbir Singh }; 319266e1707bSBalbir Singh 31935515061dSMel Gorman /* 3194bb451fdfSGreg Thelen * scan_control uses s8 fields for order, priority, and reclaim_idx. 3195bb451fdfSGreg Thelen * Confirm they are large enough for max values. 3196bb451fdfSGreg Thelen */ 3197bb451fdfSGreg Thelen BUILD_BUG_ON(MAX_ORDER > S8_MAX); 3198bb451fdfSGreg Thelen BUILD_BUG_ON(DEF_PRIORITY > S8_MAX); 3199bb451fdfSGreg Thelen BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX); 3200bb451fdfSGreg Thelen 3201bb451fdfSGreg Thelen /* 320250694c28SMel Gorman * Do not enter reclaim if fatal signal was delivered while throttled. 320350694c28SMel Gorman * 1 is returned so that the page allocator does not OOM kill at this 320450694c28SMel Gorman * point. 
32055515061dSMel Gorman */ 3206f2f43e56SNick Desaulniers if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) 32075515061dSMel Gorman return 1; 32085515061dSMel Gorman 3209*1732d2b0SAndrew Morton set_task_reclaim_state(current, &sc.reclaim_state); 32103481c37fSYafang Shao trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask); 321133906bc5SMel Gorman 32123115cd91SVladimir Davydov nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 321333906bc5SMel Gorman 321433906bc5SMel Gorman trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 3215*1732d2b0SAndrew Morton set_task_reclaim_state(current, NULL); 321633906bc5SMel Gorman 321733906bc5SMel Gorman return nr_reclaimed; 321866e1707bSBalbir Singh } 321966e1707bSBalbir Singh 3220c255a458SAndrew Morton #ifdef CONFIG_MEMCG 322166e1707bSBalbir Singh 3222a9dd0a83SMel Gorman unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, 32234e416953SBalbir Singh gfp_t gfp_mask, bool noswap, 3224ef8f2327SMel Gorman pg_data_t *pgdat, 32250ae5e89cSYing Han unsigned long *nr_scanned) 32264e416953SBalbir Singh { 32274e416953SBalbir Singh struct scan_control sc = { 3228b8f5c566SKOSAKI Motohiro .nr_to_reclaim = SWAP_CLUSTER_MAX, 3229ee814fe2SJohannes Weiner .target_mem_cgroup = memcg, 32304e416953SBalbir Singh .may_writepage = !laptop_mode, 32314e416953SBalbir Singh .may_unmap = 1, 3232b2e18757SMel Gorman .reclaim_idx = MAX_NR_ZONES - 1, 32334e416953SBalbir Singh .may_swap = !noswap, 32341c30844dSMel Gorman .may_shrinkslab = 1, 32354e416953SBalbir Singh }; 32366b4f7799SJohannes Weiner unsigned long lru_pages; 32370ae5e89cSYing Han 3238*1732d2b0SAndrew Morton set_task_reclaim_state(current, &sc.reclaim_state); 32394e416953SBalbir Singh sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 32404e416953SBalbir Singh (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 3241bdce6d9eSKOSAKI Motohiro 32429e3b2f8cSKonstantin Khlebnikov trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, 32433481c37fSYafang Shao sc.gfp_mask); 3244bdce6d9eSKOSAKI Motohiro 32454e416953SBalbir Singh /* 32464e416953SBalbir Singh * NOTE: Although we can get the priority field, using it 32474e416953SBalbir Singh * here is not a good idea, since it limits the pages we can scan. 3248a9dd0a83SMel Gorman * if we don't reclaim here, the shrink_node from balance_pgdat 32494e416953SBalbir Singh * will pick up pages from other mem cgroup's as well. We hack 32504e416953SBalbir Singh * the priority and make it zero. 
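 * (With priority 0, get_scan_count()'s "scan = size >> sc->priority"
 * leaves each list's full size eligible in a single pass.)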
32514e416953SBalbir Singh */ 3252ef8f2327SMel Gorman shrink_node_memcg(pgdat, memcg, &sc, &lru_pages); 3253bdce6d9eSKOSAKI Motohiro 3254bdce6d9eSKOSAKI Motohiro trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 3255bdce6d9eSKOSAKI Motohiro 3256*1732d2b0SAndrew Morton set_task_reclaim_state(current, NULL); 32570ae5e89cSYing Han *nr_scanned = sc.nr_scanned; 32580308f7cfSYafang Shao 32594e416953SBalbir Singh return sc.nr_reclaimed; 32604e416953SBalbir Singh } 32614e416953SBalbir Singh 326272835c86SJohannes Weiner unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, 3263b70a2a21SJohannes Weiner unsigned long nr_pages, 32648c7c6e34SKAMEZAWA Hiroyuki gfp_t gfp_mask, 3265b70a2a21SJohannes Weiner bool may_swap) 326666e1707bSBalbir Singh { 32674e416953SBalbir Singh struct zonelist *zonelist; 3268bdce6d9eSKOSAKI Motohiro unsigned long nr_reclaimed; 3269eb414681SJohannes Weiner unsigned long pflags; 3270889976dbSYing Han int nid; 3271499118e9SVlastimil Babka unsigned int noreclaim_flag; 327266e1707bSBalbir Singh struct scan_control sc = { 3273b70a2a21SJohannes Weiner .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), 32747dea19f9SMichal Hocko .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) | 3275ee814fe2SJohannes Weiner (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), 3276b2e18757SMel Gorman .reclaim_idx = MAX_NR_ZONES - 1, 3277ee814fe2SJohannes Weiner .target_mem_cgroup = memcg, 3278ee814fe2SJohannes Weiner .priority = DEF_PRIORITY, 327966e1707bSBalbir Singh .may_writepage = !laptop_mode, 3280a6dc60f8SJohannes Weiner .may_unmap = 1, 3281b70a2a21SJohannes Weiner .may_swap = may_swap, 32821c30844dSMel Gorman .may_shrinkslab = 1, 3283a09ed5e0SYing Han }; 328466e1707bSBalbir Singh 3285*1732d2b0SAndrew Morton set_task_reclaim_state(current, &sc.reclaim_state); 3286889976dbSYing Han /* 3287889976dbSYing Han * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't 3288889976dbSYing Han * take care of where we get pages from. So the node where we start the 3289889976dbSYing Han * scan does not need to be the current node.
3290889976dbSYing Han */ 329172835c86SJohannes Weiner nid = mem_cgroup_select_victim_node(memcg); 3292889976dbSYing Han 3293c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK]; 3294bdce6d9eSKOSAKI Motohiro 32953481c37fSYafang Shao trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask); 3296bdce6d9eSKOSAKI Motohiro 3297eb414681SJohannes Weiner psi_memstall_enter(&pflags); 3298499118e9SVlastimil Babka noreclaim_flag = memalloc_noreclaim_save(); 3299eb414681SJohannes Weiner 33003115cd91SVladimir Davydov nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 3301eb414681SJohannes Weiner 3302499118e9SVlastimil Babka memalloc_noreclaim_restore(noreclaim_flag); 3303eb414681SJohannes Weiner psi_memstall_leave(&pflags); 3304bdce6d9eSKOSAKI Motohiro 3305bdce6d9eSKOSAKI Motohiro trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 3306*1732d2b0SAndrew Morton set_task_reclaim_state(current, NULL); 3307bdce6d9eSKOSAKI Motohiro 3308bdce6d9eSKOSAKI Motohiro return nr_reclaimed; 330966e1707bSBalbir Singh } 331066e1707bSBalbir Singh #endif 331166e1707bSBalbir Singh 33121d82de61SMel Gorman static void age_active_anon(struct pglist_data *pgdat, 3313ef8f2327SMel Gorman struct scan_control *sc) 3314f16015fbSJohannes Weiner { 3315b95a2f2dSJohannes Weiner struct mem_cgroup *memcg; 3316b95a2f2dSJohannes Weiner 3317b95a2f2dSJohannes Weiner if (!total_swap_pages) 3318b95a2f2dSJohannes Weiner return; 3319b95a2f2dSJohannes Weiner 3320b95a2f2dSJohannes Weiner memcg = mem_cgroup_iter(NULL, NULL, NULL); 3321b95a2f2dSJohannes Weiner do { 3322ef8f2327SMel Gorman struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg); 3323f16015fbSJohannes Weiner 33243b991208SJohannes Weiner if (inactive_list_is_low(lruvec, false, sc, true)) 33251a93be0eSKonstantin Khlebnikov shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 33269e3b2f8cSKonstantin Khlebnikov sc, LRU_ACTIVE_ANON); 3327b95a2f2dSJohannes Weiner 3328b95a2f2dSJohannes Weiner memcg = mem_cgroup_iter(NULL, memcg, NULL); 3329b95a2f2dSJohannes Weiner } while (memcg); 3330f16015fbSJohannes Weiner } 3331f16015fbSJohannes Weiner 33321c30844dSMel Gorman static bool pgdat_watermark_boosted(pg_data_t *pgdat, int classzone_idx) 33331c30844dSMel Gorman { 33341c30844dSMel Gorman int i; 33351c30844dSMel Gorman struct zone *zone; 33361c30844dSMel Gorman 33371c30844dSMel Gorman /* 33381c30844dSMel Gorman * Check for watermark boosts top-down as the higher zones 33391c30844dSMel Gorman * are more likely to be boosted. Both watermarks and boosts 33401c30844dSMel Gorman * should not be checked at the same time as reclaim would 33411c30844dSMel Gorman * start prematurely when there is no boosting and a lower 33421c30844dSMel Gorman * zone is balanced.
33431c30844dSMel Gorman */ 33441c30844dSMel Gorman for (i = classzone_idx; i >= 0; i--) { 33451c30844dSMel Gorman zone = pgdat->node_zones + i; 33461c30844dSMel Gorman if (!managed_zone(zone)) 33471c30844dSMel Gorman continue; 33481c30844dSMel Gorman 33491c30844dSMel Gorman if (zone->watermark_boost) 33501c30844dSMel Gorman return true; 33511c30844dSMel Gorman } 33521c30844dSMel Gorman 33531c30844dSMel Gorman return false; 33541c30844dSMel Gorman } 33551c30844dSMel Gorman 3356e716f2ebSMel Gorman /* 3357e716f2ebSMel Gorman * Returns true if there is an eligible zone balanced for the request order 3358e716f2ebSMel Gorman * and classzone_idx 3359e716f2ebSMel Gorman */ 3360e716f2ebSMel Gorman static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx) 336160cefed4SJohannes Weiner { 3362e716f2ebSMel Gorman int i; 3363e716f2ebSMel Gorman unsigned long mark = -1; 3364e716f2ebSMel Gorman struct zone *zone; 336560cefed4SJohannes Weiner 33661c30844dSMel Gorman /* 33671c30844dSMel Gorman * Check watermarks bottom-up as lower zones are more likely to 33681c30844dSMel Gorman * meet watermarks. 33691c30844dSMel Gorman */ 3370e716f2ebSMel Gorman for (i = 0; i <= classzone_idx; i++) { 3371e716f2ebSMel Gorman zone = pgdat->node_zones + i; 33726256c6b4SMel Gorman 3373e716f2ebSMel Gorman if (!managed_zone(zone)) 3374e716f2ebSMel Gorman continue; 3375e716f2ebSMel Gorman 3376e716f2ebSMel Gorman mark = high_wmark_pages(zone); 3377e716f2ebSMel Gorman if (zone_watermark_ok_safe(zone, order, mark, classzone_idx)) 33786256c6b4SMel Gorman return true; 337960cefed4SJohannes Weiner } 338060cefed4SJohannes Weiner 3381e716f2ebSMel Gorman /* 3382e716f2ebSMel Gorman * If a node has no populated zone within classzone_idx, it does not 3383e716f2ebSMel Gorman * need balancing by definition. This can happen if a zone-restricted 3384e716f2ebSMel Gorman * allocation tries to wake a remote kswapd. 3385e716f2ebSMel Gorman */ 3386e716f2ebSMel Gorman if (mark == -1) 3387e716f2ebSMel Gorman return true; 3388e716f2ebSMel Gorman 3389e716f2ebSMel Gorman return false; 3390e716f2ebSMel Gorman } 3391e716f2ebSMel Gorman 3392631b6e08SMel Gorman /* Clear pgdat state for congested, dirty or under writeback. */ 3393631b6e08SMel Gorman static void clear_pgdat_congested(pg_data_t *pgdat) 3394631b6e08SMel Gorman { 3395631b6e08SMel Gorman clear_bit(PGDAT_CONGESTED, &pgdat->flags); 3396631b6e08SMel Gorman clear_bit(PGDAT_DIRTY, &pgdat->flags); 3397631b6e08SMel Gorman clear_bit(PGDAT_WRITEBACK, &pgdat->flags); 3398631b6e08SMel Gorman } 3399631b6e08SMel Gorman 34001741c877SMel Gorman /* 34015515061dSMel Gorman * Prepare kswapd for sleeping. This verifies that there are no processes 34025515061dSMel Gorman * waiting in throttle_direct_reclaim() and that watermarks have been met. 34035515061dSMel Gorman * 34045515061dSMel Gorman * Returns true if kswapd is ready to sleep 34055515061dSMel Gorman */ 3406d9f21d42SMel Gorman static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx) 3407f50de2d3SMel Gorman { 34085515061dSMel Gorman /* 34099e5e3661SVlastimil Babka * The throttled processes are normally woken up in balance_pgdat() as 3410c73322d0SJohannes Weiner * soon as allow_direct_reclaim() is true. But there is a potential 34119e5e3661SVlastimil Babka * race between when kswapd checks the watermarks and a process gets 34129e5e3661SVlastimil Babka * throttled. 
There is also a potential race if processes get 34139e5e3661SVlastimil Babka * throttled, kswapd wakes, a large process exits thereby balancing the 34149e5e3661SVlastimil Babka * zones, which causes kswapd to exit balance_pgdat() before reaching 34159e5e3661SVlastimil Babka * the wake up checks. If kswapd is going to sleep, no process should 34169e5e3661SVlastimil Babka * be sleeping on pfmemalloc_wait, so wake them now if necessary. If 34179e5e3661SVlastimil Babka * the wake up is premature, processes will wake kswapd and get 34189e5e3661SVlastimil Babka * throttled again. The difference from wake ups in balance_pgdat() is 34199e5e3661SVlastimil Babka * that here we are under prepare_to_wait(). 34205515061dSMel Gorman */ 34219e5e3661SVlastimil Babka if (waitqueue_active(&pgdat->pfmemalloc_wait)) 34229e5e3661SVlastimil Babka wake_up_all(&pgdat->pfmemalloc_wait); 3423f50de2d3SMel Gorman 3424c73322d0SJohannes Weiner /* Hopeless node, leave it to direct reclaim */ 3425c73322d0SJohannes Weiner if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 3426c73322d0SJohannes Weiner return true; 3427c73322d0SJohannes Weiner 3428e716f2ebSMel Gorman if (pgdat_balanced(pgdat, order, classzone_idx)) { 3429631b6e08SMel Gorman clear_pgdat_congested(pgdat); 3430333b0a45SShantanu Goel return true; 34311d82de61SMel Gorman } 34321d82de61SMel Gorman 3433333b0a45SShantanu Goel return false; 3434f50de2d3SMel Gorman } 3435f50de2d3SMel Gorman 34361da177e4SLinus Torvalds /* 34371d82de61SMel Gorman * kswapd shrinks a node of pages that are at or below the highest usable 34381d82de61SMel Gorman * zone that is currently unbalanced. 3439b8e83b94SMel Gorman * 3440b8e83b94SMel Gorman * Returns true if kswapd scanned at least the requested number of pages to 3441283aba9fSMel Gorman * reclaim or if the lack of progress was due to pages under writeback. 3442283aba9fSMel Gorman * This is used to determine if the scanning priority needs to be raised. 344375485363SMel Gorman */ 34441d82de61SMel Gorman static bool kswapd_shrink_node(pg_data_t *pgdat, 3445accf6242SVlastimil Babka struct scan_control *sc) 344675485363SMel Gorman { 34471d82de61SMel Gorman struct zone *zone; 34481d82de61SMel Gorman int z; 344975485363SMel Gorman 34501d82de61SMel Gorman /* Reclaim a number of pages proportional to the number of zones */ 34511d82de61SMel Gorman sc->nr_to_reclaim = 0; 3452970a39a3SMel Gorman for (z = 0; z <= sc->reclaim_idx; z++) { 34531d82de61SMel Gorman zone = pgdat->node_zones + z; 34546aa303deSMel Gorman if (!managed_zone(zone)) 34551d82de61SMel Gorman continue; 34567c954f6dSMel Gorman 34571d82de61SMel Gorman sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); 34587c954f6dSMel Gorman } 34597c954f6dSMel Gorman 34601d82de61SMel Gorman /* 34611d82de61SMel Gorman * Historically care was taken to put equal pressure on all zones but 34621d82de61SMel Gorman * now pressure is applied based on node LRU order. 34631d82de61SMel Gorman */ 3464970a39a3SMel Gorman shrink_node(pgdat, sc); 34651d82de61SMel Gorman 34661d82de61SMel Gorman /* 34671d82de61SMel Gorman * Fragmentation may mean that the system cannot be rebalanced for 34681d82de61SMel Gorman * high-order allocations. If twice the allocation size has been 34691d82de61SMel Gorman * reclaimed then recheck watermarks only at order-0 to prevent 34701d82de61SMel Gorman * excessive reclaim. Assume that a process that requested a 34711d82de61SMel Gorman * high-order allocation can direct reclaim/compact.
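 *
 * (Worked example, assuming compact_gap() keeps its 2UL << order
 * definition: for an order-9 request -- a 2MB THP with 4K pages --
 * the cutover back to order-0 checks happens once 2 << 9 = 1024
 * pages, i.e. 4MB, have been reclaimed.)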
34721d82de61SMel Gorman */ 34739861a62cSVlastimil Babka if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) 34741d82de61SMel Gorman sc->order = 0; 34751d82de61SMel Gorman 3476b8e83b94SMel Gorman return sc->nr_scanned >= sc->nr_to_reclaim; 347775485363SMel Gorman } 347875485363SMel Gorman 347975485363SMel Gorman /* 34801d82de61SMel Gorman * For kswapd, balance_pgdat() will reclaim pages across a node from zones 34811d82de61SMel Gorman * that are eligible for use by the caller until at least one zone is 34821d82de61SMel Gorman * balanced. 34831da177e4SLinus Torvalds * 34841d82de61SMel Gorman * Returns the order kswapd finished reclaiming at. 34851da177e4SLinus Torvalds * 34861da177e4SLinus Torvalds * kswapd scans the zones in the highmem->normal->dma direction. It skips 348741858966SMel Gorman * zones which have free_pages > high_wmark_pages(zone), but once a zone is 34888bb4e7a2SWei Yang * found to have free_pages <= high_wmark_pages(zone), any page in that zone 34891d82de61SMel Gorman * or lower is eligible for reclaim until at least one usable zone is 34901d82de61SMel Gorman * balanced. 34911da177e4SLinus Torvalds */ 3492accf6242SVlastimil Babka static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) 34931da177e4SLinus Torvalds { 34941da177e4SLinus Torvalds int i; 34950608f43dSAndrew Morton unsigned long nr_soft_reclaimed; 34960608f43dSAndrew Morton unsigned long nr_soft_scanned; 3497eb414681SJohannes Weiner unsigned long pflags; 34981c30844dSMel Gorman unsigned long nr_boost_reclaim; 34991c30844dSMel Gorman unsigned long zone_boosts[MAX_NR_ZONES] = { 0, }; 35001c30844dSMel Gorman bool boosted; 35011d82de61SMel Gorman struct zone *zone; 3502179e9639SAndrew Morton struct scan_control sc = { 3503179e9639SAndrew Morton .gfp_mask = GFP_KERNEL, 3504ee814fe2SJohannes Weiner .order = order, 3505a6dc60f8SJohannes Weiner .may_unmap = 1, 3506179e9639SAndrew Morton }; 350793781325SOmar Sandoval 3508*1732d2b0SAndrew Morton set_task_reclaim_state(current, &sc.reclaim_state); 3509eb414681SJohannes Weiner psi_memstall_enter(&pflags); 351093781325SOmar Sandoval __fs_reclaim_acquire(); 351193781325SOmar Sandoval 3512f8891e5eSChristoph Lameter count_vm_event(PAGEOUTRUN); 35131da177e4SLinus Torvalds 35141c30844dSMel Gorman /* 35151c30844dSMel Gorman * Account for the reclaim boost. Note that the zone boost is left in 35161c30844dSMel Gorman * place so that parallel allocations that are near the watermark will 35171c30844dSMel Gorman * stall or direct reclaim until kswapd is finished. 
35181c30844dSMel Gorman */ 35191c30844dSMel Gorman nr_boost_reclaim = 0; 35201c30844dSMel Gorman for (i = 0; i <= classzone_idx; i++) { 35211c30844dSMel Gorman zone = pgdat->node_zones + i; 35221c30844dSMel Gorman if (!managed_zone(zone)) 35231c30844dSMel Gorman continue; 35241c30844dSMel Gorman 35251c30844dSMel Gorman nr_boost_reclaim += zone->watermark_boost; 35261c30844dSMel Gorman zone_boosts[i] = zone->watermark_boost; 35271c30844dSMel Gorman } 35281c30844dSMel Gorman boosted = nr_boost_reclaim; 35291c30844dSMel Gorman 35301c30844dSMel Gorman restart: 35311c30844dSMel Gorman sc.priority = DEF_PRIORITY; 35329e3b2f8cSKonstantin Khlebnikov do { 3533c73322d0SJohannes Weiner unsigned long nr_reclaimed = sc.nr_reclaimed; 3534b8e83b94SMel Gorman bool raise_priority = true; 35351c30844dSMel Gorman bool balanced; 353693781325SOmar Sandoval bool ret; 3537b8e83b94SMel Gorman 353884c7a777SMel Gorman sc.reclaim_idx = classzone_idx; 35391da177e4SLinus Torvalds 354086c79f6bSMel Gorman /* 354184c7a777SMel Gorman * If the number of buffer_heads exceeds the maximum allowed 354284c7a777SMel Gorman * then consider reclaiming from all zones. This has a dual 354384c7a777SMel Gorman * purpose -- on 64-bit systems it is expected that 354484c7a777SMel Gorman * buffer_heads are stripped during active rotation. On 32-bit 354584c7a777SMel Gorman * systems, highmem pages can pin lowmem memory and shrinking 354684c7a777SMel Gorman * buffers can relieve lowmem pressure. Reclaim may still not 354784c7a777SMel Gorman * go ahead if all eligible zones for the original allocation 354884c7a777SMel Gorman * request are balanced to avoid excessive reclaim from kswapd. 354986c79f6bSMel Gorman */ 355086c79f6bSMel Gorman if (buffer_heads_over_limit) { 355186c79f6bSMel Gorman for (i = MAX_NR_ZONES - 1; i >= 0; i--) { 355286c79f6bSMel Gorman zone = pgdat->node_zones + i; 35536aa303deSMel Gorman if (!managed_zone(zone)) 355486c79f6bSMel Gorman continue; 355586c79f6bSMel Gorman 3556970a39a3SMel Gorman sc.reclaim_idx = i; 355786c79f6bSMel Gorman break; 355886c79f6bSMel Gorman } 355986c79f6bSMel Gorman } 356086c79f6bSMel Gorman 356186c79f6bSMel Gorman /* 35621c30844dSMel Gorman * If the pgdat is imbalanced then ignore boosting and preserve 35631c30844dSMel Gorman * the watermarks for a later time and restart. Note that the 35641c30844dSMel Gorman * zone watermarks will still be reset at the end of balancing 35651c30844dSMel Gorman * on the grounds that the normal reclaim should be enough to 35661c30844dSMel Gorman * re-evaluate if boosting is required when kswapd next wakes. 356786c79f6bSMel Gorman */ 35681c30844dSMel Gorman balanced = pgdat_balanced(pgdat, sc.order, classzone_idx); 35691c30844dSMel Gorman if (!balanced && nr_boost_reclaim) { 35701c30844dSMel Gorman nr_boost_reclaim = 0; 35711c30844dSMel Gorman goto restart; 35721c30844dSMel Gorman } 35731c30844dSMel Gorman 35741c30844dSMel Gorman /* 35751c30844dSMel Gorman * If boosting is not active then only reclaim if there are no 35761c30844dSMel Gorman * eligible zones. Note that sc.reclaim_idx is not used as 35771c30844dSMel Gorman * buffer_heads_over_limit may have adjusted it.
35781c30844dSMel Gorman */ 35791c30844dSMel Gorman if (!nr_boost_reclaim && balanced) 35801da177e4SLinus Torvalds goto out; 3581e1dbeda6SAndrew Morton 35821c30844dSMel Gorman /* Limit the priority of boosting to avoid reclaim writeback */ 35831c30844dSMel Gorman if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) 35841c30844dSMel Gorman raise_priority = false; 35851c30844dSMel Gorman 35861c30844dSMel Gorman /* 35871c30844dSMel Gorman * Do not writeback or swap pages for boosted reclaim. The 35881c30844dSMel Gorman * intent is to relieve pressure not issue sub-optimal IO 35891c30844dSMel Gorman * from reclaim context. If no pages are reclaimed, the 35901c30844dSMel Gorman * reclaim will be aborted. 35911c30844dSMel Gorman */ 35921c30844dSMel Gorman sc.may_writepage = !laptop_mode && !nr_boost_reclaim; 35931c30844dSMel Gorman sc.may_swap = !nr_boost_reclaim; 35941c30844dSMel Gorman sc.may_shrinkslab = !nr_boost_reclaim; 35951c30844dSMel Gorman 35961da177e4SLinus Torvalds /* 35971d82de61SMel Gorman * Do some background aging of the anon list, to give 35981d82de61SMel Gorman * pages a chance to be referenced before reclaiming. All 35991d82de61SMel Gorman * pages are rotated regardless of classzone as this is 36001d82de61SMel Gorman * about consistent aging. 36011d82de61SMel Gorman */ 3602ef8f2327SMel Gorman age_active_anon(pgdat, &sc); 36031d82de61SMel Gorman 36041d82de61SMel Gorman /* 3605b7ea3c41SMel Gorman * If we're having trouble reclaiming, start doing writepage 3606b7ea3c41SMel Gorman * even in laptop mode. 3607b7ea3c41SMel Gorman */ 3608047d72c3SJohannes Weiner if (sc.priority < DEF_PRIORITY - 2) 3609b7ea3c41SMel Gorman sc.may_writepage = 1; 3610b7ea3c41SMel Gorman 36111d82de61SMel Gorman /* Call soft limit reclaim before calling shrink_node. */ 36121da177e4SLinus Torvalds sc.nr_scanned = 0; 36130608f43dSAndrew Morton nr_soft_scanned = 0; 3614ef8f2327SMel Gorman nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order, 36151d82de61SMel Gorman sc.gfp_mask, &nr_soft_scanned); 36160608f43dSAndrew Morton sc.nr_reclaimed += nr_soft_reclaimed; 36170608f43dSAndrew Morton 361832a4330dSRik van Riel /* 36191d82de61SMel Gorman * There should be no need to raise the scanning priority if 36201d82de61SMel Gorman * enough pages are already being scanned that the high 36211d82de61SMel Gorman * watermark would be met at 100% efficiency. 362232a4330dSRik van Riel */ 3623970a39a3SMel Gorman if (kswapd_shrink_node(pgdat, &sc)) 3624b8e83b94SMel Gorman raise_priority = false; 3625d7868daeSMel Gorman 36265515061dSMel Gorman /* 36275515061dSMel Gorman * If the low watermark is met there is no need for processes 36285515061dSMel Gorman * to be throttled on pfmemalloc_wait as they should now be 36295515061dSMel Gorman * able to safely make forward progress.
Wake them now. 36305515061dSMel Gorman */ 36315515061dSMel Gorman if (waitqueue_active(&pgdat->pfmemalloc_wait) && 3632c73322d0SJohannes Weiner allow_direct_reclaim(pgdat)) 3633cfc51155SVlastimil Babka wake_up_all(&pgdat->pfmemalloc_wait); 36345515061dSMel Gorman 3635b8e83b94SMel Gorman /* Check if kswapd should be suspending */ 363693781325SOmar Sandoval __fs_reclaim_release(); 363793781325SOmar Sandoval ret = try_to_freeze(); 363893781325SOmar Sandoval __fs_reclaim_acquire(); 363993781325SOmar Sandoval if (ret || kthread_should_stop()) 3640b8e83b94SMel Gorman break; 3641b8e83b94SMel Gorman 3642b8e83b94SMel Gorman /* 3643b8e83b94SMel Gorman * Raise priority if scanning rate is too low or there was no 3644b8e83b94SMel Gorman * progress in reclaiming pages 3645b8e83b94SMel Gorman */ 3646c73322d0SJohannes Weiner nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; 36471c30844dSMel Gorman nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed); 36481c30844dSMel Gorman 36491c30844dSMel Gorman /* 36501c30844dSMel Gorman * If reclaim made no progress for a boost, stop reclaim as 36511c30844dSMel Gorman * IO cannot be queued and it could be an infinite loop in 36521c30844dSMel Gorman * extreme circumstances. 36531c30844dSMel Gorman */ 36541c30844dSMel Gorman if (nr_boost_reclaim && !nr_reclaimed) 36551c30844dSMel Gorman break; 36561c30844dSMel Gorman 3657c73322d0SJohannes Weiner if (raise_priority || !nr_reclaimed) 3658b8e83b94SMel Gorman sc.priority--; 36591d82de61SMel Gorman } while (sc.priority >= 1); 36601da177e4SLinus Torvalds 3661c73322d0SJohannes Weiner if (!sc.nr_reclaimed) 3662c73322d0SJohannes Weiner pgdat->kswapd_failures++; 3663c73322d0SJohannes Weiner 3664b8e83b94SMel Gorman out: 36651c30844dSMel Gorman /* If reclaim was boosted, account for the reclaim done in this pass */ 36661c30844dSMel Gorman if (boosted) { 36671c30844dSMel Gorman unsigned long flags; 36681c30844dSMel Gorman 36691c30844dSMel Gorman for (i = 0; i <= classzone_idx; i++) { 36701c30844dSMel Gorman if (!zone_boosts[i]) 36711c30844dSMel Gorman continue; 36721c30844dSMel Gorman 36731c30844dSMel Gorman /* Increments are under the zone lock */ 36741c30844dSMel Gorman zone = pgdat->node_zones + i; 36751c30844dSMel Gorman spin_lock_irqsave(&zone->lock, flags); 36761c30844dSMel Gorman zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); 36771c30844dSMel Gorman spin_unlock_irqrestore(&zone->lock, flags); 36781c30844dSMel Gorman } 36791c30844dSMel Gorman 36801c30844dSMel Gorman /* 36811c30844dSMel Gorman * As there is now likely space, wake up kcompactd to defragment 36821c30844dSMel Gorman * pageblocks. 36831c30844dSMel Gorman */ 36841c30844dSMel Gorman wakeup_kcompactd(pgdat, pageblock_order, classzone_idx); 36851c30844dSMel Gorman } 36861c30844dSMel Gorman 36872a2e4885SJohannes Weiner snapshot_refaults(NULL, pgdat); 368893781325SOmar Sandoval __fs_reclaim_release(); 3689eb414681SJohannes Weiner psi_memstall_leave(&pflags); 3690*1732d2b0SAndrew Morton set_task_reclaim_state(current, NULL); 3691e5ca8071SYafang Shao 36920abdee2bSMel Gorman /* 36931d82de61SMel Gorman * Return the order kswapd stopped reclaiming at as 36941d82de61SMel Gorman * prepare_kswapd_sleep() takes it into account. If another caller 36951d82de61SMel Gorman * entered the allocator slow path while kswapd was awake, order will 36961d82de61SMel Gorman * remain at the higher level.
36970abdee2bSMel Gorman */ 36981d82de61SMel Gorman return sc.order; 36991da177e4SLinus Torvalds } 37001da177e4SLinus Torvalds 3701e716f2ebSMel Gorman /* 3702dffcac2cSShakeel Butt * The pgdat->kswapd_classzone_idx is used to pass the highest zone index to be 3703dffcac2cSShakeel Butt * reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES, which is not 3704dffcac2cSShakeel Butt * a valid index, then either kswapd runs for the first time or kswapd couldn't sleep 3705dffcac2cSShakeel Butt * after the previous reclaim attempt (node is still unbalanced). In that case 3706dffcac2cSShakeel Butt * return the zone index of the previous kswapd reclaim cycle. 3707e716f2ebSMel Gorman */ 3708e716f2ebSMel Gorman static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat, 3709dffcac2cSShakeel Butt enum zone_type prev_classzone_idx) 3710e716f2ebSMel Gorman { 3711e716f2ebSMel Gorman if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) 3712dffcac2cSShakeel Butt return prev_classzone_idx; 3713dffcac2cSShakeel Butt return pgdat->kswapd_classzone_idx; 3714e716f2ebSMel Gorman } 3715e716f2ebSMel Gorman 371638087d9bSMel Gorman static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, 371738087d9bSMel Gorman unsigned int classzone_idx) 3718f0bc0a60SKOSAKI Motohiro { 3719f0bc0a60SKOSAKI Motohiro long remaining = 0; 3720f0bc0a60SKOSAKI Motohiro DEFINE_WAIT(wait); 3721f0bc0a60SKOSAKI Motohiro 3722f0bc0a60SKOSAKI Motohiro if (freezing(current) || kthread_should_stop()) 3723f0bc0a60SKOSAKI Motohiro return; 3724f0bc0a60SKOSAKI Motohiro 3725f0bc0a60SKOSAKI Motohiro prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 3726f0bc0a60SKOSAKI Motohiro 3727333b0a45SShantanu Goel /* 3728333b0a45SShantanu Goel * Try to sleep for a short interval. Note that kcompactd will only be 3729333b0a45SShantanu Goel * woken if it is possible to sleep for a short interval. This is 3730333b0a45SShantanu Goel * deliberate on the assumption that if reclaim cannot keep an 3731333b0a45SShantanu Goel * eligible zone balanced, it's also unlikely that compaction will 3732333b0a45SShantanu Goel * succeed. 3733333b0a45SShantanu Goel */ 3734d9f21d42SMel Gorman if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { 3735fd901c95SVlastimil Babka /* 3736fd901c95SVlastimil Babka * Compaction records what page blocks it recently failed to 3737fd901c95SVlastimil Babka * isolate pages from and skips them in the future scanning. 3738fd901c95SVlastimil Babka * When kswapd is going to sleep, it is reasonable to assume 3739fd901c95SVlastimil Babka * that pages and compaction may succeed so reset the cache. 3740fd901c95SVlastimil Babka */ 3741fd901c95SVlastimil Babka reset_isolation_suitable(pgdat); 3742fd901c95SVlastimil Babka 3743fd901c95SVlastimil Babka /* 3744fd901c95SVlastimil Babka * We have freed the memory, now we should compact it to make 3745fd901c95SVlastimil Babka * allocation of the requested order possible. 3746fd901c95SVlastimil Babka */ 374738087d9bSMel Gorman wakeup_kcompactd(pgdat, alloc_order, classzone_idx); 3748fd901c95SVlastimil Babka 3749f0bc0a60SKOSAKI Motohiro remaining = schedule_timeout(HZ/10); 375038087d9bSMel Gorman 375138087d9bSMel Gorman /* 375238087d9bSMel Gorman * If woken prematurely then reset kswapd_classzone_idx and 375338087d9bSMel Gorman * order. The values will either be from a wakeup request or 375438087d9bSMel Gorman * the previous request that slept prematurely.
375538087d9bSMel Gorman */ 375638087d9bSMel Gorman if (remaining) { 3757e716f2ebSMel Gorman pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); 375838087d9bSMel Gorman pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order); 375938087d9bSMel Gorman } 376038087d9bSMel Gorman 3761f0bc0a60SKOSAKI Motohiro finish_wait(&pgdat->kswapd_wait, &wait); 3762f0bc0a60SKOSAKI Motohiro prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 3763f0bc0a60SKOSAKI Motohiro } 3764f0bc0a60SKOSAKI Motohiro 3765f0bc0a60SKOSAKI Motohiro /* 3766f0bc0a60SKOSAKI Motohiro * After a short sleep, check if it was a premature sleep. If not, then 3767f0bc0a60SKOSAKI Motohiro * go fully to sleep until explicitly woken up. 3768f0bc0a60SKOSAKI Motohiro */ 3769d9f21d42SMel Gorman if (!remaining && 3770d9f21d42SMel Gorman prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { 3771f0bc0a60SKOSAKI Motohiro trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 3772f0bc0a60SKOSAKI Motohiro 3773f0bc0a60SKOSAKI Motohiro /* 3774f0bc0a60SKOSAKI Motohiro * vmstat counters are not perfectly accurate and the estimated 3775f0bc0a60SKOSAKI Motohiro * value for counters such as NR_FREE_PAGES can deviate from the 3776f0bc0a60SKOSAKI Motohiro * true value by nr_online_cpus * threshold. To avoid the zone 3777f0bc0a60SKOSAKI Motohiro * watermarks being breached while under pressure, we reduce the 3778f0bc0a60SKOSAKI Motohiro * per-cpu vmstat threshold while kswapd is awake and restore 3779f0bc0a60SKOSAKI Motohiro * them before going back to sleep. 3780f0bc0a60SKOSAKI Motohiro */ 3781f0bc0a60SKOSAKI Motohiro set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 37821c7e7f6cSAaditya Kumar 37831c7e7f6cSAaditya Kumar if (!kthread_should_stop()) 3784f0bc0a60SKOSAKI Motohiro schedule(); 37851c7e7f6cSAaditya Kumar 3786f0bc0a60SKOSAKI Motohiro set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); 3787f0bc0a60SKOSAKI Motohiro } else { 3788f0bc0a60SKOSAKI Motohiro if (remaining) 3789f0bc0a60SKOSAKI Motohiro count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 3790f0bc0a60SKOSAKI Motohiro else 3791f0bc0a60SKOSAKI Motohiro count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 3792f0bc0a60SKOSAKI Motohiro } 3793f0bc0a60SKOSAKI Motohiro finish_wait(&pgdat->kswapd_wait, &wait); 3794f0bc0a60SKOSAKI Motohiro } 3795f0bc0a60SKOSAKI Motohiro 37961da177e4SLinus Torvalds /* 37971da177e4SLinus Torvalds * The background pageout daemon, started as a kernel thread 37981da177e4SLinus Torvalds * from the init process. 37991da177e4SLinus Torvalds * 38001da177e4SLinus Torvalds * This basically trickles out pages so that we have _some_ 38011da177e4SLinus Torvalds * free memory available even if there is no other activity 38021da177e4SLinus Torvalds * that frees anything up. This is needed for things like routing 38031da177e4SLinus Torvalds * etc, where we otherwise might have all activity going on in 38041da177e4SLinus Torvalds * asynchronous contexts that cannot page things out. 38051da177e4SLinus Torvalds * 38061da177e4SLinus Torvalds * If there are applications that are active memory-allocators 38071da177e4SLinus Torvalds * (most normal use), this basically shouldn't matter. 
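 *
 * (For context, a sketch of how this thread comes to exist: kswapd_run()
 * below starts one instance per memory node, roughly as
 *
 *	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
 *
 * so the opaque void *p argument is always that node's pg_data_t.)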
38081da177e4SLinus Torvalds */ 38091da177e4SLinus Torvalds static int kswapd(void *p) 38101da177e4SLinus Torvalds { 3811e716f2ebSMel Gorman unsigned int alloc_order, reclaim_order; 3812e716f2ebSMel Gorman unsigned int classzone_idx = MAX_NR_ZONES - 1; 38131da177e4SLinus Torvalds pg_data_t *pgdat = (pg_data_t*)p; 38141da177e4SLinus Torvalds struct task_struct *tsk = current; 3815a70f7302SRusty Russell const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 38161da177e4SLinus Torvalds 3817174596a0SRusty Russell if (!cpumask_empty(cpumask)) 3818c5f59f08SMike Travis set_cpus_allowed_ptr(tsk, cpumask); 38191da177e4SLinus Torvalds 38201da177e4SLinus Torvalds /* 38211da177e4SLinus Torvalds * Tell the memory management that we're a "memory allocator", 38221da177e4SLinus Torvalds * and that if we need more memory we should get access to it 38231da177e4SLinus Torvalds * regardless (see "__alloc_pages()"). "kswapd" should 38241da177e4SLinus Torvalds * never get caught in the normal page freeing logic. 38251da177e4SLinus Torvalds * 38261da177e4SLinus Torvalds * (Kswapd normally doesn't need memory anyway, but sometimes 38271da177e4SLinus Torvalds * you need a small amount of memory in order to be able to 38281da177e4SLinus Torvalds * page out something else, and this flag essentially protects 38291da177e4SLinus Torvalds * us from recursively trying to free more memory as we're 38301da177e4SLinus Torvalds * trying to free the first piece of memory in the first place). 38311da177e4SLinus Torvalds */ 3832930d9152SChristoph Lameter tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 383383144186SRafael J. Wysocki set_freezable(); 38341da177e4SLinus Torvalds 3835e716f2ebSMel Gorman pgdat->kswapd_order = 0; 3836e716f2ebSMel Gorman pgdat->kswapd_classzone_idx = MAX_NR_ZONES; 38371da177e4SLinus Torvalds for ( ; ; ) { 38386f6313d4SJeff Liu bool ret; 38393e1d1d28SChristoph Lameter 3840e716f2ebSMel Gorman alloc_order = reclaim_order = pgdat->kswapd_order; 3841e716f2ebSMel Gorman classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); 3842e716f2ebSMel Gorman 384338087d9bSMel Gorman kswapd_try_sleep: 384438087d9bSMel Gorman kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, 384538087d9bSMel Gorman classzone_idx); 3846215ddd66SMel Gorman 384738087d9bSMel Gorman /* Read the new order and classzone_idx */ 384838087d9bSMel Gorman alloc_order = reclaim_order = pgdat->kswapd_order; 3849dffcac2cSShakeel Butt classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); 385038087d9bSMel Gorman pgdat->kswapd_order = 0; 3851e716f2ebSMel Gorman pgdat->kswapd_classzone_idx = MAX_NR_ZONES; 38521da177e4SLinus Torvalds 38538fe23e05SDavid Rientjes ret = try_to_freeze(); 38548fe23e05SDavid Rientjes if (kthread_should_stop()) 38558fe23e05SDavid Rientjes break; 38568fe23e05SDavid Rientjes 38578fe23e05SDavid Rientjes /* 38588fe23e05SDavid Rientjes * We can speed up thawing tasks if we don't call balance_pgdat 38598fe23e05SDavid Rientjes * after returning from the refrigerator 3860b1296cc4SRafael J. Wysocki */ 386138087d9bSMel Gorman if (ret) 386238087d9bSMel Gorman continue; 38631d82de61SMel Gorman 386438087d9bSMel Gorman /* 386538087d9bSMel Gorman * Reclaim begins at the requested order but if a high-order 386638087d9bSMel Gorman * reclaim fails then kswapd falls back to reclaiming for 386738087d9bSMel Gorman * order-0. 
If that happens, kswapd will consider sleeping 386838087d9bSMel Gorman * for the order it finished reclaiming at (reclaim_order) 386938087d9bSMel Gorman * but kcompactd is woken to compact for the original 387038087d9bSMel Gorman * request (alloc_order). 387138087d9bSMel Gorman */ 3872e5146b12SMel Gorman trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx, 3873e5146b12SMel Gorman alloc_order); 387438087d9bSMel Gorman reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx); 387538087d9bSMel Gorman if (reclaim_order < alloc_order) 387638087d9bSMel Gorman goto kswapd_try_sleep; 387733906bc5SMel Gorman } 3878b0a8cc58STakamori Yamaguchi 387971abdc15SJohannes Weiner tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD); 388071abdc15SJohannes Weiner 38811da177e4SLinus Torvalds return 0; 38821da177e4SLinus Torvalds } 38831da177e4SLinus Torvalds 38841da177e4SLinus Torvalds /* 38855ecd9d40SDavid Rientjes * A zone is low on free memory or too fragmented for high-order memory. If 38865ecd9d40SDavid Rientjes * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's 38875ecd9d40SDavid Rientjes * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim 38885ecd9d40SDavid Rientjes * has failed or is not needed, still wake up kcompactd if only compaction is 38895ecd9d40SDavid Rientjes * needed. 38901da177e4SLinus Torvalds */ 38915ecd9d40SDavid Rientjes void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, 38925ecd9d40SDavid Rientjes enum zone_type classzone_idx) 38931da177e4SLinus Torvalds { 38941da177e4SLinus Torvalds pg_data_t *pgdat; 38951da177e4SLinus Torvalds 38966aa303deSMel Gorman if (!managed_zone(zone)) 38971da177e4SLinus Torvalds return; 38981da177e4SLinus Torvalds 38995ecd9d40SDavid Rientjes if (!cpuset_zone_allowed(zone, gfp_flags)) 39001da177e4SLinus Torvalds return; 390188f5acf8SMel Gorman pgdat = zone->zone_pgdat; 3902dffcac2cSShakeel Butt 3903dffcac2cSShakeel Butt if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) 3904dffcac2cSShakeel Butt pgdat->kswapd_classzone_idx = classzone_idx; 3905dffcac2cSShakeel Butt else 3906dffcac2cSShakeel Butt pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, 3907e716f2ebSMel Gorman classzone_idx); 390838087d9bSMel Gorman pgdat->kswapd_order = max(pgdat->kswapd_order, order); 39098d0986e2SCon Kolivas if (!waitqueue_active(&pgdat->kswapd_wait)) 39101da177e4SLinus Torvalds return; 3911e1a55637SMel Gorman 39125ecd9d40SDavid Rientjes /* Hopeless node, leave it to direct reclaim if possible */ 39135ecd9d40SDavid Rientjes if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || 39141c30844dSMel Gorman (pgdat_balanced(pgdat, order, classzone_idx) && 39151c30844dSMel Gorman !pgdat_watermark_boosted(pgdat, classzone_idx))) { 39165ecd9d40SDavid Rientjes /* 39175ecd9d40SDavid Rientjes * There may be plenty of free memory available, but it's too 39185ecd9d40SDavid Rientjes * fragmented for high-order allocations. Wake up kcompactd 39195ecd9d40SDavid Rientjes * and rely on compaction_suitable() to determine if it's 39205ecd9d40SDavid Rientjes * needed. If it fails, it will defer subsequent attempts to 39215ecd9d40SDavid Rientjes * ratelimit its work. 
39225ecd9d40SDavid Rientjes */ 39235ecd9d40SDavid Rientjes if (!(gfp_flags & __GFP_DIRECT_RECLAIM)) 39245ecd9d40SDavid Rientjes wakeup_kcompactd(pgdat, order, classzone_idx); 3925c73322d0SJohannes Weiner return; 39265ecd9d40SDavid Rientjes } 3927c73322d0SJohannes Weiner 39285ecd9d40SDavid Rientjes trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order, 39295ecd9d40SDavid Rientjes gfp_flags); 39308d0986e2SCon Kolivas wake_up_interruptible(&pgdat->kswapd_wait); 39311da177e4SLinus Torvalds } 39321da177e4SLinus Torvalds 3933c6f37f12SRafael J. Wysocki #ifdef CONFIG_HIBERNATION 39341da177e4SLinus Torvalds /* 39357b51755cSKOSAKI Motohiro * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 3936d6277db4SRafael J. Wysocki * freed pages. 3937d6277db4SRafael J. Wysocki * 3938d6277db4SRafael J. Wysocki * Rather than trying to age LRUs the aim is to preserve the overall 3939d6277db4SRafael J. Wysocki * LRU order by reclaiming preferentially 3940d6277db4SRafael J. Wysocki * inactive > active > active referenced > active mapped 39411da177e4SLinus Torvalds */ 39427b51755cSKOSAKI Motohiro unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 39431da177e4SLinus Torvalds { 3944d6277db4SRafael J. Wysocki struct scan_control sc = { 39457b51755cSKOSAKI Motohiro .nr_to_reclaim = nr_to_reclaim, 3946ee814fe2SJohannes Weiner .gfp_mask = GFP_HIGHUSER_MOVABLE, 3947b2e18757SMel Gorman .reclaim_idx = MAX_NR_ZONES - 1, 39489e3b2f8cSKonstantin Khlebnikov .priority = DEF_PRIORITY, 3949ee814fe2SJohannes Weiner .may_writepage = 1, 3950ee814fe2SJohannes Weiner .may_unmap = 1, 3951ee814fe2SJohannes Weiner .may_swap = 1, 3952ee814fe2SJohannes Weiner .hibernation_mode = 1, 39531da177e4SLinus Torvalds }; 39547b51755cSKOSAKI Motohiro struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 39557b51755cSKOSAKI Motohiro unsigned long nr_reclaimed; 3956499118e9SVlastimil Babka unsigned int noreclaim_flag; 39571da177e4SLinus Torvalds 3958d92a8cfcSPeter Zijlstra fs_reclaim_acquire(sc.gfp_mask); 395993781325SOmar Sandoval noreclaim_flag = memalloc_noreclaim_save(); 3960*1732d2b0SAndrew Morton set_task_reclaim_state(current, &sc.reclaim_state); 3961d6277db4SRafael J. Wysocki 39623115cd91SVladimir Davydov nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 3963d6277db4SRafael J. Wysocki 3964*1732d2b0SAndrew Morton set_task_reclaim_state(current, NULL); 3965499118e9SVlastimil Babka memalloc_noreclaim_restore(noreclaim_flag); 396693781325SOmar Sandoval fs_reclaim_release(sc.gfp_mask); 3967d6277db4SRafael J. Wysocki 39687b51755cSKOSAKI Motohiro return nr_reclaimed; 39691da177e4SLinus Torvalds } 3970c6f37f12SRafael J. Wysocki #endif /* CONFIG_HIBERNATION */ 39711da177e4SLinus Torvalds 39721da177e4SLinus Torvalds /* It's optimal to keep kswapds on the same CPUs as their memory, but 39731da177e4SLinus Torvalds not required for correctness. So if the last cpu in a node goes 39741da177e4SLinus Torvalds away, we get changed to run anywhere: as the first one comes back, 39751da177e4SLinus Torvalds restore their cpu bindings. 
*/ 3976517bbed9SSebastian Andrzej Siewior static int kswapd_cpu_online(unsigned int cpu) 39771da177e4SLinus Torvalds { 397858c0a4a7SYasunori Goto int nid; 39791da177e4SLinus Torvalds 398048fb2e24SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 3981c5f59f08SMike Travis pg_data_t *pgdat = NODE_DATA(nid); 3982a70f7302SRusty Russell const struct cpumask *mask; 3983a70f7302SRusty Russell 3984a70f7302SRusty Russell mask = cpumask_of_node(pgdat->node_id); 3985c5f59f08SMike Travis 39863e597945SRusty Russell if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 39871da177e4SLinus Torvalds /* One of our CPUs online: restore mask */ 3988c5f59f08SMike Travis set_cpus_allowed_ptr(pgdat->kswapd, mask); 39891da177e4SLinus Torvalds } 3990517bbed9SSebastian Andrzej Siewior return 0; 39911da177e4SLinus Torvalds } 39921da177e4SLinus Torvalds 39933218ae14SYasunori Goto /* 39943218ae14SYasunori Goto * This kswapd start function will be called by init and node-hot-add. 39953218ae14SYasunori Goto * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added. 39963218ae14SYasunori Goto */ 39973218ae14SYasunori Goto int kswapd_run(int nid) 39983218ae14SYasunori Goto { 39993218ae14SYasunori Goto pg_data_t *pgdat = NODE_DATA(nid); 40003218ae14SYasunori Goto int ret = 0; 40013218ae14SYasunori Goto 40023218ae14SYasunori Goto if (pgdat->kswapd) 40033218ae14SYasunori Goto return 0; 40043218ae14SYasunori Goto 40053218ae14SYasunori Goto pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 40063218ae14SYasunori Goto if (IS_ERR(pgdat->kswapd)) { 40073218ae14SYasunori Goto /* failure at boot is fatal */ 4008c6202adfSThomas Gleixner BUG_ON(system_state < SYSTEM_RUNNING); 4009d5dc0ad9SGavin Shan pr_err("Failed to start kswapd on node %d\n", nid); 4010d5dc0ad9SGavin Shan ret = PTR_ERR(pgdat->kswapd); 4011d72515b8SXishi Qiu pgdat->kswapd = NULL; 40123218ae14SYasunori Goto } 40133218ae14SYasunori Goto return ret; 40143218ae14SYasunori Goto } 40153218ae14SYasunori Goto 40168fe23e05SDavid Rientjes /* 4017d8adde17SJiang Liu * Called by memory hotplug when all memory in a node is offlined. Caller must 4018bfc8c901SVladimir Davydov * hold mem_hotplug_begin/end().
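 *
 * (Hypothetical caller sketch, loosely modelled on the hot-remove path;
 * mem_hotplug_begin()/mem_hotplug_done() are the begin/end pair referred
 * to above:
 *
 *	mem_hotplug_begin();
 *	...offline and remove the node's memory...
 *	kswapd_stop(nid);
 *	mem_hotplug_done();
 *
 * The locking requirement exists because kswapd_stop() writes
 * NODE_DATA(nid)->kswapd, which kswapd_run() also modifies.)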
40198fe23e05SDavid Rientjes */ 40208fe23e05SDavid Rientjes void kswapd_stop(int nid) 40218fe23e05SDavid Rientjes { 40228fe23e05SDavid Rientjes struct task_struct *kswapd = NODE_DATA(nid)->kswapd; 40238fe23e05SDavid Rientjes 4024d8adde17SJiang Liu if (kswapd) { 40258fe23e05SDavid Rientjes kthread_stop(kswapd); 4026d8adde17SJiang Liu NODE_DATA(nid)->kswapd = NULL; 4027d8adde17SJiang Liu } 40288fe23e05SDavid Rientjes } 40298fe23e05SDavid Rientjes 40301da177e4SLinus Torvalds static int __init kswapd_init(void) 40311da177e4SLinus Torvalds { 4032517bbed9SSebastian Andrzej Siewior int nid, ret; 403369e05944SAndrew Morton 40341da177e4SLinus Torvalds swap_setup(); 403548fb2e24SLai Jiangshan for_each_node_state(nid, N_MEMORY) 40363218ae14SYasunori Goto kswapd_run(nid); 4037517bbed9SSebastian Andrzej Siewior ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 4038517bbed9SSebastian Andrzej Siewior "mm/vmscan:online", kswapd_cpu_online, 4039517bbed9SSebastian Andrzej Siewior NULL); 4040517bbed9SSebastian Andrzej Siewior WARN_ON(ret < 0); 40411da177e4SLinus Torvalds return 0; 40421da177e4SLinus Torvalds } 40431da177e4SLinus Torvalds 40441da177e4SLinus Torvalds module_init(kswapd_init) 40459eeff239SChristoph Lameter 40469eeff239SChristoph Lameter #ifdef CONFIG_NUMA 40479eeff239SChristoph Lameter /* 4048a5f5f91dSMel Gorman * Node reclaim mode 40499eeff239SChristoph Lameter * 4050a5f5f91dSMel Gorman * If non-zero call node_reclaim when the number of free pages falls below 40519eeff239SChristoph Lameter * the watermarks. 40529eeff239SChristoph Lameter */ 4053a5f5f91dSMel Gorman int node_reclaim_mode __read_mostly; 40549eeff239SChristoph Lameter 40551b2ffb78SChristoph Lameter #define RECLAIM_OFF 0 40567d03431cSFernando Luis Vazquez Cao #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ 40571b2ffb78SChristoph Lameter #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ 405895bbc0c7SZhihui Zhang #define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */ 40591b2ffb78SChristoph Lameter 40609eeff239SChristoph Lameter /* 4061a5f5f91dSMel Gorman * Priority for NODE_RECLAIM. This determines the fraction of pages 4062a92f7126SChristoph Lameter * of a node considered for each zone_reclaim. 4 scans 1/16th of 4063a92f7126SChristoph Lameter * a zone. 4064a92f7126SChristoph Lameter */ 4065a5f5f91dSMel Gorman #define NODE_RECLAIM_PRIORITY 4 4066a92f7126SChristoph Lameter 40679eeff239SChristoph Lameter /* 4068a5f5f91dSMel Gorman * Percentage of pages in a zone that must be unmapped for node_reclaim to 40699614634fSChristoph Lameter * occur. 40709614634fSChristoph Lameter */ 40719614634fSChristoph Lameter int sysctl_min_unmapped_ratio = 1; 40729614634fSChristoph Lameter 40739614634fSChristoph Lameter /* 40740ff38490SChristoph Lameter * If the number of slab pages in a zone grows beyond this percentage then 40750ff38490SChristoph Lameter * slab reclaim needs to occur. 
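 *
 * (Worked example, assuming the sysctl handler in page_alloc.c that
 * converts this ratio into a per-node page count: with the default of 5,
 * a node managing 1M pages gets min_slab_pages of about 52000 pages,
 * roughly 200MB with 4K pages, before slab reclaim is considered.)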
40760ff38490SChristoph Lameter */ 40770ff38490SChristoph Lameter int sysctl_min_slab_ratio = 5; 40780ff38490SChristoph Lameter 407911fb9989SMel Gorman static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat) 408090afa5deSMel Gorman { 408111fb9989SMel Gorman unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED); 408211fb9989SMel Gorman unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) + 408311fb9989SMel Gorman node_page_state(pgdat, NR_ACTIVE_FILE); 408490afa5deSMel Gorman 408590afa5deSMel Gorman /* 408690afa5deSMel Gorman * It's possible for there to be more file mapped pages than 408790afa5deSMel Gorman * accounted for by the pages on the file LRU lists because 408890afa5deSMel Gorman * tmpfs pages accounted for as ANON can also be FILE_MAPPED 408990afa5deSMel Gorman */ 409090afa5deSMel Gorman return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; 409190afa5deSMel Gorman } 409290afa5deSMel Gorman 409390afa5deSMel Gorman /* Work out how many page cache pages we can reclaim in this reclaim_mode */ 4094a5f5f91dSMel Gorman static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat) 409590afa5deSMel Gorman { 4096d031a157SAlexandru Moise unsigned long nr_pagecache_reclaimable; 4097d031a157SAlexandru Moise unsigned long delta = 0; 409890afa5deSMel Gorman 409990afa5deSMel Gorman /* 410095bbc0c7SZhihui Zhang * If RECLAIM_UNMAP is set, then all file pages are considered 410190afa5deSMel Gorman * potentially reclaimable. Otherwise, we have to worry about 410211fb9989SMel Gorman * pages like swapcache and node_unmapped_file_pages() provides 410390afa5deSMel Gorman * a better estimate 410490afa5deSMel Gorman */ 4105a5f5f91dSMel Gorman if (node_reclaim_mode & RECLAIM_UNMAP) 4106a5f5f91dSMel Gorman nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES); 410790afa5deSMel Gorman else 4108a5f5f91dSMel Gorman nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat); 410990afa5deSMel Gorman 411090afa5deSMel Gorman /* If we can't clean pages, remove dirty pages from consideration */ 4111a5f5f91dSMel Gorman if (!(node_reclaim_mode & RECLAIM_WRITE)) 4112a5f5f91dSMel Gorman delta += node_page_state(pgdat, NR_FILE_DIRTY); 411390afa5deSMel Gorman 411490afa5deSMel Gorman /* Watch for any possible underflows due to delta */ 411590afa5deSMel Gorman if (unlikely(delta > nr_pagecache_reclaimable)) 411690afa5deSMel Gorman delta = nr_pagecache_reclaimable; 411790afa5deSMel Gorman 411890afa5deSMel Gorman return nr_pagecache_reclaimable - delta; 411990afa5deSMel Gorman } 412090afa5deSMel Gorman 41210ff38490SChristoph Lameter /* 4122a5f5f91dSMel Gorman * Try to free up some pages from this node through reclaim. 
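 *
 * (A hedged worked example of the pagecache gate applied by node_reclaim()
 * below: with the default sysctl_min_unmapped_ratio of 1, a node managing
 * 1M pages has min_unmapped_pages of about 10000 pages, so the pagecache
 * side of reclaim only proceeds while node_pagecache_reclaimable() exceeds
 * that; the slab side has its own min_slab_pages test.)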
41239eeff239SChristoph Lameter */ 4124a5f5f91dSMel Gorman static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) 41259eeff239SChristoph Lameter { 41267fb2d46dSChristoph Lameter /* Minimum pages needed in order to stay on node */ 412769e05944SAndrew Morton const unsigned long nr_pages = 1 << order; 41289eeff239SChristoph Lameter struct task_struct *p = current; 4129499118e9SVlastimil Babka unsigned int noreclaim_flag; 4130179e9639SAndrew Morton struct scan_control sc = { 413162b726c1SAndrew Morton .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), 4132f2f43e56SNick Desaulniers .gfp_mask = current_gfp_context(gfp_mask), 4133bd2f6199SJohannes Weiner .order = order, 4134a5f5f91dSMel Gorman .priority = NODE_RECLAIM_PRIORITY, 4135a5f5f91dSMel Gorman .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE), 4136a5f5f91dSMel Gorman .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP), 4137ee814fe2SJohannes Weiner .may_swap = 1, 4138f2f43e56SNick Desaulniers .reclaim_idx = gfp_zone(gfp_mask), 4139179e9639SAndrew Morton }; 41409eeff239SChristoph Lameter 4141132bb8cfSYafang Shao trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, 4142132bb8cfSYafang Shao sc.gfp_mask); 4143132bb8cfSYafang Shao 41449eeff239SChristoph Lameter cond_resched(); 414593781325SOmar Sandoval fs_reclaim_acquire(sc.gfp_mask); 4146d4f7796eSChristoph Lameter /* 414795bbc0c7SZhihui Zhang * We need to be able to allocate from the reserves for RECLAIM_UNMAP 4148d4f7796eSChristoph Lameter * and we also need to be able to write out pages for RECLAIM_WRITE 414995bbc0c7SZhihui Zhang * and RECLAIM_UNMAP. 4150d4f7796eSChristoph Lameter */ 4151499118e9SVlastimil Babka noreclaim_flag = memalloc_noreclaim_save(); 4152499118e9SVlastimil Babka p->flags |= PF_SWAPWRITE; 4153*1732d2b0SAndrew Morton set_task_reclaim_state(p, &sc.reclaim_state); 4154c84db23cSChristoph Lameter 4155a5f5f91dSMel Gorman if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) { 4156a92f7126SChristoph Lameter /* 4157894befecSAndrey Ryabinin * Free memory by calling shrink node with increasing 41580ff38490SChristoph Lameter * priorities until we have enough memory freed. 4159a92f7126SChristoph Lameter */ 4160a92f7126SChristoph Lameter do { 4161970a39a3SMel Gorman shrink_node(pgdat, &sc); 41629e3b2f8cSKonstantin Khlebnikov } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); 41630ff38490SChristoph Lameter } 4164a92f7126SChristoph Lameter 4165*1732d2b0SAndrew Morton set_task_reclaim_state(p, NULL); 4166499118e9SVlastimil Babka current->flags &= ~PF_SWAPWRITE; 4167499118e9SVlastimil Babka memalloc_noreclaim_restore(noreclaim_flag); 416893781325SOmar Sandoval fs_reclaim_release(sc.gfp_mask); 4169132bb8cfSYafang Shao 4170132bb8cfSYafang Shao trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed); 4171132bb8cfSYafang Shao 4172a79311c1SRik van Riel return sc.nr_reclaimed >= nr_pages; 41739eeff239SChristoph Lameter } 4174179e9639SAndrew Morton 4175a5f5f91dSMel Gorman int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) 4176179e9639SAndrew Morton { 4177d773ed6bSDavid Rientjes int ret; 4178179e9639SAndrew Morton 4179179e9639SAndrew Morton /* 4180a5f5f91dSMel Gorman * Node reclaim reclaims unmapped file backed pages and 41810ff38490SChristoph Lameter * slab pages if we are over the defined limits. 
418234aa1330SChristoph Lameter * 41839614634fSChristoph Lameter * A small portion of unmapped file backed pages is needed for 41849614634fSChristoph Lameter * file I/O otherwise pages read by file I/O will be immediately 4185a5f5f91dSMel Gorman * thrown out if the node is overallocated. So we do not reclaim 4186a5f5f91dSMel Gorman * if less than a specified percentage of the node is used by 41879614634fSChristoph Lameter * unmapped file backed pages. 4188179e9639SAndrew Morton */ 4189a5f5f91dSMel Gorman if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && 4190385386cfSJohannes Weiner node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages) 4191a5f5f91dSMel Gorman return NODE_RECLAIM_FULL; 4192179e9639SAndrew Morton 4193179e9639SAndrew Morton /* 4194d773ed6bSDavid Rientjes * Do not scan if the allocation should not be delayed. 4195179e9639SAndrew Morton */ 4196d0164adcSMel Gorman if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) 4197a5f5f91dSMel Gorman return NODE_RECLAIM_NOSCAN; 4198179e9639SAndrew Morton 4199179e9639SAndrew Morton /* 4200a5f5f91dSMel Gorman * Only run node reclaim on the local node or on nodes that do not 4201179e9639SAndrew Morton * have associated processors. This will favor the local processor 4202179e9639SAndrew Morton * over remote processors and spread off node memory allocations 4203179e9639SAndrew Morton * as wide as possible. 4204179e9639SAndrew Morton */ 4205a5f5f91dSMel Gorman if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) 4206a5f5f91dSMel Gorman return NODE_RECLAIM_NOSCAN; 4207d773ed6bSDavid Rientjes 4208a5f5f91dSMel Gorman if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) 4209a5f5f91dSMel Gorman return NODE_RECLAIM_NOSCAN; 4210fa5e084eSMel Gorman 4211a5f5f91dSMel Gorman ret = __node_reclaim(pgdat, gfp_mask, order); 4212a5f5f91dSMel Gorman clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); 4213d773ed6bSDavid Rientjes 421424cf7251SMel Gorman if (!ret) 421524cf7251SMel Gorman count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); 421624cf7251SMel Gorman 4217d773ed6bSDavid Rientjes return ret; 4218179e9639SAndrew Morton } 42199eeff239SChristoph Lameter #endif 4220894bc310SLee Schermerhorn 4221894bc310SLee Schermerhorn /* 4222894bc310SLee Schermerhorn * page_evictable - test whether a page is evictable 4223894bc310SLee Schermerhorn * @page: the page to test 4224894bc310SLee Schermerhorn * 4225894bc310SLee Schermerhorn * Test whether page is evictable--i.e., should be placed on active/inactive 422639b5f29aSHugh Dickins * lists vs unevictable list. 
4227894bc310SLee Schermerhorn * 4228894bc310SLee Schermerhorn * Reasons page might not be evictable: 4229ba9ddf49SLee Schermerhorn * (1) page's mapping marked unevictable 4230b291f000SNick Piggin * (2) page is part of an mlocked VMA 4231ba9ddf49SLee Schermerhorn * 4232894bc310SLee Schermerhorn */ 423339b5f29aSHugh Dickins int page_evictable(struct page *page) 4234894bc310SLee Schermerhorn { 4235e92bb4ddSHuang Ying int ret; 4236e92bb4ddSHuang Ying 4237e92bb4ddSHuang Ying /* Prevent address_space of inode and swap cache from being freed */ 4238e92bb4ddSHuang Ying rcu_read_lock(); 4239e92bb4ddSHuang Ying ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page); 4240e92bb4ddSHuang Ying rcu_read_unlock(); 4241e92bb4ddSHuang Ying return ret; 4242894bc310SLee Schermerhorn } 424389e004eaSLee Schermerhorn 424489e004eaSLee Schermerhorn /** 424564e3d12fSKuo-Hsin Yang * check_move_unevictable_pages - check pages for evictability and move to 424664e3d12fSKuo-Hsin Yang * appropriate zone lru list 424764e3d12fSKuo-Hsin Yang * @pvec: pagevec with lru pages to check 424889e004eaSLee Schermerhorn * 424964e3d12fSKuo-Hsin Yang * Checks pages for evictability; if an evictable page is in the unevictable 425064e3d12fSKuo-Hsin Yang * lru list, it is moved to the appropriate evictable lru list. This function 425164e3d12fSKuo-Hsin Yang * should only be used for lru pages. 425289e004eaSLee Schermerhorn */ 425364e3d12fSKuo-Hsin Yang void check_move_unevictable_pages(struct pagevec *pvec) 425489e004eaSLee Schermerhorn { 4255925b7673SJohannes Weiner struct lruvec *lruvec; 4256785b99feSMel Gorman struct pglist_data *pgdat = NULL; 425724513264SHugh Dickins int pgscanned = 0; 425824513264SHugh Dickins int pgrescued = 0; 425989e004eaSLee Schermerhorn int i; 426089e004eaSLee Schermerhorn 426164e3d12fSKuo-Hsin Yang for (i = 0; i < pvec->nr; i++) { 426264e3d12fSKuo-Hsin Yang struct page *page = pvec->pages[i]; 4263785b99feSMel Gorman struct pglist_data *pagepgdat = page_pgdat(page); 426489e004eaSLee Schermerhorn 426524513264SHugh Dickins pgscanned++; 4266785b99feSMel Gorman if (pagepgdat != pgdat) { 4267785b99feSMel Gorman if (pgdat) 4268785b99feSMel Gorman spin_unlock_irq(&pgdat->lru_lock); 4269785b99feSMel Gorman pgdat = pagepgdat; 4270785b99feSMel Gorman spin_lock_irq(&pgdat->lru_lock); 427189e004eaSLee Schermerhorn } 4272785b99feSMel Gorman lruvec = mem_cgroup_page_lruvec(page, pgdat); 427389e004eaSLee Schermerhorn 427424513264SHugh Dickins if (!PageLRU(page) || !PageUnevictable(page)) 427524513264SHugh Dickins continue; 427689e004eaSLee Schermerhorn 427739b5f29aSHugh Dickins if (page_evictable(page)) { 427824513264SHugh Dickins enum lru_list lru = page_lru_base_type(page); 427924513264SHugh Dickins 4280309381feSSasha Levin VM_BUG_ON_PAGE(PageActive(page), page); 428124513264SHugh Dickins ClearPageUnevictable(page); 4282fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE); 4283fa9add64SHugh Dickins add_page_to_lru_list(page, lruvec, lru); 428424513264SHugh Dickins pgrescued++; 428589e004eaSLee Schermerhorn } 428689e004eaSLee Schermerhorn } 428724513264SHugh Dickins 4288785b99feSMel Gorman if (pgdat) { 428924513264SHugh Dickins __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); 429024513264SHugh Dickins __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); 4291785b99feSMel Gorman spin_unlock_irq(&pgdat->lru_lock); 429224513264SHugh Dickins } 429385046579SHugh Dickins } 429464e3d12fSKuo-Hsin Yang EXPORT_SYMBOL_GPL(check_move_unevictable_pages); 4295
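
/*
 * Illustration only, not part of vmscan: a minimal sketch of the calling
 * pattern used by callers such as shmem_unlock_mapping(). The function
 * name is hypothetical; it assumes only the pagevec API included above.
 */
static inline void example_scan_unevictable(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec);
	/* Gather LRU pages a pagevec at a time and re-check each batch */
	while (pagevec_lookup(&pvec, mapping, &index)) {
		check_move_unevictable_pages(&pvec);
		pagevec_release(&pvec);
		cond_resched();
	}
}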