// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/vmscan.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Swap reorganised 29.12.95, Stephen Tweedie.
 * kswapd added: 7.1.96  sct
 * Removed kswapd_ctl limits, and swap out as many pages as needed
 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 * Multiqueue VM started 5.8.00, Rik van Riel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmpressure.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>
#include <linux/balloon_compaction.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

struct scan_control {
	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/* Writepage batching in laptop mode; RECLAIM_WRITE */
	unsigned int may_writepage:1;

	/* Can mapped pages be reclaimed? */
	unsigned int may_unmap:1;

	/* Can pages be swapped as part of reclaim? */
	unsigned int may_swap:1;

	/*
	 * Cgroups are not reclaimed below their configured memory.low,
	 * unless we threaten to OOM. If any cgroups are skipped due to
	 * memory.low and nothing was reclaimed, go back for memory.low.
	 */
	unsigned int memcg_low_reclaim:1;
	unsigned int memcg_low_skipped:1;

	unsigned int hibernation_mode:1;

	/* One of the zones is ready for compaction */
	unsigned int compaction_ready:1;

	/* Allocation order */
	s8 order;

	/* Scan (total_size >> priority) pages at once */
	s8 priority;

	/* The highest zone to isolate pages for reclaim from */
	s8 reclaim_idx;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	struct {
		unsigned int dirty;
		unsigned int unqueued_dirty;
		unsigned int congested;
		unsigned int writeback;
		unsigned int immediate;
		unsigned int file_taken;
		unsigned int taken;
	} nr;
};

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)		\
	do {							\
		if ((_page)->lru.prev != _base) {		\
			struct page *prev;			\
								\
			prev = lru_to_page(&(_page->lru));	\
			prefetch(&prev->_field);		\
		}						\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)		\
	do {							\
		if ((_page)->lru.prev != _base) {		\
			struct page *prev;			\
								\
			prev = lru_to_page(&(_page->lru));	\
			prefetchw(&prev->_field);		\
		}						\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
/*
 * The total number of pages which are beyond the high watermark within all
 * zones.
 */
unsigned long vm_total_pages;

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_MEMCG_KMEM

/*
 * We allow subsystems to populate their shrinker-related
 * LRU lists before register_shrinker_prepared() is called
 * for the shrinker, since we don't want to impose
 * restrictions on their internal registration order.
 * In this case shrink_slab_memcg() may find corresponding
 * bit is set in the shrinkers map.
 *
 * This value is used by the function to detect registering
 * shrinkers and to skip do_shrink_slab() calls for them.
 */
#define SHRINKER_REGISTERING ((struct shrinker *)~0UL)

static DEFINE_IDR(shrinker_idr);
static int shrinker_nr_max;

static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
	int id, ret = -ENOMEM;

	down_write(&shrinker_rwsem);
	/* This may call shrinker, so it must use down_read_trylock() */
	id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
	if (id < 0)
		goto unlock;

	if (id >= shrinker_nr_max) {
		if (memcg_expand_shrinker_maps(id)) {
			idr_remove(&shrinker_idr, id);
			goto unlock;
		}

		shrinker_nr_max = id + 1;
	}
	shrinker->id = id;
	ret = 0;
unlock:
	up_write(&shrinker_rwsem);
	return ret;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
	int id = shrinker->id;

	BUG_ON(id < 0);

	down_write(&shrinker_rwsem);
	idr_remove(&shrinker_idr, id);
	up_write(&shrinker_rwsem);
}
#else /* CONFIG_MEMCG_KMEM */
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
	return 0;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG
static bool global_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup;
}

/**
 * sane_reclaim - is the usual dirty throttling mechanism operational?
 * @sc: scan_control in question
 *
 * The normal page dirty throttling mechanism in balance_dirty_pages() is
 * completely broken with the legacy memcg, and direct stalling in
 * shrink_page_list() is used for throttling instead, which lacks all the
 * niceties such as fairness, adaptive pausing, bandwidth proportional
 * allocation and configurability.
 *
 * This function tests whether the vmscan currently in progress can assume
 * that the normal dirty throttling mechanism is operational.
 */
static bool sane_reclaim(struct scan_control *sc)
{
	struct mem_cgroup *memcg = sc->target_mem_cgroup;

	if (!memcg)
		return true;
#ifdef CONFIG_CGROUP_WRITEBACK
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return true;
#endif
	return false;
}

static void set_memcg_congestion(pg_data_t *pgdat,
				struct mem_cgroup *memcg,
				bool congested)
{
	struct mem_cgroup_per_node *mn;

	if (!memcg)
		return;

	mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	WRITE_ONCE(mn->congested, congested);
}

static bool memcg_congested(pg_data_t *pgdat,
			struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mn;

	mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	return READ_ONCE(mn->congested);

}
#else
static bool global_reclaim(struct scan_control *sc)
{
	return true;
}

static bool sane_reclaim(struct scan_control *sc)
{
	return true;
}

static inline void set_memcg_congestion(struct pglist_data *pgdat,
				struct mem_cgroup *memcg, bool congested)
{
}

static inline bool memcg_congested(struct pglist_data *pgdat,
			struct mem_cgroup *memcg)
{
	return false;

}
#endif

/*
 * This misses isolated pages which are not accounted for to save counters.
 * As the data only determines if reclaim or compaction continues, it is
 * not expected that isolated pages will be a dominating factor.
 */
unsigned long zone_reclaimable_pages(struct zone *zone)
{
	unsigned long nr;

	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
	if (get_nr_swap_pages() > 0)
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);

	return nr;
}

/**
 * lruvec_lru_size - Returns the number of pages on the given LRU list.
 * @lruvec: lru vector
 * @lru: lru to use
 * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
 */
unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
{
	unsigned long lru_size;
	int zid;

	if (!mem_cgroup_disabled())
		lru_size = mem_cgroup_get_lru_size(lruvec, lru);
	else
		lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);

	for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
		unsigned long size;

		if (!managed_zone(zone))
			continue;

		if (!mem_cgroup_disabled())
			size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
		else
			size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
					       NR_ZONE_LRU_BASE + lru);
		lru_size -= min(size, lru_size);
	}

	return lru_size;

}

/*
 * Add a shrinker callback to be called from the vm.
 */
int prealloc_shrinker(struct shrinker *shrinker)
{
	size_t size = sizeof(*shrinker->nr_deferred);

	if (shrinker->flags & SHRINKER_NUMA_AWARE)
		size *= nr_node_ids;

	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
	if (!shrinker->nr_deferred)
		return -ENOMEM;

	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
		if (prealloc_memcg_shrinker(shrinker))
			goto free_deferred;
	}

	return 0;

free_deferred:
	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
	return -ENOMEM;
}

void free_prealloced_shrinker(struct shrinker *shrinker)
{
	if (!shrinker->nr_deferred)
		return;

	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
		unregister_memcg_shrinker(shrinker);

	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}

void register_shrinker_prepared(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
#ifdef CONFIG_MEMCG_KMEM
	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
		idr_replace(&shrinker_idr, shrinker, shrinker->id);
#endif
	up_write(&shrinker_rwsem);
}

int register_shrinker(struct shrinker *shrinker)
{
	int err = prealloc_shrinker(shrinker);

	if (err)
		return err;
	register_shrinker_prepared(shrinker);
	return 0;
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	if (!shrinker->nr_deferred)
		return;
	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
		unregister_memcg_shrinker(shrinker);
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}
EXPORT_SYMBOL(unregister_shrinker);

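/*
 * Illustrative sketch of a typical client of the registration API above.
 * The demo_* names are hypothetical; a real user plugs in its own cache
 * bookkeeping (see struct shrinker in <linux/shrinker.h>):
 *
 *	static unsigned long demo_count(struct shrinker *s,
 *					struct shrink_control *sc)
 *	{
 *		return demo_nr_objects;			// 0 means nothing to scan
 *	}
 *
 *	static unsigned long demo_scan(struct shrinker *s,
 *				       struct shrink_control *sc)
 *	{
 *		unsigned long freed;
 *
 *		if (!spin_trylock(&demo_lock))
 *			return SHRINK_STOP;		// cannot make progress now
 *		freed = demo_trim(sc->nr_to_scan);	// objects actually freed
 *		spin_unlock(&demo_lock);
 *		return freed;
 *	}
 *
 *	static struct shrinker demo_shrinker = {
 *		.count_objects	= demo_count,
 *		.scan_objects	= demo_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 * register_shrinker(&demo_shrinker) at init time and
 * unregister_shrinker(&demo_shrinker) at teardown then let reclaim age the
 * cache through do_shrink_slab() below.
 */
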
#define SHRINK_BATCH 128

static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
				    struct shrinker *shrinker, int priority)
{
	unsigned long freed = 0;
	unsigned long long delta;
	long total_scan;
	long freeable;
	long nr;
	long new_nr;
	int nid = shrinkctl->nid;
	long batch_size = shrinker->batch ? shrinker->batch
					  : SHRINK_BATCH;
	long scanned = 0, next_deferred;

	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
		nid = 0;

	freeable = shrinker->count_objects(shrinker, shrinkctl);
	if (freeable == 0 || freeable == SHRINK_EMPTY)
		return freeable;

	/*
	 * copy the current shrinker scan count into a local variable
	 * and zero it so that other concurrent shrinker invocations
	 * don't also do this scanning work.
	 */
	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);

	total_scan = nr;
	delta = freeable >> priority;
	delta *= 4;
	do_div(delta, shrinker->seeks);

	/*
	 * Make sure we apply some minimal pressure on default priority
	 * even on small cgroups. Stale objects are not only consuming memory
	 * by themselves, but can also hold a reference to a dying cgroup,
	 * preventing it from being reclaimed. A dying cgroup with all
	 * corresponding structures like per-cpu stats and kmem caches
	 * can be really big, so it may lead to a significant waste of memory.
	 */
	delta = max_t(unsigned long long, delta, min(freeable, batch_size));

	total_scan += delta;
	if (total_scan < 0) {
		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
		       shrinker->scan_objects, total_scan);
		total_scan = freeable;
		next_deferred = nr;
	} else
		next_deferred = total_scan;

	/*
	 * We need to avoid excessive windup on filesystem shrinkers
	 * due to large numbers of GFP_NOFS allocations causing the
	 * shrinkers to return -1 all the time. This results in a large
	 * nr being built up so when a shrink that can do some work
	 * comes along it empties the entire cache due to nr >>>
	 * freeable. This is bad for sustaining a working set in
	 * memory.
	 *
	 * Hence only allow the shrinker to scan the entire cache when
	 * a large delta change is calculated directly.
	 */
	if (delta < freeable / 4)
		total_scan = min(total_scan, freeable / 2);

	/*
	 * Avoid risking looping forever due to too large nr value:
	 * never try to free more than twice the estimated number of
	 * freeable entries.
	 */
	if (total_scan > freeable * 2)
		total_scan = freeable * 2;

	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
				   freeable, delta, total_scan, priority);

	/*
	 * Normally, we should not scan less than batch_size objects in one
	 * pass to avoid too frequent shrinker calls, but if the slab has less
	 * than batch_size objects in total and we are really tight on memory,
	 * we will try to reclaim all available objects, otherwise we can end
	 * up failing allocations although there are plenty of reclaimable
	 * objects spread over several slabs with usage less than the
	 * batch_size.
	 *
	 * We detect the "tight on memory" situations by looking at the total
	 * number of objects we want to scan (total_scan). If it is greater
	 * than the total number of objects on slab (freeable), we must be
	 * scanning at high prio and therefore should try to reclaim as much as
	 * possible.
	 */
	while (total_scan >= batch_size ||
	       total_scan >= freeable) {
		unsigned long ret;
		unsigned long nr_to_scan = min(batch_size, total_scan);

		shrinkctl->nr_to_scan = nr_to_scan;
		shrinkctl->nr_scanned = nr_to_scan;
		ret = shrinker->scan_objects(shrinker, shrinkctl);
		if (ret == SHRINK_STOP)
			break;
		freed += ret;

		count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
		total_scan -= shrinkctl->nr_scanned;
		scanned += shrinkctl->nr_scanned;

		cond_resched();
	}

	if (next_deferred >= scanned)
		next_deferred -= scanned;
	else
		next_deferred = 0;
	/*
	 * move the unused scan count back into the shrinker in a
	 * manner that handles concurrent updates. If we exhausted the
	 * scan, there is no need to do an update.
	 */
	if (next_deferred > 0)
		new_nr = atomic_long_add_return(next_deferred,
						&shrinker->nr_deferred[nid]);
	else
		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);

	trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
	return freed;
}

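/*
 * Worked example of the scan-target arithmetic above (illustrative numbers
 * only): with freeable = 10000 objects, priority = DEF_PRIORITY (12) and
 * seeks = DEFAULT_SEEKS (2), the base delta is (10000 >> 12) * 4 / 2 = 4
 * objects.  The minimal-pressure clamp then raises it to
 * min(freeable, batch_size) = 128, so even a small cache sees at least one
 * batch of pressure per pass, while the freeable/4 and freeable*2 checks
 * keep total_scan from either collapsing to nothing or winding up without
 * bound across deferred invocations.
 */
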
#ifdef CONFIG_MEMCG_KMEM
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
			struct mem_cgroup *memcg, int priority)
{
	struct memcg_shrinker_map *map;
	unsigned long ret, freed = 0;
	int i;

	if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
		return 0;

	if (!down_read_trylock(&shrinker_rwsem))
		return 0;

	map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
					true);
	if (unlikely(!map))
		goto unlock;

	for_each_set_bit(i, map->map, shrinker_nr_max) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};
		struct shrinker *shrinker;

		shrinker = idr_find(&shrinker_idr, i);
		if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
			if (!shrinker)
				clear_bit(i, map->map);
			continue;
		}

		ret = do_shrink_slab(&sc, shrinker, priority);
		if (ret == SHRINK_EMPTY) {
			clear_bit(i, map->map);
			/*
			 * After the shrinker reported that it had no objects to
			 * free, but before we cleared the corresponding bit in
			 * the memcg shrinker map, a new object might have been
			 * added. To make sure we have the bit set in this
			 * case, we invoke the shrinker one more time and reset
			 * the bit if it reports that it is not empty anymore.
			 * The memory barrier here pairs with the barrier in
			 * memcg_set_shrinker_bit():
			 *
			 * list_lru_add()     shrink_slab_memcg()
			 *   list_add_tail()    clear_bit()
			 *   <MB>               <MB>
			 *   set_bit()          do_shrink_slab()
			 */
			smp_mb__after_atomic();
			ret = do_shrink_slab(&sc, shrinker, priority);
			if (ret == SHRINK_EMPTY)
				ret = 0;
			else
				memcg_set_shrinker_bit(memcg, nid, i);
		}
		freed += ret;

		if (rwsem_is_contended(&shrinker_rwsem)) {
			freed = freed ? : 1;
			break;
		}
	}
unlock:
	up_read(&shrinker_rwsem);
	return freed;
}
#else /* CONFIG_MEMCG_KMEM */
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
			struct mem_cgroup *memcg, int priority)
{
	return 0;
}
#endif /* CONFIG_MEMCG_KMEM */

/**
 * shrink_slab - shrink slab caches
 * @gfp_mask: allocation context
 * @nid: node whose slab caches to target
 * @memcg: memory cgroup whose slab caches to target
 * @priority: the reclaim priority
 *
 * Call the shrink functions to age shrinkable caches.
 *
 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
 * unaware shrinkers will receive a node id of 0 instead.
 *
 * @memcg specifies the memory cgroup to target. Unaware shrinkers
 * are called only if it is the root cgroup.
 *
 * @priority is sc->priority, we take the number of objects and >> by priority
 * in order to get the scan target.
 *
 * Returns the number of reclaimed slab objects.
 */
static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
				 struct mem_cgroup *memcg,
				 int priority)
{
	unsigned long ret, freed = 0;
	struct shrinker *shrinker;

	if (!mem_cgroup_is_root(memcg))
		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);

	if (!down_read_trylock(&shrinker_rwsem))
		goto out;

	list_for_each_entry(shrinker, &shrinker_list, list) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};

		ret = do_shrink_slab(&sc, shrinker, priority);
		if (ret == SHRINK_EMPTY)
			ret = 0;
		freed += ret;
		/*
		 * Bail out if someone wants to register a new shrinker to
		 * prevent the registration from being stalled for long periods
		 * by parallel ongoing shrinking.
		 */
		if (rwsem_is_contended(&shrinker_rwsem)) {
			freed = freed ? : 1;
			break;
		}
	}

	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return freed;
}

void drop_slab_node(int nid)
{
	unsigned long freed;

	do {
		struct mem_cgroup *memcg = NULL;

		freed = 0;
		memcg = mem_cgroup_iter(NULL, NULL, NULL);
		do {
			freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
	} while (freed > 10);
}

void drop_slab(void)
{
	int nid;

	for_each_online_node(nid)
		drop_slab_node(nid);
}

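/*
 * Note on the helpers above: drop_slab() walks every online node and every
 * memory cgroup at priority 0 until little further progress is made, so it
 * is the slab-cache counterpart of dropping the page cache; external
 * "drop caches" style requests (for example the vm.drop_caches sysctl path)
 * end up calling into it.
 */
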
static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.
	 */
	int radix_pins = PageTransHuge(page) && PageSwapCache(page) ?
		HPAGE_PMD_NR : 1;
	return page_count(page) - page_has_private(page) == 1 + radix_pins;
}

static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!inode_write_congested(inode))
		return 1;
	if (inode_to_bdi(inode) == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 struct scan_control *sc)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking.  To prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_write_iter() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				pr_info("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_inode(mapping->host, sc))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page);
		inc_node_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page,
			    bool reclaimed)
{
	unsigned long flags;
	int refcount;

	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	xa_lock_irqsave(&mapping->i_pages, flags);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_refcount.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under the i_pages lock, then this ordering is not required.
	 */
	if (unlikely(PageTransHuge(page)) && PageSwapCache(page))
		refcount = 1 + HPAGE_PMD_NR;
	else
		refcount = 2;
	if (!page_ref_freeze(page, refcount))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_ref_unfreeze(page, refcount);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		mem_cgroup_swapout(page, swap);
		__delete_from_swap_cache(page);
		xa_unlock_irqrestore(&mapping->i_pages, flags);
		put_swap_page(page, swap);
	} else {
		void (*freepage)(struct page *);
		void *shadow = NULL;

		freepage = mapping->a_ops->freepage;
		/*
		 * Remember a shadow entry for reclaimed file cache in
		 * order to detect refaults, thus thrashing, later on.
		 *
		 * But don't store shadows in an address space that is
		 * already exiting.  This is not just an optimization,
		 * inode reclaim needs to empty out the radix tree or
		 * the nodes are lost.  Don't plant shadows behind its
		 * back.
		 *
		 * We also don't store shadows for DAX mappings because the
		 * only page cache pages found in these are zero pages
		 * covering holes, and because we don't want to mix DAX
		 * exceptional entries and shadow exceptional entries in the
		 * same address_space.
		 */
		if (reclaimed && page_is_file_cache(page) &&
		    !mapping_exiting(mapping) && !dax_mapping(mapping))
			shadow = workingset_eviction(mapping, page);
		__delete_from_page_cache(page, shadow);
		xa_unlock_irqrestore(&mapping->i_pages, flags);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page, false)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_ref_unfreeze(page, 1);
		return 1;
	}
	return 0;
}

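/*
 * The freeze values used in __remove_mapping() above, with illustrative
 * numbers: an isolated base page is expected to hold exactly two references,
 * one from the isolating caller and one from the page cache, so
 * page_ref_freeze(page, 2) succeeds only if nobody else can still reach the
 * page.  A THP in the swap cache is pinned once per subpage by the swap
 * cache, hence the 1 + HPAGE_PMD_NR freeze value (1 + 512 with 4K base pages
 * and 2M THP) instead.
 */
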
/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	lru_cache_add(page);
	put_page(page);		/* drop ref from isolate */
}

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
					  &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageSwapBacked(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if (vm_flags & VM_EXEC)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}

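/*
 * Summary of the decision above, derived from the code, most specific rule
 * first:
 *
 *   VM_LOCKED vma found                     -> PAGEREF_RECLAIM (try_to_unmap()
 *                                              moves it to the unevictable list)
 *   referenced pte, swap-backed page        -> PAGEREF_ACTIVATE
 *   referenced pte, second use or VM_EXEC   -> PAGEREF_ACTIVATE
 *   referenced pte, first use of file page  -> PAGEREF_KEEP
 *   no referenced pte, PG_referenced file   -> PAGEREF_RECLAIM_CLEAN
 *                                              (reclaim only if clean)
 *   everything else                         -> PAGEREF_RECLAIM
 */
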
/* Check if a page is dirty or under writeback */
static void page_check_dirty_writeback(struct page *page,
				       bool *dirty, bool *writeback)
{
	struct address_space *mapping;

	/*
	 * Anonymous pages are not handled by flushers and must be written
	 * from reclaim context. Do not stall reclaim based on them
	 */
	if (!page_is_file_cache(page) ||
	    (PageAnon(page) && !PageSwapBacked(page))) {
		*dirty = false;
		*writeback = false;
		return;
	}

	/* By default assume that the page flags are accurate */
	*dirty = PageDirty(page);
	*writeback = PageWriteback(page);

	/* Verify dirty/writeback state if the filesystem supports it */
	if (!page_has_private(page))
		return;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops->is_dirty_writeback)
		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
				      struct pglist_data *pgdat,
				      struct scan_control *sc,
				      enum ttu_flags ttu_flags,
				      struct reclaim_stat *stat,
				      bool force_reclaim)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	int pgactivate = 0;
	unsigned nr_unqueued_dirty = 0;
	unsigned nr_dirty = 0;
	unsigned nr_congested = 0;
	unsigned nr_reclaimed = 0;
	unsigned nr_writeback = 0;
	unsigned nr_immediate = 0;
	unsigned nr_ref_keep = 0;
	unsigned nr_unmap_fail = 0;

	cond_resched();

	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
int may_enter_fs; 112102c6de8dSMinchan Kim enum page_references references = PAGEREF_RECLAIM_CLEAN; 1122e2be15f6SMel Gorman bool dirty, writeback; 11231da177e4SLinus Torvalds 11241da177e4SLinus Torvalds cond_resched(); 11251da177e4SLinus Torvalds 11261da177e4SLinus Torvalds page = lru_to_page(page_list); 11271da177e4SLinus Torvalds list_del(&page->lru); 11281da177e4SLinus Torvalds 1129529ae9aaSNick Piggin if (!trylock_page(page)) 11301da177e4SLinus Torvalds goto keep; 11311da177e4SLinus Torvalds 1132309381feSSasha Levin VM_BUG_ON_PAGE(PageActive(page), page); 11331da177e4SLinus Torvalds 11341da177e4SLinus Torvalds sc->nr_scanned++; 113580e43426SChristoph Lameter 113639b5f29aSHugh Dickins if (unlikely(!page_evictable(page))) 1137ad6b6704SMinchan Kim goto activate_locked; 1138894bc310SLee Schermerhorn 1139a6dc60f8SJohannes Weiner if (!sc->may_unmap && page_mapped(page)) 114080e43426SChristoph Lameter goto keep_locked; 114180e43426SChristoph Lameter 11421da177e4SLinus Torvalds /* Double the slab pressure for mapped and swapcache pages */ 1143802a3a92SShaohua Li if ((page_mapped(page) || PageSwapCache(page)) && 1144802a3a92SShaohua Li !(PageAnon(page) && !PageSwapBacked(page))) 11451da177e4SLinus Torvalds sc->nr_scanned++; 11461da177e4SLinus Torvalds 1147c661b078SAndy Whitcroft may_enter_fs = (sc->gfp_mask & __GFP_FS) || 1148c661b078SAndy Whitcroft (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); 1149c661b078SAndy Whitcroft 1150e62e384eSMichal Hocko /* 1151894befecSAndrey Ryabinin * The number of dirty pages determines if a node is marked 1152e2be15f6SMel Gorman * reclaim_congested which affects wait_iff_congested. kswapd 1153e2be15f6SMel Gorman * will stall and start writing pages if the tail of the LRU 1154e2be15f6SMel Gorman * is all dirty unqueued pages. 1155e2be15f6SMel Gorman */ 1156e2be15f6SMel Gorman page_check_dirty_writeback(page, &dirty, &writeback); 1157e2be15f6SMel Gorman if (dirty || writeback) 1158e2be15f6SMel Gorman nr_dirty++; 1159e2be15f6SMel Gorman 1160e2be15f6SMel Gorman if (dirty && !writeback) 1161e2be15f6SMel Gorman nr_unqueued_dirty++; 1162e2be15f6SMel Gorman 1163d04e8acdSMel Gorman /* 1164d04e8acdSMel Gorman * Treat this page as congested if the underlying BDI is or if 1165d04e8acdSMel Gorman * pages are cycling through the LRU so quickly that the 1166d04e8acdSMel Gorman * pages marked for immediate reclaim are making it to the 1167d04e8acdSMel Gorman * end of the LRU a second time. 1168d04e8acdSMel Gorman */ 1169e2be15f6SMel Gorman mapping = page_mapping(page); 11701da58ee2SJamie Liu if (((dirty || writeback) && mapping && 1171703c2708STejun Heo inode_write_congested(mapping->host)) || 1172d04e8acdSMel Gorman (writeback && PageReclaim(page))) 1173e2be15f6SMel Gorman nr_congested++; 1174e2be15f6SMel Gorman 1175e2be15f6SMel Gorman /* 1176283aba9fSMel Gorman * If a page at the tail of the LRU is under writeback, there 1177283aba9fSMel Gorman * are three cases to consider. 1178e62e384eSMichal Hocko * 1179283aba9fSMel Gorman * 1) If reclaim is encountering an excessive number of pages 1180283aba9fSMel Gorman * under writeback and this page is both under writeback and 1181283aba9fSMel Gorman * PageReclaim then it indicates that pages are being queued 1182283aba9fSMel Gorman * for IO but are being recycled through the LRU before the 1183283aba9fSMel Gorman * IO can complete. 
Waiting on the page itself risks an 1184283aba9fSMel Gorman * indefinite stall if it is impossible to writeback the 1185283aba9fSMel Gorman * page due to IO error or disconnected storage so instead 1186b1a6f21eSMel Gorman * note that the LRU is being scanned too quickly and the 1187b1a6f21eSMel Gorman * caller can stall after page list has been processed. 1188c3b94f44SHugh Dickins * 118997c9341fSTejun Heo * 2) Global or new memcg reclaim encounters a page that is 1190ecf5fc6eSMichal Hocko * not marked for immediate reclaim, or the caller does not 1191ecf5fc6eSMichal Hocko * have __GFP_FS (or __GFP_IO if it's simply going to swap, 1192ecf5fc6eSMichal Hocko * not to fs). In this case mark the page for immediate 119397c9341fSTejun Heo * reclaim and continue scanning. 1194283aba9fSMel Gorman * 1195ecf5fc6eSMichal Hocko * Require may_enter_fs because we would wait on fs, which 1196ecf5fc6eSMichal Hocko * may not have submitted IO yet. And the loop driver might 1197283aba9fSMel Gorman * enter reclaim, and deadlock if it waits on a page for 1198283aba9fSMel Gorman * which it is needed to do the write (loop masks off 1199283aba9fSMel Gorman * __GFP_IO|__GFP_FS for this reason); but more thought 1200283aba9fSMel Gorman * would probably show more reasons. 1201283aba9fSMel Gorman * 12027fadc820SHugh Dickins * 3) Legacy memcg encounters a page that is already marked 1203283aba9fSMel Gorman * PageReclaim. memcg does not have any dirty pages 1204283aba9fSMel Gorman * throttling so we could easily OOM just because too many 1205283aba9fSMel Gorman * pages are in writeback and there is nothing else to 1206283aba9fSMel Gorman * reclaim. Wait for the writeback to complete. 1207c55e8d03SJohannes Weiner * 1208c55e8d03SJohannes Weiner * In cases 1) and 2) we activate the pages to get them out of 1209c55e8d03SJohannes Weiner * the way while we continue scanning for clean pages on the 1210c55e8d03SJohannes Weiner * inactive list and refilling from the active list. The 1211c55e8d03SJohannes Weiner * observation here is that waiting for disk writes is more 1212c55e8d03SJohannes Weiner * expensive than potentially causing reloads down the line. 1213c55e8d03SJohannes Weiner * Since they're marked for immediate reclaim, they won't put 1214c55e8d03SJohannes Weiner * memory pressure on the cache working set any longer than it 1215c55e8d03SJohannes Weiner * takes to write them to disk. 1216e62e384eSMichal Hocko */ 1217283aba9fSMel Gorman if (PageWriteback(page)) { 1218283aba9fSMel Gorman /* Case 1 above */ 1219283aba9fSMel Gorman if (current_is_kswapd() && 1220283aba9fSMel Gorman PageReclaim(page) && 1221599d0c95SMel Gorman test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { 1222b1a6f21eSMel Gorman nr_immediate++; 1223c55e8d03SJohannes Weiner goto activate_locked; 1224283aba9fSMel Gorman 1225283aba9fSMel Gorman /* Case 2 above */ 122697c9341fSTejun Heo } else if (sane_reclaim(sc) || 1227ecf5fc6eSMichal Hocko !PageReclaim(page) || !may_enter_fs) { 1228c3b94f44SHugh Dickins /* 1229c3b94f44SHugh Dickins * This is slightly racy - end_page_writeback() 1230c3b94f44SHugh Dickins * might have just cleared PageReclaim, then 1231c3b94f44SHugh Dickins * setting PageReclaim here end up interpreted 1232c3b94f44SHugh Dickins * as PageReadahead - but that does not matter 1233c3b94f44SHugh Dickins * enough to care. 
What we do want is for this 1234c3b94f44SHugh Dickins * page to have PageReclaim set next time memcg 1235c3b94f44SHugh Dickins * reclaim reaches the tests above, so it will 1236c3b94f44SHugh Dickins * then wait_on_page_writeback() to avoid OOM; 1237c3b94f44SHugh Dickins * and it's also appropriate in global reclaim. 1238c3b94f44SHugh Dickins */ 1239c3b94f44SHugh Dickins SetPageReclaim(page); 124092df3a72SMel Gorman nr_writeback++; 1241c55e8d03SJohannes Weiner goto activate_locked; 1242283aba9fSMel Gorman 1243283aba9fSMel Gorman /* Case 3 above */ 1244283aba9fSMel Gorman } else { 12457fadc820SHugh Dickins unlock_page(page); 1246c3b94f44SHugh Dickins wait_on_page_writeback(page); 12477fadc820SHugh Dickins /* then go back and try same page again */ 12487fadc820SHugh Dickins list_add_tail(&page->lru, page_list); 12497fadc820SHugh Dickins continue; 1250e62e384eSMichal Hocko } 1251283aba9fSMel Gorman } 12521da177e4SLinus Torvalds 125302c6de8dSMinchan Kim if (!force_reclaim) 12546a18adb3SKonstantin Khlebnikov references = page_check_references(page, sc); 125502c6de8dSMinchan Kim 1256dfc8d636SJohannes Weiner switch (references) { 1257dfc8d636SJohannes Weiner case PAGEREF_ACTIVATE: 12581da177e4SLinus Torvalds goto activate_locked; 125964574746SJohannes Weiner case PAGEREF_KEEP: 12605bccd166SMichal Hocko nr_ref_keep++; 126164574746SJohannes Weiner goto keep_locked; 1262dfc8d636SJohannes Weiner case PAGEREF_RECLAIM: 1263dfc8d636SJohannes Weiner case PAGEREF_RECLAIM_CLEAN: 1264dfc8d636SJohannes Weiner ; /* try to reclaim the page below */ 1265dfc8d636SJohannes Weiner } 12661da177e4SLinus Torvalds 12671da177e4SLinus Torvalds /* 12681da177e4SLinus Torvalds * Anonymous process memory has backing store? 12691da177e4SLinus Torvalds * Try to allocate it some swap space here. 1270802a3a92SShaohua Li * Lazyfree page could be freed directly 12711da177e4SLinus Torvalds */ 1272bd4c82c2SHuang Ying if (PageAnon(page) && PageSwapBacked(page)) { 1273bd4c82c2SHuang Ying if (!PageSwapCache(page)) { 127463eb6b93SHugh Dickins if (!(sc->gfp_mask & __GFP_IO)) 127563eb6b93SHugh Dickins goto keep_locked; 1276747552b1SHuang Ying if (PageTransHuge(page)) { 1277b8f593cdSHuang Ying /* cannot split THP, skip it */ 1278747552b1SHuang Ying if (!can_split_huge_page(page, NULL)) 1279b8f593cdSHuang Ying goto activate_locked; 1280747552b1SHuang Ying /* 1281747552b1SHuang Ying * Split pages without a PMD map right 1282747552b1SHuang Ying * away. Chances are some or all of the 1283747552b1SHuang Ying * tail pages can be freed without IO. 
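 * (A compound_mapcount() of zero means the page is no longer PMD-mapped
 * by any process, so nothing relies on it staying huge and splitting it
 * before swap-out loses nothing.)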
1284747552b1SHuang Ying */ 1285747552b1SHuang Ying if (!compound_mapcount(page) && 1286bd4c82c2SHuang Ying split_huge_page_to_list(page, 1287bd4c82c2SHuang Ying page_list)) 1288747552b1SHuang Ying goto activate_locked; 1289747552b1SHuang Ying } 12900f074658SMinchan Kim if (!add_to_swap(page)) { 12910f074658SMinchan Kim if (!PageTransHuge(page)) 12921da177e4SLinus Torvalds goto activate_locked; 1293bd4c82c2SHuang Ying /* Fallback to swap normal pages */ 1294bd4c82c2SHuang Ying if (split_huge_page_to_list(page, 1295bd4c82c2SHuang Ying page_list)) 12960f074658SMinchan Kim goto activate_locked; 1297fe490cc0SHuang Ying #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1298fe490cc0SHuang Ying count_vm_event(THP_SWPOUT_FALLBACK); 1299fe490cc0SHuang Ying #endif 13000f074658SMinchan Kim if (!add_to_swap(page)) 13010f074658SMinchan Kim goto activate_locked; 13020f074658SMinchan Kim } 13030f074658SMinchan Kim 130463eb6b93SHugh Dickins may_enter_fs = 1; 13051da177e4SLinus Torvalds 1306e2be15f6SMel Gorman /* Adding to swap updated mapping */ 13071da177e4SLinus Torvalds mapping = page_mapping(page); 1308bd4c82c2SHuang Ying } 13097751b2daSKirill A. Shutemov } else if (unlikely(PageTransHuge(page))) { 13107751b2daSKirill A. Shutemov /* Split file THP */ 13117751b2daSKirill A. Shutemov if (split_huge_page_to_list(page, page_list)) 13127751b2daSKirill A. Shutemov goto keep_locked; 1313e2be15f6SMel Gorman } 13141da177e4SLinus Torvalds 13151da177e4SLinus Torvalds /* 13161da177e4SLinus Torvalds * The page is mapped into the page tables of one or more 13171da177e4SLinus Torvalds * processes. Try to unmap it here. 13181da177e4SLinus Torvalds */ 1319802a3a92SShaohua Li if (page_mapped(page)) { 1320bd4c82c2SHuang Ying enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH; 1321bd4c82c2SHuang Ying 1322bd4c82c2SHuang Ying if (unlikely(PageTransHuge(page))) 1323bd4c82c2SHuang Ying flags |= TTU_SPLIT_HUGE_PMD; 1324bd4c82c2SHuang Ying if (!try_to_unmap(page, flags)) { 13255bccd166SMichal Hocko nr_unmap_fail++; 13261da177e4SLinus Torvalds goto activate_locked; 13271da177e4SLinus Torvalds } 13281da177e4SLinus Torvalds } 13291da177e4SLinus Torvalds 13301da177e4SLinus Torvalds if (PageDirty(page)) { 1331ee72886dSMel Gorman /* 13324eda4823SJohannes Weiner * Only kswapd can writeback filesystem pages 13334eda4823SJohannes Weiner * to avoid risk of stack overflow. But avoid 13344eda4823SJohannes Weiner * injecting inefficient single-page IO into 13354eda4823SJohannes Weiner * flusher writeback as much as possible: only 13364eda4823SJohannes Weiner * write pages when we've encountered many 13374eda4823SJohannes Weiner * dirty pages, and when we've already scanned 13384eda4823SJohannes Weiner * the rest of the LRU for clean pages and see 13394eda4823SJohannes Weiner * the same dirty pages again (PageReclaim). 1340ee72886dSMel Gorman */ 1341f84f6e2bSMel Gorman if (page_is_file_cache(page) && 13424eda4823SJohannes Weiner (!current_is_kswapd() || !PageReclaim(page) || 1343599d0c95SMel Gorman !test_bit(PGDAT_DIRTY, &pgdat->flags))) { 134449ea7eb6SMel Gorman /* 134549ea7eb6SMel Gorman * Immediately reclaim when written back. 
134649ea7eb6SMel Gorman * Similar in principal to deactivate_page() 134749ea7eb6SMel Gorman * except we already have the page isolated 134849ea7eb6SMel Gorman * and know it's dirty 134949ea7eb6SMel Gorman */ 1350c4a25635SMel Gorman inc_node_page_state(page, NR_VMSCAN_IMMEDIATE); 135149ea7eb6SMel Gorman SetPageReclaim(page); 135249ea7eb6SMel Gorman 1353c55e8d03SJohannes Weiner goto activate_locked; 1354ee72886dSMel Gorman } 1355ee72886dSMel Gorman 1356dfc8d636SJohannes Weiner if (references == PAGEREF_RECLAIM_CLEAN) 13571da177e4SLinus Torvalds goto keep_locked; 13584dd4b920SAndrew Morton if (!may_enter_fs) 13591da177e4SLinus Torvalds goto keep_locked; 136052a8363eSChristoph Lameter if (!sc->may_writepage) 13611da177e4SLinus Torvalds goto keep_locked; 13621da177e4SLinus Torvalds 1363d950c947SMel Gorman /* 1364d950c947SMel Gorman * Page is dirty. Flush the TLB if a writable entry 1365d950c947SMel Gorman * potentially exists to avoid CPU writes after IO 1366d950c947SMel Gorman * starts and then write it out here. 1367d950c947SMel Gorman */ 1368d950c947SMel Gorman try_to_unmap_flush_dirty(); 13697d3579e8SKOSAKI Motohiro switch (pageout(page, mapping, sc)) { 13701da177e4SLinus Torvalds case PAGE_KEEP: 13711da177e4SLinus Torvalds goto keep_locked; 13721da177e4SLinus Torvalds case PAGE_ACTIVATE: 13731da177e4SLinus Torvalds goto activate_locked; 13741da177e4SLinus Torvalds case PAGE_SUCCESS: 13757d3579e8SKOSAKI Motohiro if (PageWriteback(page)) 137641ac1999SMel Gorman goto keep; 13777d3579e8SKOSAKI Motohiro if (PageDirty(page)) 13781da177e4SLinus Torvalds goto keep; 13797d3579e8SKOSAKI Motohiro 13801da177e4SLinus Torvalds /* 13811da177e4SLinus Torvalds * A synchronous write - probably a ramdisk. Go 13821da177e4SLinus Torvalds * ahead and try to reclaim the page. 13831da177e4SLinus Torvalds */ 1384529ae9aaSNick Piggin if (!trylock_page(page)) 13851da177e4SLinus Torvalds goto keep; 13861da177e4SLinus Torvalds if (PageDirty(page) || PageWriteback(page)) 13871da177e4SLinus Torvalds goto keep_locked; 13881da177e4SLinus Torvalds mapping = page_mapping(page); 13891da177e4SLinus Torvalds case PAGE_CLEAN: 13901da177e4SLinus Torvalds ; /* try to free the page below */ 13911da177e4SLinus Torvalds } 13921da177e4SLinus Torvalds } 13931da177e4SLinus Torvalds 13941da177e4SLinus Torvalds /* 13951da177e4SLinus Torvalds * If the page has buffers, try to free the buffer mappings 13961da177e4SLinus Torvalds * associated with this page. If we succeed we try to free 13971da177e4SLinus Torvalds * the page as well. 13981da177e4SLinus Torvalds * 13991da177e4SLinus Torvalds * We do this even if the page is PageDirty(). 14001da177e4SLinus Torvalds * try_to_release_page() does not perform I/O, but it is 14011da177e4SLinus Torvalds * possible for a page to have PageDirty set, but it is actually 14021da177e4SLinus Torvalds * clean (all its buffers are clean). This happens if the 14031da177e4SLinus Torvalds * buffers were written out directly, with submit_bh(). ext3 14041da177e4SLinus Torvalds * will do this, as well as the blockdev mapping. 14051da177e4SLinus Torvalds * try_to_release_page() will discover that cleanness and will 14061da177e4SLinus Torvalds * drop the buffers and mark the page clean - it can be freed. 14071da177e4SLinus Torvalds * 14081da177e4SLinus Torvalds * Rarely, pages can have buffers and no ->mapping. These are 14091da177e4SLinus Torvalds * the pages which were not successfully invalidated in 14101da177e4SLinus Torvalds * truncate_complete_page(). 
We try to drop those buffers here 14111da177e4SLinus Torvalds * and if that worked, and the page is no longer mapped into 14121da177e4SLinus Torvalds * process address space (page_count == 1) it can be freed. 14131da177e4SLinus Torvalds * Otherwise, leave the page on the LRU so it is swappable. 14141da177e4SLinus Torvalds */ 1415266cf658SDavid Howells if (page_has_private(page)) { 14161da177e4SLinus Torvalds if (!try_to_release_page(page, sc->gfp_mask)) 14171da177e4SLinus Torvalds goto activate_locked; 1418e286781dSNick Piggin if (!mapping && page_count(page) == 1) { 1419e286781dSNick Piggin unlock_page(page); 1420e286781dSNick Piggin if (put_page_testzero(page)) 14211da177e4SLinus Torvalds goto free_it; 1422e286781dSNick Piggin else { 1423e286781dSNick Piggin /* 1424e286781dSNick Piggin * rare race with speculative reference. 1425e286781dSNick Piggin * the speculative reference will free 1426e286781dSNick Piggin * this page shortly, so we may 1427e286781dSNick Piggin * increment nr_reclaimed here (and 1428e286781dSNick Piggin * leave it off the LRU). 1429e286781dSNick Piggin */ 1430e286781dSNick Piggin nr_reclaimed++; 1431e286781dSNick Piggin continue; 1432e286781dSNick Piggin } 1433e286781dSNick Piggin } 14341da177e4SLinus Torvalds } 14351da177e4SLinus Torvalds 1436802a3a92SShaohua Li if (PageAnon(page) && !PageSwapBacked(page)) { 1437802a3a92SShaohua Li /* follow __remove_mapping for reference */ 1438802a3a92SShaohua Li if (!page_ref_freeze(page, 1)) 143949d2e9ccSChristoph Lameter goto keep_locked; 1440802a3a92SShaohua Li if (PageDirty(page)) { 1441802a3a92SShaohua Li page_ref_unfreeze(page, 1); 1442802a3a92SShaohua Li goto keep_locked; 1443802a3a92SShaohua Li } 14441da177e4SLinus Torvalds 1445802a3a92SShaohua Li count_vm_event(PGLAZYFREED); 14462262185cSRoman Gushchin count_memcg_page_event(page, PGLAZYFREED); 1447802a3a92SShaohua Li } else if (!mapping || !__remove_mapping(mapping, page, true)) 1448802a3a92SShaohua Li goto keep_locked; 1449a978d6f5SNick Piggin /* 1450a978d6f5SNick Piggin * At this point, we have no other references and there is 1451a978d6f5SNick Piggin * no way to pick any more up (removed from LRU, removed 1452a978d6f5SNick Piggin * from pagecache). Can use non-atomic bitops now (and 1453a978d6f5SNick Piggin * we obviously don't have to worry about waking up a process 1454a978d6f5SNick Piggin * waiting on the page lock, because there are no references. 1455a978d6f5SNick Piggin */ 145648c935adSKirill A. Shutemov __ClearPageLocked(page); 1457e286781dSNick Piggin free_it: 145805ff5137SAndrew Morton nr_reclaimed++; 1459abe4c3b5SMel Gorman 1460abe4c3b5SMel Gorman /* 1461abe4c3b5SMel Gorman * Is there need to periodically free_page_list? It would 1462abe4c3b5SMel Gorman * appear not as the counts should be low 1463abe4c3b5SMel Gorman */ 1464bd4c82c2SHuang Ying if (unlikely(PageTransHuge(page))) { 1465bd4c82c2SHuang Ying mem_cgroup_uncharge(page); 1466bd4c82c2SHuang Ying (*get_compound_page_dtor(page))(page); 1467bd4c82c2SHuang Ying } else 1468abe4c3b5SMel Gorman list_add(&page->lru, &free_pages); 14691da177e4SLinus Torvalds continue; 14701da177e4SLinus Torvalds 14711da177e4SLinus Torvalds activate_locked: 147268a22394SRik van Riel /* Not a candidate for swapping, so reclaim swap space. 
*/ 1473ad6b6704SMinchan Kim if (PageSwapCache(page) && (mem_cgroup_swap_full(page) || 1474ad6b6704SMinchan Kim PageMlocked(page))) 1475a2c43eedSHugh Dickins try_to_free_swap(page); 1476309381feSSasha Levin VM_BUG_ON_PAGE(PageActive(page), page); 1477ad6b6704SMinchan Kim if (!PageMlocked(page)) { 14781da177e4SLinus Torvalds SetPageActive(page); 14791da177e4SLinus Torvalds pgactivate++; 14802262185cSRoman Gushchin count_memcg_page_event(page, PGACTIVATE); 1481ad6b6704SMinchan Kim } 14821da177e4SLinus Torvalds keep_locked: 14831da177e4SLinus Torvalds unlock_page(page); 14841da177e4SLinus Torvalds keep: 14851da177e4SLinus Torvalds list_add(&page->lru, &ret_pages); 1486309381feSSasha Levin VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page); 14871da177e4SLinus Torvalds } 1488abe4c3b5SMel Gorman 1489747db954SJohannes Weiner mem_cgroup_uncharge_list(&free_pages); 149072b252aeSMel Gorman try_to_unmap_flush(); 14912d4894b5SMel Gorman free_unref_page_list(&free_pages); 1492abe4c3b5SMel Gorman 14931da177e4SLinus Torvalds list_splice(&ret_pages, page_list); 1494f8891e5eSChristoph Lameter count_vm_events(PGACTIVATE, pgactivate); 14950a31bc97SJohannes Weiner 14963c710c1aSMichal Hocko if (stat) { 14973c710c1aSMichal Hocko stat->nr_dirty = nr_dirty; 14983c710c1aSMichal Hocko stat->nr_congested = nr_congested; 14993c710c1aSMichal Hocko stat->nr_unqueued_dirty = nr_unqueued_dirty; 15003c710c1aSMichal Hocko stat->nr_writeback = nr_writeback; 15013c710c1aSMichal Hocko stat->nr_immediate = nr_immediate; 15025bccd166SMichal Hocko stat->nr_activate = pgactivate; 15035bccd166SMichal Hocko stat->nr_ref_keep = nr_ref_keep; 15045bccd166SMichal Hocko stat->nr_unmap_fail = nr_unmap_fail; 15053c710c1aSMichal Hocko } 150605ff5137SAndrew Morton return nr_reclaimed; 15071da177e4SLinus Torvalds } 15081da177e4SLinus Torvalds 150902c6de8dSMinchan Kim unsigned long reclaim_clean_pages_from_list(struct zone *zone, 151002c6de8dSMinchan Kim struct list_head *page_list) 151102c6de8dSMinchan Kim { 151202c6de8dSMinchan Kim struct scan_control sc = { 151302c6de8dSMinchan Kim .gfp_mask = GFP_KERNEL, 151402c6de8dSMinchan Kim .priority = DEF_PRIORITY, 151502c6de8dSMinchan Kim .may_unmap = 1, 151602c6de8dSMinchan Kim }; 15173c710c1aSMichal Hocko unsigned long ret; 151802c6de8dSMinchan Kim struct page *page, *next; 151902c6de8dSMinchan Kim LIST_HEAD(clean_pages); 152002c6de8dSMinchan Kim 152102c6de8dSMinchan Kim list_for_each_entry_safe(page, next, page_list, lru) { 1522117aad1eSRafael Aquini if (page_is_file_cache(page) && !PageDirty(page) && 1523b1123ea6SMinchan Kim !__PageMovable(page)) { 152402c6de8dSMinchan Kim ClearPageActive(page); 152502c6de8dSMinchan Kim list_move(&page->lru, &clean_pages); 152602c6de8dSMinchan Kim } 152702c6de8dSMinchan Kim } 152802c6de8dSMinchan Kim 1529599d0c95SMel Gorman ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc, 1530a128ca71SShaohua Li TTU_IGNORE_ACCESS, NULL, true); 153102c6de8dSMinchan Kim list_splice(&clean_pages, page_list); 1532599d0c95SMel Gorman mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret); 153302c6de8dSMinchan Kim return ret; 153402c6de8dSMinchan Kim } 153502c6de8dSMinchan Kim 15365ad333ebSAndy Whitcroft /* 15375ad333ebSAndy Whitcroft * Attempt to remove the specified page from its LRU. Only take this page 15385ad333ebSAndy Whitcroft * if it is of the appropriate PageActive status. Pages which are being 15395ad333ebSAndy Whitcroft * freed elsewhere are also ignored. 
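 * ("Being freed" means the reference count has already dropped to zero;
 * get_page_unless_zero() below fails for such pages, so they are left
 * on the list and -EBUSY is returned.)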
15405ad333ebSAndy Whitcroft * 15415ad333ebSAndy Whitcroft * page: page to consider 15425ad333ebSAndy Whitcroft * mode: one of the LRU isolation modes defined above 15435ad333ebSAndy Whitcroft * 15445ad333ebSAndy Whitcroft * returns 0 on success, -ve errno on failure. 15455ad333ebSAndy Whitcroft */ 1546f3fd4a61SKonstantin Khlebnikov int __isolate_lru_page(struct page *page, isolate_mode_t mode) 15475ad333ebSAndy Whitcroft { 15485ad333ebSAndy Whitcroft int ret = -EINVAL; 15495ad333ebSAndy Whitcroft 15505ad333ebSAndy Whitcroft /* Only take pages on the LRU. */ 15515ad333ebSAndy Whitcroft if (!PageLRU(page)) 15525ad333ebSAndy Whitcroft return ret; 15535ad333ebSAndy Whitcroft 1554e46a2879SMinchan Kim /* Compaction should not handle unevictable pages but CMA can do so */ 1555e46a2879SMinchan Kim if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE)) 1556894bc310SLee Schermerhorn return ret; 1557894bc310SLee Schermerhorn 15585ad333ebSAndy Whitcroft ret = -EBUSY; 155908e552c6SKAMEZAWA Hiroyuki 1560c8244935SMel Gorman /* 1561c8244935SMel Gorman * To minimise LRU disruption, the caller can indicate that it only 1562c8244935SMel Gorman * wants to isolate pages it will be able to operate on without 1563c8244935SMel Gorman * blocking - clean pages for the most part. 1564c8244935SMel Gorman * 1565c8244935SMel Gorman * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages 1566c8244935SMel Gorman * that it is possible to migrate without blocking 1567c8244935SMel Gorman */ 15681276ad68SJohannes Weiner if (mode & ISOLATE_ASYNC_MIGRATE) { 1569c8244935SMel Gorman /* All the caller can do on PageWriteback is block */ 1570c8244935SMel Gorman if (PageWriteback(page)) 157139deaf85SMinchan Kim return ret; 157239deaf85SMinchan Kim 1573c8244935SMel Gorman if (PageDirty(page)) { 1574c8244935SMel Gorman struct address_space *mapping; 157569d763fcSMel Gorman bool migrate_dirty; 1576c8244935SMel Gorman 1577c8244935SMel Gorman /* 1578c8244935SMel Gorman * Only pages without mappings or that have a 1579c8244935SMel Gorman * ->migratepage callback are possible to migrate 158069d763fcSMel Gorman * without blocking. However, we can be racing with 158169d763fcSMel Gorman * truncation so it's necessary to lock the page 158269d763fcSMel Gorman * to stabilise the mapping as truncation holds 158369d763fcSMel Gorman * the page lock until after the page is removed 158469d763fcSMel Gorman * from the page cache. 1585c8244935SMel Gorman */ 158669d763fcSMel Gorman if (!trylock_page(page)) 158769d763fcSMel Gorman return ret; 158869d763fcSMel Gorman 1589c8244935SMel Gorman mapping = page_mapping(page); 1590145e1a71SHugh Dickins migrate_dirty = !mapping || mapping->a_ops->migratepage; 159169d763fcSMel Gorman unlock_page(page); 159269d763fcSMel Gorman if (!migrate_dirty) 1593c8244935SMel Gorman return ret; 1594c8244935SMel Gorman } 1595c8244935SMel Gorman } 1596c8244935SMel Gorman 1597f80c0673SMinchan Kim if ((mode & ISOLATE_UNMAPPED) && page_mapped(page)) 1598f80c0673SMinchan Kim return ret; 1599f80c0673SMinchan Kim 16005ad333ebSAndy Whitcroft if (likely(get_page_unless_zero(page))) { 16015ad333ebSAndy Whitcroft /* 16025ad333ebSAndy Whitcroft * Be careful not to clear PageLRU until after we're 16035ad333ebSAndy Whitcroft * sure the page is not being freed elsewhere -- the 16045ad333ebSAndy Whitcroft * page release code relies on it. 
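 * Holding the reference taken by get_page_unless_zero() above is what
 * provides that guarantee: once we have a reference the page cannot
 * reach the release path while we clear PageLRU and hand it back.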
16055ad333ebSAndy Whitcroft */ 16065ad333ebSAndy Whitcroft ClearPageLRU(page); 16075ad333ebSAndy Whitcroft ret = 0; 16085ad333ebSAndy Whitcroft } 16095ad333ebSAndy Whitcroft 16105ad333ebSAndy Whitcroft return ret; 16115ad333ebSAndy Whitcroft } 16125ad333ebSAndy Whitcroft 16137ee36a14SMel Gorman 16147ee36a14SMel Gorman /* 16157ee36a14SMel Gorman * Update LRU sizes after isolating pages. The LRU size updates must 16167ee36a14SMel Gorman * be complete before mem_cgroup_update_lru_size due to a sanity check. 16177ee36a14SMel Gorman */ 16187ee36a14SMel Gorman static __always_inline void update_lru_sizes(struct lruvec *lruvec, 1619b4536f0cSMichal Hocko enum lru_list lru, unsigned long *nr_zone_taken) 16207ee36a14SMel Gorman { 16217ee36a14SMel Gorman int zid; 16227ee36a14SMel Gorman 16237ee36a14SMel Gorman for (zid = 0; zid < MAX_NR_ZONES; zid++) { 16247ee36a14SMel Gorman if (!nr_zone_taken[zid]) 16257ee36a14SMel Gorman continue; 16267ee36a14SMel Gorman 16277ee36a14SMel Gorman __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); 1628b4536f0cSMichal Hocko #ifdef CONFIG_MEMCG 1629b4536f0cSMichal Hocko mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); 1630b4536f0cSMichal Hocko #endif 16317ee36a14SMel Gorman } 16327ee36a14SMel Gorman 16337ee36a14SMel Gorman } 16347ee36a14SMel Gorman 163549d2e9ccSChristoph Lameter /* 1636a52633d8SMel Gorman * zone_lru_lock is heavily contended. Some of the functions that 16371da177e4SLinus Torvalds * shrink the lists perform better by taking out a batch of pages 16381da177e4SLinus Torvalds * and working on them outside the LRU lock. 16391da177e4SLinus Torvalds * 16401da177e4SLinus Torvalds * For pagecache intensive workloads, this function is the hottest 16411da177e4SLinus Torvalds * spot in the kernel (apart from copy_*_user functions). 16421da177e4SLinus Torvalds * 16431da177e4SLinus Torvalds * Appropriate locks must be held before calling this function. 16441da177e4SLinus Torvalds * 1645791b48b6SMinchan Kim * @nr_to_scan: The number of eligible pages to look through on the list. 16465dc35979SKonstantin Khlebnikov * @lruvec: The LRU vector to pull pages from. 16471da177e4SLinus Torvalds * @dst: The temp list to put pages on to. 1648f626012dSHugh Dickins * @nr_scanned: The number of pages that were scanned. 1649fe2c2a10SRik van Riel * @sc: The scan_control struct for this reclaim session 16505ad333ebSAndy Whitcroft * @mode: One of the LRU isolation modes 16513cb99451SKonstantin Khlebnikov * @lru: LRU list id for isolating 16521da177e4SLinus Torvalds * 16531da177e4SLinus Torvalds * returns how many pages were moved onto *@dst.
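 *
 * Note that *@nr_scanned reports the number of list entries walked
 * (total_scan), not only the eligible ones: pages from zones above
 * sc->reclaim_idx are moved to a skip list without counting against
 * @nr_to_scan. For example, with nr_to_scan == 32 and a list head full
 * of ineligible highmem pages, the loop may walk far more than 32
 * entries so that it can still isolate up to 32 eligible pages rather
 * than return empty-handed and risk a premature OOM.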
16541da177e4SLinus Torvalds */ 165569e05944SAndrew Morton static unsigned long isolate_lru_pages(unsigned long nr_to_scan, 16565dc35979SKonstantin Khlebnikov struct lruvec *lruvec, struct list_head *dst, 1657fe2c2a10SRik van Riel unsigned long *nr_scanned, struct scan_control *sc, 16583cb99451SKonstantin Khlebnikov isolate_mode_t mode, enum lru_list lru) 16591da177e4SLinus Torvalds { 166075b00af7SHugh Dickins struct list_head *src = &lruvec->lists[lru]; 166169e05944SAndrew Morton unsigned long nr_taken = 0; 1662599d0c95SMel Gorman unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 }; 16637cc30fcfSMel Gorman unsigned long nr_skipped[MAX_NR_ZONES] = { 0, }; 16643db65812SJohannes Weiner unsigned long skipped = 0; 1665791b48b6SMinchan Kim unsigned long scan, total_scan, nr_pages; 1666b2e18757SMel Gorman LIST_HEAD(pages_skipped); 16671da177e4SLinus Torvalds 1668791b48b6SMinchan Kim scan = 0; 1669791b48b6SMinchan Kim for (total_scan = 0; 1670791b48b6SMinchan Kim scan < nr_to_scan && nr_taken < nr_to_scan && !list_empty(src); 1671791b48b6SMinchan Kim total_scan++) { 16725ad333ebSAndy Whitcroft struct page *page; 16735ad333ebSAndy Whitcroft 16741da177e4SLinus Torvalds page = lru_to_page(src); 16751da177e4SLinus Torvalds prefetchw_prev_lru_page(page, src, flags); 16761da177e4SLinus Torvalds 1677309381feSSasha Levin VM_BUG_ON_PAGE(!PageLRU(page), page); 16788d438f96SNick Piggin 1679b2e18757SMel Gorman if (page_zonenum(page) > sc->reclaim_idx) { 1680b2e18757SMel Gorman list_move(&page->lru, &pages_skipped); 16817cc30fcfSMel Gorman nr_skipped[page_zonenum(page)]++; 1682b2e18757SMel Gorman continue; 1683b2e18757SMel Gorman } 1684b2e18757SMel Gorman 1685791b48b6SMinchan Kim /* 1686791b48b6SMinchan Kim * Do not count skipped pages because that makes the function 1687791b48b6SMinchan Kim * return with no isolated pages if the LRU mostly contains 1688791b48b6SMinchan Kim * ineligible pages. This causes the VM to not reclaim any 1689791b48b6SMinchan Kim * pages, triggering a premature OOM. 1690791b48b6SMinchan Kim */ 1691791b48b6SMinchan Kim scan++; 1692f3fd4a61SKonstantin Khlebnikov switch (__isolate_lru_page(page, mode)) { 16935ad333ebSAndy Whitcroft case 0: 1694599d0c95SMel Gorman nr_pages = hpage_nr_pages(page); 1695599d0c95SMel Gorman nr_taken += nr_pages; 1696599d0c95SMel Gorman nr_zone_taken[page_zonenum(page)] += nr_pages; 16975ad333ebSAndy Whitcroft list_move(&page->lru, dst); 16985ad333ebSAndy Whitcroft break; 16997c8ee9a8SNick Piggin 17005ad333ebSAndy Whitcroft case -EBUSY: 17015ad333ebSAndy Whitcroft /* else it is being freed elsewhere */ 17025ad333ebSAndy Whitcroft list_move(&page->lru, src); 17035ad333ebSAndy Whitcroft continue; 17045ad333ebSAndy Whitcroft 17055ad333ebSAndy Whitcroft default: 17065ad333ebSAndy Whitcroft BUG(); 17075ad333ebSAndy Whitcroft } 17085ad333ebSAndy Whitcroft } 17091da177e4SLinus Torvalds 1710b2e18757SMel Gorman /* 1711b2e18757SMel Gorman * Splice any skipped pages to the start of the LRU list. Note that 1712b2e18757SMel Gorman * this disrupts the LRU order when reclaiming for lower zones but 1713b2e18757SMel Gorman * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX 1714b2e18757SMel Gorman * scanning would soon rescan the same pages to skip and put the 1715b2e18757SMel Gorman * system at risk of premature OOM. 
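 * The skips are still accounted for: PGSCAN_SKIP is counted per zone
 * below, so they stay visible in the vm event statistics.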
1716b2e18757SMel Gorman */ 17177cc30fcfSMel Gorman if (!list_empty(&pages_skipped)) { 17187cc30fcfSMel Gorman int zid; 17197cc30fcfSMel Gorman 17203db65812SJohannes Weiner list_splice(&pages_skipped, src); 17217cc30fcfSMel Gorman for (zid = 0; zid < MAX_NR_ZONES; zid++) { 17227cc30fcfSMel Gorman if (!nr_skipped[zid]) 17237cc30fcfSMel Gorman continue; 17247cc30fcfSMel Gorman 17257cc30fcfSMel Gorman __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]); 17261265e3a6SMichal Hocko skipped += nr_skipped[zid]; 17277cc30fcfSMel Gorman } 17287cc30fcfSMel Gorman } 1729791b48b6SMinchan Kim *nr_scanned = total_scan; 17301265e3a6SMichal Hocko trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, 1731791b48b6SMinchan Kim total_scan, skipped, nr_taken, mode, lru); 1732b4536f0cSMichal Hocko update_lru_sizes(lruvec, lru, nr_zone_taken); 17331da177e4SLinus Torvalds return nr_taken; 17341da177e4SLinus Torvalds } 17351da177e4SLinus Torvalds 173662695a84SNick Piggin /** 173762695a84SNick Piggin * isolate_lru_page - tries to isolate a page from its LRU list 173862695a84SNick Piggin * @page: page to isolate from its LRU list 173962695a84SNick Piggin * 174062695a84SNick Piggin * Isolates a @page from an LRU list, clears PageLRU and adjusts the 174162695a84SNick Piggin * vmstat statistic corresponding to whatever LRU list the page was on. 174262695a84SNick Piggin * 174362695a84SNick Piggin * Returns 0 if the page was removed from an LRU list. 174462695a84SNick Piggin * Returns -EBUSY if the page was not on an LRU list. 174562695a84SNick Piggin * 174662695a84SNick Piggin * The returned page will have PageLRU() cleared. If it was found on 1747894bc310SLee Schermerhorn * the active list, it will have PageActive set. If it was found on 1748894bc310SLee Schermerhorn * the unevictable list, it will have the PageUnevictable bit set. That flag 1749894bc310SLee Schermerhorn * may need to be cleared by the caller before letting the page go. 175062695a84SNick Piggin * 175162695a84SNick Piggin * The vmstat statistic corresponding to the list on which the page was 175262695a84SNick Piggin * found will be decremented. 175362695a84SNick Piggin * 175462695a84SNick Piggin * Restrictions: 1755a5d09bedSMike Rapoport * 175662695a84SNick Piggin * (1) Must be called with an elevated refcount on the page. This is a 175762695a84SNick Piggin * fundamental difference from isolate_lru_pages (which is called 175862695a84SNick Piggin * without a stable reference). 175962695a84SNick Piggin * (2) the lru_lock must not be held. 176062695a84SNick Piggin * (3) interrupts must be enabled. 176162695a84SNick Piggin */ 176262695a84SNick Piggin int isolate_lru_page(struct page *page) 176362695a84SNick Piggin { 176462695a84SNick Piggin int ret = -EBUSY; 176562695a84SNick Piggin 1766309381feSSasha Levin VM_BUG_ON_PAGE(!page_count(page), page); 1767cf2a82eeSKirill A.
Shutemov WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"); 17680c917313SKonstantin Khlebnikov 176962695a84SNick Piggin if (PageLRU(page)) { 177062695a84SNick Piggin struct zone *zone = page_zone(page); 1771fa9add64SHugh Dickins struct lruvec *lruvec; 177262695a84SNick Piggin 1773a52633d8SMel Gorman spin_lock_irq(zone_lru_lock(zone)); 1774599d0c95SMel Gorman lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 17750c917313SKonstantin Khlebnikov if (PageLRU(page)) { 1776894bc310SLee Schermerhorn int lru = page_lru(page); 17770c917313SKonstantin Khlebnikov get_page(page); 177862695a84SNick Piggin ClearPageLRU(page); 1779fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, lru); 1780fa9add64SHugh Dickins ret = 0; 178162695a84SNick Piggin } 1782a52633d8SMel Gorman spin_unlock_irq(zone_lru_lock(zone)); 178362695a84SNick Piggin } 178462695a84SNick Piggin return ret; 178562695a84SNick Piggin } 178662695a84SNick Piggin 17875ad333ebSAndy Whitcroft /* 1788d37dd5dcSFengguang Wu * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and 1789d37dd5dcSFengguang Wu * then get resheduled. When there are massive number of tasks doing page 1790d37dd5dcSFengguang Wu * allocation, such sleeping direct reclaimers may keep piling up on each CPU, 1791d37dd5dcSFengguang Wu * the LRU list will go small and be scanned faster than necessary, leading to 1792d37dd5dcSFengguang Wu * unnecessary swapping, thrashing and OOM. 179335cd7815SRik van Riel */ 1794599d0c95SMel Gorman static int too_many_isolated(struct pglist_data *pgdat, int file, 179535cd7815SRik van Riel struct scan_control *sc) 179635cd7815SRik van Riel { 179735cd7815SRik van Riel unsigned long inactive, isolated; 179835cd7815SRik van Riel 179935cd7815SRik van Riel if (current_is_kswapd()) 180035cd7815SRik van Riel return 0; 180135cd7815SRik van Riel 180297c9341fSTejun Heo if (!sane_reclaim(sc)) 180335cd7815SRik van Riel return 0; 180435cd7815SRik van Riel 180535cd7815SRik van Riel if (file) { 1806599d0c95SMel Gorman inactive = node_page_state(pgdat, NR_INACTIVE_FILE); 1807599d0c95SMel Gorman isolated = node_page_state(pgdat, NR_ISOLATED_FILE); 180835cd7815SRik van Riel } else { 1809599d0c95SMel Gorman inactive = node_page_state(pgdat, NR_INACTIVE_ANON); 1810599d0c95SMel Gorman isolated = node_page_state(pgdat, NR_ISOLATED_ANON); 181135cd7815SRik van Riel } 181235cd7815SRik van Riel 18133cf23841SFengguang Wu /* 18143cf23841SFengguang Wu * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they 18153cf23841SFengguang Wu * won't get blocked by normal direct-reclaimers, forming a circular 18163cf23841SFengguang Wu * deadlock. 18173cf23841SFengguang Wu */ 1818d0164adcSMel Gorman if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) 18193cf23841SFengguang Wu inactive >>= 3; 18203cf23841SFengguang Wu 182135cd7815SRik van Riel return isolated > inactive; 182235cd7815SRik van Riel } 182335cd7815SRik van Riel 182466635629SMel Gorman static noinline_for_stack void 182575b00af7SHugh Dickins putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list) 182666635629SMel Gorman { 182727ac81d8SKonstantin Khlebnikov struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 1828599d0c95SMel Gorman struct pglist_data *pgdat = lruvec_pgdat(lruvec); 18293f79768fSHugh Dickins LIST_HEAD(pages_to_free); 183066635629SMel Gorman 183166635629SMel Gorman /* 183266635629SMel Gorman * Put back any unfreeable pages. 
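 * Each page is either handed to putback_lru_page() if it became
 * unevictable while it was isolated, returned to the LRU list it
 * belongs to, or, if its last reference was dropped while off the LRU,
 * collected on pages_to_free for a batched release.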
183366635629SMel Gorman */ 183466635629SMel Gorman while (!list_empty(page_list)) { 18353f79768fSHugh Dickins struct page *page = lru_to_page(page_list); 183666635629SMel Gorman int lru; 18373f79768fSHugh Dickins 1838309381feSSasha Levin VM_BUG_ON_PAGE(PageLRU(page), page); 183966635629SMel Gorman list_del(&page->lru); 184039b5f29aSHugh Dickins if (unlikely(!page_evictable(page))) { 1841599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 184266635629SMel Gorman putback_lru_page(page); 1843599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 184466635629SMel Gorman continue; 184566635629SMel Gorman } 1846fa9add64SHugh Dickins 1847599d0c95SMel Gorman lruvec = mem_cgroup_page_lruvec(page, pgdat); 1848fa9add64SHugh Dickins 18497a608572SLinus Torvalds SetPageLRU(page); 185066635629SMel Gorman lru = page_lru(page); 1851fa9add64SHugh Dickins add_page_to_lru_list(page, lruvec, lru); 1852fa9add64SHugh Dickins 185366635629SMel Gorman if (is_active_lru(lru)) { 185466635629SMel Gorman int file = is_file_lru(lru); 18559992af10SRik van Riel int numpages = hpage_nr_pages(page); 18569992af10SRik van Riel reclaim_stat->recent_rotated[file] += numpages; 185766635629SMel Gorman } 18582bcf8879SHugh Dickins if (put_page_testzero(page)) { 18592bcf8879SHugh Dickins __ClearPageLRU(page); 18602bcf8879SHugh Dickins __ClearPageActive(page); 1861fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, lru); 18622bcf8879SHugh Dickins 18632bcf8879SHugh Dickins if (unlikely(PageCompound(page))) { 1864599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 1865747db954SJohannes Weiner mem_cgroup_uncharge(page); 18662bcf8879SHugh Dickins (*get_compound_page_dtor(page))(page); 1867599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 18682bcf8879SHugh Dickins } else 18692bcf8879SHugh Dickins list_add(&page->lru, &pages_to_free); 187066635629SMel Gorman } 187166635629SMel Gorman } 187266635629SMel Gorman 18733f79768fSHugh Dickins /* 18743f79768fSHugh Dickins * To save our caller's stack, now use input list for pages to free. 18753f79768fSHugh Dickins */ 18763f79768fSHugh Dickins list_splice(&pages_to_free, page_list); 187766635629SMel Gorman } 187866635629SMel Gorman 187966635629SMel Gorman /* 1880399ba0b9SNeilBrown * If a kernel thread (such as nfsd for loop-back mounts) services 1881399ba0b9SNeilBrown * a backing device by writing to the page cache it sets PF_LESS_THROTTLE. 1882399ba0b9SNeilBrown * In that case we should only throttle if the backing device it is 1883399ba0b9SNeilBrown * writing to is congested. In other cases it is safe to throttle. 1884399ba0b9SNeilBrown */ 1885399ba0b9SNeilBrown static int current_may_throttle(void) 1886399ba0b9SNeilBrown { 1887399ba0b9SNeilBrown return !(current->flags & PF_LESS_THROTTLE) || 1888399ba0b9SNeilBrown current->backing_dev_info == NULL || 1889399ba0b9SNeilBrown bdi_write_congested(current->backing_dev_info); 1890399ba0b9SNeilBrown } 1891399ba0b9SNeilBrown 1892399ba0b9SNeilBrown /* 1893b2e18757SMel Gorman * shrink_inactive_list() is a helper for shrink_node(). 
It returns the number 18941742f19fSAndrew Morton * of reclaimed pages 18951da177e4SLinus Torvalds */ 189666635629SMel Gorman static noinline_for_stack unsigned long 18971a93be0eSKonstantin Khlebnikov shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, 18989e3b2f8cSKonstantin Khlebnikov struct scan_control *sc, enum lru_list lru) 18991da177e4SLinus Torvalds { 19001da177e4SLinus Torvalds LIST_HEAD(page_list); 1901e247dbceSKOSAKI Motohiro unsigned long nr_scanned; 190205ff5137SAndrew Morton unsigned long nr_reclaimed = 0; 1903e247dbceSKOSAKI Motohiro unsigned long nr_taken; 19043c710c1aSMichal Hocko struct reclaim_stat stat = {}; 1905f3fd4a61SKonstantin Khlebnikov isolate_mode_t isolate_mode = 0; 19063cb99451SKonstantin Khlebnikov int file = is_file_lru(lru); 1907599d0c95SMel Gorman struct pglist_data *pgdat = lruvec_pgdat(lruvec); 19081a93be0eSKonstantin Khlebnikov struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 1909db73ee0dSMichal Hocko bool stalled = false; 191078dc583dSKOSAKI Motohiro 1911599d0c95SMel Gorman while (unlikely(too_many_isolated(pgdat, file, sc))) { 1912db73ee0dSMichal Hocko if (stalled) 1913db73ee0dSMichal Hocko return 0; 1914db73ee0dSMichal Hocko 1915db73ee0dSMichal Hocko /* wait a bit for the reclaimer. */ 1916db73ee0dSMichal Hocko msleep(100); 1917db73ee0dSMichal Hocko stalled = true; 191835cd7815SRik van Riel 191935cd7815SRik van Riel /* We are about to die and free our memory. Return now. */ 192035cd7815SRik van Riel if (fatal_signal_pending(current)) 192135cd7815SRik van Riel return SWAP_CLUSTER_MAX; 192235cd7815SRik van Riel } 192335cd7815SRik van Riel 19241da177e4SLinus Torvalds lru_add_drain(); 1925f80c0673SMinchan Kim 1926f80c0673SMinchan Kim if (!sc->may_unmap) 192761317289SHillf Danton isolate_mode |= ISOLATE_UNMAPPED; 1928f80c0673SMinchan Kim 1929599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 19301da177e4SLinus Torvalds 19315dc35979SKonstantin Khlebnikov nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list, 19325dc35979SKonstantin Khlebnikov &nr_scanned, sc, isolate_mode, lru); 193395d918fcSKonstantin Khlebnikov 1934599d0c95SMel Gorman __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 19359d5e6a9fSHugh Dickins reclaim_stat->recent_scanned[file] += nr_taken; 193695d918fcSKonstantin Khlebnikov 19372262185cSRoman Gushchin if (current_is_kswapd()) { 19382262185cSRoman Gushchin if (global_reclaim(sc)) 1939599d0c95SMel Gorman __count_vm_events(PGSCAN_KSWAPD, nr_scanned); 19402262185cSRoman Gushchin count_memcg_events(lruvec_memcg(lruvec), PGSCAN_KSWAPD, 19412262185cSRoman Gushchin nr_scanned); 19422262185cSRoman Gushchin } else { 19432262185cSRoman Gushchin if (global_reclaim(sc)) 1944599d0c95SMel Gorman __count_vm_events(PGSCAN_DIRECT, nr_scanned); 19452262185cSRoman Gushchin count_memcg_events(lruvec_memcg(lruvec), PGSCAN_DIRECT, 19462262185cSRoman Gushchin nr_scanned); 1947b35ea17bSKOSAKI Motohiro } 1948599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 1949d563c050SHillf Danton 1950d563c050SHillf Danton if (nr_taken == 0) 195166635629SMel Gorman return 0; 1952b35ea17bSKOSAKI Motohiro 1953a128ca71SShaohua Li nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0, 19543c710c1aSMichal Hocko &stat, false); 1955c661b078SAndy Whitcroft 1956599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 19573f79768fSHugh Dickins 19582262185cSRoman Gushchin if (current_is_kswapd()) { 19592262185cSRoman Gushchin if (global_reclaim(sc)) 1960599d0c95SMel Gorman __count_vm_events(PGSTEAL_KSWAPD, nr_reclaimed); 
19612262185cSRoman Gushchin count_memcg_events(lruvec_memcg(lruvec), PGSTEAL_KSWAPD, 19622262185cSRoman Gushchin nr_reclaimed); 19632262185cSRoman Gushchin } else { 19642262185cSRoman Gushchin if (global_reclaim(sc)) 1965599d0c95SMel Gorman __count_vm_events(PGSTEAL_DIRECT, nr_reclaimed); 19662262185cSRoman Gushchin count_memcg_events(lruvec_memcg(lruvec), PGSTEAL_DIRECT, 19672262185cSRoman Gushchin nr_reclaimed); 1968904249aaSYing Han } 1969a74609faSNick Piggin 197027ac81d8SKonstantin Khlebnikov putback_inactive_pages(lruvec, &page_list); 19713f79768fSHugh Dickins 1972599d0c95SMel Gorman __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 19733f79768fSHugh Dickins 1974599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 19753f79768fSHugh Dickins 1976747db954SJohannes Weiner mem_cgroup_uncharge_list(&page_list); 19772d4894b5SMel Gorman free_unref_page_list(&page_list); 1978e11da5b4SMel Gorman 197992df3a72SMel Gorman /* 19801c610d5fSAndrey Ryabinin * If dirty pages are scanned that are not queued for IO, it 19811c610d5fSAndrey Ryabinin * implies that flushers are not doing their job. This can 19821c610d5fSAndrey Ryabinin * happen when memory pressure pushes dirty pages to the end of 19831c610d5fSAndrey Ryabinin * the LRU before the dirty limits are breached and the dirty 19841c610d5fSAndrey Ryabinin * data has expired. It can also happen when the proportion of 19851c610d5fSAndrey Ryabinin * dirty pages grows not through writes but through memory 19861c610d5fSAndrey Ryabinin * pressure reclaiming all the clean cache. And in some cases, 19871c610d5fSAndrey Ryabinin * the flushers simply cannot keep up with the allocation 19881c610d5fSAndrey Ryabinin * rate. Nudge the flusher threads in case they are asleep. 19891c610d5fSAndrey Ryabinin */ 19901c610d5fSAndrey Ryabinin if (stat.nr_unqueued_dirty == nr_taken) 19911c610d5fSAndrey Ryabinin wakeup_flusher_threads(WB_REASON_VMSCAN); 19921c610d5fSAndrey Ryabinin 1993d108c772SAndrey Ryabinin sc->nr.dirty += stat.nr_dirty; 1994d108c772SAndrey Ryabinin sc->nr.congested += stat.nr_congested; 1995d108c772SAndrey Ryabinin sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; 1996d108c772SAndrey Ryabinin sc->nr.writeback += stat.nr_writeback; 1997d108c772SAndrey Ryabinin sc->nr.immediate += stat.nr_immediate; 1998d108c772SAndrey Ryabinin sc->nr.taken += nr_taken; 1999d108c772SAndrey Ryabinin if (file) 2000d108c772SAndrey Ryabinin sc->nr.file_taken += nr_taken; 20018e950282SMel Gorman 2002599d0c95SMel Gorman trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, 2003d51d1e64SSteven Rostedt nr_scanned, nr_reclaimed, &stat, sc->priority, file); 200405ff5137SAndrew Morton return nr_reclaimed; 20051da177e4SLinus Torvalds } 20061da177e4SLinus Torvalds 20073bb1a852SMartin Bligh /* 20081cfb419bSKAMEZAWA Hiroyuki * This moves pages from the active list to the inactive list. 20091cfb419bSKAMEZAWA Hiroyuki * 20101cfb419bSKAMEZAWA Hiroyuki * We move them the other way if the page is referenced by one or more 20111cfb419bSKAMEZAWA Hiroyuki * processes, from rmap. 20121cfb419bSKAMEZAWA Hiroyuki * 20131cfb419bSKAMEZAWA Hiroyuki * If the pages are mostly unmapped, the processing is fast and it is 2014a52633d8SMel Gorman * appropriate to hold zone_lru_lock across the whole operation. But if 20151cfb419bSKAMEZAWA Hiroyuki * the pages are mapped, the processing is slow (page_referenced()) so we 2016a52633d8SMel Gorman * should drop zone_lru_lock around each page. 
It's impossible to balance 20171cfb419bSKAMEZAWA Hiroyuki * this, so instead we remove the pages from the LRU while processing them. 20181cfb419bSKAMEZAWA Hiroyuki * It is safe to rely on PG_active against the non-LRU pages in here because 20191cfb419bSKAMEZAWA Hiroyuki * nobody will play with that bit on a non-LRU page. 20201cfb419bSKAMEZAWA Hiroyuki * 20210139aa7bSJoonsoo Kim * The downside is that we have to touch page->_refcount against each page. 20221cfb419bSKAMEZAWA Hiroyuki * But we had to alter page->flags anyway. 20239d998b4fSMichal Hocko * 20249d998b4fSMichal Hocko * Returns the number of pages moved to the given lru. 20251cfb419bSKAMEZAWA Hiroyuki */ 20261cfb419bSKAMEZAWA Hiroyuki 20279d998b4fSMichal Hocko static unsigned move_active_pages_to_lru(struct lruvec *lruvec, 20283eb4140fSWu Fengguang struct list_head *list, 20292bcf8879SHugh Dickins struct list_head *pages_to_free, 20303eb4140fSWu Fengguang enum lru_list lru) 20313eb4140fSWu Fengguang { 2032599d0c95SMel Gorman struct pglist_data *pgdat = lruvec_pgdat(lruvec); 20333eb4140fSWu Fengguang struct page *page; 2034fa9add64SHugh Dickins int nr_pages; 20359d998b4fSMichal Hocko int nr_moved = 0; 20363eb4140fSWu Fengguang 20373eb4140fSWu Fengguang while (!list_empty(list)) { 20383eb4140fSWu Fengguang page = lru_to_page(list); 2039599d0c95SMel Gorman lruvec = mem_cgroup_page_lruvec(page, pgdat); 20403eb4140fSWu Fengguang 2041309381feSSasha Levin VM_BUG_ON_PAGE(PageLRU(page), page); 20423eb4140fSWu Fengguang SetPageLRU(page); 20433eb4140fSWu Fengguang 2044fa9add64SHugh Dickins nr_pages = hpage_nr_pages(page); 2045599d0c95SMel Gorman update_lru_size(lruvec, lru, page_zonenum(page), nr_pages); 2046925b7673SJohannes Weiner list_move(&page->lru, &lruvec->lists[lru]); 20473eb4140fSWu Fengguang 20482bcf8879SHugh Dickins if (put_page_testzero(page)) { 20492bcf8879SHugh Dickins __ClearPageLRU(page); 20502bcf8879SHugh Dickins __ClearPageActive(page); 2051fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, lru); 20522bcf8879SHugh Dickins 20532bcf8879SHugh Dickins if (unlikely(PageCompound(page))) { 2054599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 2055747db954SJohannes Weiner mem_cgroup_uncharge(page); 20562bcf8879SHugh Dickins (*get_compound_page_dtor(page))(page); 2057599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 20582bcf8879SHugh Dickins } else 20592bcf8879SHugh Dickins list_add(&page->lru, pages_to_free); 20609d998b4fSMichal Hocko } else { 20619d998b4fSMichal Hocko nr_moved += nr_pages; 20623eb4140fSWu Fengguang } 20633eb4140fSWu Fengguang } 20649d5e6a9fSHugh Dickins 20652262185cSRoman Gushchin if (!is_active_lru(lru)) { 2066f0958906SMichal Hocko __count_vm_events(PGDEACTIVATE, nr_moved); 20672262185cSRoman Gushchin count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, 20682262185cSRoman Gushchin nr_moved); 20692262185cSRoman Gushchin } 20709d998b4fSMichal Hocko 20719d998b4fSMichal Hocko return nr_moved; 20723eb4140fSWu Fengguang } 20731cfb419bSKAMEZAWA Hiroyuki 2074f626012dSHugh Dickins static void shrink_active_list(unsigned long nr_to_scan, 20751a93be0eSKonstantin Khlebnikov struct lruvec *lruvec, 2076f16015fbSJohannes Weiner struct scan_control *sc, 20779e3b2f8cSKonstantin Khlebnikov enum lru_list lru) 20781cfb419bSKAMEZAWA Hiroyuki { 207944c241f1SKOSAKI Motohiro unsigned long nr_taken; 2080f626012dSHugh Dickins unsigned long nr_scanned; 20816fe6b7e3SWu Fengguang unsigned long vm_flags; 20821cfb419bSKAMEZAWA Hiroyuki LIST_HEAD(l_hold); /* The pages which were snipped off */ 20838cab4754SWu Fengguang 
LIST_HEAD(l_active); 2084b69408e8SChristoph Lameter LIST_HEAD(l_inactive); 20851cfb419bSKAMEZAWA Hiroyuki struct page *page; 20861a93be0eSKonstantin Khlebnikov struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 20879d998b4fSMichal Hocko unsigned nr_deactivate, nr_activate; 20889d998b4fSMichal Hocko unsigned nr_rotated = 0; 2089f3fd4a61SKonstantin Khlebnikov isolate_mode_t isolate_mode = 0; 20903cb99451SKonstantin Khlebnikov int file = is_file_lru(lru); 2091599d0c95SMel Gorman struct pglist_data *pgdat = lruvec_pgdat(lruvec); 20921cfb419bSKAMEZAWA Hiroyuki 20931da177e4SLinus Torvalds lru_add_drain(); 2094f80c0673SMinchan Kim 2095f80c0673SMinchan Kim if (!sc->may_unmap) 209661317289SHillf Danton isolate_mode |= ISOLATE_UNMAPPED; 2097f80c0673SMinchan Kim 2098599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 2099925b7673SJohannes Weiner 21005dc35979SKonstantin Khlebnikov nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold, 21015dc35979SKonstantin Khlebnikov &nr_scanned, sc, isolate_mode, lru); 210289b5fae5SJohannes Weiner 2103599d0c95SMel Gorman __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 2104b7c46d15SJohannes Weiner reclaim_stat->recent_scanned[file] += nr_taken; 21051cfb419bSKAMEZAWA Hiroyuki 2106599d0c95SMel Gorman __count_vm_events(PGREFILL, nr_scanned); 21072262185cSRoman Gushchin count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned); 21089d5e6a9fSHugh Dickins 2109599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 21101da177e4SLinus Torvalds 21111da177e4SLinus Torvalds while (!list_empty(&l_hold)) { 21121da177e4SLinus Torvalds cond_resched(); 21131da177e4SLinus Torvalds page = lru_to_page(&l_hold); 21141da177e4SLinus Torvalds list_del(&page->lru); 21157e9cd484SRik van Riel 211639b5f29aSHugh Dickins if (unlikely(!page_evictable(page))) { 2117894bc310SLee Schermerhorn putback_lru_page(page); 2118894bc310SLee Schermerhorn continue; 2119894bc310SLee Schermerhorn } 2120894bc310SLee Schermerhorn 2121cc715d99SMel Gorman if (unlikely(buffer_heads_over_limit)) { 2122cc715d99SMel Gorman if (page_has_private(page) && trylock_page(page)) { 2123cc715d99SMel Gorman if (page_has_private(page)) 2124cc715d99SMel Gorman try_to_release_page(page, 0); 2125cc715d99SMel Gorman unlock_page(page); 2126cc715d99SMel Gorman } 2127cc715d99SMel Gorman } 2128cc715d99SMel Gorman 2129c3ac9a8aSJohannes Weiner if (page_referenced(page, 0, sc->target_mem_cgroup, 2130c3ac9a8aSJohannes Weiner &vm_flags)) { 21319992af10SRik van Riel nr_rotated += hpage_nr_pages(page); 21328cab4754SWu Fengguang /* 21338cab4754SWu Fengguang * Identify referenced, file-backed active pages and 21348cab4754SWu Fengguang * give them one more trip around the active list. So 21358cab4754SWu Fengguang * that executable code get better chances to stay in 21368cab4754SWu Fengguang * memory under moderate memory pressure. Anon pages 21378cab4754SWu Fengguang * are not likely to be evicted by use-once streaming 21388cab4754SWu Fengguang * IO, plus JVM can create lots of anon VM_EXEC pages, 21398cab4754SWu Fengguang * so we ignore them here. 
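 * (page_check_references() applies the same VM_EXEC heuristic when it
 * decides whether a referenced inactive page should be activated.)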
21408cab4754SWu Fengguang */ 214141e20983SWu Fengguang if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { 21428cab4754SWu Fengguang list_add(&page->lru, &l_active); 21438cab4754SWu Fengguang continue; 21448cab4754SWu Fengguang } 21458cab4754SWu Fengguang } 21467e9cd484SRik van Riel 21475205e56eSKOSAKI Motohiro ClearPageActive(page); /* we are de-activating */ 21481da177e4SLinus Torvalds list_add(&page->lru, &l_inactive); 21491da177e4SLinus Torvalds } 21501da177e4SLinus Torvalds 2151b555749aSAndrew Morton /* 21528cab4754SWu Fengguang * Move pages back to the lru list. 2153b555749aSAndrew Morton */ 2154599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 21554f98a2feSRik van Riel /* 21568cab4754SWu Fengguang * Count referenced pages from currently used mappings as rotated, 21578cab4754SWu Fengguang * even though only some of them are actually re-activated. This 21588cab4754SWu Fengguang * helps balance scan pressure between file and anonymous pages in 21597c0db9e9SJerome Marchand * get_scan_count. 2160556adecbSRik van Riel */ 2161b7c46d15SJohannes Weiner reclaim_stat->recent_rotated[file] += nr_rotated; 2162556adecbSRik van Riel 21639d998b4fSMichal Hocko nr_activate = move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru); 21649d998b4fSMichal Hocko nr_deactivate = move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE); 2165599d0c95SMel Gorman __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 2166599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 21672bcf8879SHugh Dickins 2168747db954SJohannes Weiner mem_cgroup_uncharge_list(&l_hold); 21692d4894b5SMel Gorman free_unref_page_list(&l_hold); 21709d998b4fSMichal Hocko trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, 21719d998b4fSMichal Hocko nr_deactivate, nr_rotated, sc->priority, file); 21721da177e4SLinus Torvalds } 21731da177e4SLinus Torvalds 217459dc76b0SRik van Riel /* 217559dc76b0SRik van Riel * The inactive anon list should be small enough that the VM never has 217659dc76b0SRik van Riel * to do too much work. 217714797e23SKOSAKI Motohiro * 217859dc76b0SRik van Riel * The inactive file list should be small enough to leave most memory 217959dc76b0SRik van Riel * to the established workingset on the scan-resistant active list, 218059dc76b0SRik van Riel * but large enough to avoid thrashing the aggregate readahead window. 218159dc76b0SRik van Riel * 218259dc76b0SRik van Riel * Both inactive lists should also be large enough that each inactive 218359dc76b0SRik van Riel * page has a chance to be referenced again before it is reclaimed. 218459dc76b0SRik van Riel * 21852a2e4885SJohannes Weiner * If that fails and refaulting is observed, the inactive list grows. 21862a2e4885SJohannes Weiner * 218759dc76b0SRik van Riel * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages 21883a50d14dSAndrey Ryabinin * on this LRU, maintained by the pageout code. An inactive_ratio 218959dc76b0SRik van Riel * of 3 means 3:1 or 25% of the pages are kept on the inactive list. 
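 *
 * The ratio is derived from the combined size of the two lists:
 * gb = (inactive + active) in gigabytes, and the target is
 * inactive_ratio = int_sqrt(10 * gb) when gb is non-zero, 1 otherwise.
 * For example, 100GB of pages gives int_sqrt(1000) = 31, i.e. roughly
 * 3GB kept inactive, which is where the 100GB row below comes from.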
219059dc76b0SRik van Riel * 219159dc76b0SRik van Riel * total target max 219259dc76b0SRik van Riel * memory ratio inactive 219359dc76b0SRik van Riel * ------------------------------------- 219459dc76b0SRik van Riel * 10MB 1 5MB 219559dc76b0SRik van Riel * 100MB 1 50MB 219659dc76b0SRik van Riel * 1GB 3 250MB 219759dc76b0SRik van Riel * 10GB 10 0.9GB 219859dc76b0SRik van Riel * 100GB 31 3GB 219959dc76b0SRik van Riel * 1TB 101 10GB 220059dc76b0SRik van Riel * 10TB 320 32GB 220114797e23SKOSAKI Motohiro */ 2202f8d1a311SMel Gorman static bool inactive_list_is_low(struct lruvec *lruvec, bool file, 22032a2e4885SJohannes Weiner struct mem_cgroup *memcg, 22042a2e4885SJohannes Weiner struct scan_control *sc, bool actual_reclaim) 220514797e23SKOSAKI Motohiro { 2206fd538803SMichal Hocko enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE; 22072a2e4885SJohannes Weiner struct pglist_data *pgdat = lruvec_pgdat(lruvec); 22082a2e4885SJohannes Weiner enum lru_list inactive_lru = file * LRU_FILE; 22092a2e4885SJohannes Weiner unsigned long inactive, active; 22102a2e4885SJohannes Weiner unsigned long inactive_ratio; 22112a2e4885SJohannes Weiner unsigned long refaults; 221259dc76b0SRik van Riel unsigned long gb; 221359dc76b0SRik van Riel 221474e3f3c3SMinchan Kim /* 221574e3f3c3SMinchan Kim * If we don't have swap space, anonymous page deactivation 221674e3f3c3SMinchan Kim * is pointless. 221774e3f3c3SMinchan Kim */ 221859dc76b0SRik van Riel if (!file && !total_swap_pages) 221942e2e457SYaowei Bai return false; 222074e3f3c3SMinchan Kim 2221fd538803SMichal Hocko inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx); 2222fd538803SMichal Hocko active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx); 2223f8d1a311SMel Gorman 22242a2e4885SJohannes Weiner if (memcg) 2225ccda7f43SJohannes Weiner refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE); 22262a2e4885SJohannes Weiner else 22272a2e4885SJohannes Weiner refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE); 22282a2e4885SJohannes Weiner 22292a2e4885SJohannes Weiner /* 22302a2e4885SJohannes Weiner * When refaults are being observed, it means a new workingset 22312a2e4885SJohannes Weiner * is being established. Disable active list protection to get 22322a2e4885SJohannes Weiner * rid of the stale workingset quickly. 
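 *
 * (Editorial illustration with assumed sizes: with 1GB of inactive and
 * 3GB of active file pages, gb = 4 and inactive_ratio = int_sqrt(40) = 6,
 * so inactive * 6 exceeds active and the active list is left alone.
 * Once refaults are observed during actual reclaim, the ratio drops to
 * 0 and the active list is deactivated so the new workingset can form.)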
22332a2e4885SJohannes Weiner */ 22342a2e4885SJohannes Weiner if (file && actual_reclaim && lruvec->refaults != refaults) { 22352a2e4885SJohannes Weiner inactive_ratio = 0; 22362a2e4885SJohannes Weiner } else { 223759dc76b0SRik van Riel gb = (inactive + active) >> (30 - PAGE_SHIFT); 223859dc76b0SRik van Riel if (gb) 223959dc76b0SRik van Riel inactive_ratio = int_sqrt(10 * gb); 2240b39415b2SRik van Riel else 224159dc76b0SRik van Riel inactive_ratio = 1; 22422a2e4885SJohannes Weiner } 224359dc76b0SRik van Riel 22442a2e4885SJohannes Weiner if (actual_reclaim) 22452a2e4885SJohannes Weiner trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx, 2246fd538803SMichal Hocko lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive, 2247fd538803SMichal Hocko lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active, 2248fd538803SMichal Hocko inactive_ratio, file); 2249fd538803SMichal Hocko 225059dc76b0SRik van Riel return inactive * inactive_ratio < active; 2251b39415b2SRik van Riel } 2252b39415b2SRik van Riel 22534f98a2feSRik van Riel static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 22542a2e4885SJohannes Weiner struct lruvec *lruvec, struct mem_cgroup *memcg, 22552a2e4885SJohannes Weiner struct scan_control *sc) 2256b69408e8SChristoph Lameter { 2257b39415b2SRik van Riel if (is_active_lru(lru)) { 22582a2e4885SJohannes Weiner if (inactive_list_is_low(lruvec, is_file_lru(lru), 22592a2e4885SJohannes Weiner memcg, sc, true)) 22601a93be0eSKonstantin Khlebnikov shrink_active_list(nr_to_scan, lruvec, sc, lru); 2261556adecbSRik van Riel return 0; 2262556adecbSRik van Riel } 2263556adecbSRik van Riel 22641a93be0eSKonstantin Khlebnikov return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); 2265b69408e8SChristoph Lameter } 2266b69408e8SChristoph Lameter 22679a265114SJohannes Weiner enum scan_balance { 22689a265114SJohannes Weiner SCAN_EQUAL, 22699a265114SJohannes Weiner SCAN_FRACT, 22709a265114SJohannes Weiner SCAN_ANON, 22719a265114SJohannes Weiner SCAN_FILE, 22729a265114SJohannes Weiner }; 22739a265114SJohannes Weiner 22741da177e4SLinus Torvalds /* 22754f98a2feSRik van Riel * Determine how aggressively the anon and file LRU lists should be 22764f98a2feSRik van Riel * scanned. The relative value of each set of LRU lists is determined 22774f98a2feSRik van Riel * by looking at the fraction of the pages scanned we did rotate back 22784f98a2feSRik van Riel * onto the active list instead of evict. 
22794f98a2feSRik van Riel * 2280be7bd59dSWanpeng Li * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan 2281be7bd59dSWanpeng Li * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan 22824f98a2feSRik van Riel */ 228333377678SVladimir Davydov static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, 22846b4f7799SJohannes Weiner struct scan_control *sc, unsigned long *nr, 22856b4f7799SJohannes Weiner unsigned long *lru_pages) 22864f98a2feSRik van Riel { 228733377678SVladimir Davydov int swappiness = mem_cgroup_swappiness(memcg); 228890126375SKonstantin Khlebnikov struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 22899a265114SJohannes Weiner u64 fraction[2]; 22909a265114SJohannes Weiner u64 denominator = 0; /* gcc */ 2291599d0c95SMel Gorman struct pglist_data *pgdat = lruvec_pgdat(lruvec); 22929a265114SJohannes Weiner unsigned long anon_prio, file_prio; 22939a265114SJohannes Weiner enum scan_balance scan_balance; 22940bf1457fSJohannes Weiner unsigned long anon, file; 22959a265114SJohannes Weiner unsigned long ap, fp; 22969a265114SJohannes Weiner enum lru_list lru; 229776a33fc3SShaohua Li 229876a33fc3SShaohua Li /* If we have no swap space, do not bother scanning anon pages. */ 2299d8b38438SVladimir Davydov if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) { 23009a265114SJohannes Weiner scan_balance = SCAN_FILE; 230176a33fc3SShaohua Li goto out; 230276a33fc3SShaohua Li } 23034f98a2feSRik van Riel 230410316b31SJohannes Weiner /* 230510316b31SJohannes Weiner * Global reclaim will swap to prevent OOM even with no 230610316b31SJohannes Weiner * swappiness, but memcg users want to use this knob to 230710316b31SJohannes Weiner * disable swapping for individual groups completely when 230810316b31SJohannes Weiner * using the memory controller's swap limit feature would be 230910316b31SJohannes Weiner * too expensive. 231010316b31SJohannes Weiner */ 231102695175SJohannes Weiner if (!global_reclaim(sc) && !swappiness) { 23129a265114SJohannes Weiner scan_balance = SCAN_FILE; 231310316b31SJohannes Weiner goto out; 231410316b31SJohannes Weiner } 231510316b31SJohannes Weiner 231610316b31SJohannes Weiner /* 231710316b31SJohannes Weiner * Do not apply any pressure balancing cleverness when the 231810316b31SJohannes Weiner * system is close to OOM, scan both anon and file equally 231910316b31SJohannes Weiner * (unless the swappiness setting disagrees with swapping). 232010316b31SJohannes Weiner */ 232102695175SJohannes Weiner if (!sc->priority && swappiness) { 23229a265114SJohannes Weiner scan_balance = SCAN_EQUAL; 232310316b31SJohannes Weiner goto out; 232410316b31SJohannes Weiner } 232510316b31SJohannes Weiner 232611d16c25SJohannes Weiner /* 232762376251SJohannes Weiner * Prevent the reclaimer from falling into the cache trap: as 232862376251SJohannes Weiner * cache pages start out inactive, every cache fault will tip 232962376251SJohannes Weiner * the scan balance towards the file LRU. And as the file LRU 233062376251SJohannes Weiner * shrinks, so does the window for rotation from references. 233162376251SJohannes Weiner * This means we have a runaway feedback loop where a tiny 233262376251SJohannes Weiner * thrashing file LRU becomes infinitely more attractive than 233362376251SJohannes Weiner * anon pages. Try to detect this based on file LRU size. 
233462376251SJohannes Weiner */ 233562376251SJohannes Weiner if (global_reclaim(sc)) { 2336599d0c95SMel Gorman unsigned long pgdatfile; 2337599d0c95SMel Gorman unsigned long pgdatfree; 2338599d0c95SMel Gorman int z; 2339599d0c95SMel Gorman unsigned long total_high_wmark = 0; 234062376251SJohannes Weiner 2341599d0c95SMel Gorman pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); 2342599d0c95SMel Gorman pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) + 2343599d0c95SMel Gorman node_page_state(pgdat, NR_INACTIVE_FILE); 23442ab051e1SJerome Marchand 2345599d0c95SMel Gorman for (z = 0; z < MAX_NR_ZONES; z++) { 2346599d0c95SMel Gorman struct zone *zone = &pgdat->node_zones[z]; 23476aa303deSMel Gorman if (!managed_zone(zone)) 2348599d0c95SMel Gorman continue; 2349599d0c95SMel Gorman 2350599d0c95SMel Gorman total_high_wmark += high_wmark_pages(zone); 2351599d0c95SMel Gorman } 2352599d0c95SMel Gorman 2353599d0c95SMel Gorman if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) { 235406226226SDavid Rientjes /* 235506226226SDavid Rientjes * Force SCAN_ANON if there are enough inactive 235606226226SDavid Rientjes * anonymous pages on the LRU in eligible zones. 235706226226SDavid Rientjes * Otherwise, the small LRU gets thrashed. 235806226226SDavid Rientjes */ 235906226226SDavid Rientjes if (!inactive_list_is_low(lruvec, false, memcg, sc, false) && 236006226226SDavid Rientjes lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx) 236106226226SDavid Rientjes >> sc->priority) { 236262376251SJohannes Weiner scan_balance = SCAN_ANON; 236362376251SJohannes Weiner goto out; 236462376251SJohannes Weiner } 236562376251SJohannes Weiner } 236606226226SDavid Rientjes } 236762376251SJohannes Weiner 236862376251SJohannes Weiner /* 2369316bda0eSVladimir Davydov * If there is enough inactive page cache, i.e. if the size of the 2370316bda0eSVladimir Davydov * inactive list is greater than that of the active list *and* the 2371316bda0eSVladimir Davydov * inactive list actually has some pages to scan on this priority, we 2372316bda0eSVladimir Davydov * do not reclaim anything from the anonymous working set right now. 2373316bda0eSVladimir Davydov * Without the second condition we could end up never scanning an 2374316bda0eSVladimir Davydov * lruvec even if it has plenty of old anonymous pages unless the 2375316bda0eSVladimir Davydov * system is under heavy pressure. 2376e9868505SRik van Riel */ 23772a2e4885SJohannes Weiner if (!inactive_list_is_low(lruvec, true, memcg, sc, false) && 237871ab6cfeSMichal Hocko lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) { 23799a265114SJohannes Weiner scan_balance = SCAN_FILE; 2380e9868505SRik van Riel goto out; 23814f98a2feSRik van Riel } 23824f98a2feSRik van Riel 23839a265114SJohannes Weiner scan_balance = SCAN_FRACT; 23849a265114SJohannes Weiner 23854f98a2feSRik van Riel /* 238658c37f6eSKOSAKI Motohiro * With swappiness at 100, anonymous and file have the same priority. 238758c37f6eSKOSAKI Motohiro * This scanning priority is essentially the inverse of IO cost. 238858c37f6eSKOSAKI Motohiro */ 238902695175SJohannes Weiner anon_prio = swappiness; 239075b00af7SHugh Dickins file_prio = 200 - anon_prio; 239158c37f6eSKOSAKI Motohiro 239258c37f6eSKOSAKI Motohiro /* 23934f98a2feSRik van Riel * OK, so we have swap space and a fair amount of page cache 23944f98a2feSRik van Riel * pages. We use the recently rotated / recently scanned 23954f98a2feSRik van Riel * ratios to determine how valuable each cache is. 
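 *
 * (Editorial illustration with assumed numbers: with swappiness 60,
 * anon_prio = 60 and file_prio = 140. If 1000 anon pages were recently
 * scanned and 100 of them rotated, ap = 60 * 1001 / 101 = 594; if 1000
 * file pages were scanned and 500 rotated, fp = 140 * 1001 / 501 = 279.
 * Each anon scan target is then scaled by 594 / (594 + 279 + 1), about
 * 68%, while file targets are scaled by about 32%, because the anon
 * pages were rarely re-referenced.)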
23964f98a2feSRik van Riel * 23974f98a2feSRik van Riel * Because workloads change over time (and to avoid overflow) 23984f98a2feSRik van Riel * we keep these statistics as a floating average, which ends 23994f98a2feSRik van Riel * up weighing recent references more than old ones. 24004f98a2feSRik van Riel * 24014f98a2feSRik van Riel * anon in [0], file in [1] 24024f98a2feSRik van Riel */ 24032ab051e1SJerome Marchand 2404fd538803SMichal Hocko anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) + 2405fd538803SMichal Hocko lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES); 2406fd538803SMichal Hocko file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) + 2407fd538803SMichal Hocko lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES); 24082ab051e1SJerome Marchand 2409599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 241058c37f6eSKOSAKI Motohiro if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { 24116e901571SKOSAKI Motohiro reclaim_stat->recent_scanned[0] /= 2; 24126e901571SKOSAKI Motohiro reclaim_stat->recent_rotated[0] /= 2; 24134f98a2feSRik van Riel } 24144f98a2feSRik van Riel 24156e901571SKOSAKI Motohiro if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { 24166e901571SKOSAKI Motohiro reclaim_stat->recent_scanned[1] /= 2; 24176e901571SKOSAKI Motohiro reclaim_stat->recent_rotated[1] /= 2; 24184f98a2feSRik van Riel } 24194f98a2feSRik van Riel 24204f98a2feSRik van Riel /* 242100d8089cSRik van Riel * The amount of pressure on anon vs file pages is inversely 242200d8089cSRik van Riel * proportional to the fraction of recently scanned pages on 242300d8089cSRik van Riel * each list that were recently referenced and in active use. 24244f98a2feSRik van Riel */ 2425fe35004fSSatoru Moriya ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1); 24266e901571SKOSAKI Motohiro ap /= reclaim_stat->recent_rotated[0] + 1; 24274f98a2feSRik van Riel 2428fe35004fSSatoru Moriya fp = file_prio * (reclaim_stat->recent_scanned[1] + 1); 24296e901571SKOSAKI Motohiro fp /= reclaim_stat->recent_rotated[1] + 1; 2430599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 24314f98a2feSRik van Riel 243276a33fc3SShaohua Li fraction[0] = ap; 243376a33fc3SShaohua Li fraction[1] = fp; 243476a33fc3SShaohua Li denominator = ap + fp + 1; 243576a33fc3SShaohua Li out: 24366b4f7799SJohannes Weiner *lru_pages = 0; 24374111304dSHugh Dickins for_each_evictable_lru(lru) { 24384111304dSHugh Dickins int file = is_file_lru(lru); 2439d778df51SJohannes Weiner unsigned long size; 244076a33fc3SShaohua Li unsigned long scan; 244176a33fc3SShaohua Li 244271ab6cfeSMichal Hocko size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); 2443d778df51SJohannes Weiner scan = size >> sc->priority; 2444688035f7SJohannes Weiner /* 2445688035f7SJohannes Weiner * If the cgroup's already been deleted, make sure to 2446688035f7SJohannes Weiner * scrape out the remaining cache. 2447688035f7SJohannes Weiner */ 2448688035f7SJohannes Weiner if (!scan && !mem_cgroup_online(memcg)) 2449d778df51SJohannes Weiner scan = min(size, SWAP_CLUSTER_MAX); 24509a265114SJohannes Weiner 24519a265114SJohannes Weiner switch (scan_balance) { 24529a265114SJohannes Weiner case SCAN_EQUAL: 24539a265114SJohannes Weiner /* Scan lists relative to size */ 24549a265114SJohannes Weiner break; 24559a265114SJohannes Weiner case SCAN_FRACT: 24569a265114SJohannes Weiner /* 24579a265114SJohannes Weiner * Scan types proportional to swappiness and 24589a265114SJohannes Weiner * their relative recent reclaim efficiency. 
2459*68600f62SRoman Gushchin * Make sure we don't miss the last page 2460*68600f62SRoman Gushchin * because of a round-off error. 24619a265114SJohannes Weiner */ 2462*68600f62SRoman Gushchin scan = DIV64_U64_ROUND_UP(scan * fraction[file], 24636f04f48dSSuleiman Souhlal denominator); 24649a265114SJohannes Weiner break; 24659a265114SJohannes Weiner case SCAN_FILE: 24669a265114SJohannes Weiner case SCAN_ANON: 24679a265114SJohannes Weiner /* Scan one type exclusively */ 24686b4f7799SJohannes Weiner if ((scan_balance == SCAN_FILE) != file) { 24696b4f7799SJohannes Weiner size = 0; 24709a265114SJohannes Weiner scan = 0; 24716b4f7799SJohannes Weiner } 24729a265114SJohannes Weiner break; 24739a265114SJohannes Weiner default: 24749a265114SJohannes Weiner /* Look ma, no brain */ 24759a265114SJohannes Weiner BUG(); 24769a265114SJohannes Weiner } 24776b4f7799SJohannes Weiner 24786b4f7799SJohannes Weiner *lru_pages += size; 24794111304dSHugh Dickins nr[lru] = scan; 248076a33fc3SShaohua Li } 24816e08a369SWu Fengguang } 24824f98a2feSRik van Riel 24839b4f98cdSJohannes Weiner /* 2484a9dd0a83SMel Gorman * This is a basic per-node page freer. Used by both kswapd and direct reclaim. 24859b4f98cdSJohannes Weiner */ 2486a9dd0a83SMel Gorman static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg, 24876b4f7799SJohannes Weiner struct scan_control *sc, unsigned long *lru_pages) 24889b4f98cdSJohannes Weiner { 2489ef8f2327SMel Gorman struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg); 24909b4f98cdSJohannes Weiner unsigned long nr[NR_LRU_LISTS]; 2491e82e0561SMel Gorman unsigned long targets[NR_LRU_LISTS]; 24929b4f98cdSJohannes Weiner unsigned long nr_to_scan; 24939b4f98cdSJohannes Weiner enum lru_list lru; 24949b4f98cdSJohannes Weiner unsigned long nr_reclaimed = 0; 24959b4f98cdSJohannes Weiner unsigned long nr_to_reclaim = sc->nr_to_reclaim; 24969b4f98cdSJohannes Weiner struct blk_plug plug; 24971a501907SMel Gorman bool scan_adjusted; 24989b4f98cdSJohannes Weiner 249933377678SVladimir Davydov get_scan_count(lruvec, memcg, sc, nr, lru_pages); 25009b4f98cdSJohannes Weiner 2501e82e0561SMel Gorman /* Record the original scan target for proportional adjustments later */ 2502e82e0561SMel Gorman memcpy(targets, nr, sizeof(nr)); 2503e82e0561SMel Gorman 25041a501907SMel Gorman /* 25051a501907SMel Gorman * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal 25061a501907SMel Gorman * event that can occur when there is little memory pressure e.g. 25071a501907SMel Gorman * multiple streaming readers/writers. Hence, we do not abort scanning 25081a501907SMel Gorman * when the requested number of pages are reclaimed when scanning at 25091a501907SMel Gorman * DEF_PRIORITY on the assumption that the fact we are direct 25101a501907SMel Gorman * reclaiming implies that kswapd is not keeping up and it is best to 25111a501907SMel Gorman * do a batch of work at once. For memcg reclaim one check is made to 25121a501907SMel Gorman * abort proportional reclaim if either the file or anon lru has already 25131a501907SMel Gorman * dropped to zero at the first pass. 
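 *
 * (Editorial illustration of the proportional wind-down further down in
 * this loop, using assumed numbers: with original targets of 100 anon
 * and 400 file pages, suppose nr_to_reclaim is met once 64 pages of
 * each have been scanned. The smaller anon side is zeroed, percentage =
 * 36 * 100 / 101 = 35, and the remaining file target is cut to
 * 400 * 65 / 100 - 64 = 196 pages, so both LRUs end up scanned to
 * roughly the same share of their original targets.)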
25141a501907SMel Gorman */ 25151a501907SMel Gorman scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() && 25161a501907SMel Gorman sc->priority == DEF_PRIORITY); 25171a501907SMel Gorman 25189b4f98cdSJohannes Weiner blk_start_plug(&plug); 25199b4f98cdSJohannes Weiner while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 25209b4f98cdSJohannes Weiner nr[LRU_INACTIVE_FILE]) { 2521e82e0561SMel Gorman unsigned long nr_anon, nr_file, percentage; 2522e82e0561SMel Gorman unsigned long nr_scanned; 2523e82e0561SMel Gorman 25249b4f98cdSJohannes Weiner for_each_evictable_lru(lru) { 25259b4f98cdSJohannes Weiner if (nr[lru]) { 25269b4f98cdSJohannes Weiner nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); 25279b4f98cdSJohannes Weiner nr[lru] -= nr_to_scan; 25289b4f98cdSJohannes Weiner 25299b4f98cdSJohannes Weiner nr_reclaimed += shrink_list(lru, nr_to_scan, 25302a2e4885SJohannes Weiner lruvec, memcg, sc); 25319b4f98cdSJohannes Weiner } 25329b4f98cdSJohannes Weiner } 2533e82e0561SMel Gorman 2534bd041733SMichal Hocko cond_resched(); 2535bd041733SMichal Hocko 2536e82e0561SMel Gorman if (nr_reclaimed < nr_to_reclaim || scan_adjusted) 2537e82e0561SMel Gorman continue; 2538e82e0561SMel Gorman 25399b4f98cdSJohannes Weiner /* 2540e82e0561SMel Gorman * For kswapd and memcg, reclaim at least the number of pages 25411a501907SMel Gorman * requested. Ensure that the anon and file LRUs are scanned 2542e82e0561SMel Gorman * proportionally to what was requested by get_scan_count(). We 2543e82e0561SMel Gorman * stop reclaiming one LRU and reduce the amount of scanning 2544e82e0561SMel Gorman * proportional to the original scan target. 2545e82e0561SMel Gorman */ 2546e82e0561SMel Gorman nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE]; 2547e82e0561SMel Gorman nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON]; 2548e82e0561SMel Gorman 25491a501907SMel Gorman /* 25501a501907SMel Gorman * It's just vindictive to attack the larger once the smaller 25511a501907SMel Gorman * has gone to zero. And given the way we stop scanning the 25521a501907SMel Gorman * smaller below, this makes sure that we only make one nudge 25531a501907SMel Gorman * towards proportionality once we've got nr_to_reclaim. 25541a501907SMel Gorman */ 25551a501907SMel Gorman if (!nr_file || !nr_anon) 25561a501907SMel Gorman break; 25571a501907SMel Gorman 2558e82e0561SMel Gorman if (nr_file > nr_anon) { 2559e82e0561SMel Gorman unsigned long scan_target = targets[LRU_INACTIVE_ANON] + 2560e82e0561SMel Gorman targets[LRU_ACTIVE_ANON] + 1; 2561e82e0561SMel Gorman lru = LRU_BASE; 2562e82e0561SMel Gorman percentage = nr_anon * 100 / scan_target; 2563e82e0561SMel Gorman } else { 2564e82e0561SMel Gorman unsigned long scan_target = targets[LRU_INACTIVE_FILE] + 2565e82e0561SMel Gorman targets[LRU_ACTIVE_FILE] + 1; 2566e82e0561SMel Gorman lru = LRU_FILE; 2567e82e0561SMel Gorman percentage = nr_file * 100 / scan_target; 2568e82e0561SMel Gorman } 2569e82e0561SMel Gorman 2570e82e0561SMel Gorman /* Stop scanning the smaller of the two LRUs */ 2571e82e0561SMel Gorman nr[lru] = 0; 2572e82e0561SMel Gorman nr[lru + LRU_ACTIVE] = 0; 2573e82e0561SMel Gorman 2574e82e0561SMel Gorman /* 2575e82e0561SMel Gorman * Recalculate the other LRU scan count based on its original 2576e82e0561SMel Gorman * scan target and the percentage scanning already complete 2577e82e0561SMel Gorman */ 2578e82e0561SMel Gorman lru = (lru == LRU_FILE) ?
LRU_BASE : LRU_FILE; 2579e82e0561SMel Gorman nr_scanned = targets[lru] - nr[lru]; 2580e82e0561SMel Gorman nr[lru] = targets[lru] * (100 - percentage) / 100; 2581e82e0561SMel Gorman nr[lru] -= min(nr[lru], nr_scanned); 2582e82e0561SMel Gorman 2583e82e0561SMel Gorman lru += LRU_ACTIVE; 2584e82e0561SMel Gorman nr_scanned = targets[lru] - nr[lru]; 2585e82e0561SMel Gorman nr[lru] = targets[lru] * (100 - percentage) / 100; 2586e82e0561SMel Gorman nr[lru] -= min(nr[lru], nr_scanned); 2587e82e0561SMel Gorman 2588e82e0561SMel Gorman scan_adjusted = true; 25899b4f98cdSJohannes Weiner } 25909b4f98cdSJohannes Weiner blk_finish_plug(&plug); 25919b4f98cdSJohannes Weiner sc->nr_reclaimed += nr_reclaimed; 25929b4f98cdSJohannes Weiner 25939b4f98cdSJohannes Weiner /* 25949b4f98cdSJohannes Weiner * Even if we did not try to evict anon pages at all, we want to 25959b4f98cdSJohannes Weiner * rebalance the anon lru active/inactive ratio. 25969b4f98cdSJohannes Weiner */ 25972a2e4885SJohannes Weiner if (inactive_list_is_low(lruvec, false, memcg, sc, true)) 25989b4f98cdSJohannes Weiner shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 25999b4f98cdSJohannes Weiner sc, LRU_ACTIVE_ANON); 26009b4f98cdSJohannes Weiner } 26019b4f98cdSJohannes Weiner 260223b9da55SMel Gorman /* Use reclaim/compaction for costly allocs or under memory pressure */ 26039e3b2f8cSKonstantin Khlebnikov static bool in_reclaim_compaction(struct scan_control *sc) 260423b9da55SMel Gorman { 2605d84da3f9SKirill A. Shutemov if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && 260623b9da55SMel Gorman (sc->order > PAGE_ALLOC_COSTLY_ORDER || 26079e3b2f8cSKonstantin Khlebnikov sc->priority < DEF_PRIORITY - 2)) 260823b9da55SMel Gorman return true; 260923b9da55SMel Gorman 261023b9da55SMel Gorman return false; 261123b9da55SMel Gorman } 261223b9da55SMel Gorman 26134f98a2feSRik van Riel /* 261423b9da55SMel Gorman * Reclaim/compaction is used for high-order allocation requests. It reclaims 261523b9da55SMel Gorman * order-0 pages before compacting the zone. should_continue_reclaim() returns 261623b9da55SMel Gorman * true if more pages should be reclaimed such that when the page allocator 261723b9da55SMel Gorman * calls try_to_compact_zone() that it will have enough free pages to succeed. 261823b9da55SMel Gorman * It will give up earlier than that if there is difficulty reclaiming pages. 26193e7d3449SMel Gorman */ 2620a9dd0a83SMel Gorman static inline bool should_continue_reclaim(struct pglist_data *pgdat, 26213e7d3449SMel Gorman unsigned long nr_reclaimed, 26223e7d3449SMel Gorman unsigned long nr_scanned, 26233e7d3449SMel Gorman struct scan_control *sc) 26243e7d3449SMel Gorman { 26253e7d3449SMel Gorman unsigned long pages_for_compaction; 26263e7d3449SMel Gorman unsigned long inactive_lru_pages; 2627a9dd0a83SMel Gorman int z; 26283e7d3449SMel Gorman 26293e7d3449SMel Gorman /* If not in reclaim/compaction mode, stop */ 26309e3b2f8cSKonstantin Khlebnikov if (!in_reclaim_compaction(sc)) 26313e7d3449SMel Gorman return false; 26323e7d3449SMel Gorman 26332876592fSMel Gorman /* Consider stopping depending on scan and reclaim activity */ 2634dcda9b04SMichal Hocko if (sc->gfp_mask & __GFP_RETRY_MAYFAIL) { 26353e7d3449SMel Gorman /* 2636dcda9b04SMichal Hocko * For __GFP_RETRY_MAYFAIL allocations, stop reclaiming if the 26372876592fSMel Gorman * full LRU list has been scanned and we are still failing 26382876592fSMel Gorman * to reclaim pages. 
This full LRU scan is potentially 2639dcda9b04SMichal Hocko * expensive but a __GFP_RETRY_MAYFAIL caller really wants to succeed. 26403e7d3449SMel Gorman */ 26413e7d3449SMel Gorman if (!nr_reclaimed && !nr_scanned) 26423e7d3449SMel Gorman return false; 26432876592fSMel Gorman } else { 26442876592fSMel Gorman /* 2645dcda9b04SMichal Hocko * For non-__GFP_RETRY_MAYFAIL allocations which can presumably 26462876592fSMel Gorman * fail without consequence, stop if we failed to reclaim 26472876592fSMel Gorman * any pages from the last SWAP_CLUSTER_MAX number of 26482876592fSMel Gorman * pages that were scanned. This will return to the 26492876592fSMel Gorman * caller faster at the risk that reclaim/compaction and 26502876592fSMel Gorman * the resulting allocation attempt fail. 26512876592fSMel Gorman */ 26522876592fSMel Gorman if (!nr_reclaimed) 26532876592fSMel Gorman return false; 26542876592fSMel Gorman } 26553e7d3449SMel Gorman 26563e7d3449SMel Gorman /* 26573e7d3449SMel Gorman * If we have not reclaimed enough pages for compaction and the 26583e7d3449SMel Gorman * inactive lists are large enough, continue reclaiming 26593e7d3449SMel Gorman */ 26609861a62cSVlastimil Babka pages_for_compaction = compact_gap(sc->order); 2661a9dd0a83SMel Gorman inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); 2662ec8acf20SShaohua Li if (get_nr_swap_pages() > 0) 2663a9dd0a83SMel Gorman inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); 26643e7d3449SMel Gorman if (sc->nr_reclaimed < pages_for_compaction && 26653e7d3449SMel Gorman inactive_lru_pages > pages_for_compaction) 26663e7d3449SMel Gorman return true; 26673e7d3449SMel Gorman 26683e7d3449SMel Gorman /* If compaction would go ahead or the allocation would succeed, stop */ 2669a9dd0a83SMel Gorman for (z = 0; z <= sc->reclaim_idx; z++) { 2670a9dd0a83SMel Gorman struct zone *zone = &pgdat->node_zones[z]; 26716aa303deSMel Gorman if (!managed_zone(zone)) 2672a9dd0a83SMel Gorman continue; 2673a9dd0a83SMel Gorman 2674a9dd0a83SMel Gorman switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) { 2675cf378319SVlastimil Babka case COMPACT_SUCCESS: 26763e7d3449SMel Gorman case COMPACT_CONTINUE: 26773e7d3449SMel Gorman return false; 26783e7d3449SMel Gorman default: 2679a9dd0a83SMel Gorman /* check next zone */ 2680a9dd0a83SMel Gorman ; 26813e7d3449SMel Gorman } 26823e7d3449SMel Gorman } 2683a9dd0a83SMel Gorman return true; 2684a9dd0a83SMel Gorman } 26853e7d3449SMel Gorman 2686e3c1ac58SAndrey Ryabinin static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg) 2687e3c1ac58SAndrey Ryabinin { 2688e3c1ac58SAndrey Ryabinin return test_bit(PGDAT_CONGESTED, &pgdat->flags) || 2689e3c1ac58SAndrey Ryabinin (memcg && memcg_congested(pgdat, memcg)); 2690e3c1ac58SAndrey Ryabinin } 2691e3c1ac58SAndrey Ryabinin 2692970a39a3SMel Gorman static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) 2693f16015fbSJohannes Weiner { 2694cb731d6cSVladimir Davydov struct reclaim_state *reclaim_state = current->reclaim_state; 26959b4f98cdSJohannes Weiner unsigned long nr_reclaimed, nr_scanned; 26962344d7e4SJohannes Weiner bool reclaimable = false; 26979b4f98cdSJohannes Weiner 26989b4f98cdSJohannes Weiner do { 26995660048cSJohannes Weiner struct mem_cgroup *root = sc->target_mem_cgroup; 27005660048cSJohannes Weiner struct mem_cgroup_reclaim_cookie reclaim = { 2701ef8f2327SMel Gorman .pgdat = pgdat, 27029e3b2f8cSKonstantin Khlebnikov .priority = sc->priority, 27035660048cSJohannes Weiner }; 2704a9dd0a83SMel Gorman unsigned long node_lru_pages =
0; 2705694fbc0fSAndrew Morton struct mem_cgroup *memcg; 27065660048cSJohannes Weiner 2707d108c772SAndrey Ryabinin memset(&sc->nr, 0, sizeof(sc->nr)); 2708d108c772SAndrey Ryabinin 27099b4f98cdSJohannes Weiner nr_reclaimed = sc->nr_reclaimed; 27109b4f98cdSJohannes Weiner nr_scanned = sc->nr_scanned; 27119b4f98cdSJohannes Weiner 2712694fbc0fSAndrew Morton memcg = mem_cgroup_iter(root, NULL, &reclaim); 2713694fbc0fSAndrew Morton do { 27146b4f7799SJohannes Weiner unsigned long lru_pages; 27158e8ae645SJohannes Weiner unsigned long reclaimed; 2716cb731d6cSVladimir Davydov unsigned long scanned; 27179b4f98cdSJohannes Weiner 2718bf8d5d52SRoman Gushchin switch (mem_cgroup_protected(root, memcg)) { 2719bf8d5d52SRoman Gushchin case MEMCG_PROT_MIN: 2720bf8d5d52SRoman Gushchin /* 2721bf8d5d52SRoman Gushchin * Hard protection. 2722bf8d5d52SRoman Gushchin * If there is no reclaimable memory, OOM. 2723bf8d5d52SRoman Gushchin */ 2724bf8d5d52SRoman Gushchin continue; 2725bf8d5d52SRoman Gushchin case MEMCG_PROT_LOW: 2726bf8d5d52SRoman Gushchin /* 2727bf8d5d52SRoman Gushchin * Soft protection. 2728bf8d5d52SRoman Gushchin * Respect the protection only as long as 2729bf8d5d52SRoman Gushchin * there is an unprotected supply 2730bf8d5d52SRoman Gushchin * of reclaimable memory from other cgroups. 2731bf8d5d52SRoman Gushchin */ 2732d6622f63SYisheng Xie if (!sc->memcg_low_reclaim) { 2733d6622f63SYisheng Xie sc->memcg_low_skipped = 1; 2734241994edSJohannes Weiner continue; 2735d6622f63SYisheng Xie } 2736e27be240SJohannes Weiner memcg_memory_event(memcg, MEMCG_LOW); 2737bf8d5d52SRoman Gushchin break; 2738bf8d5d52SRoman Gushchin case MEMCG_PROT_NONE: 2739bf8d5d52SRoman Gushchin break; 2740241994edSJohannes Weiner } 2741241994edSJohannes Weiner 27428e8ae645SJohannes Weiner reclaimed = sc->nr_reclaimed; 2743cb731d6cSVladimir Davydov scanned = sc->nr_scanned; 2744a9dd0a83SMel Gorman shrink_node_memcg(pgdat, memcg, sc, &lru_pages); 2745a9dd0a83SMel Gorman node_lru_pages += lru_pages; 2746f9be23d6SKonstantin Khlebnikov 2747a9dd0a83SMel Gorman shrink_slab(sc->gfp_mask, pgdat->node_id, 27489092c71bSJosef Bacik memcg, sc->priority); 2749cb731d6cSVladimir Davydov 27508e8ae645SJohannes Weiner /* Record the group's reclaim efficiency */ 27518e8ae645SJohannes Weiner vmpressure(sc->gfp_mask, memcg, false, 27528e8ae645SJohannes Weiner sc->nr_scanned - scanned, 27538e8ae645SJohannes Weiner sc->nr_reclaimed - reclaimed); 27548e8ae645SJohannes Weiner 27555660048cSJohannes Weiner /* 2756a394cb8eSMichal Hocko * Direct reclaim and kswapd have to scan all memory 2757a394cb8eSMichal Hocko * cgroups to fulfill the overall scan target for the 2758a9dd0a83SMel Gorman * node. 2759a394cb8eSMichal Hocko * 2760a394cb8eSMichal Hocko * Limit reclaim, on the other hand, only cares about 2761a394cb8eSMichal Hocko * nr_to_reclaim pages to be reclaimed and it will 2762a394cb8eSMichal Hocko * retry with decreasing priority if one round over the 2763a394cb8eSMichal Hocko * whole hierarchy is not sufficient. 
27645660048cSJohannes Weiner */ 2765a394cb8eSMichal Hocko if (!global_reclaim(sc) && 2766a394cb8eSMichal Hocko sc->nr_reclaimed >= sc->nr_to_reclaim) { 27675660048cSJohannes Weiner mem_cgroup_iter_break(root, memcg); 27685660048cSJohannes Weiner break; 27695660048cSJohannes Weiner } 2770241994edSJohannes Weiner } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim))); 277170ddf637SAnton Vorontsov 27726b4f7799SJohannes Weiner if (reclaim_state) { 2773cb731d6cSVladimir Davydov sc->nr_reclaimed += reclaim_state->reclaimed_slab; 27746b4f7799SJohannes Weiner reclaim_state->reclaimed_slab = 0; 27756b4f7799SJohannes Weiner } 27766b4f7799SJohannes Weiner 27778e8ae645SJohannes Weiner /* Record the subtree's reclaim efficiency */ 27788e8ae645SJohannes Weiner vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, 277970ddf637SAnton Vorontsov sc->nr_scanned - nr_scanned, 278070ddf637SAnton Vorontsov sc->nr_reclaimed - nr_reclaimed); 278170ddf637SAnton Vorontsov 27822344d7e4SJohannes Weiner if (sc->nr_reclaimed - nr_reclaimed) 27832344d7e4SJohannes Weiner reclaimable = true; 27842344d7e4SJohannes Weiner 2785e3c1ac58SAndrey Ryabinin if (current_is_kswapd()) { 2786d108c772SAndrey Ryabinin /* 2787e3c1ac58SAndrey Ryabinin * If reclaim is isolating dirty pages under writeback, 2788e3c1ac58SAndrey Ryabinin * it implies that the long-lived page allocation rate 2789e3c1ac58SAndrey Ryabinin * is exceeding the page laundering rate. Either the 2790e3c1ac58SAndrey Ryabinin * global limits are not being effective at throttling 2791e3c1ac58SAndrey Ryabinin * processes due to the page distribution throughout 2792e3c1ac58SAndrey Ryabinin * zones or there is heavy usage of a slow backing 2793e3c1ac58SAndrey Ryabinin * device. The only option is to throttle from reclaim 2794e3c1ac58SAndrey Ryabinin * context which is not ideal as there is no guarantee 2795d108c772SAndrey Ryabinin * the dirtying process is throttled in the same way 2796d108c772SAndrey Ryabinin * balance_dirty_pages() manages. 2797d108c772SAndrey Ryabinin * 2798e3c1ac58SAndrey Ryabinin * Once a node is flagged PGDAT_WRITEBACK, kswapd will 2799e3c1ac58SAndrey Ryabinin * count the number of pages under pages flagged for 2800e3c1ac58SAndrey Ryabinin * immediate reclaim and stall if any are encountered 2801e3c1ac58SAndrey Ryabinin * in the nr_immediate check below. 2802d108c772SAndrey Ryabinin */ 2803d108c772SAndrey Ryabinin if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) 2804d108c772SAndrey Ryabinin set_bit(PGDAT_WRITEBACK, &pgdat->flags); 2805d108c772SAndrey Ryabinin 2806d108c772SAndrey Ryabinin /* 2807d108c772SAndrey Ryabinin * Tag a node as congested if all the dirty pages 2808d108c772SAndrey Ryabinin * scanned were backed by a congested BDI and 2809d108c772SAndrey Ryabinin * wait_iff_congested will stall. 
2810d108c772SAndrey Ryabinin */ 2811d108c772SAndrey Ryabinin if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) 2812d108c772SAndrey Ryabinin set_bit(PGDAT_CONGESTED, &pgdat->flags); 2813d108c772SAndrey Ryabinin 2814d108c772SAndrey Ryabinin /* Allow kswapd to start writing pages during reclaim. */ 2815d108c772SAndrey Ryabinin if (sc->nr.unqueued_dirty == sc->nr.file_taken) 2816d108c772SAndrey Ryabinin set_bit(PGDAT_DIRTY, &pgdat->flags); 2817d108c772SAndrey Ryabinin 2818d108c772SAndrey Ryabinin /* 2819d108c772SAndrey Ryabinin * If kswapd scans pages marked for immediate 2820d108c772SAndrey Ryabinin * reclaim and under writeback (nr_immediate), it 2821d108c772SAndrey Ryabinin * implies that pages are cycling through the LRU 2822d108c772SAndrey Ryabinin * faster than they are written so also forcibly stall. 2823d108c772SAndrey Ryabinin */ 2824d108c772SAndrey Ryabinin if (sc->nr.immediate) 2825d108c772SAndrey Ryabinin congestion_wait(BLK_RW_ASYNC, HZ/10); 2826d108c772SAndrey Ryabinin } 2827d108c772SAndrey Ryabinin 2828d108c772SAndrey Ryabinin /* 2829e3c1ac58SAndrey Ryabinin * Legacy memcg will stall in page writeback so avoid forcibly 2830e3c1ac58SAndrey Ryabinin * stalling in wait_iff_congested(). 2831e3c1ac58SAndrey Ryabinin */ 2832e3c1ac58SAndrey Ryabinin if (!global_reclaim(sc) && sane_reclaim(sc) && 2833e3c1ac58SAndrey Ryabinin sc->nr.dirty && sc->nr.dirty == sc->nr.congested) 2834e3c1ac58SAndrey Ryabinin set_memcg_congestion(pgdat, root, true); 2835e3c1ac58SAndrey Ryabinin 2836e3c1ac58SAndrey Ryabinin /* 2837d108c772SAndrey Ryabinin * Stall direct reclaim for IO completions if underlying BDIs 2838d108c772SAndrey Ryabinin * and the node are congested. Allow kswapd to continue until it 2839d108c772SAndrey Ryabinin * starts encountering unqueued dirty pages or cycling through 2840d108c772SAndrey Ryabinin * the LRU too quickly. 2841d108c772SAndrey Ryabinin */ 2842d108c772SAndrey Ryabinin if (!sc->hibernation_mode && !current_is_kswapd() && 2843e3c1ac58SAndrey Ryabinin current_may_throttle() && pgdat_memcg_congested(pgdat, root)) 2844e3c1ac58SAndrey Ryabinin wait_iff_congested(BLK_RW_ASYNC, HZ/10); 2845d108c772SAndrey Ryabinin 2846a9dd0a83SMel Gorman } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed, 28479b4f98cdSJohannes Weiner sc->nr_scanned - nr_scanned, sc)); 28482344d7e4SJohannes Weiner 2849c73322d0SJohannes Weiner /* 2850c73322d0SJohannes Weiner * Kswapd gives up on balancing particular nodes after too 2851c73322d0SJohannes Weiner * many failures to reclaim anything from them and goes to 2852c73322d0SJohannes Weiner * sleep. On reclaim progress, reset the failure counter. A 2853c73322d0SJohannes Weiner * successful direct reclaim run will revive a dormant kswapd. 2854c73322d0SJohannes Weiner */ 2855c73322d0SJohannes Weiner if (reclaimable) 2856c73322d0SJohannes Weiner pgdat->kswapd_failures = 0; 2857c73322d0SJohannes Weiner 28582344d7e4SJohannes Weiner return reclaimable; 2859f16015fbSJohannes Weiner } 2860f16015fbSJohannes Weiner 286153853e2dSVlastimil Babka /* 2862fdd4c614SVlastimil Babka * Returns true if compaction should go ahead for a costly-order request, or 2863fdd4c614SVlastimil Babka * the allocation would already succeed without compaction. Returns false if we 2864fdd4c614SVlastimil Babka * should reclaim first.
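 *
 * (Editorial note: compact_gap() is assumed here to be the helper from
 * include/linux/compaction.h that returns 2UL << order, so for an
 * order-9 THP request the buffer kept above the high watermark is
 * 2 << 9 = 1024 pages, about 4MB with 4KiB pages, covering both the
 * migration source and destination pages that compaction needs.)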
286553853e2dSVlastimil Babka */ 28664f588331SMel Gorman static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) 2867fe4b1b24SMel Gorman { 286831483b6aSMel Gorman unsigned long watermark; 2869fdd4c614SVlastimil Babka enum compact_result suitable; 2870fe4b1b24SMel Gorman 2871fdd4c614SVlastimil Babka suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx); 2872fdd4c614SVlastimil Babka if (suitable == COMPACT_SUCCESS) 2873fdd4c614SVlastimil Babka /* Allocation should succeed already. Don't reclaim. */ 2874fdd4c614SVlastimil Babka return true; 2875fdd4c614SVlastimil Babka if (suitable == COMPACT_SKIPPED) 2876fdd4c614SVlastimil Babka /* Compaction cannot yet proceed. Do reclaim. */ 2877fe4b1b24SMel Gorman return false; 2878fe4b1b24SMel Gorman 2879fdd4c614SVlastimil Babka /* 2880fdd4c614SVlastimil Babka * Compaction is already possible, but it takes time to run and there 2881fdd4c614SVlastimil Babka * are potentially other callers using the pages just freed. So proceed 2882fdd4c614SVlastimil Babka * with reclaim to make a buffer of free pages available to give 2883fdd4c614SVlastimil Babka * compaction a reasonable chance of completing and allocating the page. 2884fdd4c614SVlastimil Babka * Note that we won't actually reclaim the whole buffer in one attempt 2885fdd4c614SVlastimil Babka * as the target watermark in should_continue_reclaim() is lower. But if 2886fdd4c614SVlastimil Babka * we are already above the high+gap watermark, don't reclaim at all. 2887fdd4c614SVlastimil Babka */ 2888fdd4c614SVlastimil Babka watermark = high_wmark_pages(zone) + compact_gap(sc->order); 2889fdd4c614SVlastimil Babka 2890fdd4c614SVlastimil Babka return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); 2891fe4b1b24SMel Gorman } 2892fe4b1b24SMel Gorman 28931da177e4SLinus Torvalds /* 28941da177e4SLinus Torvalds * This is the direct reclaim path, for page-allocating processes. We only 28951da177e4SLinus Torvalds * try to reclaim pages from zones which will satisfy the caller's allocation 28961da177e4SLinus Torvalds * request. 28971da177e4SLinus Torvalds * 28981da177e4SLinus Torvalds * If a zone is deemed to be full of pinned pages then just give it a light 28991da177e4SLinus Torvalds * scan then give up on it. 
29001da177e4SLinus Torvalds */ 29010a0337e0SMichal Hocko static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc) 29021da177e4SLinus Torvalds { 2903dd1a239fSMel Gorman struct zoneref *z; 290454a6eb5cSMel Gorman struct zone *zone; 29050608f43dSAndrew Morton unsigned long nr_soft_reclaimed; 29060608f43dSAndrew Morton unsigned long nr_soft_scanned; 2907619d0d76SWeijie Yang gfp_t orig_mask; 290879dafcdcSMel Gorman pg_data_t *last_pgdat = NULL; 29091cfb419bSKAMEZAWA Hiroyuki 2910cc715d99SMel Gorman /* 2911cc715d99SMel Gorman * If the number of buffer_heads in the machine exceeds the maximum 2912cc715d99SMel Gorman * allowed level, force direct reclaim to scan the highmem zone as 2913cc715d99SMel Gorman * highmem pages could be pinning lowmem pages storing buffer_heads. 2914cc715d99SMel Gorman */ 2915619d0d76SWeijie Yang orig_mask = sc->gfp_mask; 2916b2e18757SMel Gorman if (buffer_heads_over_limit) { 2917cc715d99SMel Gorman sc->gfp_mask |= __GFP_HIGHMEM; 29184f588331SMel Gorman sc->reclaim_idx = gfp_zone(sc->gfp_mask); 2919b2e18757SMel Gorman } 2920cc715d99SMel Gorman 2921d4debc66SMel Gorman for_each_zone_zonelist_nodemask(zone, z, zonelist, 2922b2e18757SMel Gorman sc->reclaim_idx, sc->nodemask) { 2923b2e18757SMel Gorman /* 29241cfb419bSKAMEZAWA Hiroyuki * Take care that memory controller reclaiming has only a small 29251cfb419bSKAMEZAWA Hiroyuki * influence on the global LRU. 29261cfb419bSKAMEZAWA Hiroyuki */ 292789b5fae5SJohannes Weiner if (global_reclaim(sc)) { 2928344736f2SVladimir Davydov if (!cpuset_zone_allowed(zone, 2929344736f2SVladimir Davydov GFP_KERNEL | __GFP_HARDWALL)) 29301da177e4SLinus Torvalds continue; 293165ec02cbSVladimir Davydov 2932e0887c19SRik van Riel /* 2933e0c23279SMel Gorman * If we already have plenty of memory free for 2934e0c23279SMel Gorman * compaction in this zone, don't free any more. 2935e0c23279SMel Gorman * Even though compaction is invoked for any 2936e0c23279SMel Gorman * non-zero order, only frequent costly order 2937e0c23279SMel Gorman * reclamation is disruptive enough to become a 2938c7cfa37bSCopot Alexandru * noticeable problem, like transparent huge 2939c7cfa37bSCopot Alexandru * page allocations. 2940e0887c19SRik van Riel */ 29410b06496aSJohannes Weiner if (IS_ENABLED(CONFIG_COMPACTION) && 29420b06496aSJohannes Weiner sc->order > PAGE_ALLOC_COSTLY_ORDER && 29434f588331SMel Gorman compaction_ready(zone, sc)) { 29440b06496aSJohannes Weiner sc->compaction_ready = true; 2945e0887c19SRik van Riel continue; 2946e0887c19SRik van Riel } 29470b06496aSJohannes Weiner 29480608f43dSAndrew Morton /* 294979dafcdcSMel Gorman * Shrink each node in the zonelist once. If the 295079dafcdcSMel Gorman * zonelist is ordered by zone (not the default) then a 295179dafcdcSMel Gorman * node may be shrunk multiple times but in that case 295279dafcdcSMel Gorman * the user prefers lower zones being preserved. 295379dafcdcSMel Gorman */ 295479dafcdcSMel Gorman if (zone->zone_pgdat == last_pgdat) 295579dafcdcSMel Gorman continue; 295679dafcdcSMel Gorman 295779dafcdcSMel Gorman /* 29580608f43dSAndrew Morton * This steals pages from memory cgroups over softlimit 29590608f43dSAndrew Morton * and returns the number of reclaimed pages and 29600608f43dSAndrew Morton * scanned pages. This works for global memory pressure 29610608f43dSAndrew Morton * and balancing, not for a memcg's limit.
29620608f43dSAndrew Morton */ 29630608f43dSAndrew Morton nr_soft_scanned = 0; 2964ef8f2327SMel Gorman nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat, 29650608f43dSAndrew Morton sc->order, sc->gfp_mask, 29660608f43dSAndrew Morton &nr_soft_scanned); 29670608f43dSAndrew Morton sc->nr_reclaimed += nr_soft_reclaimed; 29680608f43dSAndrew Morton sc->nr_scanned += nr_soft_scanned; 2969ac34a1a3SKAMEZAWA Hiroyuki /* need some check for avoid more shrink_zone() */ 2970ac34a1a3SKAMEZAWA Hiroyuki } 2971d149e3b2SYing Han 297279dafcdcSMel Gorman /* See comment about same check for global reclaim above */ 297379dafcdcSMel Gorman if (zone->zone_pgdat == last_pgdat) 297479dafcdcSMel Gorman continue; 297579dafcdcSMel Gorman last_pgdat = zone->zone_pgdat; 2976970a39a3SMel Gorman shrink_node(zone->zone_pgdat, sc); 29771da177e4SLinus Torvalds } 2978e0c23279SMel Gorman 297965ec02cbSVladimir Davydov /* 2980619d0d76SWeijie Yang * Restore to original mask to avoid the impact on the caller if we 2981619d0d76SWeijie Yang * promoted it to __GFP_HIGHMEM. 2982619d0d76SWeijie Yang */ 2983619d0d76SWeijie Yang sc->gfp_mask = orig_mask; 29841da177e4SLinus Torvalds } 29851da177e4SLinus Torvalds 29862a2e4885SJohannes Weiner static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat) 29872a2e4885SJohannes Weiner { 29882a2e4885SJohannes Weiner struct mem_cgroup *memcg; 29892a2e4885SJohannes Weiner 29902a2e4885SJohannes Weiner memcg = mem_cgroup_iter(root_memcg, NULL, NULL); 29912a2e4885SJohannes Weiner do { 29922a2e4885SJohannes Weiner unsigned long refaults; 29932a2e4885SJohannes Weiner struct lruvec *lruvec; 29942a2e4885SJohannes Weiner 29952a2e4885SJohannes Weiner if (memcg) 2996ccda7f43SJohannes Weiner refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE); 29972a2e4885SJohannes Weiner else 29982a2e4885SJohannes Weiner refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE); 29992a2e4885SJohannes Weiner 30002a2e4885SJohannes Weiner lruvec = mem_cgroup_lruvec(pgdat, memcg); 30012a2e4885SJohannes Weiner lruvec->refaults = refaults; 30022a2e4885SJohannes Weiner } while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL))); 30032a2e4885SJohannes Weiner } 30042a2e4885SJohannes Weiner 30051da177e4SLinus Torvalds /* 30061da177e4SLinus Torvalds * This is the main entry point to direct page reclaim. 30071da177e4SLinus Torvalds * 30081da177e4SLinus Torvalds * If a full scan of the inactive list fails to free enough memory then we 30091da177e4SLinus Torvalds * are "out of memory" and something needs to be killed. 30101da177e4SLinus Torvalds * 30111da177e4SLinus Torvalds * If the caller is !__GFP_FS then the probability of a failure is reasonably 30121da177e4SLinus Torvalds * high - the zone may be full of dirty or under-writeback pages, which this 30135b0830cbSJens Axboe * caller can't do much about. We kick the writeback threads and take explicit 30145b0830cbSJens Axboe * naps in the hope that some of these pages can be written. But if the 30155b0830cbSJens Axboe * allocating task holds filesystem locks which prevent writeout this might not 30165b0830cbSJens Axboe * work, and the allocation attempt will fail. 
3017a41f24eaSNishanth Aravamudan * 3018a41f24eaSNishanth Aravamudan * returns: 0, if no pages reclaimed 3019a41f24eaSNishanth Aravamudan * else, the number of pages reclaimed 30201da177e4SLinus Torvalds */ 3021dac1d27bSMel Gorman static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 30223115cd91SVladimir Davydov struct scan_control *sc) 30231da177e4SLinus Torvalds { 3024241994edSJohannes Weiner int initial_priority = sc->priority; 30252a2e4885SJohannes Weiner pg_data_t *last_pgdat; 30262a2e4885SJohannes Weiner struct zoneref *z; 30272a2e4885SJohannes Weiner struct zone *zone; 3028241994edSJohannes Weiner retry: 3029873b4771SKeika Kobayashi delayacct_freepages_start(); 3030873b4771SKeika Kobayashi 303189b5fae5SJohannes Weiner if (global_reclaim(sc)) 30327cc30fcfSMel Gorman __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); 30331da177e4SLinus Torvalds 30349e3b2f8cSKonstantin Khlebnikov do { 303570ddf637SAnton Vorontsov vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, 303670ddf637SAnton Vorontsov sc->priority); 303766e1707bSBalbir Singh sc->nr_scanned = 0; 30380a0337e0SMichal Hocko shrink_zones(zonelist, sc); 3039e0c23279SMel Gorman 3040bb21c7ceSKOSAKI Motohiro if (sc->nr_reclaimed >= sc->nr_to_reclaim) 30410b06496aSJohannes Weiner break; 30420b06496aSJohannes Weiner 30430b06496aSJohannes Weiner if (sc->compaction_ready) 30440b06496aSJohannes Weiner break; 30451da177e4SLinus Torvalds 30461da177e4SLinus Torvalds /* 30470e50ce3bSMinchan Kim * If we're getting trouble reclaiming, start doing 30480e50ce3bSMinchan Kim * writepage even in laptop mode. 30490e50ce3bSMinchan Kim */ 30500e50ce3bSMinchan Kim if (sc->priority < DEF_PRIORITY - 2) 30510e50ce3bSMinchan Kim sc->may_writepage = 1; 30520b06496aSJohannes Weiner } while (--sc->priority >= 0); 3053bb21c7ceSKOSAKI Motohiro 30542a2e4885SJohannes Weiner last_pgdat = NULL; 30552a2e4885SJohannes Weiner for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, 30562a2e4885SJohannes Weiner sc->nodemask) { 30572a2e4885SJohannes Weiner if (zone->zone_pgdat == last_pgdat) 30582a2e4885SJohannes Weiner continue; 30592a2e4885SJohannes Weiner last_pgdat = zone->zone_pgdat; 30602a2e4885SJohannes Weiner snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); 3061e3c1ac58SAndrey Ryabinin set_memcg_congestion(last_pgdat, sc->target_mem_cgroup, false); 30622a2e4885SJohannes Weiner } 30632a2e4885SJohannes Weiner 3064873b4771SKeika Kobayashi delayacct_freepages_end(); 3065873b4771SKeika Kobayashi 3066bb21c7ceSKOSAKI Motohiro if (sc->nr_reclaimed) 3067bb21c7ceSKOSAKI Motohiro return sc->nr_reclaimed; 3068bb21c7ceSKOSAKI Motohiro 30690cee34fdSMel Gorman /* Aborted reclaim to try compaction? don't OOM, then */ 30700b06496aSJohannes Weiner if (sc->compaction_ready) 30717335084dSMel Gorman return 1; 30727335084dSMel Gorman 3073241994edSJohannes Weiner /* Untapped cgroup reserves? Don't OOM, retry. 
*/ 3074d6622f63SYisheng Xie if (sc->memcg_low_skipped) { 3075241994edSJohannes Weiner sc->priority = initial_priority; 3076d6622f63SYisheng Xie sc->memcg_low_reclaim = 1; 3077d6622f63SYisheng Xie sc->memcg_low_skipped = 0; 3078241994edSJohannes Weiner goto retry; 3079241994edSJohannes Weiner } 3080241994edSJohannes Weiner 3081bb21c7ceSKOSAKI Motohiro return 0; 30821da177e4SLinus Torvalds } 30831da177e4SLinus Torvalds 3084c73322d0SJohannes Weiner static bool allow_direct_reclaim(pg_data_t *pgdat) 30855515061dSMel Gorman { 30865515061dSMel Gorman struct zone *zone; 30875515061dSMel Gorman unsigned long pfmemalloc_reserve = 0; 30885515061dSMel Gorman unsigned long free_pages = 0; 30895515061dSMel Gorman int i; 30905515061dSMel Gorman bool wmark_ok; 30915515061dSMel Gorman 3092c73322d0SJohannes Weiner if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 3093c73322d0SJohannes Weiner return true; 3094c73322d0SJohannes Weiner 30955515061dSMel Gorman for (i = 0; i <= ZONE_NORMAL; i++) { 30965515061dSMel Gorman zone = &pgdat->node_zones[i]; 3097d450abd8SJohannes Weiner if (!managed_zone(zone)) 3098d450abd8SJohannes Weiner continue; 3099d450abd8SJohannes Weiner 3100d450abd8SJohannes Weiner if (!zone_reclaimable_pages(zone)) 3101675becceSMel Gorman continue; 3102675becceSMel Gorman 31035515061dSMel Gorman pfmemalloc_reserve += min_wmark_pages(zone); 31045515061dSMel Gorman free_pages += zone_page_state(zone, NR_FREE_PAGES); 31055515061dSMel Gorman } 31065515061dSMel Gorman 3107675becceSMel Gorman /* If there are no reserves (unexpected config) then do not throttle */ 3108675becceSMel Gorman if (!pfmemalloc_reserve) 3109675becceSMel Gorman return true; 3110675becceSMel Gorman 31115515061dSMel Gorman wmark_ok = free_pages > pfmemalloc_reserve / 2; 31125515061dSMel Gorman 31135515061dSMel Gorman /* kswapd must be awake if processes are being throttled */ 31145515061dSMel Gorman if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { 311538087d9bSMel Gorman pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx, 31165515061dSMel Gorman (enum zone_type)ZONE_NORMAL); 31175515061dSMel Gorman wake_up_interruptible(&pgdat->kswapd_wait); 31185515061dSMel Gorman } 31195515061dSMel Gorman 31205515061dSMel Gorman return wmark_ok; 31215515061dSMel Gorman } 31225515061dSMel Gorman 31235515061dSMel Gorman /* 31245515061dSMel Gorman * Throttle direct reclaimers if backing storage is backed by the network 31255515061dSMel Gorman * and the PFMEMALLOC reserve for the preferred node is getting dangerously 31265515061dSMel Gorman * depleted. kswapd will continue to make progress and wake the processes 312750694c28SMel Gorman * when the low watermark is reached. 312850694c28SMel Gorman * 312950694c28SMel Gorman * Returns true if a fatal signal was delivered during throttling. If this 313050694c28SMel Gorman * happens, the page allocator should not consider triggering the OOM killer. 31315515061dSMel Gorman */ 313250694c28SMel Gorman static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, 31335515061dSMel Gorman nodemask_t *nodemask) 31345515061dSMel Gorman { 3135675becceSMel Gorman struct zoneref *z; 31365515061dSMel Gorman struct zone *zone; 3137675becceSMel Gorman pg_data_t *pgdat = NULL; 31385515061dSMel Gorman 31395515061dSMel Gorman /* 31405515061dSMel Gorman * Kernel threads should not be throttled as they may be indirectly 31415515061dSMel Gorman * responsible for cleaning pages necessary for reclaim to make forward 31425515061dSMel Gorman * progress. 
kjournald for example may enter direct reclaim while 31435515061dSMel Gorman * committing a transaction where throttling it could force other 31445515061dSMel Gorman * processes to block on log_wait_commit(). 31455515061dSMel Gorman */ 31465515061dSMel Gorman if (current->flags & PF_KTHREAD) 314750694c28SMel Gorman goto out; 314850694c28SMel Gorman 314950694c28SMel Gorman /* 315050694c28SMel Gorman * If a fatal signal is pending, this process should not throttle. 315150694c28SMel Gorman * It should return quickly so it can exit and free its memory. 315250694c28SMel Gorman */ 315350694c28SMel Gorman if (fatal_signal_pending(current)) 315450694c28SMel Gorman goto out; 31555515061dSMel Gorman 3156675becceSMel Gorman /* 3157675becceSMel Gorman * Check if the pfmemalloc reserves are ok by finding the first node 3158675becceSMel Gorman * with a usable ZONE_NORMAL or lower zone. The expectation is that 3159675becceSMel Gorman * GFP_KERNEL will be required for allocating network buffers when 3160675becceSMel Gorman * swapping over the network so ZONE_HIGHMEM is unusable. 3161675becceSMel Gorman * 3162675becceSMel Gorman * Throttling is based on the first usable node and throttled processes 3163675becceSMel Gorman * wait on a queue until kswapd makes progress and wakes them. There 3164675becceSMel Gorman * is an affinity then between processes waking up and where reclaim 3165675becceSMel Gorman * progress has been made assuming the process wakes on the same node. 3166675becceSMel Gorman * More importantly, processes running on remote nodes will not compete 3167675becceSMel Gorman * for remote pfmemalloc reserves and processes on different nodes 3168675becceSMel Gorman * should make reasonable progress. 3169675becceSMel Gorman */ 3170675becceSMel Gorman for_each_zone_zonelist_nodemask(zone, z, zonelist, 317117636faaSMichael S. Tsirkin gfp_zone(gfp_mask), nodemask) { 3172675becceSMel Gorman if (zone_idx(zone) > ZONE_NORMAL) 3173675becceSMel Gorman continue; 3174675becceSMel Gorman 3175675becceSMel Gorman /* Throttle based on the first usable node */ 31765515061dSMel Gorman pgdat = zone->zone_pgdat; 3177c73322d0SJohannes Weiner if (allow_direct_reclaim(pgdat)) 317850694c28SMel Gorman goto out; 3179675becceSMel Gorman break; 3180675becceSMel Gorman } 3181675becceSMel Gorman 3182675becceSMel Gorman /* If no zone was usable by the allocation flags then do not throttle */ 3183675becceSMel Gorman if (!pgdat) 3184675becceSMel Gorman goto out; 31855515061dSMel Gorman 318668243e76SMel Gorman /* Account for the throttling */ 318768243e76SMel Gorman count_vm_event(PGSCAN_DIRECT_THROTTLE); 318868243e76SMel Gorman 31895515061dSMel Gorman /* 31905515061dSMel Gorman * If the caller cannot enter the filesystem, it's possible that it 31915515061dSMel Gorman * is due to the caller holding an FS lock or performing a journal 31925515061dSMel Gorman * transaction in the case of a filesystem like ext[3|4]. In this case, 31935515061dSMel Gorman * it is not safe to block on pfmemalloc_wait as kswapd could be 31945515061dSMel Gorman * blocked waiting on the same lock. Instead, throttle for up to a 31955515061dSMel Gorman * second before continuing.
31965515061dSMel Gorman */ 31975515061dSMel Gorman if (!(gfp_mask & __GFP_FS)) { 31985515061dSMel Gorman wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, 3199c73322d0SJohannes Weiner allow_direct_reclaim(pgdat), HZ); 320050694c28SMel Gorman 320150694c28SMel Gorman goto check_pending; 32025515061dSMel Gorman } 32035515061dSMel Gorman 32045515061dSMel Gorman /* Throttle until kswapd wakes the process */ 32055515061dSMel Gorman wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, 3206c73322d0SJohannes Weiner allow_direct_reclaim(pgdat)); 320750694c28SMel Gorman 320850694c28SMel Gorman check_pending: 320950694c28SMel Gorman if (fatal_signal_pending(current)) 321050694c28SMel Gorman return true; 321150694c28SMel Gorman 321250694c28SMel Gorman out: 321350694c28SMel Gorman return false; 32145515061dSMel Gorman } 32155515061dSMel Gorman 3216dac1d27bSMel Gorman unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 3217327c0e96SKAMEZAWA Hiroyuki gfp_t gfp_mask, nodemask_t *nodemask) 321866e1707bSBalbir Singh { 321933906bc5SMel Gorman unsigned long nr_reclaimed; 322066e1707bSBalbir Singh struct scan_control sc = { 322122fba335SKOSAKI Motohiro .nr_to_reclaim = SWAP_CLUSTER_MAX, 3222f2f43e56SNick Desaulniers .gfp_mask = current_gfp_context(gfp_mask), 3223b2e18757SMel Gorman .reclaim_idx = gfp_zone(gfp_mask), 3224ee814fe2SJohannes Weiner .order = order, 3225ee814fe2SJohannes Weiner .nodemask = nodemask, 3226ee814fe2SJohannes Weiner .priority = DEF_PRIORITY, 3227ee814fe2SJohannes Weiner .may_writepage = !laptop_mode, 3228a6dc60f8SJohannes Weiner .may_unmap = 1, 32292e2e4259SKOSAKI Motohiro .may_swap = 1, 323066e1707bSBalbir Singh }; 323166e1707bSBalbir Singh 32325515061dSMel Gorman /* 3233bb451fdfSGreg Thelen * scan_control uses s8 fields for order, priority, and reclaim_idx. 3234bb451fdfSGreg Thelen * Confirm they are large enough for max values. 3235bb451fdfSGreg Thelen */ 3236bb451fdfSGreg Thelen BUILD_BUG_ON(MAX_ORDER > S8_MAX); 3237bb451fdfSGreg Thelen BUILD_BUG_ON(DEF_PRIORITY > S8_MAX); 3238bb451fdfSGreg Thelen BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX); 3239bb451fdfSGreg Thelen 3240bb451fdfSGreg Thelen /* 324150694c28SMel Gorman * Do not enter reclaim if fatal signal was delivered while throttled. 324250694c28SMel Gorman * 1 is returned so that the page allocator does not OOM kill at this 324350694c28SMel Gorman * point. 
32445515061dSMel Gorman */ 3245f2f43e56SNick Desaulniers if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) 32465515061dSMel Gorman return 1; 32475515061dSMel Gorman 324833906bc5SMel Gorman trace_mm_vmscan_direct_reclaim_begin(order, 324933906bc5SMel Gorman sc.may_writepage, 3250f2f43e56SNick Desaulniers sc.gfp_mask, 3251e5146b12SMel Gorman sc.reclaim_idx); 325233906bc5SMel Gorman 32533115cd91SVladimir Davydov nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 325433906bc5SMel Gorman 325533906bc5SMel Gorman trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 325633906bc5SMel Gorman 325733906bc5SMel Gorman return nr_reclaimed; 325866e1707bSBalbir Singh } 325966e1707bSBalbir Singh 3260c255a458SAndrew Morton #ifdef CONFIG_MEMCG 326166e1707bSBalbir Singh 3262a9dd0a83SMel Gorman unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, 32634e416953SBalbir Singh gfp_t gfp_mask, bool noswap, 3264ef8f2327SMel Gorman pg_data_t *pgdat, 32650ae5e89cSYing Han unsigned long *nr_scanned) 32664e416953SBalbir Singh { 32674e416953SBalbir Singh struct scan_control sc = { 3268b8f5c566SKOSAKI Motohiro .nr_to_reclaim = SWAP_CLUSTER_MAX, 3269ee814fe2SJohannes Weiner .target_mem_cgroup = memcg, 32704e416953SBalbir Singh .may_writepage = !laptop_mode, 32714e416953SBalbir Singh .may_unmap = 1, 3272b2e18757SMel Gorman .reclaim_idx = MAX_NR_ZONES - 1, 32734e416953SBalbir Singh .may_swap = !noswap, 32744e416953SBalbir Singh }; 32756b4f7799SJohannes Weiner unsigned long lru_pages; 32760ae5e89cSYing Han 32774e416953SBalbir Singh sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 32784e416953SBalbir Singh (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 3279bdce6d9eSKOSAKI Motohiro 32809e3b2f8cSKonstantin Khlebnikov trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, 3281bdce6d9eSKOSAKI Motohiro sc.may_writepage, 3282e5146b12SMel Gorman sc.gfp_mask, 3283e5146b12SMel Gorman sc.reclaim_idx); 3284bdce6d9eSKOSAKI Motohiro 32854e416953SBalbir Singh /* 32864e416953SBalbir Singh * NOTE: Although we can get the priority field, using it 32874e416953SBalbir Singh * here is not a good idea, since it limits the pages we can scan. 3288a9dd0a83SMel Gorman * if we don't reclaim here, the shrink_node from balance_pgdat 32894e416953SBalbir Singh * will pick up pages from other mem cgroup's as well. We hack 32904e416953SBalbir Singh * the priority and make it zero. 
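 * Priority 0 makes the whole LRU eligible in this single pass, so one
 * call can scan as much of this memcg on the node as it needs to.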
32914e416953SBalbir Singh */ 3292ef8f2327SMel Gorman shrink_node_memcg(pgdat, memcg, &sc, &lru_pages); 3293bdce6d9eSKOSAKI Motohiro 3294bdce6d9eSKOSAKI Motohiro trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 3295bdce6d9eSKOSAKI Motohiro 32960ae5e89cSYing Han *nr_scanned = sc.nr_scanned; 32974e416953SBalbir Singh return sc.nr_reclaimed; 32984e416953SBalbir Singh } 32994e416953SBalbir Singh 330072835c86SJohannes Weiner unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, 3301b70a2a21SJohannes Weiner unsigned long nr_pages, 33028c7c6e34SKAMEZAWA Hiroyuki gfp_t gfp_mask, 3303b70a2a21SJohannes Weiner bool may_swap) 330466e1707bSBalbir Singh { 33054e416953SBalbir Singh struct zonelist *zonelist; 3306bdce6d9eSKOSAKI Motohiro unsigned long nr_reclaimed; 3307889976dbSYing Han int nid; 3308499118e9SVlastimil Babka unsigned int noreclaim_flag; 330966e1707bSBalbir Singh struct scan_control sc = { 3310b70a2a21SJohannes Weiner .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), 33117dea19f9SMichal Hocko .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) | 3312ee814fe2SJohannes Weiner (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), 3313b2e18757SMel Gorman .reclaim_idx = MAX_NR_ZONES - 1, 3314ee814fe2SJohannes Weiner .target_mem_cgroup = memcg, 3315ee814fe2SJohannes Weiner .priority = DEF_PRIORITY, 331666e1707bSBalbir Singh .may_writepage = !laptop_mode, 3317a6dc60f8SJohannes Weiner .may_unmap = 1, 3318b70a2a21SJohannes Weiner .may_swap = may_swap, 3319a09ed5e0SYing Han }; 332066e1707bSBalbir Singh 3321889976dbSYing Han /* 3322889976dbSYing Han * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't 3323889976dbSYing Han * take care of from where we get pages. So the node where we start the 3324889976dbSYing Han * scan does not need to be the current node. 
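 * mem_cgroup_select_victim_node() below picks a node where the memcg
 * actually has pages, and the ZONELIST_FALLBACK list of that node then
 * covers every other node as well.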
3325889976dbSYing Han */ 332672835c86SJohannes Weiner nid = mem_cgroup_select_victim_node(memcg); 3327889976dbSYing Han 3328c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK]; 3329bdce6d9eSKOSAKI Motohiro 3330bdce6d9eSKOSAKI Motohiro trace_mm_vmscan_memcg_reclaim_begin(0, 3331bdce6d9eSKOSAKI Motohiro sc.may_writepage, 3332e5146b12SMel Gorman sc.gfp_mask, 3333e5146b12SMel Gorman sc.reclaim_idx); 3334bdce6d9eSKOSAKI Motohiro 3335499118e9SVlastimil Babka noreclaim_flag = memalloc_noreclaim_save(); 33363115cd91SVladimir Davydov nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 3337499118e9SVlastimil Babka memalloc_noreclaim_restore(noreclaim_flag); 3338bdce6d9eSKOSAKI Motohiro 3339bdce6d9eSKOSAKI Motohiro trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 3340bdce6d9eSKOSAKI Motohiro 3341bdce6d9eSKOSAKI Motohiro return nr_reclaimed; 334266e1707bSBalbir Singh } 334366e1707bSBalbir Singh #endif 334466e1707bSBalbir Singh 33451d82de61SMel Gorman static void age_active_anon(struct pglist_data *pgdat, 3346ef8f2327SMel Gorman struct scan_control *sc) 3347f16015fbSJohannes Weiner { 3348b95a2f2dSJohannes Weiner struct mem_cgroup *memcg; 3349b95a2f2dSJohannes Weiner 3350b95a2f2dSJohannes Weiner if (!total_swap_pages) 3351b95a2f2dSJohannes Weiner return; 3352b95a2f2dSJohannes Weiner 3353b95a2f2dSJohannes Weiner memcg = mem_cgroup_iter(NULL, NULL, NULL); 3354b95a2f2dSJohannes Weiner do { 3355ef8f2327SMel Gorman struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg); 3356f16015fbSJohannes Weiner 33572a2e4885SJohannes Weiner if (inactive_list_is_low(lruvec, false, memcg, sc, true)) 33581a93be0eSKonstantin Khlebnikov shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 33599e3b2f8cSKonstantin Khlebnikov sc, LRU_ACTIVE_ANON); 3360b95a2f2dSJohannes Weiner 3361b95a2f2dSJohannes Weiner memcg = mem_cgroup_iter(NULL, memcg, NULL); 3362b95a2f2dSJohannes Weiner } while (memcg); 3363f16015fbSJohannes Weiner } 3364f16015fbSJohannes Weiner 3365e716f2ebSMel Gorman /* 3366e716f2ebSMel Gorman * Returns true if there is an eligible zone balanced for the request order 3367e716f2ebSMel Gorman * and classzone_idx 3368e716f2ebSMel Gorman */ 3369e716f2ebSMel Gorman static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx) 337060cefed4SJohannes Weiner { 3371e716f2ebSMel Gorman int i; 3372e716f2ebSMel Gorman unsigned long mark = -1; 3373e716f2ebSMel Gorman struct zone *zone; 337460cefed4SJohannes Weiner 3375e716f2ebSMel Gorman for (i = 0; i <= classzone_idx; i++) { 3376e716f2ebSMel Gorman zone = pgdat->node_zones + i; 33776256c6b4SMel Gorman 3378e716f2ebSMel Gorman if (!managed_zone(zone)) 3379e716f2ebSMel Gorman continue; 3380e716f2ebSMel Gorman 3381e716f2ebSMel Gorman mark = high_wmark_pages(zone); 3382e716f2ebSMel Gorman if (zone_watermark_ok_safe(zone, order, mark, classzone_idx)) 33836256c6b4SMel Gorman return true; 338460cefed4SJohannes Weiner } 338560cefed4SJohannes Weiner 3386e716f2ebSMel Gorman /* 3387e716f2ebSMel Gorman * If a node has no populated zone within classzone_idx, it does not 3388e716f2ebSMel Gorman * need balancing by definition. This can happen if a zone-restricted 3389e716f2ebSMel Gorman * allocation tries to wake a remote kswapd. 3390e716f2ebSMel Gorman */ 3391e716f2ebSMel Gorman if (mark == -1) 3392e716f2ebSMel Gorman return true; 3393e716f2ebSMel Gorman 3394e716f2ebSMel Gorman return false; 3395e716f2ebSMel Gorman } 3396e716f2ebSMel Gorman 3397631b6e08SMel Gorman /* Clear pgdat state for congested, dirty or under writeback. 
*/ 3398631b6e08SMel Gorman static void clear_pgdat_congested(pg_data_t *pgdat) 3399631b6e08SMel Gorman { 3400631b6e08SMel Gorman clear_bit(PGDAT_CONGESTED, &pgdat->flags); 3401631b6e08SMel Gorman clear_bit(PGDAT_DIRTY, &pgdat->flags); 3402631b6e08SMel Gorman clear_bit(PGDAT_WRITEBACK, &pgdat->flags); 3403631b6e08SMel Gorman } 3404631b6e08SMel Gorman 34051741c877SMel Gorman /* 34065515061dSMel Gorman * Prepare kswapd for sleeping. This verifies that there are no processes 34075515061dSMel Gorman * waiting in throttle_direct_reclaim() and that watermarks have been met. 34085515061dSMel Gorman * 34095515061dSMel Gorman * Returns true if kswapd is ready to sleep 34105515061dSMel Gorman */ 3411d9f21d42SMel Gorman static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx) 3412f50de2d3SMel Gorman { 34135515061dSMel Gorman /* 34149e5e3661SVlastimil Babka * The throttled processes are normally woken up in balance_pgdat() as 3415c73322d0SJohannes Weiner * soon as allow_direct_reclaim() is true. But there is a potential 34169e5e3661SVlastimil Babka * race between when kswapd checks the watermarks and a process gets 34179e5e3661SVlastimil Babka * throttled. There is also a potential race if processes get 34189e5e3661SVlastimil Babka * throttled, kswapd wakes, a large process exits thereby balancing the 34199e5e3661SVlastimil Babka * zones, which causes kswapd to exit balance_pgdat() before reaching 34209e5e3661SVlastimil Babka * the wake up checks. If kswapd is going to sleep, no process should 34219e5e3661SVlastimil Babka * be sleeping on pfmemalloc_wait, so wake them now if necessary. If 34229e5e3661SVlastimil Babka * the wake up is premature, processes will wake kswapd and get 34239e5e3661SVlastimil Babka * throttled again. The difference from wake ups in balance_pgdat() is 34249e5e3661SVlastimil Babka * that here we are under prepare_to_wait(). 34255515061dSMel Gorman */ 34269e5e3661SVlastimil Babka if (waitqueue_active(&pgdat->pfmemalloc_wait)) 34279e5e3661SVlastimil Babka wake_up_all(&pgdat->pfmemalloc_wait); 3428f50de2d3SMel Gorman 3429c73322d0SJohannes Weiner /* Hopeless node, leave it to direct reclaim */ 3430c73322d0SJohannes Weiner if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 3431c73322d0SJohannes Weiner return true; 3432c73322d0SJohannes Weiner 3433e716f2ebSMel Gorman if (pgdat_balanced(pgdat, order, classzone_idx)) { 3434631b6e08SMel Gorman clear_pgdat_congested(pgdat); 3435333b0a45SShantanu Goel return true; 34361d82de61SMel Gorman } 34371d82de61SMel Gorman 3438333b0a45SShantanu Goel return false; 3439f50de2d3SMel Gorman } 3440f50de2d3SMel Gorman 34411da177e4SLinus Torvalds /* 34421d82de61SMel Gorman * kswapd shrinks a node of pages that are at or below the highest usable 34431d82de61SMel Gorman * zone that is currently unbalanced. 3444b8e83b94SMel Gorman * 3445b8e83b94SMel Gorman * Returns true if kswapd scanned at least the requested number of pages to 3446283aba9fSMel Gorman * reclaim or if the lack of progress was due to pages under writeback. 3447283aba9fSMel Gorman * This is used to determine if the scanning priority needs to be raised. 
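 *
 * balance_pgdat() uses the result directly: a true return clears
 * raise_priority, so as long as some progress was made the next pass
 * runs at the same priority instead of a more aggressive one.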
344875485363SMel Gorman */ 34491d82de61SMel Gorman static bool kswapd_shrink_node(pg_data_t *pgdat, 3450accf6242SVlastimil Babka struct scan_control *sc) 345175485363SMel Gorman { 34521d82de61SMel Gorman struct zone *zone; 34531d82de61SMel Gorman int z; 345475485363SMel Gorman 34551d82de61SMel Gorman /* Reclaim a number of pages proportional to the number of zones */ 34561d82de61SMel Gorman sc->nr_to_reclaim = 0; 3457970a39a3SMel Gorman for (z = 0; z <= sc->reclaim_idx; z++) { 34581d82de61SMel Gorman zone = pgdat->node_zones + z; 34596aa303deSMel Gorman if (!managed_zone(zone)) 34601d82de61SMel Gorman continue; 34617c954f6dSMel Gorman 34621d82de61SMel Gorman sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); 34637c954f6dSMel Gorman } 34647c954f6dSMel Gorman 34651d82de61SMel Gorman /* 34661d82de61SMel Gorman * Historically care was taken to put equal pressure on all zones but 34671d82de61SMel Gorman * now pressure is applied based on node LRU order. 34681d82de61SMel Gorman */ 3469970a39a3SMel Gorman shrink_node(pgdat, sc); 34701d82de61SMel Gorman 34711d82de61SMel Gorman /* 34721d82de61SMel Gorman * Fragmentation may mean that the system cannot be rebalanced for 34731d82de61SMel Gorman * high-order allocations. If twice the allocation size has been 34741d82de61SMel Gorman * reclaimed then recheck watermarks only at order-0 to prevent 34751d82de61SMel Gorman * excessive reclaim. Assume that a process requested a high-order 34761d82de61SMel Gorman * can direct reclaim/compact. 34771d82de61SMel Gorman */ 34789861a62cSVlastimil Babka if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) 34791d82de61SMel Gorman sc->order = 0; 34801d82de61SMel Gorman 3481b8e83b94SMel Gorman return sc->nr_scanned >= sc->nr_to_reclaim; 348275485363SMel Gorman } 348375485363SMel Gorman 348475485363SMel Gorman /* 34851d82de61SMel Gorman * For kswapd, balance_pgdat() will reclaim pages across a node from zones 34861d82de61SMel Gorman * that are eligible for use by the caller until at least one zone is 34871d82de61SMel Gorman * balanced. 34881da177e4SLinus Torvalds * 34891d82de61SMel Gorman * Returns the order kswapd finished reclaiming at. 34901da177e4SLinus Torvalds * 34911da177e4SLinus Torvalds * kswapd scans the zones in the highmem->normal->dma direction. It skips 349241858966SMel Gorman * zones which have free_pages > high_wmark_pages(zone), but once a zone is 34931d82de61SMel Gorman * found to have free_pages <= high_wmark_pages(zone), any page is that zone 34941d82de61SMel Gorman * or lower is eligible for reclaim until at least one usable zone is 34951d82de61SMel Gorman * balanced. 
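 *
 * "Balanced" is the pgdat_balanced() check above: at least one managed
 * zone at or below classzone_idx has free pages above its high
 * watermark for the requested order.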
34961da177e4SLinus Torvalds */ 3497accf6242SVlastimil Babka static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) 34981da177e4SLinus Torvalds { 34991da177e4SLinus Torvalds int i; 35000608f43dSAndrew Morton unsigned long nr_soft_reclaimed; 35010608f43dSAndrew Morton unsigned long nr_soft_scanned; 35021d82de61SMel Gorman struct zone *zone; 3503179e9639SAndrew Morton struct scan_control sc = { 3504179e9639SAndrew Morton .gfp_mask = GFP_KERNEL, 3505ee814fe2SJohannes Weiner .order = order, 3506b8e83b94SMel Gorman .priority = DEF_PRIORITY, 3507ee814fe2SJohannes Weiner .may_writepage = !laptop_mode, 3508a6dc60f8SJohannes Weiner .may_unmap = 1, 35092e2e4259SKOSAKI Motohiro .may_swap = 1, 3510179e9639SAndrew Morton }; 351193781325SOmar Sandoval 351293781325SOmar Sandoval __fs_reclaim_acquire(); 351393781325SOmar Sandoval 3514f8891e5eSChristoph Lameter count_vm_event(PAGEOUTRUN); 35151da177e4SLinus Torvalds 35169e3b2f8cSKonstantin Khlebnikov do { 3517c73322d0SJohannes Weiner unsigned long nr_reclaimed = sc.nr_reclaimed; 3518b8e83b94SMel Gorman bool raise_priority = true; 351993781325SOmar Sandoval bool ret; 3520b8e83b94SMel Gorman 352184c7a777SMel Gorman sc.reclaim_idx = classzone_idx; 35221da177e4SLinus Torvalds 352386c79f6bSMel Gorman /* 352484c7a777SMel Gorman * If the number of buffer_heads exceeds the maximum allowed 352584c7a777SMel Gorman * then consider reclaiming from all zones. This has a dual 352684c7a777SMel Gorman * purpose -- on 64-bit systems it is expected that 352784c7a777SMel Gorman * buffer_heads are stripped during active rotation. On 32-bit 352884c7a777SMel Gorman * systems, highmem pages can pin lowmem memory and shrinking 352984c7a777SMel Gorman * buffers can relieve lowmem pressure. Reclaim may still not 353084c7a777SMel Gorman * go ahead if all eligible zones for the original allocation 353184c7a777SMel Gorman * request are balanced to avoid excessive reclaim from kswapd. 353286c79f6bSMel Gorman */ 353386c79f6bSMel Gorman if (buffer_heads_over_limit) { 353486c79f6bSMel Gorman for (i = MAX_NR_ZONES - 1; i >= 0; i--) { 353586c79f6bSMel Gorman zone = pgdat->node_zones + i; 35366aa303deSMel Gorman if (!managed_zone(zone)) 353786c79f6bSMel Gorman continue; 353886c79f6bSMel Gorman 3539970a39a3SMel Gorman sc.reclaim_idx = i; 354086c79f6bSMel Gorman break; 354186c79f6bSMel Gorman } 354286c79f6bSMel Gorman } 354386c79f6bSMel Gorman 354486c79f6bSMel Gorman /* 3545e716f2ebSMel Gorman * Only reclaim if there are no eligible zones. Note that 3546e716f2ebSMel Gorman * sc.reclaim_idx is not used as buffer_heads_over_limit may 3547e716f2ebSMel Gorman * have adjusted it. 354886c79f6bSMel Gorman */ 3549e716f2ebSMel Gorman if (pgdat_balanced(pgdat, sc.order, classzone_idx)) 35501da177e4SLinus Torvalds goto out; 3551e1dbeda6SAndrew Morton 35521da177e4SLinus Torvalds /* 35531d82de61SMel Gorman * Do some background aging of the anon list, to give 35541d82de61SMel Gorman * pages a chance to be referenced before reclaiming. All 35551d82de61SMel Gorman * pages are rotated regardless of classzone as this is 35561d82de61SMel Gorman * about consistent aging. 35571d82de61SMel Gorman */ 3558ef8f2327SMel Gorman age_active_anon(pgdat, &sc); 35591d82de61SMel Gorman 35601d82de61SMel Gorman /* 3561b7ea3c41SMel Gorman * If we're getting trouble reclaiming, start doing writepage 3562b7ea3c41SMel Gorman * even in laptop mode. 
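 * In laptop mode may_writepage starts out clear; with DEF_PRIORITY of
 * 12 it is only set once the priority has dropped below 10, i.e. after
 * a few passes have failed to balance the node.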
3563b7ea3c41SMel Gorman */ 3564047d72c3SJohannes Weiner if (sc.priority < DEF_PRIORITY - 2) 3565b7ea3c41SMel Gorman sc.may_writepage = 1; 3566b7ea3c41SMel Gorman 35671d82de61SMel Gorman /* Call soft limit reclaim before calling shrink_node. */ 35681da177e4SLinus Torvalds sc.nr_scanned = 0; 35690608f43dSAndrew Morton nr_soft_scanned = 0; 3570ef8f2327SMel Gorman nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order, 35711d82de61SMel Gorman sc.gfp_mask, &nr_soft_scanned); 35720608f43dSAndrew Morton sc.nr_reclaimed += nr_soft_reclaimed; 35730608f43dSAndrew Morton 357432a4330dSRik van Riel /* 35751d82de61SMel Gorman * There should be no need to raise the scanning priority if 35761d82de61SMel Gorman * enough pages are already being scanned that that high 35771d82de61SMel Gorman * watermark would be met at 100% efficiency. 357832a4330dSRik van Riel */ 3579970a39a3SMel Gorman if (kswapd_shrink_node(pgdat, &sc)) 3580b8e83b94SMel Gorman raise_priority = false; 3581d7868daeSMel Gorman 35825515061dSMel Gorman /* 35835515061dSMel Gorman * If the low watermark is met there is no need for processes 35845515061dSMel Gorman * to be throttled on pfmemalloc_wait as they should not be 35855515061dSMel Gorman * able to safely make forward progress. Wake them 35865515061dSMel Gorman */ 35875515061dSMel Gorman if (waitqueue_active(&pgdat->pfmemalloc_wait) && 3588c73322d0SJohannes Weiner allow_direct_reclaim(pgdat)) 3589cfc51155SVlastimil Babka wake_up_all(&pgdat->pfmemalloc_wait); 35905515061dSMel Gorman 3591b8e83b94SMel Gorman /* Check if kswapd should be suspending */ 359293781325SOmar Sandoval __fs_reclaim_release(); 359393781325SOmar Sandoval ret = try_to_freeze(); 359493781325SOmar Sandoval __fs_reclaim_acquire(); 359593781325SOmar Sandoval if (ret || kthread_should_stop()) 3596b8e83b94SMel Gorman break; 3597b8e83b94SMel Gorman 3598b8e83b94SMel Gorman /* 3599b8e83b94SMel Gorman * Raise priority if scanning rate is too low or there was no 3600b8e83b94SMel Gorman * progress in reclaiming pages 3601b8e83b94SMel Gorman */ 3602c73322d0SJohannes Weiner nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; 3603c73322d0SJohannes Weiner if (raise_priority || !nr_reclaimed) 3604b8e83b94SMel Gorman sc.priority--; 36051d82de61SMel Gorman } while (sc.priority >= 1); 36061da177e4SLinus Torvalds 3607c73322d0SJohannes Weiner if (!sc.nr_reclaimed) 3608c73322d0SJohannes Weiner pgdat->kswapd_failures++; 3609c73322d0SJohannes Weiner 3610b8e83b94SMel Gorman out: 36112a2e4885SJohannes Weiner snapshot_refaults(NULL, pgdat); 361293781325SOmar Sandoval __fs_reclaim_release(); 36130abdee2bSMel Gorman /* 36141d82de61SMel Gorman * Return the order kswapd stopped reclaiming at as 36151d82de61SMel Gorman * prepare_kswapd_sleep() takes it into account. If another caller 36161d82de61SMel Gorman * entered the allocator slow path while kswapd was awake, order will 36171d82de61SMel Gorman * remain at the higher level. 36180abdee2bSMel Gorman */ 36191d82de61SMel Gorman return sc.order; 36201da177e4SLinus Torvalds } 36211da177e4SLinus Torvalds 3622e716f2ebSMel Gorman /* 3623e716f2ebSMel Gorman * pgdat->kswapd_classzone_idx is the highest zone index that a recent 3624e716f2ebSMel Gorman * allocation request woke kswapd for. When kswapd has not woken recently, 3625e716f2ebSMel Gorman * the value is MAX_NR_ZONES which is not a valid index. This compares a 3626e716f2ebSMel Gorman * given classzone and returns it or the highest classzone index kswapd 3627e716f2ebSMel Gorman * was recently woke for. 
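 * For example, if kswapd was last woken for ZONE_NORMAL and the caller
 * passes ZONE_DMA32, the stored ZONE_NORMAL index is returned since it
 * is the higher of the two.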
3628e716f2ebSMel Gorman */ 3629e716f2ebSMel Gorman static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat, 3630e716f2ebSMel Gorman enum zone_type classzone_idx) 3631e716f2ebSMel Gorman { 3632e716f2ebSMel Gorman if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) 3633e716f2ebSMel Gorman return classzone_idx; 3634e716f2ebSMel Gorman 3635e716f2ebSMel Gorman return max(pgdat->kswapd_classzone_idx, classzone_idx); 3636e716f2ebSMel Gorman } 3637e716f2ebSMel Gorman 363838087d9bSMel Gorman static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, 363938087d9bSMel Gorman unsigned int classzone_idx) 3640f0bc0a60SKOSAKI Motohiro { 3641f0bc0a60SKOSAKI Motohiro long remaining = 0; 3642f0bc0a60SKOSAKI Motohiro DEFINE_WAIT(wait); 3643f0bc0a60SKOSAKI Motohiro 3644f0bc0a60SKOSAKI Motohiro if (freezing(current) || kthread_should_stop()) 3645f0bc0a60SKOSAKI Motohiro return; 3646f0bc0a60SKOSAKI Motohiro 3647f0bc0a60SKOSAKI Motohiro prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 3648f0bc0a60SKOSAKI Motohiro 3649333b0a45SShantanu Goel /* 3650333b0a45SShantanu Goel * Try to sleep for a short interval. Note that kcompactd will only be 3651333b0a45SShantanu Goel * woken if it is possible to sleep for a short interval. This is 3652333b0a45SShantanu Goel * deliberate on the assumption that if reclaim cannot keep an 3653333b0a45SShantanu Goel * eligible zone balanced that it's also unlikely that compaction will 3654333b0a45SShantanu Goel * succeed. 3655333b0a45SShantanu Goel */ 3656d9f21d42SMel Gorman if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { 3657fd901c95SVlastimil Babka /* 3658fd901c95SVlastimil Babka * Compaction records what page blocks it recently failed to 3659fd901c95SVlastimil Babka * isolate pages from and skips them in the future scanning. 3660fd901c95SVlastimil Babka * When kswapd is going to sleep, it is reasonable to assume 3661fd901c95SVlastimil Babka * that pages and compaction may succeed so reset the cache. 3662fd901c95SVlastimil Babka */ 3663fd901c95SVlastimil Babka reset_isolation_suitable(pgdat); 3664fd901c95SVlastimil Babka 3665fd901c95SVlastimil Babka /* 3666fd901c95SVlastimil Babka * We have freed the memory, now we should compact it to make 3667fd901c95SVlastimil Babka * allocation of the requested order possible. 3668fd901c95SVlastimil Babka */ 366938087d9bSMel Gorman wakeup_kcompactd(pgdat, alloc_order, classzone_idx); 3670fd901c95SVlastimil Babka 3671f0bc0a60SKOSAKI Motohiro remaining = schedule_timeout(HZ/10); 367238087d9bSMel Gorman 367338087d9bSMel Gorman /* 367438087d9bSMel Gorman * If woken prematurely then reset kswapd_classzone_idx and 367538087d9bSMel Gorman * order. The values will either be from a wakeup request or 367638087d9bSMel Gorman * the previous request that slept prematurely. 367738087d9bSMel Gorman */ 367838087d9bSMel Gorman if (remaining) { 3679e716f2ebSMel Gorman pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); 368038087d9bSMel Gorman pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order); 368138087d9bSMel Gorman } 368238087d9bSMel Gorman 3683f0bc0a60SKOSAKI Motohiro finish_wait(&pgdat->kswapd_wait, &wait); 3684f0bc0a60SKOSAKI Motohiro prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 3685f0bc0a60SKOSAKI Motohiro } 3686f0bc0a60SKOSAKI Motohiro 3687f0bc0a60SKOSAKI Motohiro /* 3688f0bc0a60SKOSAKI Motohiro * After a short sleep, check if it was a premature sleep. If not, then 3689f0bc0a60SKOSAKI Motohiro * go fully to sleep until explicitly woken up. 
3690f0bc0a60SKOSAKI Motohiro */ 3691d9f21d42SMel Gorman if (!remaining && 3692d9f21d42SMel Gorman prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { 3693f0bc0a60SKOSAKI Motohiro trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 3694f0bc0a60SKOSAKI Motohiro 3695f0bc0a60SKOSAKI Motohiro /* 3696f0bc0a60SKOSAKI Motohiro * vmstat counters are not perfectly accurate and the estimated 3697f0bc0a60SKOSAKI Motohiro * value for counters such as NR_FREE_PAGES can deviate from the 3698f0bc0a60SKOSAKI Motohiro * true value by nr_online_cpus * threshold. To avoid the zone 3699f0bc0a60SKOSAKI Motohiro * watermarks being breached while under pressure, we reduce the 3700f0bc0a60SKOSAKI Motohiro * per-cpu vmstat threshold while kswapd is awake and restore 3701f0bc0a60SKOSAKI Motohiro * them before going back to sleep. 3702f0bc0a60SKOSAKI Motohiro */ 3703f0bc0a60SKOSAKI Motohiro set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 37041c7e7f6cSAaditya Kumar 37051c7e7f6cSAaditya Kumar if (!kthread_should_stop()) 3706f0bc0a60SKOSAKI Motohiro schedule(); 37071c7e7f6cSAaditya Kumar 3708f0bc0a60SKOSAKI Motohiro set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); 3709f0bc0a60SKOSAKI Motohiro } else { 3710f0bc0a60SKOSAKI Motohiro if (remaining) 3711f0bc0a60SKOSAKI Motohiro count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 3712f0bc0a60SKOSAKI Motohiro else 3713f0bc0a60SKOSAKI Motohiro count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 3714f0bc0a60SKOSAKI Motohiro } 3715f0bc0a60SKOSAKI Motohiro finish_wait(&pgdat->kswapd_wait, &wait); 3716f0bc0a60SKOSAKI Motohiro } 3717f0bc0a60SKOSAKI Motohiro 37181da177e4SLinus Torvalds /* 37191da177e4SLinus Torvalds * The background pageout daemon, started as a kernel thread 37201da177e4SLinus Torvalds * from the init process. 37211da177e4SLinus Torvalds * 37221da177e4SLinus Torvalds * This basically trickles out pages so that we have _some_ 37231da177e4SLinus Torvalds * free memory available even if there is no other activity 37241da177e4SLinus Torvalds * that frees anything up. This is needed for things like routing 37251da177e4SLinus Torvalds * etc, where we otherwise might have all activity going on in 37261da177e4SLinus Torvalds * asynchronous contexts that cannot page things out. 37271da177e4SLinus Torvalds * 37281da177e4SLinus Torvalds * If there are applications that are active memory-allocators 37291da177e4SLinus Torvalds * (most normal use), this basically shouldn't matter. 
37301da177e4SLinus Torvalds */ 37311da177e4SLinus Torvalds static int kswapd(void *p) 37321da177e4SLinus Torvalds { 3733e716f2ebSMel Gorman unsigned int alloc_order, reclaim_order; 3734e716f2ebSMel Gorman unsigned int classzone_idx = MAX_NR_ZONES - 1; 37351da177e4SLinus Torvalds pg_data_t *pgdat = (pg_data_t*)p; 37361da177e4SLinus Torvalds struct task_struct *tsk = current; 3737f0bc0a60SKOSAKI Motohiro 37381da177e4SLinus Torvalds struct reclaim_state reclaim_state = { 37391da177e4SLinus Torvalds .reclaimed_slab = 0, 37401da177e4SLinus Torvalds }; 3741a70f7302SRusty Russell const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 37421da177e4SLinus Torvalds 3743174596a0SRusty Russell if (!cpumask_empty(cpumask)) 3744c5f59f08SMike Travis set_cpus_allowed_ptr(tsk, cpumask); 37451da177e4SLinus Torvalds current->reclaim_state = &reclaim_state; 37461da177e4SLinus Torvalds 37471da177e4SLinus Torvalds /* 37481da177e4SLinus Torvalds * Tell the memory management that we're a "memory allocator", 37491da177e4SLinus Torvalds * and that if we need more memory we should get access to it 37501da177e4SLinus Torvalds * regardless (see "__alloc_pages()"). "kswapd" should 37511da177e4SLinus Torvalds * never get caught in the normal page freeing logic. 37521da177e4SLinus Torvalds * 37531da177e4SLinus Torvalds * (Kswapd normally doesn't need memory anyway, but sometimes 37541da177e4SLinus Torvalds * you need a small amount of memory in order to be able to 37551da177e4SLinus Torvalds * page out something else, and this flag essentially protects 37561da177e4SLinus Torvalds * us from recursively trying to free more memory as we're 37571da177e4SLinus Torvalds * trying to free the first piece of memory in the first place). 37581da177e4SLinus Torvalds */ 3759930d9152SChristoph Lameter tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 376083144186SRafael J. Wysocki set_freezable(); 37611da177e4SLinus Torvalds 3762e716f2ebSMel Gorman pgdat->kswapd_order = 0; 3763e716f2ebSMel Gorman pgdat->kswapd_classzone_idx = MAX_NR_ZONES; 37641da177e4SLinus Torvalds for ( ; ; ) { 37656f6313d4SJeff Liu bool ret; 37663e1d1d28SChristoph Lameter 3767e716f2ebSMel Gorman alloc_order = reclaim_order = pgdat->kswapd_order; 3768e716f2ebSMel Gorman classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); 3769e716f2ebSMel Gorman 377038087d9bSMel Gorman kswapd_try_sleep: 377138087d9bSMel Gorman kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, 377238087d9bSMel Gorman classzone_idx); 3773215ddd66SMel Gorman 377438087d9bSMel Gorman /* Read the new order and classzone_idx */ 377538087d9bSMel Gorman alloc_order = reclaim_order = pgdat->kswapd_order; 3776e716f2ebSMel Gorman classzone_idx = kswapd_classzone_idx(pgdat, 0); 377738087d9bSMel Gorman pgdat->kswapd_order = 0; 3778e716f2ebSMel Gorman pgdat->kswapd_classzone_idx = MAX_NR_ZONES; 37791da177e4SLinus Torvalds 37808fe23e05SDavid Rientjes ret = try_to_freeze(); 37818fe23e05SDavid Rientjes if (kthread_should_stop()) 37828fe23e05SDavid Rientjes break; 37838fe23e05SDavid Rientjes 37848fe23e05SDavid Rientjes /* 37858fe23e05SDavid Rientjes * We can speed up thawing tasks if we don't call balance_pgdat 37868fe23e05SDavid Rientjes * after returning from the refrigerator 3787b1296cc4SRafael J. 
Wysocki */ 378838087d9bSMel Gorman if (ret) 378938087d9bSMel Gorman continue; 37901d82de61SMel Gorman 379138087d9bSMel Gorman /* 379238087d9bSMel Gorman * Reclaim begins at the requested order but if a high-order 379338087d9bSMel Gorman * reclaim fails then kswapd falls back to reclaiming for 379438087d9bSMel Gorman * order-0. If that happens, kswapd will consider sleeping 379538087d9bSMel Gorman * for the order it finished reclaiming at (reclaim_order) 379638087d9bSMel Gorman * but kcompactd is woken to compact for the original 379738087d9bSMel Gorman * request (alloc_order). 379838087d9bSMel Gorman */ 3799e5146b12SMel Gorman trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx, 3800e5146b12SMel Gorman alloc_order); 380138087d9bSMel Gorman reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx); 380238087d9bSMel Gorman if (reclaim_order < alloc_order) 380338087d9bSMel Gorman goto kswapd_try_sleep; 380433906bc5SMel Gorman } 3805b0a8cc58STakamori Yamaguchi 380671abdc15SJohannes Weiner tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD); 3807b0a8cc58STakamori Yamaguchi current->reclaim_state = NULL; 380871abdc15SJohannes Weiner 38091da177e4SLinus Torvalds return 0; 38101da177e4SLinus Torvalds } 38111da177e4SLinus Torvalds 38121da177e4SLinus Torvalds /* 38135ecd9d40SDavid Rientjes * A zone is low on free memory or too fragmented for high-order memory. If 38145ecd9d40SDavid Rientjes * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's 38155ecd9d40SDavid Rientjes * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim 38165ecd9d40SDavid Rientjes * has failed or is not needed, still wake up kcompactd if only compaction is 38175ecd9d40SDavid Rientjes * needed. 38181da177e4SLinus Torvalds */ 38195ecd9d40SDavid Rientjes void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, 38205ecd9d40SDavid Rientjes enum zone_type classzone_idx) 38211da177e4SLinus Torvalds { 38221da177e4SLinus Torvalds pg_data_t *pgdat; 38231da177e4SLinus Torvalds 38246aa303deSMel Gorman if (!managed_zone(zone)) 38251da177e4SLinus Torvalds return; 38261da177e4SLinus Torvalds 38275ecd9d40SDavid Rientjes if (!cpuset_zone_allowed(zone, gfp_flags)) 38281da177e4SLinus Torvalds return; 382988f5acf8SMel Gorman pgdat = zone->zone_pgdat; 3830e716f2ebSMel Gorman pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, 3831e716f2ebSMel Gorman classzone_idx); 383238087d9bSMel Gorman pgdat->kswapd_order = max(pgdat->kswapd_order, order); 38338d0986e2SCon Kolivas if (!waitqueue_active(&pgdat->kswapd_wait)) 38341da177e4SLinus Torvalds return; 3835e1a55637SMel Gorman 38365ecd9d40SDavid Rientjes /* Hopeless node, leave it to direct reclaim if possible */ 38375ecd9d40SDavid Rientjes if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || 38385ecd9d40SDavid Rientjes pgdat_balanced(pgdat, order, classzone_idx)) { 38395ecd9d40SDavid Rientjes /* 38405ecd9d40SDavid Rientjes * There may be plenty of free memory available, but it's too 38415ecd9d40SDavid Rientjes * fragmented for high-order allocations. Wake up kcompactd 38425ecd9d40SDavid Rientjes * and rely on compaction_suitable() to determine if it's 38435ecd9d40SDavid Rientjes * needed. If it fails, it will defer subsequent attempts to 38445ecd9d40SDavid Rientjes * ratelimit its work. 
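 * Either way this branch returns without waking kswapd, since its
 * reclaim has either been failing repeatedly or is simply not needed.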
38455ecd9d40SDavid Rientjes */ 38465ecd9d40SDavid Rientjes if (!(gfp_flags & __GFP_DIRECT_RECLAIM)) 38475ecd9d40SDavid Rientjes wakeup_kcompactd(pgdat, order, classzone_idx); 3848c73322d0SJohannes Weiner return; 38495ecd9d40SDavid Rientjes } 3850c73322d0SJohannes Weiner 38515ecd9d40SDavid Rientjes trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order, 38525ecd9d40SDavid Rientjes gfp_flags); 38538d0986e2SCon Kolivas wake_up_interruptible(&pgdat->kswapd_wait); 38541da177e4SLinus Torvalds } 38551da177e4SLinus Torvalds 3856c6f37f12SRafael J. Wysocki #ifdef CONFIG_HIBERNATION 38571da177e4SLinus Torvalds /* 38587b51755cSKOSAKI Motohiro * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 3859d6277db4SRafael J. Wysocki * freed pages. 3860d6277db4SRafael J. Wysocki * 3861d6277db4SRafael J. Wysocki * Rather than trying to age LRUs the aim is to preserve the overall 3862d6277db4SRafael J. Wysocki * LRU order by reclaiming preferentially 3863d6277db4SRafael J. Wysocki * inactive > active > active referenced > active mapped 38641da177e4SLinus Torvalds */ 38657b51755cSKOSAKI Motohiro unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 38661da177e4SLinus Torvalds { 3867d6277db4SRafael J. Wysocki struct reclaim_state reclaim_state; 3868d6277db4SRafael J. Wysocki struct scan_control sc = { 38697b51755cSKOSAKI Motohiro .nr_to_reclaim = nr_to_reclaim, 3870ee814fe2SJohannes Weiner .gfp_mask = GFP_HIGHUSER_MOVABLE, 3871b2e18757SMel Gorman .reclaim_idx = MAX_NR_ZONES - 1, 38729e3b2f8cSKonstantin Khlebnikov .priority = DEF_PRIORITY, 3873ee814fe2SJohannes Weiner .may_writepage = 1, 3874ee814fe2SJohannes Weiner .may_unmap = 1, 3875ee814fe2SJohannes Weiner .may_swap = 1, 3876ee814fe2SJohannes Weiner .hibernation_mode = 1, 38771da177e4SLinus Torvalds }; 38787b51755cSKOSAKI Motohiro struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 38797b51755cSKOSAKI Motohiro struct task_struct *p = current; 38807b51755cSKOSAKI Motohiro unsigned long nr_reclaimed; 3881499118e9SVlastimil Babka unsigned int noreclaim_flag; 38821da177e4SLinus Torvalds 3883d92a8cfcSPeter Zijlstra fs_reclaim_acquire(sc.gfp_mask); 388493781325SOmar Sandoval noreclaim_flag = memalloc_noreclaim_save(); 3885d6277db4SRafael J. Wysocki reclaim_state.reclaimed_slab = 0; 38867b51755cSKOSAKI Motohiro p->reclaim_state = &reclaim_state; 3887d6277db4SRafael J. Wysocki 38883115cd91SVladimir Davydov nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 3889d6277db4SRafael J. Wysocki 38907b51755cSKOSAKI Motohiro p->reclaim_state = NULL; 3891499118e9SVlastimil Babka memalloc_noreclaim_restore(noreclaim_flag); 389293781325SOmar Sandoval fs_reclaim_release(sc.gfp_mask); 3893d6277db4SRafael J. Wysocki 38947b51755cSKOSAKI Motohiro return nr_reclaimed; 38951da177e4SLinus Torvalds } 3896c6f37f12SRafael J. Wysocki #endif /* CONFIG_HIBERNATION */ 38971da177e4SLinus Torvalds 38981da177e4SLinus Torvalds /* It's optimal to keep kswapds on the same CPUs as their memory, but 38991da177e4SLinus Torvalds not required for correctness. So if the last cpu in a node goes 39001da177e4SLinus Torvalds away, we get changed to run anywhere: as the first one comes back, 39011da177e4SLinus Torvalds restore their cpu bindings. 
*/ 3902517bbed9SSebastian Andrzej Siewior static int kswapd_cpu_online(unsigned int cpu) 39031da177e4SLinus Torvalds { 390458c0a4a7SYasunori Goto int nid; 39051da177e4SLinus Torvalds 390648fb2e24SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 3907c5f59f08SMike Travis pg_data_t *pgdat = NODE_DATA(nid); 3908a70f7302SRusty Russell const struct cpumask *mask; 3909a70f7302SRusty Russell 3910a70f7302SRusty Russell mask = cpumask_of_node(pgdat->node_id); 3911c5f59f08SMike Travis 39123e597945SRusty Russell if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 39131da177e4SLinus Torvalds /* One of our CPUs online: restore mask */ 3914c5f59f08SMike Travis set_cpus_allowed_ptr(pgdat->kswapd, mask); 39151da177e4SLinus Torvalds } 3916517bbed9SSebastian Andrzej Siewior return 0; 39171da177e4SLinus Torvalds } 39181da177e4SLinus Torvalds 39193218ae14SYasunori Goto /* 39203218ae14SYasunori Goto * This kswapd start function will be called by init and node-hot-add. 39213218ae14SYasunori Goto * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added. 39223218ae14SYasunori Goto */ 39233218ae14SYasunori Goto int kswapd_run(int nid) 39243218ae14SYasunori Goto { 39253218ae14SYasunori Goto pg_data_t *pgdat = NODE_DATA(nid); 39263218ae14SYasunori Goto int ret = 0; 39273218ae14SYasunori Goto 39283218ae14SYasunori Goto if (pgdat->kswapd) 39293218ae14SYasunori Goto return 0; 39303218ae14SYasunori Goto 39313218ae14SYasunori Goto pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 39323218ae14SYasunori Goto if (IS_ERR(pgdat->kswapd)) { 39333218ae14SYasunori Goto /* failure at boot is fatal */ 3934c6202adfSThomas Gleixner BUG_ON(system_state < SYSTEM_RUNNING); 3935d5dc0ad9SGavin Shan pr_err("Failed to start kswapd on node %d\n", nid); 3936d5dc0ad9SGavin Shan ret = PTR_ERR(pgdat->kswapd); 3937d72515b8SXishi Qiu pgdat->kswapd = NULL; 39383218ae14SYasunori Goto } 39393218ae14SYasunori Goto return ret; 39403218ae14SYasunori Goto } 39413218ae14SYasunori Goto 39428fe23e05SDavid Rientjes /* 3943d8adde17SJiang Liu * Called by memory hotplug when all memory in a node is offlined. Caller must 3944bfc8c901SVladimir Davydov * hold mem_hotplug_begin/end(). 
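 *
 * A rough sketch of the expected calling pattern (the real offline path
 * lives in mm/memory_hotplug.c; the surrounding details here are only
 * illustrative):
 *
 *	mem_hotplug_begin();
 *	... offline and remove the node's last memory ...
 *	if (!node_present_pages(nid))
 *		kswapd_stop(nid);
 *	mem_hotplug_end();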
39458fe23e05SDavid Rientjes */ 39468fe23e05SDavid Rientjes void kswapd_stop(int nid) 39478fe23e05SDavid Rientjes { 39488fe23e05SDavid Rientjes struct task_struct *kswapd = NODE_DATA(nid)->kswapd; 39498fe23e05SDavid Rientjes 3950d8adde17SJiang Liu if (kswapd) { 39518fe23e05SDavid Rientjes kthread_stop(kswapd); 3952d8adde17SJiang Liu NODE_DATA(nid)->kswapd = NULL; 3953d8adde17SJiang Liu } 39548fe23e05SDavid Rientjes } 39558fe23e05SDavid Rientjes 39561da177e4SLinus Torvalds static int __init kswapd_init(void) 39571da177e4SLinus Torvalds { 3958517bbed9SSebastian Andrzej Siewior int nid, ret; 395969e05944SAndrew Morton 39601da177e4SLinus Torvalds swap_setup(); 396148fb2e24SLai Jiangshan for_each_node_state(nid, N_MEMORY) 39623218ae14SYasunori Goto kswapd_run(nid); 3963517bbed9SSebastian Andrzej Siewior ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 3964517bbed9SSebastian Andrzej Siewior "mm/vmscan:online", kswapd_cpu_online, 3965517bbed9SSebastian Andrzej Siewior NULL); 3966517bbed9SSebastian Andrzej Siewior WARN_ON(ret < 0); 39671da177e4SLinus Torvalds return 0; 39681da177e4SLinus Torvalds } 39691da177e4SLinus Torvalds 39701da177e4SLinus Torvalds module_init(kswapd_init) 39719eeff239SChristoph Lameter 39729eeff239SChristoph Lameter #ifdef CONFIG_NUMA 39739eeff239SChristoph Lameter /* 3974a5f5f91dSMel Gorman * Node reclaim mode 39759eeff239SChristoph Lameter * 3976a5f5f91dSMel Gorman * If non-zero call node_reclaim when the number of free pages falls below 39779eeff239SChristoph Lameter * the watermarks. 39789eeff239SChristoph Lameter */ 3979a5f5f91dSMel Gorman int node_reclaim_mode __read_mostly; 39809eeff239SChristoph Lameter 39811b2ffb78SChristoph Lameter #define RECLAIM_OFF 0 39827d03431cSFernando Luis Vazquez Cao #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ 39831b2ffb78SChristoph Lameter #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ 398495bbc0c7SZhihui Zhang #define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */ 39851b2ffb78SChristoph Lameter 39869eeff239SChristoph Lameter /* 3987a5f5f91dSMel Gorman * Priority for NODE_RECLAIM. This determines the fraction of pages 3988a92f7126SChristoph Lameter * of a node considered for each zone_reclaim. 4 scans 1/16th of 3989a92f7126SChristoph Lameter * a zone. 3990a92f7126SChristoph Lameter */ 3991a5f5f91dSMel Gorman #define NODE_RECLAIM_PRIORITY 4 3992a92f7126SChristoph Lameter 39939eeff239SChristoph Lameter /* 3994a5f5f91dSMel Gorman * Percentage of pages in a zone that must be unmapped for node_reclaim to 39959614634fSChristoph Lameter * occur. 39969614634fSChristoph Lameter */ 39979614634fSChristoph Lameter int sysctl_min_unmapped_ratio = 1; 39989614634fSChristoph Lameter 39999614634fSChristoph Lameter /* 40000ff38490SChristoph Lameter * If the number of slab pages in a zone grows beyond this percentage then 40010ff38490SChristoph Lameter * slab reclaim needs to occur. 
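 * With the default of 5 set below, that threshold is 5% of the node's
 * pages.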
40020ff38490SChristoph Lameter */ 40030ff38490SChristoph Lameter int sysctl_min_slab_ratio = 5; 40040ff38490SChristoph Lameter 400511fb9989SMel Gorman static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat) 400690afa5deSMel Gorman { 400711fb9989SMel Gorman unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED); 400811fb9989SMel Gorman unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) + 400911fb9989SMel Gorman node_page_state(pgdat, NR_ACTIVE_FILE); 401090afa5deSMel Gorman 401190afa5deSMel Gorman /* 401290afa5deSMel Gorman * It's possible for there to be more file mapped pages than 401390afa5deSMel Gorman * accounted for by the pages on the file LRU lists because 401490afa5deSMel Gorman * tmpfs pages accounted for as ANON can also be FILE_MAPPED 401590afa5deSMel Gorman */ 401690afa5deSMel Gorman return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; 401790afa5deSMel Gorman } 401890afa5deSMel Gorman 401990afa5deSMel Gorman /* Work out how many page cache pages we can reclaim in this reclaim_mode */ 4020a5f5f91dSMel Gorman static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat) 402190afa5deSMel Gorman { 4022d031a157SAlexandru Moise unsigned long nr_pagecache_reclaimable; 4023d031a157SAlexandru Moise unsigned long delta = 0; 402490afa5deSMel Gorman 402590afa5deSMel Gorman /* 402695bbc0c7SZhihui Zhang * If RECLAIM_UNMAP is set, then all file pages are considered 402790afa5deSMel Gorman * potentially reclaimable. Otherwise, we have to worry about 402811fb9989SMel Gorman * pages like swapcache and node_unmapped_file_pages() provides 402990afa5deSMel Gorman * a better estimate 403090afa5deSMel Gorman */ 4031a5f5f91dSMel Gorman if (node_reclaim_mode & RECLAIM_UNMAP) 4032a5f5f91dSMel Gorman nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES); 403390afa5deSMel Gorman else 4034a5f5f91dSMel Gorman nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat); 403590afa5deSMel Gorman 403690afa5deSMel Gorman /* If we can't clean pages, remove dirty pages from consideration */ 4037a5f5f91dSMel Gorman if (!(node_reclaim_mode & RECLAIM_WRITE)) 4038a5f5f91dSMel Gorman delta += node_page_state(pgdat, NR_FILE_DIRTY); 403990afa5deSMel Gorman 404090afa5deSMel Gorman /* Watch for any possible underflows due to delta */ 404190afa5deSMel Gorman if (unlikely(delta > nr_pagecache_reclaimable)) 404290afa5deSMel Gorman delta = nr_pagecache_reclaimable; 404390afa5deSMel Gorman 404490afa5deSMel Gorman return nr_pagecache_reclaimable - delta; 404590afa5deSMel Gorman } 404690afa5deSMel Gorman 40470ff38490SChristoph Lameter /* 4048a5f5f91dSMel Gorman * Try to free up some pages from this node through reclaim. 
40499eeff239SChristoph Lameter */ 4050a5f5f91dSMel Gorman static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) 40519eeff239SChristoph Lameter { 40527fb2d46dSChristoph Lameter /* Minimum pages needed in order to stay on node */ 405369e05944SAndrew Morton const unsigned long nr_pages = 1 << order; 40549eeff239SChristoph Lameter struct task_struct *p = current; 40559eeff239SChristoph Lameter struct reclaim_state reclaim_state; 4056499118e9SVlastimil Babka unsigned int noreclaim_flag; 4057179e9639SAndrew Morton struct scan_control sc = { 405862b726c1SAndrew Morton .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), 4059f2f43e56SNick Desaulniers .gfp_mask = current_gfp_context(gfp_mask), 4060bd2f6199SJohannes Weiner .order = order, 4061a5f5f91dSMel Gorman .priority = NODE_RECLAIM_PRIORITY, 4062a5f5f91dSMel Gorman .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE), 4063a5f5f91dSMel Gorman .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP), 4064ee814fe2SJohannes Weiner .may_swap = 1, 4065f2f43e56SNick Desaulniers .reclaim_idx = gfp_zone(gfp_mask), 4066179e9639SAndrew Morton }; 40679eeff239SChristoph Lameter 40689eeff239SChristoph Lameter cond_resched(); 406993781325SOmar Sandoval fs_reclaim_acquire(sc.gfp_mask); 4070d4f7796eSChristoph Lameter /* 407195bbc0c7SZhihui Zhang * We need to be able to allocate from the reserves for RECLAIM_UNMAP 4072d4f7796eSChristoph Lameter * and we also need to be able to write out pages for RECLAIM_WRITE 407395bbc0c7SZhihui Zhang * and RECLAIM_UNMAP. 4074d4f7796eSChristoph Lameter */ 4075499118e9SVlastimil Babka noreclaim_flag = memalloc_noreclaim_save(); 4076499118e9SVlastimil Babka p->flags |= PF_SWAPWRITE; 40779eeff239SChristoph Lameter reclaim_state.reclaimed_slab = 0; 40789eeff239SChristoph Lameter p->reclaim_state = &reclaim_state; 4079c84db23cSChristoph Lameter 4080a5f5f91dSMel Gorman if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) { 4081a92f7126SChristoph Lameter /* 4082894befecSAndrey Ryabinin * Free memory by calling shrink node with increasing 40830ff38490SChristoph Lameter * priorities until we have enough memory freed. 4084a92f7126SChristoph Lameter */ 4085a92f7126SChristoph Lameter do { 4086970a39a3SMel Gorman shrink_node(pgdat, &sc); 40879e3b2f8cSKonstantin Khlebnikov } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); 40880ff38490SChristoph Lameter } 4089a92f7126SChristoph Lameter 40909eeff239SChristoph Lameter p->reclaim_state = NULL; 4091499118e9SVlastimil Babka current->flags &= ~PF_SWAPWRITE; 4092499118e9SVlastimil Babka memalloc_noreclaim_restore(noreclaim_flag); 409393781325SOmar Sandoval fs_reclaim_release(sc.gfp_mask); 4094a79311c1SRik van Riel return sc.nr_reclaimed >= nr_pages; 40959eeff239SChristoph Lameter } 4096179e9639SAndrew Morton 4097a5f5f91dSMel Gorman int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) 4098179e9639SAndrew Morton { 4099d773ed6bSDavid Rientjes int ret; 4100179e9639SAndrew Morton 4101179e9639SAndrew Morton /* 4102a5f5f91dSMel Gorman * Node reclaim reclaims unmapped file backed pages and 41030ff38490SChristoph Lameter * slab pages if we are over the defined limits. 410434aa1330SChristoph Lameter * 41059614634fSChristoph Lameter * A small portion of unmapped file backed pages is needed for 41069614634fSChristoph Lameter * file I/O otherwise pages read by file I/O will be immediately 4107a5f5f91dSMel Gorman * thrown out if the node is overallocated. 
So we do not reclaim 4108a5f5f91dSMel Gorman * if less than a specified percentage of the node is used by 41099614634fSChristoph Lameter * unmapped file backed pages. 4110179e9639SAndrew Morton */ 4111a5f5f91dSMel Gorman if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && 4112385386cfSJohannes Weiner node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages) 4113a5f5f91dSMel Gorman return NODE_RECLAIM_FULL; 4114179e9639SAndrew Morton 4115179e9639SAndrew Morton /* 4116d773ed6bSDavid Rientjes * Do not scan if the allocation should not be delayed. 4117179e9639SAndrew Morton */ 4118d0164adcSMel Gorman if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) 4119a5f5f91dSMel Gorman return NODE_RECLAIM_NOSCAN; 4120179e9639SAndrew Morton 4121179e9639SAndrew Morton /* 4122a5f5f91dSMel Gorman * Only run node reclaim on the local node or on nodes that do not 4123179e9639SAndrew Morton * have associated processors. This will favor the local processor 4124179e9639SAndrew Morton * over remote processors and spread off node memory allocations 4125179e9639SAndrew Morton * as wide as possible. 4126179e9639SAndrew Morton */ 4127a5f5f91dSMel Gorman if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) 4128a5f5f91dSMel Gorman return NODE_RECLAIM_NOSCAN; 4129d773ed6bSDavid Rientjes 4130a5f5f91dSMel Gorman if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) 4131a5f5f91dSMel Gorman return NODE_RECLAIM_NOSCAN; 4132fa5e084eSMel Gorman 4133a5f5f91dSMel Gorman ret = __node_reclaim(pgdat, gfp_mask, order); 4134a5f5f91dSMel Gorman clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); 4135d773ed6bSDavid Rientjes 413624cf7251SMel Gorman if (!ret) 413724cf7251SMel Gorman count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); 413824cf7251SMel Gorman 4139d773ed6bSDavid Rientjes return ret; 4140179e9639SAndrew Morton } 41419eeff239SChristoph Lameter #endif 4142894bc310SLee Schermerhorn 4143894bc310SLee Schermerhorn /* 4144894bc310SLee Schermerhorn * page_evictable - test whether a page is evictable 4145894bc310SLee Schermerhorn * @page: the page to test 4146894bc310SLee Schermerhorn * 4147894bc310SLee Schermerhorn * Test whether page is evictable--i.e., should be placed on active/inactive 414839b5f29aSHugh Dickins * lists vs unevictable list. 
4149894bc310SLee Schermerhorn * 4150894bc310SLee Schermerhorn * Reasons page might not be evictable: 4151ba9ddf49SLee Schermerhorn * (1) page's mapping marked unevictable 4152b291f000SNick Piggin * (2) page is part of an mlocked VMA 4153ba9ddf49SLee Schermerhorn * 4154894bc310SLee Schermerhorn */ 415539b5f29aSHugh Dickins int page_evictable(struct page *page) 4156894bc310SLee Schermerhorn { 4157e92bb4ddSHuang Ying int ret; 4158e92bb4ddSHuang Ying 4159e92bb4ddSHuang Ying /* Prevent address_space of inode and swap cache from being freed */ 4160e92bb4ddSHuang Ying rcu_read_lock(); 4161e92bb4ddSHuang Ying ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page); 4162e92bb4ddSHuang Ying rcu_read_unlock(); 4163e92bb4ddSHuang Ying return ret; 4164894bc310SLee Schermerhorn } 416589e004eaSLee Schermerhorn 416685046579SHugh Dickins #ifdef CONFIG_SHMEM 416789e004eaSLee Schermerhorn /** 416824513264SHugh Dickins * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list 416924513264SHugh Dickins * @pages: array of pages to check 417024513264SHugh Dickins * @nr_pages: number of pages to check 417189e004eaSLee Schermerhorn * 417224513264SHugh Dickins * Checks pages for evictability and moves them to the appropriate lru list. 417385046579SHugh Dickins * 417485046579SHugh Dickins * This function is only used for SysV IPC SHM_UNLOCK. 417589e004eaSLee Schermerhorn */ 417624513264SHugh Dickins void check_move_unevictable_pages(struct page **pages, int nr_pages) 417789e004eaSLee Schermerhorn { 4178925b7673SJohannes Weiner struct lruvec *lruvec; 4179785b99feSMel Gorman struct pglist_data *pgdat = NULL; 418024513264SHugh Dickins int pgscanned = 0; 418124513264SHugh Dickins int pgrescued = 0; 418289e004eaSLee Schermerhorn int i; 418389e004eaSLee Schermerhorn 418424513264SHugh Dickins for (i = 0; i < nr_pages; i++) { 418524513264SHugh Dickins struct page *page = pages[i]; 4186785b99feSMel Gorman struct pglist_data *pagepgdat = page_pgdat(page); 418789e004eaSLee Schermerhorn 418824513264SHugh Dickins pgscanned++; 4189785b99feSMel Gorman if (pagepgdat != pgdat) { 4190785b99feSMel Gorman if (pgdat) 4191785b99feSMel Gorman spin_unlock_irq(&pgdat->lru_lock); 4192785b99feSMel Gorman pgdat = pagepgdat; 4193785b99feSMel Gorman spin_lock_irq(&pgdat->lru_lock); 419489e004eaSLee Schermerhorn } 4195785b99feSMel Gorman lruvec = mem_cgroup_page_lruvec(page, pgdat); 419689e004eaSLee Schermerhorn 419724513264SHugh Dickins if (!PageLRU(page) || !PageUnevictable(page)) 419824513264SHugh Dickins continue; 419989e004eaSLee Schermerhorn 420039b5f29aSHugh Dickins if (page_evictable(page)) { 420124513264SHugh Dickins enum lru_list lru = page_lru_base_type(page); 420224513264SHugh Dickins 4203309381feSSasha Levin VM_BUG_ON_PAGE(PageActive(page), page); 420424513264SHugh Dickins ClearPageUnevictable(page); 4205fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE); 4206fa9add64SHugh Dickins add_page_to_lru_list(page, lruvec, lru); 420724513264SHugh Dickins pgrescued++; 420889e004eaSLee Schermerhorn } 420989e004eaSLee Schermerhorn } 421024513264SHugh Dickins 4211785b99feSMel Gorman if (pgdat) { 421224513264SHugh Dickins __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); 421324513264SHugh Dickins __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); 4214785b99feSMel Gorman spin_unlock_irq(&pgdat->lru_lock); 421524513264SHugh Dickins } 421685046579SHugh Dickins } 421785046579SHugh Dickins #endif /* CONFIG_SHMEM */ 4218
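/*
 * Illustrative sketch, not part of this file: how a SHM_UNLOCK-style
 * caller is expected to feed pages to check_move_unevictable_pages().
 * The real user is shmem_unlock_mapping() in mm/shmem.c; the loop below
 * is only schematic, and "mapping" stands for the shmem file's
 * address_space.
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup(&pvec, mapping, &index)) {
 *		check_move_unevictable_pages(pvec.pages, pagevec_count(&pvec));
 *		pagevec_release(&pvec);
 *		cond_resched();
 *	}
 */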