11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * linux/mm/vmscan.c 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 51da177e4SLinus Torvalds * 61da177e4SLinus Torvalds * Swap reorganised 29.12.95, Stephen Tweedie. 71da177e4SLinus Torvalds * kswapd added: 7.1.96 sct 81da177e4SLinus Torvalds * Removed kswapd_ctl limits, and swap out as many pages as needed 91da177e4SLinus Torvalds * to bring the system back to freepages.high: 2.4.97, Rik van Riel. 101da177e4SLinus Torvalds * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). 111da177e4SLinus Torvalds * Multiqueue VM started 5.8.00, Rik van Riel. 121da177e4SLinus Torvalds */ 131da177e4SLinus Torvalds 14b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15b1de0d13SMitchel Humpherys 161da177e4SLinus Torvalds #include <linux/mm.h> 171da177e4SLinus Torvalds #include <linux/module.h> 185a0e3ad6STejun Heo #include <linux/gfp.h> 191da177e4SLinus Torvalds #include <linux/kernel_stat.h> 201da177e4SLinus Torvalds #include <linux/swap.h> 211da177e4SLinus Torvalds #include <linux/pagemap.h> 221da177e4SLinus Torvalds #include <linux/init.h> 231da177e4SLinus Torvalds #include <linux/highmem.h> 2470ddf637SAnton Vorontsov #include <linux/vmpressure.h> 25e129b5c2SAndrew Morton #include <linux/vmstat.h> 261da177e4SLinus Torvalds #include <linux/file.h> 271da177e4SLinus Torvalds #include <linux/writeback.h> 281da177e4SLinus Torvalds #include <linux/blkdev.h> 291da177e4SLinus Torvalds #include <linux/buffer_head.h> /* for try_to_release_page(), 301da177e4SLinus Torvalds buffer_heads_over_limit */ 311da177e4SLinus Torvalds #include <linux/mm_inline.h> 321da177e4SLinus Torvalds #include <linux/backing-dev.h> 331da177e4SLinus Torvalds #include <linux/rmap.h> 341da177e4SLinus Torvalds #include <linux/topology.h> 351da177e4SLinus Torvalds #include <linux/cpu.h> 361da177e4SLinus Torvalds #include <linux/cpuset.h> 373e7d3449SMel Gorman #include <linux/compaction.h> 381da177e4SLinus Torvalds #include <linux/notifier.h> 391da177e4SLinus Torvalds #include <linux/rwsem.h> 40248a0301SRafael J. Wysocki #include <linux/delay.h> 413218ae14SYasunori Goto #include <linux/kthread.h> 427dfb7103SNigel Cunningham #include <linux/freezer.h> 4366e1707bSBalbir Singh #include <linux/memcontrol.h> 44873b4771SKeika Kobayashi #include <linux/delayacct.h> 45af936a16SLee Schermerhorn #include <linux/sysctl.h> 46929bea7cSKOSAKI Motohiro #include <linux/oom.h> 47268bb0ceSLinus Torvalds #include <linux/prefetch.h> 48b1de0d13SMitchel Humpherys #include <linux/printk.h> 49f9fe48beSRoss Zwisler #include <linux/dax.h> 501da177e4SLinus Torvalds 511da177e4SLinus Torvalds #include <asm/tlbflush.h> 521da177e4SLinus Torvalds #include <asm/div64.h> 531da177e4SLinus Torvalds 541da177e4SLinus Torvalds #include <linux/swapops.h> 55117aad1eSRafael Aquini #include <linux/balloon_compaction.h> 561da177e4SLinus Torvalds 570f8053a5SNick Piggin #include "internal.h" 580f8053a5SNick Piggin 5933906bc5SMel Gorman #define CREATE_TRACE_POINTS 6033906bc5SMel Gorman #include <trace/events/vmscan.h> 6133906bc5SMel Gorman 621da177e4SLinus Torvalds struct scan_control { 6322fba335SKOSAKI Motohiro /* How many pages shrink_list() should reclaim */ 6422fba335SKOSAKI Motohiro unsigned long nr_to_reclaim; 6522fba335SKOSAKI Motohiro 661da177e4SLinus Torvalds /* This context's GFP mask */ 676daa0e28SAl Viro gfp_t gfp_mask; 681da177e4SLinus Torvalds 69ee814fe2SJohannes Weiner /* Allocation order */ 705ad333ebSAndy Whitcroft int order; 7166e1707bSBalbir Singh 72ee814fe2SJohannes Weiner /* 73ee814fe2SJohannes Weiner * Nodemask of nodes allowed by the caller. If NULL, all nodes 74ee814fe2SJohannes Weiner * are scanned. 75ee814fe2SJohannes Weiner */ 76ee814fe2SJohannes Weiner nodemask_t *nodemask; 779e3b2f8cSKonstantin Khlebnikov 785f53e762SKOSAKI Motohiro /* 79f16015fbSJohannes Weiner * The memory cgroup that hit its limit and as a result is the 80f16015fbSJohannes Weiner * primary target of this reclaim invocation. 81f16015fbSJohannes Weiner */ 82f16015fbSJohannes Weiner struct mem_cgroup *target_mem_cgroup; 8366e1707bSBalbir Singh 84ee814fe2SJohannes Weiner /* Scan (total_size >> priority) pages at once */ 85ee814fe2SJohannes Weiner int priority; 86ee814fe2SJohannes Weiner 87b2e18757SMel Gorman /* The highest zone to isolate pages for reclaim from */ 88b2e18757SMel Gorman enum zone_type reclaim_idx; 89b2e18757SMel Gorman 90ee814fe2SJohannes Weiner unsigned int may_writepage:1; 91ee814fe2SJohannes Weiner 92ee814fe2SJohannes Weiner /* Can mapped pages be reclaimed? */ 93ee814fe2SJohannes Weiner unsigned int may_unmap:1; 94ee814fe2SJohannes Weiner 95ee814fe2SJohannes Weiner /* Can pages be swapped as part of reclaim? */ 96ee814fe2SJohannes Weiner unsigned int may_swap:1; 97ee814fe2SJohannes Weiner 98241994edSJohannes Weiner /* Can cgroups be reclaimed below their normal consumption range? */ 99241994edSJohannes Weiner unsigned int may_thrash:1; 100241994edSJohannes Weiner 101ee814fe2SJohannes Weiner unsigned int hibernation_mode:1; 102ee814fe2SJohannes Weiner 103ee814fe2SJohannes Weiner /* One of the zones is ready for compaction */ 104ee814fe2SJohannes Weiner unsigned int compaction_ready:1; 105ee814fe2SJohannes Weiner 106ee814fe2SJohannes Weiner /* Incremented by the number of inactive pages that were scanned */ 107ee814fe2SJohannes Weiner unsigned long nr_scanned; 108ee814fe2SJohannes Weiner 109ee814fe2SJohannes Weiner /* Number of pages freed so far during a call to shrink_zones() */ 110ee814fe2SJohannes Weiner unsigned long nr_reclaimed; 1111da177e4SLinus Torvalds }; 1121da177e4SLinus Torvalds 1131da177e4SLinus Torvalds #ifdef ARCH_HAS_PREFETCH 1141da177e4SLinus Torvalds #define prefetch_prev_lru_page(_page, _base, _field) \ 1151da177e4SLinus Torvalds do { \ 1161da177e4SLinus Torvalds if ((_page)->lru.prev != _base) { \ 1171da177e4SLinus Torvalds struct page *prev; \ 1181da177e4SLinus Torvalds \ 1191da177e4SLinus Torvalds prev = lru_to_page(&(_page->lru)); \ 1201da177e4SLinus Torvalds prefetch(&prev->_field); \ 1211da177e4SLinus Torvalds } \ 1221da177e4SLinus Torvalds } while (0) 1231da177e4SLinus Torvalds #else 1241da177e4SLinus Torvalds #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) 1251da177e4SLinus Torvalds #endif 1261da177e4SLinus Torvalds 1271da177e4SLinus Torvalds #ifdef ARCH_HAS_PREFETCHW 1281da177e4SLinus Torvalds #define prefetchw_prev_lru_page(_page, _base, _field) \ 1291da177e4SLinus Torvalds do { \ 1301da177e4SLinus Torvalds if ((_page)->lru.prev != _base) { \ 1311da177e4SLinus Torvalds struct page *prev; \ 1321da177e4SLinus Torvalds \ 1331da177e4SLinus Torvalds prev = lru_to_page(&(_page->lru)); \ 1341da177e4SLinus Torvalds prefetchw(&prev->_field); \ 1351da177e4SLinus Torvalds } \ 1361da177e4SLinus Torvalds } while (0) 1371da177e4SLinus Torvalds #else 1381da177e4SLinus Torvalds #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) 1391da177e4SLinus Torvalds #endif 1401da177e4SLinus Torvalds 1411da177e4SLinus Torvalds /* 1421da177e4SLinus Torvalds * From 0 .. 100. Higher means more swappy. 1431da177e4SLinus Torvalds */ 1441da177e4SLinus Torvalds int vm_swappiness = 60; 145d0480be4SWang Sheng-Hui /* 146d0480be4SWang Sheng-Hui * The total number of pages which are beyond the high watermark within all 147d0480be4SWang Sheng-Hui * zones. 148d0480be4SWang Sheng-Hui */ 149d0480be4SWang Sheng-Hui unsigned long vm_total_pages; 1501da177e4SLinus Torvalds 1511da177e4SLinus Torvalds static LIST_HEAD(shrinker_list); 1521da177e4SLinus Torvalds static DECLARE_RWSEM(shrinker_rwsem); 1531da177e4SLinus Torvalds 154c255a458SAndrew Morton #ifdef CONFIG_MEMCG 15589b5fae5SJohannes Weiner static bool global_reclaim(struct scan_control *sc) 15689b5fae5SJohannes Weiner { 157f16015fbSJohannes Weiner return !sc->target_mem_cgroup; 15889b5fae5SJohannes Weiner } 15997c9341fSTejun Heo 16097c9341fSTejun Heo /** 16197c9341fSTejun Heo * sane_reclaim - is the usual dirty throttling mechanism operational? 16297c9341fSTejun Heo * @sc: scan_control in question 16397c9341fSTejun Heo * 16497c9341fSTejun Heo * The normal page dirty throttling mechanism in balance_dirty_pages() is 16597c9341fSTejun Heo * completely broken with the legacy memcg and direct stalling in 16697c9341fSTejun Heo * shrink_page_list() is used for throttling instead, which lacks all the 16797c9341fSTejun Heo * niceties such as fairness, adaptive pausing, bandwidth proportional 16897c9341fSTejun Heo * allocation and configurability. 16997c9341fSTejun Heo * 17097c9341fSTejun Heo * This function tests whether the vmscan currently in progress can assume 17197c9341fSTejun Heo * that the normal dirty throttling mechanism is operational. 17297c9341fSTejun Heo */ 17397c9341fSTejun Heo static bool sane_reclaim(struct scan_control *sc) 17497c9341fSTejun Heo { 17597c9341fSTejun Heo struct mem_cgroup *memcg = sc->target_mem_cgroup; 17697c9341fSTejun Heo 17797c9341fSTejun Heo if (!memcg) 17897c9341fSTejun Heo return true; 17997c9341fSTejun Heo #ifdef CONFIG_CGROUP_WRITEBACK 18069234aceSLinus Torvalds if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 18197c9341fSTejun Heo return true; 18297c9341fSTejun Heo #endif 18397c9341fSTejun Heo return false; 18497c9341fSTejun Heo } 18591a45470SKAMEZAWA Hiroyuki #else 18689b5fae5SJohannes Weiner static bool global_reclaim(struct scan_control *sc) 18789b5fae5SJohannes Weiner { 18889b5fae5SJohannes Weiner return true; 18989b5fae5SJohannes Weiner } 19097c9341fSTejun Heo 19197c9341fSTejun Heo static bool sane_reclaim(struct scan_control *sc) 19297c9341fSTejun Heo { 19397c9341fSTejun Heo return true; 19497c9341fSTejun Heo } 19591a45470SKAMEZAWA Hiroyuki #endif 19691a45470SKAMEZAWA Hiroyuki 197599d0c95SMel Gorman unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat) 1986e543d57SLisa Du { 199599d0c95SMel Gorman unsigned long nr; 200599d0c95SMel Gorman 201599d0c95SMel Gorman nr = node_page_state_snapshot(pgdat, NR_ACTIVE_FILE) + 202599d0c95SMel Gorman node_page_state_snapshot(pgdat, NR_INACTIVE_FILE) + 203599d0c95SMel Gorman node_page_state_snapshot(pgdat, NR_ISOLATED_FILE); 204599d0c95SMel Gorman 205599d0c95SMel Gorman if (get_nr_swap_pages() > 0) 206599d0c95SMel Gorman nr += node_page_state_snapshot(pgdat, NR_ACTIVE_ANON) + 207599d0c95SMel Gorman node_page_state_snapshot(pgdat, NR_INACTIVE_ANON) + 208599d0c95SMel Gorman node_page_state_snapshot(pgdat, NR_ISOLATED_ANON); 209599d0c95SMel Gorman 210599d0c95SMel Gorman return nr; 211599d0c95SMel Gorman } 212599d0c95SMel Gorman 213599d0c95SMel Gorman bool pgdat_reclaimable(struct pglist_data *pgdat) 214599d0c95SMel Gorman { 215599d0c95SMel Gorman return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) < 216599d0c95SMel Gorman pgdat_reclaimable_pages(pgdat) * 6; 2176e543d57SLisa Du } 2186e543d57SLisa Du 21923047a96SJohannes Weiner unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru) 220c9f299d9SKOSAKI Motohiro { 221c3c787e8SHugh Dickins if (!mem_cgroup_disabled()) 2224d7dcca2SHugh Dickins return mem_cgroup_get_lru_size(lruvec, lru); 223a3d8e054SKOSAKI Motohiro 224599d0c95SMel Gorman return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru); 225c9f299d9SKOSAKI Motohiro } 226c9f299d9SKOSAKI Motohiro 2271da177e4SLinus Torvalds /* 2281d3d4437SGlauber Costa * Add a shrinker callback to be called from the vm. 2291da177e4SLinus Torvalds */ 2301d3d4437SGlauber Costa int register_shrinker(struct shrinker *shrinker) 2311da177e4SLinus Torvalds { 2321d3d4437SGlauber Costa size_t size = sizeof(*shrinker->nr_deferred); 2331d3d4437SGlauber Costa 2341d3d4437SGlauber Costa if (shrinker->flags & SHRINKER_NUMA_AWARE) 2351d3d4437SGlauber Costa size *= nr_node_ids; 2361d3d4437SGlauber Costa 2371d3d4437SGlauber Costa shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); 2381d3d4437SGlauber Costa if (!shrinker->nr_deferred) 2391d3d4437SGlauber Costa return -ENOMEM; 2401d3d4437SGlauber Costa 2411da177e4SLinus Torvalds down_write(&shrinker_rwsem); 2421da177e4SLinus Torvalds list_add_tail(&shrinker->list, &shrinker_list); 2431da177e4SLinus Torvalds up_write(&shrinker_rwsem); 2441d3d4437SGlauber Costa return 0; 2451da177e4SLinus Torvalds } 2468e1f936bSRusty Russell EXPORT_SYMBOL(register_shrinker); 2471da177e4SLinus Torvalds 2481da177e4SLinus Torvalds /* 2491da177e4SLinus Torvalds * Remove one 2501da177e4SLinus Torvalds */ 2518e1f936bSRusty Russell void unregister_shrinker(struct shrinker *shrinker) 2521da177e4SLinus Torvalds { 2531da177e4SLinus Torvalds down_write(&shrinker_rwsem); 2541da177e4SLinus Torvalds list_del(&shrinker->list); 2551da177e4SLinus Torvalds up_write(&shrinker_rwsem); 256ae393321SAndrew Vagin kfree(shrinker->nr_deferred); 2571da177e4SLinus Torvalds } 2588e1f936bSRusty Russell EXPORT_SYMBOL(unregister_shrinker); 2591da177e4SLinus Torvalds 2601da177e4SLinus Torvalds #define SHRINK_BATCH 128 2611d3d4437SGlauber Costa 262cb731d6cSVladimir Davydov static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, 2636b4f7799SJohannes Weiner struct shrinker *shrinker, 2646b4f7799SJohannes Weiner unsigned long nr_scanned, 2656b4f7799SJohannes Weiner unsigned long nr_eligible) 2661da177e4SLinus Torvalds { 26724f7c6b9SDave Chinner unsigned long freed = 0; 2681da177e4SLinus Torvalds unsigned long long delta; 269635697c6SKonstantin Khlebnikov long total_scan; 270d5bc5fd3SVladimir Davydov long freeable; 271acf92b48SDave Chinner long nr; 272acf92b48SDave Chinner long new_nr; 2731d3d4437SGlauber Costa int nid = shrinkctl->nid; 274e9299f50SDave Chinner long batch_size = shrinker->batch ? shrinker->batch 275e9299f50SDave Chinner : SHRINK_BATCH; 2761da177e4SLinus Torvalds 277d5bc5fd3SVladimir Davydov freeable = shrinker->count_objects(shrinker, shrinkctl); 278d5bc5fd3SVladimir Davydov if (freeable == 0) 2791d3d4437SGlauber Costa return 0; 280635697c6SKonstantin Khlebnikov 281acf92b48SDave Chinner /* 282acf92b48SDave Chinner * copy the current shrinker scan count into a local variable 283acf92b48SDave Chinner * and zero it so that other concurrent shrinker invocations 284acf92b48SDave Chinner * don't also do this scanning work. 285acf92b48SDave Chinner */ 2861d3d4437SGlauber Costa nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0); 287acf92b48SDave Chinner 288acf92b48SDave Chinner total_scan = nr; 2896b4f7799SJohannes Weiner delta = (4 * nr_scanned) / shrinker->seeks; 290d5bc5fd3SVladimir Davydov delta *= freeable; 2916b4f7799SJohannes Weiner do_div(delta, nr_eligible + 1); 292acf92b48SDave Chinner total_scan += delta; 293acf92b48SDave Chinner if (total_scan < 0) { 2948612c663SPintu Kumar pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", 295a0b02131SDave Chinner shrinker->scan_objects, total_scan); 296d5bc5fd3SVladimir Davydov total_scan = freeable; 297ea164d73SAndrea Arcangeli } 298ea164d73SAndrea Arcangeli 299ea164d73SAndrea Arcangeli /* 3003567b59aSDave Chinner * We need to avoid excessive windup on filesystem shrinkers 3013567b59aSDave Chinner * due to large numbers of GFP_NOFS allocations causing the 3023567b59aSDave Chinner * shrinkers to return -1 all the time. This results in a large 3033567b59aSDave Chinner * nr being built up so when a shrink that can do some work 3043567b59aSDave Chinner * comes along it empties the entire cache due to nr >>> 305d5bc5fd3SVladimir Davydov * freeable. This is bad for sustaining a working set in 3063567b59aSDave Chinner * memory. 3073567b59aSDave Chinner * 3083567b59aSDave Chinner * Hence only allow the shrinker to scan the entire cache when 3093567b59aSDave Chinner * a large delta change is calculated directly. 3103567b59aSDave Chinner */ 311d5bc5fd3SVladimir Davydov if (delta < freeable / 4) 312d5bc5fd3SVladimir Davydov total_scan = min(total_scan, freeable / 2); 3133567b59aSDave Chinner 3143567b59aSDave Chinner /* 315ea164d73SAndrea Arcangeli * Avoid risking looping forever due to too large nr value: 316ea164d73SAndrea Arcangeli * never try to free more than twice the estimate number of 317ea164d73SAndrea Arcangeli * freeable entries. 318ea164d73SAndrea Arcangeli */ 319d5bc5fd3SVladimir Davydov if (total_scan > freeable * 2) 320d5bc5fd3SVladimir Davydov total_scan = freeable * 2; 3211da177e4SLinus Torvalds 32224f7c6b9SDave Chinner trace_mm_shrink_slab_start(shrinker, shrinkctl, nr, 3236b4f7799SJohannes Weiner nr_scanned, nr_eligible, 324d5bc5fd3SVladimir Davydov freeable, delta, total_scan); 32509576073SDave Chinner 3260b1fb40aSVladimir Davydov /* 3270b1fb40aSVladimir Davydov * Normally, we should not scan less than batch_size objects in one 3280b1fb40aSVladimir Davydov * pass to avoid too frequent shrinker calls, but if the slab has less 3290b1fb40aSVladimir Davydov * than batch_size objects in total and we are really tight on memory, 3300b1fb40aSVladimir Davydov * we will try to reclaim all available objects, otherwise we can end 3310b1fb40aSVladimir Davydov * up failing allocations although there are plenty of reclaimable 3320b1fb40aSVladimir Davydov * objects spread over several slabs with usage less than the 3330b1fb40aSVladimir Davydov * batch_size. 3340b1fb40aSVladimir Davydov * 3350b1fb40aSVladimir Davydov * We detect the "tight on memory" situations by looking at the total 3360b1fb40aSVladimir Davydov * number of objects we want to scan (total_scan). If it is greater 337d5bc5fd3SVladimir Davydov * than the total number of objects on slab (freeable), we must be 3380b1fb40aSVladimir Davydov * scanning at high prio and therefore should try to reclaim as much as 3390b1fb40aSVladimir Davydov * possible. 3400b1fb40aSVladimir Davydov */ 3410b1fb40aSVladimir Davydov while (total_scan >= batch_size || 342d5bc5fd3SVladimir Davydov total_scan >= freeable) { 34324f7c6b9SDave Chinner unsigned long ret; 3440b1fb40aSVladimir Davydov unsigned long nr_to_scan = min(batch_size, total_scan); 3451da177e4SLinus Torvalds 3460b1fb40aSVladimir Davydov shrinkctl->nr_to_scan = nr_to_scan; 34724f7c6b9SDave Chinner ret = shrinker->scan_objects(shrinker, shrinkctl); 34824f7c6b9SDave Chinner if (ret == SHRINK_STOP) 3491da177e4SLinus Torvalds break; 35024f7c6b9SDave Chinner freed += ret; 35124f7c6b9SDave Chinner 3520b1fb40aSVladimir Davydov count_vm_events(SLABS_SCANNED, nr_to_scan); 3530b1fb40aSVladimir Davydov total_scan -= nr_to_scan; 3541da177e4SLinus Torvalds 3551da177e4SLinus Torvalds cond_resched(); 3561da177e4SLinus Torvalds } 3571da177e4SLinus Torvalds 358acf92b48SDave Chinner /* 359acf92b48SDave Chinner * move the unused scan count back into the shrinker in a 360acf92b48SDave Chinner * manner that handles concurrent updates. If we exhausted the 361acf92b48SDave Chinner * scan, there is no need to do an update. 362acf92b48SDave Chinner */ 36383aeeadaSKonstantin Khlebnikov if (total_scan > 0) 36483aeeadaSKonstantin Khlebnikov new_nr = atomic_long_add_return(total_scan, 3651d3d4437SGlauber Costa &shrinker->nr_deferred[nid]); 36683aeeadaSKonstantin Khlebnikov else 3671d3d4437SGlauber Costa new_nr = atomic_long_read(&shrinker->nr_deferred[nid]); 368acf92b48SDave Chinner 369df9024a8SDave Hansen trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan); 3701d3d4437SGlauber Costa return freed; 3711d3d4437SGlauber Costa } 3721d3d4437SGlauber Costa 3736b4f7799SJohannes Weiner /** 374cb731d6cSVladimir Davydov * shrink_slab - shrink slab caches 3756b4f7799SJohannes Weiner * @gfp_mask: allocation context 3766b4f7799SJohannes Weiner * @nid: node whose slab caches to target 377cb731d6cSVladimir Davydov * @memcg: memory cgroup whose slab caches to target 3786b4f7799SJohannes Weiner * @nr_scanned: pressure numerator 3796b4f7799SJohannes Weiner * @nr_eligible: pressure denominator 3801d3d4437SGlauber Costa * 3816b4f7799SJohannes Weiner * Call the shrink functions to age shrinkable caches. 3821d3d4437SGlauber Costa * 3836b4f7799SJohannes Weiner * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set, 3846b4f7799SJohannes Weiner * unaware shrinkers will receive a node id of 0 instead. 3851d3d4437SGlauber Costa * 386cb731d6cSVladimir Davydov * @memcg specifies the memory cgroup to target. If it is not NULL, 387cb731d6cSVladimir Davydov * only shrinkers with SHRINKER_MEMCG_AWARE set will be called to scan 3880fc9f58aSVladimir Davydov * objects from the memory cgroup specified. Otherwise, only unaware 3890fc9f58aSVladimir Davydov * shrinkers are called. 390cb731d6cSVladimir Davydov * 3916b4f7799SJohannes Weiner * @nr_scanned and @nr_eligible form a ratio that indicate how much of 3926b4f7799SJohannes Weiner * the available objects should be scanned. Page reclaim for example 3936b4f7799SJohannes Weiner * passes the number of pages scanned and the number of pages on the 3946b4f7799SJohannes Weiner * LRU lists that it considered on @nid, plus a bias in @nr_scanned 3956b4f7799SJohannes Weiner * when it encountered mapped pages. The ratio is further biased by 3966b4f7799SJohannes Weiner * the ->seeks setting of the shrink function, which indicates the 3976b4f7799SJohannes Weiner * cost to recreate an object relative to that of an LRU page. 3981d3d4437SGlauber Costa * 3996b4f7799SJohannes Weiner * Returns the number of reclaimed slab objects. 4001d3d4437SGlauber Costa */ 401cb731d6cSVladimir Davydov static unsigned long shrink_slab(gfp_t gfp_mask, int nid, 402cb731d6cSVladimir Davydov struct mem_cgroup *memcg, 4036b4f7799SJohannes Weiner unsigned long nr_scanned, 4046b4f7799SJohannes Weiner unsigned long nr_eligible) 4051d3d4437SGlauber Costa { 4061d3d4437SGlauber Costa struct shrinker *shrinker; 4071d3d4437SGlauber Costa unsigned long freed = 0; 4081d3d4437SGlauber Costa 4090fc9f58aSVladimir Davydov if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))) 410cb731d6cSVladimir Davydov return 0; 411cb731d6cSVladimir Davydov 4126b4f7799SJohannes Weiner if (nr_scanned == 0) 4136b4f7799SJohannes Weiner nr_scanned = SWAP_CLUSTER_MAX; 4141d3d4437SGlauber Costa 4151d3d4437SGlauber Costa if (!down_read_trylock(&shrinker_rwsem)) { 4161d3d4437SGlauber Costa /* 4171d3d4437SGlauber Costa * If we would return 0, our callers would understand that we 4181d3d4437SGlauber Costa * have nothing else to shrink and give up trying. By returning 4191d3d4437SGlauber Costa * 1 we keep it going and assume we'll be able to shrink next 4201d3d4437SGlauber Costa * time. 4211d3d4437SGlauber Costa */ 4221d3d4437SGlauber Costa freed = 1; 4231d3d4437SGlauber Costa goto out; 4241d3d4437SGlauber Costa } 4251d3d4437SGlauber Costa 4261d3d4437SGlauber Costa list_for_each_entry(shrinker, &shrinker_list, list) { 4276b4f7799SJohannes Weiner struct shrink_control sc = { 4286b4f7799SJohannes Weiner .gfp_mask = gfp_mask, 4296b4f7799SJohannes Weiner .nid = nid, 430cb731d6cSVladimir Davydov .memcg = memcg, 4316b4f7799SJohannes Weiner }; 4326b4f7799SJohannes Weiner 4330fc9f58aSVladimir Davydov /* 4340fc9f58aSVladimir Davydov * If kernel memory accounting is disabled, we ignore 4350fc9f58aSVladimir Davydov * SHRINKER_MEMCG_AWARE flag and call all shrinkers 4360fc9f58aSVladimir Davydov * passing NULL for memcg. 4370fc9f58aSVladimir Davydov */ 4380fc9f58aSVladimir Davydov if (memcg_kmem_enabled() && 4390fc9f58aSVladimir Davydov !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE)) 440cb731d6cSVladimir Davydov continue; 441cb731d6cSVladimir Davydov 4426b4f7799SJohannes Weiner if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) 4436b4f7799SJohannes Weiner sc.nid = 0; 4446b4f7799SJohannes Weiner 445cb731d6cSVladimir Davydov freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible); 446ec97097bSVladimir Davydov } 4471d3d4437SGlauber Costa 4481da177e4SLinus Torvalds up_read(&shrinker_rwsem); 449f06590bdSMinchan Kim out: 450f06590bdSMinchan Kim cond_resched(); 45124f7c6b9SDave Chinner return freed; 4521da177e4SLinus Torvalds } 4531da177e4SLinus Torvalds 454cb731d6cSVladimir Davydov void drop_slab_node(int nid) 455cb731d6cSVladimir Davydov { 456cb731d6cSVladimir Davydov unsigned long freed; 457cb731d6cSVladimir Davydov 458cb731d6cSVladimir Davydov do { 459cb731d6cSVladimir Davydov struct mem_cgroup *memcg = NULL; 460cb731d6cSVladimir Davydov 461cb731d6cSVladimir Davydov freed = 0; 462cb731d6cSVladimir Davydov do { 463cb731d6cSVladimir Davydov freed += shrink_slab(GFP_KERNEL, nid, memcg, 464cb731d6cSVladimir Davydov 1000, 1000); 465cb731d6cSVladimir Davydov } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); 466cb731d6cSVladimir Davydov } while (freed > 10); 467cb731d6cSVladimir Davydov } 468cb731d6cSVladimir Davydov 469cb731d6cSVladimir Davydov void drop_slab(void) 470cb731d6cSVladimir Davydov { 471cb731d6cSVladimir Davydov int nid; 472cb731d6cSVladimir Davydov 473cb731d6cSVladimir Davydov for_each_online_node(nid) 474cb731d6cSVladimir Davydov drop_slab_node(nid); 475cb731d6cSVladimir Davydov } 476cb731d6cSVladimir Davydov 4771da177e4SLinus Torvalds static inline int is_page_cache_freeable(struct page *page) 4781da177e4SLinus Torvalds { 479ceddc3a5SJohannes Weiner /* 480ceddc3a5SJohannes Weiner * A freeable page cache page is referenced only by the caller 481ceddc3a5SJohannes Weiner * that isolated the page, the page cache radix tree and 482ceddc3a5SJohannes Weiner * optional buffer heads at page->private. 483ceddc3a5SJohannes Weiner */ 484edcf4748SJohannes Weiner return page_count(page) - page_has_private(page) == 2; 4851da177e4SLinus Torvalds } 4861da177e4SLinus Torvalds 487703c2708STejun Heo static int may_write_to_inode(struct inode *inode, struct scan_control *sc) 4881da177e4SLinus Torvalds { 489930d9152SChristoph Lameter if (current->flags & PF_SWAPWRITE) 4901da177e4SLinus Torvalds return 1; 491703c2708STejun Heo if (!inode_write_congested(inode)) 4921da177e4SLinus Torvalds return 1; 493703c2708STejun Heo if (inode_to_bdi(inode) == current->backing_dev_info) 4941da177e4SLinus Torvalds return 1; 4951da177e4SLinus Torvalds return 0; 4961da177e4SLinus Torvalds } 4971da177e4SLinus Torvalds 4981da177e4SLinus Torvalds /* 4991da177e4SLinus Torvalds * We detected a synchronous write error writing a page out. Probably 5001da177e4SLinus Torvalds * -ENOSPC. We need to propagate that into the address_space for a subsequent 5011da177e4SLinus Torvalds * fsync(), msync() or close(). 5021da177e4SLinus Torvalds * 5031da177e4SLinus Torvalds * The tricky part is that after writepage we cannot touch the mapping: nothing 5041da177e4SLinus Torvalds * prevents it from being freed up. But we have a ref on the page and once 5051da177e4SLinus Torvalds * that page is locked, the mapping is pinned. 5061da177e4SLinus Torvalds * 5071da177e4SLinus Torvalds * We're allowed to run sleeping lock_page() here because we know the caller has 5081da177e4SLinus Torvalds * __GFP_FS. 5091da177e4SLinus Torvalds */ 5101da177e4SLinus Torvalds static void handle_write_error(struct address_space *mapping, 5111da177e4SLinus Torvalds struct page *page, int error) 5121da177e4SLinus Torvalds { 5137eaceaccSJens Axboe lock_page(page); 5143e9f45bdSGuillaume Chazarain if (page_mapping(page) == mapping) 5153e9f45bdSGuillaume Chazarain mapping_set_error(mapping, error); 5161da177e4SLinus Torvalds unlock_page(page); 5171da177e4SLinus Torvalds } 5181da177e4SLinus Torvalds 51904e62a29SChristoph Lameter /* possible outcome of pageout() */ 52004e62a29SChristoph Lameter typedef enum { 52104e62a29SChristoph Lameter /* failed to write page out, page is locked */ 52204e62a29SChristoph Lameter PAGE_KEEP, 52304e62a29SChristoph Lameter /* move page to the active list, page is locked */ 52404e62a29SChristoph Lameter PAGE_ACTIVATE, 52504e62a29SChristoph Lameter /* page has been sent to the disk successfully, page is unlocked */ 52604e62a29SChristoph Lameter PAGE_SUCCESS, 52704e62a29SChristoph Lameter /* page is clean and locked */ 52804e62a29SChristoph Lameter PAGE_CLEAN, 52904e62a29SChristoph Lameter } pageout_t; 53004e62a29SChristoph Lameter 5311da177e4SLinus Torvalds /* 5321742f19fSAndrew Morton * pageout is called by shrink_page_list() for each dirty page. 5331742f19fSAndrew Morton * Calls ->writepage(). 5341da177e4SLinus Torvalds */ 535c661b078SAndy Whitcroft static pageout_t pageout(struct page *page, struct address_space *mapping, 5367d3579e8SKOSAKI Motohiro struct scan_control *sc) 5371da177e4SLinus Torvalds { 5381da177e4SLinus Torvalds /* 5391da177e4SLinus Torvalds * If the page is dirty, only perform writeback if that write 5401da177e4SLinus Torvalds * will be non-blocking. To prevent this allocation from being 5411da177e4SLinus Torvalds * stalled by pagecache activity. But note that there may be 5421da177e4SLinus Torvalds * stalls if we need to run get_block(). We could test 5431da177e4SLinus Torvalds * PagePrivate for that. 5441da177e4SLinus Torvalds * 5458174202bSAl Viro * If this process is currently in __generic_file_write_iter() against 5461da177e4SLinus Torvalds * this page's queue, we can perform writeback even if that 5471da177e4SLinus Torvalds * will block. 5481da177e4SLinus Torvalds * 5491da177e4SLinus Torvalds * If the page is swapcache, write it back even if that would 5501da177e4SLinus Torvalds * block, for some throttling. This happens by accident, because 5511da177e4SLinus Torvalds * swap_backing_dev_info is bust: it doesn't reflect the 5521da177e4SLinus Torvalds * congestion state of the swapdevs. Easy to fix, if needed. 5531da177e4SLinus Torvalds */ 5541da177e4SLinus Torvalds if (!is_page_cache_freeable(page)) 5551da177e4SLinus Torvalds return PAGE_KEEP; 5561da177e4SLinus Torvalds if (!mapping) { 5571da177e4SLinus Torvalds /* 5581da177e4SLinus Torvalds * Some data journaling orphaned pages can have 5591da177e4SLinus Torvalds * page->mapping == NULL while being dirty with clean buffers. 5601da177e4SLinus Torvalds */ 561266cf658SDavid Howells if (page_has_private(page)) { 5621da177e4SLinus Torvalds if (try_to_free_buffers(page)) { 5631da177e4SLinus Torvalds ClearPageDirty(page); 564b1de0d13SMitchel Humpherys pr_info("%s: orphaned page\n", __func__); 5651da177e4SLinus Torvalds return PAGE_CLEAN; 5661da177e4SLinus Torvalds } 5671da177e4SLinus Torvalds } 5681da177e4SLinus Torvalds return PAGE_KEEP; 5691da177e4SLinus Torvalds } 5701da177e4SLinus Torvalds if (mapping->a_ops->writepage == NULL) 5711da177e4SLinus Torvalds return PAGE_ACTIVATE; 572703c2708STejun Heo if (!may_write_to_inode(mapping->host, sc)) 5731da177e4SLinus Torvalds return PAGE_KEEP; 5741da177e4SLinus Torvalds 5751da177e4SLinus Torvalds if (clear_page_dirty_for_io(page)) { 5761da177e4SLinus Torvalds int res; 5771da177e4SLinus Torvalds struct writeback_control wbc = { 5781da177e4SLinus Torvalds .sync_mode = WB_SYNC_NONE, 5791da177e4SLinus Torvalds .nr_to_write = SWAP_CLUSTER_MAX, 580111ebb6eSOGAWA Hirofumi .range_start = 0, 581111ebb6eSOGAWA Hirofumi .range_end = LLONG_MAX, 5821da177e4SLinus Torvalds .for_reclaim = 1, 5831da177e4SLinus Torvalds }; 5841da177e4SLinus Torvalds 5851da177e4SLinus Torvalds SetPageReclaim(page); 5861da177e4SLinus Torvalds res = mapping->a_ops->writepage(page, &wbc); 5871da177e4SLinus Torvalds if (res < 0) 5881da177e4SLinus Torvalds handle_write_error(mapping, page, res); 589994fc28cSZach Brown if (res == AOP_WRITEPAGE_ACTIVATE) { 5901da177e4SLinus Torvalds ClearPageReclaim(page); 5911da177e4SLinus Torvalds return PAGE_ACTIVATE; 5921da177e4SLinus Torvalds } 593c661b078SAndy Whitcroft 5941da177e4SLinus Torvalds if (!PageWriteback(page)) { 5951da177e4SLinus Torvalds /* synchronous write or broken a_ops? */ 5961da177e4SLinus Torvalds ClearPageReclaim(page); 5971da177e4SLinus Torvalds } 5983aa23851Syalin wang trace_mm_vmscan_writepage(page); 599c4a25635SMel Gorman inc_node_page_state(page, NR_VMSCAN_WRITE); 6001da177e4SLinus Torvalds return PAGE_SUCCESS; 6011da177e4SLinus Torvalds } 6021da177e4SLinus Torvalds 6031da177e4SLinus Torvalds return PAGE_CLEAN; 6041da177e4SLinus Torvalds } 6051da177e4SLinus Torvalds 606a649fd92SAndrew Morton /* 607e286781dSNick Piggin * Same as remove_mapping, but if the page is removed from the mapping, it 608e286781dSNick Piggin * gets returned with a refcount of 0. 609a649fd92SAndrew Morton */ 610a528910eSJohannes Weiner static int __remove_mapping(struct address_space *mapping, struct page *page, 611a528910eSJohannes Weiner bool reclaimed) 61249d2e9ccSChristoph Lameter { 613c4843a75SGreg Thelen unsigned long flags; 614c4843a75SGreg Thelen 61528e4d965SNick Piggin BUG_ON(!PageLocked(page)); 61628e4d965SNick Piggin BUG_ON(mapping != page_mapping(page)); 61749d2e9ccSChristoph Lameter 618c4843a75SGreg Thelen spin_lock_irqsave(&mapping->tree_lock, flags); 61949d2e9ccSChristoph Lameter /* 6200fd0e6b0SNick Piggin * The non racy check for a busy page. 6210fd0e6b0SNick Piggin * 6220fd0e6b0SNick Piggin * Must be careful with the order of the tests. When someone has 6230fd0e6b0SNick Piggin * a ref to the page, it may be possible that they dirty it then 6240fd0e6b0SNick Piggin * drop the reference. So if PageDirty is tested before page_count 6250fd0e6b0SNick Piggin * here, then the following race may occur: 6260fd0e6b0SNick Piggin * 6270fd0e6b0SNick Piggin * get_user_pages(&page); 6280fd0e6b0SNick Piggin * [user mapping goes away] 6290fd0e6b0SNick Piggin * write_to(page); 6300fd0e6b0SNick Piggin * !PageDirty(page) [good] 6310fd0e6b0SNick Piggin * SetPageDirty(page); 6320fd0e6b0SNick Piggin * put_page(page); 6330fd0e6b0SNick Piggin * !page_count(page) [good, discard it] 6340fd0e6b0SNick Piggin * 6350fd0e6b0SNick Piggin * [oops, our write_to data is lost] 6360fd0e6b0SNick Piggin * 6370fd0e6b0SNick Piggin * Reversing the order of the tests ensures such a situation cannot 6380fd0e6b0SNick Piggin * escape unnoticed. The smp_rmb is needed to ensure the page->flags 6390139aa7bSJoonsoo Kim * load is not satisfied before that of page->_refcount. 6400fd0e6b0SNick Piggin * 6410fd0e6b0SNick Piggin * Note that if SetPageDirty is always performed via set_page_dirty, 6420fd0e6b0SNick Piggin * and thus under tree_lock, then this ordering is not required. 64349d2e9ccSChristoph Lameter */ 644fe896d18SJoonsoo Kim if (!page_ref_freeze(page, 2)) 64549d2e9ccSChristoph Lameter goto cannot_free; 646e286781dSNick Piggin /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */ 647e286781dSNick Piggin if (unlikely(PageDirty(page))) { 648fe896d18SJoonsoo Kim page_ref_unfreeze(page, 2); 64949d2e9ccSChristoph Lameter goto cannot_free; 650e286781dSNick Piggin } 65149d2e9ccSChristoph Lameter 65249d2e9ccSChristoph Lameter if (PageSwapCache(page)) { 65349d2e9ccSChristoph Lameter swp_entry_t swap = { .val = page_private(page) }; 6540a31bc97SJohannes Weiner mem_cgroup_swapout(page, swap); 65549d2e9ccSChristoph Lameter __delete_from_swap_cache(page); 656c4843a75SGreg Thelen spin_unlock_irqrestore(&mapping->tree_lock, flags); 6570a31bc97SJohannes Weiner swapcache_free(swap); 658e286781dSNick Piggin } else { 6596072d13cSLinus Torvalds void (*freepage)(struct page *); 660a528910eSJohannes Weiner void *shadow = NULL; 6616072d13cSLinus Torvalds 6626072d13cSLinus Torvalds freepage = mapping->a_ops->freepage; 663a528910eSJohannes Weiner /* 664a528910eSJohannes Weiner * Remember a shadow entry for reclaimed file cache in 665a528910eSJohannes Weiner * order to detect refaults, thus thrashing, later on. 666a528910eSJohannes Weiner * 667a528910eSJohannes Weiner * But don't store shadows in an address space that is 668a528910eSJohannes Weiner * already exiting. This is not just an optizimation, 669a528910eSJohannes Weiner * inode reclaim needs to empty out the radix tree or 670a528910eSJohannes Weiner * the nodes are lost. Don't plant shadows behind its 671a528910eSJohannes Weiner * back. 672f9fe48beSRoss Zwisler * 673f9fe48beSRoss Zwisler * We also don't store shadows for DAX mappings because the 674f9fe48beSRoss Zwisler * only page cache pages found in these are zero pages 675f9fe48beSRoss Zwisler * covering holes, and because we don't want to mix DAX 676f9fe48beSRoss Zwisler * exceptional entries and shadow exceptional entries in the 677f9fe48beSRoss Zwisler * same page_tree. 678a528910eSJohannes Weiner */ 679a528910eSJohannes Weiner if (reclaimed && page_is_file_cache(page) && 680f9fe48beSRoss Zwisler !mapping_exiting(mapping) && !dax_mapping(mapping)) 681a528910eSJohannes Weiner shadow = workingset_eviction(mapping, page); 68262cccb8cSJohannes Weiner __delete_from_page_cache(page, shadow); 683c4843a75SGreg Thelen spin_unlock_irqrestore(&mapping->tree_lock, flags); 6846072d13cSLinus Torvalds 6856072d13cSLinus Torvalds if (freepage != NULL) 6866072d13cSLinus Torvalds freepage(page); 687e286781dSNick Piggin } 688e286781dSNick Piggin 68949d2e9ccSChristoph Lameter return 1; 69049d2e9ccSChristoph Lameter 69149d2e9ccSChristoph Lameter cannot_free: 692c4843a75SGreg Thelen spin_unlock_irqrestore(&mapping->tree_lock, flags); 69349d2e9ccSChristoph Lameter return 0; 69449d2e9ccSChristoph Lameter } 69549d2e9ccSChristoph Lameter 6961da177e4SLinus Torvalds /* 697e286781dSNick Piggin * Attempt to detach a locked page from its ->mapping. If it is dirty or if 698e286781dSNick Piggin * someone else has a ref on the page, abort and return 0. If it was 699e286781dSNick Piggin * successfully detached, return 1. Assumes the caller has a single ref on 700e286781dSNick Piggin * this page. 701e286781dSNick Piggin */ 702e286781dSNick Piggin int remove_mapping(struct address_space *mapping, struct page *page) 703e286781dSNick Piggin { 704a528910eSJohannes Weiner if (__remove_mapping(mapping, page, false)) { 705e286781dSNick Piggin /* 706e286781dSNick Piggin * Unfreezing the refcount with 1 rather than 2 effectively 707e286781dSNick Piggin * drops the pagecache ref for us without requiring another 708e286781dSNick Piggin * atomic operation. 709e286781dSNick Piggin */ 710fe896d18SJoonsoo Kim page_ref_unfreeze(page, 1); 711e286781dSNick Piggin return 1; 712e286781dSNick Piggin } 713e286781dSNick Piggin return 0; 714e286781dSNick Piggin } 715e286781dSNick Piggin 716894bc310SLee Schermerhorn /** 717894bc310SLee Schermerhorn * putback_lru_page - put previously isolated page onto appropriate LRU list 718894bc310SLee Schermerhorn * @page: page to be put back to appropriate lru list 719894bc310SLee Schermerhorn * 720894bc310SLee Schermerhorn * Add previously isolated @page to appropriate LRU list. 721894bc310SLee Schermerhorn * Page may still be unevictable for other reasons. 722894bc310SLee Schermerhorn * 723894bc310SLee Schermerhorn * lru_lock must not be held, interrupts must be enabled. 724894bc310SLee Schermerhorn */ 725894bc310SLee Schermerhorn void putback_lru_page(struct page *page) 726894bc310SLee Schermerhorn { 7270ec3b74cSVlastimil Babka bool is_unevictable; 728bbfd28eeSLee Schermerhorn int was_unevictable = PageUnevictable(page); 729894bc310SLee Schermerhorn 730309381feSSasha Levin VM_BUG_ON_PAGE(PageLRU(page), page); 731894bc310SLee Schermerhorn 732894bc310SLee Schermerhorn redo: 733894bc310SLee Schermerhorn ClearPageUnevictable(page); 734894bc310SLee Schermerhorn 73539b5f29aSHugh Dickins if (page_evictable(page)) { 736894bc310SLee Schermerhorn /* 737894bc310SLee Schermerhorn * For evictable pages, we can use the cache. 738894bc310SLee Schermerhorn * In event of a race, worst case is we end up with an 739894bc310SLee Schermerhorn * unevictable page on [in]active list. 740894bc310SLee Schermerhorn * We know how to handle that. 741894bc310SLee Schermerhorn */ 7420ec3b74cSVlastimil Babka is_unevictable = false; 743c53954a0SMel Gorman lru_cache_add(page); 744894bc310SLee Schermerhorn } else { 745894bc310SLee Schermerhorn /* 746894bc310SLee Schermerhorn * Put unevictable pages directly on zone's unevictable 747894bc310SLee Schermerhorn * list. 748894bc310SLee Schermerhorn */ 7490ec3b74cSVlastimil Babka is_unevictable = true; 750894bc310SLee Schermerhorn add_page_to_unevictable_list(page); 7516a7b9548SJohannes Weiner /* 75221ee9f39SMinchan Kim * When racing with an mlock or AS_UNEVICTABLE clearing 75321ee9f39SMinchan Kim * (page is unlocked) make sure that if the other thread 75421ee9f39SMinchan Kim * does not observe our setting of PG_lru and fails 75524513264SHugh Dickins * isolation/check_move_unevictable_pages, 75621ee9f39SMinchan Kim * we see PG_mlocked/AS_UNEVICTABLE cleared below and move 7576a7b9548SJohannes Weiner * the page back to the evictable list. 7586a7b9548SJohannes Weiner * 75921ee9f39SMinchan Kim * The other side is TestClearPageMlocked() or shmem_lock(). 7606a7b9548SJohannes Weiner */ 7616a7b9548SJohannes Weiner smp_mb(); 762894bc310SLee Schermerhorn } 763894bc310SLee Schermerhorn 764894bc310SLee Schermerhorn /* 765894bc310SLee Schermerhorn * page's status can change while we move it among lru. If an evictable 766894bc310SLee Schermerhorn * page is on unevictable list, it never be freed. To avoid that, 767894bc310SLee Schermerhorn * check after we added it to the list, again. 768894bc310SLee Schermerhorn */ 7690ec3b74cSVlastimil Babka if (is_unevictable && page_evictable(page)) { 770894bc310SLee Schermerhorn if (!isolate_lru_page(page)) { 771894bc310SLee Schermerhorn put_page(page); 772894bc310SLee Schermerhorn goto redo; 773894bc310SLee Schermerhorn } 774894bc310SLee Schermerhorn /* This means someone else dropped this page from LRU 775894bc310SLee Schermerhorn * So, it will be freed or putback to LRU again. There is 776894bc310SLee Schermerhorn * nothing to do here. 777894bc310SLee Schermerhorn */ 778894bc310SLee Schermerhorn } 779894bc310SLee Schermerhorn 7800ec3b74cSVlastimil Babka if (was_unevictable && !is_unevictable) 781bbfd28eeSLee Schermerhorn count_vm_event(UNEVICTABLE_PGRESCUED); 7820ec3b74cSVlastimil Babka else if (!was_unevictable && is_unevictable) 783bbfd28eeSLee Schermerhorn count_vm_event(UNEVICTABLE_PGCULLED); 784bbfd28eeSLee Schermerhorn 785894bc310SLee Schermerhorn put_page(page); /* drop ref from isolate */ 786894bc310SLee Schermerhorn } 787894bc310SLee Schermerhorn 788dfc8d636SJohannes Weiner enum page_references { 789dfc8d636SJohannes Weiner PAGEREF_RECLAIM, 790dfc8d636SJohannes Weiner PAGEREF_RECLAIM_CLEAN, 79164574746SJohannes Weiner PAGEREF_KEEP, 792dfc8d636SJohannes Weiner PAGEREF_ACTIVATE, 793dfc8d636SJohannes Weiner }; 794dfc8d636SJohannes Weiner 795dfc8d636SJohannes Weiner static enum page_references page_check_references(struct page *page, 796dfc8d636SJohannes Weiner struct scan_control *sc) 797dfc8d636SJohannes Weiner { 79864574746SJohannes Weiner int referenced_ptes, referenced_page; 799dfc8d636SJohannes Weiner unsigned long vm_flags; 800dfc8d636SJohannes Weiner 801c3ac9a8aSJohannes Weiner referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup, 802c3ac9a8aSJohannes Weiner &vm_flags); 80364574746SJohannes Weiner referenced_page = TestClearPageReferenced(page); 804dfc8d636SJohannes Weiner 805dfc8d636SJohannes Weiner /* 806dfc8d636SJohannes Weiner * Mlock lost the isolation race with us. Let try_to_unmap() 807dfc8d636SJohannes Weiner * move the page to the unevictable list. 808dfc8d636SJohannes Weiner */ 809dfc8d636SJohannes Weiner if (vm_flags & VM_LOCKED) 810dfc8d636SJohannes Weiner return PAGEREF_RECLAIM; 811dfc8d636SJohannes Weiner 81264574746SJohannes Weiner if (referenced_ptes) { 813e4898273SMichal Hocko if (PageSwapBacked(page)) 81464574746SJohannes Weiner return PAGEREF_ACTIVATE; 81564574746SJohannes Weiner /* 81664574746SJohannes Weiner * All mapped pages start out with page table 81764574746SJohannes Weiner * references from the instantiating fault, so we need 81864574746SJohannes Weiner * to look twice if a mapped file page is used more 81964574746SJohannes Weiner * than once. 82064574746SJohannes Weiner * 82164574746SJohannes Weiner * Mark it and spare it for another trip around the 82264574746SJohannes Weiner * inactive list. Another page table reference will 82364574746SJohannes Weiner * lead to its activation. 82464574746SJohannes Weiner * 82564574746SJohannes Weiner * Note: the mark is set for activated pages as well 82664574746SJohannes Weiner * so that recently deactivated but used pages are 82764574746SJohannes Weiner * quickly recovered. 82864574746SJohannes Weiner */ 82964574746SJohannes Weiner SetPageReferenced(page); 83064574746SJohannes Weiner 83134dbc67aSKonstantin Khlebnikov if (referenced_page || referenced_ptes > 1) 832dfc8d636SJohannes Weiner return PAGEREF_ACTIVATE; 833dfc8d636SJohannes Weiner 834c909e993SKonstantin Khlebnikov /* 835c909e993SKonstantin Khlebnikov * Activate file-backed executable pages after first usage. 836c909e993SKonstantin Khlebnikov */ 837c909e993SKonstantin Khlebnikov if (vm_flags & VM_EXEC) 838c909e993SKonstantin Khlebnikov return PAGEREF_ACTIVATE; 839c909e993SKonstantin Khlebnikov 84064574746SJohannes Weiner return PAGEREF_KEEP; 84164574746SJohannes Weiner } 84264574746SJohannes Weiner 843dfc8d636SJohannes Weiner /* Reclaim if clean, defer dirty pages to writeback */ 8442e30244aSKOSAKI Motohiro if (referenced_page && !PageSwapBacked(page)) 845dfc8d636SJohannes Weiner return PAGEREF_RECLAIM_CLEAN; 84664574746SJohannes Weiner 84764574746SJohannes Weiner return PAGEREF_RECLAIM; 848dfc8d636SJohannes Weiner } 849dfc8d636SJohannes Weiner 850e2be15f6SMel Gorman /* Check if a page is dirty or under writeback */ 851e2be15f6SMel Gorman static void page_check_dirty_writeback(struct page *page, 852e2be15f6SMel Gorman bool *dirty, bool *writeback) 853e2be15f6SMel Gorman { 854b4597226SMel Gorman struct address_space *mapping; 855b4597226SMel Gorman 856e2be15f6SMel Gorman /* 857e2be15f6SMel Gorman * Anonymous pages are not handled by flushers and must be written 858e2be15f6SMel Gorman * from reclaim context. Do not stall reclaim based on them 859e2be15f6SMel Gorman */ 860e2be15f6SMel Gorman if (!page_is_file_cache(page)) { 861e2be15f6SMel Gorman *dirty = false; 862e2be15f6SMel Gorman *writeback = false; 863e2be15f6SMel Gorman return; 864e2be15f6SMel Gorman } 865e2be15f6SMel Gorman 866e2be15f6SMel Gorman /* By default assume that the page flags are accurate */ 867e2be15f6SMel Gorman *dirty = PageDirty(page); 868e2be15f6SMel Gorman *writeback = PageWriteback(page); 869b4597226SMel Gorman 870b4597226SMel Gorman /* Verify dirty/writeback state if the filesystem supports it */ 871b4597226SMel Gorman if (!page_has_private(page)) 872b4597226SMel Gorman return; 873b4597226SMel Gorman 874b4597226SMel Gorman mapping = page_mapping(page); 875b4597226SMel Gorman if (mapping && mapping->a_ops->is_dirty_writeback) 876b4597226SMel Gorman mapping->a_ops->is_dirty_writeback(page, dirty, writeback); 877e2be15f6SMel Gorman } 878e2be15f6SMel Gorman 879e286781dSNick Piggin /* 8801742f19fSAndrew Morton * shrink_page_list() returns the number of reclaimed pages 8811da177e4SLinus Torvalds */ 8821742f19fSAndrew Morton static unsigned long shrink_page_list(struct list_head *page_list, 883599d0c95SMel Gorman struct pglist_data *pgdat, 884f84f6e2bSMel Gorman struct scan_control *sc, 88502c6de8dSMinchan Kim enum ttu_flags ttu_flags, 8868e950282SMel Gorman unsigned long *ret_nr_dirty, 887d43006d5SMel Gorman unsigned long *ret_nr_unqueued_dirty, 8888e950282SMel Gorman unsigned long *ret_nr_congested, 88902c6de8dSMinchan Kim unsigned long *ret_nr_writeback, 890b1a6f21eSMel Gorman unsigned long *ret_nr_immediate, 89102c6de8dSMinchan Kim bool force_reclaim) 8921da177e4SLinus Torvalds { 8931da177e4SLinus Torvalds LIST_HEAD(ret_pages); 894abe4c3b5SMel Gorman LIST_HEAD(free_pages); 8951da177e4SLinus Torvalds int pgactivate = 0; 896d43006d5SMel Gorman unsigned long nr_unqueued_dirty = 0; 8970e093d99SMel Gorman unsigned long nr_dirty = 0; 8980e093d99SMel Gorman unsigned long nr_congested = 0; 89905ff5137SAndrew Morton unsigned long nr_reclaimed = 0; 90092df3a72SMel Gorman unsigned long nr_writeback = 0; 901b1a6f21eSMel Gorman unsigned long nr_immediate = 0; 9021da177e4SLinus Torvalds 9031da177e4SLinus Torvalds cond_resched(); 9041da177e4SLinus Torvalds 9051da177e4SLinus Torvalds while (!list_empty(page_list)) { 9061da177e4SLinus Torvalds struct address_space *mapping; 9071da177e4SLinus Torvalds struct page *page; 9081da177e4SLinus Torvalds int may_enter_fs; 90902c6de8dSMinchan Kim enum page_references references = PAGEREF_RECLAIM_CLEAN; 910e2be15f6SMel Gorman bool dirty, writeback; 911854e9ed0SMinchan Kim bool lazyfree = false; 912854e9ed0SMinchan Kim int ret = SWAP_SUCCESS; 9131da177e4SLinus Torvalds 9141da177e4SLinus Torvalds cond_resched(); 9151da177e4SLinus Torvalds 9161da177e4SLinus Torvalds page = lru_to_page(page_list); 9171da177e4SLinus Torvalds list_del(&page->lru); 9181da177e4SLinus Torvalds 919529ae9aaSNick Piggin if (!trylock_page(page)) 9201da177e4SLinus Torvalds goto keep; 9211da177e4SLinus Torvalds 922309381feSSasha Levin VM_BUG_ON_PAGE(PageActive(page), page); 9231da177e4SLinus Torvalds 9241da177e4SLinus Torvalds sc->nr_scanned++; 92580e43426SChristoph Lameter 92639b5f29aSHugh Dickins if (unlikely(!page_evictable(page))) 927b291f000SNick Piggin goto cull_mlocked; 928894bc310SLee Schermerhorn 929a6dc60f8SJohannes Weiner if (!sc->may_unmap && page_mapped(page)) 93080e43426SChristoph Lameter goto keep_locked; 93180e43426SChristoph Lameter 9321da177e4SLinus Torvalds /* Double the slab pressure for mapped and swapcache pages */ 9331da177e4SLinus Torvalds if (page_mapped(page) || PageSwapCache(page)) 9341da177e4SLinus Torvalds sc->nr_scanned++; 9351da177e4SLinus Torvalds 936c661b078SAndy Whitcroft may_enter_fs = (sc->gfp_mask & __GFP_FS) || 937c661b078SAndy Whitcroft (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); 938c661b078SAndy Whitcroft 939e62e384eSMichal Hocko /* 940e2be15f6SMel Gorman * The number of dirty pages determines if a zone is marked 941e2be15f6SMel Gorman * reclaim_congested which affects wait_iff_congested. kswapd 942e2be15f6SMel Gorman * will stall and start writing pages if the tail of the LRU 943e2be15f6SMel Gorman * is all dirty unqueued pages. 944e2be15f6SMel Gorman */ 945e2be15f6SMel Gorman page_check_dirty_writeback(page, &dirty, &writeback); 946e2be15f6SMel Gorman if (dirty || writeback) 947e2be15f6SMel Gorman nr_dirty++; 948e2be15f6SMel Gorman 949e2be15f6SMel Gorman if (dirty && !writeback) 950e2be15f6SMel Gorman nr_unqueued_dirty++; 951e2be15f6SMel Gorman 952d04e8acdSMel Gorman /* 953d04e8acdSMel Gorman * Treat this page as congested if the underlying BDI is or if 954d04e8acdSMel Gorman * pages are cycling through the LRU so quickly that the 955d04e8acdSMel Gorman * pages marked for immediate reclaim are making it to the 956d04e8acdSMel Gorman * end of the LRU a second time. 957d04e8acdSMel Gorman */ 958e2be15f6SMel Gorman mapping = page_mapping(page); 9591da58ee2SJamie Liu if (((dirty || writeback) && mapping && 960703c2708STejun Heo inode_write_congested(mapping->host)) || 961d04e8acdSMel Gorman (writeback && PageReclaim(page))) 962e2be15f6SMel Gorman nr_congested++; 963e2be15f6SMel Gorman 964e2be15f6SMel Gorman /* 965283aba9fSMel Gorman * If a page at the tail of the LRU is under writeback, there 966283aba9fSMel Gorman * are three cases to consider. 967e62e384eSMichal Hocko * 968283aba9fSMel Gorman * 1) If reclaim is encountering an excessive number of pages 969283aba9fSMel Gorman * under writeback and this page is both under writeback and 970283aba9fSMel Gorman * PageReclaim then it indicates that pages are being queued 971283aba9fSMel Gorman * for IO but are being recycled through the LRU before the 972283aba9fSMel Gorman * IO can complete. Waiting on the page itself risks an 973283aba9fSMel Gorman * indefinite stall if it is impossible to writeback the 974283aba9fSMel Gorman * page due to IO error or disconnected storage so instead 975b1a6f21eSMel Gorman * note that the LRU is being scanned too quickly and the 976b1a6f21eSMel Gorman * caller can stall after page list has been processed. 977c3b94f44SHugh Dickins * 97897c9341fSTejun Heo * 2) Global or new memcg reclaim encounters a page that is 979ecf5fc6eSMichal Hocko * not marked for immediate reclaim, or the caller does not 980ecf5fc6eSMichal Hocko * have __GFP_FS (or __GFP_IO if it's simply going to swap, 981ecf5fc6eSMichal Hocko * not to fs). In this case mark the page for immediate 98297c9341fSTejun Heo * reclaim and continue scanning. 983283aba9fSMel Gorman * 984ecf5fc6eSMichal Hocko * Require may_enter_fs because we would wait on fs, which 985ecf5fc6eSMichal Hocko * may not have submitted IO yet. And the loop driver might 986283aba9fSMel Gorman * enter reclaim, and deadlock if it waits on a page for 987283aba9fSMel Gorman * which it is needed to do the write (loop masks off 988283aba9fSMel Gorman * __GFP_IO|__GFP_FS for this reason); but more thought 989283aba9fSMel Gorman * would probably show more reasons. 990283aba9fSMel Gorman * 9917fadc820SHugh Dickins * 3) Legacy memcg encounters a page that is already marked 992283aba9fSMel Gorman * PageReclaim. memcg does not have any dirty pages 993283aba9fSMel Gorman * throttling so we could easily OOM just because too many 994283aba9fSMel Gorman * pages are in writeback and there is nothing else to 995283aba9fSMel Gorman * reclaim. Wait for the writeback to complete. 996e62e384eSMichal Hocko */ 997283aba9fSMel Gorman if (PageWriteback(page)) { 998283aba9fSMel Gorman /* Case 1 above */ 999283aba9fSMel Gorman if (current_is_kswapd() && 1000283aba9fSMel Gorman PageReclaim(page) && 1001599d0c95SMel Gorman test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { 1002b1a6f21eSMel Gorman nr_immediate++; 1003b1a6f21eSMel Gorman goto keep_locked; 1004283aba9fSMel Gorman 1005283aba9fSMel Gorman /* Case 2 above */ 100697c9341fSTejun Heo } else if (sane_reclaim(sc) || 1007ecf5fc6eSMichal Hocko !PageReclaim(page) || !may_enter_fs) { 1008c3b94f44SHugh Dickins /* 1009c3b94f44SHugh Dickins * This is slightly racy - end_page_writeback() 1010c3b94f44SHugh Dickins * might have just cleared PageReclaim, then 1011c3b94f44SHugh Dickins * setting PageReclaim here end up interpreted 1012c3b94f44SHugh Dickins * as PageReadahead - but that does not matter 1013c3b94f44SHugh Dickins * enough to care. What we do want is for this 1014c3b94f44SHugh Dickins * page to have PageReclaim set next time memcg 1015c3b94f44SHugh Dickins * reclaim reaches the tests above, so it will 1016c3b94f44SHugh Dickins * then wait_on_page_writeback() to avoid OOM; 1017c3b94f44SHugh Dickins * and it's also appropriate in global reclaim. 1018c3b94f44SHugh Dickins */ 1019c3b94f44SHugh Dickins SetPageReclaim(page); 102092df3a72SMel Gorman nr_writeback++; 1021c3b94f44SHugh Dickins goto keep_locked; 1022283aba9fSMel Gorman 1023283aba9fSMel Gorman /* Case 3 above */ 1024283aba9fSMel Gorman } else { 10257fadc820SHugh Dickins unlock_page(page); 1026c3b94f44SHugh Dickins wait_on_page_writeback(page); 10277fadc820SHugh Dickins /* then go back and try same page again */ 10287fadc820SHugh Dickins list_add_tail(&page->lru, page_list); 10297fadc820SHugh Dickins continue; 1030e62e384eSMichal Hocko } 1031283aba9fSMel Gorman } 10321da177e4SLinus Torvalds 103302c6de8dSMinchan Kim if (!force_reclaim) 10346a18adb3SKonstantin Khlebnikov references = page_check_references(page, sc); 103502c6de8dSMinchan Kim 1036dfc8d636SJohannes Weiner switch (references) { 1037dfc8d636SJohannes Weiner case PAGEREF_ACTIVATE: 10381da177e4SLinus Torvalds goto activate_locked; 103964574746SJohannes Weiner case PAGEREF_KEEP: 104064574746SJohannes Weiner goto keep_locked; 1041dfc8d636SJohannes Weiner case PAGEREF_RECLAIM: 1042dfc8d636SJohannes Weiner case PAGEREF_RECLAIM_CLEAN: 1043dfc8d636SJohannes Weiner ; /* try to reclaim the page below */ 1044dfc8d636SJohannes Weiner } 10451da177e4SLinus Torvalds 10461da177e4SLinus Torvalds /* 10471da177e4SLinus Torvalds * Anonymous process memory has backing store? 10481da177e4SLinus Torvalds * Try to allocate it some swap space here. 10491da177e4SLinus Torvalds */ 1050b291f000SNick Piggin if (PageAnon(page) && !PageSwapCache(page)) { 105163eb6b93SHugh Dickins if (!(sc->gfp_mask & __GFP_IO)) 105263eb6b93SHugh Dickins goto keep_locked; 10535bc7b8acSShaohua Li if (!add_to_swap(page, page_list)) 10541da177e4SLinus Torvalds goto activate_locked; 1055854e9ed0SMinchan Kim lazyfree = true; 105663eb6b93SHugh Dickins may_enter_fs = 1; 10571da177e4SLinus Torvalds 1058e2be15f6SMel Gorman /* Adding to swap updated mapping */ 10591da177e4SLinus Torvalds mapping = page_mapping(page); 10607751b2daSKirill A. Shutemov } else if (unlikely(PageTransHuge(page))) { 10617751b2daSKirill A. Shutemov /* Split file THP */ 10627751b2daSKirill A. Shutemov if (split_huge_page_to_list(page, page_list)) 10637751b2daSKirill A. Shutemov goto keep_locked; 1064e2be15f6SMel Gorman } 10651da177e4SLinus Torvalds 10667751b2daSKirill A. Shutemov VM_BUG_ON_PAGE(PageTransHuge(page), page); 10677751b2daSKirill A. Shutemov 10681da177e4SLinus Torvalds /* 10691da177e4SLinus Torvalds * The page is mapped into the page tables of one or more 10701da177e4SLinus Torvalds * processes. Try to unmap it here. 10711da177e4SLinus Torvalds */ 10721da177e4SLinus Torvalds if (page_mapped(page) && mapping) { 1073854e9ed0SMinchan Kim switch (ret = try_to_unmap(page, lazyfree ? 1074854e9ed0SMinchan Kim (ttu_flags | TTU_BATCH_FLUSH | TTU_LZFREE) : 1075854e9ed0SMinchan Kim (ttu_flags | TTU_BATCH_FLUSH))) { 10761da177e4SLinus Torvalds case SWAP_FAIL: 10771da177e4SLinus Torvalds goto activate_locked; 10781da177e4SLinus Torvalds case SWAP_AGAIN: 10791da177e4SLinus Torvalds goto keep_locked; 1080b291f000SNick Piggin case SWAP_MLOCK: 1081b291f000SNick Piggin goto cull_mlocked; 1082854e9ed0SMinchan Kim case SWAP_LZFREE: 1083854e9ed0SMinchan Kim goto lazyfree; 10841da177e4SLinus Torvalds case SWAP_SUCCESS: 10851da177e4SLinus Torvalds ; /* try to free the page below */ 10861da177e4SLinus Torvalds } 10871da177e4SLinus Torvalds } 10881da177e4SLinus Torvalds 10891da177e4SLinus Torvalds if (PageDirty(page)) { 1090ee72886dSMel Gorman /* 1091ee72886dSMel Gorman * Only kswapd can writeback filesystem pages to 1092d43006d5SMel Gorman * avoid risk of stack overflow but only writeback 1093d43006d5SMel Gorman * if many dirty pages have been encountered. 1094ee72886dSMel Gorman */ 1095f84f6e2bSMel Gorman if (page_is_file_cache(page) && 10969e3b2f8cSKonstantin Khlebnikov (!current_is_kswapd() || 1097599d0c95SMel Gorman !test_bit(PGDAT_DIRTY, &pgdat->flags))) { 109849ea7eb6SMel Gorman /* 109949ea7eb6SMel Gorman * Immediately reclaim when written back. 110049ea7eb6SMel Gorman * Similar in principal to deactivate_page() 110149ea7eb6SMel Gorman * except we already have the page isolated 110249ea7eb6SMel Gorman * and know it's dirty 110349ea7eb6SMel Gorman */ 1104c4a25635SMel Gorman inc_node_page_state(page, NR_VMSCAN_IMMEDIATE); 110549ea7eb6SMel Gorman SetPageReclaim(page); 110649ea7eb6SMel Gorman 1107ee72886dSMel Gorman goto keep_locked; 1108ee72886dSMel Gorman } 1109ee72886dSMel Gorman 1110dfc8d636SJohannes Weiner if (references == PAGEREF_RECLAIM_CLEAN) 11111da177e4SLinus Torvalds goto keep_locked; 11124dd4b920SAndrew Morton if (!may_enter_fs) 11131da177e4SLinus Torvalds goto keep_locked; 111452a8363eSChristoph Lameter if (!sc->may_writepage) 11151da177e4SLinus Torvalds goto keep_locked; 11161da177e4SLinus Torvalds 1117d950c947SMel Gorman /* 1118d950c947SMel Gorman * Page is dirty. Flush the TLB if a writable entry 1119d950c947SMel Gorman * potentially exists to avoid CPU writes after IO 1120d950c947SMel Gorman * starts and then write it out here. 1121d950c947SMel Gorman */ 1122d950c947SMel Gorman try_to_unmap_flush_dirty(); 11237d3579e8SKOSAKI Motohiro switch (pageout(page, mapping, sc)) { 11241da177e4SLinus Torvalds case PAGE_KEEP: 11251da177e4SLinus Torvalds goto keep_locked; 11261da177e4SLinus Torvalds case PAGE_ACTIVATE: 11271da177e4SLinus Torvalds goto activate_locked; 11281da177e4SLinus Torvalds case PAGE_SUCCESS: 11297d3579e8SKOSAKI Motohiro if (PageWriteback(page)) 113041ac1999SMel Gorman goto keep; 11317d3579e8SKOSAKI Motohiro if (PageDirty(page)) 11321da177e4SLinus Torvalds goto keep; 11337d3579e8SKOSAKI Motohiro 11341da177e4SLinus Torvalds /* 11351da177e4SLinus Torvalds * A synchronous write - probably a ramdisk. Go 11361da177e4SLinus Torvalds * ahead and try to reclaim the page. 11371da177e4SLinus Torvalds */ 1138529ae9aaSNick Piggin if (!trylock_page(page)) 11391da177e4SLinus Torvalds goto keep; 11401da177e4SLinus Torvalds if (PageDirty(page) || PageWriteback(page)) 11411da177e4SLinus Torvalds goto keep_locked; 11421da177e4SLinus Torvalds mapping = page_mapping(page); 11431da177e4SLinus Torvalds case PAGE_CLEAN: 11441da177e4SLinus Torvalds ; /* try to free the page below */ 11451da177e4SLinus Torvalds } 11461da177e4SLinus Torvalds } 11471da177e4SLinus Torvalds 11481da177e4SLinus Torvalds /* 11491da177e4SLinus Torvalds * If the page has buffers, try to free the buffer mappings 11501da177e4SLinus Torvalds * associated with this page. If we succeed we try to free 11511da177e4SLinus Torvalds * the page as well. 11521da177e4SLinus Torvalds * 11531da177e4SLinus Torvalds * We do this even if the page is PageDirty(). 11541da177e4SLinus Torvalds * try_to_release_page() does not perform I/O, but it is 11551da177e4SLinus Torvalds * possible for a page to have PageDirty set, but it is actually 11561da177e4SLinus Torvalds * clean (all its buffers are clean). This happens if the 11571da177e4SLinus Torvalds * buffers were written out directly, with submit_bh(). ext3 11581da177e4SLinus Torvalds * will do this, as well as the blockdev mapping. 11591da177e4SLinus Torvalds * try_to_release_page() will discover that cleanness and will 11601da177e4SLinus Torvalds * drop the buffers and mark the page clean - it can be freed. 11611da177e4SLinus Torvalds * 11621da177e4SLinus Torvalds * Rarely, pages can have buffers and no ->mapping. These are 11631da177e4SLinus Torvalds * the pages which were not successfully invalidated in 11641da177e4SLinus Torvalds * truncate_complete_page(). We try to drop those buffers here 11651da177e4SLinus Torvalds * and if that worked, and the page is no longer mapped into 11661da177e4SLinus Torvalds * process address space (page_count == 1) it can be freed. 11671da177e4SLinus Torvalds * Otherwise, leave the page on the LRU so it is swappable. 11681da177e4SLinus Torvalds */ 1169266cf658SDavid Howells if (page_has_private(page)) { 11701da177e4SLinus Torvalds if (!try_to_release_page(page, sc->gfp_mask)) 11711da177e4SLinus Torvalds goto activate_locked; 1172e286781dSNick Piggin if (!mapping && page_count(page) == 1) { 1173e286781dSNick Piggin unlock_page(page); 1174e286781dSNick Piggin if (put_page_testzero(page)) 11751da177e4SLinus Torvalds goto free_it; 1176e286781dSNick Piggin else { 1177e286781dSNick Piggin /* 1178e286781dSNick Piggin * rare race with speculative reference. 1179e286781dSNick Piggin * the speculative reference will free 1180e286781dSNick Piggin * this page shortly, so we may 1181e286781dSNick Piggin * increment nr_reclaimed here (and 1182e286781dSNick Piggin * leave it off the LRU). 1183e286781dSNick Piggin */ 1184e286781dSNick Piggin nr_reclaimed++; 1185e286781dSNick Piggin continue; 1186e286781dSNick Piggin } 1187e286781dSNick Piggin } 11881da177e4SLinus Torvalds } 11891da177e4SLinus Torvalds 1190854e9ed0SMinchan Kim lazyfree: 1191a528910eSJohannes Weiner if (!mapping || !__remove_mapping(mapping, page, true)) 119249d2e9ccSChristoph Lameter goto keep_locked; 11931da177e4SLinus Torvalds 1194a978d6f5SNick Piggin /* 1195a978d6f5SNick Piggin * At this point, we have no other references and there is 1196a978d6f5SNick Piggin * no way to pick any more up (removed from LRU, removed 1197a978d6f5SNick Piggin * from pagecache). Can use non-atomic bitops now (and 1198a978d6f5SNick Piggin * we obviously don't have to worry about waking up a process 1199a978d6f5SNick Piggin * waiting on the page lock, because there are no references. 1200a978d6f5SNick Piggin */ 120148c935adSKirill A. Shutemov __ClearPageLocked(page); 1202e286781dSNick Piggin free_it: 1203854e9ed0SMinchan Kim if (ret == SWAP_LZFREE) 1204854e9ed0SMinchan Kim count_vm_event(PGLAZYFREED); 1205854e9ed0SMinchan Kim 120605ff5137SAndrew Morton nr_reclaimed++; 1207abe4c3b5SMel Gorman 1208abe4c3b5SMel Gorman /* 1209abe4c3b5SMel Gorman * Is there need to periodically free_page_list? It would 1210abe4c3b5SMel Gorman * appear not as the counts should be low 1211abe4c3b5SMel Gorman */ 1212abe4c3b5SMel Gorman list_add(&page->lru, &free_pages); 12131da177e4SLinus Torvalds continue; 12141da177e4SLinus Torvalds 1215b291f000SNick Piggin cull_mlocked: 121663d6c5adSHugh Dickins if (PageSwapCache(page)) 121763d6c5adSHugh Dickins try_to_free_swap(page); 1218b291f000SNick Piggin unlock_page(page); 1219c54839a7SJaewon Kim list_add(&page->lru, &ret_pages); 1220b291f000SNick Piggin continue; 1221b291f000SNick Piggin 12221da177e4SLinus Torvalds activate_locked: 122368a22394SRik van Riel /* Not a candidate for swapping, so reclaim swap space. */ 12245ccc5abaSVladimir Davydov if (PageSwapCache(page) && mem_cgroup_swap_full(page)) 1225a2c43eedSHugh Dickins try_to_free_swap(page); 1226309381feSSasha Levin VM_BUG_ON_PAGE(PageActive(page), page); 12271da177e4SLinus Torvalds SetPageActive(page); 12281da177e4SLinus Torvalds pgactivate++; 12291da177e4SLinus Torvalds keep_locked: 12301da177e4SLinus Torvalds unlock_page(page); 12311da177e4SLinus Torvalds keep: 12321da177e4SLinus Torvalds list_add(&page->lru, &ret_pages); 1233309381feSSasha Levin VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page); 12341da177e4SLinus Torvalds } 1235abe4c3b5SMel Gorman 1236747db954SJohannes Weiner mem_cgroup_uncharge_list(&free_pages); 123772b252aeSMel Gorman try_to_unmap_flush(); 1238b745bc85SMel Gorman free_hot_cold_page_list(&free_pages, true); 1239abe4c3b5SMel Gorman 12401da177e4SLinus Torvalds list_splice(&ret_pages, page_list); 1241f8891e5eSChristoph Lameter count_vm_events(PGACTIVATE, pgactivate); 12420a31bc97SJohannes Weiner 12438e950282SMel Gorman *ret_nr_dirty += nr_dirty; 12448e950282SMel Gorman *ret_nr_congested += nr_congested; 1245d43006d5SMel Gorman *ret_nr_unqueued_dirty += nr_unqueued_dirty; 124692df3a72SMel Gorman *ret_nr_writeback += nr_writeback; 1247b1a6f21eSMel Gorman *ret_nr_immediate += nr_immediate; 124805ff5137SAndrew Morton return nr_reclaimed; 12491da177e4SLinus Torvalds } 12501da177e4SLinus Torvalds 125102c6de8dSMinchan Kim unsigned long reclaim_clean_pages_from_list(struct zone *zone, 125202c6de8dSMinchan Kim struct list_head *page_list) 125302c6de8dSMinchan Kim { 125402c6de8dSMinchan Kim struct scan_control sc = { 125502c6de8dSMinchan Kim .gfp_mask = GFP_KERNEL, 125602c6de8dSMinchan Kim .priority = DEF_PRIORITY, 125702c6de8dSMinchan Kim .may_unmap = 1, 125802c6de8dSMinchan Kim }; 12598e950282SMel Gorman unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5; 126002c6de8dSMinchan Kim struct page *page, *next; 126102c6de8dSMinchan Kim LIST_HEAD(clean_pages); 126202c6de8dSMinchan Kim 126302c6de8dSMinchan Kim list_for_each_entry_safe(page, next, page_list, lru) { 1264117aad1eSRafael Aquini if (page_is_file_cache(page) && !PageDirty(page) && 1265b1123ea6SMinchan Kim !__PageMovable(page)) { 126602c6de8dSMinchan Kim ClearPageActive(page); 126702c6de8dSMinchan Kim list_move(&page->lru, &clean_pages); 126802c6de8dSMinchan Kim } 126902c6de8dSMinchan Kim } 127002c6de8dSMinchan Kim 1271599d0c95SMel Gorman ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc, 127202c6de8dSMinchan Kim TTU_UNMAP|TTU_IGNORE_ACCESS, 12738e950282SMel Gorman &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true); 127402c6de8dSMinchan Kim list_splice(&clean_pages, page_list); 1275599d0c95SMel Gorman mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret); 127602c6de8dSMinchan Kim return ret; 127702c6de8dSMinchan Kim } 127802c6de8dSMinchan Kim 12795ad333ebSAndy Whitcroft /* 12805ad333ebSAndy Whitcroft * Attempt to remove the specified page from its LRU. Only take this page 12815ad333ebSAndy Whitcroft * if it is of the appropriate PageActive status. Pages which are being 12825ad333ebSAndy Whitcroft * freed elsewhere are also ignored. 12835ad333ebSAndy Whitcroft * 12845ad333ebSAndy Whitcroft * page: page to consider 12855ad333ebSAndy Whitcroft * mode: one of the LRU isolation modes defined above 12865ad333ebSAndy Whitcroft * 12875ad333ebSAndy Whitcroft * returns 0 on success, -ve errno on failure. 12885ad333ebSAndy Whitcroft */ 1289f3fd4a61SKonstantin Khlebnikov int __isolate_lru_page(struct page *page, isolate_mode_t mode) 12905ad333ebSAndy Whitcroft { 12915ad333ebSAndy Whitcroft int ret = -EINVAL; 12925ad333ebSAndy Whitcroft 12935ad333ebSAndy Whitcroft /* Only take pages on the LRU. */ 12945ad333ebSAndy Whitcroft if (!PageLRU(page)) 12955ad333ebSAndy Whitcroft return ret; 12965ad333ebSAndy Whitcroft 1297e46a2879SMinchan Kim /* Compaction should not handle unevictable pages but CMA can do so */ 1298e46a2879SMinchan Kim if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE)) 1299894bc310SLee Schermerhorn return ret; 1300894bc310SLee Schermerhorn 13015ad333ebSAndy Whitcroft ret = -EBUSY; 130208e552c6SKAMEZAWA Hiroyuki 1303c8244935SMel Gorman /* 1304c8244935SMel Gorman * To minimise LRU disruption, the caller can indicate that it only 1305c8244935SMel Gorman * wants to isolate pages it will be able to operate on without 1306c8244935SMel Gorman * blocking - clean pages for the most part. 1307c8244935SMel Gorman * 1308c8244935SMel Gorman * ISOLATE_CLEAN means that only clean pages should be isolated. This 1309c8244935SMel Gorman * is used by reclaim when it is cannot write to backing storage 1310c8244935SMel Gorman * 1311c8244935SMel Gorman * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages 1312c8244935SMel Gorman * that it is possible to migrate without blocking 1313c8244935SMel Gorman */ 1314c8244935SMel Gorman if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) { 1315c8244935SMel Gorman /* All the caller can do on PageWriteback is block */ 1316c8244935SMel Gorman if (PageWriteback(page)) 131739deaf85SMinchan Kim return ret; 131839deaf85SMinchan Kim 1319c8244935SMel Gorman if (PageDirty(page)) { 1320c8244935SMel Gorman struct address_space *mapping; 1321c8244935SMel Gorman 1322c8244935SMel Gorman /* ISOLATE_CLEAN means only clean pages */ 1323c8244935SMel Gorman if (mode & ISOLATE_CLEAN) 1324c8244935SMel Gorman return ret; 1325c8244935SMel Gorman 1326c8244935SMel Gorman /* 1327c8244935SMel Gorman * Only pages without mappings or that have a 1328c8244935SMel Gorman * ->migratepage callback are possible to migrate 1329c8244935SMel Gorman * without blocking 1330c8244935SMel Gorman */ 1331c8244935SMel Gorman mapping = page_mapping(page); 1332c8244935SMel Gorman if (mapping && !mapping->a_ops->migratepage) 1333c8244935SMel Gorman return ret; 1334c8244935SMel Gorman } 1335c8244935SMel Gorman } 1336c8244935SMel Gorman 1337f80c0673SMinchan Kim if ((mode & ISOLATE_UNMAPPED) && page_mapped(page)) 1338f80c0673SMinchan Kim return ret; 1339f80c0673SMinchan Kim 13405ad333ebSAndy Whitcroft if (likely(get_page_unless_zero(page))) { 13415ad333ebSAndy Whitcroft /* 13425ad333ebSAndy Whitcroft * Be careful not to clear PageLRU until after we're 13435ad333ebSAndy Whitcroft * sure the page is not being freed elsewhere -- the 13445ad333ebSAndy Whitcroft * page release code relies on it. 13455ad333ebSAndy Whitcroft */ 13465ad333ebSAndy Whitcroft ClearPageLRU(page); 13475ad333ebSAndy Whitcroft ret = 0; 13485ad333ebSAndy Whitcroft } 13495ad333ebSAndy Whitcroft 13505ad333ebSAndy Whitcroft return ret; 13515ad333ebSAndy Whitcroft } 13525ad333ebSAndy Whitcroft 13537ee36a14SMel Gorman 13547ee36a14SMel Gorman /* 13557ee36a14SMel Gorman * Update LRU sizes after isolating pages. The LRU size updates must 13567ee36a14SMel Gorman * be complete before mem_cgroup_update_lru_size due to a santity check. 13577ee36a14SMel Gorman */ 13587ee36a14SMel Gorman static __always_inline void update_lru_sizes(struct lruvec *lruvec, 13597ee36a14SMel Gorman enum lru_list lru, unsigned long *nr_zone_taken, 13607ee36a14SMel Gorman unsigned long nr_taken) 13617ee36a14SMel Gorman { 13627ee36a14SMel Gorman #ifdef CONFIG_HIGHMEM 13637ee36a14SMel Gorman int zid; 13647ee36a14SMel Gorman 13657ee36a14SMel Gorman /* 13667ee36a14SMel Gorman * Highmem has separate accounting for highmem pages so each zone 13677ee36a14SMel Gorman * is updated separately. 13687ee36a14SMel Gorman */ 13697ee36a14SMel Gorman for (zid = 0; zid < MAX_NR_ZONES; zid++) { 13707ee36a14SMel Gorman if (!nr_zone_taken[zid]) 13717ee36a14SMel Gorman continue; 13727ee36a14SMel Gorman 13737ee36a14SMel Gorman __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); 13747ee36a14SMel Gorman } 13757ee36a14SMel Gorman #else 13767ee36a14SMel Gorman /* Zone ID does not matter on !HIGHMEM */ 13777ee36a14SMel Gorman __update_lru_size(lruvec, lru, 0, -nr_taken); 13787ee36a14SMel Gorman #endif 13797ee36a14SMel Gorman 13807ee36a14SMel Gorman #ifdef CONFIG_MEMCG 13817ee36a14SMel Gorman mem_cgroup_update_lru_size(lruvec, lru, -nr_taken); 13827ee36a14SMel Gorman #endif 13837ee36a14SMel Gorman } 13847ee36a14SMel Gorman 138549d2e9ccSChristoph Lameter /* 1386a52633d8SMel Gorman * zone_lru_lock is heavily contended. Some of the functions that 13871da177e4SLinus Torvalds * shrink the lists perform better by taking out a batch of pages 13881da177e4SLinus Torvalds * and working on them outside the LRU lock. 13891da177e4SLinus Torvalds * 13901da177e4SLinus Torvalds * For pagecache intensive workloads, this function is the hottest 13911da177e4SLinus Torvalds * spot in the kernel (apart from copy_*_user functions). 13921da177e4SLinus Torvalds * 13931da177e4SLinus Torvalds * Appropriate locks must be held before calling this function. 13941da177e4SLinus Torvalds * 13951da177e4SLinus Torvalds * @nr_to_scan: The number of pages to look through on the list. 13965dc35979SKonstantin Khlebnikov * @lruvec: The LRU vector to pull pages from. 13971da177e4SLinus Torvalds * @dst: The temp list to put pages on to. 1398f626012dSHugh Dickins * @nr_scanned: The number of pages that were scanned. 1399fe2c2a10SRik van Riel * @sc: The scan_control struct for this reclaim session 14005ad333ebSAndy Whitcroft * @mode: One of the LRU isolation modes 14013cb99451SKonstantin Khlebnikov * @lru: LRU list id for isolating 14021da177e4SLinus Torvalds * 14031da177e4SLinus Torvalds * returns how many pages were moved onto *@dst. 14041da177e4SLinus Torvalds */ 140569e05944SAndrew Morton static unsigned long isolate_lru_pages(unsigned long nr_to_scan, 14065dc35979SKonstantin Khlebnikov struct lruvec *lruvec, struct list_head *dst, 1407fe2c2a10SRik van Riel unsigned long *nr_scanned, struct scan_control *sc, 14083cb99451SKonstantin Khlebnikov isolate_mode_t mode, enum lru_list lru) 14091da177e4SLinus Torvalds { 141075b00af7SHugh Dickins struct list_head *src = &lruvec->lists[lru]; 141169e05944SAndrew Morton unsigned long nr_taken = 0; 1412599d0c95SMel Gorman unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 }; 14137cc30fcfSMel Gorman unsigned long nr_skipped[MAX_NR_ZONES] = { 0, }; 1414599d0c95SMel Gorman unsigned long scan, nr_pages; 1415b2e18757SMel Gorman LIST_HEAD(pages_skipped); 14161da177e4SLinus Torvalds 14170b802f10SVladimir Davydov for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan && 14180b802f10SVladimir Davydov !list_empty(src); scan++) { 14195ad333ebSAndy Whitcroft struct page *page; 14205ad333ebSAndy Whitcroft 14211da177e4SLinus Torvalds page = lru_to_page(src); 14221da177e4SLinus Torvalds prefetchw_prev_lru_page(page, src, flags); 14231da177e4SLinus Torvalds 1424309381feSSasha Levin VM_BUG_ON_PAGE(!PageLRU(page), page); 14258d438f96SNick Piggin 1426b2e18757SMel Gorman if (page_zonenum(page) > sc->reclaim_idx) { 1427b2e18757SMel Gorman list_move(&page->lru, &pages_skipped); 14287cc30fcfSMel Gorman nr_skipped[page_zonenum(page)]++; 1429b2e18757SMel Gorman continue; 1430b2e18757SMel Gorman } 1431b2e18757SMel Gorman 1432f3fd4a61SKonstantin Khlebnikov switch (__isolate_lru_page(page, mode)) { 14335ad333ebSAndy Whitcroft case 0: 1434599d0c95SMel Gorman nr_pages = hpage_nr_pages(page); 1435599d0c95SMel Gorman nr_taken += nr_pages; 1436599d0c95SMel Gorman nr_zone_taken[page_zonenum(page)] += nr_pages; 14375ad333ebSAndy Whitcroft list_move(&page->lru, dst); 14385ad333ebSAndy Whitcroft break; 14397c8ee9a8SNick Piggin 14405ad333ebSAndy Whitcroft case -EBUSY: 14415ad333ebSAndy Whitcroft /* else it is being freed elsewhere */ 14425ad333ebSAndy Whitcroft list_move(&page->lru, src); 14435ad333ebSAndy Whitcroft continue; 14445ad333ebSAndy Whitcroft 14455ad333ebSAndy Whitcroft default: 14465ad333ebSAndy Whitcroft BUG(); 14475ad333ebSAndy Whitcroft } 14485ad333ebSAndy Whitcroft } 14491da177e4SLinus Torvalds 1450b2e18757SMel Gorman /* 1451b2e18757SMel Gorman * Splice any skipped pages to the start of the LRU list. Note that 1452b2e18757SMel Gorman * this disrupts the LRU order when reclaiming for lower zones but 1453b2e18757SMel Gorman * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX 1454b2e18757SMel Gorman * scanning would soon rescan the same pages to skip and put the 1455b2e18757SMel Gorman * system at risk of premature OOM. 1456b2e18757SMel Gorman */ 14577cc30fcfSMel Gorman if (!list_empty(&pages_skipped)) { 14587cc30fcfSMel Gorman int zid; 14597cc30fcfSMel Gorman 1460b2e18757SMel Gorman list_splice(&pages_skipped, src); 14617cc30fcfSMel Gorman for (zid = 0; zid < MAX_NR_ZONES; zid++) { 14627cc30fcfSMel Gorman if (!nr_skipped[zid]) 14637cc30fcfSMel Gorman continue; 14647cc30fcfSMel Gorman 14657cc30fcfSMel Gorman __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]); 14667cc30fcfSMel Gorman } 14677cc30fcfSMel Gorman } 1468f626012dSHugh Dickins *nr_scanned = scan; 1469e5146b12SMel Gorman trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan, 147075b00af7SHugh Dickins nr_taken, mode, is_file_lru(lru)); 14717ee36a14SMel Gorman update_lru_sizes(lruvec, lru, nr_zone_taken, nr_taken); 14721da177e4SLinus Torvalds return nr_taken; 14731da177e4SLinus Torvalds } 14741da177e4SLinus Torvalds 147562695a84SNick Piggin /** 147662695a84SNick Piggin * isolate_lru_page - tries to isolate a page from its LRU list 147762695a84SNick Piggin * @page: page to isolate from its LRU list 147862695a84SNick Piggin * 147962695a84SNick Piggin * Isolates a @page from an LRU list, clears PageLRU and adjusts the 148062695a84SNick Piggin * vmstat statistic corresponding to whatever LRU list the page was on. 148162695a84SNick Piggin * 148262695a84SNick Piggin * Returns 0 if the page was removed from an LRU list. 148362695a84SNick Piggin * Returns -EBUSY if the page was not on an LRU list. 148462695a84SNick Piggin * 148562695a84SNick Piggin * The returned page will have PageLRU() cleared. If it was found on 1486894bc310SLee Schermerhorn * the active list, it will have PageActive set. If it was found on 1487894bc310SLee Schermerhorn * the unevictable list, it will have the PageUnevictable bit set. That flag 1488894bc310SLee Schermerhorn * may need to be cleared by the caller before letting the page go. 148962695a84SNick Piggin * 149062695a84SNick Piggin * The vmstat statistic corresponding to the list on which the page was 149162695a84SNick Piggin * found will be decremented. 149262695a84SNick Piggin * 149362695a84SNick Piggin * Restrictions: 149462695a84SNick Piggin * (1) Must be called with an elevated refcount on the page. This is a 149562695a84SNick Piggin * fundamentnal difference from isolate_lru_pages (which is called 149662695a84SNick Piggin * without a stable reference). 149762695a84SNick Piggin * (2) the lru_lock must not be held. 149862695a84SNick Piggin * (3) interrupts must be enabled. 149962695a84SNick Piggin */ 150062695a84SNick Piggin int isolate_lru_page(struct page *page) 150162695a84SNick Piggin { 150262695a84SNick Piggin int ret = -EBUSY; 150362695a84SNick Piggin 1504309381feSSasha Levin VM_BUG_ON_PAGE(!page_count(page), page); 1505cf2a82eeSKirill A. Shutemov WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"); 15060c917313SKonstantin Khlebnikov 150762695a84SNick Piggin if (PageLRU(page)) { 150862695a84SNick Piggin struct zone *zone = page_zone(page); 1509fa9add64SHugh Dickins struct lruvec *lruvec; 151062695a84SNick Piggin 1511a52633d8SMel Gorman spin_lock_irq(zone_lru_lock(zone)); 1512599d0c95SMel Gorman lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 15130c917313SKonstantin Khlebnikov if (PageLRU(page)) { 1514894bc310SLee Schermerhorn int lru = page_lru(page); 15150c917313SKonstantin Khlebnikov get_page(page); 151662695a84SNick Piggin ClearPageLRU(page); 1517fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, lru); 1518fa9add64SHugh Dickins ret = 0; 151962695a84SNick Piggin } 1520a52633d8SMel Gorman spin_unlock_irq(zone_lru_lock(zone)); 152162695a84SNick Piggin } 152262695a84SNick Piggin return ret; 152362695a84SNick Piggin } 152462695a84SNick Piggin 15255ad333ebSAndy Whitcroft /* 1526d37dd5dcSFengguang Wu * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and 1527d37dd5dcSFengguang Wu * then get resheduled. When there are massive number of tasks doing page 1528d37dd5dcSFengguang Wu * allocation, such sleeping direct reclaimers may keep piling up on each CPU, 1529d37dd5dcSFengguang Wu * the LRU list will go small and be scanned faster than necessary, leading to 1530d37dd5dcSFengguang Wu * unnecessary swapping, thrashing and OOM. 153135cd7815SRik van Riel */ 1532599d0c95SMel Gorman static int too_many_isolated(struct pglist_data *pgdat, int file, 153335cd7815SRik van Riel struct scan_control *sc) 153435cd7815SRik van Riel { 153535cd7815SRik van Riel unsigned long inactive, isolated; 153635cd7815SRik van Riel 153735cd7815SRik van Riel if (current_is_kswapd()) 153835cd7815SRik van Riel return 0; 153935cd7815SRik van Riel 154097c9341fSTejun Heo if (!sane_reclaim(sc)) 154135cd7815SRik van Riel return 0; 154235cd7815SRik van Riel 154335cd7815SRik van Riel if (file) { 1544599d0c95SMel Gorman inactive = node_page_state(pgdat, NR_INACTIVE_FILE); 1545599d0c95SMel Gorman isolated = node_page_state(pgdat, NR_ISOLATED_FILE); 154635cd7815SRik van Riel } else { 1547599d0c95SMel Gorman inactive = node_page_state(pgdat, NR_INACTIVE_ANON); 1548599d0c95SMel Gorman isolated = node_page_state(pgdat, NR_ISOLATED_ANON); 154935cd7815SRik van Riel } 155035cd7815SRik van Riel 15513cf23841SFengguang Wu /* 15523cf23841SFengguang Wu * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they 15533cf23841SFengguang Wu * won't get blocked by normal direct-reclaimers, forming a circular 15543cf23841SFengguang Wu * deadlock. 15553cf23841SFengguang Wu */ 1556d0164adcSMel Gorman if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) 15573cf23841SFengguang Wu inactive >>= 3; 15583cf23841SFengguang Wu 155935cd7815SRik van Riel return isolated > inactive; 156035cd7815SRik van Riel } 156135cd7815SRik van Riel 156266635629SMel Gorman static noinline_for_stack void 156375b00af7SHugh Dickins putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list) 156466635629SMel Gorman { 156527ac81d8SKonstantin Khlebnikov struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 1566599d0c95SMel Gorman struct pglist_data *pgdat = lruvec_pgdat(lruvec); 15673f79768fSHugh Dickins LIST_HEAD(pages_to_free); 156866635629SMel Gorman 156966635629SMel Gorman /* 157066635629SMel Gorman * Put back any unfreeable pages. 157166635629SMel Gorman */ 157266635629SMel Gorman while (!list_empty(page_list)) { 15733f79768fSHugh Dickins struct page *page = lru_to_page(page_list); 157466635629SMel Gorman int lru; 15753f79768fSHugh Dickins 1576309381feSSasha Levin VM_BUG_ON_PAGE(PageLRU(page), page); 157766635629SMel Gorman list_del(&page->lru); 157839b5f29aSHugh Dickins if (unlikely(!page_evictable(page))) { 1579599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 158066635629SMel Gorman putback_lru_page(page); 1581599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 158266635629SMel Gorman continue; 158366635629SMel Gorman } 1584fa9add64SHugh Dickins 1585599d0c95SMel Gorman lruvec = mem_cgroup_page_lruvec(page, pgdat); 1586fa9add64SHugh Dickins 15877a608572SLinus Torvalds SetPageLRU(page); 158866635629SMel Gorman lru = page_lru(page); 1589fa9add64SHugh Dickins add_page_to_lru_list(page, lruvec, lru); 1590fa9add64SHugh Dickins 159166635629SMel Gorman if (is_active_lru(lru)) { 159266635629SMel Gorman int file = is_file_lru(lru); 15939992af10SRik van Riel int numpages = hpage_nr_pages(page); 15949992af10SRik van Riel reclaim_stat->recent_rotated[file] += numpages; 159566635629SMel Gorman } 15962bcf8879SHugh Dickins if (put_page_testzero(page)) { 15972bcf8879SHugh Dickins __ClearPageLRU(page); 15982bcf8879SHugh Dickins __ClearPageActive(page); 1599fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, lru); 16002bcf8879SHugh Dickins 16012bcf8879SHugh Dickins if (unlikely(PageCompound(page))) { 1602599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 1603747db954SJohannes Weiner mem_cgroup_uncharge(page); 16042bcf8879SHugh Dickins (*get_compound_page_dtor(page))(page); 1605599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 16062bcf8879SHugh Dickins } else 16072bcf8879SHugh Dickins list_add(&page->lru, &pages_to_free); 160866635629SMel Gorman } 160966635629SMel Gorman } 161066635629SMel Gorman 16113f79768fSHugh Dickins /* 16123f79768fSHugh Dickins * To save our caller's stack, now use input list for pages to free. 16133f79768fSHugh Dickins */ 16143f79768fSHugh Dickins list_splice(&pages_to_free, page_list); 161566635629SMel Gorman } 161666635629SMel Gorman 161766635629SMel Gorman /* 1618399ba0b9SNeilBrown * If a kernel thread (such as nfsd for loop-back mounts) services 1619399ba0b9SNeilBrown * a backing device by writing to the page cache it sets PF_LESS_THROTTLE. 1620399ba0b9SNeilBrown * In that case we should only throttle if the backing device it is 1621399ba0b9SNeilBrown * writing to is congested. In other cases it is safe to throttle. 1622399ba0b9SNeilBrown */ 1623399ba0b9SNeilBrown static int current_may_throttle(void) 1624399ba0b9SNeilBrown { 1625399ba0b9SNeilBrown return !(current->flags & PF_LESS_THROTTLE) || 1626399ba0b9SNeilBrown current->backing_dev_info == NULL || 1627399ba0b9SNeilBrown bdi_write_congested(current->backing_dev_info); 1628399ba0b9SNeilBrown } 1629399ba0b9SNeilBrown 1630399ba0b9SNeilBrown /* 1631b2e18757SMel Gorman * shrink_inactive_list() is a helper for shrink_node(). It returns the number 16321742f19fSAndrew Morton * of reclaimed pages 16331da177e4SLinus Torvalds */ 163466635629SMel Gorman static noinline_for_stack unsigned long 16351a93be0eSKonstantin Khlebnikov shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, 16369e3b2f8cSKonstantin Khlebnikov struct scan_control *sc, enum lru_list lru) 16371da177e4SLinus Torvalds { 16381da177e4SLinus Torvalds LIST_HEAD(page_list); 1639e247dbceSKOSAKI Motohiro unsigned long nr_scanned; 164005ff5137SAndrew Morton unsigned long nr_reclaimed = 0; 1641e247dbceSKOSAKI Motohiro unsigned long nr_taken; 16428e950282SMel Gorman unsigned long nr_dirty = 0; 16438e950282SMel Gorman unsigned long nr_congested = 0; 1644e2be15f6SMel Gorman unsigned long nr_unqueued_dirty = 0; 164592df3a72SMel Gorman unsigned long nr_writeback = 0; 1646b1a6f21eSMel Gorman unsigned long nr_immediate = 0; 1647f3fd4a61SKonstantin Khlebnikov isolate_mode_t isolate_mode = 0; 16483cb99451SKonstantin Khlebnikov int file = is_file_lru(lru); 1649599d0c95SMel Gorman struct pglist_data *pgdat = lruvec_pgdat(lruvec); 16501a93be0eSKonstantin Khlebnikov struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 165178dc583dSKOSAKI Motohiro 1652599d0c95SMel Gorman while (unlikely(too_many_isolated(pgdat, file, sc))) { 165358355c78SKOSAKI Motohiro congestion_wait(BLK_RW_ASYNC, HZ/10); 165435cd7815SRik van Riel 165535cd7815SRik van Riel /* We are about to die and free our memory. Return now. */ 165635cd7815SRik van Riel if (fatal_signal_pending(current)) 165735cd7815SRik van Riel return SWAP_CLUSTER_MAX; 165835cd7815SRik van Riel } 165935cd7815SRik van Riel 16601da177e4SLinus Torvalds lru_add_drain(); 1661f80c0673SMinchan Kim 1662f80c0673SMinchan Kim if (!sc->may_unmap) 166361317289SHillf Danton isolate_mode |= ISOLATE_UNMAPPED; 1664f80c0673SMinchan Kim if (!sc->may_writepage) 166561317289SHillf Danton isolate_mode |= ISOLATE_CLEAN; 1666f80c0673SMinchan Kim 1667599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 16681da177e4SLinus Torvalds 16695dc35979SKonstantin Khlebnikov nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list, 16705dc35979SKonstantin Khlebnikov &nr_scanned, sc, isolate_mode, lru); 167195d918fcSKonstantin Khlebnikov 1672599d0c95SMel Gorman __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 16739d5e6a9fSHugh Dickins reclaim_stat->recent_scanned[file] += nr_taken; 167495d918fcSKonstantin Khlebnikov 167589b5fae5SJohannes Weiner if (global_reclaim(sc)) { 1676599d0c95SMel Gorman __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned); 1677b35ea17bSKOSAKI Motohiro if (current_is_kswapd()) 1678599d0c95SMel Gorman __count_vm_events(PGSCAN_KSWAPD, nr_scanned); 1679b35ea17bSKOSAKI Motohiro else 1680599d0c95SMel Gorman __count_vm_events(PGSCAN_DIRECT, nr_scanned); 1681b35ea17bSKOSAKI Motohiro } 1682599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 1683d563c050SHillf Danton 1684d563c050SHillf Danton if (nr_taken == 0) 168566635629SMel Gorman return 0; 1686b35ea17bSKOSAKI Motohiro 1687599d0c95SMel Gorman nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, TTU_UNMAP, 16888e950282SMel Gorman &nr_dirty, &nr_unqueued_dirty, &nr_congested, 16898e950282SMel Gorman &nr_writeback, &nr_immediate, 1690b1a6f21eSMel Gorman false); 1691c661b078SAndy Whitcroft 1692599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 16933f79768fSHugh Dickins 1694904249aaSYing Han if (global_reclaim(sc)) { 1695b35ea17bSKOSAKI Motohiro if (current_is_kswapd()) 1696599d0c95SMel Gorman __count_vm_events(PGSTEAL_KSWAPD, nr_reclaimed); 1697904249aaSYing Han else 1698599d0c95SMel Gorman __count_vm_events(PGSTEAL_DIRECT, nr_reclaimed); 1699904249aaSYing Han } 1700a74609faSNick Piggin 170127ac81d8SKonstantin Khlebnikov putback_inactive_pages(lruvec, &page_list); 17023f79768fSHugh Dickins 1703599d0c95SMel Gorman __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 17043f79768fSHugh Dickins 1705599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 17063f79768fSHugh Dickins 1707747db954SJohannes Weiner mem_cgroup_uncharge_list(&page_list); 1708b745bc85SMel Gorman free_hot_cold_page_list(&page_list, true); 1709e11da5b4SMel Gorman 171092df3a72SMel Gorman /* 171192df3a72SMel Gorman * If reclaim is isolating dirty pages under writeback, it implies 171292df3a72SMel Gorman * that the long-lived page allocation rate is exceeding the page 171392df3a72SMel Gorman * laundering rate. Either the global limits are not being effective 171492df3a72SMel Gorman * at throttling processes due to the page distribution throughout 171592df3a72SMel Gorman * zones or there is heavy usage of a slow backing device. The 171692df3a72SMel Gorman * only option is to throttle from reclaim context which is not ideal 171792df3a72SMel Gorman * as there is no guarantee the dirtying process is throttled in the 171892df3a72SMel Gorman * same way balance_dirty_pages() manages. 171992df3a72SMel Gorman * 17208e950282SMel Gorman * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number 17218e950282SMel Gorman * of pages under pages flagged for immediate reclaim and stall if any 17228e950282SMel Gorman * are encountered in the nr_immediate check below. 172392df3a72SMel Gorman */ 1724918fc718SMel Gorman if (nr_writeback && nr_writeback == nr_taken) 1725599d0c95SMel Gorman set_bit(PGDAT_WRITEBACK, &pgdat->flags); 172692df3a72SMel Gorman 1727d43006d5SMel Gorman /* 172897c9341fSTejun Heo * Legacy memcg will stall in page writeback so avoid forcibly 172997c9341fSTejun Heo * stalling here. 1730d43006d5SMel Gorman */ 173197c9341fSTejun Heo if (sane_reclaim(sc)) { 1732b1a6f21eSMel Gorman /* 17338e950282SMel Gorman * Tag a zone as congested if all the dirty pages scanned were 17348e950282SMel Gorman * backed by a congested BDI and wait_iff_congested will stall. 17358e950282SMel Gorman */ 17368e950282SMel Gorman if (nr_dirty && nr_dirty == nr_congested) 1737599d0c95SMel Gorman set_bit(PGDAT_CONGESTED, &pgdat->flags); 17388e950282SMel Gorman 17398e950282SMel Gorman /* 1740b1a6f21eSMel Gorman * If dirty pages are scanned that are not queued for IO, it 1741b1a6f21eSMel Gorman * implies that flushers are not keeping up. In this case, flag 1742599d0c95SMel Gorman * the pgdat PGDAT_DIRTY and kswapd will start writing pages from 174357054651SJohannes Weiner * reclaim context. 1744b1a6f21eSMel Gorman */ 1745b1a6f21eSMel Gorman if (nr_unqueued_dirty == nr_taken) 1746599d0c95SMel Gorman set_bit(PGDAT_DIRTY, &pgdat->flags); 1747b1a6f21eSMel Gorman 1748b1a6f21eSMel Gorman /* 1749b738d764SLinus Torvalds * If kswapd scans pages marked marked for immediate 1750b738d764SLinus Torvalds * reclaim and under writeback (nr_immediate), it implies 1751b738d764SLinus Torvalds * that pages are cycling through the LRU faster than 1752b1a6f21eSMel Gorman * they are written so also forcibly stall. 1753b1a6f21eSMel Gorman */ 1754b738d764SLinus Torvalds if (nr_immediate && current_may_throttle()) 1755b1a6f21eSMel Gorman congestion_wait(BLK_RW_ASYNC, HZ/10); 1756e2be15f6SMel Gorman } 1757d43006d5SMel Gorman 17588e950282SMel Gorman /* 17598e950282SMel Gorman * Stall direct reclaim for IO completions if underlying BDIs or zone 17608e950282SMel Gorman * is congested. Allow kswapd to continue until it starts encountering 17618e950282SMel Gorman * unqueued dirty pages or cycling through the LRU too quickly. 17628e950282SMel Gorman */ 1763399ba0b9SNeilBrown if (!sc->hibernation_mode && !current_is_kswapd() && 1764399ba0b9SNeilBrown current_may_throttle()) 1765599d0c95SMel Gorman wait_iff_congested(pgdat, BLK_RW_ASYNC, HZ/10); 17668e950282SMel Gorman 1767599d0c95SMel Gorman trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, 1768599d0c95SMel Gorman nr_scanned, nr_reclaimed, 1769ba5e9579Syalin wang sc->priority, file); 177005ff5137SAndrew Morton return nr_reclaimed; 17711da177e4SLinus Torvalds } 17721da177e4SLinus Torvalds 17733bb1a852SMartin Bligh /* 17741cfb419bSKAMEZAWA Hiroyuki * This moves pages from the active list to the inactive list. 17751cfb419bSKAMEZAWA Hiroyuki * 17761cfb419bSKAMEZAWA Hiroyuki * We move them the other way if the page is referenced by one or more 17771cfb419bSKAMEZAWA Hiroyuki * processes, from rmap. 17781cfb419bSKAMEZAWA Hiroyuki * 17791cfb419bSKAMEZAWA Hiroyuki * If the pages are mostly unmapped, the processing is fast and it is 1780a52633d8SMel Gorman * appropriate to hold zone_lru_lock across the whole operation. But if 17811cfb419bSKAMEZAWA Hiroyuki * the pages are mapped, the processing is slow (page_referenced()) so we 1782a52633d8SMel Gorman * should drop zone_lru_lock around each page. It's impossible to balance 17831cfb419bSKAMEZAWA Hiroyuki * this, so instead we remove the pages from the LRU while processing them. 17841cfb419bSKAMEZAWA Hiroyuki * It is safe to rely on PG_active against the non-LRU pages in here because 17851cfb419bSKAMEZAWA Hiroyuki * nobody will play with that bit on a non-LRU page. 17861cfb419bSKAMEZAWA Hiroyuki * 17870139aa7bSJoonsoo Kim * The downside is that we have to touch page->_refcount against each page. 17881cfb419bSKAMEZAWA Hiroyuki * But we had to alter page->flags anyway. 17891cfb419bSKAMEZAWA Hiroyuki */ 17901cfb419bSKAMEZAWA Hiroyuki 1791fa9add64SHugh Dickins static void move_active_pages_to_lru(struct lruvec *lruvec, 17923eb4140fSWu Fengguang struct list_head *list, 17932bcf8879SHugh Dickins struct list_head *pages_to_free, 17943eb4140fSWu Fengguang enum lru_list lru) 17953eb4140fSWu Fengguang { 1796599d0c95SMel Gorman struct pglist_data *pgdat = lruvec_pgdat(lruvec); 17973eb4140fSWu Fengguang unsigned long pgmoved = 0; 17983eb4140fSWu Fengguang struct page *page; 1799fa9add64SHugh Dickins int nr_pages; 18003eb4140fSWu Fengguang 18013eb4140fSWu Fengguang while (!list_empty(list)) { 18023eb4140fSWu Fengguang page = lru_to_page(list); 1803599d0c95SMel Gorman lruvec = mem_cgroup_page_lruvec(page, pgdat); 18043eb4140fSWu Fengguang 1805309381feSSasha Levin VM_BUG_ON_PAGE(PageLRU(page), page); 18063eb4140fSWu Fengguang SetPageLRU(page); 18073eb4140fSWu Fengguang 1808fa9add64SHugh Dickins nr_pages = hpage_nr_pages(page); 1809599d0c95SMel Gorman update_lru_size(lruvec, lru, page_zonenum(page), nr_pages); 1810925b7673SJohannes Weiner list_move(&page->lru, &lruvec->lists[lru]); 1811fa9add64SHugh Dickins pgmoved += nr_pages; 18123eb4140fSWu Fengguang 18132bcf8879SHugh Dickins if (put_page_testzero(page)) { 18142bcf8879SHugh Dickins __ClearPageLRU(page); 18152bcf8879SHugh Dickins __ClearPageActive(page); 1816fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, lru); 18172bcf8879SHugh Dickins 18182bcf8879SHugh Dickins if (unlikely(PageCompound(page))) { 1819599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 1820747db954SJohannes Weiner mem_cgroup_uncharge(page); 18212bcf8879SHugh Dickins (*get_compound_page_dtor(page))(page); 1822599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 18232bcf8879SHugh Dickins } else 18242bcf8879SHugh Dickins list_add(&page->lru, pages_to_free); 18253eb4140fSWu Fengguang } 18263eb4140fSWu Fengguang } 18279d5e6a9fSHugh Dickins 18283eb4140fSWu Fengguang if (!is_active_lru(lru)) 18293eb4140fSWu Fengguang __count_vm_events(PGDEACTIVATE, pgmoved); 18303eb4140fSWu Fengguang } 18311cfb419bSKAMEZAWA Hiroyuki 1832f626012dSHugh Dickins static void shrink_active_list(unsigned long nr_to_scan, 18331a93be0eSKonstantin Khlebnikov struct lruvec *lruvec, 1834f16015fbSJohannes Weiner struct scan_control *sc, 18359e3b2f8cSKonstantin Khlebnikov enum lru_list lru) 18361cfb419bSKAMEZAWA Hiroyuki { 183744c241f1SKOSAKI Motohiro unsigned long nr_taken; 1838f626012dSHugh Dickins unsigned long nr_scanned; 18396fe6b7e3SWu Fengguang unsigned long vm_flags; 18401cfb419bSKAMEZAWA Hiroyuki LIST_HEAD(l_hold); /* The pages which were snipped off */ 18418cab4754SWu Fengguang LIST_HEAD(l_active); 1842b69408e8SChristoph Lameter LIST_HEAD(l_inactive); 18431cfb419bSKAMEZAWA Hiroyuki struct page *page; 18441a93be0eSKonstantin Khlebnikov struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 184544c241f1SKOSAKI Motohiro unsigned long nr_rotated = 0; 1846f3fd4a61SKonstantin Khlebnikov isolate_mode_t isolate_mode = 0; 18473cb99451SKonstantin Khlebnikov int file = is_file_lru(lru); 1848599d0c95SMel Gorman struct pglist_data *pgdat = lruvec_pgdat(lruvec); 18491cfb419bSKAMEZAWA Hiroyuki 18501da177e4SLinus Torvalds lru_add_drain(); 1851f80c0673SMinchan Kim 1852f80c0673SMinchan Kim if (!sc->may_unmap) 185361317289SHillf Danton isolate_mode |= ISOLATE_UNMAPPED; 1854f80c0673SMinchan Kim if (!sc->may_writepage) 185561317289SHillf Danton isolate_mode |= ISOLATE_CLEAN; 1856f80c0673SMinchan Kim 1857599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 1858925b7673SJohannes Weiner 18595dc35979SKonstantin Khlebnikov nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold, 18605dc35979SKonstantin Khlebnikov &nr_scanned, sc, isolate_mode, lru); 186189b5fae5SJohannes Weiner 1862599d0c95SMel Gorman __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 1863b7c46d15SJohannes Weiner reclaim_stat->recent_scanned[file] += nr_taken; 18641cfb419bSKAMEZAWA Hiroyuki 18659d5e6a9fSHugh Dickins if (global_reclaim(sc)) 1866599d0c95SMel Gorman __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned); 1867599d0c95SMel Gorman __count_vm_events(PGREFILL, nr_scanned); 18689d5e6a9fSHugh Dickins 1869599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 18701da177e4SLinus Torvalds 18711da177e4SLinus Torvalds while (!list_empty(&l_hold)) { 18721da177e4SLinus Torvalds cond_resched(); 18731da177e4SLinus Torvalds page = lru_to_page(&l_hold); 18741da177e4SLinus Torvalds list_del(&page->lru); 18757e9cd484SRik van Riel 187639b5f29aSHugh Dickins if (unlikely(!page_evictable(page))) { 1877894bc310SLee Schermerhorn putback_lru_page(page); 1878894bc310SLee Schermerhorn continue; 1879894bc310SLee Schermerhorn } 1880894bc310SLee Schermerhorn 1881cc715d99SMel Gorman if (unlikely(buffer_heads_over_limit)) { 1882cc715d99SMel Gorman if (page_has_private(page) && trylock_page(page)) { 1883cc715d99SMel Gorman if (page_has_private(page)) 1884cc715d99SMel Gorman try_to_release_page(page, 0); 1885cc715d99SMel Gorman unlock_page(page); 1886cc715d99SMel Gorman } 1887cc715d99SMel Gorman } 1888cc715d99SMel Gorman 1889c3ac9a8aSJohannes Weiner if (page_referenced(page, 0, sc->target_mem_cgroup, 1890c3ac9a8aSJohannes Weiner &vm_flags)) { 18919992af10SRik van Riel nr_rotated += hpage_nr_pages(page); 18928cab4754SWu Fengguang /* 18938cab4754SWu Fengguang * Identify referenced, file-backed active pages and 18948cab4754SWu Fengguang * give them one more trip around the active list. So 18958cab4754SWu Fengguang * that executable code get better chances to stay in 18968cab4754SWu Fengguang * memory under moderate memory pressure. Anon pages 18978cab4754SWu Fengguang * are not likely to be evicted by use-once streaming 18988cab4754SWu Fengguang * IO, plus JVM can create lots of anon VM_EXEC pages, 18998cab4754SWu Fengguang * so we ignore them here. 19008cab4754SWu Fengguang */ 190141e20983SWu Fengguang if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { 19028cab4754SWu Fengguang list_add(&page->lru, &l_active); 19038cab4754SWu Fengguang continue; 19048cab4754SWu Fengguang } 19058cab4754SWu Fengguang } 19067e9cd484SRik van Riel 19075205e56eSKOSAKI Motohiro ClearPageActive(page); /* we are de-activating */ 19081da177e4SLinus Torvalds list_add(&page->lru, &l_inactive); 19091da177e4SLinus Torvalds } 19101da177e4SLinus Torvalds 1911b555749aSAndrew Morton /* 19128cab4754SWu Fengguang * Move pages back to the lru list. 1913b555749aSAndrew Morton */ 1914599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 19154f98a2feSRik van Riel /* 19168cab4754SWu Fengguang * Count referenced pages from currently used mappings as rotated, 19178cab4754SWu Fengguang * even though only some of them are actually re-activated. This 19188cab4754SWu Fengguang * helps balance scan pressure between file and anonymous pages in 19197c0db9e9SJerome Marchand * get_scan_count. 1920556adecbSRik van Riel */ 1921b7c46d15SJohannes Weiner reclaim_stat->recent_rotated[file] += nr_rotated; 1922556adecbSRik van Riel 1923fa9add64SHugh Dickins move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru); 1924fa9add64SHugh Dickins move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE); 1925599d0c95SMel Gorman __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 1926599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 19272bcf8879SHugh Dickins 1928747db954SJohannes Weiner mem_cgroup_uncharge_list(&l_hold); 1929b745bc85SMel Gorman free_hot_cold_page_list(&l_hold, true); 19301da177e4SLinus Torvalds } 19311da177e4SLinus Torvalds 193259dc76b0SRik van Riel /* 193359dc76b0SRik van Riel * The inactive anon list should be small enough that the VM never has 193459dc76b0SRik van Riel * to do too much work. 193514797e23SKOSAKI Motohiro * 193659dc76b0SRik van Riel * The inactive file list should be small enough to leave most memory 193759dc76b0SRik van Riel * to the established workingset on the scan-resistant active list, 193859dc76b0SRik van Riel * but large enough to avoid thrashing the aggregate readahead window. 193959dc76b0SRik van Riel * 194059dc76b0SRik van Riel * Both inactive lists should also be large enough that each inactive 194159dc76b0SRik van Riel * page has a chance to be referenced again before it is reclaimed. 194259dc76b0SRik van Riel * 194359dc76b0SRik van Riel * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages 194459dc76b0SRik van Riel * on this LRU, maintained by the pageout code. A zone->inactive_ratio 194559dc76b0SRik van Riel * of 3 means 3:1 or 25% of the pages are kept on the inactive list. 194659dc76b0SRik van Riel * 194759dc76b0SRik van Riel * total target max 194859dc76b0SRik van Riel * memory ratio inactive 194959dc76b0SRik van Riel * ------------------------------------- 195059dc76b0SRik van Riel * 10MB 1 5MB 195159dc76b0SRik van Riel * 100MB 1 50MB 195259dc76b0SRik van Riel * 1GB 3 250MB 195359dc76b0SRik van Riel * 10GB 10 0.9GB 195459dc76b0SRik van Riel * 100GB 31 3GB 195559dc76b0SRik van Riel * 1TB 101 10GB 195659dc76b0SRik van Riel * 10TB 320 32GB 195714797e23SKOSAKI Motohiro */ 195859dc76b0SRik van Riel static bool inactive_list_is_low(struct lruvec *lruvec, bool file) 195914797e23SKOSAKI Motohiro { 196059dc76b0SRik van Riel unsigned long inactive_ratio; 196159dc76b0SRik van Riel unsigned long inactive; 196259dc76b0SRik van Riel unsigned long active; 196359dc76b0SRik van Riel unsigned long gb; 196459dc76b0SRik van Riel 196574e3f3c3SMinchan Kim /* 196674e3f3c3SMinchan Kim * If we don't have swap space, anonymous page deactivation 196774e3f3c3SMinchan Kim * is pointless. 196874e3f3c3SMinchan Kim */ 196959dc76b0SRik van Riel if (!file && !total_swap_pages) 197042e2e457SYaowei Bai return false; 197174e3f3c3SMinchan Kim 197259dc76b0SRik van Riel inactive = lruvec_lru_size(lruvec, file * LRU_FILE); 197359dc76b0SRik van Riel active = lruvec_lru_size(lruvec, file * LRU_FILE + LRU_ACTIVE); 1974f16015fbSJohannes Weiner 197559dc76b0SRik van Riel gb = (inactive + active) >> (30 - PAGE_SHIFT); 197659dc76b0SRik van Riel if (gb) 197759dc76b0SRik van Riel inactive_ratio = int_sqrt(10 * gb); 1978b39415b2SRik van Riel else 197959dc76b0SRik van Riel inactive_ratio = 1; 198059dc76b0SRik van Riel 198159dc76b0SRik van Riel return inactive * inactive_ratio < active; 1982b39415b2SRik van Riel } 1983b39415b2SRik van Riel 19844f98a2feSRik van Riel static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 19851a93be0eSKonstantin Khlebnikov struct lruvec *lruvec, struct scan_control *sc) 1986b69408e8SChristoph Lameter { 1987b39415b2SRik van Riel if (is_active_lru(lru)) { 198859dc76b0SRik van Riel if (inactive_list_is_low(lruvec, is_file_lru(lru))) 19891a93be0eSKonstantin Khlebnikov shrink_active_list(nr_to_scan, lruvec, sc, lru); 1990556adecbSRik van Riel return 0; 1991556adecbSRik van Riel } 1992556adecbSRik van Riel 19931a93be0eSKonstantin Khlebnikov return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); 1994b69408e8SChristoph Lameter } 1995b69408e8SChristoph Lameter 19969a265114SJohannes Weiner enum scan_balance { 19979a265114SJohannes Weiner SCAN_EQUAL, 19989a265114SJohannes Weiner SCAN_FRACT, 19999a265114SJohannes Weiner SCAN_ANON, 20009a265114SJohannes Weiner SCAN_FILE, 20019a265114SJohannes Weiner }; 20029a265114SJohannes Weiner 20031da177e4SLinus Torvalds /* 20044f98a2feSRik van Riel * Determine how aggressively the anon and file LRU lists should be 20054f98a2feSRik van Riel * scanned. The relative value of each set of LRU lists is determined 20064f98a2feSRik van Riel * by looking at the fraction of the pages scanned we did rotate back 20074f98a2feSRik van Riel * onto the active list instead of evict. 20084f98a2feSRik van Riel * 2009be7bd59dSWanpeng Li * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan 2010be7bd59dSWanpeng Li * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan 20114f98a2feSRik van Riel */ 201233377678SVladimir Davydov static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, 20136b4f7799SJohannes Weiner struct scan_control *sc, unsigned long *nr, 20146b4f7799SJohannes Weiner unsigned long *lru_pages) 20154f98a2feSRik van Riel { 201633377678SVladimir Davydov int swappiness = mem_cgroup_swappiness(memcg); 201790126375SKonstantin Khlebnikov struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 20189a265114SJohannes Weiner u64 fraction[2]; 20199a265114SJohannes Weiner u64 denominator = 0; /* gcc */ 2020599d0c95SMel Gorman struct pglist_data *pgdat = lruvec_pgdat(lruvec); 20219a265114SJohannes Weiner unsigned long anon_prio, file_prio; 20229a265114SJohannes Weiner enum scan_balance scan_balance; 20230bf1457fSJohannes Weiner unsigned long anon, file; 20249a265114SJohannes Weiner bool force_scan = false; 20259a265114SJohannes Weiner unsigned long ap, fp; 20269a265114SJohannes Weiner enum lru_list lru; 20276f04f48dSSuleiman Souhlal bool some_scanned; 20286f04f48dSSuleiman Souhlal int pass; 2029246e87a9SKAMEZAWA Hiroyuki 2030f11c0ca5SJohannes Weiner /* 2031f11c0ca5SJohannes Weiner * If the zone or memcg is small, nr[l] can be 0. This 2032f11c0ca5SJohannes Weiner * results in no scanning on this priority and a potential 2033f11c0ca5SJohannes Weiner * priority drop. Global direct reclaim can go to the next 2034f11c0ca5SJohannes Weiner * zone and tends to have no problems. Global kswapd is for 2035f11c0ca5SJohannes Weiner * zone balancing and it needs to scan a minimum amount. When 2036f11c0ca5SJohannes Weiner * reclaiming for a memcg, a priority drop can cause high 2037f11c0ca5SJohannes Weiner * latencies, so it's better to scan a minimum amount there as 2038f11c0ca5SJohannes Weiner * well. 2039f11c0ca5SJohannes Weiner */ 204090cbc250SVladimir Davydov if (current_is_kswapd()) { 2041599d0c95SMel Gorman if (!pgdat_reclaimable(pgdat)) 2042a4d3e9e7SJohannes Weiner force_scan = true; 2043eb01aaabSVladimir Davydov if (!mem_cgroup_online(memcg)) 204490cbc250SVladimir Davydov force_scan = true; 204590cbc250SVladimir Davydov } 204689b5fae5SJohannes Weiner if (!global_reclaim(sc)) 2047a4d3e9e7SJohannes Weiner force_scan = true; 204876a33fc3SShaohua Li 204976a33fc3SShaohua Li /* If we have no swap space, do not bother scanning anon pages. */ 2050d8b38438SVladimir Davydov if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) { 20519a265114SJohannes Weiner scan_balance = SCAN_FILE; 205276a33fc3SShaohua Li goto out; 205376a33fc3SShaohua Li } 20544f98a2feSRik van Riel 205510316b31SJohannes Weiner /* 205610316b31SJohannes Weiner * Global reclaim will swap to prevent OOM even with no 205710316b31SJohannes Weiner * swappiness, but memcg users want to use this knob to 205810316b31SJohannes Weiner * disable swapping for individual groups completely when 205910316b31SJohannes Weiner * using the memory controller's swap limit feature would be 206010316b31SJohannes Weiner * too expensive. 206110316b31SJohannes Weiner */ 206202695175SJohannes Weiner if (!global_reclaim(sc) && !swappiness) { 20639a265114SJohannes Weiner scan_balance = SCAN_FILE; 206410316b31SJohannes Weiner goto out; 206510316b31SJohannes Weiner } 206610316b31SJohannes Weiner 206710316b31SJohannes Weiner /* 206810316b31SJohannes Weiner * Do not apply any pressure balancing cleverness when the 206910316b31SJohannes Weiner * system is close to OOM, scan both anon and file equally 207010316b31SJohannes Weiner * (unless the swappiness setting disagrees with swapping). 207110316b31SJohannes Weiner */ 207202695175SJohannes Weiner if (!sc->priority && swappiness) { 20739a265114SJohannes Weiner scan_balance = SCAN_EQUAL; 207410316b31SJohannes Weiner goto out; 207510316b31SJohannes Weiner } 207610316b31SJohannes Weiner 207711d16c25SJohannes Weiner /* 207862376251SJohannes Weiner * Prevent the reclaimer from falling into the cache trap: as 207962376251SJohannes Weiner * cache pages start out inactive, every cache fault will tip 208062376251SJohannes Weiner * the scan balance towards the file LRU. And as the file LRU 208162376251SJohannes Weiner * shrinks, so does the window for rotation from references. 208262376251SJohannes Weiner * This means we have a runaway feedback loop where a tiny 208362376251SJohannes Weiner * thrashing file LRU becomes infinitely more attractive than 208462376251SJohannes Weiner * anon pages. Try to detect this based on file LRU size. 208562376251SJohannes Weiner */ 208662376251SJohannes Weiner if (global_reclaim(sc)) { 2087599d0c95SMel Gorman unsigned long pgdatfile; 2088599d0c95SMel Gorman unsigned long pgdatfree; 2089599d0c95SMel Gorman int z; 2090599d0c95SMel Gorman unsigned long total_high_wmark = 0; 209162376251SJohannes Weiner 2092599d0c95SMel Gorman pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); 2093599d0c95SMel Gorman pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) + 2094599d0c95SMel Gorman node_page_state(pgdat, NR_INACTIVE_FILE); 20952ab051e1SJerome Marchand 2096599d0c95SMel Gorman for (z = 0; z < MAX_NR_ZONES; z++) { 2097599d0c95SMel Gorman struct zone *zone = &pgdat->node_zones[z]; 2098599d0c95SMel Gorman if (!populated_zone(zone)) 2099599d0c95SMel Gorman continue; 2100599d0c95SMel Gorman 2101599d0c95SMel Gorman total_high_wmark += high_wmark_pages(zone); 2102599d0c95SMel Gorman } 2103599d0c95SMel Gorman 2104599d0c95SMel Gorman if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) { 210562376251SJohannes Weiner scan_balance = SCAN_ANON; 210662376251SJohannes Weiner goto out; 210762376251SJohannes Weiner } 210862376251SJohannes Weiner } 210962376251SJohannes Weiner 211062376251SJohannes Weiner /* 2111316bda0eSVladimir Davydov * If there is enough inactive page cache, i.e. if the size of the 2112316bda0eSVladimir Davydov * inactive list is greater than that of the active list *and* the 2113316bda0eSVladimir Davydov * inactive list actually has some pages to scan on this priority, we 2114316bda0eSVladimir Davydov * do not reclaim anything from the anonymous working set right now. 2115316bda0eSVladimir Davydov * Without the second condition we could end up never scanning an 2116316bda0eSVladimir Davydov * lruvec even if it has plenty of old anonymous pages unless the 2117316bda0eSVladimir Davydov * system is under heavy pressure. 2118e9868505SRik van Riel */ 211959dc76b0SRik van Riel if (!inactive_list_is_low(lruvec, true) && 212023047a96SJohannes Weiner lruvec_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) { 21219a265114SJohannes Weiner scan_balance = SCAN_FILE; 2122e9868505SRik van Riel goto out; 21234f98a2feSRik van Riel } 21244f98a2feSRik van Riel 21259a265114SJohannes Weiner scan_balance = SCAN_FRACT; 21269a265114SJohannes Weiner 21274f98a2feSRik van Riel /* 212858c37f6eSKOSAKI Motohiro * With swappiness at 100, anonymous and file have the same priority. 212958c37f6eSKOSAKI Motohiro * This scanning priority is essentially the inverse of IO cost. 213058c37f6eSKOSAKI Motohiro */ 213102695175SJohannes Weiner anon_prio = swappiness; 213275b00af7SHugh Dickins file_prio = 200 - anon_prio; 213358c37f6eSKOSAKI Motohiro 213458c37f6eSKOSAKI Motohiro /* 21354f98a2feSRik van Riel * OK, so we have swap space and a fair amount of page cache 21364f98a2feSRik van Riel * pages. We use the recently rotated / recently scanned 21374f98a2feSRik van Riel * ratios to determine how valuable each cache is. 21384f98a2feSRik van Riel * 21394f98a2feSRik van Riel * Because workloads change over time (and to avoid overflow) 21404f98a2feSRik van Riel * we keep these statistics as a floating average, which ends 21414f98a2feSRik van Riel * up weighing recent references more than old ones. 21424f98a2feSRik van Riel * 21434f98a2feSRik van Riel * anon in [0], file in [1] 21444f98a2feSRik van Riel */ 21452ab051e1SJerome Marchand 214623047a96SJohannes Weiner anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON) + 214723047a96SJohannes Weiner lruvec_lru_size(lruvec, LRU_INACTIVE_ANON); 214823047a96SJohannes Weiner file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE) + 214923047a96SJohannes Weiner lruvec_lru_size(lruvec, LRU_INACTIVE_FILE); 21502ab051e1SJerome Marchand 2151599d0c95SMel Gorman spin_lock_irq(&pgdat->lru_lock); 215258c37f6eSKOSAKI Motohiro if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { 21536e901571SKOSAKI Motohiro reclaim_stat->recent_scanned[0] /= 2; 21546e901571SKOSAKI Motohiro reclaim_stat->recent_rotated[0] /= 2; 21554f98a2feSRik van Riel } 21564f98a2feSRik van Riel 21576e901571SKOSAKI Motohiro if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { 21586e901571SKOSAKI Motohiro reclaim_stat->recent_scanned[1] /= 2; 21596e901571SKOSAKI Motohiro reclaim_stat->recent_rotated[1] /= 2; 21604f98a2feSRik van Riel } 21614f98a2feSRik van Riel 21624f98a2feSRik van Riel /* 216300d8089cSRik van Riel * The amount of pressure on anon vs file pages is inversely 216400d8089cSRik van Riel * proportional to the fraction of recently scanned pages on 216500d8089cSRik van Riel * each list that were recently referenced and in active use. 21664f98a2feSRik van Riel */ 2167fe35004fSSatoru Moriya ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1); 21686e901571SKOSAKI Motohiro ap /= reclaim_stat->recent_rotated[0] + 1; 21694f98a2feSRik van Riel 2170fe35004fSSatoru Moriya fp = file_prio * (reclaim_stat->recent_scanned[1] + 1); 21716e901571SKOSAKI Motohiro fp /= reclaim_stat->recent_rotated[1] + 1; 2172599d0c95SMel Gorman spin_unlock_irq(&pgdat->lru_lock); 21734f98a2feSRik van Riel 217476a33fc3SShaohua Li fraction[0] = ap; 217576a33fc3SShaohua Li fraction[1] = fp; 217676a33fc3SShaohua Li denominator = ap + fp + 1; 217776a33fc3SShaohua Li out: 21786f04f48dSSuleiman Souhlal some_scanned = false; 21796f04f48dSSuleiman Souhlal /* Only use force_scan on second pass. */ 21806f04f48dSSuleiman Souhlal for (pass = 0; !some_scanned && pass < 2; pass++) { 21816b4f7799SJohannes Weiner *lru_pages = 0; 21824111304dSHugh Dickins for_each_evictable_lru(lru) { 21834111304dSHugh Dickins int file = is_file_lru(lru); 2184d778df51SJohannes Weiner unsigned long size; 218576a33fc3SShaohua Li unsigned long scan; 218676a33fc3SShaohua Li 218723047a96SJohannes Weiner size = lruvec_lru_size(lruvec, lru); 2188d778df51SJohannes Weiner scan = size >> sc->priority; 21899a265114SJohannes Weiner 21906f04f48dSSuleiman Souhlal if (!scan && pass && force_scan) 2191d778df51SJohannes Weiner scan = min(size, SWAP_CLUSTER_MAX); 21929a265114SJohannes Weiner 21939a265114SJohannes Weiner switch (scan_balance) { 21949a265114SJohannes Weiner case SCAN_EQUAL: 21959a265114SJohannes Weiner /* Scan lists relative to size */ 21969a265114SJohannes Weiner break; 21979a265114SJohannes Weiner case SCAN_FRACT: 21989a265114SJohannes Weiner /* 21999a265114SJohannes Weiner * Scan types proportional to swappiness and 22009a265114SJohannes Weiner * their relative recent reclaim efficiency. 22019a265114SJohannes Weiner */ 22026f04f48dSSuleiman Souhlal scan = div64_u64(scan * fraction[file], 22036f04f48dSSuleiman Souhlal denominator); 22049a265114SJohannes Weiner break; 22059a265114SJohannes Weiner case SCAN_FILE: 22069a265114SJohannes Weiner case SCAN_ANON: 22079a265114SJohannes Weiner /* Scan one type exclusively */ 22086b4f7799SJohannes Weiner if ((scan_balance == SCAN_FILE) != file) { 22096b4f7799SJohannes Weiner size = 0; 22109a265114SJohannes Weiner scan = 0; 22116b4f7799SJohannes Weiner } 22129a265114SJohannes Weiner break; 22139a265114SJohannes Weiner default: 22149a265114SJohannes Weiner /* Look ma, no brain */ 22159a265114SJohannes Weiner BUG(); 22169a265114SJohannes Weiner } 22176b4f7799SJohannes Weiner 22186b4f7799SJohannes Weiner *lru_pages += size; 22194111304dSHugh Dickins nr[lru] = scan; 22206b4f7799SJohannes Weiner 22216f04f48dSSuleiman Souhlal /* 22226f04f48dSSuleiman Souhlal * Skip the second pass and don't force_scan, 22236f04f48dSSuleiman Souhlal * if we found something to scan. 22246f04f48dSSuleiman Souhlal */ 22256f04f48dSSuleiman Souhlal some_scanned |= !!scan; 22266f04f48dSSuleiman Souhlal } 222776a33fc3SShaohua Li } 22286e08a369SWu Fengguang } 22294f98a2feSRik van Riel 223072b252aeSMel Gorman #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 223172b252aeSMel Gorman static void init_tlb_ubc(void) 223272b252aeSMel Gorman { 223372b252aeSMel Gorman /* 223472b252aeSMel Gorman * This deliberately does not clear the cpumask as it's expensive 223572b252aeSMel Gorman * and unnecessary. If there happens to be data in there then the 223672b252aeSMel Gorman * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and 223772b252aeSMel Gorman * then will be cleared. 223872b252aeSMel Gorman */ 223972b252aeSMel Gorman current->tlb_ubc.flush_required = false; 224072b252aeSMel Gorman } 224172b252aeSMel Gorman #else 224272b252aeSMel Gorman static inline void init_tlb_ubc(void) 224372b252aeSMel Gorman { 224472b252aeSMel Gorman } 224572b252aeSMel Gorman #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ 224672b252aeSMel Gorman 22479b4f98cdSJohannes Weiner /* 2248a9dd0a83SMel Gorman * This is a basic per-node page freer. Used by both kswapd and direct reclaim. 22499b4f98cdSJohannes Weiner */ 2250a9dd0a83SMel Gorman static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg, 22516b4f7799SJohannes Weiner struct scan_control *sc, unsigned long *lru_pages) 22529b4f98cdSJohannes Weiner { 2253ef8f2327SMel Gorman struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg); 22549b4f98cdSJohannes Weiner unsigned long nr[NR_LRU_LISTS]; 2255e82e0561SMel Gorman unsigned long targets[NR_LRU_LISTS]; 22569b4f98cdSJohannes Weiner unsigned long nr_to_scan; 22579b4f98cdSJohannes Weiner enum lru_list lru; 22589b4f98cdSJohannes Weiner unsigned long nr_reclaimed = 0; 22599b4f98cdSJohannes Weiner unsigned long nr_to_reclaim = sc->nr_to_reclaim; 22609b4f98cdSJohannes Weiner struct blk_plug plug; 22611a501907SMel Gorman bool scan_adjusted; 22629b4f98cdSJohannes Weiner 226333377678SVladimir Davydov get_scan_count(lruvec, memcg, sc, nr, lru_pages); 22649b4f98cdSJohannes Weiner 2265e82e0561SMel Gorman /* Record the original scan target for proportional adjustments later */ 2266e82e0561SMel Gorman memcpy(targets, nr, sizeof(nr)); 2267e82e0561SMel Gorman 22681a501907SMel Gorman /* 22691a501907SMel Gorman * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal 22701a501907SMel Gorman * event that can occur when there is little memory pressure e.g. 22711a501907SMel Gorman * multiple streaming readers/writers. Hence, we do not abort scanning 22721a501907SMel Gorman * when the requested number of pages are reclaimed when scanning at 22731a501907SMel Gorman * DEF_PRIORITY on the assumption that the fact we are direct 22741a501907SMel Gorman * reclaiming implies that kswapd is not keeping up and it is best to 22751a501907SMel Gorman * do a batch of work at once. For memcg reclaim one check is made to 22761a501907SMel Gorman * abort proportional reclaim if either the file or anon lru has already 22771a501907SMel Gorman * dropped to zero at the first pass. 22781a501907SMel Gorman */ 22791a501907SMel Gorman scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() && 22801a501907SMel Gorman sc->priority == DEF_PRIORITY); 22811a501907SMel Gorman 228272b252aeSMel Gorman init_tlb_ubc(); 228372b252aeSMel Gorman 22849b4f98cdSJohannes Weiner blk_start_plug(&plug); 22859b4f98cdSJohannes Weiner while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 22869b4f98cdSJohannes Weiner nr[LRU_INACTIVE_FILE]) { 2287e82e0561SMel Gorman unsigned long nr_anon, nr_file, percentage; 2288e82e0561SMel Gorman unsigned long nr_scanned; 2289e82e0561SMel Gorman 22909b4f98cdSJohannes Weiner for_each_evictable_lru(lru) { 22919b4f98cdSJohannes Weiner if (nr[lru]) { 22929b4f98cdSJohannes Weiner nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); 22939b4f98cdSJohannes Weiner nr[lru] -= nr_to_scan; 22949b4f98cdSJohannes Weiner 22959b4f98cdSJohannes Weiner nr_reclaimed += shrink_list(lru, nr_to_scan, 22969b4f98cdSJohannes Weiner lruvec, sc); 22979b4f98cdSJohannes Weiner } 22989b4f98cdSJohannes Weiner } 2299e82e0561SMel Gorman 2300e82e0561SMel Gorman if (nr_reclaimed < nr_to_reclaim || scan_adjusted) 2301e82e0561SMel Gorman continue; 2302e82e0561SMel Gorman 23039b4f98cdSJohannes Weiner /* 2304e82e0561SMel Gorman * For kswapd and memcg, reclaim at least the number of pages 23051a501907SMel Gorman * requested. Ensure that the anon and file LRUs are scanned 2306e82e0561SMel Gorman * proportionally what was requested by get_scan_count(). We 2307e82e0561SMel Gorman * stop reclaiming one LRU and reduce the amount scanning 2308e82e0561SMel Gorman * proportional to the original scan target. 2309e82e0561SMel Gorman */ 2310e82e0561SMel Gorman nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE]; 2311e82e0561SMel Gorman nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON]; 2312e82e0561SMel Gorman 23131a501907SMel Gorman /* 23141a501907SMel Gorman * It's just vindictive to attack the larger once the smaller 23151a501907SMel Gorman * has gone to zero. And given the way we stop scanning the 23161a501907SMel Gorman * smaller below, this makes sure that we only make one nudge 23171a501907SMel Gorman * towards proportionality once we've got nr_to_reclaim. 23181a501907SMel Gorman */ 23191a501907SMel Gorman if (!nr_file || !nr_anon) 23201a501907SMel Gorman break; 23211a501907SMel Gorman 2322e82e0561SMel Gorman if (nr_file > nr_anon) { 2323e82e0561SMel Gorman unsigned long scan_target = targets[LRU_INACTIVE_ANON] + 2324e82e0561SMel Gorman targets[LRU_ACTIVE_ANON] + 1; 2325e82e0561SMel Gorman lru = LRU_BASE; 2326e82e0561SMel Gorman percentage = nr_anon * 100 / scan_target; 2327e82e0561SMel Gorman } else { 2328e82e0561SMel Gorman unsigned long scan_target = targets[LRU_INACTIVE_FILE] + 2329e82e0561SMel Gorman targets[LRU_ACTIVE_FILE] + 1; 2330e82e0561SMel Gorman lru = LRU_FILE; 2331e82e0561SMel Gorman percentage = nr_file * 100 / scan_target; 2332e82e0561SMel Gorman } 2333e82e0561SMel Gorman 2334e82e0561SMel Gorman /* Stop scanning the smaller of the LRU */ 2335e82e0561SMel Gorman nr[lru] = 0; 2336e82e0561SMel Gorman nr[lru + LRU_ACTIVE] = 0; 2337e82e0561SMel Gorman 2338e82e0561SMel Gorman /* 2339e82e0561SMel Gorman * Recalculate the other LRU scan count based on its original 2340e82e0561SMel Gorman * scan target and the percentage scanning already complete 2341e82e0561SMel Gorman */ 2342e82e0561SMel Gorman lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE; 2343e82e0561SMel Gorman nr_scanned = targets[lru] - nr[lru]; 2344e82e0561SMel Gorman nr[lru] = targets[lru] * (100 - percentage) / 100; 2345e82e0561SMel Gorman nr[lru] -= min(nr[lru], nr_scanned); 2346e82e0561SMel Gorman 2347e82e0561SMel Gorman lru += LRU_ACTIVE; 2348e82e0561SMel Gorman nr_scanned = targets[lru] - nr[lru]; 2349e82e0561SMel Gorman nr[lru] = targets[lru] * (100 - percentage) / 100; 2350e82e0561SMel Gorman nr[lru] -= min(nr[lru], nr_scanned); 2351e82e0561SMel Gorman 2352e82e0561SMel Gorman scan_adjusted = true; 23539b4f98cdSJohannes Weiner } 23549b4f98cdSJohannes Weiner blk_finish_plug(&plug); 23559b4f98cdSJohannes Weiner sc->nr_reclaimed += nr_reclaimed; 23569b4f98cdSJohannes Weiner 23579b4f98cdSJohannes Weiner /* 23589b4f98cdSJohannes Weiner * Even if we did not try to evict anon pages at all, we want to 23599b4f98cdSJohannes Weiner * rebalance the anon lru active/inactive ratio. 23609b4f98cdSJohannes Weiner */ 236159dc76b0SRik van Riel if (inactive_list_is_low(lruvec, false)) 23629b4f98cdSJohannes Weiner shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 23639b4f98cdSJohannes Weiner sc, LRU_ACTIVE_ANON); 23649b4f98cdSJohannes Weiner 23659b4f98cdSJohannes Weiner throttle_vm_writeout(sc->gfp_mask); 23669b4f98cdSJohannes Weiner } 23679b4f98cdSJohannes Weiner 236823b9da55SMel Gorman /* Use reclaim/compaction for costly allocs or under memory pressure */ 23699e3b2f8cSKonstantin Khlebnikov static bool in_reclaim_compaction(struct scan_control *sc) 237023b9da55SMel Gorman { 2371d84da3f9SKirill A. Shutemov if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && 237223b9da55SMel Gorman (sc->order > PAGE_ALLOC_COSTLY_ORDER || 23739e3b2f8cSKonstantin Khlebnikov sc->priority < DEF_PRIORITY - 2)) 237423b9da55SMel Gorman return true; 237523b9da55SMel Gorman 237623b9da55SMel Gorman return false; 237723b9da55SMel Gorman } 237823b9da55SMel Gorman 23794f98a2feSRik van Riel /* 238023b9da55SMel Gorman * Reclaim/compaction is used for high-order allocation requests. It reclaims 238123b9da55SMel Gorman * order-0 pages before compacting the zone. should_continue_reclaim() returns 238223b9da55SMel Gorman * true if more pages should be reclaimed such that when the page allocator 238323b9da55SMel Gorman * calls try_to_compact_zone() that it will have enough free pages to succeed. 238423b9da55SMel Gorman * It will give up earlier than that if there is difficulty reclaiming pages. 23853e7d3449SMel Gorman */ 2386a9dd0a83SMel Gorman static inline bool should_continue_reclaim(struct pglist_data *pgdat, 23873e7d3449SMel Gorman unsigned long nr_reclaimed, 23883e7d3449SMel Gorman unsigned long nr_scanned, 23893e7d3449SMel Gorman struct scan_control *sc) 23903e7d3449SMel Gorman { 23913e7d3449SMel Gorman unsigned long pages_for_compaction; 23923e7d3449SMel Gorman unsigned long inactive_lru_pages; 2393a9dd0a83SMel Gorman int z; 23943e7d3449SMel Gorman 23953e7d3449SMel Gorman /* If not in reclaim/compaction mode, stop */ 23969e3b2f8cSKonstantin Khlebnikov if (!in_reclaim_compaction(sc)) 23973e7d3449SMel Gorman return false; 23983e7d3449SMel Gorman 23992876592fSMel Gorman /* Consider stopping depending on scan and reclaim activity */ 24002876592fSMel Gorman if (sc->gfp_mask & __GFP_REPEAT) { 24013e7d3449SMel Gorman /* 24022876592fSMel Gorman * For __GFP_REPEAT allocations, stop reclaiming if the 24032876592fSMel Gorman * full LRU list has been scanned and we are still failing 24042876592fSMel Gorman * to reclaim pages. This full LRU scan is potentially 24052876592fSMel Gorman * expensive but a __GFP_REPEAT caller really wants to succeed 24063e7d3449SMel Gorman */ 24073e7d3449SMel Gorman if (!nr_reclaimed && !nr_scanned) 24083e7d3449SMel Gorman return false; 24092876592fSMel Gorman } else { 24102876592fSMel Gorman /* 24112876592fSMel Gorman * For non-__GFP_REPEAT allocations which can presumably 24122876592fSMel Gorman * fail without consequence, stop if we failed to reclaim 24132876592fSMel Gorman * any pages from the last SWAP_CLUSTER_MAX number of 24142876592fSMel Gorman * pages that were scanned. This will return to the 24152876592fSMel Gorman * caller faster at the risk reclaim/compaction and 24162876592fSMel Gorman * the resulting allocation attempt fails 24172876592fSMel Gorman */ 24182876592fSMel Gorman if (!nr_reclaimed) 24192876592fSMel Gorman return false; 24202876592fSMel Gorman } 24213e7d3449SMel Gorman 24223e7d3449SMel Gorman /* 24233e7d3449SMel Gorman * If we have not reclaimed enough pages for compaction and the 24243e7d3449SMel Gorman * inactive lists are large enough, continue reclaiming 24253e7d3449SMel Gorman */ 24263e7d3449SMel Gorman pages_for_compaction = (2UL << sc->order); 2427a9dd0a83SMel Gorman inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); 2428ec8acf20SShaohua Li if (get_nr_swap_pages() > 0) 2429a9dd0a83SMel Gorman inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); 24303e7d3449SMel Gorman if (sc->nr_reclaimed < pages_for_compaction && 24313e7d3449SMel Gorman inactive_lru_pages > pages_for_compaction) 24323e7d3449SMel Gorman return true; 24333e7d3449SMel Gorman 24343e7d3449SMel Gorman /* If compaction would go ahead or the allocation would succeed, stop */ 2435a9dd0a83SMel Gorman for (z = 0; z <= sc->reclaim_idx; z++) { 2436a9dd0a83SMel Gorman struct zone *zone = &pgdat->node_zones[z]; 2437a9dd0a83SMel Gorman if (!populated_zone(zone)) 2438a9dd0a83SMel Gorman continue; 2439a9dd0a83SMel Gorman 2440a9dd0a83SMel Gorman switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) { 24413e7d3449SMel Gorman case COMPACT_PARTIAL: 24423e7d3449SMel Gorman case COMPACT_CONTINUE: 24433e7d3449SMel Gorman return false; 24443e7d3449SMel Gorman default: 2445a9dd0a83SMel Gorman /* check next zone */ 2446a9dd0a83SMel Gorman ; 24473e7d3449SMel Gorman } 24483e7d3449SMel Gorman } 2449a9dd0a83SMel Gorman return true; 2450a9dd0a83SMel Gorman } 24513e7d3449SMel Gorman 2452970a39a3SMel Gorman static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) 2453f16015fbSJohannes Weiner { 2454cb731d6cSVladimir Davydov struct reclaim_state *reclaim_state = current->reclaim_state; 24559b4f98cdSJohannes Weiner unsigned long nr_reclaimed, nr_scanned; 24562344d7e4SJohannes Weiner bool reclaimable = false; 24579b4f98cdSJohannes Weiner 24589b4f98cdSJohannes Weiner do { 24595660048cSJohannes Weiner struct mem_cgroup *root = sc->target_mem_cgroup; 24605660048cSJohannes Weiner struct mem_cgroup_reclaim_cookie reclaim = { 2461ef8f2327SMel Gorman .pgdat = pgdat, 24629e3b2f8cSKonstantin Khlebnikov .priority = sc->priority, 24635660048cSJohannes Weiner }; 2464a9dd0a83SMel Gorman unsigned long node_lru_pages = 0; 2465694fbc0fSAndrew Morton struct mem_cgroup *memcg; 24665660048cSJohannes Weiner 24679b4f98cdSJohannes Weiner nr_reclaimed = sc->nr_reclaimed; 24689b4f98cdSJohannes Weiner nr_scanned = sc->nr_scanned; 24699b4f98cdSJohannes Weiner 2470694fbc0fSAndrew Morton memcg = mem_cgroup_iter(root, NULL, &reclaim); 2471694fbc0fSAndrew Morton do { 24726b4f7799SJohannes Weiner unsigned long lru_pages; 24738e8ae645SJohannes Weiner unsigned long reclaimed; 2474cb731d6cSVladimir Davydov unsigned long scanned; 24759b4f98cdSJohannes Weiner 2476241994edSJohannes Weiner if (mem_cgroup_low(root, memcg)) { 2477241994edSJohannes Weiner if (!sc->may_thrash) 2478241994edSJohannes Weiner continue; 2479241994edSJohannes Weiner mem_cgroup_events(memcg, MEMCG_LOW, 1); 2480241994edSJohannes Weiner } 2481241994edSJohannes Weiner 24828e8ae645SJohannes Weiner reclaimed = sc->nr_reclaimed; 2483cb731d6cSVladimir Davydov scanned = sc->nr_scanned; 24845660048cSJohannes Weiner 2485a9dd0a83SMel Gorman shrink_node_memcg(pgdat, memcg, sc, &lru_pages); 2486a9dd0a83SMel Gorman node_lru_pages += lru_pages; 2487f9be23d6SKonstantin Khlebnikov 2488b2e18757SMel Gorman if (!global_reclaim(sc)) 2489a9dd0a83SMel Gorman shrink_slab(sc->gfp_mask, pgdat->node_id, 2490cb731d6cSVladimir Davydov memcg, sc->nr_scanned - scanned, 2491cb731d6cSVladimir Davydov lru_pages); 2492cb731d6cSVladimir Davydov 24938e8ae645SJohannes Weiner /* Record the group's reclaim efficiency */ 24948e8ae645SJohannes Weiner vmpressure(sc->gfp_mask, memcg, false, 24958e8ae645SJohannes Weiner sc->nr_scanned - scanned, 24968e8ae645SJohannes Weiner sc->nr_reclaimed - reclaimed); 24978e8ae645SJohannes Weiner 24985660048cSJohannes Weiner /* 2499a394cb8eSMichal Hocko * Direct reclaim and kswapd have to scan all memory 2500a394cb8eSMichal Hocko * cgroups to fulfill the overall scan target for the 2501a9dd0a83SMel Gorman * node. 2502a394cb8eSMichal Hocko * 2503a394cb8eSMichal Hocko * Limit reclaim, on the other hand, only cares about 2504a394cb8eSMichal Hocko * nr_to_reclaim pages to be reclaimed and it will 2505a394cb8eSMichal Hocko * retry with decreasing priority if one round over the 2506a394cb8eSMichal Hocko * whole hierarchy is not sufficient. 25075660048cSJohannes Weiner */ 2508a394cb8eSMichal Hocko if (!global_reclaim(sc) && 2509a394cb8eSMichal Hocko sc->nr_reclaimed >= sc->nr_to_reclaim) { 25105660048cSJohannes Weiner mem_cgroup_iter_break(root, memcg); 25115660048cSJohannes Weiner break; 25125660048cSJohannes Weiner } 2513241994edSJohannes Weiner } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim))); 251470ddf637SAnton Vorontsov 25156b4f7799SJohannes Weiner /* 25166b4f7799SJohannes Weiner * Shrink the slab caches in the same proportion that 25176b4f7799SJohannes Weiner * the eligible LRU pages were scanned. 25186b4f7799SJohannes Weiner */ 2519b2e18757SMel Gorman if (global_reclaim(sc)) 2520a9dd0a83SMel Gorman shrink_slab(sc->gfp_mask, pgdat->node_id, NULL, 25216b4f7799SJohannes Weiner sc->nr_scanned - nr_scanned, 2522a9dd0a83SMel Gorman node_lru_pages); 25236b4f7799SJohannes Weiner 25246b4f7799SJohannes Weiner if (reclaim_state) { 2525cb731d6cSVladimir Davydov sc->nr_reclaimed += reclaim_state->reclaimed_slab; 25266b4f7799SJohannes Weiner reclaim_state->reclaimed_slab = 0; 25276b4f7799SJohannes Weiner } 25286b4f7799SJohannes Weiner 25298e8ae645SJohannes Weiner /* Record the subtree's reclaim efficiency */ 25308e8ae645SJohannes Weiner vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, 253170ddf637SAnton Vorontsov sc->nr_scanned - nr_scanned, 253270ddf637SAnton Vorontsov sc->nr_reclaimed - nr_reclaimed); 253370ddf637SAnton Vorontsov 25342344d7e4SJohannes Weiner if (sc->nr_reclaimed - nr_reclaimed) 25352344d7e4SJohannes Weiner reclaimable = true; 25362344d7e4SJohannes Weiner 2537a9dd0a83SMel Gorman } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed, 25389b4f98cdSJohannes Weiner sc->nr_scanned - nr_scanned, sc)); 25392344d7e4SJohannes Weiner 25402344d7e4SJohannes Weiner return reclaimable; 2541f16015fbSJohannes Weiner } 2542f16015fbSJohannes Weiner 254353853e2dSVlastimil Babka /* 254453853e2dSVlastimil Babka * Returns true if compaction should go ahead for a high-order request, or 254553853e2dSVlastimil Babka * the high-order allocation would succeed without compaction. 254653853e2dSVlastimil Babka */ 25474f588331SMel Gorman static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) 2548fe4b1b24SMel Gorman { 254931483b6aSMel Gorman unsigned long watermark; 2550fe4b1b24SMel Gorman bool watermark_ok; 2551fe4b1b24SMel Gorman 2552fe4b1b24SMel Gorman /* 2553fe4b1b24SMel Gorman * Compaction takes time to run and there are potentially other 2554fe4b1b24SMel Gorman * callers using the pages just freed. Continue reclaiming until 2555fe4b1b24SMel Gorman * there is a buffer of free pages available to give compaction 2556fe4b1b24SMel Gorman * a reasonable chance of completing and allocating the page 2557fe4b1b24SMel Gorman */ 25584f588331SMel Gorman watermark = high_wmark_pages(zone) + (2UL << sc->order); 25594f588331SMel Gorman watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); 2560fe4b1b24SMel Gorman 2561fe4b1b24SMel Gorman /* 2562fe4b1b24SMel Gorman * If compaction is deferred, reclaim up to a point where 2563fe4b1b24SMel Gorman * compaction will have a chance of success when re-enabled 2564fe4b1b24SMel Gorman */ 25654f588331SMel Gorman if (compaction_deferred(zone, sc->order)) 2566fe4b1b24SMel Gorman return watermark_ok; 2567fe4b1b24SMel Gorman 256853853e2dSVlastimil Babka /* 256953853e2dSVlastimil Babka * If compaction is not ready to start and allocation is not likely 257053853e2dSVlastimil Babka * to succeed without it, then keep reclaiming. 257153853e2dSVlastimil Babka */ 25724f588331SMel Gorman if (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx) == COMPACT_SKIPPED) 2573fe4b1b24SMel Gorman return false; 2574fe4b1b24SMel Gorman 2575fe4b1b24SMel Gorman return watermark_ok; 2576fe4b1b24SMel Gorman } 2577fe4b1b24SMel Gorman 25781da177e4SLinus Torvalds /* 25791da177e4SLinus Torvalds * This is the direct reclaim path, for page-allocating processes. We only 25801da177e4SLinus Torvalds * try to reclaim pages from zones which will satisfy the caller's allocation 25811da177e4SLinus Torvalds * request. 25821da177e4SLinus Torvalds * 25831da177e4SLinus Torvalds * If a zone is deemed to be full of pinned pages then just give it a light 25841da177e4SLinus Torvalds * scan then give up on it. 25851da177e4SLinus Torvalds */ 25860a0337e0SMichal Hocko static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc) 25871da177e4SLinus Torvalds { 2588dd1a239fSMel Gorman struct zoneref *z; 258954a6eb5cSMel Gorman struct zone *zone; 25900608f43dSAndrew Morton unsigned long nr_soft_reclaimed; 25910608f43dSAndrew Morton unsigned long nr_soft_scanned; 2592619d0d76SWeijie Yang gfp_t orig_mask; 259379dafcdcSMel Gorman pg_data_t *last_pgdat = NULL; 25941cfb419bSKAMEZAWA Hiroyuki 2595cc715d99SMel Gorman /* 2596cc715d99SMel Gorman * If the number of buffer_heads in the machine exceeds the maximum 2597cc715d99SMel Gorman * allowed level, force direct reclaim to scan the highmem zone as 2598cc715d99SMel Gorman * highmem pages could be pinning lowmem pages storing buffer_heads 2599cc715d99SMel Gorman */ 2600619d0d76SWeijie Yang orig_mask = sc->gfp_mask; 2601b2e18757SMel Gorman if (buffer_heads_over_limit) { 2602cc715d99SMel Gorman sc->gfp_mask |= __GFP_HIGHMEM; 26034f588331SMel Gorman sc->reclaim_idx = gfp_zone(sc->gfp_mask); 2604b2e18757SMel Gorman } 2605cc715d99SMel Gorman 2606d4debc66SMel Gorman for_each_zone_zonelist_nodemask(zone, z, zonelist, 2607b2e18757SMel Gorman sc->reclaim_idx, sc->nodemask) { 2608b2e18757SMel Gorman /* 26091cfb419bSKAMEZAWA Hiroyuki * Take care memory controller reclaiming has small influence 26101cfb419bSKAMEZAWA Hiroyuki * to global LRU. 26111cfb419bSKAMEZAWA Hiroyuki */ 261289b5fae5SJohannes Weiner if (global_reclaim(sc)) { 2613344736f2SVladimir Davydov if (!cpuset_zone_allowed(zone, 2614344736f2SVladimir Davydov GFP_KERNEL | __GFP_HARDWALL)) 26151da177e4SLinus Torvalds continue; 261665ec02cbSVladimir Davydov 26176e543d57SLisa Du if (sc->priority != DEF_PRIORITY && 2618599d0c95SMel Gorman !pgdat_reclaimable(zone->zone_pgdat)) 26191da177e4SLinus Torvalds continue; /* Let kswapd poll it */ 26200b06496aSJohannes Weiner 2621e0887c19SRik van Riel /* 2622e0c23279SMel Gorman * If we already have plenty of memory free for 2623e0c23279SMel Gorman * compaction in this zone, don't free any more. 2624e0c23279SMel Gorman * Even though compaction is invoked for any 2625e0c23279SMel Gorman * non-zero order, only frequent costly order 2626e0c23279SMel Gorman * reclamation is disruptive enough to become a 2627c7cfa37bSCopot Alexandru * noticeable problem, like transparent huge 2628c7cfa37bSCopot Alexandru * page allocations. 2629e0887c19SRik van Riel */ 26300b06496aSJohannes Weiner if (IS_ENABLED(CONFIG_COMPACTION) && 26310b06496aSJohannes Weiner sc->order > PAGE_ALLOC_COSTLY_ORDER && 26324f588331SMel Gorman compaction_ready(zone, sc)) { 26330b06496aSJohannes Weiner sc->compaction_ready = true; 2634e0887c19SRik van Riel continue; 2635e0887c19SRik van Riel } 26360b06496aSJohannes Weiner 26370608f43dSAndrew Morton /* 263879dafcdcSMel Gorman * Shrink each node in the zonelist once. If the 263979dafcdcSMel Gorman * zonelist is ordered by zone (not the default) then a 264079dafcdcSMel Gorman * node may be shrunk multiple times but in that case 264179dafcdcSMel Gorman * the user prefers lower zones being preserved. 264279dafcdcSMel Gorman */ 264379dafcdcSMel Gorman if (zone->zone_pgdat == last_pgdat) 264479dafcdcSMel Gorman continue; 264579dafcdcSMel Gorman 264679dafcdcSMel Gorman /* 26470608f43dSAndrew Morton * This steals pages from memory cgroups over softlimit 26480608f43dSAndrew Morton * and returns the number of reclaimed pages and 26490608f43dSAndrew Morton * scanned pages. This works for global memory pressure 26500608f43dSAndrew Morton * and balancing, not for a memcg's limit. 26510608f43dSAndrew Morton */ 26520608f43dSAndrew Morton nr_soft_scanned = 0; 2653ef8f2327SMel Gorman nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat, 26540608f43dSAndrew Morton sc->order, sc->gfp_mask, 26550608f43dSAndrew Morton &nr_soft_scanned); 26560608f43dSAndrew Morton sc->nr_reclaimed += nr_soft_reclaimed; 26570608f43dSAndrew Morton sc->nr_scanned += nr_soft_scanned; 2658ac34a1a3SKAMEZAWA Hiroyuki /* need some check for avoid more shrink_zone() */ 2659ac34a1a3SKAMEZAWA Hiroyuki } 2660d149e3b2SYing Han 266179dafcdcSMel Gorman /* See comment about same check for global reclaim above */ 266279dafcdcSMel Gorman if (zone->zone_pgdat == last_pgdat) 266379dafcdcSMel Gorman continue; 266479dafcdcSMel Gorman last_pgdat = zone->zone_pgdat; 2665970a39a3SMel Gorman shrink_node(zone->zone_pgdat, sc); 26661da177e4SLinus Torvalds } 2667e0c23279SMel Gorman 266865ec02cbSVladimir Davydov /* 2669619d0d76SWeijie Yang * Restore to original mask to avoid the impact on the caller if we 2670619d0d76SWeijie Yang * promoted it to __GFP_HIGHMEM. 2671619d0d76SWeijie Yang */ 2672619d0d76SWeijie Yang sc->gfp_mask = orig_mask; 26731da177e4SLinus Torvalds } 26741da177e4SLinus Torvalds 26751da177e4SLinus Torvalds /* 26761da177e4SLinus Torvalds * This is the main entry point to direct page reclaim. 26771da177e4SLinus Torvalds * 26781da177e4SLinus Torvalds * If a full scan of the inactive list fails to free enough memory then we 26791da177e4SLinus Torvalds * are "out of memory" and something needs to be killed. 26801da177e4SLinus Torvalds * 26811da177e4SLinus Torvalds * If the caller is !__GFP_FS then the probability of a failure is reasonably 26821da177e4SLinus Torvalds * high - the zone may be full of dirty or under-writeback pages, which this 26835b0830cbSJens Axboe * caller can't do much about. We kick the writeback threads and take explicit 26845b0830cbSJens Axboe * naps in the hope that some of these pages can be written. But if the 26855b0830cbSJens Axboe * allocating task holds filesystem locks which prevent writeout this might not 26865b0830cbSJens Axboe * work, and the allocation attempt will fail. 2687a41f24eaSNishanth Aravamudan * 2688a41f24eaSNishanth Aravamudan * returns: 0, if no pages reclaimed 2689a41f24eaSNishanth Aravamudan * else, the number of pages reclaimed 26901da177e4SLinus Torvalds */ 2691dac1d27bSMel Gorman static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 26923115cd91SVladimir Davydov struct scan_control *sc) 26931da177e4SLinus Torvalds { 2694241994edSJohannes Weiner int initial_priority = sc->priority; 269569e05944SAndrew Morton unsigned long total_scanned = 0; 269622fba335SKOSAKI Motohiro unsigned long writeback_threshold; 2697241994edSJohannes Weiner retry: 2698873b4771SKeika Kobayashi delayacct_freepages_start(); 2699873b4771SKeika Kobayashi 270089b5fae5SJohannes Weiner if (global_reclaim(sc)) 27017cc30fcfSMel Gorman __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); 27021da177e4SLinus Torvalds 27039e3b2f8cSKonstantin Khlebnikov do { 270470ddf637SAnton Vorontsov vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, 270570ddf637SAnton Vorontsov sc->priority); 270666e1707bSBalbir Singh sc->nr_scanned = 0; 27070a0337e0SMichal Hocko shrink_zones(zonelist, sc); 2708e0c23279SMel Gorman 270966e1707bSBalbir Singh total_scanned += sc->nr_scanned; 2710bb21c7ceSKOSAKI Motohiro if (sc->nr_reclaimed >= sc->nr_to_reclaim) 27110b06496aSJohannes Weiner break; 27120b06496aSJohannes Weiner 27130b06496aSJohannes Weiner if (sc->compaction_ready) 27140b06496aSJohannes Weiner break; 27151da177e4SLinus Torvalds 27161da177e4SLinus Torvalds /* 27170e50ce3bSMinchan Kim * If we're getting trouble reclaiming, start doing 27180e50ce3bSMinchan Kim * writepage even in laptop mode. 27190e50ce3bSMinchan Kim */ 27200e50ce3bSMinchan Kim if (sc->priority < DEF_PRIORITY - 2) 27210e50ce3bSMinchan Kim sc->may_writepage = 1; 27220e50ce3bSMinchan Kim 27230e50ce3bSMinchan Kim /* 27241da177e4SLinus Torvalds * Try to write back as many pages as we just scanned. This 27251da177e4SLinus Torvalds * tends to cause slow streaming writers to write data to the 27261da177e4SLinus Torvalds * disk smoothly, at the dirtying rate, which is nice. But 27271da177e4SLinus Torvalds * that's undesirable in laptop mode, where we *want* lumpy 27281da177e4SLinus Torvalds * writeout. So in laptop mode, write out the whole world. 27291da177e4SLinus Torvalds */ 273022fba335SKOSAKI Motohiro writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; 273122fba335SKOSAKI Motohiro if (total_scanned > writeback_threshold) { 27320e175a18SCurt Wohlgemuth wakeup_flusher_threads(laptop_mode ? 0 : total_scanned, 27330e175a18SCurt Wohlgemuth WB_REASON_TRY_TO_FREE_PAGES); 273466e1707bSBalbir Singh sc->may_writepage = 1; 27351da177e4SLinus Torvalds } 27360b06496aSJohannes Weiner } while (--sc->priority >= 0); 2737bb21c7ceSKOSAKI Motohiro 2738873b4771SKeika Kobayashi delayacct_freepages_end(); 2739873b4771SKeika Kobayashi 2740bb21c7ceSKOSAKI Motohiro if (sc->nr_reclaimed) 2741bb21c7ceSKOSAKI Motohiro return sc->nr_reclaimed; 2742bb21c7ceSKOSAKI Motohiro 27430cee34fdSMel Gorman /* Aborted reclaim to try compaction? don't OOM, then */ 27440b06496aSJohannes Weiner if (sc->compaction_ready) 27457335084dSMel Gorman return 1; 27467335084dSMel Gorman 2747241994edSJohannes Weiner /* Untapped cgroup reserves? Don't OOM, retry. */ 2748241994edSJohannes Weiner if (!sc->may_thrash) { 2749241994edSJohannes Weiner sc->priority = initial_priority; 2750241994edSJohannes Weiner sc->may_thrash = 1; 2751241994edSJohannes Weiner goto retry; 2752241994edSJohannes Weiner } 2753241994edSJohannes Weiner 2754bb21c7ceSKOSAKI Motohiro return 0; 27551da177e4SLinus Torvalds } 27561da177e4SLinus Torvalds 27575515061dSMel Gorman static bool pfmemalloc_watermark_ok(pg_data_t *pgdat) 27585515061dSMel Gorman { 27595515061dSMel Gorman struct zone *zone; 27605515061dSMel Gorman unsigned long pfmemalloc_reserve = 0; 27615515061dSMel Gorman unsigned long free_pages = 0; 27625515061dSMel Gorman int i; 27635515061dSMel Gorman bool wmark_ok; 27645515061dSMel Gorman 27655515061dSMel Gorman for (i = 0; i <= ZONE_NORMAL; i++) { 27665515061dSMel Gorman zone = &pgdat->node_zones[i]; 2767f012a84aSNishanth Aravamudan if (!populated_zone(zone) || 2768599d0c95SMel Gorman pgdat_reclaimable_pages(pgdat) == 0) 2769675becceSMel Gorman continue; 2770675becceSMel Gorman 27715515061dSMel Gorman pfmemalloc_reserve += min_wmark_pages(zone); 27725515061dSMel Gorman free_pages += zone_page_state(zone, NR_FREE_PAGES); 27735515061dSMel Gorman } 27745515061dSMel Gorman 2775675becceSMel Gorman /* If there are no reserves (unexpected config) then do not throttle */ 2776675becceSMel Gorman if (!pfmemalloc_reserve) 2777675becceSMel Gorman return true; 2778675becceSMel Gorman 27795515061dSMel Gorman wmark_ok = free_pages > pfmemalloc_reserve / 2; 27805515061dSMel Gorman 27815515061dSMel Gorman /* kswapd must be awake if processes are being throttled */ 27825515061dSMel Gorman if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { 278338087d9bSMel Gorman pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx, 27845515061dSMel Gorman (enum zone_type)ZONE_NORMAL); 27855515061dSMel Gorman wake_up_interruptible(&pgdat->kswapd_wait); 27865515061dSMel Gorman } 27875515061dSMel Gorman 27885515061dSMel Gorman return wmark_ok; 27895515061dSMel Gorman } 27905515061dSMel Gorman 27915515061dSMel Gorman /* 27925515061dSMel Gorman * Throttle direct reclaimers if backing storage is backed by the network 27935515061dSMel Gorman * and the PFMEMALLOC reserve for the preferred node is getting dangerously 27945515061dSMel Gorman * depleted. kswapd will continue to make progress and wake the processes 279550694c28SMel Gorman * when the low watermark is reached. 279650694c28SMel Gorman * 279750694c28SMel Gorman * Returns true if a fatal signal was delivered during throttling. If this 279850694c28SMel Gorman * happens, the page allocator should not consider triggering the OOM killer. 27995515061dSMel Gorman */ 280050694c28SMel Gorman static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, 28015515061dSMel Gorman nodemask_t *nodemask) 28025515061dSMel Gorman { 2803675becceSMel Gorman struct zoneref *z; 28045515061dSMel Gorman struct zone *zone; 2805675becceSMel Gorman pg_data_t *pgdat = NULL; 28065515061dSMel Gorman 28075515061dSMel Gorman /* 28085515061dSMel Gorman * Kernel threads should not be throttled as they may be indirectly 28095515061dSMel Gorman * responsible for cleaning pages necessary for reclaim to make forward 28105515061dSMel Gorman * progress. kjournald for example may enter direct reclaim while 28115515061dSMel Gorman * committing a transaction where throttling it could forcing other 28125515061dSMel Gorman * processes to block on log_wait_commit(). 28135515061dSMel Gorman */ 28145515061dSMel Gorman if (current->flags & PF_KTHREAD) 281550694c28SMel Gorman goto out; 281650694c28SMel Gorman 281750694c28SMel Gorman /* 281850694c28SMel Gorman * If a fatal signal is pending, this process should not throttle. 281950694c28SMel Gorman * It should return quickly so it can exit and free its memory 282050694c28SMel Gorman */ 282150694c28SMel Gorman if (fatal_signal_pending(current)) 282250694c28SMel Gorman goto out; 28235515061dSMel Gorman 2824675becceSMel Gorman /* 2825675becceSMel Gorman * Check if the pfmemalloc reserves are ok by finding the first node 2826675becceSMel Gorman * with a usable ZONE_NORMAL or lower zone. The expectation is that 2827675becceSMel Gorman * GFP_KERNEL will be required for allocating network buffers when 2828675becceSMel Gorman * swapping over the network so ZONE_HIGHMEM is unusable. 2829675becceSMel Gorman * 2830675becceSMel Gorman * Throttling is based on the first usable node and throttled processes 2831675becceSMel Gorman * wait on a queue until kswapd makes progress and wakes them. There 2832675becceSMel Gorman * is an affinity then between processes waking up and where reclaim 2833675becceSMel Gorman * progress has been made assuming the process wakes on the same node. 2834675becceSMel Gorman * More importantly, processes running on remote nodes will not compete 2835675becceSMel Gorman * for remote pfmemalloc reserves and processes on different nodes 2836675becceSMel Gorman * should make reasonable progress. 2837675becceSMel Gorman */ 2838675becceSMel Gorman for_each_zone_zonelist_nodemask(zone, z, zonelist, 283917636faaSMichael S. Tsirkin gfp_zone(gfp_mask), nodemask) { 2840675becceSMel Gorman if (zone_idx(zone) > ZONE_NORMAL) 2841675becceSMel Gorman continue; 2842675becceSMel Gorman 2843675becceSMel Gorman /* Throttle based on the first usable node */ 28445515061dSMel Gorman pgdat = zone->zone_pgdat; 28455515061dSMel Gorman if (pfmemalloc_watermark_ok(pgdat)) 284650694c28SMel Gorman goto out; 2847675becceSMel Gorman break; 2848675becceSMel Gorman } 2849675becceSMel Gorman 2850675becceSMel Gorman /* If no zone was usable by the allocation flags then do not throttle */ 2851675becceSMel Gorman if (!pgdat) 2852675becceSMel Gorman goto out; 28535515061dSMel Gorman 285468243e76SMel Gorman /* Account for the throttling */ 285568243e76SMel Gorman count_vm_event(PGSCAN_DIRECT_THROTTLE); 285668243e76SMel Gorman 28575515061dSMel Gorman /* 28585515061dSMel Gorman * If the caller cannot enter the filesystem, it's possible that it 28595515061dSMel Gorman * is due to the caller holding an FS lock or performing a journal 28605515061dSMel Gorman * transaction in the case of a filesystem like ext[3|4]. In this case, 28615515061dSMel Gorman * it is not safe to block on pfmemalloc_wait as kswapd could be 28625515061dSMel Gorman * blocked waiting on the same lock. Instead, throttle for up to a 28635515061dSMel Gorman * second before continuing. 28645515061dSMel Gorman */ 28655515061dSMel Gorman if (!(gfp_mask & __GFP_FS)) { 28665515061dSMel Gorman wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, 28675515061dSMel Gorman pfmemalloc_watermark_ok(pgdat), HZ); 286850694c28SMel Gorman 286950694c28SMel Gorman goto check_pending; 28705515061dSMel Gorman } 28715515061dSMel Gorman 28725515061dSMel Gorman /* Throttle until kswapd wakes the process */ 28735515061dSMel Gorman wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, 28745515061dSMel Gorman pfmemalloc_watermark_ok(pgdat)); 287550694c28SMel Gorman 287650694c28SMel Gorman check_pending: 287750694c28SMel Gorman if (fatal_signal_pending(current)) 287850694c28SMel Gorman return true; 287950694c28SMel Gorman 288050694c28SMel Gorman out: 288150694c28SMel Gorman return false; 28825515061dSMel Gorman } 28835515061dSMel Gorman 2884dac1d27bSMel Gorman unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 2885327c0e96SKAMEZAWA Hiroyuki gfp_t gfp_mask, nodemask_t *nodemask) 288666e1707bSBalbir Singh { 288733906bc5SMel Gorman unsigned long nr_reclaimed; 288866e1707bSBalbir Singh struct scan_control sc = { 288922fba335SKOSAKI Motohiro .nr_to_reclaim = SWAP_CLUSTER_MAX, 2890ee814fe2SJohannes Weiner .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)), 2891b2e18757SMel Gorman .reclaim_idx = gfp_zone(gfp_mask), 2892ee814fe2SJohannes Weiner .order = order, 2893ee814fe2SJohannes Weiner .nodemask = nodemask, 2894ee814fe2SJohannes Weiner .priority = DEF_PRIORITY, 2895ee814fe2SJohannes Weiner .may_writepage = !laptop_mode, 2896a6dc60f8SJohannes Weiner .may_unmap = 1, 28972e2e4259SKOSAKI Motohiro .may_swap = 1, 289866e1707bSBalbir Singh }; 289966e1707bSBalbir Singh 29005515061dSMel Gorman /* 290150694c28SMel Gorman * Do not enter reclaim if fatal signal was delivered while throttled. 290250694c28SMel Gorman * 1 is returned so that the page allocator does not OOM kill at this 290350694c28SMel Gorman * point. 29045515061dSMel Gorman */ 290550694c28SMel Gorman if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask)) 29065515061dSMel Gorman return 1; 29075515061dSMel Gorman 290833906bc5SMel Gorman trace_mm_vmscan_direct_reclaim_begin(order, 290933906bc5SMel Gorman sc.may_writepage, 2910e5146b12SMel Gorman gfp_mask, 2911e5146b12SMel Gorman sc.reclaim_idx); 291233906bc5SMel Gorman 29133115cd91SVladimir Davydov nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 291433906bc5SMel Gorman 291533906bc5SMel Gorman trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 291633906bc5SMel Gorman 291733906bc5SMel Gorman return nr_reclaimed; 291866e1707bSBalbir Singh } 291966e1707bSBalbir Singh 2920c255a458SAndrew Morton #ifdef CONFIG_MEMCG 292166e1707bSBalbir Singh 2922a9dd0a83SMel Gorman unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, 29234e416953SBalbir Singh gfp_t gfp_mask, bool noswap, 2924ef8f2327SMel Gorman pg_data_t *pgdat, 29250ae5e89cSYing Han unsigned long *nr_scanned) 29264e416953SBalbir Singh { 29274e416953SBalbir Singh struct scan_control sc = { 2928b8f5c566SKOSAKI Motohiro .nr_to_reclaim = SWAP_CLUSTER_MAX, 2929ee814fe2SJohannes Weiner .target_mem_cgroup = memcg, 29304e416953SBalbir Singh .may_writepage = !laptop_mode, 29314e416953SBalbir Singh .may_unmap = 1, 2932b2e18757SMel Gorman .reclaim_idx = MAX_NR_ZONES - 1, 29334e416953SBalbir Singh .may_swap = !noswap, 29344e416953SBalbir Singh }; 29356b4f7799SJohannes Weiner unsigned long lru_pages; 29360ae5e89cSYing Han 29374e416953SBalbir Singh sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 29384e416953SBalbir Singh (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 2939bdce6d9eSKOSAKI Motohiro 29409e3b2f8cSKonstantin Khlebnikov trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, 2941bdce6d9eSKOSAKI Motohiro sc.may_writepage, 2942e5146b12SMel Gorman sc.gfp_mask, 2943e5146b12SMel Gorman sc.reclaim_idx); 2944bdce6d9eSKOSAKI Motohiro 29454e416953SBalbir Singh /* 29464e416953SBalbir Singh * NOTE: Although we can get the priority field, using it 29474e416953SBalbir Singh * here is not a good idea, since it limits the pages we can scan. 2948a9dd0a83SMel Gorman * if we don't reclaim here, the shrink_node from balance_pgdat 29494e416953SBalbir Singh * will pick up pages from other mem cgroup's as well. We hack 29504e416953SBalbir Singh * the priority and make it zero. 29514e416953SBalbir Singh */ 2952ef8f2327SMel Gorman shrink_node_memcg(pgdat, memcg, &sc, &lru_pages); 2953bdce6d9eSKOSAKI Motohiro 2954bdce6d9eSKOSAKI Motohiro trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 2955bdce6d9eSKOSAKI Motohiro 29560ae5e89cSYing Han *nr_scanned = sc.nr_scanned; 29574e416953SBalbir Singh return sc.nr_reclaimed; 29584e416953SBalbir Singh } 29594e416953SBalbir Singh 296072835c86SJohannes Weiner unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, 2961b70a2a21SJohannes Weiner unsigned long nr_pages, 29628c7c6e34SKAMEZAWA Hiroyuki gfp_t gfp_mask, 2963b70a2a21SJohannes Weiner bool may_swap) 296466e1707bSBalbir Singh { 29654e416953SBalbir Singh struct zonelist *zonelist; 2966bdce6d9eSKOSAKI Motohiro unsigned long nr_reclaimed; 2967889976dbSYing Han int nid; 296866e1707bSBalbir Singh struct scan_control sc = { 2969b70a2a21SJohannes Weiner .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), 2970ee814fe2SJohannes Weiner .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2971ee814fe2SJohannes Weiner (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), 2972b2e18757SMel Gorman .reclaim_idx = MAX_NR_ZONES - 1, 2973ee814fe2SJohannes Weiner .target_mem_cgroup = memcg, 2974ee814fe2SJohannes Weiner .priority = DEF_PRIORITY, 297566e1707bSBalbir Singh .may_writepage = !laptop_mode, 2976a6dc60f8SJohannes Weiner .may_unmap = 1, 2977b70a2a21SJohannes Weiner .may_swap = may_swap, 2978a09ed5e0SYing Han }; 297966e1707bSBalbir Singh 2980889976dbSYing Han /* 2981889976dbSYing Han * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't 2982889976dbSYing Han * take care of from where we get pages. So the node where we start the 2983889976dbSYing Han * scan does not need to be the current node. 2984889976dbSYing Han */ 298572835c86SJohannes Weiner nid = mem_cgroup_select_victim_node(memcg); 2986889976dbSYing Han 2987889976dbSYing Han zonelist = NODE_DATA(nid)->node_zonelists; 2988bdce6d9eSKOSAKI Motohiro 2989bdce6d9eSKOSAKI Motohiro trace_mm_vmscan_memcg_reclaim_begin(0, 2990bdce6d9eSKOSAKI Motohiro sc.may_writepage, 2991e5146b12SMel Gorman sc.gfp_mask, 2992e5146b12SMel Gorman sc.reclaim_idx); 2993bdce6d9eSKOSAKI Motohiro 29943115cd91SVladimir Davydov nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 2995bdce6d9eSKOSAKI Motohiro 2996bdce6d9eSKOSAKI Motohiro trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 2997bdce6d9eSKOSAKI Motohiro 2998bdce6d9eSKOSAKI Motohiro return nr_reclaimed; 299966e1707bSBalbir Singh } 300066e1707bSBalbir Singh #endif 300166e1707bSBalbir Singh 30021d82de61SMel Gorman static void age_active_anon(struct pglist_data *pgdat, 3003ef8f2327SMel Gorman struct scan_control *sc) 3004f16015fbSJohannes Weiner { 3005b95a2f2dSJohannes Weiner struct mem_cgroup *memcg; 3006b95a2f2dSJohannes Weiner 3007b95a2f2dSJohannes Weiner if (!total_swap_pages) 3008b95a2f2dSJohannes Weiner return; 3009b95a2f2dSJohannes Weiner 3010b95a2f2dSJohannes Weiner memcg = mem_cgroup_iter(NULL, NULL, NULL); 3011b95a2f2dSJohannes Weiner do { 3012ef8f2327SMel Gorman struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg); 3013f16015fbSJohannes Weiner 301459dc76b0SRik van Riel if (inactive_list_is_low(lruvec, false)) 30151a93be0eSKonstantin Khlebnikov shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 30169e3b2f8cSKonstantin Khlebnikov sc, LRU_ACTIVE_ANON); 3017b95a2f2dSJohannes Weiner 3018b95a2f2dSJohannes Weiner memcg = mem_cgroup_iter(NULL, memcg, NULL); 3019b95a2f2dSJohannes Weiner } while (memcg); 3020f16015fbSJohannes Weiner } 3021f16015fbSJohannes Weiner 302231483b6aSMel Gorman static bool zone_balanced(struct zone *zone, int order, int classzone_idx) 302360cefed4SJohannes Weiner { 302431483b6aSMel Gorman unsigned long mark = high_wmark_pages(zone); 302560cefed4SJohannes Weiner 30266256c6b4SMel Gorman if (!zone_watermark_ok_safe(zone, order, mark, classzone_idx)) 30276256c6b4SMel Gorman return false; 30286256c6b4SMel Gorman 30296256c6b4SMel Gorman /* 30306256c6b4SMel Gorman * If any eligible zone is balanced then the node is not considered 30316256c6b4SMel Gorman * to be congested or dirty 30326256c6b4SMel Gorman */ 30336256c6b4SMel Gorman clear_bit(PGDAT_CONGESTED, &zone->zone_pgdat->flags); 30346256c6b4SMel Gorman clear_bit(PGDAT_DIRTY, &zone->zone_pgdat->flags); 30356256c6b4SMel Gorman 30366256c6b4SMel Gorman return true; 303760cefed4SJohannes Weiner } 303860cefed4SJohannes Weiner 30391741c877SMel Gorman /* 30405515061dSMel Gorman * Prepare kswapd for sleeping. This verifies that there are no processes 30415515061dSMel Gorman * waiting in throttle_direct_reclaim() and that watermarks have been met. 30425515061dSMel Gorman * 30435515061dSMel Gorman * Returns true if kswapd is ready to sleep 30445515061dSMel Gorman */ 3045d9f21d42SMel Gorman static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx) 3046f50de2d3SMel Gorman { 30471d82de61SMel Gorman int i; 30481d82de61SMel Gorman 30495515061dSMel Gorman /* 30509e5e3661SVlastimil Babka * The throttled processes are normally woken up in balance_pgdat() as 30519e5e3661SVlastimil Babka * soon as pfmemalloc_watermark_ok() is true. But there is a potential 30529e5e3661SVlastimil Babka * race between when kswapd checks the watermarks and a process gets 30539e5e3661SVlastimil Babka * throttled. There is also a potential race if processes get 30549e5e3661SVlastimil Babka * throttled, kswapd wakes, a large process exits thereby balancing the 30559e5e3661SVlastimil Babka * zones, which causes kswapd to exit balance_pgdat() before reaching 30569e5e3661SVlastimil Babka * the wake up checks. If kswapd is going to sleep, no process should 30579e5e3661SVlastimil Babka * be sleeping on pfmemalloc_wait, so wake them now if necessary. If 30589e5e3661SVlastimil Babka * the wake up is premature, processes will wake kswapd and get 30599e5e3661SVlastimil Babka * throttled again. The difference from wake ups in balance_pgdat() is 30609e5e3661SVlastimil Babka * that here we are under prepare_to_wait(). 30615515061dSMel Gorman */ 30629e5e3661SVlastimil Babka if (waitqueue_active(&pgdat->pfmemalloc_wait)) 30639e5e3661SVlastimil Babka wake_up_all(&pgdat->pfmemalloc_wait); 3064f50de2d3SMel Gorman 30651d82de61SMel Gorman for (i = 0; i <= classzone_idx; i++) { 30661d82de61SMel Gorman struct zone *zone = pgdat->node_zones + i; 30671d82de61SMel Gorman 30681d82de61SMel Gorman if (!populated_zone(zone)) 30691d82de61SMel Gorman continue; 30701d82de61SMel Gorman 307138087d9bSMel Gorman if (!zone_balanced(zone, order, classzone_idx)) 307238087d9bSMel Gorman return false; 30731d82de61SMel Gorman } 30741d82de61SMel Gorman 307538087d9bSMel Gorman return true; 3076f50de2d3SMel Gorman } 3077f50de2d3SMel Gorman 30781da177e4SLinus Torvalds /* 30791d82de61SMel Gorman * kswapd shrinks a node of pages that are at or below the highest usable 30801d82de61SMel Gorman * zone that is currently unbalanced. 3081b8e83b94SMel Gorman * 3082b8e83b94SMel Gorman * Returns true if kswapd scanned at least the requested number of pages to 3083283aba9fSMel Gorman * reclaim or if the lack of progress was due to pages under writeback. 3084283aba9fSMel Gorman * This is used to determine if the scanning priority needs to be raised. 308575485363SMel Gorman */ 30861d82de61SMel Gorman static bool kswapd_shrink_node(pg_data_t *pgdat, 3087accf6242SVlastimil Babka struct scan_control *sc) 308875485363SMel Gorman { 30891d82de61SMel Gorman struct zone *zone; 30901d82de61SMel Gorman int z; 309175485363SMel Gorman 30921d82de61SMel Gorman /* Reclaim a number of pages proportional to the number of zones */ 30931d82de61SMel Gorman sc->nr_to_reclaim = 0; 3094970a39a3SMel Gorman for (z = 0; z <= sc->reclaim_idx; z++) { 30951d82de61SMel Gorman zone = pgdat->node_zones + z; 30961d82de61SMel Gorman if (!populated_zone(zone)) 30971d82de61SMel Gorman continue; 30987c954f6dSMel Gorman 30991d82de61SMel Gorman sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); 31007c954f6dSMel Gorman } 31017c954f6dSMel Gorman 31021d82de61SMel Gorman /* 31031d82de61SMel Gorman * Historically care was taken to put equal pressure on all zones but 31041d82de61SMel Gorman * now pressure is applied based on node LRU order. 31051d82de61SMel Gorman */ 3106970a39a3SMel Gorman shrink_node(pgdat, sc); 31071d82de61SMel Gorman 31081d82de61SMel Gorman /* 31091d82de61SMel Gorman * Fragmentation may mean that the system cannot be rebalanced for 31101d82de61SMel Gorman * high-order allocations. If twice the allocation size has been 31111d82de61SMel Gorman * reclaimed then recheck watermarks only at order-0 to prevent 31121d82de61SMel Gorman * excessive reclaim. Assume that a process requested a high-order 31131d82de61SMel Gorman * can direct reclaim/compact. 31141d82de61SMel Gorman */ 31151d82de61SMel Gorman if (sc->order && sc->nr_reclaimed >= 2UL << sc->order) 31161d82de61SMel Gorman sc->order = 0; 31171d82de61SMel Gorman 3118b8e83b94SMel Gorman return sc->nr_scanned >= sc->nr_to_reclaim; 311975485363SMel Gorman } 312075485363SMel Gorman 312175485363SMel Gorman /* 31221d82de61SMel Gorman * For kswapd, balance_pgdat() will reclaim pages across a node from zones 31231d82de61SMel Gorman * that are eligible for use by the caller until at least one zone is 31241d82de61SMel Gorman * balanced. 31251da177e4SLinus Torvalds * 31261d82de61SMel Gorman * Returns the order kswapd finished reclaiming at. 31271da177e4SLinus Torvalds * 31281da177e4SLinus Torvalds * kswapd scans the zones in the highmem->normal->dma direction. It skips 312941858966SMel Gorman * zones which have free_pages > high_wmark_pages(zone), but once a zone is 31301d82de61SMel Gorman * found to have free_pages <= high_wmark_pages(zone), any page is that zone 31311d82de61SMel Gorman * or lower is eligible for reclaim until at least one usable zone is 31321d82de61SMel Gorman * balanced. 31331da177e4SLinus Torvalds */ 3134accf6242SVlastimil Babka static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) 31351da177e4SLinus Torvalds { 31361da177e4SLinus Torvalds int i; 31370608f43dSAndrew Morton unsigned long nr_soft_reclaimed; 31380608f43dSAndrew Morton unsigned long nr_soft_scanned; 31391d82de61SMel Gorman struct zone *zone; 3140179e9639SAndrew Morton struct scan_control sc = { 3141179e9639SAndrew Morton .gfp_mask = GFP_KERNEL, 3142ee814fe2SJohannes Weiner .order = order, 3143b8e83b94SMel Gorman .priority = DEF_PRIORITY, 3144ee814fe2SJohannes Weiner .may_writepage = !laptop_mode, 3145a6dc60f8SJohannes Weiner .may_unmap = 1, 31462e2e4259SKOSAKI Motohiro .may_swap = 1, 3147179e9639SAndrew Morton }; 3148f8891e5eSChristoph Lameter count_vm_event(PAGEOUTRUN); 31491da177e4SLinus Torvalds 31509e3b2f8cSKonstantin Khlebnikov do { 3151b8e83b94SMel Gorman bool raise_priority = true; 3152b8e83b94SMel Gorman 3153b8e83b94SMel Gorman sc.nr_reclaimed = 0; 315484c7a777SMel Gorman sc.reclaim_idx = classzone_idx; 31551da177e4SLinus Torvalds 315686c79f6bSMel Gorman /* 315784c7a777SMel Gorman * If the number of buffer_heads exceeds the maximum allowed 315884c7a777SMel Gorman * then consider reclaiming from all zones. This has a dual 315984c7a777SMel Gorman * purpose -- on 64-bit systems it is expected that 316084c7a777SMel Gorman * buffer_heads are stripped during active rotation. On 32-bit 316184c7a777SMel Gorman * systems, highmem pages can pin lowmem memory and shrinking 316284c7a777SMel Gorman * buffers can relieve lowmem pressure. Reclaim may still not 316384c7a777SMel Gorman * go ahead if all eligible zones for the original allocation 316484c7a777SMel Gorman * request are balanced to avoid excessive reclaim from kswapd. 316586c79f6bSMel Gorman */ 316686c79f6bSMel Gorman if (buffer_heads_over_limit) { 316786c79f6bSMel Gorman for (i = MAX_NR_ZONES - 1; i >= 0; i--) { 316886c79f6bSMel Gorman zone = pgdat->node_zones + i; 316986c79f6bSMel Gorman if (!populated_zone(zone)) 317086c79f6bSMel Gorman continue; 317186c79f6bSMel Gorman 3172970a39a3SMel Gorman sc.reclaim_idx = i; 317386c79f6bSMel Gorman break; 317486c79f6bSMel Gorman } 317586c79f6bSMel Gorman } 317686c79f6bSMel Gorman 317786c79f6bSMel Gorman /* 317886c79f6bSMel Gorman * Only reclaim if there are no eligible zones. Check from 317986c79f6bSMel Gorman * high to low zone as allocations prefer higher zones. 318086c79f6bSMel Gorman * Scanning from low to high zone would allow congestion to be 318186c79f6bSMel Gorman * cleared during a very small window when a small low 318286c79f6bSMel Gorman * zone was balanced even under extreme pressure when the 318384c7a777SMel Gorman * overall node may be congested. Note that sc.reclaim_idx 318484c7a777SMel Gorman * is not used as buffer_heads_over_limit may have adjusted 318584c7a777SMel Gorman * it. 318686c79f6bSMel Gorman */ 318784c7a777SMel Gorman for (i = classzone_idx; i >= 0; i--) { 31881d82de61SMel Gorman zone = pgdat->node_zones + i; 3189f3fe6512SCon Kolivas if (!populated_zone(zone)) 31901da177e4SLinus Torvalds continue; 31911da177e4SLinus Torvalds 319284c7a777SMel Gorman if (zone_balanced(zone, sc.order, classzone_idx)) 31931da177e4SLinus Torvalds goto out; 319486c79f6bSMel Gorman } 3195e1dbeda6SAndrew Morton 31961da177e4SLinus Torvalds /* 31971d82de61SMel Gorman * Do some background aging of the anon list, to give 31981d82de61SMel Gorman * pages a chance to be referenced before reclaiming. All 31991d82de61SMel Gorman * pages are rotated regardless of classzone as this is 32001d82de61SMel Gorman * about consistent aging. 32011d82de61SMel Gorman */ 3202ef8f2327SMel Gorman age_active_anon(pgdat, &sc); 32031d82de61SMel Gorman 32041d82de61SMel Gorman /* 3205b7ea3c41SMel Gorman * If we're getting trouble reclaiming, start doing writepage 3206b7ea3c41SMel Gorman * even in laptop mode. 3207b7ea3c41SMel Gorman */ 32081d82de61SMel Gorman if (sc.priority < DEF_PRIORITY - 2 || !pgdat_reclaimable(pgdat)) 3209b7ea3c41SMel Gorman sc.may_writepage = 1; 3210b7ea3c41SMel Gorman 32111d82de61SMel Gorman /* Call soft limit reclaim before calling shrink_node. */ 32121da177e4SLinus Torvalds sc.nr_scanned = 0; 32130608f43dSAndrew Morton nr_soft_scanned = 0; 3214ef8f2327SMel Gorman nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order, 32151d82de61SMel Gorman sc.gfp_mask, &nr_soft_scanned); 32160608f43dSAndrew Morton sc.nr_reclaimed += nr_soft_reclaimed; 32170608f43dSAndrew Morton 321832a4330dSRik van Riel /* 32191d82de61SMel Gorman * There should be no need to raise the scanning priority if 32201d82de61SMel Gorman * enough pages are already being scanned that that high 32211d82de61SMel Gorman * watermark would be met at 100% efficiency. 322232a4330dSRik van Riel */ 3223970a39a3SMel Gorman if (kswapd_shrink_node(pgdat, &sc)) 3224b8e83b94SMel Gorman raise_priority = false; 3225d7868daeSMel Gorman 32265515061dSMel Gorman /* 32275515061dSMel Gorman * If the low watermark is met there is no need for processes 32285515061dSMel Gorman * to be throttled on pfmemalloc_wait as they should not be 32295515061dSMel Gorman * able to safely make forward progress. Wake them 32305515061dSMel Gorman */ 32315515061dSMel Gorman if (waitqueue_active(&pgdat->pfmemalloc_wait) && 32325515061dSMel Gorman pfmemalloc_watermark_ok(pgdat)) 3233cfc51155SVlastimil Babka wake_up_all(&pgdat->pfmemalloc_wait); 32345515061dSMel Gorman 3235b8e83b94SMel Gorman /* Check if kswapd should be suspending */ 3236b8e83b94SMel Gorman if (try_to_freeze() || kthread_should_stop()) 3237b8e83b94SMel Gorman break; 3238b8e83b94SMel Gorman 3239b8e83b94SMel Gorman /* 3240b8e83b94SMel Gorman * Raise priority if scanning rate is too low or there was no 3241b8e83b94SMel Gorman * progress in reclaiming pages 3242b8e83b94SMel Gorman */ 3243b8e83b94SMel Gorman if (raise_priority || !sc.nr_reclaimed) 3244b8e83b94SMel Gorman sc.priority--; 32451d82de61SMel Gorman } while (sc.priority >= 1); 32461da177e4SLinus Torvalds 3247b8e83b94SMel Gorman out: 32480abdee2bSMel Gorman /* 32491d82de61SMel Gorman * Return the order kswapd stopped reclaiming at as 32501d82de61SMel Gorman * prepare_kswapd_sleep() takes it into account. If another caller 32511d82de61SMel Gorman * entered the allocator slow path while kswapd was awake, order will 32521d82de61SMel Gorman * remain at the higher level. 32530abdee2bSMel Gorman */ 32541d82de61SMel Gorman return sc.order; 32551da177e4SLinus Torvalds } 32561da177e4SLinus Torvalds 325738087d9bSMel Gorman static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, 325838087d9bSMel Gorman unsigned int classzone_idx) 3259f0bc0a60SKOSAKI Motohiro { 3260f0bc0a60SKOSAKI Motohiro long remaining = 0; 3261f0bc0a60SKOSAKI Motohiro DEFINE_WAIT(wait); 3262f0bc0a60SKOSAKI Motohiro 3263f0bc0a60SKOSAKI Motohiro if (freezing(current) || kthread_should_stop()) 3264f0bc0a60SKOSAKI Motohiro return; 3265f0bc0a60SKOSAKI Motohiro 3266f0bc0a60SKOSAKI Motohiro prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 3267f0bc0a60SKOSAKI Motohiro 3268f0bc0a60SKOSAKI Motohiro /* Try to sleep for a short interval */ 3269d9f21d42SMel Gorman if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { 3270fd901c95SVlastimil Babka /* 3271fd901c95SVlastimil Babka * Compaction records what page blocks it recently failed to 3272fd901c95SVlastimil Babka * isolate pages from and skips them in the future scanning. 3273fd901c95SVlastimil Babka * When kswapd is going to sleep, it is reasonable to assume 3274fd901c95SVlastimil Babka * that pages and compaction may succeed so reset the cache. 3275fd901c95SVlastimil Babka */ 3276fd901c95SVlastimil Babka reset_isolation_suitable(pgdat); 3277fd901c95SVlastimil Babka 3278fd901c95SVlastimil Babka /* 3279fd901c95SVlastimil Babka * We have freed the memory, now we should compact it to make 3280fd901c95SVlastimil Babka * allocation of the requested order possible. 3281fd901c95SVlastimil Babka */ 328238087d9bSMel Gorman wakeup_kcompactd(pgdat, alloc_order, classzone_idx); 3283fd901c95SVlastimil Babka 3284f0bc0a60SKOSAKI Motohiro remaining = schedule_timeout(HZ/10); 328538087d9bSMel Gorman 328638087d9bSMel Gorman /* 328738087d9bSMel Gorman * If woken prematurely then reset kswapd_classzone_idx and 328838087d9bSMel Gorman * order. The values will either be from a wakeup request or 328938087d9bSMel Gorman * the previous request that slept prematurely. 329038087d9bSMel Gorman */ 329138087d9bSMel Gorman if (remaining) { 329238087d9bSMel Gorman pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, classzone_idx); 329338087d9bSMel Gorman pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order); 329438087d9bSMel Gorman } 329538087d9bSMel Gorman 3296f0bc0a60SKOSAKI Motohiro finish_wait(&pgdat->kswapd_wait, &wait); 3297f0bc0a60SKOSAKI Motohiro prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 3298f0bc0a60SKOSAKI Motohiro } 3299f0bc0a60SKOSAKI Motohiro 3300f0bc0a60SKOSAKI Motohiro /* 3301f0bc0a60SKOSAKI Motohiro * After a short sleep, check if it was a premature sleep. If not, then 3302f0bc0a60SKOSAKI Motohiro * go fully to sleep until explicitly woken up. 3303f0bc0a60SKOSAKI Motohiro */ 3304d9f21d42SMel Gorman if (!remaining && 3305d9f21d42SMel Gorman prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { 3306f0bc0a60SKOSAKI Motohiro trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 3307f0bc0a60SKOSAKI Motohiro 3308f0bc0a60SKOSAKI Motohiro /* 3309f0bc0a60SKOSAKI Motohiro * vmstat counters are not perfectly accurate and the estimated 3310f0bc0a60SKOSAKI Motohiro * value for counters such as NR_FREE_PAGES can deviate from the 3311f0bc0a60SKOSAKI Motohiro * true value by nr_online_cpus * threshold. To avoid the zone 3312f0bc0a60SKOSAKI Motohiro * watermarks being breached while under pressure, we reduce the 3313f0bc0a60SKOSAKI Motohiro * per-cpu vmstat threshold while kswapd is awake and restore 3314f0bc0a60SKOSAKI Motohiro * them before going back to sleep. 3315f0bc0a60SKOSAKI Motohiro */ 3316f0bc0a60SKOSAKI Motohiro set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 33171c7e7f6cSAaditya Kumar 33181c7e7f6cSAaditya Kumar if (!kthread_should_stop()) 3319f0bc0a60SKOSAKI Motohiro schedule(); 33201c7e7f6cSAaditya Kumar 3321f0bc0a60SKOSAKI Motohiro set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); 3322f0bc0a60SKOSAKI Motohiro } else { 3323f0bc0a60SKOSAKI Motohiro if (remaining) 3324f0bc0a60SKOSAKI Motohiro count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 3325f0bc0a60SKOSAKI Motohiro else 3326f0bc0a60SKOSAKI Motohiro count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 3327f0bc0a60SKOSAKI Motohiro } 3328f0bc0a60SKOSAKI Motohiro finish_wait(&pgdat->kswapd_wait, &wait); 3329f0bc0a60SKOSAKI Motohiro } 3330f0bc0a60SKOSAKI Motohiro 33311da177e4SLinus Torvalds /* 33321da177e4SLinus Torvalds * The background pageout daemon, started as a kernel thread 33331da177e4SLinus Torvalds * from the init process. 33341da177e4SLinus Torvalds * 33351da177e4SLinus Torvalds * This basically trickles out pages so that we have _some_ 33361da177e4SLinus Torvalds * free memory available even if there is no other activity 33371da177e4SLinus Torvalds * that frees anything up. This is needed for things like routing 33381da177e4SLinus Torvalds * etc, where we otherwise might have all activity going on in 33391da177e4SLinus Torvalds * asynchronous contexts that cannot page things out. 33401da177e4SLinus Torvalds * 33411da177e4SLinus Torvalds * If there are applications that are active memory-allocators 33421da177e4SLinus Torvalds * (most normal use), this basically shouldn't matter. 33431da177e4SLinus Torvalds */ 33441da177e4SLinus Torvalds static int kswapd(void *p) 33451da177e4SLinus Torvalds { 334638087d9bSMel Gorman unsigned int alloc_order, reclaim_order, classzone_idx; 33471da177e4SLinus Torvalds pg_data_t *pgdat = (pg_data_t*)p; 33481da177e4SLinus Torvalds struct task_struct *tsk = current; 3349f0bc0a60SKOSAKI Motohiro 33501da177e4SLinus Torvalds struct reclaim_state reclaim_state = { 33511da177e4SLinus Torvalds .reclaimed_slab = 0, 33521da177e4SLinus Torvalds }; 3353a70f7302SRusty Russell const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 33541da177e4SLinus Torvalds 3355cf40bd16SNick Piggin lockdep_set_current_reclaim_state(GFP_KERNEL); 3356cf40bd16SNick Piggin 3357174596a0SRusty Russell if (!cpumask_empty(cpumask)) 3358c5f59f08SMike Travis set_cpus_allowed_ptr(tsk, cpumask); 33591da177e4SLinus Torvalds current->reclaim_state = &reclaim_state; 33601da177e4SLinus Torvalds 33611da177e4SLinus Torvalds /* 33621da177e4SLinus Torvalds * Tell the memory management that we're a "memory allocator", 33631da177e4SLinus Torvalds * and that if we need more memory we should get access to it 33641da177e4SLinus Torvalds * regardless (see "__alloc_pages()"). "kswapd" should 33651da177e4SLinus Torvalds * never get caught in the normal page freeing logic. 33661da177e4SLinus Torvalds * 33671da177e4SLinus Torvalds * (Kswapd normally doesn't need memory anyway, but sometimes 33681da177e4SLinus Torvalds * you need a small amount of memory in order to be able to 33691da177e4SLinus Torvalds * page out something else, and this flag essentially protects 33701da177e4SLinus Torvalds * us from recursively trying to free more memory as we're 33711da177e4SLinus Torvalds * trying to free the first piece of memory in the first place). 33721da177e4SLinus Torvalds */ 3373930d9152SChristoph Lameter tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 337483144186SRafael J. Wysocki set_freezable(); 33751da177e4SLinus Torvalds 337638087d9bSMel Gorman pgdat->kswapd_order = alloc_order = reclaim_order = 0; 337738087d9bSMel Gorman pgdat->kswapd_classzone_idx = classzone_idx = 0; 33781da177e4SLinus Torvalds for ( ; ; ) { 33796f6313d4SJeff Liu bool ret; 33803e1d1d28SChristoph Lameter 338138087d9bSMel Gorman kswapd_try_sleep: 338238087d9bSMel Gorman kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, 338338087d9bSMel Gorman classzone_idx); 3384215ddd66SMel Gorman 338538087d9bSMel Gorman /* Read the new order and classzone_idx */ 338638087d9bSMel Gorman alloc_order = reclaim_order = pgdat->kswapd_order; 338738087d9bSMel Gorman classzone_idx = pgdat->kswapd_classzone_idx; 338838087d9bSMel Gorman pgdat->kswapd_order = 0; 338938087d9bSMel Gorman pgdat->kswapd_classzone_idx = 0; 33901da177e4SLinus Torvalds 33918fe23e05SDavid Rientjes ret = try_to_freeze(); 33928fe23e05SDavid Rientjes if (kthread_should_stop()) 33938fe23e05SDavid Rientjes break; 33948fe23e05SDavid Rientjes 33958fe23e05SDavid Rientjes /* 33968fe23e05SDavid Rientjes * We can speed up thawing tasks if we don't call balance_pgdat 33978fe23e05SDavid Rientjes * after returning from the refrigerator 3398b1296cc4SRafael J. Wysocki */ 339938087d9bSMel Gorman if (ret) 340038087d9bSMel Gorman continue; 34011d82de61SMel Gorman 340238087d9bSMel Gorman /* 340338087d9bSMel Gorman * Reclaim begins at the requested order but if a high-order 340438087d9bSMel Gorman * reclaim fails then kswapd falls back to reclaiming for 340538087d9bSMel Gorman * order-0. If that happens, kswapd will consider sleeping 340638087d9bSMel Gorman * for the order it finished reclaiming at (reclaim_order) 340738087d9bSMel Gorman * but kcompactd is woken to compact for the original 340838087d9bSMel Gorman * request (alloc_order). 340938087d9bSMel Gorman */ 3410e5146b12SMel Gorman trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx, 3411e5146b12SMel Gorman alloc_order); 341238087d9bSMel Gorman reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx); 341338087d9bSMel Gorman if (reclaim_order < alloc_order) 341438087d9bSMel Gorman goto kswapd_try_sleep; 341538087d9bSMel Gorman 341638087d9bSMel Gorman alloc_order = reclaim_order = pgdat->kswapd_order; 341738087d9bSMel Gorman classzone_idx = pgdat->kswapd_classzone_idx; 341833906bc5SMel Gorman } 3419b0a8cc58STakamori Yamaguchi 342071abdc15SJohannes Weiner tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD); 3421b0a8cc58STakamori Yamaguchi current->reclaim_state = NULL; 342271abdc15SJohannes Weiner lockdep_clear_current_reclaim_state(); 342371abdc15SJohannes Weiner 34241da177e4SLinus Torvalds return 0; 34251da177e4SLinus Torvalds } 34261da177e4SLinus Torvalds 34271da177e4SLinus Torvalds /* 34281da177e4SLinus Torvalds * A zone is low on free memory, so wake its kswapd task to service it. 34291da177e4SLinus Torvalds */ 343099504748SMel Gorman void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) 34311da177e4SLinus Torvalds { 34321da177e4SLinus Torvalds pg_data_t *pgdat; 3433e1a55637SMel Gorman int z; 34341da177e4SLinus Torvalds 3435f3fe6512SCon Kolivas if (!populated_zone(zone)) 34361da177e4SLinus Torvalds return; 34371da177e4SLinus Torvalds 3438344736f2SVladimir Davydov if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL)) 34391da177e4SLinus Torvalds return; 344088f5acf8SMel Gorman pgdat = zone->zone_pgdat; 344138087d9bSMel Gorman pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, classzone_idx); 344238087d9bSMel Gorman pgdat->kswapd_order = max(pgdat->kswapd_order, order); 34438d0986e2SCon Kolivas if (!waitqueue_active(&pgdat->kswapd_wait)) 34441da177e4SLinus Torvalds return; 3445e1a55637SMel Gorman 3446e1a55637SMel Gorman /* Only wake kswapd if all zones are unbalanced */ 3447e1a55637SMel Gorman for (z = 0; z <= classzone_idx; z++) { 3448e1a55637SMel Gorman zone = pgdat->node_zones + z; 3449e1a55637SMel Gorman if (!populated_zone(zone)) 3450e1a55637SMel Gorman continue; 3451e1a55637SMel Gorman 3452e1a55637SMel Gorman if (zone_balanced(zone, order, classzone_idx)) 345388f5acf8SMel Gorman return; 3454e1a55637SMel Gorman } 345588f5acf8SMel Gorman 345688f5acf8SMel Gorman trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); 34578d0986e2SCon Kolivas wake_up_interruptible(&pgdat->kswapd_wait); 34581da177e4SLinus Torvalds } 34591da177e4SLinus Torvalds 3460c6f37f12SRafael J. Wysocki #ifdef CONFIG_HIBERNATION 34611da177e4SLinus Torvalds /* 34627b51755cSKOSAKI Motohiro * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 3463d6277db4SRafael J. Wysocki * freed pages. 3464d6277db4SRafael J. Wysocki * 3465d6277db4SRafael J. Wysocki * Rather than trying to age LRUs the aim is to preserve the overall 3466d6277db4SRafael J. Wysocki * LRU order by reclaiming preferentially 3467d6277db4SRafael J. Wysocki * inactive > active > active referenced > active mapped 34681da177e4SLinus Torvalds */ 34697b51755cSKOSAKI Motohiro unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 34701da177e4SLinus Torvalds { 3471d6277db4SRafael J. Wysocki struct reclaim_state reclaim_state; 3472d6277db4SRafael J. Wysocki struct scan_control sc = { 34737b51755cSKOSAKI Motohiro .nr_to_reclaim = nr_to_reclaim, 3474ee814fe2SJohannes Weiner .gfp_mask = GFP_HIGHUSER_MOVABLE, 3475b2e18757SMel Gorman .reclaim_idx = MAX_NR_ZONES - 1, 34769e3b2f8cSKonstantin Khlebnikov .priority = DEF_PRIORITY, 3477ee814fe2SJohannes Weiner .may_writepage = 1, 3478ee814fe2SJohannes Weiner .may_unmap = 1, 3479ee814fe2SJohannes Weiner .may_swap = 1, 3480ee814fe2SJohannes Weiner .hibernation_mode = 1, 34811da177e4SLinus Torvalds }; 34827b51755cSKOSAKI Motohiro struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 34837b51755cSKOSAKI Motohiro struct task_struct *p = current; 34847b51755cSKOSAKI Motohiro unsigned long nr_reclaimed; 34851da177e4SLinus Torvalds 34867b51755cSKOSAKI Motohiro p->flags |= PF_MEMALLOC; 34877b51755cSKOSAKI Motohiro lockdep_set_current_reclaim_state(sc.gfp_mask); 3488d6277db4SRafael J. Wysocki reclaim_state.reclaimed_slab = 0; 34897b51755cSKOSAKI Motohiro p->reclaim_state = &reclaim_state; 3490d6277db4SRafael J. Wysocki 34913115cd91SVladimir Davydov nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 3492d6277db4SRafael J. Wysocki 34937b51755cSKOSAKI Motohiro p->reclaim_state = NULL; 34947b51755cSKOSAKI Motohiro lockdep_clear_current_reclaim_state(); 34957b51755cSKOSAKI Motohiro p->flags &= ~PF_MEMALLOC; 3496d6277db4SRafael J. Wysocki 34977b51755cSKOSAKI Motohiro return nr_reclaimed; 34981da177e4SLinus Torvalds } 3499c6f37f12SRafael J. Wysocki #endif /* CONFIG_HIBERNATION */ 35001da177e4SLinus Torvalds 35011da177e4SLinus Torvalds /* It's optimal to keep kswapds on the same CPUs as their memory, but 35021da177e4SLinus Torvalds not required for correctness. So if the last cpu in a node goes 35031da177e4SLinus Torvalds away, we get changed to run anywhere: as the first one comes back, 35041da177e4SLinus Torvalds restore their cpu bindings. */ 3505fcb35a9bSGreg Kroah-Hartman static int cpu_callback(struct notifier_block *nfb, unsigned long action, 3506fcb35a9bSGreg Kroah-Hartman void *hcpu) 35071da177e4SLinus Torvalds { 350858c0a4a7SYasunori Goto int nid; 35091da177e4SLinus Torvalds 35108bb78442SRafael J. Wysocki if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { 351148fb2e24SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 3512c5f59f08SMike Travis pg_data_t *pgdat = NODE_DATA(nid); 3513a70f7302SRusty Russell const struct cpumask *mask; 3514a70f7302SRusty Russell 3515a70f7302SRusty Russell mask = cpumask_of_node(pgdat->node_id); 3516c5f59f08SMike Travis 35173e597945SRusty Russell if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 35181da177e4SLinus Torvalds /* One of our CPUs online: restore mask */ 3519c5f59f08SMike Travis set_cpus_allowed_ptr(pgdat->kswapd, mask); 35201da177e4SLinus Torvalds } 35211da177e4SLinus Torvalds } 35221da177e4SLinus Torvalds return NOTIFY_OK; 35231da177e4SLinus Torvalds } 35241da177e4SLinus Torvalds 35253218ae14SYasunori Goto /* 35263218ae14SYasunori Goto * This kswapd start function will be called by init and node-hot-add. 35273218ae14SYasunori Goto * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added. 35283218ae14SYasunori Goto */ 35293218ae14SYasunori Goto int kswapd_run(int nid) 35303218ae14SYasunori Goto { 35313218ae14SYasunori Goto pg_data_t *pgdat = NODE_DATA(nid); 35323218ae14SYasunori Goto int ret = 0; 35333218ae14SYasunori Goto 35343218ae14SYasunori Goto if (pgdat->kswapd) 35353218ae14SYasunori Goto return 0; 35363218ae14SYasunori Goto 35373218ae14SYasunori Goto pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 35383218ae14SYasunori Goto if (IS_ERR(pgdat->kswapd)) { 35393218ae14SYasunori Goto /* failure at boot is fatal */ 35403218ae14SYasunori Goto BUG_ON(system_state == SYSTEM_BOOTING); 3541d5dc0ad9SGavin Shan pr_err("Failed to start kswapd on node %d\n", nid); 3542d5dc0ad9SGavin Shan ret = PTR_ERR(pgdat->kswapd); 3543d72515b8SXishi Qiu pgdat->kswapd = NULL; 35443218ae14SYasunori Goto } 35453218ae14SYasunori Goto return ret; 35463218ae14SYasunori Goto } 35473218ae14SYasunori Goto 35488fe23e05SDavid Rientjes /* 3549d8adde17SJiang Liu * Called by memory hotplug when all memory in a node is offlined. Caller must 3550bfc8c901SVladimir Davydov * hold mem_hotplug_begin/end(). 35518fe23e05SDavid Rientjes */ 35528fe23e05SDavid Rientjes void kswapd_stop(int nid) 35538fe23e05SDavid Rientjes { 35548fe23e05SDavid Rientjes struct task_struct *kswapd = NODE_DATA(nid)->kswapd; 35558fe23e05SDavid Rientjes 3556d8adde17SJiang Liu if (kswapd) { 35578fe23e05SDavid Rientjes kthread_stop(kswapd); 3558d8adde17SJiang Liu NODE_DATA(nid)->kswapd = NULL; 3559d8adde17SJiang Liu } 35608fe23e05SDavid Rientjes } 35618fe23e05SDavid Rientjes 35621da177e4SLinus Torvalds static int __init kswapd_init(void) 35631da177e4SLinus Torvalds { 35643218ae14SYasunori Goto int nid; 356569e05944SAndrew Morton 35661da177e4SLinus Torvalds swap_setup(); 356748fb2e24SLai Jiangshan for_each_node_state(nid, N_MEMORY) 35683218ae14SYasunori Goto kswapd_run(nid); 35691da177e4SLinus Torvalds hotcpu_notifier(cpu_callback, 0); 35701da177e4SLinus Torvalds return 0; 35711da177e4SLinus Torvalds } 35721da177e4SLinus Torvalds 35731da177e4SLinus Torvalds module_init(kswapd_init) 35749eeff239SChristoph Lameter 35759eeff239SChristoph Lameter #ifdef CONFIG_NUMA 35769eeff239SChristoph Lameter /* 3577a5f5f91dSMel Gorman * Node reclaim mode 35789eeff239SChristoph Lameter * 3579a5f5f91dSMel Gorman * If non-zero call node_reclaim when the number of free pages falls below 35809eeff239SChristoph Lameter * the watermarks. 35819eeff239SChristoph Lameter */ 3582a5f5f91dSMel Gorman int node_reclaim_mode __read_mostly; 35839eeff239SChristoph Lameter 35841b2ffb78SChristoph Lameter #define RECLAIM_OFF 0 35857d03431cSFernando Luis Vazquez Cao #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ 35861b2ffb78SChristoph Lameter #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ 358795bbc0c7SZhihui Zhang #define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */ 35881b2ffb78SChristoph Lameter 35899eeff239SChristoph Lameter /* 3590a5f5f91dSMel Gorman * Priority for NODE_RECLAIM. This determines the fraction of pages 3591a92f7126SChristoph Lameter * of a node considered for each zone_reclaim. 4 scans 1/16th of 3592a92f7126SChristoph Lameter * a zone. 3593a92f7126SChristoph Lameter */ 3594a5f5f91dSMel Gorman #define NODE_RECLAIM_PRIORITY 4 3595a92f7126SChristoph Lameter 35969eeff239SChristoph Lameter /* 3597a5f5f91dSMel Gorman * Percentage of pages in a zone that must be unmapped for node_reclaim to 35989614634fSChristoph Lameter * occur. 35999614634fSChristoph Lameter */ 36009614634fSChristoph Lameter int sysctl_min_unmapped_ratio = 1; 36019614634fSChristoph Lameter 36029614634fSChristoph Lameter /* 36030ff38490SChristoph Lameter * If the number of slab pages in a zone grows beyond this percentage then 36040ff38490SChristoph Lameter * slab reclaim needs to occur. 36050ff38490SChristoph Lameter */ 36060ff38490SChristoph Lameter int sysctl_min_slab_ratio = 5; 36070ff38490SChristoph Lameter 360811fb9989SMel Gorman static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat) 360990afa5deSMel Gorman { 361011fb9989SMel Gorman unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED); 361111fb9989SMel Gorman unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) + 361211fb9989SMel Gorman node_page_state(pgdat, NR_ACTIVE_FILE); 361390afa5deSMel Gorman 361490afa5deSMel Gorman /* 361590afa5deSMel Gorman * It's possible for there to be more file mapped pages than 361690afa5deSMel Gorman * accounted for by the pages on the file LRU lists because 361790afa5deSMel Gorman * tmpfs pages accounted for as ANON can also be FILE_MAPPED 361890afa5deSMel Gorman */ 361990afa5deSMel Gorman return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; 362090afa5deSMel Gorman } 362190afa5deSMel Gorman 362290afa5deSMel Gorman /* Work out how many page cache pages we can reclaim in this reclaim_mode */ 3623a5f5f91dSMel Gorman static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat) 362490afa5deSMel Gorman { 3625d031a157SAlexandru Moise unsigned long nr_pagecache_reclaimable; 3626d031a157SAlexandru Moise unsigned long delta = 0; 362790afa5deSMel Gorman 362890afa5deSMel Gorman /* 362995bbc0c7SZhihui Zhang * If RECLAIM_UNMAP is set, then all file pages are considered 363090afa5deSMel Gorman * potentially reclaimable. Otherwise, we have to worry about 363111fb9989SMel Gorman * pages like swapcache and node_unmapped_file_pages() provides 363290afa5deSMel Gorman * a better estimate 363390afa5deSMel Gorman */ 3634a5f5f91dSMel Gorman if (node_reclaim_mode & RECLAIM_UNMAP) 3635a5f5f91dSMel Gorman nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES); 363690afa5deSMel Gorman else 3637a5f5f91dSMel Gorman nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat); 363890afa5deSMel Gorman 363990afa5deSMel Gorman /* If we can't clean pages, remove dirty pages from consideration */ 3640a5f5f91dSMel Gorman if (!(node_reclaim_mode & RECLAIM_WRITE)) 3641a5f5f91dSMel Gorman delta += node_page_state(pgdat, NR_FILE_DIRTY); 364290afa5deSMel Gorman 364390afa5deSMel Gorman /* Watch for any possible underflows due to delta */ 364490afa5deSMel Gorman if (unlikely(delta > nr_pagecache_reclaimable)) 364590afa5deSMel Gorman delta = nr_pagecache_reclaimable; 364690afa5deSMel Gorman 364790afa5deSMel Gorman return nr_pagecache_reclaimable - delta; 364890afa5deSMel Gorman } 364990afa5deSMel Gorman 36500ff38490SChristoph Lameter /* 3651a5f5f91dSMel Gorman * Try to free up some pages from this node through reclaim. 36529eeff239SChristoph Lameter */ 3653a5f5f91dSMel Gorman static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) 36549eeff239SChristoph Lameter { 36557fb2d46dSChristoph Lameter /* Minimum pages needed in order to stay on node */ 365669e05944SAndrew Morton const unsigned long nr_pages = 1 << order; 36579eeff239SChristoph Lameter struct task_struct *p = current; 36589eeff239SChristoph Lameter struct reclaim_state reclaim_state; 3659a5f5f91dSMel Gorman int classzone_idx = gfp_zone(gfp_mask); 3660179e9639SAndrew Morton struct scan_control sc = { 366162b726c1SAndrew Morton .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), 366221caf2fcSMing Lei .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)), 3663bd2f6199SJohannes Weiner .order = order, 3664a5f5f91dSMel Gorman .priority = NODE_RECLAIM_PRIORITY, 3665a5f5f91dSMel Gorman .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE), 3666a5f5f91dSMel Gorman .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP), 3667ee814fe2SJohannes Weiner .may_swap = 1, 3668a5f5f91dSMel Gorman .reclaim_idx = classzone_idx, 3669179e9639SAndrew Morton }; 36709eeff239SChristoph Lameter 36719eeff239SChristoph Lameter cond_resched(); 3672d4f7796eSChristoph Lameter /* 367395bbc0c7SZhihui Zhang * We need to be able to allocate from the reserves for RECLAIM_UNMAP 3674d4f7796eSChristoph Lameter * and we also need to be able to write out pages for RECLAIM_WRITE 367595bbc0c7SZhihui Zhang * and RECLAIM_UNMAP. 3676d4f7796eSChristoph Lameter */ 3677d4f7796eSChristoph Lameter p->flags |= PF_MEMALLOC | PF_SWAPWRITE; 367876ca542dSKOSAKI Motohiro lockdep_set_current_reclaim_state(gfp_mask); 36799eeff239SChristoph Lameter reclaim_state.reclaimed_slab = 0; 36809eeff239SChristoph Lameter p->reclaim_state = &reclaim_state; 3681c84db23cSChristoph Lameter 3682a5f5f91dSMel Gorman if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) { 3683a92f7126SChristoph Lameter /* 36840ff38490SChristoph Lameter * Free memory by calling shrink zone with increasing 36850ff38490SChristoph Lameter * priorities until we have enough memory freed. 3686a92f7126SChristoph Lameter */ 3687a92f7126SChristoph Lameter do { 3688970a39a3SMel Gorman shrink_node(pgdat, &sc); 36899e3b2f8cSKonstantin Khlebnikov } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); 36900ff38490SChristoph Lameter } 3691a92f7126SChristoph Lameter 36929eeff239SChristoph Lameter p->reclaim_state = NULL; 3693d4f7796eSChristoph Lameter current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); 369476ca542dSKOSAKI Motohiro lockdep_clear_current_reclaim_state(); 3695a79311c1SRik van Riel return sc.nr_reclaimed >= nr_pages; 36969eeff239SChristoph Lameter } 3697179e9639SAndrew Morton 3698a5f5f91dSMel Gorman int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) 3699179e9639SAndrew Morton { 3700d773ed6bSDavid Rientjes int ret; 3701179e9639SAndrew Morton 3702179e9639SAndrew Morton /* 3703a5f5f91dSMel Gorman * Node reclaim reclaims unmapped file backed pages and 37040ff38490SChristoph Lameter * slab pages if we are over the defined limits. 370534aa1330SChristoph Lameter * 37069614634fSChristoph Lameter * A small portion of unmapped file backed pages is needed for 37079614634fSChristoph Lameter * file I/O otherwise pages read by file I/O will be immediately 3708a5f5f91dSMel Gorman * thrown out if the node is overallocated. So we do not reclaim 3709a5f5f91dSMel Gorman * if less than a specified percentage of the node is used by 37109614634fSChristoph Lameter * unmapped file backed pages. 3711179e9639SAndrew Morton */ 3712a5f5f91dSMel Gorman if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && 3713a5f5f91dSMel Gorman sum_zone_node_page_state(pgdat->node_id, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages) 3714a5f5f91dSMel Gorman return NODE_RECLAIM_FULL; 3715179e9639SAndrew Morton 3716a5f5f91dSMel Gorman if (!pgdat_reclaimable(pgdat)) 3717a5f5f91dSMel Gorman return NODE_RECLAIM_FULL; 3718d773ed6bSDavid Rientjes 3719179e9639SAndrew Morton /* 3720d773ed6bSDavid Rientjes * Do not scan if the allocation should not be delayed. 3721179e9639SAndrew Morton */ 3722d0164adcSMel Gorman if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) 3723a5f5f91dSMel Gorman return NODE_RECLAIM_NOSCAN; 3724179e9639SAndrew Morton 3725179e9639SAndrew Morton /* 3726a5f5f91dSMel Gorman * Only run node reclaim on the local node or on nodes that do not 3727179e9639SAndrew Morton * have associated processors. This will favor the local processor 3728179e9639SAndrew Morton * over remote processors and spread off node memory allocations 3729179e9639SAndrew Morton * as wide as possible. 3730179e9639SAndrew Morton */ 3731a5f5f91dSMel Gorman if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) 3732a5f5f91dSMel Gorman return NODE_RECLAIM_NOSCAN; 3733d773ed6bSDavid Rientjes 3734a5f5f91dSMel Gorman if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) 3735a5f5f91dSMel Gorman return NODE_RECLAIM_NOSCAN; 3736fa5e084eSMel Gorman 3737a5f5f91dSMel Gorman ret = __node_reclaim(pgdat, gfp_mask, order); 3738a5f5f91dSMel Gorman clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); 3739d773ed6bSDavid Rientjes 374024cf7251SMel Gorman if (!ret) 374124cf7251SMel Gorman count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); 374224cf7251SMel Gorman 3743d773ed6bSDavid Rientjes return ret; 3744179e9639SAndrew Morton } 37459eeff239SChristoph Lameter #endif 3746894bc310SLee Schermerhorn 3747894bc310SLee Schermerhorn /* 3748894bc310SLee Schermerhorn * page_evictable - test whether a page is evictable 3749894bc310SLee Schermerhorn * @page: the page to test 3750894bc310SLee Schermerhorn * 3751894bc310SLee Schermerhorn * Test whether page is evictable--i.e., should be placed on active/inactive 375239b5f29aSHugh Dickins * lists vs unevictable list. 3753894bc310SLee Schermerhorn * 3754894bc310SLee Schermerhorn * Reasons page might not be evictable: 3755ba9ddf49SLee Schermerhorn * (1) page's mapping marked unevictable 3756b291f000SNick Piggin * (2) page is part of an mlocked VMA 3757ba9ddf49SLee Schermerhorn * 3758894bc310SLee Schermerhorn */ 375939b5f29aSHugh Dickins int page_evictable(struct page *page) 3760894bc310SLee Schermerhorn { 376139b5f29aSHugh Dickins return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page); 3762894bc310SLee Schermerhorn } 376389e004eaSLee Schermerhorn 376485046579SHugh Dickins #ifdef CONFIG_SHMEM 376589e004eaSLee Schermerhorn /** 376624513264SHugh Dickins * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list 376724513264SHugh Dickins * @pages: array of pages to check 376824513264SHugh Dickins * @nr_pages: number of pages to check 376989e004eaSLee Schermerhorn * 377024513264SHugh Dickins * Checks pages for evictability and moves them to the appropriate lru list. 377185046579SHugh Dickins * 377285046579SHugh Dickins * This function is only used for SysV IPC SHM_UNLOCK. 377389e004eaSLee Schermerhorn */ 377424513264SHugh Dickins void check_move_unevictable_pages(struct page **pages, int nr_pages) 377589e004eaSLee Schermerhorn { 3776925b7673SJohannes Weiner struct lruvec *lruvec; 3777*785b99feSMel Gorman struct pglist_data *pgdat = NULL; 377824513264SHugh Dickins int pgscanned = 0; 377924513264SHugh Dickins int pgrescued = 0; 378089e004eaSLee Schermerhorn int i; 378189e004eaSLee Schermerhorn 378224513264SHugh Dickins for (i = 0; i < nr_pages; i++) { 378324513264SHugh Dickins struct page *page = pages[i]; 3784*785b99feSMel Gorman struct pglist_data *pagepgdat = page_pgdat(page); 378589e004eaSLee Schermerhorn 378624513264SHugh Dickins pgscanned++; 3787*785b99feSMel Gorman if (pagepgdat != pgdat) { 3788*785b99feSMel Gorman if (pgdat) 3789*785b99feSMel Gorman spin_unlock_irq(&pgdat->lru_lock); 3790*785b99feSMel Gorman pgdat = pagepgdat; 3791*785b99feSMel Gorman spin_lock_irq(&pgdat->lru_lock); 379289e004eaSLee Schermerhorn } 3793*785b99feSMel Gorman lruvec = mem_cgroup_page_lruvec(page, pgdat); 379489e004eaSLee Schermerhorn 379524513264SHugh Dickins if (!PageLRU(page) || !PageUnevictable(page)) 379624513264SHugh Dickins continue; 379789e004eaSLee Schermerhorn 379839b5f29aSHugh Dickins if (page_evictable(page)) { 379924513264SHugh Dickins enum lru_list lru = page_lru_base_type(page); 380024513264SHugh Dickins 3801309381feSSasha Levin VM_BUG_ON_PAGE(PageActive(page), page); 380224513264SHugh Dickins ClearPageUnevictable(page); 3803fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE); 3804fa9add64SHugh Dickins add_page_to_lru_list(page, lruvec, lru); 380524513264SHugh Dickins pgrescued++; 380689e004eaSLee Schermerhorn } 380789e004eaSLee Schermerhorn } 380824513264SHugh Dickins 3809*785b99feSMel Gorman if (pgdat) { 381024513264SHugh Dickins __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); 381124513264SHugh Dickins __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); 3812*785b99feSMel Gorman spin_unlock_irq(&pgdat->lru_lock); 381324513264SHugh Dickins } 381485046579SHugh Dickins } 381585046579SHugh Dickins #endif /* CONFIG_SHMEM */ 3816