xref: /openbmc/linux/mm/vmscan.c (revision a52633d8e9c35832f1409dc5fa166019048a3f1f)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  *  linux/mm/vmscan.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *  Swap reorganised 29.12.95, Stephen Tweedie.
71da177e4SLinus Torvalds  *  kswapd added: 7.1.96  sct
81da177e4SLinus Torvalds  *  Removed kswapd_ctl limits, and swap out as many pages as needed
91da177e4SLinus Torvalds  *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
101da177e4SLinus Torvalds  *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
111da177e4SLinus Torvalds  *  Multiqueue VM started 5.8.00, Rik van Riel.
121da177e4SLinus Torvalds  */
131da177e4SLinus Torvalds 
14b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15b1de0d13SMitchel Humpherys 
161da177e4SLinus Torvalds #include <linux/mm.h>
171da177e4SLinus Torvalds #include <linux/module.h>
185a0e3ad6STejun Heo #include <linux/gfp.h>
191da177e4SLinus Torvalds #include <linux/kernel_stat.h>
201da177e4SLinus Torvalds #include <linux/swap.h>
211da177e4SLinus Torvalds #include <linux/pagemap.h>
221da177e4SLinus Torvalds #include <linux/init.h>
231da177e4SLinus Torvalds #include <linux/highmem.h>
2470ddf637SAnton Vorontsov #include <linux/vmpressure.h>
25e129b5c2SAndrew Morton #include <linux/vmstat.h>
261da177e4SLinus Torvalds #include <linux/file.h>
271da177e4SLinus Torvalds #include <linux/writeback.h>
281da177e4SLinus Torvalds #include <linux/blkdev.h>
291da177e4SLinus Torvalds #include <linux/buffer_head.h>	/* for try_to_release_page(),
301da177e4SLinus Torvalds 					buffer_heads_over_limit */
311da177e4SLinus Torvalds #include <linux/mm_inline.h>
321da177e4SLinus Torvalds #include <linux/backing-dev.h>
331da177e4SLinus Torvalds #include <linux/rmap.h>
341da177e4SLinus Torvalds #include <linux/topology.h>
351da177e4SLinus Torvalds #include <linux/cpu.h>
361da177e4SLinus Torvalds #include <linux/cpuset.h>
373e7d3449SMel Gorman #include <linux/compaction.h>
381da177e4SLinus Torvalds #include <linux/notifier.h>
391da177e4SLinus Torvalds #include <linux/rwsem.h>
40248a0301SRafael J. Wysocki #include <linux/delay.h>
413218ae14SYasunori Goto #include <linux/kthread.h>
427dfb7103SNigel Cunningham #include <linux/freezer.h>
4366e1707bSBalbir Singh #include <linux/memcontrol.h>
44873b4771SKeika Kobayashi #include <linux/delayacct.h>
45af936a16SLee Schermerhorn #include <linux/sysctl.h>
46929bea7cSKOSAKI Motohiro #include <linux/oom.h>
47268bb0ceSLinus Torvalds #include <linux/prefetch.h>
48b1de0d13SMitchel Humpherys #include <linux/printk.h>
49f9fe48beSRoss Zwisler #include <linux/dax.h>
501da177e4SLinus Torvalds 
511da177e4SLinus Torvalds #include <asm/tlbflush.h>
521da177e4SLinus Torvalds #include <asm/div64.h>
531da177e4SLinus Torvalds 
541da177e4SLinus Torvalds #include <linux/swapops.h>
55117aad1eSRafael Aquini #include <linux/balloon_compaction.h>
561da177e4SLinus Torvalds 
570f8053a5SNick Piggin #include "internal.h"
580f8053a5SNick Piggin 
5933906bc5SMel Gorman #define CREATE_TRACE_POINTS
6033906bc5SMel Gorman #include <trace/events/vmscan.h>
6133906bc5SMel Gorman 
621da177e4SLinus Torvalds struct scan_control {
6322fba335SKOSAKI Motohiro 	/* How many pages shrink_list() should reclaim */
6422fba335SKOSAKI Motohiro 	unsigned long nr_to_reclaim;
6522fba335SKOSAKI Motohiro 
661da177e4SLinus Torvalds 	/* This context's GFP mask */
676daa0e28SAl Viro 	gfp_t gfp_mask;
681da177e4SLinus Torvalds 
69ee814fe2SJohannes Weiner 	/* Allocation order */
705ad333ebSAndy Whitcroft 	int order;
7166e1707bSBalbir Singh 
72ee814fe2SJohannes Weiner 	/*
73ee814fe2SJohannes Weiner 	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
74ee814fe2SJohannes Weiner 	 * are scanned.
75ee814fe2SJohannes Weiner 	 */
76ee814fe2SJohannes Weiner 	nodemask_t	*nodemask;
779e3b2f8cSKonstantin Khlebnikov 
785f53e762SKOSAKI Motohiro 	/*
79f16015fbSJohannes Weiner 	 * The memory cgroup that hit its limit and as a result is the
80f16015fbSJohannes Weiner 	 * primary target of this reclaim invocation.
81f16015fbSJohannes Weiner 	 */
82f16015fbSJohannes Weiner 	struct mem_cgroup *target_mem_cgroup;
8366e1707bSBalbir Singh 
84ee814fe2SJohannes Weiner 	/* Scan (total_size >> priority) pages at once */
85ee814fe2SJohannes Weiner 	int priority;
86ee814fe2SJohannes Weiner 
87ee814fe2SJohannes Weiner 	unsigned int may_writepage:1;
88ee814fe2SJohannes Weiner 
89ee814fe2SJohannes Weiner 	/* Can mapped pages be reclaimed? */
90ee814fe2SJohannes Weiner 	unsigned int may_unmap:1;
91ee814fe2SJohannes Weiner 
92ee814fe2SJohannes Weiner 	/* Can pages be swapped as part of reclaim? */
93ee814fe2SJohannes Weiner 	unsigned int may_swap:1;
94ee814fe2SJohannes Weiner 
95241994edSJohannes Weiner 	/* Can cgroups be reclaimed below their normal consumption range? */
96241994edSJohannes Weiner 	unsigned int may_thrash:1;
97241994edSJohannes Weiner 
98ee814fe2SJohannes Weiner 	unsigned int hibernation_mode:1;
99ee814fe2SJohannes Weiner 
100ee814fe2SJohannes Weiner 	/* One of the zones is ready for compaction */
101ee814fe2SJohannes Weiner 	unsigned int compaction_ready:1;
102ee814fe2SJohannes Weiner 
103ee814fe2SJohannes Weiner 	/* Incremented by the number of inactive pages that were scanned */
104ee814fe2SJohannes Weiner 	unsigned long nr_scanned;
105ee814fe2SJohannes Weiner 
106ee814fe2SJohannes Weiner 	/* Number of pages freed so far during a call to shrink_zones() */
107ee814fe2SJohannes Weiner 	unsigned long nr_reclaimed;
1081da177e4SLinus Torvalds };
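
/*
 * For orientation, a minimal sketch of how a direct-reclaim call site
 * (modelled loosely on try_to_free_pages()) fills in scan_control; the
 * values below are illustrative rather than copied from any one caller:
 *
 *	struct scan_control sc = {
 *		.nr_to_reclaim	= SWAP_CLUSTER_MAX,
 *		.gfp_mask	= gfp_mask,
 *		.order		= order,
 *		.nodemask	= nodemask,
 *		.priority	= DEF_PRIORITY,
 *		.may_writepage	= !laptop_mode,
 *		.may_unmap	= 1,
 *		.may_swap	= 1,
 *	};
 */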
1091da177e4SLinus Torvalds 
1101da177e4SLinus Torvalds #ifdef ARCH_HAS_PREFETCH
1111da177e4SLinus Torvalds #define prefetch_prev_lru_page(_page, _base, _field)			\
1121da177e4SLinus Torvalds 	do {								\
1131da177e4SLinus Torvalds 		if ((_page)->lru.prev != _base) {			\
1141da177e4SLinus Torvalds 			struct page *prev;				\
1151da177e4SLinus Torvalds 									\
1161da177e4SLinus Torvalds 			prev = lru_to_page(&(_page->lru));		\
1171da177e4SLinus Torvalds 			prefetch(&prev->_field);			\
1181da177e4SLinus Torvalds 		}							\
1191da177e4SLinus Torvalds 	} while (0)
1201da177e4SLinus Torvalds #else
1211da177e4SLinus Torvalds #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
1221da177e4SLinus Torvalds #endif
1231da177e4SLinus Torvalds 
1241da177e4SLinus Torvalds #ifdef ARCH_HAS_PREFETCHW
1251da177e4SLinus Torvalds #define prefetchw_prev_lru_page(_page, _base, _field)			\
1261da177e4SLinus Torvalds 	do {								\
1271da177e4SLinus Torvalds 		if ((_page)->lru.prev != _base) {			\
1281da177e4SLinus Torvalds 			struct page *prev;				\
1291da177e4SLinus Torvalds 									\
1301da177e4SLinus Torvalds 			prev = lru_to_page(&(_page->lru));		\
1311da177e4SLinus Torvalds 			prefetchw(&prev->_field);			\
1321da177e4SLinus Torvalds 		}							\
1331da177e4SLinus Torvalds 	} while (0)
1341da177e4SLinus Torvalds #else
1351da177e4SLinus Torvalds #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
1361da177e4SLinus Torvalds #endif
1371da177e4SLinus Torvalds 
1381da177e4SLinus Torvalds /*
1391da177e4SLinus Torvalds  * From 0 .. 100.  Higher means more swappy.
1401da177e4SLinus Torvalds  */
1411da177e4SLinus Torvalds int vm_swappiness = 60;
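
/*
 * Rough worked example of what the knob means: get_scan_count() weighs
 * anon vs. file LRU scanning approximately as anon_prio = vm_swappiness
 * and file_prio = 200 - vm_swappiness, so the default of 60 biases
 * reclaim towards file pages (60 : 140), 100 applies equal pressure to
 * both, and 0 avoids swap whenever file pages are still reclaimable.
 */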
142d0480be4SWang Sheng-Hui /*
143d0480be4SWang Sheng-Hui  * The total number of pages which are beyond the high watermark within all
144d0480be4SWang Sheng-Hui  * zones.
145d0480be4SWang Sheng-Hui  */
146d0480be4SWang Sheng-Hui unsigned long vm_total_pages;
1471da177e4SLinus Torvalds 
1481da177e4SLinus Torvalds static LIST_HEAD(shrinker_list);
1491da177e4SLinus Torvalds static DECLARE_RWSEM(shrinker_rwsem);
1501da177e4SLinus Torvalds 
151c255a458SAndrew Morton #ifdef CONFIG_MEMCG
15289b5fae5SJohannes Weiner static bool global_reclaim(struct scan_control *sc)
15389b5fae5SJohannes Weiner {
154f16015fbSJohannes Weiner 	return !sc->target_mem_cgroup;
15589b5fae5SJohannes Weiner }
15697c9341fSTejun Heo 
15797c9341fSTejun Heo /**
15897c9341fSTejun Heo  * sane_reclaim - is the usual dirty throttling mechanism operational?
15997c9341fSTejun Heo  * @sc: scan_control in question
16097c9341fSTejun Heo  *
16197c9341fSTejun Heo  * The normal page dirty throttling mechanism in balance_dirty_pages() is
16297c9341fSTejun Heo  * completely broken with the legacy memcg and direct stalling in
16397c9341fSTejun Heo  * shrink_page_list() is used for throttling instead, which lacks all the
16497c9341fSTejun Heo  * niceties such as fairness, adaptive pausing, bandwidth proportional
16597c9341fSTejun Heo  * allocation and configurability.
16697c9341fSTejun Heo  *
16797c9341fSTejun Heo  * This function tests whether the vmscan currently in progress can assume
16897c9341fSTejun Heo  * that the normal dirty throttling mechanism is operational.
16997c9341fSTejun Heo  */
17097c9341fSTejun Heo static bool sane_reclaim(struct scan_control *sc)
17197c9341fSTejun Heo {
17297c9341fSTejun Heo 	struct mem_cgroup *memcg = sc->target_mem_cgroup;
17397c9341fSTejun Heo 
17497c9341fSTejun Heo 	if (!memcg)
17597c9341fSTejun Heo 		return true;
17697c9341fSTejun Heo #ifdef CONFIG_CGROUP_WRITEBACK
17769234aceSLinus Torvalds 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
17897c9341fSTejun Heo 		return true;
17997c9341fSTejun Heo #endif
18097c9341fSTejun Heo 	return false;
18197c9341fSTejun Heo }
18291a45470SKAMEZAWA Hiroyuki #else
18389b5fae5SJohannes Weiner static bool global_reclaim(struct scan_control *sc)
18489b5fae5SJohannes Weiner {
18589b5fae5SJohannes Weiner 	return true;
18689b5fae5SJohannes Weiner }
18797c9341fSTejun Heo 
18897c9341fSTejun Heo static bool sane_reclaim(struct scan_control *sc)
18997c9341fSTejun Heo {
19097c9341fSTejun Heo 	return true;
19197c9341fSTejun Heo }
19291a45470SKAMEZAWA Hiroyuki #endif
19391a45470SKAMEZAWA Hiroyuki 
1940a0337e0SMichal Hocko unsigned long zone_reclaimable_pages(struct zone *zone)
1956e543d57SLisa Du {
196d031a157SAlexandru Moise 	unsigned long nr;
1976e543d57SLisa Du 
1980db2cb8dSMichal Hocko 	nr = zone_page_state_snapshot(zone, NR_ACTIVE_FILE) +
1990db2cb8dSMichal Hocko 	     zone_page_state_snapshot(zone, NR_INACTIVE_FILE) +
2000db2cb8dSMichal Hocko 	     zone_page_state_snapshot(zone, NR_ISOLATED_FILE);
2016e543d57SLisa Du 
2026e543d57SLisa Du 	if (get_nr_swap_pages() > 0)
2030db2cb8dSMichal Hocko 		nr += zone_page_state_snapshot(zone, NR_ACTIVE_ANON) +
2040db2cb8dSMichal Hocko 		      zone_page_state_snapshot(zone, NR_INACTIVE_ANON) +
2050db2cb8dSMichal Hocko 		      zone_page_state_snapshot(zone, NR_ISOLATED_ANON);
2066e543d57SLisa Du 
2076e543d57SLisa Du 	return nr;
2086e543d57SLisa Du }
2096e543d57SLisa Du 
2106e543d57SLisa Du bool zone_reclaimable(struct zone *zone)
2116e543d57SLisa Du {
2120db2cb8dSMichal Hocko 	return zone_page_state_snapshot(zone, NR_PAGES_SCANNED) <
2130d5d823aSMel Gorman 		zone_reclaimable_pages(zone) * 6;
2146e543d57SLisa Du }
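
/*
 * Worked example of the heuristic above: a zone with 10,000 reclaimable
 * pages keeps being treated as reclaimable until NR_PAGES_SCANNED reaches
 * 60,000, i.e. until it has been scanned six times over without progress.
 */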
2156e543d57SLisa Du 
21623047a96SJohannes Weiner unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
217c9f299d9SKOSAKI Motohiro {
218c3c787e8SHugh Dickins 	if (!mem_cgroup_disabled())
2194d7dcca2SHugh Dickins 		return mem_cgroup_get_lru_size(lruvec, lru);
220a3d8e054SKOSAKI Motohiro 
221074291feSKonstantin Khlebnikov 	return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
222c9f299d9SKOSAKI Motohiro }
223c9f299d9SKOSAKI Motohiro 
2241da177e4SLinus Torvalds /*
2251d3d4437SGlauber Costa  * Add a shrinker callback to be called from the vm.
2261da177e4SLinus Torvalds  */
2271d3d4437SGlauber Costa int register_shrinker(struct shrinker *shrinker)
2281da177e4SLinus Torvalds {
2291d3d4437SGlauber Costa 	size_t size = sizeof(*shrinker->nr_deferred);
2301d3d4437SGlauber Costa 
2311d3d4437SGlauber Costa 	if (shrinker->flags & SHRINKER_NUMA_AWARE)
2321d3d4437SGlauber Costa 		size *= nr_node_ids;
2331d3d4437SGlauber Costa 
2341d3d4437SGlauber Costa 	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
2351d3d4437SGlauber Costa 	if (!shrinker->nr_deferred)
2361d3d4437SGlauber Costa 		return -ENOMEM;
2371d3d4437SGlauber Costa 
2381da177e4SLinus Torvalds 	down_write(&shrinker_rwsem);
2391da177e4SLinus Torvalds 	list_add_tail(&shrinker->list, &shrinker_list);
2401da177e4SLinus Torvalds 	up_write(&shrinker_rwsem);
2411d3d4437SGlauber Costa 	return 0;
2421da177e4SLinus Torvalds }
2438e1f936bSRusty Russell EXPORT_SYMBOL(register_shrinker);
2441da177e4SLinus Torvalds 
2451da177e4SLinus Torvalds /*
2461da177e4SLinus Torvalds  * Remove one
2471da177e4SLinus Torvalds  */
2488e1f936bSRusty Russell void unregister_shrinker(struct shrinker *shrinker)
2491da177e4SLinus Torvalds {
2501da177e4SLinus Torvalds 	down_write(&shrinker_rwsem);
2511da177e4SLinus Torvalds 	list_del(&shrinker->list);
2521da177e4SLinus Torvalds 	up_write(&shrinker_rwsem);
253ae393321SAndrew Vagin 	kfree(shrinker->nr_deferred);
2541da177e4SLinus Torvalds }
2558e1f936bSRusty Russell EXPORT_SYMBOL(unregister_shrinker);
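
/*
 * Minimal usage sketch for the registration API above.  The demo_* names
 * and the cache they manage are hypothetical; only the callback signatures,
 * the struct shrinker fields and the (un)register calls are the real
 * interface:
 *
 *	static unsigned long demo_count(struct shrinker *s,
 *					struct shrink_control *sc)
 *	{
 *		return demo_cache_nr_objects();	// 0 means nothing to reclaim
 *	}
 *
 *	static unsigned long demo_scan(struct shrinker *s,
 *				       struct shrink_control *sc)
 *	{
 *		// free up to sc->nr_to_scan objects and return how many
 *		// were freed, or SHRINK_STOP if no progress is possible
 *		return demo_cache_free(sc->nr_to_scan);
 *	}
 *
 *	static struct shrinker demo_shrinker = {
 *		.count_objects	= demo_count,
 *		.scan_objects	= demo_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 *	// init:     register_shrinker(&demo_shrinker);
 *	// teardown: unregister_shrinker(&demo_shrinker);
 */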
2561da177e4SLinus Torvalds 
2571da177e4SLinus Torvalds #define SHRINK_BATCH 128
2581d3d4437SGlauber Costa 
259cb731d6cSVladimir Davydov static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
2606b4f7799SJohannes Weiner 				    struct shrinker *shrinker,
2616b4f7799SJohannes Weiner 				    unsigned long nr_scanned,
2626b4f7799SJohannes Weiner 				    unsigned long nr_eligible)
2631da177e4SLinus Torvalds {
26424f7c6b9SDave Chinner 	unsigned long freed = 0;
2651da177e4SLinus Torvalds 	unsigned long long delta;
266635697c6SKonstantin Khlebnikov 	long total_scan;
267d5bc5fd3SVladimir Davydov 	long freeable;
268acf92b48SDave Chinner 	long nr;
269acf92b48SDave Chinner 	long new_nr;
2701d3d4437SGlauber Costa 	int nid = shrinkctl->nid;
271e9299f50SDave Chinner 	long batch_size = shrinker->batch ? shrinker->batch
272e9299f50SDave Chinner 					  : SHRINK_BATCH;
2731da177e4SLinus Torvalds 
274d5bc5fd3SVladimir Davydov 	freeable = shrinker->count_objects(shrinker, shrinkctl);
275d5bc5fd3SVladimir Davydov 	if (freeable == 0)
2761d3d4437SGlauber Costa 		return 0;
277635697c6SKonstantin Khlebnikov 
278acf92b48SDave Chinner 	/*
279acf92b48SDave Chinner 	 * copy the current shrinker scan count into a local variable
280acf92b48SDave Chinner 	 * and zero it so that other concurrent shrinker invocations
281acf92b48SDave Chinner 	 * don't also do this scanning work.
282acf92b48SDave Chinner 	 */
2831d3d4437SGlauber Costa 	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
284acf92b48SDave Chinner 
285acf92b48SDave Chinner 	total_scan = nr;
2866b4f7799SJohannes Weiner 	delta = (4 * nr_scanned) / shrinker->seeks;
287d5bc5fd3SVladimir Davydov 	delta *= freeable;
2886b4f7799SJohannes Weiner 	do_div(delta, nr_eligible + 1);
289acf92b48SDave Chinner 	total_scan += delta;
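	/*
	 * Worked example with illustrative numbers: nr_scanned = 32,
	 * nr_eligible = 1024, shrinker->seeks = DEFAULT_SEEKS (2) and
	 * freeable = 10000 give delta = (4 * 32 / 2) * 10000 / 1025 ~= 624
	 * objects, i.e. about twice the ~3% scan pressure the LRU lists
	 * just saw, applied to this cache.
	 */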
290acf92b48SDave Chinner 	if (total_scan < 0) {
2918612c663SPintu Kumar 		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
292a0b02131SDave Chinner 		       shrinker->scan_objects, total_scan);
293d5bc5fd3SVladimir Davydov 		total_scan = freeable;
294ea164d73SAndrea Arcangeli 	}
295ea164d73SAndrea Arcangeli 
296ea164d73SAndrea Arcangeli 	/*
2973567b59aSDave Chinner 	 * We need to avoid excessive windup on filesystem shrinkers
2983567b59aSDave Chinner 	 * due to large numbers of GFP_NOFS allocations causing the
2993567b59aSDave Chinner 	 * shrinkers to return -1 all the time. This results in a large
3003567b59aSDave Chinner 	 * nr being built up so when a shrink that can do some work
3013567b59aSDave Chinner 	 * comes along it empties the entire cache due to nr >>>
302d5bc5fd3SVladimir Davydov 	 * freeable. This is bad for sustaining a working set in
3033567b59aSDave Chinner 	 * memory.
3043567b59aSDave Chinner 	 *
3053567b59aSDave Chinner 	 * Hence only allow the shrinker to scan the entire cache when
3063567b59aSDave Chinner 	 * a large delta change is calculated directly.
3073567b59aSDave Chinner 	 */
308d5bc5fd3SVladimir Davydov 	if (delta < freeable / 4)
309d5bc5fd3SVladimir Davydov 		total_scan = min(total_scan, freeable / 2);
3103567b59aSDave Chinner 
3113567b59aSDave Chinner 	/*
312ea164d73SAndrea Arcangeli 	 * Avoid risking looping forever due to too large nr value:
313ea164d73SAndrea Arcangeli 	 * never try to free more than twice the estimated number of
314ea164d73SAndrea Arcangeli 	 * freeable entries.
315ea164d73SAndrea Arcangeli 	 */
316d5bc5fd3SVladimir Davydov 	if (total_scan > freeable * 2)
317d5bc5fd3SVladimir Davydov 		total_scan = freeable * 2;
3181da177e4SLinus Torvalds 
31924f7c6b9SDave Chinner 	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
3206b4f7799SJohannes Weiner 				   nr_scanned, nr_eligible,
321d5bc5fd3SVladimir Davydov 				   freeable, delta, total_scan);
32209576073SDave Chinner 
3230b1fb40aSVladimir Davydov 	/*
3240b1fb40aSVladimir Davydov 	 * Normally, we should not scan less than batch_size objects in one
3250b1fb40aSVladimir Davydov 	 * pass to avoid too frequent shrinker calls, but if the slab has less
3260b1fb40aSVladimir Davydov 	 * than batch_size objects in total and we are really tight on memory,
3270b1fb40aSVladimir Davydov 	 * we will try to reclaim all available objects, otherwise we can end
3280b1fb40aSVladimir Davydov 	 * up failing allocations although there are plenty of reclaimable
3290b1fb40aSVladimir Davydov 	 * objects spread over several slabs with usage less than the
3300b1fb40aSVladimir Davydov 	 * batch_size.
3310b1fb40aSVladimir Davydov 	 *
3320b1fb40aSVladimir Davydov 	 * We detect the "tight on memory" situations by looking at the total
3330b1fb40aSVladimir Davydov 	 * number of objects we want to scan (total_scan). If it is greater
334d5bc5fd3SVladimir Davydov 	 * than the total number of objects on slab (freeable), we must be
3350b1fb40aSVladimir Davydov 	 * scanning at high prio and therefore should try to reclaim as much as
3360b1fb40aSVladimir Davydov 	 * possible.
3370b1fb40aSVladimir Davydov 	 */
3380b1fb40aSVladimir Davydov 	while (total_scan >= batch_size ||
339d5bc5fd3SVladimir Davydov 	       total_scan >= freeable) {
34024f7c6b9SDave Chinner 		unsigned long ret;
3410b1fb40aSVladimir Davydov 		unsigned long nr_to_scan = min(batch_size, total_scan);
3421da177e4SLinus Torvalds 
3430b1fb40aSVladimir Davydov 		shrinkctl->nr_to_scan = nr_to_scan;
34424f7c6b9SDave Chinner 		ret = shrinker->scan_objects(shrinker, shrinkctl);
34524f7c6b9SDave Chinner 		if (ret == SHRINK_STOP)
3461da177e4SLinus Torvalds 			break;
34724f7c6b9SDave Chinner 		freed += ret;
34824f7c6b9SDave Chinner 
3490b1fb40aSVladimir Davydov 		count_vm_events(SLABS_SCANNED, nr_to_scan);
3500b1fb40aSVladimir Davydov 		total_scan -= nr_to_scan;
3511da177e4SLinus Torvalds 
3521da177e4SLinus Torvalds 		cond_resched();
3531da177e4SLinus Torvalds 	}
3541da177e4SLinus Torvalds 
355acf92b48SDave Chinner 	/*
356acf92b48SDave Chinner 	 * move the unused scan count back into the shrinker in a
357acf92b48SDave Chinner 	 * manner that handles concurrent updates. If we exhausted the
358acf92b48SDave Chinner 	 * scan, there is no need to do an update.
359acf92b48SDave Chinner 	 */
36083aeeadaSKonstantin Khlebnikov 	if (total_scan > 0)
36183aeeadaSKonstantin Khlebnikov 		new_nr = atomic_long_add_return(total_scan,
3621d3d4437SGlauber Costa 						&shrinker->nr_deferred[nid]);
36383aeeadaSKonstantin Khlebnikov 	else
3641d3d4437SGlauber Costa 		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
365acf92b48SDave Chinner 
366df9024a8SDave Hansen 	trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
3671d3d4437SGlauber Costa 	return freed;
3681d3d4437SGlauber Costa }
3691d3d4437SGlauber Costa 
3706b4f7799SJohannes Weiner /**
371cb731d6cSVladimir Davydov  * shrink_slab - shrink slab caches
3726b4f7799SJohannes Weiner  * @gfp_mask: allocation context
3736b4f7799SJohannes Weiner  * @nid: node whose slab caches to target
374cb731d6cSVladimir Davydov  * @memcg: memory cgroup whose slab caches to target
3756b4f7799SJohannes Weiner  * @nr_scanned: pressure numerator
3766b4f7799SJohannes Weiner  * @nr_eligible: pressure denominator
3771d3d4437SGlauber Costa  *
3786b4f7799SJohannes Weiner  * Call the shrink functions to age shrinkable caches.
3791d3d4437SGlauber Costa  *
3806b4f7799SJohannes Weiner  * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set;
3816b4f7799SJohannes Weiner  * unaware shrinkers will receive a node id of 0 instead.
3821d3d4437SGlauber Costa  *
383cb731d6cSVladimir Davydov  * @memcg specifies the memory cgroup to target. If it is not NULL,
384cb731d6cSVladimir Davydov  * only shrinkers with SHRINKER_MEMCG_AWARE set will be called to scan
3850fc9f58aSVladimir Davydov  * objects from the memory cgroup specified. Otherwise, only unaware
3860fc9f58aSVladimir Davydov  * shrinkers are called.
387cb731d6cSVladimir Davydov  *
3886b4f7799SJohannes Weiner  * @nr_scanned and @nr_eligible form a ratio that indicates how much of
3896b4f7799SJohannes Weiner  * the available objects should be scanned.  Page reclaim for example
3906b4f7799SJohannes Weiner  * passes the number of pages scanned and the number of pages on the
3916b4f7799SJohannes Weiner  * LRU lists that it considered on @nid, plus a bias in @nr_scanned
3926b4f7799SJohannes Weiner  * when it encountered mapped pages.  The ratio is further biased by
3936b4f7799SJohannes Weiner  * the ->seeks setting of the shrink function, which indicates the
3946b4f7799SJohannes Weiner  * cost to recreate an object relative to that of an LRU page.
3951d3d4437SGlauber Costa  *
3966b4f7799SJohannes Weiner  * Returns the number of reclaimed slab objects.
3971d3d4437SGlauber Costa  */
398cb731d6cSVladimir Davydov static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
399cb731d6cSVladimir Davydov 				 struct mem_cgroup *memcg,
4006b4f7799SJohannes Weiner 				 unsigned long nr_scanned,
4016b4f7799SJohannes Weiner 				 unsigned long nr_eligible)
4021d3d4437SGlauber Costa {
4031d3d4437SGlauber Costa 	struct shrinker *shrinker;
4041d3d4437SGlauber Costa 	unsigned long freed = 0;
4051d3d4437SGlauber Costa 
4060fc9f58aSVladimir Davydov 	if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
407cb731d6cSVladimir Davydov 		return 0;
408cb731d6cSVladimir Davydov 
4096b4f7799SJohannes Weiner 	if (nr_scanned == 0)
4106b4f7799SJohannes Weiner 		nr_scanned = SWAP_CLUSTER_MAX;
4111d3d4437SGlauber Costa 
4121d3d4437SGlauber Costa 	if (!down_read_trylock(&shrinker_rwsem)) {
4131d3d4437SGlauber Costa 		/*
4141d3d4437SGlauber Costa 		 * If we would return 0, our callers would understand that we
4151d3d4437SGlauber Costa 		 * have nothing else to shrink and give up trying. By returning
4161d3d4437SGlauber Costa 		 * 1 we keep it going and assume we'll be able to shrink next
4171d3d4437SGlauber Costa 		 * time.
4181d3d4437SGlauber Costa 		 */
4191d3d4437SGlauber Costa 		freed = 1;
4201d3d4437SGlauber Costa 		goto out;
4211d3d4437SGlauber Costa 	}
4221d3d4437SGlauber Costa 
4231d3d4437SGlauber Costa 	list_for_each_entry(shrinker, &shrinker_list, list) {
4246b4f7799SJohannes Weiner 		struct shrink_control sc = {
4256b4f7799SJohannes Weiner 			.gfp_mask = gfp_mask,
4266b4f7799SJohannes Weiner 			.nid = nid,
427cb731d6cSVladimir Davydov 			.memcg = memcg,
4286b4f7799SJohannes Weiner 		};
4296b4f7799SJohannes Weiner 
4300fc9f58aSVladimir Davydov 		/*
4310fc9f58aSVladimir Davydov 		 * If kernel memory accounting is disabled, we ignore
4320fc9f58aSVladimir Davydov 		 * SHRINKER_MEMCG_AWARE flag and call all shrinkers
4330fc9f58aSVladimir Davydov 		 * passing NULL for memcg.
4340fc9f58aSVladimir Davydov 		 */
4350fc9f58aSVladimir Davydov 		if (memcg_kmem_enabled() &&
4360fc9f58aSVladimir Davydov 		    !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
437cb731d6cSVladimir Davydov 			continue;
438cb731d6cSVladimir Davydov 
4396b4f7799SJohannes Weiner 		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
4406b4f7799SJohannes Weiner 			sc.nid = 0;
4416b4f7799SJohannes Weiner 
442cb731d6cSVladimir Davydov 		freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);
443ec97097bSVladimir Davydov 	}
4441d3d4437SGlauber Costa 
4451da177e4SLinus Torvalds 	up_read(&shrinker_rwsem);
446f06590bdSMinchan Kim out:
447f06590bdSMinchan Kim 	cond_resched();
44824f7c6b9SDave Chinner 	return freed;
4491da177e4SLinus Torvalds }
4501da177e4SLinus Torvalds 
451cb731d6cSVladimir Davydov void drop_slab_node(int nid)
452cb731d6cSVladimir Davydov {
453cb731d6cSVladimir Davydov 	unsigned long freed;
454cb731d6cSVladimir Davydov 
455cb731d6cSVladimir Davydov 	do {
456cb731d6cSVladimir Davydov 		struct mem_cgroup *memcg = NULL;
457cb731d6cSVladimir Davydov 
458cb731d6cSVladimir Davydov 		freed = 0;
459cb731d6cSVladimir Davydov 		do {
460cb731d6cSVladimir Davydov 			freed += shrink_slab(GFP_KERNEL, nid, memcg,
461cb731d6cSVladimir Davydov 					     1000, 1000);
462cb731d6cSVladimir Davydov 		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
463cb731d6cSVladimir Davydov 	} while (freed > 10);
464cb731d6cSVladimir Davydov }
465cb731d6cSVladimir Davydov 
466cb731d6cSVladimir Davydov void drop_slab(void)
467cb731d6cSVladimir Davydov {
468cb731d6cSVladimir Davydov 	int nid;
469cb731d6cSVladimir Davydov 
470cb731d6cSVladimir Davydov 	for_each_online_node(nid)
471cb731d6cSVladimir Davydov 		drop_slab_node(nid);
472cb731d6cSVladimir Davydov }
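
/*
 * drop_slab() is what ultimately runs when userspace requests that slab
 * caches be dropped through the drop_caches sysctl (handled in
 * fs/drop_caches.c), e.g.:
 *
 *	echo 2 > /proc/sys/vm/drop_caches	(reclaimable slab objects only)
 *	echo 3 > /proc/sys/vm/drop_caches	(pagecache and slab objects)
 */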
473cb731d6cSVladimir Davydov 
4741da177e4SLinus Torvalds static inline int is_page_cache_freeable(struct page *page)
4751da177e4SLinus Torvalds {
476ceddc3a5SJohannes Weiner 	/*
477ceddc3a5SJohannes Weiner 	 * A freeable page cache page is referenced only by the caller
478ceddc3a5SJohannes Weiner 	 * that isolated the page, the page cache radix tree and
479ceddc3a5SJohannes Weiner 	 * optional buffer heads at page->private.
480ceddc3a5SJohannes Weiner 	 */
481edcf4748SJohannes Weiner 	return page_count(page) - page_has_private(page) == 2;
4821da177e4SLinus Torvalds }
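
/*
 * Illustrative accounting for the check above: a clean pagecache page held
 * only by the isolating caller and the radix tree has page_count == 2 and
 * page_has_private == 0, i.e. 2 - 0 == 2, so it is freeable.  With buffer
 * heads attached, PG_private is set and the buffers pin one extra reference,
 * so the page shows up as 3 - 1 == 2 and is still considered freeable.
 */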
4831da177e4SLinus Torvalds 
484703c2708STejun Heo static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
4851da177e4SLinus Torvalds {
486930d9152SChristoph Lameter 	if (current->flags & PF_SWAPWRITE)
4871da177e4SLinus Torvalds 		return 1;
488703c2708STejun Heo 	if (!inode_write_congested(inode))
4891da177e4SLinus Torvalds 		return 1;
490703c2708STejun Heo 	if (inode_to_bdi(inode) == current->backing_dev_info)
4911da177e4SLinus Torvalds 		return 1;
4921da177e4SLinus Torvalds 	return 0;
4931da177e4SLinus Torvalds }
4941da177e4SLinus Torvalds 
4951da177e4SLinus Torvalds /*
4961da177e4SLinus Torvalds  * We detected a synchronous write error writing a page out.  Probably
4971da177e4SLinus Torvalds  * -ENOSPC.  We need to propagate that into the address_space for a subsequent
4981da177e4SLinus Torvalds  * fsync(), msync() or close().
4991da177e4SLinus Torvalds  *
5001da177e4SLinus Torvalds  * The tricky part is that after writepage we cannot touch the mapping: nothing
5011da177e4SLinus Torvalds  * prevents it from being freed up.  But we have a ref on the page and once
5021da177e4SLinus Torvalds  * that page is locked, the mapping is pinned.
5031da177e4SLinus Torvalds  *
5041da177e4SLinus Torvalds  * We're allowed to run sleeping lock_page() here because we know the caller has
5051da177e4SLinus Torvalds  * __GFP_FS.
5061da177e4SLinus Torvalds  */
5071da177e4SLinus Torvalds static void handle_write_error(struct address_space *mapping,
5081da177e4SLinus Torvalds 				struct page *page, int error)
5091da177e4SLinus Torvalds {
5107eaceaccSJens Axboe 	lock_page(page);
5113e9f45bdSGuillaume Chazarain 	if (page_mapping(page) == mapping)
5123e9f45bdSGuillaume Chazarain 		mapping_set_error(mapping, error);
5131da177e4SLinus Torvalds 	unlock_page(page);
5141da177e4SLinus Torvalds }
5151da177e4SLinus Torvalds 
51604e62a29SChristoph Lameter /* possible outcome of pageout() */
51704e62a29SChristoph Lameter typedef enum {
51804e62a29SChristoph Lameter 	/* failed to write page out, page is locked */
51904e62a29SChristoph Lameter 	PAGE_KEEP,
52004e62a29SChristoph Lameter 	/* move page to the active list, page is locked */
52104e62a29SChristoph Lameter 	PAGE_ACTIVATE,
52204e62a29SChristoph Lameter 	/* page has been sent to the disk successfully, page is unlocked */
52304e62a29SChristoph Lameter 	PAGE_SUCCESS,
52404e62a29SChristoph Lameter 	/* page is clean and locked */
52504e62a29SChristoph Lameter 	PAGE_CLEAN,
52604e62a29SChristoph Lameter } pageout_t;
52704e62a29SChristoph Lameter 
5281da177e4SLinus Torvalds /*
5291742f19fSAndrew Morton  * pageout is called by shrink_page_list() for each dirty page.
5301742f19fSAndrew Morton  * Calls ->writepage().
5311da177e4SLinus Torvalds  */
532c661b078SAndy Whitcroft static pageout_t pageout(struct page *page, struct address_space *mapping,
5337d3579e8SKOSAKI Motohiro 			 struct scan_control *sc)
5341da177e4SLinus Torvalds {
5351da177e4SLinus Torvalds 	/*
5361da177e4SLinus Torvalds 	 * If the page is dirty, only perform writeback if that write
5371da177e4SLinus Torvalds 	 * will be non-blocking, to prevent this allocation from being
5381da177e4SLinus Torvalds 	 * stalled by pagecache activity.  But note that there may be
5391da177e4SLinus Torvalds 	 * stalls if we need to run get_block().  We could test
5401da177e4SLinus Torvalds 	 * PagePrivate for that.
5411da177e4SLinus Torvalds 	 *
5428174202bSAl Viro 	 * If this process is currently in __generic_file_write_iter() against
5431da177e4SLinus Torvalds 	 * this page's queue, we can perform writeback even if that
5441da177e4SLinus Torvalds 	 * will block.
5451da177e4SLinus Torvalds 	 *
5461da177e4SLinus Torvalds 	 * If the page is swapcache, write it back even if that would
5471da177e4SLinus Torvalds 	 * block, for some throttling. This happens by accident, because
5481da177e4SLinus Torvalds 	 * swap_backing_dev_info is bust: it doesn't reflect the
5491da177e4SLinus Torvalds 	 * congestion state of the swapdevs.  Easy to fix, if needed.
5501da177e4SLinus Torvalds 	 */
5511da177e4SLinus Torvalds 	if (!is_page_cache_freeable(page))
5521da177e4SLinus Torvalds 		return PAGE_KEEP;
5531da177e4SLinus Torvalds 	if (!mapping) {
5541da177e4SLinus Torvalds 		/*
5551da177e4SLinus Torvalds 		 * Some data journaling orphaned pages can have
5561da177e4SLinus Torvalds 		 * page->mapping == NULL while being dirty with clean buffers.
5571da177e4SLinus Torvalds 		 */
558266cf658SDavid Howells 		if (page_has_private(page)) {
5591da177e4SLinus Torvalds 			if (try_to_free_buffers(page)) {
5601da177e4SLinus Torvalds 				ClearPageDirty(page);
561b1de0d13SMitchel Humpherys 				pr_info("%s: orphaned page\n", __func__);
5621da177e4SLinus Torvalds 				return PAGE_CLEAN;
5631da177e4SLinus Torvalds 			}
5641da177e4SLinus Torvalds 		}
5651da177e4SLinus Torvalds 		return PAGE_KEEP;
5661da177e4SLinus Torvalds 	}
5671da177e4SLinus Torvalds 	if (mapping->a_ops->writepage == NULL)
5681da177e4SLinus Torvalds 		return PAGE_ACTIVATE;
569703c2708STejun Heo 	if (!may_write_to_inode(mapping->host, sc))
5701da177e4SLinus Torvalds 		return PAGE_KEEP;
5711da177e4SLinus Torvalds 
5721da177e4SLinus Torvalds 	if (clear_page_dirty_for_io(page)) {
5731da177e4SLinus Torvalds 		int res;
5741da177e4SLinus Torvalds 		struct writeback_control wbc = {
5751da177e4SLinus Torvalds 			.sync_mode = WB_SYNC_NONE,
5761da177e4SLinus Torvalds 			.nr_to_write = SWAP_CLUSTER_MAX,
577111ebb6eSOGAWA Hirofumi 			.range_start = 0,
578111ebb6eSOGAWA Hirofumi 			.range_end = LLONG_MAX,
5791da177e4SLinus Torvalds 			.for_reclaim = 1,
5801da177e4SLinus Torvalds 		};
5811da177e4SLinus Torvalds 
5821da177e4SLinus Torvalds 		SetPageReclaim(page);
5831da177e4SLinus Torvalds 		res = mapping->a_ops->writepage(page, &wbc);
5841da177e4SLinus Torvalds 		if (res < 0)
5851da177e4SLinus Torvalds 			handle_write_error(mapping, page, res);
586994fc28cSZach Brown 		if (res == AOP_WRITEPAGE_ACTIVATE) {
5871da177e4SLinus Torvalds 			ClearPageReclaim(page);
5881da177e4SLinus Torvalds 			return PAGE_ACTIVATE;
5891da177e4SLinus Torvalds 		}
590c661b078SAndy Whitcroft 
5911da177e4SLinus Torvalds 		if (!PageWriteback(page)) {
5921da177e4SLinus Torvalds 			/* synchronous write or broken a_ops? */
5931da177e4SLinus Torvalds 			ClearPageReclaim(page);
5941da177e4SLinus Torvalds 		}
5953aa23851Syalin wang 		trace_mm_vmscan_writepage(page);
596e129b5c2SAndrew Morton 		inc_zone_page_state(page, NR_VMSCAN_WRITE);
5971da177e4SLinus Torvalds 		return PAGE_SUCCESS;
5981da177e4SLinus Torvalds 	}
5991da177e4SLinus Torvalds 
6001da177e4SLinus Torvalds 	return PAGE_CLEAN;
6011da177e4SLinus Torvalds }
6021da177e4SLinus Torvalds 
603a649fd92SAndrew Morton /*
604e286781dSNick Piggin  * Same as remove_mapping, but if the page is removed from the mapping, it
605e286781dSNick Piggin  * gets returned with a refcount of 0.
606a649fd92SAndrew Morton  */
607a528910eSJohannes Weiner static int __remove_mapping(struct address_space *mapping, struct page *page,
608a528910eSJohannes Weiner 			    bool reclaimed)
60949d2e9ccSChristoph Lameter {
610c4843a75SGreg Thelen 	unsigned long flags;
611c4843a75SGreg Thelen 
61228e4d965SNick Piggin 	BUG_ON(!PageLocked(page));
61328e4d965SNick Piggin 	BUG_ON(mapping != page_mapping(page));
61449d2e9ccSChristoph Lameter 
615c4843a75SGreg Thelen 	spin_lock_irqsave(&mapping->tree_lock, flags);
61649d2e9ccSChristoph Lameter 	/*
6170fd0e6b0SNick Piggin 	 * The non-racy check for a busy page.
6180fd0e6b0SNick Piggin 	 *
6190fd0e6b0SNick Piggin 	 * Must be careful with the order of the tests. When someone has
6200fd0e6b0SNick Piggin 	 * a ref to the page, it may be possible that they dirty it then
6210fd0e6b0SNick Piggin 	 * drop the reference. So if PageDirty is tested before page_count
6220fd0e6b0SNick Piggin 	 * here, then the following race may occur:
6230fd0e6b0SNick Piggin 	 *
6240fd0e6b0SNick Piggin 	 * get_user_pages(&page);
6250fd0e6b0SNick Piggin 	 * [user mapping goes away]
6260fd0e6b0SNick Piggin 	 * write_to(page);
6270fd0e6b0SNick Piggin 	 *				!PageDirty(page)    [good]
6280fd0e6b0SNick Piggin 	 * SetPageDirty(page);
6290fd0e6b0SNick Piggin 	 * put_page(page);
6300fd0e6b0SNick Piggin 	 *				!page_count(page)   [good, discard it]
6310fd0e6b0SNick Piggin 	 *
6320fd0e6b0SNick Piggin 	 * [oops, our write_to data is lost]
6330fd0e6b0SNick Piggin 	 *
6340fd0e6b0SNick Piggin 	 * Reversing the order of the tests ensures such a situation cannot
6350fd0e6b0SNick Piggin 	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
6360139aa7bSJoonsoo Kim 	 * load is not satisfied before that of page->_refcount.
6370fd0e6b0SNick Piggin 	 *
6380fd0e6b0SNick Piggin 	 * Note that if SetPageDirty is always performed via set_page_dirty,
6390fd0e6b0SNick Piggin 	 * and thus under tree_lock, then this ordering is not required.
64049d2e9ccSChristoph Lameter 	 */
641fe896d18SJoonsoo Kim 	if (!page_ref_freeze(page, 2))
64249d2e9ccSChristoph Lameter 		goto cannot_free;
643e286781dSNick Piggin 	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
644e286781dSNick Piggin 	if (unlikely(PageDirty(page))) {
645fe896d18SJoonsoo Kim 		page_ref_unfreeze(page, 2);
64649d2e9ccSChristoph Lameter 		goto cannot_free;
647e286781dSNick Piggin 	}
64849d2e9ccSChristoph Lameter 
64949d2e9ccSChristoph Lameter 	if (PageSwapCache(page)) {
65049d2e9ccSChristoph Lameter 		swp_entry_t swap = { .val = page_private(page) };
6510a31bc97SJohannes Weiner 		mem_cgroup_swapout(page, swap);
65249d2e9ccSChristoph Lameter 		__delete_from_swap_cache(page);
653c4843a75SGreg Thelen 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
6540a31bc97SJohannes Weiner 		swapcache_free(swap);
655e286781dSNick Piggin 	} else {
6566072d13cSLinus Torvalds 		void (*freepage)(struct page *);
657a528910eSJohannes Weiner 		void *shadow = NULL;
6586072d13cSLinus Torvalds 
6596072d13cSLinus Torvalds 		freepage = mapping->a_ops->freepage;
660a528910eSJohannes Weiner 		/*
661a528910eSJohannes Weiner 		 * Remember a shadow entry for reclaimed file cache in
662a528910eSJohannes Weiner 		 * order to detect refaults, thus thrashing, later on.
663a528910eSJohannes Weiner 		 *
664a528910eSJohannes Weiner 		 * But don't store shadows in an address space that is
665a528910eSJohannes Weiner 		 * already exiting.  This is not just an optimization,
666a528910eSJohannes Weiner 		 * inode reclaim needs to empty out the radix tree or
667a528910eSJohannes Weiner 		 * the nodes are lost.  Don't plant shadows behind its
668a528910eSJohannes Weiner 		 * back.
669f9fe48beSRoss Zwisler 		 *
670f9fe48beSRoss Zwisler 		 * We also don't store shadows for DAX mappings because the
671f9fe48beSRoss Zwisler 		 * only page cache pages found in these are zero pages
672f9fe48beSRoss Zwisler 		 * covering holes, and because we don't want to mix DAX
673f9fe48beSRoss Zwisler 		 * exceptional entries and shadow exceptional entries in the
674f9fe48beSRoss Zwisler 		 * same page_tree.
675a528910eSJohannes Weiner 		 */
676a528910eSJohannes Weiner 		if (reclaimed && page_is_file_cache(page) &&
677f9fe48beSRoss Zwisler 		    !mapping_exiting(mapping) && !dax_mapping(mapping))
678a528910eSJohannes Weiner 			shadow = workingset_eviction(mapping, page);
67962cccb8cSJohannes Weiner 		__delete_from_page_cache(page, shadow);
680c4843a75SGreg Thelen 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
6816072d13cSLinus Torvalds 
6826072d13cSLinus Torvalds 		if (freepage != NULL)
6836072d13cSLinus Torvalds 			freepage(page);
684e286781dSNick Piggin 	}
685e286781dSNick Piggin 
68649d2e9ccSChristoph Lameter 	return 1;
68749d2e9ccSChristoph Lameter 
68849d2e9ccSChristoph Lameter cannot_free:
689c4843a75SGreg Thelen 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
69049d2e9ccSChristoph Lameter 	return 0;
69149d2e9ccSChristoph Lameter }
69249d2e9ccSChristoph Lameter 
6931da177e4SLinus Torvalds /*
694e286781dSNick Piggin  * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
695e286781dSNick Piggin  * someone else has a ref on the page, abort and return 0.  If it was
696e286781dSNick Piggin  * successfully detached, return 1.  Assumes the caller has a single ref on
697e286781dSNick Piggin  * this page.
698e286781dSNick Piggin  */
699e286781dSNick Piggin int remove_mapping(struct address_space *mapping, struct page *page)
700e286781dSNick Piggin {
701a528910eSJohannes Weiner 	if (__remove_mapping(mapping, page, false)) {
702e286781dSNick Piggin 		/*
703e286781dSNick Piggin 		 * Unfreezing the refcount with 1 rather than 2 effectively
704e286781dSNick Piggin 		 * drops the pagecache ref for us without requiring another
705e286781dSNick Piggin 		 * atomic operation.
706e286781dSNick Piggin 		 */
707fe896d18SJoonsoo Kim 		page_ref_unfreeze(page, 1);
708e286781dSNick Piggin 		return 1;
709e286781dSNick Piggin 	}
710e286781dSNick Piggin 	return 0;
711e286781dSNick Piggin }
712e286781dSNick Piggin 
713894bc310SLee Schermerhorn /**
714894bc310SLee Schermerhorn  * putback_lru_page - put previously isolated page onto appropriate LRU list
715894bc310SLee Schermerhorn  * @page: page to be put back to appropriate lru list
716894bc310SLee Schermerhorn  *
717894bc310SLee Schermerhorn  * Add previously isolated @page to appropriate LRU list.
718894bc310SLee Schermerhorn  * Page may still be unevictable for other reasons.
719894bc310SLee Schermerhorn  *
720894bc310SLee Schermerhorn  * lru_lock must not be held, interrupts must be enabled.
721894bc310SLee Schermerhorn  */
722894bc310SLee Schermerhorn void putback_lru_page(struct page *page)
723894bc310SLee Schermerhorn {
7240ec3b74cSVlastimil Babka 	bool is_unevictable;
725bbfd28eeSLee Schermerhorn 	int was_unevictable = PageUnevictable(page);
726894bc310SLee Schermerhorn 
727309381feSSasha Levin 	VM_BUG_ON_PAGE(PageLRU(page), page);
728894bc310SLee Schermerhorn 
729894bc310SLee Schermerhorn redo:
730894bc310SLee Schermerhorn 	ClearPageUnevictable(page);
731894bc310SLee Schermerhorn 
73239b5f29aSHugh Dickins 	if (page_evictable(page)) {
733894bc310SLee Schermerhorn 		/*
734894bc310SLee Schermerhorn 		 * For evictable pages, we can use the cache.
735894bc310SLee Schermerhorn 		 * In event of a race, worst case is we end up with an
736894bc310SLee Schermerhorn 		 * unevictable page on [in]active list.
737894bc310SLee Schermerhorn 		 * We know how to handle that.
738894bc310SLee Schermerhorn 		 */
7390ec3b74cSVlastimil Babka 		is_unevictable = false;
740c53954a0SMel Gorman 		lru_cache_add(page);
741894bc310SLee Schermerhorn 	} else {
742894bc310SLee Schermerhorn 		/*
743894bc310SLee Schermerhorn 		 * Put unevictable pages directly on zone's unevictable
744894bc310SLee Schermerhorn 		 * list.
745894bc310SLee Schermerhorn 		 */
7460ec3b74cSVlastimil Babka 		is_unevictable = true;
747894bc310SLee Schermerhorn 		add_page_to_unevictable_list(page);
7486a7b9548SJohannes Weiner 		/*
74921ee9f39SMinchan Kim 		 * When racing with an mlock or AS_UNEVICTABLE clearing
75021ee9f39SMinchan Kim 		 * (page is unlocked) make sure that if the other thread
75121ee9f39SMinchan Kim 		 * does not observe our setting of PG_lru and fails
75224513264SHugh Dickins 		 * isolation/check_move_unevictable_pages,
75321ee9f39SMinchan Kim 		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
7546a7b9548SJohannes Weiner 		 * the page back to the evictable list.
7556a7b9548SJohannes Weiner 		 *
75621ee9f39SMinchan Kim 		 * The other side is TestClearPageMlocked() or shmem_lock().
7576a7b9548SJohannes Weiner 		 */
7586a7b9548SJohannes Weiner 		smp_mb();
759894bc310SLee Schermerhorn 	}
760894bc310SLee Schermerhorn 
761894bc310SLee Schermerhorn 	/*
762894bc310SLee Schermerhorn 	 * The page's status can change while we move it among LRUs. If an
763894bc310SLee Schermerhorn 	 * evictable page ends up on the unevictable list, it will never be
764894bc310SLee Schermerhorn 	 * freed. To avoid that, check again after we have added it to the list.
765894bc310SLee Schermerhorn 	 */
7660ec3b74cSVlastimil Babka 	if (is_unevictable && page_evictable(page)) {
767894bc310SLee Schermerhorn 		if (!isolate_lru_page(page)) {
768894bc310SLee Schermerhorn 			put_page(page);
769894bc310SLee Schermerhorn 			goto redo;
770894bc310SLee Schermerhorn 		}
771894bc310SLee Schermerhorn 		/* This means someone else dropped this page from LRU
772894bc310SLee Schermerhorn 		 * So, it will be freed or putback to LRU again. There is
773894bc310SLee Schermerhorn 		 * nothing to do here.
774894bc310SLee Schermerhorn 		 */
775894bc310SLee Schermerhorn 	}
776894bc310SLee Schermerhorn 
7770ec3b74cSVlastimil Babka 	if (was_unevictable && !is_unevictable)
778bbfd28eeSLee Schermerhorn 		count_vm_event(UNEVICTABLE_PGRESCUED);
7790ec3b74cSVlastimil Babka 	else if (!was_unevictable && is_unevictable)
780bbfd28eeSLee Schermerhorn 		count_vm_event(UNEVICTABLE_PGCULLED);
781bbfd28eeSLee Schermerhorn 
782894bc310SLee Schermerhorn 	put_page(page);		/* drop ref from isolate */
783894bc310SLee Schermerhorn }
784894bc310SLee Schermerhorn 
785dfc8d636SJohannes Weiner enum page_references {
786dfc8d636SJohannes Weiner 	PAGEREF_RECLAIM,
787dfc8d636SJohannes Weiner 	PAGEREF_RECLAIM_CLEAN,
78864574746SJohannes Weiner 	PAGEREF_KEEP,
789dfc8d636SJohannes Weiner 	PAGEREF_ACTIVATE,
790dfc8d636SJohannes Weiner };
791dfc8d636SJohannes Weiner 
792dfc8d636SJohannes Weiner static enum page_references page_check_references(struct page *page,
793dfc8d636SJohannes Weiner 						  struct scan_control *sc)
794dfc8d636SJohannes Weiner {
79564574746SJohannes Weiner 	int referenced_ptes, referenced_page;
796dfc8d636SJohannes Weiner 	unsigned long vm_flags;
797dfc8d636SJohannes Weiner 
798c3ac9a8aSJohannes Weiner 	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
799c3ac9a8aSJohannes Weiner 					  &vm_flags);
80064574746SJohannes Weiner 	referenced_page = TestClearPageReferenced(page);
801dfc8d636SJohannes Weiner 
802dfc8d636SJohannes Weiner 	/*
803dfc8d636SJohannes Weiner 	 * Mlock lost the isolation race with us.  Let try_to_unmap()
804dfc8d636SJohannes Weiner 	 * move the page to the unevictable list.
805dfc8d636SJohannes Weiner 	 */
806dfc8d636SJohannes Weiner 	if (vm_flags & VM_LOCKED)
807dfc8d636SJohannes Weiner 		return PAGEREF_RECLAIM;
808dfc8d636SJohannes Weiner 
80964574746SJohannes Weiner 	if (referenced_ptes) {
810e4898273SMichal Hocko 		if (PageSwapBacked(page))
81164574746SJohannes Weiner 			return PAGEREF_ACTIVATE;
81264574746SJohannes Weiner 		/*
81364574746SJohannes Weiner 		 * All mapped pages start out with page table
81464574746SJohannes Weiner 		 * references from the instantiating fault, so we need
81564574746SJohannes Weiner 		 * to look twice if a mapped file page is used more
81664574746SJohannes Weiner 		 * than once.
81764574746SJohannes Weiner 		 *
81864574746SJohannes Weiner 		 * Mark it and spare it for another trip around the
81964574746SJohannes Weiner 		 * inactive list.  Another page table reference will
82064574746SJohannes Weiner 		 * lead to its activation.
82164574746SJohannes Weiner 		 *
82264574746SJohannes Weiner 		 * Note: the mark is set for activated pages as well
82364574746SJohannes Weiner 		 * so that recently deactivated but used pages are
82464574746SJohannes Weiner 		 * quickly recovered.
82564574746SJohannes Weiner 		 */
82664574746SJohannes Weiner 		SetPageReferenced(page);
82764574746SJohannes Weiner 
82834dbc67aSKonstantin Khlebnikov 		if (referenced_page || referenced_ptes > 1)
829dfc8d636SJohannes Weiner 			return PAGEREF_ACTIVATE;
830dfc8d636SJohannes Weiner 
831c909e993SKonstantin Khlebnikov 		/*
832c909e993SKonstantin Khlebnikov 		 * Activate file-backed executable pages after first usage.
833c909e993SKonstantin Khlebnikov 		 */
834c909e993SKonstantin Khlebnikov 		if (vm_flags & VM_EXEC)
835c909e993SKonstantin Khlebnikov 			return PAGEREF_ACTIVATE;
836c909e993SKonstantin Khlebnikov 
83764574746SJohannes Weiner 		return PAGEREF_KEEP;
83864574746SJohannes Weiner 	}
83964574746SJohannes Weiner 
840dfc8d636SJohannes Weiner 	/* Reclaim if clean, defer dirty pages to writeback */
8412e30244aSKOSAKI Motohiro 	if (referenced_page && !PageSwapBacked(page))
842dfc8d636SJohannes Weiner 		return PAGEREF_RECLAIM_CLEAN;
84364574746SJohannes Weiner 
84464574746SJohannes Weiner 	return PAGEREF_RECLAIM;
845dfc8d636SJohannes Weiner }
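
/*
 * Compact summary of the decision above, derived from the code as a quick
 * reference:
 *
 *	vma with VM_LOCKED			-> PAGEREF_RECLAIM (culled as mlocked)
 *	referenced PTEs, swap-backed page	-> PAGEREF_ACTIVATE
 *	referenced PTEs, PG_referenced or >1	-> PAGEREF_ACTIVATE
 *	referenced PTEs, VM_EXEC file page	-> PAGEREF_ACTIVATE
 *	one referenced PTE, plain file page	-> PAGEREF_KEEP (second trip)
 *	no referenced PTEs, PG_referenced file	-> PAGEREF_RECLAIM_CLEAN
 *	everything else				-> PAGEREF_RECLAIM
 */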
846dfc8d636SJohannes Weiner 
847e2be15f6SMel Gorman /* Check if a page is dirty or under writeback */
848e2be15f6SMel Gorman static void page_check_dirty_writeback(struct page *page,
849e2be15f6SMel Gorman 				       bool *dirty, bool *writeback)
850e2be15f6SMel Gorman {
851b4597226SMel Gorman 	struct address_space *mapping;
852b4597226SMel Gorman 
853e2be15f6SMel Gorman 	/*
854e2be15f6SMel Gorman 	 * Anonymous pages are not handled by flushers and must be written
855e2be15f6SMel Gorman 	 * from reclaim context. Do not stall reclaim based on them
856e2be15f6SMel Gorman 	 */
857e2be15f6SMel Gorman 	if (!page_is_file_cache(page)) {
858e2be15f6SMel Gorman 		*dirty = false;
859e2be15f6SMel Gorman 		*writeback = false;
860e2be15f6SMel Gorman 		return;
861e2be15f6SMel Gorman 	}
862e2be15f6SMel Gorman 
863e2be15f6SMel Gorman 	/* By default assume that the page flags are accurate */
864e2be15f6SMel Gorman 	*dirty = PageDirty(page);
865e2be15f6SMel Gorman 	*writeback = PageWriteback(page);
866b4597226SMel Gorman 
867b4597226SMel Gorman 	/* Verify dirty/writeback state if the filesystem supports it */
868b4597226SMel Gorman 	if (!page_has_private(page))
869b4597226SMel Gorman 		return;
870b4597226SMel Gorman 
871b4597226SMel Gorman 	mapping = page_mapping(page);
872b4597226SMel Gorman 	if (mapping && mapping->a_ops->is_dirty_writeback)
873b4597226SMel Gorman 		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
874e2be15f6SMel Gorman }
875e2be15f6SMel Gorman 
876e286781dSNick Piggin /*
8771742f19fSAndrew Morton  * shrink_page_list() returns the number of reclaimed pages
8781da177e4SLinus Torvalds  */
8791742f19fSAndrew Morton static unsigned long shrink_page_list(struct list_head *page_list,
8806a18adb3SKonstantin Khlebnikov 				      struct zone *zone,
881f84f6e2bSMel Gorman 				      struct scan_control *sc,
88202c6de8dSMinchan Kim 				      enum ttu_flags ttu_flags,
8838e950282SMel Gorman 				      unsigned long *ret_nr_dirty,
884d43006d5SMel Gorman 				      unsigned long *ret_nr_unqueued_dirty,
8858e950282SMel Gorman 				      unsigned long *ret_nr_congested,
88602c6de8dSMinchan Kim 				      unsigned long *ret_nr_writeback,
887b1a6f21eSMel Gorman 				      unsigned long *ret_nr_immediate,
88802c6de8dSMinchan Kim 				      bool force_reclaim)
8891da177e4SLinus Torvalds {
8901da177e4SLinus Torvalds 	LIST_HEAD(ret_pages);
891abe4c3b5SMel Gorman 	LIST_HEAD(free_pages);
8921da177e4SLinus Torvalds 	int pgactivate = 0;
893d43006d5SMel Gorman 	unsigned long nr_unqueued_dirty = 0;
8940e093d99SMel Gorman 	unsigned long nr_dirty = 0;
8950e093d99SMel Gorman 	unsigned long nr_congested = 0;
89605ff5137SAndrew Morton 	unsigned long nr_reclaimed = 0;
89792df3a72SMel Gorman 	unsigned long nr_writeback = 0;
898b1a6f21eSMel Gorman 	unsigned long nr_immediate = 0;
8991da177e4SLinus Torvalds 
9001da177e4SLinus Torvalds 	cond_resched();
9011da177e4SLinus Torvalds 
9021da177e4SLinus Torvalds 	while (!list_empty(page_list)) {
9031da177e4SLinus Torvalds 		struct address_space *mapping;
9041da177e4SLinus Torvalds 		struct page *page;
9051da177e4SLinus Torvalds 		int may_enter_fs;
90602c6de8dSMinchan Kim 		enum page_references references = PAGEREF_RECLAIM_CLEAN;
907e2be15f6SMel Gorman 		bool dirty, writeback;
908854e9ed0SMinchan Kim 		bool lazyfree = false;
909854e9ed0SMinchan Kim 		int ret = SWAP_SUCCESS;
9101da177e4SLinus Torvalds 
9111da177e4SLinus Torvalds 		cond_resched();
9121da177e4SLinus Torvalds 
9131da177e4SLinus Torvalds 		page = lru_to_page(page_list);
9141da177e4SLinus Torvalds 		list_del(&page->lru);
9151da177e4SLinus Torvalds 
916529ae9aaSNick Piggin 		if (!trylock_page(page))
9171da177e4SLinus Torvalds 			goto keep;
9181da177e4SLinus Torvalds 
919309381feSSasha Levin 		VM_BUG_ON_PAGE(PageActive(page), page);
920309381feSSasha Levin 		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
9211da177e4SLinus Torvalds 
9221da177e4SLinus Torvalds 		sc->nr_scanned++;
92380e43426SChristoph Lameter 
92439b5f29aSHugh Dickins 		if (unlikely(!page_evictable(page)))
925b291f000SNick Piggin 			goto cull_mlocked;
926894bc310SLee Schermerhorn 
927a6dc60f8SJohannes Weiner 		if (!sc->may_unmap && page_mapped(page))
92880e43426SChristoph Lameter 			goto keep_locked;
92980e43426SChristoph Lameter 
9301da177e4SLinus Torvalds 		/* Double the slab pressure for mapped and swapcache pages */
9311da177e4SLinus Torvalds 		if (page_mapped(page) || PageSwapCache(page))
9321da177e4SLinus Torvalds 			sc->nr_scanned++;
9331da177e4SLinus Torvalds 
934c661b078SAndy Whitcroft 		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
935c661b078SAndy Whitcroft 			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
936c661b078SAndy Whitcroft 
937e62e384eSMichal Hocko 		/*
938e2be15f6SMel Gorman 		 * The number of dirty pages determines if a zone is marked
939e2be15f6SMel Gorman 		 * reclaim_congested which affects wait_iff_congested. kswapd
940e2be15f6SMel Gorman 		 * will stall and start writing pages if the tail of the LRU
941e2be15f6SMel Gorman 		 * is all dirty unqueued pages.
942e2be15f6SMel Gorman 		 */
943e2be15f6SMel Gorman 		page_check_dirty_writeback(page, &dirty, &writeback);
944e2be15f6SMel Gorman 		if (dirty || writeback)
945e2be15f6SMel Gorman 			nr_dirty++;
946e2be15f6SMel Gorman 
947e2be15f6SMel Gorman 		if (dirty && !writeback)
948e2be15f6SMel Gorman 			nr_unqueued_dirty++;
949e2be15f6SMel Gorman 
950d04e8acdSMel Gorman 		/*
951d04e8acdSMel Gorman 		 * Treat this page as congested if the underlying BDI is or if
952d04e8acdSMel Gorman 		 * pages are cycling through the LRU so quickly that the
953d04e8acdSMel Gorman 		 * pages marked for immediate reclaim are making it to the
954d04e8acdSMel Gorman 		 * end of the LRU a second time.
955d04e8acdSMel Gorman 		 */
956e2be15f6SMel Gorman 		mapping = page_mapping(page);
9571da58ee2SJamie Liu 		if (((dirty || writeback) && mapping &&
958703c2708STejun Heo 		     inode_write_congested(mapping->host)) ||
959d04e8acdSMel Gorman 		    (writeback && PageReclaim(page)))
960e2be15f6SMel Gorman 			nr_congested++;
961e2be15f6SMel Gorman 
962e2be15f6SMel Gorman 		/*
963283aba9fSMel Gorman 		 * If a page at the tail of the LRU is under writeback, there
964283aba9fSMel Gorman 		 * are three cases to consider.
965e62e384eSMichal Hocko 		 *
966283aba9fSMel Gorman 		 * 1) If reclaim is encountering an excessive number of pages
967283aba9fSMel Gorman 		 *    under writeback and this page is both under writeback and
968283aba9fSMel Gorman 		 *    PageReclaim then it indicates that pages are being queued
969283aba9fSMel Gorman 		 *    for IO but are being recycled through the LRU before the
970283aba9fSMel Gorman 		 *    IO can complete. Waiting on the page itself risks an
971283aba9fSMel Gorman 		 *    indefinite stall if it is impossible to writeback the
972283aba9fSMel Gorman 		 *    page due to IO error or disconnected storage so instead
973b1a6f21eSMel Gorman 		 *    note that the LRU is being scanned too quickly and the
974b1a6f21eSMel Gorman 		 *    caller can stall after page list has been processed.
975c3b94f44SHugh Dickins 		 *
97697c9341fSTejun Heo 		 * 2) Global or new memcg reclaim encounters a page that is
977ecf5fc6eSMichal Hocko 		 *    not marked for immediate reclaim, or the caller does not
978ecf5fc6eSMichal Hocko 		 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
979ecf5fc6eSMichal Hocko 		 *    not to fs). In this case mark the page for immediate
98097c9341fSTejun Heo 		 *    reclaim and continue scanning.
981283aba9fSMel Gorman 		 *
982ecf5fc6eSMichal Hocko 		 *    Require may_enter_fs because we would wait on fs, which
983ecf5fc6eSMichal Hocko 		 *    may not have submitted IO yet. And the loop driver might
984283aba9fSMel Gorman 		 *    enter reclaim, and deadlock if it waits on a page for
985283aba9fSMel Gorman 		 *    which it is needed to do the write (loop masks off
986283aba9fSMel Gorman 		 *    __GFP_IO|__GFP_FS for this reason); but more thought
987283aba9fSMel Gorman 		 *    would probably show more reasons.
988283aba9fSMel Gorman 		 *
9897fadc820SHugh Dickins 		 * 3) Legacy memcg encounters a page that is already marked
990283aba9fSMel Gorman 		 *    PageReclaim. memcg does not have any dirty pages
991283aba9fSMel Gorman 		 *    throttling so we could easily OOM just because too many
992283aba9fSMel Gorman 		 *    pages are in writeback and there is nothing else to
993283aba9fSMel Gorman 		 *    reclaim. Wait for the writeback to complete.
994e62e384eSMichal Hocko 		 */
995283aba9fSMel Gorman 		if (PageWriteback(page)) {
996283aba9fSMel Gorman 			/* Case 1 above */
997283aba9fSMel Gorman 			if (current_is_kswapd() &&
998283aba9fSMel Gorman 			    PageReclaim(page) &&
99957054651SJohannes Weiner 			    test_bit(ZONE_WRITEBACK, &zone->flags)) {
1000b1a6f21eSMel Gorman 				nr_immediate++;
1001b1a6f21eSMel Gorman 				goto keep_locked;
1002283aba9fSMel Gorman 
1003283aba9fSMel Gorman 			/* Case 2 above */
100497c9341fSTejun Heo 			} else if (sane_reclaim(sc) ||
1005ecf5fc6eSMichal Hocko 			    !PageReclaim(page) || !may_enter_fs) {
1006c3b94f44SHugh Dickins 				/*
1007c3b94f44SHugh Dickins 				 * This is slightly racy - end_page_writeback()
1008c3b94f44SHugh Dickins 				 * might have just cleared PageReclaim, then
1009c3b94f44SHugh Dickins 				 * setting PageReclaim here may end up interpreted
1010c3b94f44SHugh Dickins 				 * as PageReadahead - but that does not matter
1011c3b94f44SHugh Dickins 				 * enough to care.  What we do want is for this
1012c3b94f44SHugh Dickins 				 * page to have PageReclaim set next time memcg
1013c3b94f44SHugh Dickins 				 * reclaim reaches the tests above, so it will
1014c3b94f44SHugh Dickins 				 * then wait_on_page_writeback() to avoid OOM;
1015c3b94f44SHugh Dickins 				 * and it's also appropriate in global reclaim.
1016c3b94f44SHugh Dickins 				 */
1017c3b94f44SHugh Dickins 				SetPageReclaim(page);
101892df3a72SMel Gorman 				nr_writeback++;
1019c3b94f44SHugh Dickins 				goto keep_locked;
1020283aba9fSMel Gorman 
1021283aba9fSMel Gorman 			/* Case 3 above */
1022283aba9fSMel Gorman 			} else {
10237fadc820SHugh Dickins 				unlock_page(page);
1024c3b94f44SHugh Dickins 				wait_on_page_writeback(page);
10257fadc820SHugh Dickins 				/* then go back and try same page again */
10267fadc820SHugh Dickins 				list_add_tail(&page->lru, page_list);
10277fadc820SHugh Dickins 				continue;
1028e62e384eSMichal Hocko 			}
1029283aba9fSMel Gorman 		}
10301da177e4SLinus Torvalds 
103102c6de8dSMinchan Kim 		if (!force_reclaim)
10326a18adb3SKonstantin Khlebnikov 			references = page_check_references(page, sc);
103302c6de8dSMinchan Kim 
1034dfc8d636SJohannes Weiner 		switch (references) {
1035dfc8d636SJohannes Weiner 		case PAGEREF_ACTIVATE:
10361da177e4SLinus Torvalds 			goto activate_locked;
103764574746SJohannes Weiner 		case PAGEREF_KEEP:
103864574746SJohannes Weiner 			goto keep_locked;
1039dfc8d636SJohannes Weiner 		case PAGEREF_RECLAIM:
1040dfc8d636SJohannes Weiner 		case PAGEREF_RECLAIM_CLEAN:
1041dfc8d636SJohannes Weiner 			; /* try to reclaim the page below */
1042dfc8d636SJohannes Weiner 		}
10431da177e4SLinus Torvalds 
10441da177e4SLinus Torvalds 		/*
10451da177e4SLinus Torvalds 		 * Anonymous process memory has backing store?
10461da177e4SLinus Torvalds 		 * Try to allocate it some swap space here.
10471da177e4SLinus Torvalds 		 */
1048b291f000SNick Piggin 		if (PageAnon(page) && !PageSwapCache(page)) {
104963eb6b93SHugh Dickins 			if (!(sc->gfp_mask & __GFP_IO))
105063eb6b93SHugh Dickins 				goto keep_locked;
10515bc7b8acSShaohua Li 			if (!add_to_swap(page, page_list))
10521da177e4SLinus Torvalds 				goto activate_locked;
1053854e9ed0SMinchan Kim 			lazyfree = true;
105463eb6b93SHugh Dickins 			may_enter_fs = 1;
10551da177e4SLinus Torvalds 
1056e2be15f6SMel Gorman 			/* Adding to swap updated the mapping */
10571da177e4SLinus Torvalds 			mapping = page_mapping(page);
10587751b2daSKirill A. Shutemov 		} else if (unlikely(PageTransHuge(page))) {
10597751b2daSKirill A. Shutemov 			/* Split file THP */
10607751b2daSKirill A. Shutemov 			if (split_huge_page_to_list(page, page_list))
10617751b2daSKirill A. Shutemov 				goto keep_locked;
1062e2be15f6SMel Gorman 		}
10631da177e4SLinus Torvalds 
10647751b2daSKirill A. Shutemov 		VM_BUG_ON_PAGE(PageTransHuge(page), page);
10657751b2daSKirill A. Shutemov 
10661da177e4SLinus Torvalds 		/*
10671da177e4SLinus Torvalds 		 * The page is mapped into the page tables of one or more
10681da177e4SLinus Torvalds 		 * processes. Try to unmap it here.
10691da177e4SLinus Torvalds 		 */
10701da177e4SLinus Torvalds 		if (page_mapped(page) && mapping) {
1071854e9ed0SMinchan Kim 			switch (ret = try_to_unmap(page, lazyfree ?
1072854e9ed0SMinchan Kim 				(ttu_flags | TTU_BATCH_FLUSH | TTU_LZFREE) :
1073854e9ed0SMinchan Kim 				(ttu_flags | TTU_BATCH_FLUSH))) {
10741da177e4SLinus Torvalds 			case SWAP_FAIL:
10751da177e4SLinus Torvalds 				goto activate_locked;
10761da177e4SLinus Torvalds 			case SWAP_AGAIN:
10771da177e4SLinus Torvalds 				goto keep_locked;
1078b291f000SNick Piggin 			case SWAP_MLOCK:
1079b291f000SNick Piggin 				goto cull_mlocked;
1080854e9ed0SMinchan Kim 			case SWAP_LZFREE:
1081854e9ed0SMinchan Kim 				goto lazyfree;
10821da177e4SLinus Torvalds 			case SWAP_SUCCESS:
10831da177e4SLinus Torvalds 				; /* try to free the page below */
10841da177e4SLinus Torvalds 			}
10851da177e4SLinus Torvalds 		}
10861da177e4SLinus Torvalds 
10871da177e4SLinus Torvalds 		if (PageDirty(page)) {
1088ee72886dSMel Gorman 			/*
1089ee72886dSMel Gorman 			 * Only kswapd can write back filesystem pages, to
1090d43006d5SMel Gorman 			 * avoid the risk of stack overflow, and even then only
1091d43006d5SMel Gorman 			 * if many dirty pages have been encountered.
1092ee72886dSMel Gorman 			 */
1093f84f6e2bSMel Gorman 			if (page_is_file_cache(page) &&
10949e3b2f8cSKonstantin Khlebnikov 					(!current_is_kswapd() ||
109557054651SJohannes Weiner 					 !test_bit(ZONE_DIRTY, &zone->flags))) {
109649ea7eb6SMel Gorman 				/*
109749ea7eb6SMel Gorman 				 * Immediately reclaim when written back.
109849ea7eb6SMel Gorman 				 * Similar in principle to deactivate_page()
109949ea7eb6SMel Gorman 				 * except we already have the page isolated
110049ea7eb6SMel Gorman 				 * and know it's dirty
110149ea7eb6SMel Gorman 				 */
110249ea7eb6SMel Gorman 				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
110349ea7eb6SMel Gorman 				SetPageReclaim(page);
110449ea7eb6SMel Gorman 
1105ee72886dSMel Gorman 				goto keep_locked;
1106ee72886dSMel Gorman 			}
1107ee72886dSMel Gorman 
1108dfc8d636SJohannes Weiner 			if (references == PAGEREF_RECLAIM_CLEAN)
11091da177e4SLinus Torvalds 				goto keep_locked;
11104dd4b920SAndrew Morton 			if (!may_enter_fs)
11111da177e4SLinus Torvalds 				goto keep_locked;
111252a8363eSChristoph Lameter 			if (!sc->may_writepage)
11131da177e4SLinus Torvalds 				goto keep_locked;
11141da177e4SLinus Torvalds 
1115d950c947SMel Gorman 			/*
1116d950c947SMel Gorman 			 * Page is dirty. Flush the TLB if a writable entry
1117d950c947SMel Gorman 			 * potentially exists to avoid CPU writes after IO
1118d950c947SMel Gorman 			 * starts and then write it out here.
1119d950c947SMel Gorman 			 */
1120d950c947SMel Gorman 			try_to_unmap_flush_dirty();
11217d3579e8SKOSAKI Motohiro 			switch (pageout(page, mapping, sc)) {
11221da177e4SLinus Torvalds 			case PAGE_KEEP:
11231da177e4SLinus Torvalds 				goto keep_locked;
11241da177e4SLinus Torvalds 			case PAGE_ACTIVATE:
11251da177e4SLinus Torvalds 				goto activate_locked;
11261da177e4SLinus Torvalds 			case PAGE_SUCCESS:
11277d3579e8SKOSAKI Motohiro 				if (PageWriteback(page))
112841ac1999SMel Gorman 					goto keep;
11297d3579e8SKOSAKI Motohiro 				if (PageDirty(page))
11301da177e4SLinus Torvalds 					goto keep;
11317d3579e8SKOSAKI Motohiro 
11321da177e4SLinus Torvalds 				/*
11331da177e4SLinus Torvalds 				 * A synchronous write - probably a ramdisk.  Go
11341da177e4SLinus Torvalds 				 * ahead and try to reclaim the page.
11351da177e4SLinus Torvalds 				 */
1136529ae9aaSNick Piggin 				if (!trylock_page(page))
11371da177e4SLinus Torvalds 					goto keep;
11381da177e4SLinus Torvalds 				if (PageDirty(page) || PageWriteback(page))
11391da177e4SLinus Torvalds 					goto keep_locked;
11401da177e4SLinus Torvalds 				mapping = page_mapping(page);
11411da177e4SLinus Torvalds 			case PAGE_CLEAN:
11421da177e4SLinus Torvalds 				; /* try to free the page below */
11431da177e4SLinus Torvalds 			}
11441da177e4SLinus Torvalds 		}
11451da177e4SLinus Torvalds 
11461da177e4SLinus Torvalds 		/*
11471da177e4SLinus Torvalds 		 * If the page has buffers, try to free the buffer mappings
11481da177e4SLinus Torvalds 		 * associated with this page. If we succeed we try to free
11491da177e4SLinus Torvalds 		 * the page as well.
11501da177e4SLinus Torvalds 		 *
11511da177e4SLinus Torvalds 		 * We do this even if the page is PageDirty().
11521da177e4SLinus Torvalds 		 * try_to_release_page() does not perform I/O, but it is
11531da177e4SLinus Torvalds 		 * possible for a page to have PageDirty set while it is actually
11541da177e4SLinus Torvalds 		 * clean (all its buffers are clean).  This happens if the
11551da177e4SLinus Torvalds 		 * buffers were written out directly, with submit_bh(). ext3
11561da177e4SLinus Torvalds 		 * will do this, as well as the blockdev mapping.
11571da177e4SLinus Torvalds 		 * try_to_release_page() will discover that cleanness and will
11581da177e4SLinus Torvalds 		 * drop the buffers and mark the page clean - it can be freed.
11591da177e4SLinus Torvalds 		 *
11601da177e4SLinus Torvalds 		 * Rarely, pages can have buffers and no ->mapping.  These are
11611da177e4SLinus Torvalds 		 * the pages which were not successfully invalidated in
11621da177e4SLinus Torvalds 		 * truncate_complete_page().  We try to drop those buffers here
11631da177e4SLinus Torvalds 		 * and if that worked, and the page is no longer mapped into
11641da177e4SLinus Torvalds 		 * process address space (page_count == 1) it can be freed.
11651da177e4SLinus Torvalds 		 * Otherwise, leave the page on the LRU so it is swappable.
11661da177e4SLinus Torvalds 		 */
1167266cf658SDavid Howells 		if (page_has_private(page)) {
11681da177e4SLinus Torvalds 			if (!try_to_release_page(page, sc->gfp_mask))
11691da177e4SLinus Torvalds 				goto activate_locked;
1170e286781dSNick Piggin 			if (!mapping && page_count(page) == 1) {
1171e286781dSNick Piggin 				unlock_page(page);
1172e286781dSNick Piggin 				if (put_page_testzero(page))
11731da177e4SLinus Torvalds 					goto free_it;
1174e286781dSNick Piggin 				else {
1175e286781dSNick Piggin 					/*
1176e286781dSNick Piggin 					 * Rare race with a speculative reference:
1177e286781dSNick Piggin 					 * the speculative reference will free
1178e286781dSNick Piggin 					 * this page shortly, so we may
1179e286781dSNick Piggin 					 * increment nr_reclaimed here (and
1180e286781dSNick Piggin 					 * leave it off the LRU).
1181e286781dSNick Piggin 					 */
1182e286781dSNick Piggin 					nr_reclaimed++;
1183e286781dSNick Piggin 					continue;
1184e286781dSNick Piggin 				}
1185e286781dSNick Piggin 			}
11861da177e4SLinus Torvalds 		}
11871da177e4SLinus Torvalds 
1188854e9ed0SMinchan Kim lazyfree:
1189a528910eSJohannes Weiner 		if (!mapping || !__remove_mapping(mapping, page, true))
119049d2e9ccSChristoph Lameter 			goto keep_locked;
11911da177e4SLinus Torvalds 
1192a978d6f5SNick Piggin 		/*
1193a978d6f5SNick Piggin 		 * At this point, we have no other references and there is
1194a978d6f5SNick Piggin 		 * no way to pick any more up (removed from LRU, removed
1195a978d6f5SNick Piggin 		 * from pagecache). Can use non-atomic bitops now (and
1196a978d6f5SNick Piggin 		 * we obviously don't have to worry about waking up a process
1197a978d6f5SNick Piggin 		 * waiting on the page lock, because there are no references).
1198a978d6f5SNick Piggin 		 */
119948c935adSKirill A. Shutemov 		__ClearPageLocked(page);
1200e286781dSNick Piggin free_it:
1201854e9ed0SMinchan Kim 		if (ret == SWAP_LZFREE)
1202854e9ed0SMinchan Kim 			count_vm_event(PGLAZYFREED);
1203854e9ed0SMinchan Kim 
120405ff5137SAndrew Morton 		nr_reclaimed++;
1205abe4c3b5SMel Gorman 
1206abe4c3b5SMel Gorman 		/*
1207abe4c3b5SMel Gorman 		 * Is there a need to call free_page_list() periodically? It would
1208abe4c3b5SMel Gorman 		 * appear not, as the counts should be low.
1209abe4c3b5SMel Gorman 		 */
1210abe4c3b5SMel Gorman 		list_add(&page->lru, &free_pages);
12111da177e4SLinus Torvalds 		continue;
12121da177e4SLinus Torvalds 
1213b291f000SNick Piggin cull_mlocked:
121463d6c5adSHugh Dickins 		if (PageSwapCache(page))
121563d6c5adSHugh Dickins 			try_to_free_swap(page);
1216b291f000SNick Piggin 		unlock_page(page);
1217c54839a7SJaewon Kim 		list_add(&page->lru, &ret_pages);
1218b291f000SNick Piggin 		continue;
1219b291f000SNick Piggin 
12201da177e4SLinus Torvalds activate_locked:
122168a22394SRik van Riel 		/* Not a candidate for swapping, so reclaim swap space. */
12225ccc5abaSVladimir Davydov 		if (PageSwapCache(page) && mem_cgroup_swap_full(page))
1223a2c43eedSHugh Dickins 			try_to_free_swap(page);
1224309381feSSasha Levin 		VM_BUG_ON_PAGE(PageActive(page), page);
12251da177e4SLinus Torvalds 		SetPageActive(page);
12261da177e4SLinus Torvalds 		pgactivate++;
12271da177e4SLinus Torvalds keep_locked:
12281da177e4SLinus Torvalds 		unlock_page(page);
12291da177e4SLinus Torvalds keep:
12301da177e4SLinus Torvalds 		list_add(&page->lru, &ret_pages);
1231309381feSSasha Levin 		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
12321da177e4SLinus Torvalds 	}
1233abe4c3b5SMel Gorman 
1234747db954SJohannes Weiner 	mem_cgroup_uncharge_list(&free_pages);
123572b252aeSMel Gorman 	try_to_unmap_flush();
1236b745bc85SMel Gorman 	free_hot_cold_page_list(&free_pages, true);
1237abe4c3b5SMel Gorman 
12381da177e4SLinus Torvalds 	list_splice(&ret_pages, page_list);
1239f8891e5eSChristoph Lameter 	count_vm_events(PGACTIVATE, pgactivate);
12400a31bc97SJohannes Weiner 
12418e950282SMel Gorman 	*ret_nr_dirty += nr_dirty;
12428e950282SMel Gorman 	*ret_nr_congested += nr_congested;
1243d43006d5SMel Gorman 	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
124492df3a72SMel Gorman 	*ret_nr_writeback += nr_writeback;
1245b1a6f21eSMel Gorman 	*ret_nr_immediate += nr_immediate;
124605ff5137SAndrew Morton 	return nr_reclaimed;
12471da177e4SLinus Torvalds }
12481da177e4SLinus Torvalds 
124902c6de8dSMinchan Kim unsigned long reclaim_clean_pages_from_list(struct zone *zone,
125002c6de8dSMinchan Kim 					    struct list_head *page_list)
125102c6de8dSMinchan Kim {
125202c6de8dSMinchan Kim 	struct scan_control sc = {
125302c6de8dSMinchan Kim 		.gfp_mask = GFP_KERNEL,
125402c6de8dSMinchan Kim 		.priority = DEF_PRIORITY,
125502c6de8dSMinchan Kim 		.may_unmap = 1,
125602c6de8dSMinchan Kim 	};
12578e950282SMel Gorman 	unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
125802c6de8dSMinchan Kim 	struct page *page, *next;
125902c6de8dSMinchan Kim 	LIST_HEAD(clean_pages);
126002c6de8dSMinchan Kim 
126102c6de8dSMinchan Kim 	list_for_each_entry_safe(page, next, page_list, lru) {
1262117aad1eSRafael Aquini 		if (page_is_file_cache(page) && !PageDirty(page) &&
1263b1123ea6SMinchan Kim 		    !__PageMovable(page)) {
126402c6de8dSMinchan Kim 			ClearPageActive(page);
126502c6de8dSMinchan Kim 			list_move(&page->lru, &clean_pages);
126602c6de8dSMinchan Kim 		}
126702c6de8dSMinchan Kim 	}
126802c6de8dSMinchan Kim 
126902c6de8dSMinchan Kim 	ret = shrink_page_list(&clean_pages, zone, &sc,
127002c6de8dSMinchan Kim 			TTU_UNMAP|TTU_IGNORE_ACCESS,
12718e950282SMel Gorman 			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
127202c6de8dSMinchan Kim 	list_splice(&clean_pages, page_list);
127383da7510SChristoph Lameter 	mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
127402c6de8dSMinchan Kim 	return ret;
127502c6de8dSMinchan Kim }
127602c6de8dSMinchan Kim 
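/*
 * Usage sketch (hypothetical caller, for illustration only): a contiguous
 * range allocator that has already isolated candidate pages onto a private
 * list could drop the clean, unmapped file pages up front, roughly as
 *
 *	LIST_HEAD(pages);
 *	... isolate candidate pages onto &pages ...
 *	nr_reclaimed = reclaim_clean_pages_from_list(zone, &pages);
 *	... migrate or retry whatever remains on &pages ...
 *
 * Pages that could not be freed are spliced back onto the input list by
 * the function itself, as shown above.
 */
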
12775ad333ebSAndy Whitcroft /*
12785ad333ebSAndy Whitcroft  * Attempt to remove the specified page from its LRU.  Only take this page
12795ad333ebSAndy Whitcroft  * if it is of the appropriate PageActive status.  Pages which are being
12805ad333ebSAndy Whitcroft  * freed elsewhere are also ignored.
12815ad333ebSAndy Whitcroft  *
12825ad333ebSAndy Whitcroft  * page:	page to consider
12835ad333ebSAndy Whitcroft  * mode:	one of the LRU isolation modes defined above
12845ad333ebSAndy Whitcroft  *
12855ad333ebSAndy Whitcroft  * returns 0 on success, -ve errno on failure.
12865ad333ebSAndy Whitcroft  */
1287f3fd4a61SKonstantin Khlebnikov int __isolate_lru_page(struct page *page, isolate_mode_t mode)
12885ad333ebSAndy Whitcroft {
12895ad333ebSAndy Whitcroft 	int ret = -EINVAL;
12905ad333ebSAndy Whitcroft 
12915ad333ebSAndy Whitcroft 	/* Only take pages on the LRU. */
12925ad333ebSAndy Whitcroft 	if (!PageLRU(page))
12935ad333ebSAndy Whitcroft 		return ret;
12945ad333ebSAndy Whitcroft 
1295e46a2879SMinchan Kim 	/* Compaction should not handle unevictable pages but CMA can do so */
1296e46a2879SMinchan Kim 	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1297894bc310SLee Schermerhorn 		return ret;
1298894bc310SLee Schermerhorn 
12995ad333ebSAndy Whitcroft 	ret = -EBUSY;
130008e552c6SKAMEZAWA Hiroyuki 
1301c8244935SMel Gorman 	/*
1302c8244935SMel Gorman 	 * To minimise LRU disruption, the caller can indicate that it only
1303c8244935SMel Gorman 	 * wants to isolate pages it will be able to operate on without
1304c8244935SMel Gorman 	 * blocking - clean pages for the most part.
1305c8244935SMel Gorman 	 *
1306c8244935SMel Gorman 	 * ISOLATE_CLEAN means that only clean pages should be isolated. This
1307c8244935SMel Gorman 	 * is used by reclaim when it cannot write to backing storage
1308c8244935SMel Gorman 	 *
1309c8244935SMel Gorman 	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages
1310c8244935SMel Gorman 	 * that it is possible to migrate without blocking
1311c8244935SMel Gorman 	 */
1312c8244935SMel Gorman 	if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
1313c8244935SMel Gorman 		/* All the caller can do on PageWriteback is block */
1314c8244935SMel Gorman 		if (PageWriteback(page))
131539deaf85SMinchan Kim 			return ret;
131639deaf85SMinchan Kim 
1317c8244935SMel Gorman 		if (PageDirty(page)) {
1318c8244935SMel Gorman 			struct address_space *mapping;
1319c8244935SMel Gorman 
1320c8244935SMel Gorman 			/* ISOLATE_CLEAN means only clean pages */
1321c8244935SMel Gorman 			if (mode & ISOLATE_CLEAN)
1322c8244935SMel Gorman 				return ret;
1323c8244935SMel Gorman 
1324c8244935SMel Gorman 			/*
1325c8244935SMel Gorman 			 * Only pages without mappings or that have a
1326c8244935SMel Gorman 			 * ->migratepage callback can be migrated
1327c8244935SMel Gorman 			 * without blocking
1328c8244935SMel Gorman 			 */
1329c8244935SMel Gorman 			mapping = page_mapping(page);
1330c8244935SMel Gorman 			if (mapping && !mapping->a_ops->migratepage)
1331c8244935SMel Gorman 				return ret;
1332c8244935SMel Gorman 		}
1333c8244935SMel Gorman 	}
1334c8244935SMel Gorman 
1335f80c0673SMinchan Kim 	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1336f80c0673SMinchan Kim 		return ret;
1337f80c0673SMinchan Kim 
13385ad333ebSAndy Whitcroft 	if (likely(get_page_unless_zero(page))) {
13395ad333ebSAndy Whitcroft 		/*
13405ad333ebSAndy Whitcroft 		 * Be careful not to clear PageLRU until after we're
13415ad333ebSAndy Whitcroft 		 * sure the page is not being freed elsewhere -- the
13425ad333ebSAndy Whitcroft 		 * page release code relies on it.
13435ad333ebSAndy Whitcroft 		 */
13445ad333ebSAndy Whitcroft 		ClearPageLRU(page);
13455ad333ebSAndy Whitcroft 		ret = 0;
13465ad333ebSAndy Whitcroft 	}
13475ad333ebSAndy Whitcroft 
13485ad333ebSAndy Whitcroft 	return ret;
13495ad333ebSAndy Whitcroft }
13505ad333ebSAndy Whitcroft 
135149d2e9ccSChristoph Lameter /*
1352*a52633d8SMel Gorman  * zone_lru_lock is heavily contended.  Some of the functions that
13531da177e4SLinus Torvalds  * shrink the lists perform better by taking out a batch of pages
13541da177e4SLinus Torvalds  * and working on them outside the LRU lock.
13551da177e4SLinus Torvalds  *
13561da177e4SLinus Torvalds  * For pagecache intensive workloads, this function is the hottest
13571da177e4SLinus Torvalds  * spot in the kernel (apart from copy_*_user functions).
13581da177e4SLinus Torvalds  *
13591da177e4SLinus Torvalds  * Appropriate locks must be held before calling this function.
13601da177e4SLinus Torvalds  *
13611da177e4SLinus Torvalds  * @nr_to_scan:	The number of pages to look through on the list.
13625dc35979SKonstantin Khlebnikov  * @lruvec:	The LRU vector to pull pages from.
13631da177e4SLinus Torvalds  * @dst:	The temp list to put pages on to.
1364f626012dSHugh Dickins  * @nr_scanned:	The number of pages that were scanned.
1365fe2c2a10SRik van Riel  * @sc:		The scan_control struct for this reclaim session
13665ad333ebSAndy Whitcroft  * @mode:	One of the LRU isolation modes
13673cb99451SKonstantin Khlebnikov  * @lru:	LRU list id for isolating
13681da177e4SLinus Torvalds  *
13691da177e4SLinus Torvalds  * returns how many pages were moved onto *@dst.
13701da177e4SLinus Torvalds  */
137169e05944SAndrew Morton static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
13725dc35979SKonstantin Khlebnikov 		struct lruvec *lruvec, struct list_head *dst,
1373fe2c2a10SRik van Riel 		unsigned long *nr_scanned, struct scan_control *sc,
13743cb99451SKonstantin Khlebnikov 		isolate_mode_t mode, enum lru_list lru)
13751da177e4SLinus Torvalds {
137675b00af7SHugh Dickins 	struct list_head *src = &lruvec->lists[lru];
137769e05944SAndrew Morton 	unsigned long nr_taken = 0;
1378c9b02d97SWu Fengguang 	unsigned long scan;
13791da177e4SLinus Torvalds 
13800b802f10SVladimir Davydov 	for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
13810b802f10SVladimir Davydov 					!list_empty(src); scan++) {
13825ad333ebSAndy Whitcroft 		struct page *page;
13835ad333ebSAndy Whitcroft 
13841da177e4SLinus Torvalds 		page = lru_to_page(src);
13851da177e4SLinus Torvalds 		prefetchw_prev_lru_page(page, src, flags);
13861da177e4SLinus Torvalds 
1387309381feSSasha Levin 		VM_BUG_ON_PAGE(!PageLRU(page), page);
13888d438f96SNick Piggin 
1389f3fd4a61SKonstantin Khlebnikov 		switch (__isolate_lru_page(page, mode)) {
13905ad333ebSAndy Whitcroft 		case 0:
13919d5e6a9fSHugh Dickins 			nr_taken += hpage_nr_pages(page);
13925ad333ebSAndy Whitcroft 			list_move(&page->lru, dst);
13935ad333ebSAndy Whitcroft 			break;
13947c8ee9a8SNick Piggin 
13955ad333ebSAndy Whitcroft 		case -EBUSY:
13965ad333ebSAndy Whitcroft 			/* else it is being freed elsewhere */
13975ad333ebSAndy Whitcroft 			list_move(&page->lru, src);
13985ad333ebSAndy Whitcroft 			continue;
13995ad333ebSAndy Whitcroft 
14005ad333ebSAndy Whitcroft 		default:
14015ad333ebSAndy Whitcroft 			BUG();
14025ad333ebSAndy Whitcroft 		}
14035ad333ebSAndy Whitcroft 	}
14041da177e4SLinus Torvalds 
1405f626012dSHugh Dickins 	*nr_scanned = scan;
140675b00af7SHugh Dickins 	trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
140775b00af7SHugh Dickins 				    nr_taken, mode, is_file_lru(lru));
14081da177e4SLinus Torvalds 	return nr_taken;
14091da177e4SLinus Torvalds }
14101da177e4SLinus Torvalds 
141162695a84SNick Piggin /**
141262695a84SNick Piggin  * isolate_lru_page - tries to isolate a page from its LRU list
141362695a84SNick Piggin  * @page: page to isolate from its LRU list
141462695a84SNick Piggin  *
141562695a84SNick Piggin  * Isolates a @page from an LRU list, clears PageLRU and adjusts the
141662695a84SNick Piggin  * vmstat statistic corresponding to whatever LRU list the page was on.
141762695a84SNick Piggin  *
141862695a84SNick Piggin  * Returns 0 if the page was removed from an LRU list.
141962695a84SNick Piggin  * Returns -EBUSY if the page was not on an LRU list.
142062695a84SNick Piggin  *
142162695a84SNick Piggin  * The returned page will have PageLRU() cleared.  If it was found on
1422894bc310SLee Schermerhorn  * the active list, it will have PageActive set.  If it was found on
1423894bc310SLee Schermerhorn  * the unevictable list, it will have the PageUnevictable bit set. That flag
1424894bc310SLee Schermerhorn  * may need to be cleared by the caller before letting the page go.
142562695a84SNick Piggin  *
142662695a84SNick Piggin  * The vmstat statistic corresponding to the list on which the page was
142762695a84SNick Piggin  * found will be decremented.
142862695a84SNick Piggin  *
142962695a84SNick Piggin  * Restrictions:
143062695a84SNick Piggin  * (1) Must be called with an elevated refcount on the page. This is a
143162695a84SNick Piggin  *     fundamental difference from isolate_lru_pages (which is called
143262695a84SNick Piggin  *     without a stable reference).
143362695a84SNick Piggin  * (2) the lru_lock must not be held.
143462695a84SNick Piggin  * (3) interrupts must be enabled.
143562695a84SNick Piggin  */
143662695a84SNick Piggin int isolate_lru_page(struct page *page)
143762695a84SNick Piggin {
143862695a84SNick Piggin 	int ret = -EBUSY;
143962695a84SNick Piggin 
1440309381feSSasha Levin 	VM_BUG_ON_PAGE(!page_count(page), page);
1441cf2a82eeSKirill A. Shutemov 	WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
14420c917313SKonstantin Khlebnikov 
144362695a84SNick Piggin 	if (PageLRU(page)) {
144462695a84SNick Piggin 		struct zone *zone = page_zone(page);
1445fa9add64SHugh Dickins 		struct lruvec *lruvec;
144662695a84SNick Piggin 
1447*a52633d8SMel Gorman 		spin_lock_irq(zone_lru_lock(zone));
1448fa9add64SHugh Dickins 		lruvec = mem_cgroup_page_lruvec(page, zone);
14490c917313SKonstantin Khlebnikov 		if (PageLRU(page)) {
1450894bc310SLee Schermerhorn 			int lru = page_lru(page);
14510c917313SKonstantin Khlebnikov 			get_page(page);
145262695a84SNick Piggin 			ClearPageLRU(page);
1453fa9add64SHugh Dickins 			del_page_from_lru_list(page, lruvec, lru);
1454fa9add64SHugh Dickins 			ret = 0;
145562695a84SNick Piggin 		}
1456*a52633d8SMel Gorman 		spin_unlock_irq(zone_lru_lock(zone));
145762695a84SNick Piggin 	}
145862695a84SNick Piggin 	return ret;
145962695a84SNick Piggin }
146062695a84SNick Piggin 
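/*
 * Usage sketch (hypothetical caller, for illustration only): honouring the
 * restrictions documented above, a caller takes a reference of its own
 * before isolating and drops it afterwards, roughly as
 *
 *	get_page(page);
 *	if (!isolate_lru_page(page)) {
 *		... operate on the page while it is off the LRU ...
 *		putback_lru_page(page);
 *	}
 *	put_page(page);
 *
 * with zone_lru_lock not held and interrupts enabled at the call site.
 */
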
14615ad333ebSAndy Whitcroft /*
1462d37dd5dcSFengguang Wu  * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1463d37dd5dcSFengguang Wu  * then get rescheduled. When there is a massive number of tasks doing page
1464d37dd5dcSFengguang Wu  * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
1465d37dd5dcSFengguang Wu  * the LRU list will then go small and be scanned faster than necessary, leading to
1466d37dd5dcSFengguang Wu  * unnecessary swapping, thrashing and OOM.
146735cd7815SRik van Riel  */
146835cd7815SRik van Riel static int too_many_isolated(struct zone *zone, int file,
146935cd7815SRik van Riel 		struct scan_control *sc)
147035cd7815SRik van Riel {
147135cd7815SRik van Riel 	unsigned long inactive, isolated;
147235cd7815SRik van Riel 
147335cd7815SRik van Riel 	if (current_is_kswapd())
147435cd7815SRik van Riel 		return 0;
147535cd7815SRik van Riel 
147697c9341fSTejun Heo 	if (!sane_reclaim(sc))
147735cd7815SRik van Riel 		return 0;
147835cd7815SRik van Riel 
147935cd7815SRik van Riel 	if (file) {
148035cd7815SRik van Riel 		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
148135cd7815SRik van Riel 		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
148235cd7815SRik van Riel 	} else {
148335cd7815SRik van Riel 		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
148435cd7815SRik van Riel 		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
148535cd7815SRik van Riel 	}
148635cd7815SRik van Riel 
14873cf23841SFengguang Wu 	/*
14883cf23841SFengguang Wu 	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
14893cf23841SFengguang Wu 	 * won't get blocked by normal direct-reclaimers, forming a circular
14903cf23841SFengguang Wu 	 * deadlock.
14913cf23841SFengguang Wu 	 */
1492d0164adcSMel Gorman 	if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
14933cf23841SFengguang Wu 		inactive >>= 3;
14943cf23841SFengguang Wu 
149535cd7815SRik van Riel 	return isolated > inactive;
149635cd7815SRik van Riel }
149735cd7815SRik van Riel 
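/*
 * Illustrative, user-space sketch (not part of vmscan.c): a small model of
 * the too_many_isolated() heuristic above. The counter values and the
 * can_enter_fs_and_io flag are hypothetical inputs; the kernel reads them
 * from per-zone vmstat counters and from the scan_control gfp_mask instead.
 */
#include <stdbool.h>
#include <stdio.h>

/* Model: should a direct reclaimer wait before isolating more pages? */
static bool too_many_isolated_model(unsigned long inactive,
				    unsigned long isolated,
				    bool can_enter_fs_and_io)
{
	/*
	 * Mirror "inactive >>= 3": callers that may enter both FS and IO
	 * are compared against a threshold scaled down by 8, so that
	 * GFP_NOIO/GFP_NOFS callers are effectively allowed to isolate
	 * more pages before being throttled.
	 */
	if (can_enter_fs_and_io)
		inactive >>= 3;

	return isolated > inactive;
}

int main(void)
{
	/* Hypothetical snapshot: 64k inactive file pages, 10k isolated. */
	printf("GFP_KERNEL-like caller throttles: %d\n",
	       too_many_isolated_model(65536, 10240, true));
	printf("GFP_NOFS-like caller throttles:   %d\n",
	       too_many_isolated_model(65536, 10240, false));
	return 0;
}
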
149866635629SMel Gorman static noinline_for_stack void
149975b00af7SHugh Dickins putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
150066635629SMel Gorman {
150127ac81d8SKonstantin Khlebnikov 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
150227ac81d8SKonstantin Khlebnikov 	struct zone *zone = lruvec_zone(lruvec);
15033f79768fSHugh Dickins 	LIST_HEAD(pages_to_free);
150466635629SMel Gorman 
150566635629SMel Gorman 	/*
150666635629SMel Gorman 	 * Put back any unfreeable pages.
150766635629SMel Gorman 	 */
150866635629SMel Gorman 	while (!list_empty(page_list)) {
15093f79768fSHugh Dickins 		struct page *page = lru_to_page(page_list);
151066635629SMel Gorman 		int lru;
15113f79768fSHugh Dickins 
1512309381feSSasha Levin 		VM_BUG_ON_PAGE(PageLRU(page), page);
151366635629SMel Gorman 		list_del(&page->lru);
151439b5f29aSHugh Dickins 		if (unlikely(!page_evictable(page))) {
1515*a52633d8SMel Gorman 			spin_unlock_irq(zone_lru_lock(zone));
151666635629SMel Gorman 			putback_lru_page(page);
1517*a52633d8SMel Gorman 			spin_lock_irq(zone_lru_lock(zone));
151866635629SMel Gorman 			continue;
151966635629SMel Gorman 		}
1520fa9add64SHugh Dickins 
1521fa9add64SHugh Dickins 		lruvec = mem_cgroup_page_lruvec(page, zone);
1522fa9add64SHugh Dickins 
15237a608572SLinus Torvalds 		SetPageLRU(page);
152466635629SMel Gorman 		lru = page_lru(page);
1525fa9add64SHugh Dickins 		add_page_to_lru_list(page, lruvec, lru);
1526fa9add64SHugh Dickins 
152766635629SMel Gorman 		if (is_active_lru(lru)) {
152866635629SMel Gorman 			int file = is_file_lru(lru);
15299992af10SRik van Riel 			int numpages = hpage_nr_pages(page);
15309992af10SRik van Riel 			reclaim_stat->recent_rotated[file] += numpages;
153166635629SMel Gorman 		}
15322bcf8879SHugh Dickins 		if (put_page_testzero(page)) {
15332bcf8879SHugh Dickins 			__ClearPageLRU(page);
15342bcf8879SHugh Dickins 			__ClearPageActive(page);
1535fa9add64SHugh Dickins 			del_page_from_lru_list(page, lruvec, lru);
15362bcf8879SHugh Dickins 
15372bcf8879SHugh Dickins 			if (unlikely(PageCompound(page))) {
1538*a52633d8SMel Gorman 				spin_unlock_irq(zone_lru_lock(zone));
1539747db954SJohannes Weiner 				mem_cgroup_uncharge(page);
15402bcf8879SHugh Dickins 				(*get_compound_page_dtor(page))(page);
1541*a52633d8SMel Gorman 				spin_lock_irq(zone_lru_lock(zone));
15422bcf8879SHugh Dickins 			} else
15432bcf8879SHugh Dickins 				list_add(&page->lru, &pages_to_free);
154466635629SMel Gorman 		}
154566635629SMel Gorman 	}
154666635629SMel Gorman 
15473f79768fSHugh Dickins 	/*
15483f79768fSHugh Dickins 	 * To save our caller's stack, now use input list for pages to free.
15493f79768fSHugh Dickins 	 * To save our caller's stack, now reuse the input list for the pages to free.
15503f79768fSHugh Dickins 	list_splice(&pages_to_free, page_list);
155166635629SMel Gorman }
155266635629SMel Gorman 
155366635629SMel Gorman /*
1554399ba0b9SNeilBrown  * If a kernel thread (such as nfsd for loop-back mounts) services
1555399ba0b9SNeilBrown  * a backing device by writing to the page cache it sets PF_LESS_THROTTLE.
1556399ba0b9SNeilBrown  * In that case we should only throttle if the backing device it is
1557399ba0b9SNeilBrown  * writing to is congested.  In other cases it is safe to throttle.
1558399ba0b9SNeilBrown  */
1559399ba0b9SNeilBrown static int current_may_throttle(void)
1560399ba0b9SNeilBrown {
1561399ba0b9SNeilBrown 	return !(current->flags & PF_LESS_THROTTLE) ||
1562399ba0b9SNeilBrown 		current->backing_dev_info == NULL ||
1563399ba0b9SNeilBrown 		bdi_write_congested(current->backing_dev_info);
1564399ba0b9SNeilBrown }
1565399ba0b9SNeilBrown 
1566399ba0b9SNeilBrown /*
15671742f19fSAndrew Morton  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
15681742f19fSAndrew Morton  * of reclaimed pages
15691da177e4SLinus Torvalds  */
157066635629SMel Gorman static noinline_for_stack unsigned long
15711a93be0eSKonstantin Khlebnikov shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
15729e3b2f8cSKonstantin Khlebnikov 		     struct scan_control *sc, enum lru_list lru)
15731da177e4SLinus Torvalds {
15741da177e4SLinus Torvalds 	LIST_HEAD(page_list);
1575e247dbceSKOSAKI Motohiro 	unsigned long nr_scanned;
157605ff5137SAndrew Morton 	unsigned long nr_reclaimed = 0;
1577e247dbceSKOSAKI Motohiro 	unsigned long nr_taken;
15788e950282SMel Gorman 	unsigned long nr_dirty = 0;
15798e950282SMel Gorman 	unsigned long nr_congested = 0;
1580e2be15f6SMel Gorman 	unsigned long nr_unqueued_dirty = 0;
158192df3a72SMel Gorman 	unsigned long nr_writeback = 0;
1582b1a6f21eSMel Gorman 	unsigned long nr_immediate = 0;
1583f3fd4a61SKonstantin Khlebnikov 	isolate_mode_t isolate_mode = 0;
15843cb99451SKonstantin Khlebnikov 	int file = is_file_lru(lru);
15851a93be0eSKonstantin Khlebnikov 	struct zone *zone = lruvec_zone(lruvec);
15861a93be0eSKonstantin Khlebnikov 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
158778dc583dSKOSAKI Motohiro 
158835cd7815SRik van Riel 	while (unlikely(too_many_isolated(zone, file, sc))) {
158958355c78SKOSAKI Motohiro 		congestion_wait(BLK_RW_ASYNC, HZ/10);
159035cd7815SRik van Riel 
159135cd7815SRik van Riel 		/* We are about to die and free our memory. Return now. */
159235cd7815SRik van Riel 		if (fatal_signal_pending(current))
159335cd7815SRik van Riel 			return SWAP_CLUSTER_MAX;
159435cd7815SRik van Riel 	}
159535cd7815SRik van Riel 
15961da177e4SLinus Torvalds 	lru_add_drain();
1597f80c0673SMinchan Kim 
1598f80c0673SMinchan Kim 	if (!sc->may_unmap)
159961317289SHillf Danton 		isolate_mode |= ISOLATE_UNMAPPED;
1600f80c0673SMinchan Kim 	if (!sc->may_writepage)
160161317289SHillf Danton 		isolate_mode |= ISOLATE_CLEAN;
1602f80c0673SMinchan Kim 
1603*a52633d8SMel Gorman 	spin_lock_irq(zone_lru_lock(zone));
16041da177e4SLinus Torvalds 
16055dc35979SKonstantin Khlebnikov 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
16065dc35979SKonstantin Khlebnikov 				     &nr_scanned, sc, isolate_mode, lru);
160795d918fcSKonstantin Khlebnikov 
16089d5e6a9fSHugh Dickins 	update_lru_size(lruvec, lru, -nr_taken);
160995d918fcSKonstantin Khlebnikov 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
16109d5e6a9fSHugh Dickins 	reclaim_stat->recent_scanned[file] += nr_taken;
161195d918fcSKonstantin Khlebnikov 
161289b5fae5SJohannes Weiner 	if (global_reclaim(sc)) {
16130d5d823aSMel Gorman 		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
1614b35ea17bSKOSAKI Motohiro 		if (current_is_kswapd())
161575b00af7SHugh Dickins 			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
1616b35ea17bSKOSAKI Motohiro 		else
161775b00af7SHugh Dickins 			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
1618b35ea17bSKOSAKI Motohiro 	}
1619*a52633d8SMel Gorman 	spin_unlock_irq(zone_lru_lock(zone));
1620d563c050SHillf Danton 
1621d563c050SHillf Danton 	if (nr_taken == 0)
162266635629SMel Gorman 		return 0;
1623b35ea17bSKOSAKI Motohiro 
162402c6de8dSMinchan Kim 	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
16258e950282SMel Gorman 				&nr_dirty, &nr_unqueued_dirty, &nr_congested,
16268e950282SMel Gorman 				&nr_writeback, &nr_immediate,
1627b1a6f21eSMel Gorman 				false);
1628c661b078SAndy Whitcroft 
1629*a52633d8SMel Gorman 	spin_lock_irq(zone_lru_lock(zone));
16303f79768fSHugh Dickins 
1631904249aaSYing Han 	if (global_reclaim(sc)) {
1632b35ea17bSKOSAKI Motohiro 		if (current_is_kswapd())
1633904249aaSYing Han 			__count_zone_vm_events(PGSTEAL_KSWAPD, zone,
1634904249aaSYing Han 					       nr_reclaimed);
1635904249aaSYing Han 		else
1636904249aaSYing Han 			__count_zone_vm_events(PGSTEAL_DIRECT, zone,
1637904249aaSYing Han 					       nr_reclaimed);
1638904249aaSYing Han 	}
1639a74609faSNick Piggin 
164027ac81d8SKonstantin Khlebnikov 	putback_inactive_pages(lruvec, &page_list);
16413f79768fSHugh Dickins 
164295d918fcSKonstantin Khlebnikov 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
16433f79768fSHugh Dickins 
1644*a52633d8SMel Gorman 	spin_unlock_irq(zone_lru_lock(zone));
16453f79768fSHugh Dickins 
1646747db954SJohannes Weiner 	mem_cgroup_uncharge_list(&page_list);
1647b745bc85SMel Gorman 	free_hot_cold_page_list(&page_list, true);
1648e11da5b4SMel Gorman 
164992df3a72SMel Gorman 	/*
165092df3a72SMel Gorman 	 * If reclaim is isolating dirty pages under writeback, it implies
165192df3a72SMel Gorman 	 * that the long-lived page allocation rate is exceeding the page
165292df3a72SMel Gorman 	 * laundering rate. Either the global limits are not being effective
165392df3a72SMel Gorman 	 * at throttling processes due to the page distribution throughout
165492df3a72SMel Gorman 	 * zones or there is heavy usage of a slow backing device. The
165592df3a72SMel Gorman 	 * only option is to throttle from reclaim context which is not ideal
165692df3a72SMel Gorman 	 * as there is no guarantee the dirtying process is throttled in the
165792df3a72SMel Gorman 	 * same way balance_dirty_pages() manages.
165892df3a72SMel Gorman 	 *
16598e950282SMel Gorman 	 * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
16608e950282SMel Gorman 	 * of pages under writeback and flagged for immediate reclaim and stall if any
16618e950282SMel Gorman 	 * are encountered in the nr_immediate check below.
166292df3a72SMel Gorman 	 */
1663918fc718SMel Gorman 	if (nr_writeback && nr_writeback == nr_taken)
166457054651SJohannes Weiner 		set_bit(ZONE_WRITEBACK, &zone->flags);
166592df3a72SMel Gorman 
1666d43006d5SMel Gorman 	/*
166797c9341fSTejun Heo 	 * Legacy memcg will stall in page writeback so avoid forcibly
166897c9341fSTejun Heo 	 * stalling here.
1669d43006d5SMel Gorman 	 */
167097c9341fSTejun Heo 	if (sane_reclaim(sc)) {
1671b1a6f21eSMel Gorman 		/*
16728e950282SMel Gorman 		 * Tag a zone as congested if all the dirty pages scanned were
16738e950282SMel Gorman 		 * backed by a congested BDI and wait_iff_congested will stall.
16748e950282SMel Gorman 		 */
16758e950282SMel Gorman 		if (nr_dirty && nr_dirty == nr_congested)
167657054651SJohannes Weiner 			set_bit(ZONE_CONGESTED, &zone->flags);
16778e950282SMel Gorman 
16788e950282SMel Gorman 		/*
1679b1a6f21eSMel Gorman 		 * If dirty pages are scanned that are not queued for IO, it
1680b1a6f21eSMel Gorman 		 * implies that flushers are not keeping up. In this case, flag
168157054651SJohannes Weiner 		 * the zone ZONE_DIRTY and kswapd will start writing pages from
168257054651SJohannes Weiner 		 * reclaim context.
1683b1a6f21eSMel Gorman 		 */
1684b1a6f21eSMel Gorman 		if (nr_unqueued_dirty == nr_taken)
168557054651SJohannes Weiner 			set_bit(ZONE_DIRTY, &zone->flags);
1686b1a6f21eSMel Gorman 
1687b1a6f21eSMel Gorman 		/*
1688b738d764SLinus Torvalds 		 * If kswapd scans pages marked for immediate
1689b738d764SLinus Torvalds 		 * reclaim and under writeback (nr_immediate), it implies
1690b738d764SLinus Torvalds 		 * that pages are cycling through the LRU faster than
1691b1a6f21eSMel Gorman 		 * they are written so also forcibly stall.
1692b1a6f21eSMel Gorman 		 */
1693b738d764SLinus Torvalds 		if (nr_immediate && current_may_throttle())
1694b1a6f21eSMel Gorman 			congestion_wait(BLK_RW_ASYNC, HZ/10);
1695e2be15f6SMel Gorman 	}
1696d43006d5SMel Gorman 
16978e950282SMel Gorman 	/*
16988e950282SMel Gorman 	 * Stall direct reclaim for IO completions if the underlying BDI or zone
16998e950282SMel Gorman 	 * is congested. Allow kswapd to continue until it starts encountering
17008e950282SMel Gorman 	 * unqueued dirty pages or cycling through the LRU too quickly.
17018e950282SMel Gorman 	 */
1702399ba0b9SNeilBrown 	if (!sc->hibernation_mode && !current_is_kswapd() &&
1703399ba0b9SNeilBrown 	    current_may_throttle())
17048e950282SMel Gorman 		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
17058e950282SMel Gorman 
1706ba5e9579Syalin wang 	trace_mm_vmscan_lru_shrink_inactive(zone, nr_scanned, nr_reclaimed,
1707ba5e9579Syalin wang 			sc->priority, file);
170805ff5137SAndrew Morton 	return nr_reclaimed;
17091da177e4SLinus Torvalds }
17101da177e4SLinus Torvalds 
17113bb1a852SMartin Bligh /*
17121cfb419bSKAMEZAWA Hiroyuki  * This moves pages from the active list to the inactive list.
17131cfb419bSKAMEZAWA Hiroyuki  *
17141cfb419bSKAMEZAWA Hiroyuki  * We move them the other way if the page is referenced by one or more
17151cfb419bSKAMEZAWA Hiroyuki  * processes, from rmap.
17161cfb419bSKAMEZAWA Hiroyuki  *
17171cfb419bSKAMEZAWA Hiroyuki  * If the pages are mostly unmapped, the processing is fast and it is
1718*a52633d8SMel Gorman  * appropriate to hold zone_lru_lock across the whole operation.  But if
17191cfb419bSKAMEZAWA Hiroyuki  * the pages are mapped, the processing is slow (page_referenced()) so we
1720*a52633d8SMel Gorman  * should drop zone_lru_lock around each page.  It's impossible to balance
17211cfb419bSKAMEZAWA Hiroyuki  * this, so instead we remove the pages from the LRU while processing them.
17221cfb419bSKAMEZAWA Hiroyuki  * It is safe to rely on PG_active against the non-LRU pages in here because
17231cfb419bSKAMEZAWA Hiroyuki  * nobody will play with that bit on a non-LRU page.
17241cfb419bSKAMEZAWA Hiroyuki  *
17250139aa7bSJoonsoo Kim  * The downside is that we have to touch page->_refcount against each page.
17261cfb419bSKAMEZAWA Hiroyuki  * But we had to alter page->flags anyway.
17271cfb419bSKAMEZAWA Hiroyuki  */
17281cfb419bSKAMEZAWA Hiroyuki 
1729fa9add64SHugh Dickins static void move_active_pages_to_lru(struct lruvec *lruvec,
17303eb4140fSWu Fengguang 				     struct list_head *list,
17312bcf8879SHugh Dickins 				     struct list_head *pages_to_free,
17323eb4140fSWu Fengguang 				     enum lru_list lru)
17333eb4140fSWu Fengguang {
1734fa9add64SHugh Dickins 	struct zone *zone = lruvec_zone(lruvec);
17353eb4140fSWu Fengguang 	unsigned long pgmoved = 0;
17363eb4140fSWu Fengguang 	struct page *page;
1737fa9add64SHugh Dickins 	int nr_pages;
17383eb4140fSWu Fengguang 
17393eb4140fSWu Fengguang 	while (!list_empty(list)) {
17403eb4140fSWu Fengguang 		page = lru_to_page(list);
1741fa9add64SHugh Dickins 		lruvec = mem_cgroup_page_lruvec(page, zone);
17423eb4140fSWu Fengguang 
1743309381feSSasha Levin 		VM_BUG_ON_PAGE(PageLRU(page), page);
17443eb4140fSWu Fengguang 		SetPageLRU(page);
17453eb4140fSWu Fengguang 
1746fa9add64SHugh Dickins 		nr_pages = hpage_nr_pages(page);
17479d5e6a9fSHugh Dickins 		update_lru_size(lruvec, lru, nr_pages);
1748925b7673SJohannes Weiner 		list_move(&page->lru, &lruvec->lists[lru]);
1749fa9add64SHugh Dickins 		pgmoved += nr_pages;
17503eb4140fSWu Fengguang 
17512bcf8879SHugh Dickins 		if (put_page_testzero(page)) {
17522bcf8879SHugh Dickins 			__ClearPageLRU(page);
17532bcf8879SHugh Dickins 			__ClearPageActive(page);
1754fa9add64SHugh Dickins 			del_page_from_lru_list(page, lruvec, lru);
17552bcf8879SHugh Dickins 
17562bcf8879SHugh Dickins 			if (unlikely(PageCompound(page))) {
1757*a52633d8SMel Gorman 				spin_unlock_irq(zone_lru_lock(zone));
1758747db954SJohannes Weiner 				mem_cgroup_uncharge(page);
17592bcf8879SHugh Dickins 				(*get_compound_page_dtor(page))(page);
1760*a52633d8SMel Gorman 				spin_lock_irq(zone_lru_lock(zone));
17612bcf8879SHugh Dickins 			} else
17622bcf8879SHugh Dickins 				list_add(&page->lru, pages_to_free);
17633eb4140fSWu Fengguang 		}
17643eb4140fSWu Fengguang 	}
17659d5e6a9fSHugh Dickins 
17663eb4140fSWu Fengguang 	if (!is_active_lru(lru))
17673eb4140fSWu Fengguang 		__count_vm_events(PGDEACTIVATE, pgmoved);
17683eb4140fSWu Fengguang }
17691cfb419bSKAMEZAWA Hiroyuki 
1770f626012dSHugh Dickins static void shrink_active_list(unsigned long nr_to_scan,
17711a93be0eSKonstantin Khlebnikov 			       struct lruvec *lruvec,
1772f16015fbSJohannes Weiner 			       struct scan_control *sc,
17739e3b2f8cSKonstantin Khlebnikov 			       enum lru_list lru)
17741cfb419bSKAMEZAWA Hiroyuki {
177544c241f1SKOSAKI Motohiro 	unsigned long nr_taken;
1776f626012dSHugh Dickins 	unsigned long nr_scanned;
17776fe6b7e3SWu Fengguang 	unsigned long vm_flags;
17781cfb419bSKAMEZAWA Hiroyuki 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
17798cab4754SWu Fengguang 	LIST_HEAD(l_active);
1780b69408e8SChristoph Lameter 	LIST_HEAD(l_inactive);
17811cfb419bSKAMEZAWA Hiroyuki 	struct page *page;
17821a93be0eSKonstantin Khlebnikov 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
178344c241f1SKOSAKI Motohiro 	unsigned long nr_rotated = 0;
1784f3fd4a61SKonstantin Khlebnikov 	isolate_mode_t isolate_mode = 0;
17853cb99451SKonstantin Khlebnikov 	int file = is_file_lru(lru);
17861a93be0eSKonstantin Khlebnikov 	struct zone *zone = lruvec_zone(lruvec);
17871cfb419bSKAMEZAWA Hiroyuki 
17881da177e4SLinus Torvalds 	lru_add_drain();
1789f80c0673SMinchan Kim 
1790f80c0673SMinchan Kim 	if (!sc->may_unmap)
179161317289SHillf Danton 		isolate_mode |= ISOLATE_UNMAPPED;
1792f80c0673SMinchan Kim 	if (!sc->may_writepage)
179361317289SHillf Danton 		isolate_mode |= ISOLATE_CLEAN;
1794f80c0673SMinchan Kim 
1795*a52633d8SMel Gorman 	spin_lock_irq(zone_lru_lock(zone));
1796925b7673SJohannes Weiner 
17975dc35979SKonstantin Khlebnikov 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
17985dc35979SKonstantin Khlebnikov 				     &nr_scanned, sc, isolate_mode, lru);
179989b5fae5SJohannes Weiner 
18009d5e6a9fSHugh Dickins 	update_lru_size(lruvec, lru, -nr_taken);
18019d5e6a9fSHugh Dickins 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1802b7c46d15SJohannes Weiner 	reclaim_stat->recent_scanned[file] += nr_taken;
18031cfb419bSKAMEZAWA Hiroyuki 
18049d5e6a9fSHugh Dickins 	if (global_reclaim(sc))
18059d5e6a9fSHugh Dickins 		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
1806f626012dSHugh Dickins 	__count_zone_vm_events(PGREFILL, zone, nr_scanned);
18079d5e6a9fSHugh Dickins 
1808*a52633d8SMel Gorman 	spin_unlock_irq(zone_lru_lock(zone));
18091da177e4SLinus Torvalds 
18101da177e4SLinus Torvalds 	while (!list_empty(&l_hold)) {
18111da177e4SLinus Torvalds 		cond_resched();
18121da177e4SLinus Torvalds 		page = lru_to_page(&l_hold);
18131da177e4SLinus Torvalds 		list_del(&page->lru);
18147e9cd484SRik van Riel 
181539b5f29aSHugh Dickins 		if (unlikely(!page_evictable(page))) {
1816894bc310SLee Schermerhorn 			putback_lru_page(page);
1817894bc310SLee Schermerhorn 			continue;
1818894bc310SLee Schermerhorn 		}
1819894bc310SLee Schermerhorn 
1820cc715d99SMel Gorman 		if (unlikely(buffer_heads_over_limit)) {
1821cc715d99SMel Gorman 			if (page_has_private(page) && trylock_page(page)) {
1822cc715d99SMel Gorman 				if (page_has_private(page))
1823cc715d99SMel Gorman 					try_to_release_page(page, 0);
1824cc715d99SMel Gorman 				unlock_page(page);
1825cc715d99SMel Gorman 			}
1826cc715d99SMel Gorman 		}
1827cc715d99SMel Gorman 
1828c3ac9a8aSJohannes Weiner 		if (page_referenced(page, 0, sc->target_mem_cgroup,
1829c3ac9a8aSJohannes Weiner 				    &vm_flags)) {
18309992af10SRik van Riel 			nr_rotated += hpage_nr_pages(page);
18318cab4754SWu Fengguang 			/*
18328cab4754SWu Fengguang 			 * Identify referenced, file-backed active pages and
18338cab4754SWu Fengguang 			 * give them one more trip around the active list, so
18348cab4754SWu Fengguang 			 * that executable code gets a better chance to stay in
18358cab4754SWu Fengguang 			 * memory under moderate memory pressure.  Anon pages
18368cab4754SWu Fengguang 			 * are not likely to be evicted by use-once streaming
18378cab4754SWu Fengguang 			 * IO, plus JVM can create lots of anon VM_EXEC pages,
18388cab4754SWu Fengguang 			 * so we ignore them here.
18398cab4754SWu Fengguang 			 */
184041e20983SWu Fengguang 			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
18418cab4754SWu Fengguang 				list_add(&page->lru, &l_active);
18428cab4754SWu Fengguang 				continue;
18438cab4754SWu Fengguang 			}
18448cab4754SWu Fengguang 		}
18457e9cd484SRik van Riel 
18465205e56eSKOSAKI Motohiro 		ClearPageActive(page);	/* we are de-activating */
18471da177e4SLinus Torvalds 		list_add(&page->lru, &l_inactive);
18481da177e4SLinus Torvalds 	}
18491da177e4SLinus Torvalds 
1850b555749aSAndrew Morton 	/*
18518cab4754SWu Fengguang 	 * Move pages back to the lru list.
1852b555749aSAndrew Morton 	 */
1853*a52633d8SMel Gorman 	spin_lock_irq(zone_lru_lock(zone));
18544f98a2feSRik van Riel 	/*
18558cab4754SWu Fengguang 	 * Count referenced pages from currently used mappings as rotated,
18568cab4754SWu Fengguang 	 * even though only some of them are actually re-activated.  This
18578cab4754SWu Fengguang 	 * helps balance scan pressure between file and anonymous pages in
18587c0db9e9SJerome Marchand 	 * get_scan_count.
1859556adecbSRik van Riel 	 */
1860b7c46d15SJohannes Weiner 	reclaim_stat->recent_rotated[file] += nr_rotated;
1861556adecbSRik van Riel 
1862fa9add64SHugh Dickins 	move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
1863fa9add64SHugh Dickins 	move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
1864a731286dSKOSAKI Motohiro 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1865*a52633d8SMel Gorman 	spin_unlock_irq(zone_lru_lock(zone));
18662bcf8879SHugh Dickins 
1867747db954SJohannes Weiner 	mem_cgroup_uncharge_list(&l_hold);
1868b745bc85SMel Gorman 	free_hot_cold_page_list(&l_hold, true);
18691da177e4SLinus Torvalds }
18701da177e4SLinus Torvalds 
187159dc76b0SRik van Riel /*
187259dc76b0SRik van Riel  * The inactive anon list should be small enough that the VM never has
187359dc76b0SRik van Riel  * to do too much work.
187414797e23SKOSAKI Motohiro  *
187559dc76b0SRik van Riel  * The inactive file list should be small enough to leave most memory
187659dc76b0SRik van Riel  * to the established workingset on the scan-resistant active list,
187759dc76b0SRik van Riel  * but large enough to avoid thrashing the aggregate readahead window.
187859dc76b0SRik van Riel  *
187959dc76b0SRik van Riel  * Both inactive lists should also be large enough that each inactive
188059dc76b0SRik van Riel  * page has a chance to be referenced again before it is reclaimed.
188159dc76b0SRik van Riel  *
188259dc76b0SRik van Riel  * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
188359dc76b0SRik van Riel  * on this LRU, maintained by the pageout code. A zone->inactive_ratio
188459dc76b0SRik van Riel  * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
188559dc76b0SRik van Riel  *
188659dc76b0SRik van Riel  * total     target    max
188759dc76b0SRik van Riel  * memory    ratio     inactive
188859dc76b0SRik van Riel  * -------------------------------------
188959dc76b0SRik van Riel  *   10MB       1         5MB
189059dc76b0SRik van Riel  *  100MB       1        50MB
189159dc76b0SRik van Riel  *    1GB       3       250MB
189259dc76b0SRik van Riel  *   10GB      10       0.9GB
189359dc76b0SRik van Riel  *  100GB      31         3GB
189459dc76b0SRik van Riel  *    1TB     101        10GB
189559dc76b0SRik van Riel  *   10TB     320        32GB
189614797e23SKOSAKI Motohiro  */
189759dc76b0SRik van Riel static bool inactive_list_is_low(struct lruvec *lruvec, bool file)
189814797e23SKOSAKI Motohiro {
189959dc76b0SRik van Riel 	unsigned long inactive_ratio;
190059dc76b0SRik van Riel 	unsigned long inactive;
190159dc76b0SRik van Riel 	unsigned long active;
190259dc76b0SRik van Riel 	unsigned long gb;
190359dc76b0SRik van Riel 
190474e3f3c3SMinchan Kim 	/*
190574e3f3c3SMinchan Kim 	 * If we don't have swap space, anonymous page deactivation
190674e3f3c3SMinchan Kim 	 * is pointless.
190774e3f3c3SMinchan Kim 	 */
190859dc76b0SRik van Riel 	if (!file && !total_swap_pages)
190942e2e457SYaowei Bai 		return false;
191074e3f3c3SMinchan Kim 
191159dc76b0SRik van Riel 	inactive = lruvec_lru_size(lruvec, file * LRU_FILE);
191259dc76b0SRik van Riel 	active = lruvec_lru_size(lruvec, file * LRU_FILE + LRU_ACTIVE);
1913f16015fbSJohannes Weiner 
191459dc76b0SRik van Riel 	gb = (inactive + active) >> (30 - PAGE_SHIFT);
191559dc76b0SRik van Riel 	if (gb)
191659dc76b0SRik van Riel 		inactive_ratio = int_sqrt(10 * gb);
1917b39415b2SRik van Riel 	else
191859dc76b0SRik van Riel 		inactive_ratio = 1;
191959dc76b0SRik van Riel 
192059dc76b0SRik van Riel 	return inactive * inactive_ratio < active;
1921b39415b2SRik van Riel }
1922b39415b2SRik van Riel 
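/*
 * Illustrative, user-space sketch (not part of vmscan.c): reproduce the
 * target-ratio table from the comment above inactive_list_is_low(). The
 * isqrt() helper below is a stand-in for the kernel's int_sqrt(), and the
 * memory sizes are just sample points.
 */
#include <stdio.h>

/* Integer square root by simple search; fine for these small inputs. */
static unsigned long isqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	/* Total (inactive + active) LRU size in gigabytes. */
	unsigned long sizes_gb[] = { 1, 10, 100, 1024, 10240 };
	unsigned long i;

	printf("total(GB)  target ratio\n");
	for (i = 0; i < sizeof(sizes_gb) / sizeof(sizes_gb[0]); i++) {
		unsigned long gb = sizes_gb[i];
		/* Mirror inactive_list_is_low(): int_sqrt(10 * gb), or 1 below 1GB. */
		unsigned long ratio = gb ? isqrt(10 * gb) : 1;

		/* e.g. 1GB -> 3, 10GB -> 10, 1TB -> 101, 10TB -> 320 */
		printf("%9lu  %12lu\n", gb, ratio);
	}
	return 0;
}
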
19234f98a2feSRik van Riel static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
19241a93be0eSKonstantin Khlebnikov 				 struct lruvec *lruvec, struct scan_control *sc)
1925b69408e8SChristoph Lameter {
1926b39415b2SRik van Riel 	if (is_active_lru(lru)) {
192759dc76b0SRik van Riel 		if (inactive_list_is_low(lruvec, is_file_lru(lru)))
19281a93be0eSKonstantin Khlebnikov 			shrink_active_list(nr_to_scan, lruvec, sc, lru);
1929556adecbSRik van Riel 		return 0;
1930556adecbSRik van Riel 	}
1931556adecbSRik van Riel 
19321a93be0eSKonstantin Khlebnikov 	return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
1933b69408e8SChristoph Lameter }
1934b69408e8SChristoph Lameter 
19359a265114SJohannes Weiner enum scan_balance {
19369a265114SJohannes Weiner 	SCAN_EQUAL,
19379a265114SJohannes Weiner 	SCAN_FRACT,
19389a265114SJohannes Weiner 	SCAN_ANON,
19399a265114SJohannes Weiner 	SCAN_FILE,
19409a265114SJohannes Weiner };
19419a265114SJohannes Weiner 
19421da177e4SLinus Torvalds /*
19434f98a2feSRik van Riel  * Determine how aggressively the anon and file LRU lists should be
19444f98a2feSRik van Riel  * scanned.  The relative value of each set of LRU lists is determined
19454f98a2feSRik van Riel  * by looking at the fraction of the scanned pages that we rotated back
19464f98a2feSRik van Riel  * onto the active list instead of evicting.
19474f98a2feSRik van Riel  *
1948be7bd59dSWanpeng Li  * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
1949be7bd59dSWanpeng Li  * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
19504f98a2feSRik van Riel  */
195133377678SVladimir Davydov static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
19526b4f7799SJohannes Weiner 			   struct scan_control *sc, unsigned long *nr,
19536b4f7799SJohannes Weiner 			   unsigned long *lru_pages)
19544f98a2feSRik van Riel {
195533377678SVladimir Davydov 	int swappiness = mem_cgroup_swappiness(memcg);
195690126375SKonstantin Khlebnikov 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
19579a265114SJohannes Weiner 	u64 fraction[2];
19589a265114SJohannes Weiner 	u64 denominator = 0;	/* gcc */
195990126375SKonstantin Khlebnikov 	struct zone *zone = lruvec_zone(lruvec);
19609a265114SJohannes Weiner 	unsigned long anon_prio, file_prio;
19619a265114SJohannes Weiner 	enum scan_balance scan_balance;
19620bf1457fSJohannes Weiner 	unsigned long anon, file;
19639a265114SJohannes Weiner 	bool force_scan = false;
19649a265114SJohannes Weiner 	unsigned long ap, fp;
19659a265114SJohannes Weiner 	enum lru_list lru;
19666f04f48dSSuleiman Souhlal 	bool some_scanned;
19676f04f48dSSuleiman Souhlal 	int pass;
1968246e87a9SKAMEZAWA Hiroyuki 
1969f11c0ca5SJohannes Weiner 	/*
1970f11c0ca5SJohannes Weiner 	 * If the zone or memcg is small, nr[l] can be 0.  This
1971f11c0ca5SJohannes Weiner 	 * results in no scanning on this priority and a potential
1972f11c0ca5SJohannes Weiner 	 * priority drop.  Global direct reclaim can go to the next
1973f11c0ca5SJohannes Weiner 	 * zone and tends to have no problems. Global kswapd is for
1974f11c0ca5SJohannes Weiner 	 * zone balancing and it needs to scan a minimum amount. When
1975f11c0ca5SJohannes Weiner 	 * reclaiming for a memcg, a priority drop can cause high
1976f11c0ca5SJohannes Weiner 	 * latencies, so it's better to scan a minimum amount there as
1977f11c0ca5SJohannes Weiner 	 * well.
1978f11c0ca5SJohannes Weiner 	 */
197990cbc250SVladimir Davydov 	if (current_is_kswapd()) {
198090cbc250SVladimir Davydov 		if (!zone_reclaimable(zone))
1981a4d3e9e7SJohannes Weiner 			force_scan = true;
1982eb01aaabSVladimir Davydov 		if (!mem_cgroup_online(memcg))
198390cbc250SVladimir Davydov 			force_scan = true;
198490cbc250SVladimir Davydov 	}
198589b5fae5SJohannes Weiner 	if (!global_reclaim(sc))
1986a4d3e9e7SJohannes Weiner 		force_scan = true;
198776a33fc3SShaohua Li 
198876a33fc3SShaohua Li 	/* If we have no swap space, do not bother scanning anon pages. */
1989d8b38438SVladimir Davydov 	if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
19909a265114SJohannes Weiner 		scan_balance = SCAN_FILE;
199176a33fc3SShaohua Li 		goto out;
199276a33fc3SShaohua Li 	}
19934f98a2feSRik van Riel 
199410316b31SJohannes Weiner 	/*
199510316b31SJohannes Weiner 	 * Global reclaim will swap to prevent OOM even with no
199610316b31SJohannes Weiner 	 * swappiness, but memcg users want to use this knob to
199710316b31SJohannes Weiner 	 * disable swapping for individual groups completely when
199810316b31SJohannes Weiner 	 * using the memory controller's swap limit feature would be
199910316b31SJohannes Weiner 	 * too expensive.
200010316b31SJohannes Weiner 	 */
200102695175SJohannes Weiner 	if (!global_reclaim(sc) && !swappiness) {
20029a265114SJohannes Weiner 		scan_balance = SCAN_FILE;
200310316b31SJohannes Weiner 		goto out;
200410316b31SJohannes Weiner 	}
200510316b31SJohannes Weiner 
200610316b31SJohannes Weiner 	/*
200710316b31SJohannes Weiner 	 * Do not apply any pressure balancing cleverness when the
200810316b31SJohannes Weiner 	 * system is close to OOM, scan both anon and file equally
200910316b31SJohannes Weiner 	 * (unless the swappiness setting disagrees with swapping).
201010316b31SJohannes Weiner 	 */
201102695175SJohannes Weiner 	if (!sc->priority && swappiness) {
20129a265114SJohannes Weiner 		scan_balance = SCAN_EQUAL;
201310316b31SJohannes Weiner 		goto out;
201410316b31SJohannes Weiner 	}
201510316b31SJohannes Weiner 
201611d16c25SJohannes Weiner 	/*
201762376251SJohannes Weiner 	 * Prevent the reclaimer from falling into the cache trap: as
201862376251SJohannes Weiner 	 * cache pages start out inactive, every cache fault will tip
201962376251SJohannes Weiner 	 * the scan balance towards the file LRU.  And as the file LRU
202062376251SJohannes Weiner 	 * shrinks, so does the window for rotation from references.
202162376251SJohannes Weiner 	 * This means we have a runaway feedback loop where a tiny
202262376251SJohannes Weiner 	 * thrashing file LRU becomes infinitely more attractive than
202362376251SJohannes Weiner 	 * anon pages.  Try to detect this based on file LRU size.
202462376251SJohannes Weiner 	 */
202562376251SJohannes Weiner 	if (global_reclaim(sc)) {
20262ab051e1SJerome Marchand 		unsigned long zonefile;
20272ab051e1SJerome Marchand 		unsigned long zonefree;
202862376251SJohannes Weiner 
20292ab051e1SJerome Marchand 		zonefree = zone_page_state(zone, NR_FREE_PAGES);
20302ab051e1SJerome Marchand 		zonefile = zone_page_state(zone, NR_ACTIVE_FILE) +
20312ab051e1SJerome Marchand 			   zone_page_state(zone, NR_INACTIVE_FILE);
20322ab051e1SJerome Marchand 
20332ab051e1SJerome Marchand 		if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) {
203462376251SJohannes Weiner 			scan_balance = SCAN_ANON;
203562376251SJohannes Weiner 			goto out;
203662376251SJohannes Weiner 		}
203762376251SJohannes Weiner 	}
203862376251SJohannes Weiner 
203962376251SJohannes Weiner 	/*
2040316bda0eSVladimir Davydov 	 * If there is enough inactive page cache, i.e. if the size of the
2041316bda0eSVladimir Davydov 	 * inactive list is greater than that of the active list *and* the
2042316bda0eSVladimir Davydov 	 * inactive list actually has some pages to scan on this priority, we
2043316bda0eSVladimir Davydov 	 * do not reclaim anything from the anonymous working set right now.
2044316bda0eSVladimir Davydov 	 * Without the second condition we could end up never scanning an
2045316bda0eSVladimir Davydov 	 * lruvec even if it has plenty of old anonymous pages unless the
2046316bda0eSVladimir Davydov 	 * system is under heavy pressure.
2047e9868505SRik van Riel 	 */
204859dc76b0SRik van Riel 	if (!inactive_list_is_low(lruvec, true) &&
204923047a96SJohannes Weiner 	    lruvec_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
20509a265114SJohannes Weiner 		scan_balance = SCAN_FILE;
2051e9868505SRik van Riel 		goto out;
20524f98a2feSRik van Riel 	}
20534f98a2feSRik van Riel 
20549a265114SJohannes Weiner 	scan_balance = SCAN_FRACT;
20559a265114SJohannes Weiner 
20564f98a2feSRik van Riel 	/*
205758c37f6eSKOSAKI Motohiro 	 * With swappiness at 100, anonymous and file have the same priority.
205858c37f6eSKOSAKI Motohiro 	 * This scanning priority is essentially the inverse of IO cost.
205958c37f6eSKOSAKI Motohiro 	 */
206002695175SJohannes Weiner 	anon_prio = swappiness;
206175b00af7SHugh Dickins 	file_prio = 200 - anon_prio;
206258c37f6eSKOSAKI Motohiro 
206358c37f6eSKOSAKI Motohiro 	/*
20644f98a2feSRik van Riel 	 * OK, so we have swap space and a fair amount of page cache
20654f98a2feSRik van Riel 	 * pages.  We use the recently rotated / recently scanned
20664f98a2feSRik van Riel 	 * ratios to determine how valuable each cache is.
20674f98a2feSRik van Riel 	 *
20684f98a2feSRik van Riel 	 * Because workloads change over time (and to avoid overflow)
20694f98a2feSRik van Riel 	 * we keep these statistics as a floating average, which ends
20704f98a2feSRik van Riel 	 * up weighing recent references more than old ones.
20714f98a2feSRik van Riel 	 *
20724f98a2feSRik van Riel 	 * anon in [0], file in [1]
20734f98a2feSRik van Riel 	 */
20742ab051e1SJerome Marchand 
207523047a96SJohannes Weiner 	anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON) +
207623047a96SJohannes Weiner 		lruvec_lru_size(lruvec, LRU_INACTIVE_ANON);
207723047a96SJohannes Weiner 	file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE) +
207823047a96SJohannes Weiner 		lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);
20792ab051e1SJerome Marchand 
2080*a52633d8SMel Gorman 	spin_lock_irq(zone_lru_lock(zone));
208158c37f6eSKOSAKI Motohiro 	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
20826e901571SKOSAKI Motohiro 		reclaim_stat->recent_scanned[0] /= 2;
20836e901571SKOSAKI Motohiro 		reclaim_stat->recent_rotated[0] /= 2;
20844f98a2feSRik van Riel 	}
20854f98a2feSRik van Riel 
20866e901571SKOSAKI Motohiro 	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
20876e901571SKOSAKI Motohiro 		reclaim_stat->recent_scanned[1] /= 2;
20886e901571SKOSAKI Motohiro 		reclaim_stat->recent_rotated[1] /= 2;
20894f98a2feSRik van Riel 	}
20904f98a2feSRik van Riel 
20914f98a2feSRik van Riel 	/*
209200d8089cSRik van Riel 	 * The amount of pressure on anon vs file pages is inversely
209300d8089cSRik van Riel 	 * proportional to the fraction of recently scanned pages on
209400d8089cSRik van Riel 	 * each list that were recently referenced and in active use.
20954f98a2feSRik van Riel 	 */
2096fe35004fSSatoru Moriya 	ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
20976e901571SKOSAKI Motohiro 	ap /= reclaim_stat->recent_rotated[0] + 1;
20984f98a2feSRik van Riel 
2099fe35004fSSatoru Moriya 	fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
21006e901571SKOSAKI Motohiro 	fp /= reclaim_stat->recent_rotated[1] + 1;
2101*a52633d8SMel Gorman 	spin_unlock_irq(zone_lru_lock(zone));
21024f98a2feSRik van Riel 
210376a33fc3SShaohua Li 	fraction[0] = ap;
210476a33fc3SShaohua Li 	fraction[1] = fp;
210576a33fc3SShaohua Li 	denominator = ap + fp + 1;
210676a33fc3SShaohua Li out:
21076f04f48dSSuleiman Souhlal 	some_scanned = false;
21086f04f48dSSuleiman Souhlal 	/* Only use force_scan on second pass. */
21096f04f48dSSuleiman Souhlal 	for (pass = 0; !some_scanned && pass < 2; pass++) {
21106b4f7799SJohannes Weiner 		*lru_pages = 0;
21114111304dSHugh Dickins 		for_each_evictable_lru(lru) {
21124111304dSHugh Dickins 			int file = is_file_lru(lru);
2113d778df51SJohannes Weiner 			unsigned long size;
211476a33fc3SShaohua Li 			unsigned long scan;
211576a33fc3SShaohua Li 
211623047a96SJohannes Weiner 			size = lruvec_lru_size(lruvec, lru);
2117d778df51SJohannes Weiner 			scan = size >> sc->priority;
21189a265114SJohannes Weiner 
21196f04f48dSSuleiman Souhlal 			if (!scan && pass && force_scan)
2120d778df51SJohannes Weiner 				scan = min(size, SWAP_CLUSTER_MAX);
21219a265114SJohannes Weiner 
21229a265114SJohannes Weiner 			switch (scan_balance) {
21239a265114SJohannes Weiner 			case SCAN_EQUAL:
21249a265114SJohannes Weiner 				/* Scan lists relative to size */
21259a265114SJohannes Weiner 				break;
21269a265114SJohannes Weiner 			case SCAN_FRACT:
21279a265114SJohannes Weiner 				/*
21289a265114SJohannes Weiner 				 * Scan types proportional to swappiness and
21299a265114SJohannes Weiner 				 * their relative recent reclaim efficiency.
21309a265114SJohannes Weiner 				 */
21316f04f48dSSuleiman Souhlal 				scan = div64_u64(scan * fraction[file],
21326f04f48dSSuleiman Souhlal 							denominator);
21339a265114SJohannes Weiner 				break;
21349a265114SJohannes Weiner 			case SCAN_FILE:
21359a265114SJohannes Weiner 			case SCAN_ANON:
21369a265114SJohannes Weiner 				/* Scan one type exclusively */
21376b4f7799SJohannes Weiner 				if ((scan_balance == SCAN_FILE) != file) {
21386b4f7799SJohannes Weiner 					size = 0;
21399a265114SJohannes Weiner 					scan = 0;
21406b4f7799SJohannes Weiner 				}
21419a265114SJohannes Weiner 				break;
21429a265114SJohannes Weiner 			default:
21439a265114SJohannes Weiner 				/* Look ma, no brain */
21449a265114SJohannes Weiner 				BUG();
21459a265114SJohannes Weiner 			}
21466b4f7799SJohannes Weiner 
21476b4f7799SJohannes Weiner 			*lru_pages += size;
21484111304dSHugh Dickins 			nr[lru] = scan;
21496b4f7799SJohannes Weiner 
21506f04f48dSSuleiman Souhlal 			/*
21516f04f48dSSuleiman Souhlal 			 * Skip the second pass and don't force_scan
21526f04f48dSSuleiman Souhlal 			 * if we found something to scan.
21536f04f48dSSuleiman Souhlal 			 */
21546f04f48dSSuleiman Souhlal 			some_scanned |= !!scan;
21556f04f48dSSuleiman Souhlal 		}
215676a33fc3SShaohua Li 	}
21576e08a369SWu Fengguang }
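
/*
 * Editorial sketch, not part of vmscan.c: a standalone userspace
 * illustration of the SCAN_FRACT arithmetic computed above.  With the
 * default swappiness of 60, anon_prio is 60 and file_prio is 140; each
 * pressure term is then scaled by how poorly that list recycled its
 * recently scanned pages.  All numbers below are hypothetical.
 */
#include <stdio.h>

static unsigned long pressure(unsigned long prio,
			      unsigned long recent_scanned,
			      unsigned long recent_rotated)
{
	/* Mirrors: ap = anon_prio * (recent_scanned[0] + 1) / (recent_rotated[0] + 1) */
	return prio * (recent_scanned + 1) / (recent_rotated + 1);
}

int main(void)
{
	unsigned long swappiness = 60;
	unsigned long anon_prio = swappiness;
	unsigned long file_prio = 200 - anon_prio;

	/* Assume the file LRU rotated (reused) most of what was scanned. */
	unsigned long ap = pressure(anon_prio, 1000, 100);	/* anon barely reused */
	unsigned long fp = pressure(file_prio, 1000, 900);	/* file heavily reused */
	unsigned long denominator = ap + fp + 1;

	/* Pressure shifts towards anon when the file LRU keeps rotating. */
	printf("anon share ~%lu%%, file share ~%lu%%\n",
	       100 * ap / denominator, 100 * fp / denominator);
	return 0;
}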
21584f98a2feSRik van Riel 
215972b252aeSMel Gorman #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
216072b252aeSMel Gorman static void init_tlb_ubc(void)
216172b252aeSMel Gorman {
216272b252aeSMel Gorman 	/*
216372b252aeSMel Gorman 	 * This deliberately does not clear the cpumask as it's expensive
216472b252aeSMel Gorman 	 * and unnecessary. If there happens to be data in there then the
216572b252aeSMel Gorman 	 * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI, after
216672b252aeSMel Gorman 	 * which it will be cleared.
216772b252aeSMel Gorman 	 */
216872b252aeSMel Gorman 	current->tlb_ubc.flush_required = false;
216972b252aeSMel Gorman }
217072b252aeSMel Gorman #else
217172b252aeSMel Gorman static inline void init_tlb_ubc(void)
217272b252aeSMel Gorman {
217372b252aeSMel Gorman }
217472b252aeSMel Gorman #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
217572b252aeSMel Gorman 
21769b4f98cdSJohannes Weiner /*
21779b4f98cdSJohannes Weiner  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
21789b4f98cdSJohannes Weiner  */
217933377678SVladimir Davydov static void shrink_zone_memcg(struct zone *zone, struct mem_cgroup *memcg,
21806b4f7799SJohannes Weiner 			      struct scan_control *sc, unsigned long *lru_pages)
21819b4f98cdSJohannes Weiner {
218233377678SVladimir Davydov 	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
21839b4f98cdSJohannes Weiner 	unsigned long nr[NR_LRU_LISTS];
2184e82e0561SMel Gorman 	unsigned long targets[NR_LRU_LISTS];
21859b4f98cdSJohannes Weiner 	unsigned long nr_to_scan;
21869b4f98cdSJohannes Weiner 	enum lru_list lru;
21879b4f98cdSJohannes Weiner 	unsigned long nr_reclaimed = 0;
21889b4f98cdSJohannes Weiner 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
21899b4f98cdSJohannes Weiner 	struct blk_plug plug;
21901a501907SMel Gorman 	bool scan_adjusted;
21919b4f98cdSJohannes Weiner 
219233377678SVladimir Davydov 	get_scan_count(lruvec, memcg, sc, nr, lru_pages);
21939b4f98cdSJohannes Weiner 
2194e82e0561SMel Gorman 	/* Record the original scan target for proportional adjustments later */
2195e82e0561SMel Gorman 	memcpy(targets, nr, sizeof(nr));
2196e82e0561SMel Gorman 
21971a501907SMel Gorman 	/*
21981a501907SMel Gorman 	 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
21991a501907SMel Gorman 	 * event that can occur when there is little memory pressure e.g.
22001a501907SMel Gorman 	 * multiple streaming readers/writers. Hence, we do not abort scanning
22011a501907SMel Gorman 	 * when the requested number of pages has been reclaimed while scanning
22021a501907SMel Gorman 	 * at DEF_PRIORITY, on the assumption that the fact we are doing direct
22031a501907SMel Gorman 	 * reclaim at all implies that kswapd is not keeping up and it is best to
22041a501907SMel Gorman 	 * do a batch of work at once. For memcg reclaim one check is made to
22051a501907SMel Gorman 	 * abort proportional reclaim if either the file or anon lru has already
22061a501907SMel Gorman 	 * dropped to zero at the first pass.
22071a501907SMel Gorman 	 */
22081a501907SMel Gorman 	scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
22091a501907SMel Gorman 			 sc->priority == DEF_PRIORITY);
22101a501907SMel Gorman 
221172b252aeSMel Gorman 	init_tlb_ubc();
221272b252aeSMel Gorman 
22139b4f98cdSJohannes Weiner 	blk_start_plug(&plug);
22149b4f98cdSJohannes Weiner 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
22159b4f98cdSJohannes Weiner 					nr[LRU_INACTIVE_FILE]) {
2216e82e0561SMel Gorman 		unsigned long nr_anon, nr_file, percentage;
2217e82e0561SMel Gorman 		unsigned long nr_scanned;
2218e82e0561SMel Gorman 
22199b4f98cdSJohannes Weiner 		for_each_evictable_lru(lru) {
22209b4f98cdSJohannes Weiner 			if (nr[lru]) {
22219b4f98cdSJohannes Weiner 				nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
22229b4f98cdSJohannes Weiner 				nr[lru] -= nr_to_scan;
22239b4f98cdSJohannes Weiner 
22249b4f98cdSJohannes Weiner 				nr_reclaimed += shrink_list(lru, nr_to_scan,
22259b4f98cdSJohannes Weiner 							    lruvec, sc);
22269b4f98cdSJohannes Weiner 			}
22279b4f98cdSJohannes Weiner 		}
2228e82e0561SMel Gorman 
2229e82e0561SMel Gorman 		if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2230e82e0561SMel Gorman 			continue;
2231e82e0561SMel Gorman 
22329b4f98cdSJohannes Weiner 		/*
2233e82e0561SMel Gorman 		 * For kswapd and memcg, reclaim at least the number of pages
22341a501907SMel Gorman 		 * requested. Ensure that the anon and file LRUs are scanned
2235e82e0561SMel Gorman 		 * proportionally to what was requested by get_scan_count(). We
2236e82e0561SMel Gorman 		 * stop reclaiming one LRU and reduce the amount of scanning
2237e82e0561SMel Gorman 		 * proportionally to the original scan target.
2238e82e0561SMel Gorman 		 */
2239e82e0561SMel Gorman 		nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
2240e82e0561SMel Gorman 		nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
2241e82e0561SMel Gorman 
22421a501907SMel Gorman 		/*
22431a501907SMel Gorman 		 * It's just vindictive to attack the larger once the smaller
22441a501907SMel Gorman 		 * has gone to zero.  And given the way we stop scanning the
22451a501907SMel Gorman 		 * smaller below, this makes sure that we only make one nudge
22461a501907SMel Gorman 		 * towards proportionality once we've got nr_to_reclaim.
22471a501907SMel Gorman 		 */
22481a501907SMel Gorman 		if (!nr_file || !nr_anon)
22491a501907SMel Gorman 			break;
22501a501907SMel Gorman 
2251e82e0561SMel Gorman 		if (nr_file > nr_anon) {
2252e82e0561SMel Gorman 			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
2253e82e0561SMel Gorman 						targets[LRU_ACTIVE_ANON] + 1;
2254e82e0561SMel Gorman 			lru = LRU_BASE;
2255e82e0561SMel Gorman 			percentage = nr_anon * 100 / scan_target;
2256e82e0561SMel Gorman 		} else {
2257e82e0561SMel Gorman 			unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
2258e82e0561SMel Gorman 						targets[LRU_ACTIVE_FILE] + 1;
2259e82e0561SMel Gorman 			lru = LRU_FILE;
2260e82e0561SMel Gorman 			percentage = nr_file * 100 / scan_target;
2261e82e0561SMel Gorman 		}
2262e82e0561SMel Gorman 
2263e82e0561SMel Gorman 		/* Stop scanning the smaller of the LRU */
2264e82e0561SMel Gorman 		nr[lru] = 0;
2265e82e0561SMel Gorman 		nr[lru + LRU_ACTIVE] = 0;
2266e82e0561SMel Gorman 
2267e82e0561SMel Gorman 		/*
2268e82e0561SMel Gorman 		 * Recalculate the other LRU scan count based on its original
2269e82e0561SMel Gorman 		 * scan target and the percentage scanning already complete
2270e82e0561SMel Gorman 		 */
2271e82e0561SMel Gorman 		lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
2272e82e0561SMel Gorman 		nr_scanned = targets[lru] - nr[lru];
2273e82e0561SMel Gorman 		nr[lru] = targets[lru] * (100 - percentage) / 100;
2274e82e0561SMel Gorman 		nr[lru] -= min(nr[lru], nr_scanned);
2275e82e0561SMel Gorman 
2276e82e0561SMel Gorman 		lru += LRU_ACTIVE;
2277e82e0561SMel Gorman 		nr_scanned = targets[lru] - nr[lru];
2278e82e0561SMel Gorman 		nr[lru] = targets[lru] * (100 - percentage) / 100;
2279e82e0561SMel Gorman 		nr[lru] -= min(nr[lru], nr_scanned);
2280e82e0561SMel Gorman 
2281e82e0561SMel Gorman 		scan_adjusted = true;
22829b4f98cdSJohannes Weiner 	}
22839b4f98cdSJohannes Weiner 	blk_finish_plug(&plug);
22849b4f98cdSJohannes Weiner 	sc->nr_reclaimed += nr_reclaimed;
22859b4f98cdSJohannes Weiner 
22869b4f98cdSJohannes Weiner 	/*
22879b4f98cdSJohannes Weiner 	 * Even if we did not try to evict anon pages at all, we want to
22889b4f98cdSJohannes Weiner 	 * rebalance the anon lru active/inactive ratio.
22899b4f98cdSJohannes Weiner 	 */
229059dc76b0SRik van Riel 	if (inactive_list_is_low(lruvec, false))
22919b4f98cdSJohannes Weiner 		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
22929b4f98cdSJohannes Weiner 				   sc, LRU_ACTIVE_ANON);
22939b4f98cdSJohannes Weiner 
22949b4f98cdSJohannes Weiner 	throttle_vm_writeout(sc->gfp_mask);
22959b4f98cdSJohannes Weiner }
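
/*
 * Editorial sketch, not part of vmscan.c: the proportional rescaling that
 * shrink_zone_memcg() performs once nr_to_reclaim has been met and one LRU
 * type has been stopped.  The other type's remaining scan counts are cut
 * back by the percentage the stopped type had already completed, so total
 * scanning stays close to the original get_scan_count() targets.  All
 * values below are hypothetical.
 */
#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Original targets and current remainders for the surviving LRU pair. */
	unsigned long target_inactive = 800, target_active = 200;
	unsigned long nr_inactive = 500, nr_active = 150;

	/* Assume the stopped type had scanned 40% of its own target. */
	unsigned long percentage = 40;
	unsigned long nr_scanned;

	/* Same recalculation as in the loop above. */
	nr_scanned = target_inactive - nr_inactive;
	nr_inactive = target_inactive * (100 - percentage) / 100;
	nr_inactive -= min_ul(nr_inactive, nr_scanned);

	nr_scanned = target_active - nr_active;
	nr_active = target_active * (100 - percentage) / 100;
	nr_active -= min_ul(nr_active, nr_scanned);

	printf("remaining scan: inactive=%lu active=%lu\n",
	       nr_inactive, nr_active);
	return 0;
}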
22969b4f98cdSJohannes Weiner 
229723b9da55SMel Gorman /* Use reclaim/compaction for costly allocs or under memory pressure */
22989e3b2f8cSKonstantin Khlebnikov static bool in_reclaim_compaction(struct scan_control *sc)
229923b9da55SMel Gorman {
2300d84da3f9SKirill A. Shutemov 	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
230123b9da55SMel Gorman 			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
23029e3b2f8cSKonstantin Khlebnikov 			 sc->priority < DEF_PRIORITY - 2))
230323b9da55SMel Gorman 		return true;
230423b9da55SMel Gorman 
230523b9da55SMel Gorman 	return false;
230623b9da55SMel Gorman }
230723b9da55SMel Gorman 
23084f98a2feSRik van Riel /*
230923b9da55SMel Gorman  * Reclaim/compaction is used for high-order allocation requests. It reclaims
231023b9da55SMel Gorman  * order-0 pages before compacting the zone. should_continue_reclaim() returns
231123b9da55SMel Gorman  * true if more pages should be reclaimed such that when the page allocator
231323b9da55SMel Gorman  * calls try_to_compact_zone() it will have enough free pages to succeed.
231323b9da55SMel Gorman  * It will give up earlier than that if there is difficulty reclaiming pages.
23143e7d3449SMel Gorman  */
23159b4f98cdSJohannes Weiner static inline bool should_continue_reclaim(struct zone *zone,
23163e7d3449SMel Gorman 					unsigned long nr_reclaimed,
23173e7d3449SMel Gorman 					unsigned long nr_scanned,
23183e7d3449SMel Gorman 					struct scan_control *sc)
23193e7d3449SMel Gorman {
23203e7d3449SMel Gorman 	unsigned long pages_for_compaction;
23213e7d3449SMel Gorman 	unsigned long inactive_lru_pages;
23223e7d3449SMel Gorman 
23233e7d3449SMel Gorman 	/* If not in reclaim/compaction mode, stop */
23249e3b2f8cSKonstantin Khlebnikov 	if (!in_reclaim_compaction(sc))
23253e7d3449SMel Gorman 		return false;
23263e7d3449SMel Gorman 
23272876592fSMel Gorman 	/* Consider stopping depending on scan and reclaim activity */
23282876592fSMel Gorman 	if (sc->gfp_mask & __GFP_REPEAT) {
23293e7d3449SMel Gorman 		/*
23302876592fSMel Gorman 		 * For __GFP_REPEAT allocations, stop reclaiming if the
23312876592fSMel Gorman 		 * full LRU list has been scanned and we are still failing
23322876592fSMel Gorman 		 * to reclaim pages. This full LRU scan is potentially
23332876592fSMel Gorman 		 * expensive but a __GFP_REPEAT caller really wants to succeed
23343e7d3449SMel Gorman 		 */
23353e7d3449SMel Gorman 		if (!nr_reclaimed && !nr_scanned)
23363e7d3449SMel Gorman 			return false;
23372876592fSMel Gorman 	} else {
23382876592fSMel Gorman 		/*
23392876592fSMel Gorman 		 * For non-__GFP_REPEAT allocations which can presumably
23402876592fSMel Gorman 		 * fail without consequence, stop if we failed to reclaim
23412876592fSMel Gorman 		 * any pages from the last SWAP_CLUSTER_MAX number of
23422876592fSMel Gorman 		 * pages that were scanned. This will return to the
23432876592fSMel Gorman 		 * caller faster at the risk that reclaim/compaction and
23442876592fSMel Gorman 		 * the resulting allocation attempt fail.
23452876592fSMel Gorman 		 */
23462876592fSMel Gorman 		if (!nr_reclaimed)
23472876592fSMel Gorman 			return false;
23482876592fSMel Gorman 	}
23493e7d3449SMel Gorman 
23503e7d3449SMel Gorman 	/*
23513e7d3449SMel Gorman 	 * If we have not reclaimed enough pages for compaction and the
23523e7d3449SMel Gorman 	 * inactive lists are large enough, continue reclaiming
23533e7d3449SMel Gorman 	 */
23543e7d3449SMel Gorman 	pages_for_compaction = (2UL << sc->order);
23559b4f98cdSJohannes Weiner 	inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
2356ec8acf20SShaohua Li 	if (get_nr_swap_pages() > 0)
23579b4f98cdSJohannes Weiner 		inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
23583e7d3449SMel Gorman 	if (sc->nr_reclaimed < pages_for_compaction &&
23593e7d3449SMel Gorman 			inactive_lru_pages > pages_for_compaction)
23603e7d3449SMel Gorman 		return true;
23613e7d3449SMel Gorman 
23623e7d3449SMel Gorman 	/* If compaction would go ahead or the allocation would succeed, stop */
2363ebff3980SVlastimil Babka 	switch (compaction_suitable(zone, sc->order, 0, 0)) {
23643e7d3449SMel Gorman 	case COMPACT_PARTIAL:
23653e7d3449SMel Gorman 	case COMPACT_CONTINUE:
23663e7d3449SMel Gorman 		return false;
23673e7d3449SMel Gorman 	default:
23683e7d3449SMel Gorman 		return true;
23693e7d3449SMel Gorman 	}
23703e7d3449SMel Gorman }
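
/*
 * Editorial sketch, not part of vmscan.c: the "keep reclaiming for
 * compaction" threshold checked above.  For an order-9 request (a 2MB
 * huge page with 4K base pages), reclaim aims for roughly twice the
 * allocation size in reclaimed pages, provided the inactive lists are
 * still large enough to make that worthwhile.  Numbers are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	int order = 9;
	unsigned long pages_for_compaction = 2UL << order;	/* 1024 pages */
	unsigned long nr_reclaimed = 300;
	unsigned long inactive_lru_pages = 50000;

	int keep_reclaiming = nr_reclaimed < pages_for_compaction &&
			      inactive_lru_pages > pages_for_compaction;

	printf("pages_for_compaction=%lu keep_reclaiming=%d\n",
	       pages_for_compaction, keep_reclaiming);
	return 0;
}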
23713e7d3449SMel Gorman 
23726b4f7799SJohannes Weiner static bool shrink_zone(struct zone *zone, struct scan_control *sc,
23736b4f7799SJohannes Weiner 			bool is_classzone)
2374f16015fbSJohannes Weiner {
2375cb731d6cSVladimir Davydov 	struct reclaim_state *reclaim_state = current->reclaim_state;
23769b4f98cdSJohannes Weiner 	unsigned long nr_reclaimed, nr_scanned;
23772344d7e4SJohannes Weiner 	bool reclaimable = false;
23789b4f98cdSJohannes Weiner 
23799b4f98cdSJohannes Weiner 	do {
23805660048cSJohannes Weiner 		struct mem_cgroup *root = sc->target_mem_cgroup;
23815660048cSJohannes Weiner 		struct mem_cgroup_reclaim_cookie reclaim = {
23825660048cSJohannes Weiner 			.zone = zone,
23839e3b2f8cSKonstantin Khlebnikov 			.priority = sc->priority,
23845660048cSJohannes Weiner 		};
23856b4f7799SJohannes Weiner 		unsigned long zone_lru_pages = 0;
2386694fbc0fSAndrew Morton 		struct mem_cgroup *memcg;
23875660048cSJohannes Weiner 
23889b4f98cdSJohannes Weiner 		nr_reclaimed = sc->nr_reclaimed;
23899b4f98cdSJohannes Weiner 		nr_scanned = sc->nr_scanned;
23909b4f98cdSJohannes Weiner 
2391694fbc0fSAndrew Morton 		memcg = mem_cgroup_iter(root, NULL, &reclaim);
2392694fbc0fSAndrew Morton 		do {
23936b4f7799SJohannes Weiner 			unsigned long lru_pages;
23948e8ae645SJohannes Weiner 			unsigned long reclaimed;
2395cb731d6cSVladimir Davydov 			unsigned long scanned;
23969b4f98cdSJohannes Weiner 
2397241994edSJohannes Weiner 			if (mem_cgroup_low(root, memcg)) {
2398241994edSJohannes Weiner 				if (!sc->may_thrash)
2399241994edSJohannes Weiner 					continue;
2400241994edSJohannes Weiner 				mem_cgroup_events(memcg, MEMCG_LOW, 1);
2401241994edSJohannes Weiner 			}
2402241994edSJohannes Weiner 
24038e8ae645SJohannes Weiner 			reclaimed = sc->nr_reclaimed;
2404cb731d6cSVladimir Davydov 			scanned = sc->nr_scanned;
24055660048cSJohannes Weiner 
240633377678SVladimir Davydov 			shrink_zone_memcg(zone, memcg, sc, &lru_pages);
24076b4f7799SJohannes Weiner 			zone_lru_pages += lru_pages;
2408f9be23d6SKonstantin Khlebnikov 
2409cb731d6cSVladimir Davydov 			if (memcg && is_classzone)
2410cb731d6cSVladimir Davydov 				shrink_slab(sc->gfp_mask, zone_to_nid(zone),
2411cb731d6cSVladimir Davydov 					    memcg, sc->nr_scanned - scanned,
2412cb731d6cSVladimir Davydov 					    lru_pages);
2413cb731d6cSVladimir Davydov 
24148e8ae645SJohannes Weiner 			/* Record the group's reclaim efficiency */
24158e8ae645SJohannes Weiner 			vmpressure(sc->gfp_mask, memcg, false,
24168e8ae645SJohannes Weiner 				   sc->nr_scanned - scanned,
24178e8ae645SJohannes Weiner 				   sc->nr_reclaimed - reclaimed);
24188e8ae645SJohannes Weiner 
24195660048cSJohannes Weiner 			/*
2420a394cb8eSMichal Hocko 			 * Direct reclaim and kswapd have to scan all memory
2421a394cb8eSMichal Hocko 			 * cgroups to fulfill the overall scan target for the
24229b4f98cdSJohannes Weiner 			 * zone.
2423a394cb8eSMichal Hocko 			 *
2424a394cb8eSMichal Hocko 			 * Limit reclaim, on the other hand, only cares about
2425a394cb8eSMichal Hocko 			 * reclaiming nr_to_reclaim pages and it will
2426a394cb8eSMichal Hocko 			 * retry with decreasing priority if one round over the
2427a394cb8eSMichal Hocko 			 * whole hierarchy is not sufficient.
24285660048cSJohannes Weiner 			 */
2429a394cb8eSMichal Hocko 			if (!global_reclaim(sc) &&
2430a394cb8eSMichal Hocko 					sc->nr_reclaimed >= sc->nr_to_reclaim) {
24315660048cSJohannes Weiner 				mem_cgroup_iter_break(root, memcg);
24325660048cSJohannes Weiner 				break;
24335660048cSJohannes Weiner 			}
2434241994edSJohannes Weiner 		} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
243570ddf637SAnton Vorontsov 
24366b4f7799SJohannes Weiner 		/*
24376b4f7799SJohannes Weiner 		 * Shrink the slab caches in the same proportion that
24386b4f7799SJohannes Weiner 		 * the eligible LRU pages were scanned.
24396b4f7799SJohannes Weiner 		 */
2440cb731d6cSVladimir Davydov 		if (global_reclaim(sc) && is_classzone)
2441cb731d6cSVladimir Davydov 			shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL,
24426b4f7799SJohannes Weiner 				    sc->nr_scanned - nr_scanned,
24436b4f7799SJohannes Weiner 				    zone_lru_pages);
24446b4f7799SJohannes Weiner 
24456b4f7799SJohannes Weiner 		if (reclaim_state) {
2446cb731d6cSVladimir Davydov 			sc->nr_reclaimed += reclaim_state->reclaimed_slab;
24476b4f7799SJohannes Weiner 			reclaim_state->reclaimed_slab = 0;
24486b4f7799SJohannes Weiner 		}
24496b4f7799SJohannes Weiner 
24508e8ae645SJohannes Weiner 		/* Record the subtree's reclaim efficiency */
24518e8ae645SJohannes Weiner 		vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
245270ddf637SAnton Vorontsov 			   sc->nr_scanned - nr_scanned,
245370ddf637SAnton Vorontsov 			   sc->nr_reclaimed - nr_reclaimed);
245470ddf637SAnton Vorontsov 
24552344d7e4SJohannes Weiner 		if (sc->nr_reclaimed - nr_reclaimed)
24562344d7e4SJohannes Weiner 			reclaimable = true;
24572344d7e4SJohannes Weiner 
24589b4f98cdSJohannes Weiner 	} while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
24599b4f98cdSJohannes Weiner 					 sc->nr_scanned - nr_scanned, sc));
24602344d7e4SJohannes Weiner 
24612344d7e4SJohannes Weiner 	return reclaimable;
2462f16015fbSJohannes Weiner }
2463f16015fbSJohannes Weiner 
246453853e2dSVlastimil Babka /*
246553853e2dSVlastimil Babka  * Returns true if compaction should go ahead for a high-order request, or
246653853e2dSVlastimil Babka  * the high-order allocation would succeed without compaction.
246753853e2dSVlastimil Babka  */
2468b6459cc1SMichal Hocko static inline bool compaction_ready(struct zone *zone, int order, int classzone_idx)
2469fe4b1b24SMel Gorman {
2470fe4b1b24SMel Gorman 	unsigned long balance_gap, watermark;
2471fe4b1b24SMel Gorman 	bool watermark_ok;
2472fe4b1b24SMel Gorman 
2473fe4b1b24SMel Gorman 	/*
2474fe4b1b24SMel Gorman 	 * Compaction takes time to run and there are potentially other
2475fe4b1b24SMel Gorman 	 * callers using the pages just freed. Continue reclaiming until
2476fe4b1b24SMel Gorman 	 * there is a buffer of free pages available to give compaction
2477fe4b1b24SMel Gorman 	 * a reasonable chance of completing and allocating the page
2478fe4b1b24SMel Gorman 	 */
24794be89a34SJianyu Zhan 	balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
24804be89a34SJianyu Zhan 			zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
24810b06496aSJohannes Weiner 	watermark = high_wmark_pages(zone) + balance_gap + (2UL << order);
2482b6459cc1SMichal Hocko 	watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, classzone_idx);
2483fe4b1b24SMel Gorman 
2484fe4b1b24SMel Gorman 	/*
2485fe4b1b24SMel Gorman 	 * If compaction is deferred, reclaim up to a point where
2486fe4b1b24SMel Gorman 	 * compaction will have a chance of success when re-enabled
2487fe4b1b24SMel Gorman 	 */
24880b06496aSJohannes Weiner 	if (compaction_deferred(zone, order))
2489fe4b1b24SMel Gorman 		return watermark_ok;
2490fe4b1b24SMel Gorman 
249153853e2dSVlastimil Babka 	/*
249253853e2dSVlastimil Babka 	 * If compaction is not ready to start and allocation is not likely
249353853e2dSVlastimil Babka 	 * to succeed without it, then keep reclaiming.
249453853e2dSVlastimil Babka 	 */
2495b6459cc1SMichal Hocko 	if (compaction_suitable(zone, order, 0, classzone_idx) == COMPACT_SKIPPED)
2496fe4b1b24SMel Gorman 		return false;
2497fe4b1b24SMel Gorman 
2498fe4b1b24SMel Gorman 	return watermark_ok;
2499fe4b1b24SMel Gorman }
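
/*
 * Editorial sketch, not part of vmscan.c: the watermark that
 * compaction_ready() tests against.  The zone needs its high watermark
 * plus a balance gap (the smaller of the low watermark and ~1% of the
 * zone) plus twice the allocation size free before reclaim hands over to
 * compaction.  Zone sizes and watermarks below are hypothetical.
 */
#include <stdio.h>

#define BALANCE_GAP_RATIO 100	/* ~1% of the zone, as described above */

int main(void)
{
	unsigned long managed_pages = 1UL << 20;	/* ~4GB zone of 4K pages */
	unsigned long low_wmark = 8192, high_wmark = 12288;
	int order = 9;

	/* DIV_ROUND_UP(managed_pages, BALANCE_GAP_RATIO) */
	unsigned long gap_by_ratio =
		(managed_pages + BALANCE_GAP_RATIO - 1) / BALANCE_GAP_RATIO;
	unsigned long balance_gap =
		low_wmark < gap_by_ratio ? low_wmark : gap_by_ratio;
	unsigned long watermark = high_wmark + balance_gap + (2UL << order);

	printf("balance_gap=%lu pages, reclaim target watermark=%lu pages\n",
	       balance_gap, watermark);
	return 0;
}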
2500fe4b1b24SMel Gorman 
25011da177e4SLinus Torvalds /*
25021da177e4SLinus Torvalds  * This is the direct reclaim path, for page-allocating processes.  We only
25031da177e4SLinus Torvalds  * try to reclaim pages from zones which will satisfy the caller's allocation
25041da177e4SLinus Torvalds  * request.
25051da177e4SLinus Torvalds  *
250641858966SMel Gorman  * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
250741858966SMel Gorman  * Because:
25081da177e4SLinus Torvalds  * a) The caller may be trying to free *extra* pages to satisfy a higher-order
25091da177e4SLinus Torvalds  *    allocation or
251041858966SMel Gorman  * b) The target zone may be at high_wmark_pages(zone) but the lower zones
251141858966SMel Gorman  *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
251241858966SMel Gorman  *    zone defense algorithm.
25131da177e4SLinus Torvalds  *
25141da177e4SLinus Torvalds  * If a zone is deemed to be full of pinned pages then just give it a light
25151da177e4SLinus Torvalds  * scan then give up on it.
25161da177e4SLinus Torvalds  */
25170a0337e0SMichal Hocko static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
25181da177e4SLinus Torvalds {
2519dd1a239fSMel Gorman 	struct zoneref *z;
252054a6eb5cSMel Gorman 	struct zone *zone;
25210608f43dSAndrew Morton 	unsigned long nr_soft_reclaimed;
25220608f43dSAndrew Morton 	unsigned long nr_soft_scanned;
2523619d0d76SWeijie Yang 	gfp_t orig_mask;
25249bbc04eeSWeijie Yang 	enum zone_type requested_highidx = gfp_zone(sc->gfp_mask);
25251cfb419bSKAMEZAWA Hiroyuki 
2526cc715d99SMel Gorman 	/*
2527cc715d99SMel Gorman 	 * If the number of buffer_heads in the machine exceeds the maximum
2528cc715d99SMel Gorman 	 * allowed level, force direct reclaim to scan the highmem zone as
2529cc715d99SMel Gorman 	 * highmem pages could be pinning lowmem pages storing buffer_heads
2530cc715d99SMel Gorman 	 */
2531619d0d76SWeijie Yang 	orig_mask = sc->gfp_mask;
2532cc715d99SMel Gorman 	if (buffer_heads_over_limit)
2533cc715d99SMel Gorman 		sc->gfp_mask |= __GFP_HIGHMEM;
2534cc715d99SMel Gorman 
2535d4debc66SMel Gorman 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
25367bf52fb8SMinchan Kim 					gfp_zone(sc->gfp_mask), sc->nodemask) {
25376b4f7799SJohannes Weiner 		enum zone_type classzone_idx;
25386b4f7799SJohannes Weiner 
2539f3fe6512SCon Kolivas 		if (!populated_zone(zone))
25401da177e4SLinus Torvalds 			continue;
25416b4f7799SJohannes Weiner 
25426b4f7799SJohannes Weiner 		classzone_idx = requested_highidx;
25436b4f7799SJohannes Weiner 		while (!populated_zone(zone->zone_pgdat->node_zones +
25446b4f7799SJohannes Weiner 							classzone_idx))
25456b4f7799SJohannes Weiner 			classzone_idx--;
25466b4f7799SJohannes Weiner 
25471cfb419bSKAMEZAWA Hiroyuki 		/*
25481cfb419bSKAMEZAWA Hiroyuki 		 * Take care that memory controller reclaim has little influence
25491cfb419bSKAMEZAWA Hiroyuki 		 * on the global LRU.
25501cfb419bSKAMEZAWA Hiroyuki 		 */
255189b5fae5SJohannes Weiner 		if (global_reclaim(sc)) {
2552344736f2SVladimir Davydov 			if (!cpuset_zone_allowed(zone,
2553344736f2SVladimir Davydov 						 GFP_KERNEL | __GFP_HARDWALL))
25541da177e4SLinus Torvalds 				continue;
255565ec02cbSVladimir Davydov 
25566e543d57SLisa Du 			if (sc->priority != DEF_PRIORITY &&
25576e543d57SLisa Du 			    !zone_reclaimable(zone))
25581da177e4SLinus Torvalds 				continue;	/* Let kswapd poll it */
25590b06496aSJohannes Weiner 
2560e0887c19SRik van Riel 			/*
2561e0c23279SMel Gorman 			 * If we already have plenty of memory free for
2562e0c23279SMel Gorman 			 * compaction in this zone, don't free any more.
2563e0c23279SMel Gorman 			 * Even though compaction is invoked for any
2564e0c23279SMel Gorman 			 * non-zero order, only frequent costly order
2565e0c23279SMel Gorman 			 * reclamation is disruptive enough to become a
2566c7cfa37bSCopot Alexandru 			 * noticeable problem, like transparent huge
2567c7cfa37bSCopot Alexandru 			 * page allocations.
2568e0887c19SRik van Riel 			 */
25690b06496aSJohannes Weiner 			if (IS_ENABLED(CONFIG_COMPACTION) &&
25700b06496aSJohannes Weiner 			    sc->order > PAGE_ALLOC_COSTLY_ORDER &&
25710b06496aSJohannes Weiner 			    zonelist_zone_idx(z) <= requested_highidx &&
2572b6459cc1SMichal Hocko 			    compaction_ready(zone, sc->order, requested_highidx)) {
25730b06496aSJohannes Weiner 				sc->compaction_ready = true;
2574e0887c19SRik van Riel 				continue;
2575e0887c19SRik van Riel 			}
25760b06496aSJohannes Weiner 
25770608f43dSAndrew Morton 			/*
25780608f43dSAndrew Morton 			 * This steals pages from memory cgroups over softlimit
25790608f43dSAndrew Morton 			 * and returns the number of reclaimed pages and
25800608f43dSAndrew Morton 			 * scanned pages. This works for global memory pressure
25810608f43dSAndrew Morton 			 * and balancing, not for a memcg's limit.
25820608f43dSAndrew Morton 			 */
25830608f43dSAndrew Morton 			nr_soft_scanned = 0;
25840608f43dSAndrew Morton 			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
25850608f43dSAndrew Morton 						sc->order, sc->gfp_mask,
25860608f43dSAndrew Morton 						&nr_soft_scanned);
25870608f43dSAndrew Morton 			sc->nr_reclaimed += nr_soft_reclaimed;
25880608f43dSAndrew Morton 			sc->nr_scanned += nr_soft_scanned;
2589ac34a1a3SKAMEZAWA Hiroyuki 			/* need some check to avoid calling shrink_zone() again */
2590ac34a1a3SKAMEZAWA Hiroyuki 		}
2591d149e3b2SYing Han 
25920a0337e0SMichal Hocko 		shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
25931da177e4SLinus Torvalds 	}
2594e0c23279SMel Gorman 
259565ec02cbSVladimir Davydov 	/*
2596619d0d76SWeijie Yang 	 * Restore to original mask to avoid the impact on the caller if we
2597619d0d76SWeijie Yang 	 * promoted it to __GFP_HIGHMEM.
2598619d0d76SWeijie Yang 	 */
2599619d0d76SWeijie Yang 	sc->gfp_mask = orig_mask;
26001da177e4SLinus Torvalds }
26011da177e4SLinus Torvalds 
26021da177e4SLinus Torvalds /*
26031da177e4SLinus Torvalds  * This is the main entry point to direct page reclaim.
26041da177e4SLinus Torvalds  *
26051da177e4SLinus Torvalds  * If a full scan of the inactive list fails to free enough memory then we
26061da177e4SLinus Torvalds  * are "out of memory" and something needs to be killed.
26071da177e4SLinus Torvalds  *
26081da177e4SLinus Torvalds  * If the caller is !__GFP_FS then the probability of a failure is reasonably
26091da177e4SLinus Torvalds  * high - the zone may be full of dirty or under-writeback pages, which this
26105b0830cbSJens Axboe  * caller can't do much about.  We kick the writeback threads and take explicit
26115b0830cbSJens Axboe  * naps in the hope that some of these pages can be written.  But if the
26125b0830cbSJens Axboe  * allocating task holds filesystem locks which prevent writeout this might not
26135b0830cbSJens Axboe  * work, and the allocation attempt will fail.
2614a41f24eaSNishanth Aravamudan  *
2615a41f24eaSNishanth Aravamudan  * returns:	0, if no pages reclaimed
2616a41f24eaSNishanth Aravamudan  * 		else, the number of pages reclaimed
26171da177e4SLinus Torvalds  */
2618dac1d27bSMel Gorman static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
26193115cd91SVladimir Davydov 					  struct scan_control *sc)
26201da177e4SLinus Torvalds {
2621241994edSJohannes Weiner 	int initial_priority = sc->priority;
262269e05944SAndrew Morton 	unsigned long total_scanned = 0;
262322fba335SKOSAKI Motohiro 	unsigned long writeback_threshold;
2624241994edSJohannes Weiner retry:
2625873b4771SKeika Kobayashi 	delayacct_freepages_start();
2626873b4771SKeika Kobayashi 
262789b5fae5SJohannes Weiner 	if (global_reclaim(sc))
2628f8891e5eSChristoph Lameter 		count_vm_event(ALLOCSTALL);
26291da177e4SLinus Torvalds 
26309e3b2f8cSKonstantin Khlebnikov 	do {
263170ddf637SAnton Vorontsov 		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
263270ddf637SAnton Vorontsov 				sc->priority);
263366e1707bSBalbir Singh 		sc->nr_scanned = 0;
26340a0337e0SMichal Hocko 		shrink_zones(zonelist, sc);
2635e0c23279SMel Gorman 
263666e1707bSBalbir Singh 		total_scanned += sc->nr_scanned;
2637bb21c7ceSKOSAKI Motohiro 		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
26380b06496aSJohannes Weiner 			break;
26390b06496aSJohannes Weiner 
26400b06496aSJohannes Weiner 		if (sc->compaction_ready)
26410b06496aSJohannes Weiner 			break;
26421da177e4SLinus Torvalds 
26431da177e4SLinus Torvalds 		/*
26440e50ce3bSMinchan Kim 		 * If we're having trouble reclaiming, start doing
26450e50ce3bSMinchan Kim 		 * writepage even in laptop mode.
26460e50ce3bSMinchan Kim 		 */
26470e50ce3bSMinchan Kim 		if (sc->priority < DEF_PRIORITY - 2)
26480e50ce3bSMinchan Kim 			sc->may_writepage = 1;
26490e50ce3bSMinchan Kim 
26500e50ce3bSMinchan Kim 		/*
26511da177e4SLinus Torvalds 		 * Try to write back as many pages as we just scanned.  This
26521da177e4SLinus Torvalds 		 * tends to cause slow streaming writers to write data to the
26531da177e4SLinus Torvalds 		 * disk smoothly, at the dirtying rate, which is nice.   But
26541da177e4SLinus Torvalds 		 * that's undesirable in laptop mode, where we *want* lumpy
26551da177e4SLinus Torvalds 		 * writeout.  So in laptop mode, write out the whole world.
26561da177e4SLinus Torvalds 		 */
265722fba335SKOSAKI Motohiro 		writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
265822fba335SKOSAKI Motohiro 		if (total_scanned > writeback_threshold) {
26590e175a18SCurt Wohlgemuth 			wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
26600e175a18SCurt Wohlgemuth 						WB_REASON_TRY_TO_FREE_PAGES);
266166e1707bSBalbir Singh 			sc->may_writepage = 1;
26621da177e4SLinus Torvalds 		}
26630b06496aSJohannes Weiner 	} while (--sc->priority >= 0);
2664bb21c7ceSKOSAKI Motohiro 
2665873b4771SKeika Kobayashi 	delayacct_freepages_end();
2666873b4771SKeika Kobayashi 
2667bb21c7ceSKOSAKI Motohiro 	if (sc->nr_reclaimed)
2668bb21c7ceSKOSAKI Motohiro 		return sc->nr_reclaimed;
2669bb21c7ceSKOSAKI Motohiro 
26700cee34fdSMel Gorman 	/* Aborted reclaim to try compaction? don't OOM, then */
26710b06496aSJohannes Weiner 	if (sc->compaction_ready)
26727335084dSMel Gorman 		return 1;
26737335084dSMel Gorman 
2674241994edSJohannes Weiner 	/* Untapped cgroup reserves?  Don't OOM, retry. */
2675241994edSJohannes Weiner 	if (!sc->may_thrash) {
2676241994edSJohannes Weiner 		sc->priority = initial_priority;
2677241994edSJohannes Weiner 		sc->may_thrash = 1;
2678241994edSJohannes Weiner 		goto retry;
2679241994edSJohannes Weiner 	}
2680241994edSJohannes Weiner 
2681bb21c7ceSKOSAKI Motohiro 	return 0;
26821da177e4SLinus Torvalds }
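
/*
 * Editorial sketch, not part of vmscan.c: the writeback kick used in
 * do_try_to_free_pages().  Once the total number of scanned pages exceeds
 * 1.5x the reclaim target, the flusher threads are woken and writepage is
 * enabled for subsequent passes.  Numbers are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned long nr_to_reclaim = 32;	/* SWAP_CLUSTER_MAX */
	unsigned long total_scanned = 60;

	unsigned long writeback_threshold = nr_to_reclaim + nr_to_reclaim / 2;

	printf("threshold=%lu, wake flusher threads: %s\n",
	       writeback_threshold,
	       total_scanned > writeback_threshold ? "yes" : "no");
	return 0;
}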
26831da177e4SLinus Torvalds 
26845515061dSMel Gorman static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
26855515061dSMel Gorman {
26865515061dSMel Gorman 	struct zone *zone;
26875515061dSMel Gorman 	unsigned long pfmemalloc_reserve = 0;
26885515061dSMel Gorman 	unsigned long free_pages = 0;
26895515061dSMel Gorman 	int i;
26905515061dSMel Gorman 	bool wmark_ok;
26915515061dSMel Gorman 
26925515061dSMel Gorman 	for (i = 0; i <= ZONE_NORMAL; i++) {
26935515061dSMel Gorman 		zone = &pgdat->node_zones[i];
2694f012a84aSNishanth Aravamudan 		if (!populated_zone(zone) ||
2695f012a84aSNishanth Aravamudan 		    zone_reclaimable_pages(zone) == 0)
2696675becceSMel Gorman 			continue;
2697675becceSMel Gorman 
26985515061dSMel Gorman 		pfmemalloc_reserve += min_wmark_pages(zone);
26995515061dSMel Gorman 		free_pages += zone_page_state(zone, NR_FREE_PAGES);
27005515061dSMel Gorman 	}
27015515061dSMel Gorman 
2702675becceSMel Gorman 	/* If there are no reserves (unexpected config) then do not throttle */
2703675becceSMel Gorman 	if (!pfmemalloc_reserve)
2704675becceSMel Gorman 		return true;
2705675becceSMel Gorman 
27065515061dSMel Gorman 	wmark_ok = free_pages > pfmemalloc_reserve / 2;
27075515061dSMel Gorman 
27085515061dSMel Gorman 	/* kswapd must be awake if processes are being throttled */
27095515061dSMel Gorman 	if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
27105515061dSMel Gorman 		pgdat->classzone_idx = min(pgdat->classzone_idx,
27115515061dSMel Gorman 						(enum zone_type)ZONE_NORMAL);
27125515061dSMel Gorman 		wake_up_interruptible(&pgdat->kswapd_wait);
27135515061dSMel Gorman 	}
27145515061dSMel Gorman 
27155515061dSMel Gorman 	return wmark_ok;
27165515061dSMel Gorman }
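
/*
 * Editorial sketch, not part of vmscan.c: the pfmemalloc throttling test
 * above in isolation.  Direct reclaimers get throttled once the free pages
 * summed over the usable (<= ZONE_NORMAL) zones fall below half of the
 * summed min watermarks.  Zone values below are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* min watermark and free pages per usable zone: DMA, DMA32, NORMAL */
	unsigned long min_wmark[] = { 64, 4096, 8192 };
	unsigned long free_now[]  = { 50, 2000, 3500 };
	unsigned long pfmemalloc_reserve = 0, free_pages = 0;
	int i;

	for (i = 0; i < 3; i++) {
		pfmemalloc_reserve += min_wmark[i];
		free_pages += free_now[i];
	}

	bool wmark_ok = free_pages > pfmemalloc_reserve / 2;

	printf("reserve=%lu free=%lu throttle=%s\n",
	       pfmemalloc_reserve, free_pages, wmark_ok ? "no" : "yes");
	return 0;
}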
27175515061dSMel Gorman 
27185515061dSMel Gorman /*
27195515061dSMel Gorman  * Throttle direct reclaimers if backing storage is backed by the network
27205515061dSMel Gorman  * and the PFMEMALLOC reserve for the preferred node is getting dangerously
27215515061dSMel Gorman  * depleted. kswapd will continue to make progress and wake the processes
272250694c28SMel Gorman  * when the low watermark is reached.
272350694c28SMel Gorman  *
272450694c28SMel Gorman  * Returns true if a fatal signal was delivered during throttling. If this
272550694c28SMel Gorman  * happens, the page allocator should not consider triggering the OOM killer.
27265515061dSMel Gorman  */
272750694c28SMel Gorman static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
27285515061dSMel Gorman 					nodemask_t *nodemask)
27295515061dSMel Gorman {
2730675becceSMel Gorman 	struct zoneref *z;
27315515061dSMel Gorman 	struct zone *zone;
2732675becceSMel Gorman 	pg_data_t *pgdat = NULL;
27335515061dSMel Gorman 
27345515061dSMel Gorman 	/*
27355515061dSMel Gorman 	 * Kernel threads should not be throttled as they may be indirectly
27365515061dSMel Gorman 	 * responsible for cleaning pages necessary for reclaim to make forward
27375515061dSMel Gorman 	 * progress. kjournald for example may enter direct reclaim while
27385515061dSMel Gorman 	 * committing a transaction where throttling it could force other
27395515061dSMel Gorman 	 * processes to block on log_wait_commit().
27405515061dSMel Gorman 	 */
27415515061dSMel Gorman 	if (current->flags & PF_KTHREAD)
274250694c28SMel Gorman 		goto out;
274350694c28SMel Gorman 
274450694c28SMel Gorman 	/*
274550694c28SMel Gorman 	 * If a fatal signal is pending, this process should not throttle.
274650694c28SMel Gorman 	 * It should return quickly so it can exit and free its memory
274750694c28SMel Gorman 	 */
274850694c28SMel Gorman 	if (fatal_signal_pending(current))
274950694c28SMel Gorman 		goto out;
27505515061dSMel Gorman 
2751675becceSMel Gorman 	/*
2752675becceSMel Gorman 	 * Check if the pfmemalloc reserves are ok by finding the first node
2753675becceSMel Gorman 	 * with a usable ZONE_NORMAL or lower zone. The expectation is that
2754675becceSMel Gorman 	 * GFP_KERNEL will be required for allocating network buffers when
2755675becceSMel Gorman 	 * swapping over the network so ZONE_HIGHMEM is unusable.
2756675becceSMel Gorman 	 *
2757675becceSMel Gorman 	 * Throttling is based on the first usable node and throttled processes
2758675becceSMel Gorman 	 * wait on a queue until kswapd makes progress and wakes them. There
2759675becceSMel Gorman 	 * is an affinity then between processes waking up and where reclaim
2760675becceSMel Gorman 	 * progress has been made assuming the process wakes on the same node.
2761675becceSMel Gorman 	 * More importantly, processes running on remote nodes will not compete
2762675becceSMel Gorman 	 * for remote pfmemalloc reserves and processes on different nodes
2763675becceSMel Gorman 	 * should make reasonable progress.
2764675becceSMel Gorman 	 */
2765675becceSMel Gorman 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
276617636faaSMichael S. Tsirkin 					gfp_zone(gfp_mask), nodemask) {
2767675becceSMel Gorman 		if (zone_idx(zone) > ZONE_NORMAL)
2768675becceSMel Gorman 			continue;
2769675becceSMel Gorman 
2770675becceSMel Gorman 		/* Throttle based on the first usable node */
27715515061dSMel Gorman 		pgdat = zone->zone_pgdat;
27725515061dSMel Gorman 		if (pfmemalloc_watermark_ok(pgdat))
277350694c28SMel Gorman 			goto out;
2774675becceSMel Gorman 		break;
2775675becceSMel Gorman 	}
2776675becceSMel Gorman 
2777675becceSMel Gorman 	/* If no zone was usable by the allocation flags then do not throttle */
2778675becceSMel Gorman 	if (!pgdat)
2779675becceSMel Gorman 		goto out;
27805515061dSMel Gorman 
278168243e76SMel Gorman 	/* Account for the throttling */
278268243e76SMel Gorman 	count_vm_event(PGSCAN_DIRECT_THROTTLE);
278368243e76SMel Gorman 
27845515061dSMel Gorman 	/*
27855515061dSMel Gorman 	 * If the caller cannot enter the filesystem, it's possible that it
27865515061dSMel Gorman 	 * is due to the caller holding an FS lock or performing a journal
27875515061dSMel Gorman 	 * transaction in the case of a filesystem like ext[3|4]. In this case,
27885515061dSMel Gorman 	 * it is not safe to block on pfmemalloc_wait as kswapd could be
27895515061dSMel Gorman 	 * blocked waiting on the same lock. Instead, throttle for up to a
27905515061dSMel Gorman 	 * second before continuing.
27915515061dSMel Gorman 	 */
27925515061dSMel Gorman 	if (!(gfp_mask & __GFP_FS)) {
27935515061dSMel Gorman 		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
27945515061dSMel Gorman 			pfmemalloc_watermark_ok(pgdat), HZ);
279550694c28SMel Gorman 
279650694c28SMel Gorman 		goto check_pending;
27975515061dSMel Gorman 	}
27985515061dSMel Gorman 
27995515061dSMel Gorman 	/* Throttle until kswapd wakes the process */
28005515061dSMel Gorman 	wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
28015515061dSMel Gorman 		pfmemalloc_watermark_ok(pgdat));
280250694c28SMel Gorman 
280350694c28SMel Gorman check_pending:
280450694c28SMel Gorman 	if (fatal_signal_pending(current))
280550694c28SMel Gorman 		return true;
280650694c28SMel Gorman 
280750694c28SMel Gorman out:
280850694c28SMel Gorman 	return false;
28095515061dSMel Gorman }
28105515061dSMel Gorman 
2811dac1d27bSMel Gorman unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2812327c0e96SKAMEZAWA Hiroyuki 				gfp_t gfp_mask, nodemask_t *nodemask)
281366e1707bSBalbir Singh {
281433906bc5SMel Gorman 	unsigned long nr_reclaimed;
281566e1707bSBalbir Singh 	struct scan_control sc = {
281622fba335SKOSAKI Motohiro 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
2817ee814fe2SJohannes Weiner 		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
2818ee814fe2SJohannes Weiner 		.order = order,
2819ee814fe2SJohannes Weiner 		.nodemask = nodemask,
2820ee814fe2SJohannes Weiner 		.priority = DEF_PRIORITY,
2821ee814fe2SJohannes Weiner 		.may_writepage = !laptop_mode,
2822a6dc60f8SJohannes Weiner 		.may_unmap = 1,
28232e2e4259SKOSAKI Motohiro 		.may_swap = 1,
282466e1707bSBalbir Singh 	};
282566e1707bSBalbir Singh 
28265515061dSMel Gorman 	/*
282750694c28SMel Gorman 	 * Do not enter reclaim if a fatal signal was delivered while throttled.
282850694c28SMel Gorman 	 * 1 is returned so that the page allocator does not OOM kill at this
282950694c28SMel Gorman 	 * point.
28305515061dSMel Gorman 	 */
283150694c28SMel Gorman 	if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
28325515061dSMel Gorman 		return 1;
28335515061dSMel Gorman 
283433906bc5SMel Gorman 	trace_mm_vmscan_direct_reclaim_begin(order,
283533906bc5SMel Gorman 				sc.may_writepage,
283633906bc5SMel Gorman 				gfp_mask);
283733906bc5SMel Gorman 
28383115cd91SVladimir Davydov 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
283933906bc5SMel Gorman 
284033906bc5SMel Gorman 	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
284133906bc5SMel Gorman 
284233906bc5SMel Gorman 	return nr_reclaimed;
284366e1707bSBalbir Singh }
284466e1707bSBalbir Singh 
2845c255a458SAndrew Morton #ifdef CONFIG_MEMCG
284666e1707bSBalbir Singh 
284772835c86SJohannes Weiner unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
28484e416953SBalbir Singh 						gfp_t gfp_mask, bool noswap,
28490ae5e89cSYing Han 						struct zone *zone,
28500ae5e89cSYing Han 						unsigned long *nr_scanned)
28514e416953SBalbir Singh {
28524e416953SBalbir Singh 	struct scan_control sc = {
2853b8f5c566SKOSAKI Motohiro 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
2854ee814fe2SJohannes Weiner 		.target_mem_cgroup = memcg,
28554e416953SBalbir Singh 		.may_writepage = !laptop_mode,
28564e416953SBalbir Singh 		.may_unmap = 1,
28574e416953SBalbir Singh 		.may_swap = !noswap,
28584e416953SBalbir Singh 	};
28596b4f7799SJohannes Weiner 	unsigned long lru_pages;
28600ae5e89cSYing Han 
28614e416953SBalbir Singh 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
28624e416953SBalbir Singh 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2863bdce6d9eSKOSAKI Motohiro 
28649e3b2f8cSKonstantin Khlebnikov 	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
2865bdce6d9eSKOSAKI Motohiro 						      sc.may_writepage,
2866bdce6d9eSKOSAKI Motohiro 						      sc.gfp_mask);
2867bdce6d9eSKOSAKI Motohiro 
28684e416953SBalbir Singh 	/*
28694e416953SBalbir Singh 	 * NOTE: Although we can get the priority field, using it
28704e416953SBalbir Singh 	 * here is not a good idea, since it limits the pages we can scan.
28714e416953SBalbir Singh 	 * If we don't reclaim here, the shrink_zone from balance_pgdat
28724e416953SBalbir Singh 	 * will pick up pages from other mem cgroups as well. We hack
28734e416953SBalbir Singh 	 * the priority and make it zero.
28744e416953SBalbir Singh 	 */
287533377678SVladimir Davydov 	shrink_zone_memcg(zone, memcg, &sc, &lru_pages);
2876bdce6d9eSKOSAKI Motohiro 
2877bdce6d9eSKOSAKI Motohiro 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2878bdce6d9eSKOSAKI Motohiro 
28790ae5e89cSYing Han 	*nr_scanned = sc.nr_scanned;
28804e416953SBalbir Singh 	return sc.nr_reclaimed;
28814e416953SBalbir Singh }
28824e416953SBalbir Singh 
288372835c86SJohannes Weiner unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
2884b70a2a21SJohannes Weiner 					   unsigned long nr_pages,
28858c7c6e34SKAMEZAWA Hiroyuki 					   gfp_t gfp_mask,
2886b70a2a21SJohannes Weiner 					   bool may_swap)
288766e1707bSBalbir Singh {
28884e416953SBalbir Singh 	struct zonelist *zonelist;
2889bdce6d9eSKOSAKI Motohiro 	unsigned long nr_reclaimed;
2890889976dbSYing Han 	int nid;
289166e1707bSBalbir Singh 	struct scan_control sc = {
2892b70a2a21SJohannes Weiner 		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
2893ee814fe2SJohannes Weiner 		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2894ee814fe2SJohannes Weiner 				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2895ee814fe2SJohannes Weiner 		.target_mem_cgroup = memcg,
2896ee814fe2SJohannes Weiner 		.priority = DEF_PRIORITY,
289766e1707bSBalbir Singh 		.may_writepage = !laptop_mode,
2898a6dc60f8SJohannes Weiner 		.may_unmap = 1,
2899b70a2a21SJohannes Weiner 		.may_swap = may_swap,
2900a09ed5e0SYing Han 	};
290166e1707bSBalbir Singh 
2902889976dbSYing Han 	/*
2903889976dbSYing Han 	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
2904889976dbSYing Han 	 * care which node the pages come from. So the node where we start the
2905889976dbSYing Han 	 * scan does not need to be the current node.
2906889976dbSYing Han 	 */
290772835c86SJohannes Weiner 	nid = mem_cgroup_select_victim_node(memcg);
2908889976dbSYing Han 
2909889976dbSYing Han 	zonelist = NODE_DATA(nid)->node_zonelists;
2910bdce6d9eSKOSAKI Motohiro 
2911bdce6d9eSKOSAKI Motohiro 	trace_mm_vmscan_memcg_reclaim_begin(0,
2912bdce6d9eSKOSAKI Motohiro 					    sc.may_writepage,
2913bdce6d9eSKOSAKI Motohiro 					    sc.gfp_mask);
2914bdce6d9eSKOSAKI Motohiro 
29153115cd91SVladimir Davydov 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2916bdce6d9eSKOSAKI Motohiro 
2917bdce6d9eSKOSAKI Motohiro 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2918bdce6d9eSKOSAKI Motohiro 
2919bdce6d9eSKOSAKI Motohiro 	return nr_reclaimed;
292066e1707bSBalbir Singh }
292166e1707bSBalbir Singh #endif
292266e1707bSBalbir Singh 
29239e3b2f8cSKonstantin Khlebnikov static void age_active_anon(struct zone *zone, struct scan_control *sc)
2924f16015fbSJohannes Weiner {
2925b95a2f2dSJohannes Weiner 	struct mem_cgroup *memcg;
2926b95a2f2dSJohannes Weiner 
2927b95a2f2dSJohannes Weiner 	if (!total_swap_pages)
2928b95a2f2dSJohannes Weiner 		return;
2929b95a2f2dSJohannes Weiner 
2930b95a2f2dSJohannes Weiner 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
2931b95a2f2dSJohannes Weiner 	do {
2932c56d5c7dSKonstantin Khlebnikov 		struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2933f16015fbSJohannes Weiner 
293459dc76b0SRik van Riel 		if (inactive_list_is_low(lruvec, false))
29351a93be0eSKonstantin Khlebnikov 			shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
29369e3b2f8cSKonstantin Khlebnikov 					   sc, LRU_ACTIVE_ANON);
2937b95a2f2dSJohannes Weiner 
2938b95a2f2dSJohannes Weiner 		memcg = mem_cgroup_iter(NULL, memcg, NULL);
2939b95a2f2dSJohannes Weiner 	} while (memcg);
2940f16015fbSJohannes Weiner }
2941f16015fbSJohannes Weiner 
2942accf6242SVlastimil Babka static bool zone_balanced(struct zone *zone, int order, bool highorder,
294360cefed4SJohannes Weiner 			unsigned long balance_gap, int classzone_idx)
294460cefed4SJohannes Weiner {
2945accf6242SVlastimil Babka 	unsigned long mark = high_wmark_pages(zone) + balance_gap;
294660cefed4SJohannes Weiner 
2947accf6242SVlastimil Babka 	/*
2948accf6242SVlastimil Babka 	 * When checking from pgdat_balanced(), kswapd should stop and sleep
2949accf6242SVlastimil Babka 	 * when it reaches the high order-0 watermark and let kcompactd take
2950accf6242SVlastimil Babka 	 * over. Other callers such as wakeup_kswapd() want to determine the
2951accf6242SVlastimil Babka 	 * true high-order watermark.
2952accf6242SVlastimil Babka 	 */
2953accf6242SVlastimil Babka 	if (IS_ENABLED(CONFIG_COMPACTION) && !highorder) {
2954accf6242SVlastimil Babka 		mark += (1UL << order);
2955accf6242SVlastimil Babka 		order = 0;
2956accf6242SVlastimil Babka 	}
295760cefed4SJohannes Weiner 
2958accf6242SVlastimil Babka 	return zone_watermark_ok_safe(zone, order, mark, classzone_idx);
295960cefed4SJohannes Weiner }
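
/*
 * Editorial sketch, not part of vmscan.c: how zone_balanced() converts a
 * high-order kswapd check into an order-0 check when compaction is
 * available.  Rather than requiring a free order-5 block, kswapd only
 * requires the high watermark plus 2^5 extra order-0 pages and lets
 * kcompactd assemble the contiguous block.  Numbers are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned long high_wmark = 12288, balance_gap = 0;
	int order = 5;

	unsigned long mark = high_wmark + balance_gap;

	/* The CONFIG_COMPACTION path above: fold the order into the mark. */
	mark += 1UL << order;
	order = 0;

	printf("kswapd can sleep once %lu order-%d pages are free\n",
	       mark, order);
	return 0;
}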
296060cefed4SJohannes Weiner 
29611741c877SMel Gorman /*
29624ae0a48bSZlatko Calusic  * pgdat_balanced() is used when checking if a node is balanced.
29634ae0a48bSZlatko Calusic  *
29644ae0a48bSZlatko Calusic  * For order-0, all zones must be balanced!
29654ae0a48bSZlatko Calusic  *
29664ae0a48bSZlatko Calusic  * For high-order allocations, only zones that meet the watermarks and are
29674ae0a48bSZlatko Calusic  * allowed by the caller's classzone_idx are added to balanced_pages. The
29684ae0a48bSZlatko Calusic  * total of balanced pages must be at least 25% of the zones allowed by
29694ae0a48bSZlatko Calusic  * classzone_idx for the node to be considered balanced. Forcing all zones to
29704ae0a48bSZlatko Calusic  * be balanced for high orders can cause excessive reclaim when there are
29714ae0a48bSZlatko Calusic  * imbalanced zones.
29721741c877SMel Gorman  * The choice of 25% is due to
29731741c877SMel Gorman  *   o a 16M DMA zone that is balanced will not balance a zone on any
29741741c877SMel Gorman  *     reasonably sized machine
29751741c877SMel Gorman  *   o On all other machines, the top zone must be at least a reasonable
297625985edcSLucas De Marchi  *     percentage of the middle zones. For example, on 32-bit x86, highmem
29771741c877SMel Gorman  *     would need to be at least 256M for it to balance a whole node.
29781741c877SMel Gorman  *     Similarly, on x86-64 the Normal zone would need to be at least 1G
29791741c877SMel Gorman  *     to balance a node on its own. These seemed like reasonable ratios.
29801741c877SMel Gorman  */
29814ae0a48bSZlatko Calusic static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
29821741c877SMel Gorman {
2983b40da049SJiang Liu 	unsigned long managed_pages = 0;
29844ae0a48bSZlatko Calusic 	unsigned long balanced_pages = 0;
29851741c877SMel Gorman 	int i;
29861741c877SMel Gorman 
29874ae0a48bSZlatko Calusic 	/* Check the watermark levels */
29884ae0a48bSZlatko Calusic 	for (i = 0; i <= classzone_idx; i++) {
29894ae0a48bSZlatko Calusic 		struct zone *zone = pgdat->node_zones + i;
29901741c877SMel Gorman 
29914ae0a48bSZlatko Calusic 		if (!populated_zone(zone))
29924ae0a48bSZlatko Calusic 			continue;
29934ae0a48bSZlatko Calusic 
2994b40da049SJiang Liu 		managed_pages += zone->managed_pages;
29954ae0a48bSZlatko Calusic 
29964ae0a48bSZlatko Calusic 		/*
29974ae0a48bSZlatko Calusic 		 * A special case here:
29984ae0a48bSZlatko Calusic 		 *
29994ae0a48bSZlatko Calusic 		 * balance_pgdat() skips over all_unreclaimable after
30004ae0a48bSZlatko Calusic 		 * DEF_PRIORITY. Effectively, it considers them balanced so
30014ae0a48bSZlatko Calusic 		 * they must be considered balanced here as well!
30024ae0a48bSZlatko Calusic 		 */
30036e543d57SLisa Du 		if (!zone_reclaimable(zone)) {
3004b40da049SJiang Liu 			balanced_pages += zone->managed_pages;
30054ae0a48bSZlatko Calusic 			continue;
30064ae0a48bSZlatko Calusic 		}
30074ae0a48bSZlatko Calusic 
3008accf6242SVlastimil Babka 		if (zone_balanced(zone, order, false, 0, i))
3009b40da049SJiang Liu 			balanced_pages += zone->managed_pages;
30104ae0a48bSZlatko Calusic 		else if (!order)
30114ae0a48bSZlatko Calusic 			return false;
30124ae0a48bSZlatko Calusic 	}
30134ae0a48bSZlatko Calusic 
30144ae0a48bSZlatko Calusic 	if (order)
3015b40da049SJiang Liu 		return balanced_pages >= (managed_pages >> 2);
30164ae0a48bSZlatko Calusic 	else
30174ae0a48bSZlatko Calusic 		return true;
30181741c877SMel Gorman }
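
/*
 * Editorial sketch, not part of vmscan.c: the 25% rule from
 * pgdat_balanced() for a high-order wakeup.  The node counts as balanced
 * when the managed pages of the zones meeting their watermarks add up to
 * at least a quarter of all managed pages up to classzone_idx, so one
 * imbalanced (usually large) zone does not keep kswapd awake forever.
 * Zone sizes below are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* managed pages per zone and whether each met its watermark */
	unsigned long managed[]  = { 4096, 262144, 786432 };	/* DMA, DMA32, NORMAL */
	bool zone_is_balanced[]  = { true, true, false };
	unsigned long managed_pages = 0, balanced_pages = 0;
	int i;

	for (i = 0; i < 3; i++) {
		managed_pages += managed[i];
		if (zone_is_balanced[i])
			balanced_pages += managed[i];
	}

	printf("high-order balanced: %s (%lu of %lu pages)\n",
	       balanced_pages >= (managed_pages >> 2) ? "yes" : "no",
	       balanced_pages, managed_pages);
	return 0;
}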
30191741c877SMel Gorman 
30205515061dSMel Gorman /*
30215515061dSMel Gorman  * Prepare kswapd for sleeping. This verifies that there are no processes
30225515061dSMel Gorman  * waiting in throttle_direct_reclaim() and that watermarks have been met.
30235515061dSMel Gorman  *
30245515061dSMel Gorman  * Returns true if kswapd is ready to sleep
30255515061dSMel Gorman  */
30265515061dSMel Gorman static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
3027dc83edd9SMel Gorman 					int classzone_idx)
3028f50de2d3SMel Gorman {
3029f50de2d3SMel Gorman 	/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
3030f50de2d3SMel Gorman 	if (remaining)
30315515061dSMel Gorman 		return false;
30325515061dSMel Gorman 
30335515061dSMel Gorman 	/*
30349e5e3661SVlastimil Babka 	 * The throttled processes are normally woken up in balance_pgdat() as
30359e5e3661SVlastimil Babka 	 * soon as pfmemalloc_watermark_ok() is true. But there is a potential
30369e5e3661SVlastimil Babka 	 * race between when kswapd checks the watermarks and a process gets
30379e5e3661SVlastimil Babka 	 * throttled. There is also a potential race if processes get
30389e5e3661SVlastimil Babka 	 * throttled, kswapd wakes, a large process exits thereby balancing the
30399e5e3661SVlastimil Babka 	 * zones, which causes kswapd to exit balance_pgdat() before reaching
30409e5e3661SVlastimil Babka 	 * the wake up checks. If kswapd is going to sleep, no process should
30419e5e3661SVlastimil Babka 	 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
30429e5e3661SVlastimil Babka 	 * the wake up is premature, processes will wake kswapd and get
30439e5e3661SVlastimil Babka 	 * throttled again. The difference from wake ups in balance_pgdat() is
30449e5e3661SVlastimil Babka 	 * that here we are under prepare_to_wait().
30455515061dSMel Gorman 	 */
30469e5e3661SVlastimil Babka 	if (waitqueue_active(&pgdat->pfmemalloc_wait))
30479e5e3661SVlastimil Babka 		wake_up_all(&pgdat->pfmemalloc_wait);
3048f50de2d3SMel Gorman 
30494ae0a48bSZlatko Calusic 	return pgdat_balanced(pgdat, order, classzone_idx);
3050f50de2d3SMel Gorman }
3051f50de2d3SMel Gorman 
30521da177e4SLinus Torvalds /*
305375485363SMel Gorman  * kswapd shrinks the zone by the number of pages required to reach
305475485363SMel Gorman  * the high watermark.
3055b8e83b94SMel Gorman  *
3056b8e83b94SMel Gorman  * Returns true if kswapd scanned at least the requested number of pages to
3057283aba9fSMel Gorman  * reclaim or if the lack of progress was due to pages under writeback.
3058283aba9fSMel Gorman  * This is used to determine if the scanning priority needs to be raised.
305975485363SMel Gorman  */
3060b8e83b94SMel Gorman static bool kswapd_shrink_zone(struct zone *zone,
30617c954f6dSMel Gorman 			       int classzone_idx,
3062accf6242SVlastimil Babka 			       struct scan_control *sc)
306375485363SMel Gorman {
30647c954f6dSMel Gorman 	unsigned long balance_gap;
30657c954f6dSMel Gorman 	bool lowmem_pressure;
306675485363SMel Gorman 
306775485363SMel Gorman 	/* Reclaim above the high watermark. */
306875485363SMel Gorman 	sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
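	/*
	 * The per-pass target is therefore at least SWAP_CLUSTER_MAX pages
	 * (typically 32) or the zone's high watermark, whichever is larger.
	 */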
30697c954f6dSMel Gorman 
30707c954f6dSMel Gorman 	/*
30717c954f6dSMel Gorman 	 * We put equal pressure on every zone, unless one zone has way too
30727c954f6dSMel Gorman 	 * many pages free already. The "too many pages" is defined as the
30737c954f6dSMel Gorman 	 * high wmark plus a "gap" where the gap is either the low
30747c954f6dSMel Gorman 	 * watermark or 1% of the zone, whichever is smaller.
30757c954f6dSMel Gorman 	 */
30764be89a34SJianyu Zhan 	balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
30774be89a34SJianyu Zhan 			zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
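	/*
	 * Illustration, assuming KSWAPD_ZONE_BALANCE_GAP_RATIO is 100 as the
	 * "1%" above implies: for a zone with 1,000,000 managed pages the gap
	 * is min(low_wmark_pages(zone), 10,000) pages on top of the high
	 * watermark before the zone is treated as having "too many" free pages.
	 */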
30787c954f6dSMel Gorman 
30797c954f6dSMel Gorman 	/*
30807c954f6dSMel Gorman 	 * If there is no low memory pressure or the zone is balanced then no
30817c954f6dSMel Gorman 	 * reclaim is necessary
30827c954f6dSMel Gorman 	 */
30837c954f6dSMel Gorman 	lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
3084accf6242SVlastimil Babka 	if (!lowmem_pressure && zone_balanced(zone, sc->order, false,
30857c954f6dSMel Gorman 						balance_gap, classzone_idx))
30867c954f6dSMel Gorman 		return true;
30877c954f6dSMel Gorman 
30886b4f7799SJohannes Weiner 	shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
308975485363SMel Gorman 
309057054651SJohannes Weiner 	clear_bit(ZONE_WRITEBACK, &zone->flags);
3091283aba9fSMel Gorman 
30927c954f6dSMel Gorman 	/*
30937c954f6dSMel Gorman 	 * If a zone reaches its high watermark, consider it to be no longer
30947c954f6dSMel Gorman 	 * congested. It's possible there are dirty pages backed by congested
30957c954f6dSMel Gorman 	 * BDIs but as pressure is relieved, speculatively avoid congestion
30967c954f6dSMel Gorman 	 * waits.
30977c954f6dSMel Gorman 	 */
30986e543d57SLisa Du 	if (zone_reclaimable(zone) &&
3099accf6242SVlastimil Babka 	    zone_balanced(zone, sc->order, false, 0, classzone_idx)) {
310057054651SJohannes Weiner 		clear_bit(ZONE_CONGESTED, &zone->flags);
310157054651SJohannes Weiner 		clear_bit(ZONE_DIRTY, &zone->flags);
31027c954f6dSMel Gorman 	}
31037c954f6dSMel Gorman 
3104b8e83b94SMel Gorman 	return sc->nr_scanned >= sc->nr_to_reclaim;
310575485363SMel Gorman }
310675485363SMel Gorman 
310775485363SMel Gorman /*
31081da177e4SLinus Torvalds  * For kswapd, balance_pgdat() will work across all this node's zones until
310941858966SMel Gorman  * they are all at high_wmark_pages(zone).
31101da177e4SLinus Torvalds  *
3111accf6242SVlastimil Babka  * Returns the highest zone idx kswapd was reclaiming at
31121da177e4SLinus Torvalds  *
31131da177e4SLinus Torvalds  * There is special handling here for zones which are full of pinned pages.
31141da177e4SLinus Torvalds  * This can happen if the pages are all mlocked, or if they are all used by
31151da177e4SLinus Torvalds  * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
31161da177e4SLinus Torvalds  * What we do is to detect the case where all pages in the zone have been
31171da177e4SLinus Torvalds  * scanned twice and there has been zero successful reclaim.  Mark the zone as
31181da177e4SLinus Torvalds  * dead and from now on, only perform a short scan.  Basically we're polling
31191da177e4SLinus Torvalds  * the zone for when the problem goes away.
31201da177e4SLinus Torvalds  *
31211da177e4SLinus Torvalds  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
312241858966SMel Gorman  * zones which have free_pages > high_wmark_pages(zone), but once a zone is
312341858966SMel Gorman  * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
312441858966SMel Gorman  * lower zones regardless of the number of free pages in the lower zones. This
312541858966SMel Gorman  * interoperates with the page allocator fallback scheme to ensure that aging
312641858966SMel Gorman  * of pages is balanced across the zones.
31271da177e4SLinus Torvalds  */
3128accf6242SVlastimil Babka static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
31291da177e4SLinus Torvalds {
31301da177e4SLinus Torvalds 	int i;
313199504748SMel Gorman 	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
31320608f43dSAndrew Morton 	unsigned long nr_soft_reclaimed;
31330608f43dSAndrew Morton 	unsigned long nr_soft_scanned;
3134179e9639SAndrew Morton 	struct scan_control sc = {
3135179e9639SAndrew Morton 		.gfp_mask = GFP_KERNEL,
3136ee814fe2SJohannes Weiner 		.order = order,
3137b8e83b94SMel Gorman 		.priority = DEF_PRIORITY,
3138ee814fe2SJohannes Weiner 		.may_writepage = !laptop_mode,
3139a6dc60f8SJohannes Weiner 		.may_unmap = 1,
31402e2e4259SKOSAKI Motohiro 		.may_swap = 1,
3141179e9639SAndrew Morton 	};
3142f8891e5eSChristoph Lameter 	count_vm_event(PAGEOUTRUN);
31431da177e4SLinus Torvalds 
31449e3b2f8cSKonstantin Khlebnikov 	do {
3145b8e83b94SMel Gorman 		bool raise_priority = true;
3146b8e83b94SMel Gorman 
3147b8e83b94SMel Gorman 		sc.nr_reclaimed = 0;
31481da177e4SLinus Torvalds 
31491da177e4SLinus Torvalds 		/*
31501da177e4SLinus Torvalds 		 * Scan in the highmem->dma direction for the highest
31511da177e4SLinus Torvalds 		 * zone which needs scanning
31521da177e4SLinus Torvalds 		 */
31531da177e4SLinus Torvalds 		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
31541da177e4SLinus Torvalds 			struct zone *zone = pgdat->node_zones + i;
31551da177e4SLinus Torvalds 
3156f3fe6512SCon Kolivas 			if (!populated_zone(zone))
31571da177e4SLinus Torvalds 				continue;
31581da177e4SLinus Torvalds 
31596e543d57SLisa Du 			if (sc.priority != DEF_PRIORITY &&
31606e543d57SLisa Du 			    !zone_reclaimable(zone))
31611da177e4SLinus Torvalds 				continue;
31621da177e4SLinus Torvalds 
3163556adecbSRik van Riel 			/*
3164556adecbSRik van Riel 			 * Do some background aging of the anon list, to give
3165556adecbSRik van Riel 			 * pages a chance to be referenced before reclaiming.
3166556adecbSRik van Riel 			 */
31679e3b2f8cSKonstantin Khlebnikov 			age_active_anon(zone, &sc);
3168556adecbSRik van Riel 
3169cc715d99SMel Gorman 			/*
3170cc715d99SMel Gorman 			 * If the number of buffer_heads in the machine
3171cc715d99SMel Gorman 			 * exceeds the maximum allowed level and this node
3172cc715d99SMel Gorman 			 * has a highmem zone, force kswapd to reclaim from
3173cc715d99SMel Gorman 			 * it to relieve lowmem pressure.
3174cc715d99SMel Gorman 			 */
3175cc715d99SMel Gorman 			if (buffer_heads_over_limit && is_highmem_idx(i)) {
3176cc715d99SMel Gorman 				end_zone = i;
3177cc715d99SMel Gorman 				break;
3178cc715d99SMel Gorman 			}
3179cc715d99SMel Gorman 
3180accf6242SVlastimil Babka 			if (!zone_balanced(zone, order, false, 0, 0)) {
31811da177e4SLinus Torvalds 				end_zone = i;
3182e1dbeda6SAndrew Morton 				break;
3183439423f6SShaohua Li 			} else {
3184d43006d5SMel Gorman 				/*
3185d43006d5SMel Gorman 				 * If balanced, clear the dirty and congested
3186d43006d5SMel Gorman 				 * flags
3187d43006d5SMel Gorman 				 */
318857054651SJohannes Weiner 				clear_bit(ZONE_CONGESTED, &zone->flags);
318957054651SJohannes Weiner 				clear_bit(ZONE_DIRTY, &zone->flags);
31901da177e4SLinus Torvalds 			}
31911da177e4SLinus Torvalds 		}
3192dafcb73eSZlatko Calusic 
3193b8e83b94SMel Gorman 		if (i < 0)
31941da177e4SLinus Torvalds 			goto out;
3195e1dbeda6SAndrew Morton 
31961da177e4SLinus Torvalds 		/*
3197b7ea3c41SMel Gorman 		 * If we're having trouble reclaiming, start doing writepage
3198b7ea3c41SMel Gorman 		 * even in laptop mode.
3199b7ea3c41SMel Gorman 		 */
3200b7ea3c41SMel Gorman 		if (sc.priority < DEF_PRIORITY - 2)
3201b7ea3c41SMel Gorman 			sc.may_writepage = 1;
3202b7ea3c41SMel Gorman 
3203b7ea3c41SMel Gorman 		/*
32041da177e4SLinus Torvalds 		 * Now scan the zone in the dma->highmem direction, stopping
32051da177e4SLinus Torvalds 		 * at the last zone which needs scanning.
32061da177e4SLinus Torvalds 		 *
32071da177e4SLinus Torvalds 		 * We do this because the page allocator works in the opposite
32081da177e4SLinus Torvalds 		 * direction.  This prevents the page allocator from allocating
32091da177e4SLinus Torvalds 		 * pages behind kswapd's direction of progress, which would
32101da177e4SLinus Torvalds 		 * cause too much scanning of the lower zones.
32111da177e4SLinus Torvalds 		 */
32121da177e4SLinus Torvalds 		for (i = 0; i <= end_zone; i++) {
32131da177e4SLinus Torvalds 			struct zone *zone = pgdat->node_zones + i;
32141da177e4SLinus Torvalds 
3215f3fe6512SCon Kolivas 			if (!populated_zone(zone))
32161da177e4SLinus Torvalds 				continue;
32171da177e4SLinus Torvalds 
32186e543d57SLisa Du 			if (sc.priority != DEF_PRIORITY &&
32196e543d57SLisa Du 			    !zone_reclaimable(zone))
32201da177e4SLinus Torvalds 				continue;
32211da177e4SLinus Torvalds 
32221da177e4SLinus Torvalds 			sc.nr_scanned = 0;
32234e416953SBalbir Singh 
32240608f43dSAndrew Morton 			nr_soft_scanned = 0;
32250608f43dSAndrew Morton 			/*
32260608f43dSAndrew Morton 			 * Call soft limit reclaim before calling shrink_zone.
32270608f43dSAndrew Morton 			 */
32280608f43dSAndrew Morton 			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
32290608f43dSAndrew Morton 							order, sc.gfp_mask,
32300608f43dSAndrew Morton 							&nr_soft_scanned);
32310608f43dSAndrew Morton 			sc.nr_reclaimed += nr_soft_reclaimed;
32320608f43dSAndrew Morton 
323332a4330dSRik van Riel 			/*
32347c954f6dSMel Gorman 			 * There should be no need to raise the scanning
32357c954f6dSMel Gorman 			 * priority if enough pages are already being scanned
32367c954f6dSMel Gorman 			 * that the high watermark would be met at 100%
32377c954f6dSMel Gorman 			 * efficiency.
323832a4330dSRik van Riel 			 */
3239accf6242SVlastimil Babka 			if (kswapd_shrink_zone(zone, end_zone, &sc))
3240b8e83b94SMel Gorman 				raise_priority = false;
3241b8e83b94SMel Gorman 		}
3242d7868daeSMel Gorman 
32435515061dSMel Gorman 		/*
32445515061dSMel Gorman 		 * If the low watermark is met there is no need for processes
32455515061dSMel Gorman 		 * to be throttled on pfmemalloc_wait as they can now safely
32465515061dSMel Gorman 		 * make forward progress. Wake them
32475515061dSMel Gorman 		 */
32485515061dSMel Gorman 		if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
32495515061dSMel Gorman 				pfmemalloc_watermark_ok(pgdat))
3250cfc51155SVlastimil Babka 			wake_up_all(&pgdat->pfmemalloc_wait);
32515515061dSMel Gorman 
3252b8e83b94SMel Gorman 		/* Check if kswapd should be suspending */
3253b8e83b94SMel Gorman 		if (try_to_freeze() || kthread_should_stop())
3254b8e83b94SMel Gorman 			break;
3255b8e83b94SMel Gorman 
3256b8e83b94SMel Gorman 		/*
3257b8e83b94SMel Gorman 		 * Raise priority if scanning rate is too low or there was no
3258b8e83b94SMel Gorman 		 * progress in reclaiming pages
3259b8e83b94SMel Gorman 		 */
3260b8e83b94SMel Gorman 		if (raise_priority || !sc.nr_reclaimed)
3261b8e83b94SMel Gorman 			sc.priority--;
32629aa41348SMel Gorman 	} while (sc.priority >= 1 &&
3263accf6242SVlastimil Babka 			!pgdat_balanced(pgdat, order, classzone_idx));
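	/*
	 * Note: sc.priority starts at DEF_PRIORITY (typically 12) and is only
	 * lowered when a pass scanned too little or reclaimed nothing, so
	 * reclaim pressure ramps up gradually until the node is balanced or
	 * the priority bottoms out at 1.
	 */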
32641da177e4SLinus Torvalds 
3265b8e83b94SMel Gorman out:
32660abdee2bSMel Gorman 	/*
3267accf6242SVlastimil Babka 	 * Return the highest zone idx we were reclaiming at so
3268accf6242SVlastimil Babka 	 * prepare_kswapd_sleep() makes the same decisions as here.
32690abdee2bSMel Gorman 	 */
3270accf6242SVlastimil Babka 	return end_zone;
32711da177e4SLinus Torvalds }
32721da177e4SLinus Torvalds 
3273accf6242SVlastimil Babka static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
3274accf6242SVlastimil Babka 				int classzone_idx, int balanced_classzone_idx)
3275f0bc0a60SKOSAKI Motohiro {
3276f0bc0a60SKOSAKI Motohiro 	long remaining = 0;
3277f0bc0a60SKOSAKI Motohiro 	DEFINE_WAIT(wait);
3278f0bc0a60SKOSAKI Motohiro 
3279f0bc0a60SKOSAKI Motohiro 	if (freezing(current) || kthread_should_stop())
3280f0bc0a60SKOSAKI Motohiro 		return;
3281f0bc0a60SKOSAKI Motohiro 
3282f0bc0a60SKOSAKI Motohiro 	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3283f0bc0a60SKOSAKI Motohiro 
3284f0bc0a60SKOSAKI Motohiro 	/* Try to sleep for a short interval */
3285accf6242SVlastimil Babka 	if (prepare_kswapd_sleep(pgdat, order, remaining,
3286accf6242SVlastimil Babka 						balanced_classzone_idx)) {
3287fd901c95SVlastimil Babka 		/*
3288fd901c95SVlastimil Babka 		 * Compaction records what page blocks it recently failed to
3289fd901c95SVlastimil Babka 		 * isolate pages from and skips them in the future scanning.
3290fd901c95SVlastimil Babka 		 * When kswapd is going to sleep, it is reasonable to assume
3291fd901c95SVlastimil Babka 		 * that page isolation and compaction may succeed, so reset the cache.
3292fd901c95SVlastimil Babka 		 */
3293fd901c95SVlastimil Babka 		reset_isolation_suitable(pgdat);
3294fd901c95SVlastimil Babka 
3295fd901c95SVlastimil Babka 		/*
3296fd901c95SVlastimil Babka 		 * We have freed the memory, now we should compact it to make
3297fd901c95SVlastimil Babka 		 * allocation of the requested order possible.
3298fd901c95SVlastimil Babka 		 */
3299fd901c95SVlastimil Babka 		wakeup_kcompactd(pgdat, order, classzone_idx);
3300fd901c95SVlastimil Babka 
3301f0bc0a60SKOSAKI Motohiro 		remaining = schedule_timeout(HZ/10);
3302f0bc0a60SKOSAKI Motohiro 		finish_wait(&pgdat->kswapd_wait, &wait);
3303f0bc0a60SKOSAKI Motohiro 		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3304f0bc0a60SKOSAKI Motohiro 	}
3305f0bc0a60SKOSAKI Motohiro 
3306f0bc0a60SKOSAKI Motohiro 	/*
3307f0bc0a60SKOSAKI Motohiro 	 * After a short sleep, check if it was a premature sleep. If not, then
3308f0bc0a60SKOSAKI Motohiro 	 * go fully to sleep until explicitly woken up.
3309f0bc0a60SKOSAKI Motohiro 	 */
3310accf6242SVlastimil Babka 	if (prepare_kswapd_sleep(pgdat, order, remaining,
3311accf6242SVlastimil Babka 						balanced_classzone_idx)) {
3312f0bc0a60SKOSAKI Motohiro 		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
3313f0bc0a60SKOSAKI Motohiro 
3314f0bc0a60SKOSAKI Motohiro 		/*
3315f0bc0a60SKOSAKI Motohiro 		 * vmstat counters are not perfectly accurate and the estimated
3316f0bc0a60SKOSAKI Motohiro 		 * value for counters such as NR_FREE_PAGES can deviate from the
3317f0bc0a60SKOSAKI Motohiro 		 * true value by nr_online_cpus * threshold. To avoid the zone
3318f0bc0a60SKOSAKI Motohiro 		 * watermarks being breached while under pressure, we reduce the
3319f0bc0a60SKOSAKI Motohiro 		 * per-cpu vmstat threshold while kswapd is awake and restore
3320f0bc0a60SKOSAKI Motohiro 		 * them before going back to sleep.
3321f0bc0a60SKOSAKI Motohiro 		 */
3322f0bc0a60SKOSAKI Motohiro 		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
33231c7e7f6cSAaditya Kumar 
33241c7e7f6cSAaditya Kumar 		if (!kthread_should_stop())
3325f0bc0a60SKOSAKI Motohiro 			schedule();
33261c7e7f6cSAaditya Kumar 
3327f0bc0a60SKOSAKI Motohiro 		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
3328f0bc0a60SKOSAKI Motohiro 	} else {
3329f0bc0a60SKOSAKI Motohiro 		if (remaining)
3330f0bc0a60SKOSAKI Motohiro 			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
3331f0bc0a60SKOSAKI Motohiro 		else
3332f0bc0a60SKOSAKI Motohiro 			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
3333f0bc0a60SKOSAKI Motohiro 	}
3334f0bc0a60SKOSAKI Motohiro 	finish_wait(&pgdat->kswapd_wait, &wait);
3335f0bc0a60SKOSAKI Motohiro }
3336f0bc0a60SKOSAKI Motohiro 
33371da177e4SLinus Torvalds /*
33381da177e4SLinus Torvalds  * The background pageout daemon, started as a kernel thread
33391da177e4SLinus Torvalds  * from the init process.
33401da177e4SLinus Torvalds  *
33411da177e4SLinus Torvalds  * This basically trickles out pages so that we have _some_
33421da177e4SLinus Torvalds  * free memory available even if there is no other activity
33431da177e4SLinus Torvalds  * that frees anything up. This is needed for things like routing
33441da177e4SLinus Torvalds  * etc, where we otherwise might have all activity going on in
33451da177e4SLinus Torvalds  * asynchronous contexts that cannot page things out.
33461da177e4SLinus Torvalds  *
33471da177e4SLinus Torvalds  * If there are applications that are active memory-allocators
33481da177e4SLinus Torvalds  * (most normal use), this basically shouldn't matter.
33491da177e4SLinus Torvalds  */
33501da177e4SLinus Torvalds static int kswapd(void *p)
33511da177e4SLinus Torvalds {
3352215ddd66SMel Gorman 	unsigned long order, new_order;
3353215ddd66SMel Gorman 	int classzone_idx, new_classzone_idx;
3354d2ebd0f6SAlex,Shi 	int balanced_classzone_idx;
33551da177e4SLinus Torvalds 	pg_data_t *pgdat = (pg_data_t*)p;
33561da177e4SLinus Torvalds 	struct task_struct *tsk = current;
3357f0bc0a60SKOSAKI Motohiro 
33581da177e4SLinus Torvalds 	struct reclaim_state reclaim_state = {
33591da177e4SLinus Torvalds 		.reclaimed_slab = 0,
33601da177e4SLinus Torvalds 	};
3361a70f7302SRusty Russell 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
33621da177e4SLinus Torvalds 
3363cf40bd16SNick Piggin 	lockdep_set_current_reclaim_state(GFP_KERNEL);
3364cf40bd16SNick Piggin 
3365174596a0SRusty Russell 	if (!cpumask_empty(cpumask))
3366c5f59f08SMike Travis 		set_cpus_allowed_ptr(tsk, cpumask);
33671da177e4SLinus Torvalds 	current->reclaim_state = &reclaim_state;
33681da177e4SLinus Torvalds 
33691da177e4SLinus Torvalds 	/*
33701da177e4SLinus Torvalds 	 * Tell the memory management that we're a "memory allocator",
33711da177e4SLinus Torvalds 	 * and that if we need more memory we should get access to it
33721da177e4SLinus Torvalds 	 * regardless (see "__alloc_pages()"). "kswapd" should
33731da177e4SLinus Torvalds 	 * never get caught in the normal page freeing logic.
33741da177e4SLinus Torvalds 	 *
33751da177e4SLinus Torvalds 	 * (Kswapd normally doesn't need memory anyway, but sometimes
33761da177e4SLinus Torvalds 	 * you need a small amount of memory in order to be able to
33771da177e4SLinus Torvalds 	 * page out something else, and this flag essentially protects
33781da177e4SLinus Torvalds 	 * us from recursively trying to free more memory as we're
33791da177e4SLinus Torvalds 	 * trying to free the first piece of memory in the first place).
33801da177e4SLinus Torvalds 	 */
3381930d9152SChristoph Lameter 	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
338283144186SRafael J. Wysocki 	set_freezable();
33831da177e4SLinus Torvalds 
3384215ddd66SMel Gorman 	order = new_order = 0;
3385215ddd66SMel Gorman 	classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
3386d2ebd0f6SAlex,Shi 	balanced_classzone_idx = classzone_idx;
33871da177e4SLinus Torvalds 	for ( ; ; ) {
33886f6313d4SJeff Liu 		bool ret;
33893e1d1d28SChristoph Lameter 
3390215ddd66SMel Gorman 		/*
3391accf6242SVlastimil Babka 		 * While we were reclaiming, there might have been another
3392accf6242SVlastimil Babka 		 * wakeup, so check the values.
3393215ddd66SMel Gorman 		 */
33941da177e4SLinus Torvalds 		new_order = pgdat->kswapd_max_order;
339599504748SMel Gorman 		new_classzone_idx = pgdat->classzone_idx;
33961da177e4SLinus Torvalds 		pgdat->kswapd_max_order =  0;
3397215ddd66SMel Gorman 		pgdat->classzone_idx = pgdat->nr_zones - 1;
3398215ddd66SMel Gorman 
339999504748SMel Gorman 		if (order < new_order || classzone_idx > new_classzone_idx) {
34001da177e4SLinus Torvalds 			/*
34011da177e4SLinus Torvalds 			 * Don't sleep if someone wants a larger 'order'
340299504748SMel Gorman 			 * allocation or has tighter zone constraints
34031da177e4SLinus Torvalds 			 */
34041da177e4SLinus Torvalds 			order = new_order;
340599504748SMel Gorman 			classzone_idx = new_classzone_idx;
34061da177e4SLinus Torvalds 		} else {
3407accf6242SVlastimil Babka 			kswapd_try_to_sleep(pgdat, order, classzone_idx,
3408d2ebd0f6SAlex,Shi 						balanced_classzone_idx);
34091da177e4SLinus Torvalds 			order = pgdat->kswapd_max_order;
341099504748SMel Gorman 			classzone_idx = pgdat->classzone_idx;
3411f0dfcde0SAlex,Shi 			new_order = order;
3412f0dfcde0SAlex,Shi 			new_classzone_idx = classzone_idx;
34134d40502eSMel Gorman 			pgdat->kswapd_max_order = 0;
3414215ddd66SMel Gorman 			pgdat->classzone_idx = pgdat->nr_zones - 1;
34151da177e4SLinus Torvalds 		}
34161da177e4SLinus Torvalds 
34178fe23e05SDavid Rientjes 		ret = try_to_freeze();
34188fe23e05SDavid Rientjes 		if (kthread_should_stop())
34198fe23e05SDavid Rientjes 			break;
34208fe23e05SDavid Rientjes 
34218fe23e05SDavid Rientjes 		/*
34228fe23e05SDavid Rientjes 		 * We can speed up thawing tasks if we don't call balance_pgdat
34238fe23e05SDavid Rientjes 		 * after returning from the refrigerator
3424b1296cc4SRafael J. Wysocki 		 */
342533906bc5SMel Gorman 		if (!ret) {
342633906bc5SMel Gorman 			trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
3427accf6242SVlastimil Babka 			balanced_classzone_idx = balance_pgdat(pgdat, order,
3428accf6242SVlastimil Babka 								classzone_idx);
34291da177e4SLinus Torvalds 		}
343033906bc5SMel Gorman 	}
3431b0a8cc58STakamori Yamaguchi 
343271abdc15SJohannes Weiner 	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
3433b0a8cc58STakamori Yamaguchi 	current->reclaim_state = NULL;
343471abdc15SJohannes Weiner 	lockdep_clear_current_reclaim_state();
343571abdc15SJohannes Weiner 
34361da177e4SLinus Torvalds 	return 0;
34371da177e4SLinus Torvalds }
34381da177e4SLinus Torvalds 
34391da177e4SLinus Torvalds /*
34401da177e4SLinus Torvalds  * A zone is low on free memory, so wake its kswapd task to service it.
34411da177e4SLinus Torvalds  */
344299504748SMel Gorman void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
34431da177e4SLinus Torvalds {
34441da177e4SLinus Torvalds 	pg_data_t *pgdat;
34451da177e4SLinus Torvalds 
3446f3fe6512SCon Kolivas 	if (!populated_zone(zone))
34471da177e4SLinus Torvalds 		return;
34481da177e4SLinus Torvalds 
3449344736f2SVladimir Davydov 	if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
34501da177e4SLinus Torvalds 		return;
345188f5acf8SMel Gorman 	pgdat = zone->zone_pgdat;
345299504748SMel Gorman 	if (pgdat->kswapd_max_order < order) {
345388f5acf8SMel Gorman 		pgdat->kswapd_max_order = order;
345499504748SMel Gorman 		pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
345599504748SMel Gorman 	}
34568d0986e2SCon Kolivas 	if (!waitqueue_active(&pgdat->kswapd_wait))
34571da177e4SLinus Torvalds 		return;
3458accf6242SVlastimil Babka 	if (zone_balanced(zone, order, true, 0, 0))
345988f5acf8SMel Gorman 		return;
346088f5acf8SMel Gorman 
346188f5acf8SMel Gorman 	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
34628d0986e2SCon Kolivas 	wake_up_interruptible(&pgdat->kswapd_wait);
34631da177e4SLinus Torvalds }
34641da177e4SLinus Torvalds 
3465c6f37f12SRafael J. Wysocki #ifdef CONFIG_HIBERNATION
34661da177e4SLinus Torvalds /*
34677b51755cSKOSAKI Motohiro  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
3468d6277db4SRafael J. Wysocki  * freed pages.
3469d6277db4SRafael J. Wysocki  *
3470d6277db4SRafael J. Wysocki  * Rather than trying to age LRUs the aim is to preserve the overall
3470d6277db4SRafael J. Wysocki  * Rather than trying to age LRUs, the aim is to preserve the overall
3472d6277db4SRafael J. Wysocki  * inactive > active > active referenced > active mapped
34731da177e4SLinus Torvalds  */
34747b51755cSKOSAKI Motohiro unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
34751da177e4SLinus Torvalds {
3476d6277db4SRafael J. Wysocki 	struct reclaim_state reclaim_state;
3477d6277db4SRafael J. Wysocki 	struct scan_control sc = {
34787b51755cSKOSAKI Motohiro 		.nr_to_reclaim = nr_to_reclaim,
3479ee814fe2SJohannes Weiner 		.gfp_mask = GFP_HIGHUSER_MOVABLE,
34809e3b2f8cSKonstantin Khlebnikov 		.priority = DEF_PRIORITY,
3481ee814fe2SJohannes Weiner 		.may_writepage = 1,
3482ee814fe2SJohannes Weiner 		.may_unmap = 1,
3483ee814fe2SJohannes Weiner 		.may_swap = 1,
3484ee814fe2SJohannes Weiner 		.hibernation_mode = 1,
34851da177e4SLinus Torvalds 	};
34867b51755cSKOSAKI Motohiro 	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
34877b51755cSKOSAKI Motohiro 	struct task_struct *p = current;
34887b51755cSKOSAKI Motohiro 	unsigned long nr_reclaimed;
34891da177e4SLinus Torvalds 
34907b51755cSKOSAKI Motohiro 	p->flags |= PF_MEMALLOC;
34917b51755cSKOSAKI Motohiro 	lockdep_set_current_reclaim_state(sc.gfp_mask);
3492d6277db4SRafael J. Wysocki 	reclaim_state.reclaimed_slab = 0;
34937b51755cSKOSAKI Motohiro 	p->reclaim_state = &reclaim_state;
3494d6277db4SRafael J. Wysocki 
34953115cd91SVladimir Davydov 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3496d6277db4SRafael J. Wysocki 
34977b51755cSKOSAKI Motohiro 	p->reclaim_state = NULL;
34987b51755cSKOSAKI Motohiro 	lockdep_clear_current_reclaim_state();
34997b51755cSKOSAKI Motohiro 	p->flags &= ~PF_MEMALLOC;
3500d6277db4SRafael J. Wysocki 
35017b51755cSKOSAKI Motohiro 	return nr_reclaimed;
35021da177e4SLinus Torvalds }
3503c6f37f12SRafael J. Wysocki #endif /* CONFIG_HIBERNATION */
35041da177e4SLinus Torvalds 
35051da177e4SLinus Torvalds /* It's optimal to keep kswapds on the same CPUs as their memory, but
35061da177e4SLinus Torvalds    not required for correctness.  So if the last cpu in a node goes
35071da177e4SLinus Torvalds    away, we get changed to run anywhere: as the first one comes back,
35081da177e4SLinus Torvalds    restore their cpu bindings. */
3509fcb35a9bSGreg Kroah-Hartman static int cpu_callback(struct notifier_block *nfb, unsigned long action,
3510fcb35a9bSGreg Kroah-Hartman 			void *hcpu)
35111da177e4SLinus Torvalds {
351258c0a4a7SYasunori Goto 	int nid;
35131da177e4SLinus Torvalds 
35148bb78442SRafael J. Wysocki 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
351548fb2e24SLai Jiangshan 		for_each_node_state(nid, N_MEMORY) {
3516c5f59f08SMike Travis 			pg_data_t *pgdat = NODE_DATA(nid);
3517a70f7302SRusty Russell 			const struct cpumask *mask;
3518a70f7302SRusty Russell 
3519a70f7302SRusty Russell 			mask = cpumask_of_node(pgdat->node_id);
3520c5f59f08SMike Travis 
35213e597945SRusty Russell 			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
35221da177e4SLinus Torvalds 				/* One of our CPUs online: restore mask */
3523c5f59f08SMike Travis 				set_cpus_allowed_ptr(pgdat->kswapd, mask);
35241da177e4SLinus Torvalds 		}
35251da177e4SLinus Torvalds 	}
35261da177e4SLinus Torvalds 	return NOTIFY_OK;
35271da177e4SLinus Torvalds }
35281da177e4SLinus Torvalds 
35293218ae14SYasunori Goto /*
35303218ae14SYasunori Goto  * This kswapd start function will be called by init and node-hot-add.
35313218ae14SYasunori Goto  * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
35323218ae14SYasunori Goto  */
35333218ae14SYasunori Goto int kswapd_run(int nid)
35343218ae14SYasunori Goto {
35353218ae14SYasunori Goto 	pg_data_t *pgdat = NODE_DATA(nid);
35363218ae14SYasunori Goto 	int ret = 0;
35373218ae14SYasunori Goto 
35383218ae14SYasunori Goto 	if (pgdat->kswapd)
35393218ae14SYasunori Goto 		return 0;
35403218ae14SYasunori Goto 
35413218ae14SYasunori Goto 	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
35423218ae14SYasunori Goto 	if (IS_ERR(pgdat->kswapd)) {
35433218ae14SYasunori Goto 		/* failure at boot is fatal */
35443218ae14SYasunori Goto 		BUG_ON(system_state == SYSTEM_BOOTING);
3545d5dc0ad9SGavin Shan 		pr_err("Failed to start kswapd on node %d\n", nid);
3546d5dc0ad9SGavin Shan 		ret = PTR_ERR(pgdat->kswapd);
3547d72515b8SXishi Qiu 		pgdat->kswapd = NULL;
35483218ae14SYasunori Goto 	}
35493218ae14SYasunori Goto 	return ret;
35503218ae14SYasunori Goto }
35513218ae14SYasunori Goto 
35528fe23e05SDavid Rientjes /*
3553d8adde17SJiang Liu  * Called by memory hotplug when all memory in a node is offlined.  Caller must
3554bfc8c901SVladimir Davydov  * hold mem_hotplug_begin/end().
35558fe23e05SDavid Rientjes  */
35568fe23e05SDavid Rientjes void kswapd_stop(int nid)
35578fe23e05SDavid Rientjes {
35588fe23e05SDavid Rientjes 	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
35598fe23e05SDavid Rientjes 
3560d8adde17SJiang Liu 	if (kswapd) {
35618fe23e05SDavid Rientjes 		kthread_stop(kswapd);
3562d8adde17SJiang Liu 		NODE_DATA(nid)->kswapd = NULL;
3563d8adde17SJiang Liu 	}
35648fe23e05SDavid Rientjes }
35658fe23e05SDavid Rientjes 
35661da177e4SLinus Torvalds static int __init kswapd_init(void)
35671da177e4SLinus Torvalds {
35683218ae14SYasunori Goto 	int nid;
356969e05944SAndrew Morton 
35701da177e4SLinus Torvalds 	swap_setup();
357148fb2e24SLai Jiangshan 	for_each_node_state(nid, N_MEMORY)
35723218ae14SYasunori Goto 		kswapd_run(nid);
35731da177e4SLinus Torvalds 	hotcpu_notifier(cpu_callback, 0);
35741da177e4SLinus Torvalds 	return 0;
35751da177e4SLinus Torvalds }
35761da177e4SLinus Torvalds 
35771da177e4SLinus Torvalds module_init(kswapd_init)
35789eeff239SChristoph Lameter 
35799eeff239SChristoph Lameter #ifdef CONFIG_NUMA
35809eeff239SChristoph Lameter /*
35819eeff239SChristoph Lameter  * Zone reclaim mode
35829eeff239SChristoph Lameter  *
35839eeff239SChristoph Lameter  * If non-zero call zone_reclaim when the number of free pages falls below
35849eeff239SChristoph Lameter  * the watermarks.
35859eeff239SChristoph Lameter  */
35869eeff239SChristoph Lameter int zone_reclaim_mode __read_mostly;
35879eeff239SChristoph Lameter 
35881b2ffb78SChristoph Lameter #define RECLAIM_OFF 0
35897d03431cSFernando Luis Vazquez Cao #define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
35901b2ffb78SChristoph Lameter #define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
359195bbc0c7SZhihui Zhang #define RECLAIM_UNMAP (1<<2)	/* Unmap pages during reclaim */
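/*
 * These bits are combined via the vm.zone_reclaim_mode sysctl; for example,
 * writing 3 to /proc/sys/vm/zone_reclaim_mode selects RECLAIM_ZONE together
 * with RECLAIM_WRITE (illustrative usage, see Documentation/sysctl/vm.txt).
 */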
35921b2ffb78SChristoph Lameter 
35939eeff239SChristoph Lameter /*
3594a92f7126SChristoph Lameter  * Priority for ZONE_RECLAIM. This determines the fraction of pages
3595a92f7126SChristoph Lameter  * of a node considered for each zone_reclaim. 4 scans 1/16th of
3596a92f7126SChristoph Lameter  * a zone.
3597a92f7126SChristoph Lameter  */
3598a92f7126SChristoph Lameter #define ZONE_RECLAIM_PRIORITY 4
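/*
 * Each pass scans roughly lru_size >> priority pages, so starting at 4 looks
 * at 1/16th of the LRUs and doubles that share every time __zone_reclaim
 * lowers the priority.
 */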
3599a92f7126SChristoph Lameter 
36009eeff239SChristoph Lameter /*
36019614634fSChristoph Lameter  * Percentage of pages in a zone that must be unmapped for zone_reclaim to
36029614634fSChristoph Lameter  * occur.
36039614634fSChristoph Lameter  */
36049614634fSChristoph Lameter int sysctl_min_unmapped_ratio = 1;
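/*
 * The ratio is a percentage of a zone's pages; with the default of 1,
 * zone_reclaim is only attempted while more than about 1% of the zone
 * consists of unmapped file-backed pagecache (zone->min_unmapped_pages is
 * derived from this value when the sysctl is written).
 */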
36059614634fSChristoph Lameter 
36069614634fSChristoph Lameter /*
36070ff38490SChristoph Lameter  * If the number of slab pages in a zone grows beyond this percentage then
36080ff38490SChristoph Lameter  * slab reclaim needs to occur.
36090ff38490SChristoph Lameter  */
36100ff38490SChristoph Lameter int sysctl_min_slab_ratio = 5;
36110ff38490SChristoph Lameter 
361290afa5deSMel Gorman static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
361390afa5deSMel Gorman {
361490afa5deSMel Gorman 	unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
361590afa5deSMel Gorman 	unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
361690afa5deSMel Gorman 		zone_page_state(zone, NR_ACTIVE_FILE);
361790afa5deSMel Gorman 
361890afa5deSMel Gorman 	/*
361990afa5deSMel Gorman 	 * It's possible for there to be more file mapped pages than
362090afa5deSMel Gorman 	 * accounted for by the pages on the file LRU lists because
362190afa5deSMel Gorman 	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
362290afa5deSMel Gorman 	 */
362390afa5deSMel Gorman 	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
362490afa5deSMel Gorman }
362590afa5deSMel Gorman 
362690afa5deSMel Gorman /* Work out how many page cache pages we can reclaim in this reclaim_mode */
3627d031a157SAlexandru Moise static unsigned long zone_pagecache_reclaimable(struct zone *zone)
362890afa5deSMel Gorman {
3629d031a157SAlexandru Moise 	unsigned long nr_pagecache_reclaimable;
3630d031a157SAlexandru Moise 	unsigned long delta = 0;
363190afa5deSMel Gorman 
363290afa5deSMel Gorman 	/*
363395bbc0c7SZhihui Zhang 	 * If RECLAIM_UNMAP is set, then all file pages are considered
363490afa5deSMel Gorman 	 * potentially reclaimable. Otherwise, we have to worry about
363590afa5deSMel Gorman 	 * pages like swapcache and zone_unmapped_file_pages() provides
363590afa5deSMel Gorman 	 * pages like swapcache, and zone_unmapped_file_pages() provides
363690afa5deSMel Gorman 	 * a better estimate.
363895bbc0c7SZhihui Zhang 	if (zone_reclaim_mode & RECLAIM_UNMAP)
363990afa5deSMel Gorman 		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
364090afa5deSMel Gorman 	else
364190afa5deSMel Gorman 		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
364290afa5deSMel Gorman 
364390afa5deSMel Gorman 	/* If we can't clean pages, remove dirty pages from consideration */
364490afa5deSMel Gorman 	if (!(zone_reclaim_mode & RECLAIM_WRITE))
364590afa5deSMel Gorman 		delta += zone_page_state(zone, NR_FILE_DIRTY);
364690afa5deSMel Gorman 
364790afa5deSMel Gorman 	/* Watch for any possible underflows due to delta */
364890afa5deSMel Gorman 	if (unlikely(delta > nr_pagecache_reclaimable))
364990afa5deSMel Gorman 		delta = nr_pagecache_reclaimable;
365090afa5deSMel Gorman 
365190afa5deSMel Gorman 	return nr_pagecache_reclaimable - delta;
365290afa5deSMel Gorman }
365390afa5deSMel Gorman 
36540ff38490SChristoph Lameter /*
36559eeff239SChristoph Lameter  * Try to free up some pages from this zone through reclaim.
36569eeff239SChristoph Lameter  */
3657179e9639SAndrew Morton static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
36589eeff239SChristoph Lameter {
36597fb2d46dSChristoph Lameter 	/* Minimum pages needed in order to stay on node */
366069e05944SAndrew Morton 	const unsigned long nr_pages = 1 << order;
36619eeff239SChristoph Lameter 	struct task_struct *p = current;
36629eeff239SChristoph Lameter 	struct reclaim_state reclaim_state;
3663179e9639SAndrew Morton 	struct scan_control sc = {
366462b726c1SAndrew Morton 		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
366521caf2fcSMing Lei 		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
3666bd2f6199SJohannes Weiner 		.order = order,
36679e3b2f8cSKonstantin Khlebnikov 		.priority = ZONE_RECLAIM_PRIORITY,
3668ee814fe2SJohannes Weiner 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
366995bbc0c7SZhihui Zhang 		.may_unmap = !!(zone_reclaim_mode & RECLAIM_UNMAP),
3670ee814fe2SJohannes Weiner 		.may_swap = 1,
3671179e9639SAndrew Morton 	};
36729eeff239SChristoph Lameter 
36739eeff239SChristoph Lameter 	cond_resched();
3674d4f7796eSChristoph Lameter 	/*
367595bbc0c7SZhihui Zhang 	 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
3676d4f7796eSChristoph Lameter 	 * and we also need to be able to write out pages for RECLAIM_WRITE
367795bbc0c7SZhihui Zhang 	 * and RECLAIM_UNMAP.
3678d4f7796eSChristoph Lameter 	 */
3679d4f7796eSChristoph Lameter 	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
368076ca542dSKOSAKI Motohiro 	lockdep_set_current_reclaim_state(gfp_mask);
36819eeff239SChristoph Lameter 	reclaim_state.reclaimed_slab = 0;
36829eeff239SChristoph Lameter 	p->reclaim_state = &reclaim_state;
3683c84db23cSChristoph Lameter 
368490afa5deSMel Gorman 	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
3685a92f7126SChristoph Lameter 		/*
36860ff38490SChristoph Lameter 		 * Free memory by calling shrink zone with increasing
36870ff38490SChristoph Lameter 		 * priorities until we have enough memory freed.
3688a92f7126SChristoph Lameter 		 */
3689a92f7126SChristoph Lameter 		do {
36906b4f7799SJohannes Weiner 			shrink_zone(zone, &sc, true);
36919e3b2f8cSKonstantin Khlebnikov 		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
36920ff38490SChristoph Lameter 	}
3693a92f7126SChristoph Lameter 
36949eeff239SChristoph Lameter 	p->reclaim_state = NULL;
3695d4f7796eSChristoph Lameter 	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
369676ca542dSKOSAKI Motohiro 	lockdep_clear_current_reclaim_state();
3697a79311c1SRik van Riel 	return sc.nr_reclaimed >= nr_pages;
36989eeff239SChristoph Lameter }
3699179e9639SAndrew Morton 
3700179e9639SAndrew Morton int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3701179e9639SAndrew Morton {
3702179e9639SAndrew Morton 	int node_id;
3703d773ed6bSDavid Rientjes 	int ret;
3704179e9639SAndrew Morton 
3705179e9639SAndrew Morton 	/*
37060ff38490SChristoph Lameter 	 * Zone reclaim reclaims unmapped file backed pages and
37070ff38490SChristoph Lameter 	 * slab pages if we are over the defined limits.
370834aa1330SChristoph Lameter 	 *
37099614634fSChristoph Lameter 	 * A small portion of unmapped file backed pages is needed for
37109614634fSChristoph Lameter 	 * file I/O otherwise pages read by file I/O will be immediately
37119614634fSChristoph Lameter 	 * thrown out if the zone is overallocated. So we do not reclaim
37129614634fSChristoph Lameter 	 * if less than a specified percentage of the zone is used by
37139614634fSChristoph Lameter 	 * unmapped file backed pages.
3714179e9639SAndrew Morton 	 */
371590afa5deSMel Gorman 	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
371690afa5deSMel Gorman 	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
3717fa5e084eSMel Gorman 		return ZONE_RECLAIM_FULL;
3718179e9639SAndrew Morton 
37196e543d57SLisa Du 	if (!zone_reclaimable(zone))
3720fa5e084eSMel Gorman 		return ZONE_RECLAIM_FULL;
3721d773ed6bSDavid Rientjes 
3722179e9639SAndrew Morton 	/*
3723d773ed6bSDavid Rientjes 	 * Do not scan if the allocation should not be delayed.
3724179e9639SAndrew Morton 	 */
3725d0164adcSMel Gorman 	if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
3726fa5e084eSMel Gorman 		return ZONE_RECLAIM_NOSCAN;
3727179e9639SAndrew Morton 
3728179e9639SAndrew Morton 	/*
3729179e9639SAndrew Morton 	 * Only run zone reclaim on the local zone or on zones that do not
3730179e9639SAndrew Morton 	 * have associated processors. This will favor the local processor
3731179e9639SAndrew Morton 	 * over remote processors and spread off node memory allocations
3732179e9639SAndrew Morton 	 * as wide as possible.
3733179e9639SAndrew Morton 	 */
373489fa3024SChristoph Lameter 	node_id = zone_to_nid(zone);
373537c0708dSChristoph Lameter 	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
3736fa5e084eSMel Gorman 		return ZONE_RECLAIM_NOSCAN;
3737d773ed6bSDavid Rientjes 
373857054651SJohannes Weiner 	if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags))
3739fa5e084eSMel Gorman 		return ZONE_RECLAIM_NOSCAN;
3740fa5e084eSMel Gorman 
3741d773ed6bSDavid Rientjes 	ret = __zone_reclaim(zone, gfp_mask, order);
374257054651SJohannes Weiner 	clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
3743d773ed6bSDavid Rientjes 
374424cf7251SMel Gorman 	if (!ret)
374524cf7251SMel Gorman 		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
374624cf7251SMel Gorman 
3747d773ed6bSDavid Rientjes 	return ret;
3748179e9639SAndrew Morton }
37499eeff239SChristoph Lameter #endif
3750894bc310SLee Schermerhorn 
3751894bc310SLee Schermerhorn /*
3752894bc310SLee Schermerhorn  * page_evictable - test whether a page is evictable
3753894bc310SLee Schermerhorn  * @page: the page to test
3754894bc310SLee Schermerhorn  *
3755894bc310SLee Schermerhorn  * Test whether page is evictable--i.e., should be placed on active/inactive
375639b5f29aSHugh Dickins  * lists vs unevictable list.
3757894bc310SLee Schermerhorn  *
3758894bc310SLee Schermerhorn  * Reasons page might not be evictable:
3759ba9ddf49SLee Schermerhorn  * (1) page's mapping marked unevictable
3760b291f000SNick Piggin  * (2) page is part of an mlocked VMA
3761ba9ddf49SLee Schermerhorn  *
3762894bc310SLee Schermerhorn  */
376339b5f29aSHugh Dickins int page_evictable(struct page *page)
3764894bc310SLee Schermerhorn {
376539b5f29aSHugh Dickins 	return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
3766894bc310SLee Schermerhorn }
376789e004eaSLee Schermerhorn 
376885046579SHugh Dickins #ifdef CONFIG_SHMEM
376989e004eaSLee Schermerhorn /**
377024513264SHugh Dickins  * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
377124513264SHugh Dickins  * @pages:	array of pages to check
377224513264SHugh Dickins  * @nr_pages:	number of pages to check
377389e004eaSLee Schermerhorn  *
377424513264SHugh Dickins  * Checks pages for evictability and moves them to the appropriate lru list.
377585046579SHugh Dickins  *
377685046579SHugh Dickins  * This function is only used for SysV IPC SHM_UNLOCK.
377789e004eaSLee Schermerhorn  */
377824513264SHugh Dickins void check_move_unevictable_pages(struct page **pages, int nr_pages)
377989e004eaSLee Schermerhorn {
3780925b7673SJohannes Weiner 	struct lruvec *lruvec;
378124513264SHugh Dickins 	struct zone *zone = NULL;
378224513264SHugh Dickins 	int pgscanned = 0;
378324513264SHugh Dickins 	int pgrescued = 0;
378489e004eaSLee Schermerhorn 	int i;
378589e004eaSLee Schermerhorn 
378624513264SHugh Dickins 	for (i = 0; i < nr_pages; i++) {
378724513264SHugh Dickins 		struct page *page = pages[i];
378824513264SHugh Dickins 		struct zone *pagezone;
378989e004eaSLee Schermerhorn 
379024513264SHugh Dickins 		pgscanned++;
379124513264SHugh Dickins 		pagezone = page_zone(page);
379289e004eaSLee Schermerhorn 		if (pagezone != zone) {
379389e004eaSLee Schermerhorn 			if (zone)
3794*a52633d8SMel Gorman 				spin_unlock_irq(zone_lru_lock(zone));
379589e004eaSLee Schermerhorn 			zone = pagezone;
3796*a52633d8SMel Gorman 			spin_lock_irq(zone_lru_lock(zone));
379789e004eaSLee Schermerhorn 		}
3798fa9add64SHugh Dickins 		lruvec = mem_cgroup_page_lruvec(page, zone);
379989e004eaSLee Schermerhorn 
380024513264SHugh Dickins 		if (!PageLRU(page) || !PageUnevictable(page))
380124513264SHugh Dickins 			continue;
380289e004eaSLee Schermerhorn 
380339b5f29aSHugh Dickins 		if (page_evictable(page)) {
380424513264SHugh Dickins 			enum lru_list lru = page_lru_base_type(page);
380524513264SHugh Dickins 
3806309381feSSasha Levin 			VM_BUG_ON_PAGE(PageActive(page), page);
380724513264SHugh Dickins 			ClearPageUnevictable(page);
3808fa9add64SHugh Dickins 			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
3809fa9add64SHugh Dickins 			add_page_to_lru_list(page, lruvec, lru);
381024513264SHugh Dickins 			pgrescued++;
381189e004eaSLee Schermerhorn 		}
381289e004eaSLee Schermerhorn 	}
381324513264SHugh Dickins 
381424513264SHugh Dickins 	if (zone) {
381524513264SHugh Dickins 		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
381624513264SHugh Dickins 		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
3817*a52633d8SMel Gorman 		spin_unlock_irq(zone_lru_lock(zone));
381824513264SHugh Dickins 	}
381985046579SHugh Dickins }
382085046579SHugh Dickins #endif /* CONFIG_SHMEM */
3821