/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

/*
 * reclaim_mode determines how the inactive list is shrunk
 * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
 * RECLAIM_MODE_ASYNC:  Do not block
 * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
 * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
 *			page from the LRU and reclaim all pages within a
 *			naturally aligned range
 * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
 *			order-0 pages and then compact the zone
 */
typedef unsigned __bitwise__ reclaim_mode_t;
#define RECLAIM_MODE_SINGLE		((__force reclaim_mode_t)0x01u)
#define RECLAIM_MODE_ASYNC		((__force reclaim_mode_t)0x02u)
#define RECLAIM_MODE_SYNC		((__force reclaim_mode_t)0x04u)
#define RECLAIM_MODE_LUMPYRECLAIM	((__force reclaim_mode_t)0x08u)
#define RECLAIM_MODE_COMPACTION		((__force reclaim_mode_t)0x10u)
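
/*
 * Illustrative note (a sketch, not from the original source): these
 * flags are combined by set_reclaim_mode() below, e.g. synchronous
 * lumpy reclaim on a kernel built without compaction ends up as
 *
 *	sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM | RECLAIM_MODE_SYNC;
 *
 * while the default is RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC.
 */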

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

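	/*
	 * Set when reclaiming for hibernation via shrink_all_memory();
	 * among other things this skips the nap reclaim normally takes
	 * to wait for writeback to make progress.
	 */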
	unsigned long hibernation_mode;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can mapped pages be reclaimed? */
	int may_unmap;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	int order;

	/*
	 * Intended to reclaim enough contiguous memory rather than just
	 * enough memory, i.e. the mode used for high-order allocations.
	 */
	reclaim_mode_t reclaim_mode;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t	*nodemask;
};
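
/*
 * Illustrative sketch, not part of the original file: a direct reclaim
 * entry point such as try_to_free_pages() fills one of these in roughly
 * as follows (the exact field values here are an assumption based on
 * typical callers):
 *
 *	struct scan_control sc = {
 *		.gfp_mask = gfp_mask,
 *		.nr_to_reclaim = SWAP_CLUSTER_MAX,
 *		.may_writepage = !laptop_mode,
 *		.may_unmap = 1,
 *		.may_swap = 1,
 *		.order = order,
 *		.nodemask = nodemask,
 *		.target_mem_cgroup = NULL,
 *	};
 *
 * with target_mem_cgroup left NULL for global (non-cgroup) reclaim.
 */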

struct mem_cgroup_zone {
	struct mem_cgroup *mem_cgroup;
	struct zone *zone;
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif
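
/*
 * Illustrative sketch, not part of the original file: LRU scan loops in
 * this file walk a list tail-to-head and use the macros above to start
 * fetching the page that will be examined next, e.g.
 *
 *	page = lru_to_page(src);
 *	prefetchw_prev_lru_page(page, src, flags);
 */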

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
static bool global_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup;
}

static bool scanning_global_lru(struct mem_cgroup_zone *mz)
{
	return !mz->mem_cgroup;
}
#else
static bool global_reclaim(struct scan_control *sc)
{
	return true;
}

static bool scanning_global_lru(struct mem_cgroup_zone *mz)
{
	return true;
}
#endif

static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz)
{
	if (!scanning_global_lru(mz))
		return mem_cgroup_get_reclaim_stat(mz->mem_cgroup, mz->zone);

	return &mz->zone->reclaim_stat;
}

static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
				       enum lru_list lru)
{
	if (!scanning_global_lru(mz))
		return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
						    zone_to_nid(mz->zone),
						    zone_idx(mz->zone),
						    BIT(lru));

	return zone_page_state(mz->zone, NR_LRU_BASE + lru);
}


/*
 * Add a shrinker callback to be called from the vm
 */
void register_shrinker(struct shrinker *shrinker)
{
	atomic_long_set(&shrinker->nr_in_batch, 0);
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);
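
/*
 * Illustrative sketch, not part of the original file: a cache that wants
 * to be aged by shrink_slab() embeds a struct shrinker and registers it.
 * The "foo" names below are hypothetical. On this kernel the ->shrink()
 * callback reports the object count when nr_to_scan is 0 and otherwise
 * scans, returning the number of objects left (or -1 to abort):
 *
 *	static int foo_shrink(struct shrinker *s, struct shrink_control *sc)
 *	{
 *		if (!sc->nr_to_scan)
 *			return foo_count_objects();
 *		foo_scan_objects(sc->nr_to_scan);
 *		return foo_count_objects();
 *	}
 *
 *	static struct shrinker foo_shrinker = {
 *		.shrink = foo_shrink,
 *		.seeks = DEFAULT_SEEKS,
 *	};
 *
 * followed by register_shrinker(&foo_shrinker) at init time and
 * unregister_shrinker(&foo_shrinker) at teardown.
 */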

static inline int do_shrinker_shrink(struct shrinker *shrinker,
				     struct shrink_control *sc,
				     unsigned long nr_to_scan)
{
	sc->nr_to_scan = nr_to_scan;
	return (*shrinker->shrink)(shrinker, sc);
}

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure
 * on slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (nr_pages_scanned == 0)
		nr_pages_scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem)) {
		/* Assume we'll be able to shrink next time */
		ret = 1;
		goto out;
	}

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		long total_scan;
		long max_pass;
		int shrink_ret = 0;
		long nr;
		long new_nr;
		long batch_size = shrinker->batch ? shrinker->batch
						  : SHRINK_BATCH;

		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
		if (max_pass <= 0)
			continue;

		/*
		 * copy the current shrinker scan count into a local variable
		 * and zero it so that other concurrent shrinker invocations
		 * don't also do this scanning work.
		 */
		nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);

		total_scan = nr;
		delta = (4 * nr_pages_scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
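		/*
		 * Worked example (the numbers are hypothetical): with
		 * nr_pages_scanned = 1000, seeks = DEFAULT_SEEKS (2),
		 * max_pass = 10000 objects and lru_pages = 100000,
		 * delta = (4 * 1000 / 2) * 10000 / 100001 ~= 200, i.e.
		 * roughly 2% of the cache is asked to age, matching the
		 * 1000/100000 = 1% of LRU pages scanned, doubled by the
		 * "4 / seeks" factor.
		 */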
		total_scan += delta;
		if (total_scan < 0) {
			printk(KERN_ERR "shrink_slab: %pF negative objects to "
			       "delete nr=%ld\n",
			       shrinker->shrink, total_scan);
			total_scan = max_pass;
		}

		/*
		 * We need to avoid excessive windup on filesystem shrinkers
		 * due to large numbers of GFP_NOFS allocations causing the
		 * shrinkers to return -1 all the time. This results in a large
		 * nr being built up so when a shrink that can do some work
		 * comes along it empties the entire cache due to nr >>>
		 * max_pass.  This is bad for sustaining a working set in
		 * memory.
		 *
		 * Hence only allow the shrinker to scan the entire cache when
		 * a large delta change is calculated directly.
		 */
		if (delta < max_pass / 4)
			total_scan = min(total_scan, max_pass / 2);

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (total_scan > max_pass * 2)
			total_scan = max_pass * 2;

		trace_mm_shrink_slab_start(shrinker, shrink, nr,
					nr_pages_scanned, lru_pages,
					max_pass, delta, total_scan);

		while (total_scan >= batch_size) {
			int nr_before;

			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
			shrink_ret = do_shrinker_shrink(shrinker, shrink,
							batch_size);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, batch_size);
			total_scan -= batch_size;

			cond_resched();
		}

		/*
		 * move the unused scan count back into the shrinker in a
		 * manner that handles concurrent updates. If we exhausted the
		 * scan, there is no need to do an update.
		 */
		if (total_scan > 0)
			new_nr = atomic_long_add_return(total_scan,
					&shrinker->nr_in_batch);
		else
			new_nr = atomic_long_read(&shrinker->nr_in_batch);

		trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
	}
	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return ret;
}

static void set_reclaim_mode(int priority, struct scan_control *sc,
				   bool sync)
{
	reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;

	/*
	 * Initially assume we are entering either lumpy reclaim or
	 * reclaim/compaction. Depending on the order, we will either set the
	 * sync mode or just reclaim order-0 pages later.
	 */
	if (COMPACTION_BUILD)
		sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
	else
		sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;

	/*
	 * Avoid using lumpy reclaim or reclaim/compaction if possible by
	 * restricting its use to either costly allocations or to periods
	 * of memory pressure.
	 */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		sc->reclaim_mode |= syncmode;
	else if (sc->order && priority < DEF_PRIORITY - 2)
		sc->reclaim_mode |= syncmode;
	else
		sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
}

static void reset_reclaim_mode(struct scan_control *sc)
{
	sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
}

static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.
	 */
	return page_count(page) - page_has_private(page) == 2;
}
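
/*
 * Worked instance of the arithmetic above: a clean page cache page held
 * only by an isolating caller has page_count == 2 (caller + radix tree)
 * and page_has_private() == 0, so 2 - 0 == 2. With buffer heads attached
 * both terms go up by one and the test still balances.
 */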

static int may_write_to_queue(struct backing_dev_info *bdi,
			      struct scan_control *sc)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;

	/* lumpy reclaim for hugepage often needs a lot of write */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 struct scan_control *sc)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_aio_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling. This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info, sc))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page,
			trace_reclaim_flags(page, sc->reclaim_mode));
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * The non-racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_count.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under tree_lock, then this ordering is not required.
	 */
	if (!page_freeze_refs(page, 2))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_unfreeze_refs(page, 2);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		swapcache_free(swap, page);
	} else {
		void (*freepage)(struct page *);

		freepage = mapping->a_ops->freepage;

		__delete_from_page_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		mem_cgroup_uncharge_cache_page(page);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_unfreeze_refs(page, 1);
		return 1;
	}
	return 0;
}

/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	int lru;
	int active = !!TestClearPageActive(page);
	int was_unevictable = PageUnevictable(page);

	VM_BUG_ON(PageLRU(page));

redo:
	ClearPageUnevictable(page);

	if (page_evictable(page, NULL)) {
		/*
		 * For evictable pages, we can use the cache.
		 * In event of a race, worst case is we end up with an
		 * unevictable page on [in]active list.
		 * We know how to handle that.
		 */
		lru = active + page_lru_base_type(page);
		lru_cache_add_lru(page, lru);
	} else {
		/*
		 * Put unevictable pages directly on zone's unevictable
		 * list.
		 */
		lru = LRU_UNEVICTABLE;
		add_page_to_unevictable_list(page);
		/*
		 * When racing with an mlock or AS_UNEVICTABLE clearing
		 * (page is unlocked) make sure that if the other thread
		 * does not observe our setting of PG_lru and fails
		 * isolation/check_move_unevictable_pages,
		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
		 * the page back to the evictable list.
		 *
		 * The other side is TestClearPageMlocked() or shmem_lock().
		 */
		smp_mb();
	}

	/*
	 * page's status can change while we move it among lru. If an evictable
	 * page is on the unevictable list, it will never be freed. To avoid
	 * that, check again after we have added it to the list.
	 */
	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
		if (!isolate_lru_page(page)) {
			put_page(page);
			goto redo;
		}
		/* This means someone else dropped this page from the LRU,
		 * so it will be freed or put back on the LRU again. There
		 * is nothing to do here.
		 */
	}

	if (was_unevictable && lru != LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGRESCUED);
	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGCULLED);

	put_page(page);		/* drop ref from isolate */
}

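/*
 * How shrink_page_list() treats each outcome (a summary matching its
 * switch statement below): PAGEREF_ACTIVATE moves the page to the active
 * list, PAGEREF_KEEP leaves it on the inactive list, and both
 * PAGEREF_RECLAIM variants attempt reclaim, with PAGEREF_RECLAIM_CLEAN
 * additionally refusing to start writeback on a dirty page.
 */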
enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
						  struct mem_cgroup_zone *mz,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, mz->mem_cgroup, &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/* Lumpy reclaim - ignore references */
	if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
		return PAGEREF_RECLAIM;

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageAnon(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if (vm_flags & VM_EXEC)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
				      struct mem_cgroup_zone *mz,
				      struct scan_control *sc,
				      int priority,
				      unsigned long *ret_nr_dirty,
				      unsigned long *ret_nr_writeback)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	int pgactivate = 0;
	unsigned long nr_dirty = 0;
	unsigned long nr_congested = 0;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_writeback = 0;

	cond_resched();

	while (!list_empty(page_list)) {
		enum page_references references;
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(page_zone(page) != mz->zone);

		sc->nr_scanned++;

		if (unlikely(!page_evictable(page, NULL)))
			goto cull_mlocked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			nr_writeback++;
			/*
			 * Synchronous reclaim cannot queue pages for
			 * writeback due to the possibility of stack overflow
			 * but if it encounters a page under writeback, wait
			 * for the IO to complete.
			 */
			if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
			    may_enter_fs)
				wait_on_page_writeback(page);
			else {
				unlock_page(page);
				goto keep_lumpy;
			}
		}

		references = page_check_references(page, mz, sc);
		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!(sc->gfp_mask & __GFP_IO))
				goto keep_locked;
			if (!add_to_swap(page))
				goto activate_locked;
			may_enter_fs = 1;
		}

		mapping = page_mapping(page);

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, TTU_UNMAP)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_MLOCK:
				goto cull_mlocked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			nr_dirty++;

			/*
			 * Only kswapd can write back filesystem pages, to
			 * avoid the risk of stack overflow, and even then
			 * only when under significant pressure.
			 */
			if (page_is_file_cache(page) &&
					(!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty
				 */
				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto keep_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sc)) {
			case PAGE_KEEP:
				nr_congested++;
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page))
					goto keep_lumpy;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (!mapping || !__remove_mapping(mapping, page))
			goto keep_locked;

		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache). Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references.
		 */
		__clear_page_locked(page);
free_it:
		nr_reclaimed++;

		/*
		 * Is there a need to periodically drain free_pages? It
		 * would appear not, as the counts should be low.
		 */
		list_add(&page->lru, &free_pages);
		continue;

cull_mlocked:
		if (PageSwapCache(page))
			try_to_free_swap(page);
		unlock_page(page);
		putback_lru_page(page);
		reset_reclaim_mode(sc);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && vm_swap_full())
			try_to_free_swap(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		reset_reclaim_mode(sc);
keep_lumpy:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
	}

	/*
	 * Tag a zone as congested if all the dirty pages encountered were
	 * backed by a congested BDI. In this case, reclaimers should just
	 * back off and wait for congestion to clear because further reclaim
	 * will encounter the same problem
	 */
	if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
		zone_set_flag(mz->zone, ZONE_CONGESTED);

	free_hot_cold_page_list(&free_pages, 1);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);
	*ret_nr_dirty += nr_dirty;
	*ret_nr_writeback += nr_writeback;
	return nr_reclaimed;
}

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes (isolate_mode_t flags)
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
{
	bool all_lru_mode;
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
		(ISOLATE_ACTIVE|ISOLATE_INACTIVE);

	/*
	 * When checking the active state, we need to be sure we are
10595ad333ebSAndy Whitcroft 	 * dealing with comparable boolean values.  Take the logical not
10595ad333ebSAndy Whitcroft 	 * of each.
10605ad333ebSAndy Whitcroft 	 */
10614356f21dSMinchan Kim 	if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
10625ad333ebSAndy Whitcroft 		return ret;
10635ad333ebSAndy Whitcroft 
10644356f21dSMinchan Kim 	if (!all_lru_mode && !!page_is_file_cache(page) != file)
10654f98a2feSRik van Riel 		return ret;
10664f98a2feSRik van Riel 
1067894bc310SLee Schermerhorn 	/*
1068894bc310SLee Schermerhorn 	 * When this function is being called for lumpy reclaim, we
1069894bc310SLee Schermerhorn 	 * initially look into all LRU pages, active, inactive and
1070894bc310SLee Schermerhorn 	 * unevictable; only give shrink_page_list evictable pages.
1071894bc310SLee Schermerhorn 	 */
1072894bc310SLee Schermerhorn 	if (PageUnevictable(page))
1073894bc310SLee Schermerhorn 		return ret;
1074894bc310SLee Schermerhorn 
10755ad333ebSAndy Whitcroft 	ret = -EBUSY;
107608e552c6SKAMEZAWA Hiroyuki 
1077c8244935SMel Gorman 	/*
1078c8244935SMel Gorman 	 * To minimise LRU disruption, the caller can indicate that it only
1079c8244935SMel Gorman 	 * wants to isolate pages it will be able to operate on without
1080c8244935SMel Gorman 	 * blocking - clean pages for the most part.
1081c8244935SMel Gorman 	 *
1082c8244935SMel Gorman 	 * ISOLATE_CLEAN means that only clean pages should be isolated. This
1083c8244935SMel Gorman 	 * is used by reclaim when it cannot write to backing storage.
1084c8244935SMel Gorman 	 *
1085c8244935SMel Gorman 	 * ISOLATE_ASYNC_MIGRATE is used to indicate that the caller only
1086c8244935SMel Gorman 	 * wants pages that can be migrated without blocking.
1087c8244935SMel Gorman 	 */
1088c8244935SMel Gorman 	if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
1089c8244935SMel Gorman 		/* All the caller can do on PageWriteback is block */
1090c8244935SMel Gorman 		if (PageWriteback(page))
109139deaf85SMinchan Kim 			return ret;
109239deaf85SMinchan Kim 
1093c8244935SMel Gorman 		if (PageDirty(page)) {
1094c8244935SMel Gorman 			struct address_space *mapping;
1095c8244935SMel Gorman 
1096c8244935SMel Gorman 			/* ISOLATE_CLEAN means only clean pages */
1097c8244935SMel Gorman 			if (mode & ISOLATE_CLEAN)
1098c8244935SMel Gorman 				return ret;
1099c8244935SMel Gorman 
1100c8244935SMel Gorman 			/*
1101c8244935SMel Gorman 			 * Only pages without mappings or that have a
1102c8244935SMel Gorman 			 * ->migratepage callback are possible to migrate
1103c8244935SMel Gorman 			 * without blocking
1104c8244935SMel Gorman 			 */
1105c8244935SMel Gorman 			mapping = page_mapping(page);
1106c8244935SMel Gorman 			if (mapping && !mapping->a_ops->migratepage)
1107c8244935SMel Gorman 				return ret;
1108c8244935SMel Gorman 		}
1109c8244935SMel Gorman 	}
1110c8244935SMel Gorman 
1111f80c0673SMinchan Kim 	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1112f80c0673SMinchan Kim 		return ret;
1113f80c0673SMinchan Kim 
11145ad333ebSAndy Whitcroft 	if (likely(get_page_unless_zero(page))) {
11155ad333ebSAndy Whitcroft 		/*
11165ad333ebSAndy Whitcroft 		 * Be careful not to clear PageLRU until after we're
11175ad333ebSAndy Whitcroft 		 * sure the page is not being freed elsewhere -- the
11185ad333ebSAndy Whitcroft 		 * page release code relies on it.
11195ad333ebSAndy Whitcroft 		 */
11205ad333ebSAndy Whitcroft 		ClearPageLRU(page);
11215ad333ebSAndy Whitcroft 		ret = 0;
11225ad333ebSAndy Whitcroft 	}
11235ad333ebSAndy Whitcroft 
11245ad333ebSAndy Whitcroft 	return ret;
11255ad333ebSAndy Whitcroft }
11265ad333ebSAndy Whitcroft 
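/*
 * Editorial sketch, not part of the original file: the calling convention
 * for __isolate_lru_page().  zone->lru_lock must be held; on success the
 * page has been reference-counted and had PageLRU cleared, so the caller
 * owns it.  The helper name, the ISOLATE_INACTIVE mode and file == 0
 * (anon) are example choices only; the real caller, isolate_lru_pages()
 * below, also updates memcg accounting with mem_cgroup_lru_del() before
 * moving the page.
 */
static void __maybe_unused example_isolate_one(struct zone *zone,
					       struct page *page,
					       struct list_head *dst)
{
	spin_lock_irq(&zone->lru_lock);
	if (__isolate_lru_page(page, ISOLATE_INACTIVE, 0) == 0)
		list_move(&page->lru, dst);	/* off the LRU, onto ours */
	spin_unlock_irq(&zone->lru_lock);
}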
112749d2e9ccSChristoph Lameter /*
11281da177e4SLinus Torvalds  * zone->lru_lock is heavily contended.  Some of the functions that
11291da177e4SLinus Torvalds  * shrink the lists perform better by taking out a batch of pages
11301da177e4SLinus Torvalds  * and working on them outside the LRU lock.
11311da177e4SLinus Torvalds  *
11321da177e4SLinus Torvalds  * For pagecache intensive workloads, this function is the hottest
11331da177e4SLinus Torvalds  * spot in the kernel (apart from copy_*_user functions).
11341da177e4SLinus Torvalds  *
11351da177e4SLinus Torvalds  * Appropriate locks must be held before calling this function.
11361da177e4SLinus Torvalds  *
11371da177e4SLinus Torvalds  * @nr_to_scan:	The number of pages to look through on the list.
1138f626012dSHugh Dickins  * @mz:		The mem_cgroup_zone to pull pages from.
11391da177e4SLinus Torvalds  * @dst:	The temp list to put pages on to.
1140f626012dSHugh Dickins  * @nr_scanned:	The number of pages that were scanned.
1141fe2c2a10SRik van Riel  * @sc:		The scan_control struct for this reclaim session
11425ad333ebSAndy Whitcroft  * @mode:	One of the LRU isolation modes
1143f626012dSHugh Dickins  * @active:	True [1] if isolating active pages
11444f98a2feSRik van Riel  * @file:	True [1] if isolating file [!anon] pages
11451da177e4SLinus Torvalds  *
11461da177e4SLinus Torvalds  * returns how many pages were moved onto *@dst.
11471da177e4SLinus Torvalds  */
114869e05944SAndrew Morton static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1149f626012dSHugh Dickins 		struct mem_cgroup_zone *mz, struct list_head *dst,
1150fe2c2a10SRik van Riel 		unsigned long *nr_scanned, struct scan_control *sc,
1151fe2c2a10SRik van Riel 		isolate_mode_t mode, int active, int file)
11521da177e4SLinus Torvalds {
1153f626012dSHugh Dickins 	struct lruvec *lruvec;
1154f626012dSHugh Dickins 	struct list_head *src;
115569e05944SAndrew Morton 	unsigned long nr_taken = 0;
1156a8a94d15SMel Gorman 	unsigned long nr_lumpy_taken = 0;
1157a8a94d15SMel Gorman 	unsigned long nr_lumpy_dirty = 0;
1158a8a94d15SMel Gorman 	unsigned long nr_lumpy_failed = 0;
1159c9b02d97SWu Fengguang 	unsigned long scan;
1160f626012dSHugh Dickins 	int lru = LRU_BASE;
1161f626012dSHugh Dickins 
1162f626012dSHugh Dickins 	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
1163f626012dSHugh Dickins 	if (active)
1164f626012dSHugh Dickins 		lru += LRU_ACTIVE;
1165f626012dSHugh Dickins 	if (file)
1166f626012dSHugh Dickins 		lru += LRU_FILE;
1167f626012dSHugh Dickins 	src = &lruvec->lists[lru];
11681da177e4SLinus Torvalds 
1169c9b02d97SWu Fengguang 	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
11705ad333ebSAndy Whitcroft 		struct page *page;
11715ad333ebSAndy Whitcroft 		unsigned long pfn;
11725ad333ebSAndy Whitcroft 		unsigned long end_pfn;
11735ad333ebSAndy Whitcroft 		unsigned long page_pfn;
11745ad333ebSAndy Whitcroft 		int zone_id;
11755ad333ebSAndy Whitcroft 
11761da177e4SLinus Torvalds 		page = lru_to_page(src);
11771da177e4SLinus Torvalds 		prefetchw_prev_lru_page(page, src, flags);
11781da177e4SLinus Torvalds 
1179725d704eSNick Piggin 		VM_BUG_ON(!PageLRU(page));
11808d438f96SNick Piggin 
11814f98a2feSRik van Riel 		switch (__isolate_lru_page(page, mode, file)) {
11825ad333ebSAndy Whitcroft 		case 0:
1183925b7673SJohannes Weiner 			mem_cgroup_lru_del(page);
11845ad333ebSAndy Whitcroft 			list_move(&page->lru, dst);
11852c888cfbSRik van Riel 			nr_taken += hpage_nr_pages(page);
11865ad333ebSAndy Whitcroft 			break;
11877c8ee9a8SNick Piggin 
11885ad333ebSAndy Whitcroft 		case -EBUSY:
11895ad333ebSAndy Whitcroft 			/* else it is being freed elsewhere */
11905ad333ebSAndy Whitcroft 			list_move(&page->lru, src);
11915ad333ebSAndy Whitcroft 			continue;
11925ad333ebSAndy Whitcroft 
11935ad333ebSAndy Whitcroft 		default:
11945ad333ebSAndy Whitcroft 			BUG();
11955ad333ebSAndy Whitcroft 		}
11965ad333ebSAndy Whitcroft 
1197fe2c2a10SRik van Riel 		if (!sc->order || !(sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM))
11985ad333ebSAndy Whitcroft 			continue;
11995ad333ebSAndy Whitcroft 
12005ad333ebSAndy Whitcroft 		/*
12015ad333ebSAndy Whitcroft 		 * Attempt to take all pages in the order aligned region
12025ad333ebSAndy Whitcroft 		 * surrounding the tag page.  Only take those pages of
12035ad333ebSAndy Whitcroft 		 * the same active state as that tag page.  We may safely
12045ad333ebSAndy Whitcroft 		 * round the target page pfn down to the requested order
120525985edcSLucas De Marchi 		 * as the mem_map is guaranteed valid out to MAX_ORDER,
12065ad333ebSAndy Whitcroft 		 * and if a page there is in a different zone we will detect
12075ad333ebSAndy Whitcroft 		 * it from its zone id and abort this block scan.
12085ad333ebSAndy Whitcroft 		 */
12095ad333ebSAndy Whitcroft 		zone_id = page_zone_id(page);
12105ad333ebSAndy Whitcroft 		page_pfn = page_to_pfn(page);
1211fe2c2a10SRik van Riel 		pfn = page_pfn & ~((1 << sc->order) - 1);
1212fe2c2a10SRik van Riel 		end_pfn = pfn + (1 << sc->order);
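		/*
		 * Editorial worked example: with sc->order == 2 and a tag
		 * page at page_pfn == 0x2a5, pfn == 0x2a5 & ~3 == 0x2a4 and
		 * end_pfn == 0x2a8, i.e. the naturally aligned block of
		 * four pages containing the tag page.
		 */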
12135ad333ebSAndy Whitcroft 		for (; pfn < end_pfn; pfn++) {
12145ad333ebSAndy Whitcroft 			struct page *cursor_page;
12155ad333ebSAndy Whitcroft 
12165ad333ebSAndy Whitcroft 			/* The target page is in the block, ignore it. */
12175ad333ebSAndy Whitcroft 			if (unlikely(pfn == page_pfn))
12185ad333ebSAndy Whitcroft 				continue;
12195ad333ebSAndy Whitcroft 
12205ad333ebSAndy Whitcroft 			/* Avoid holes within the zone. */
12215ad333ebSAndy Whitcroft 			if (unlikely(!pfn_valid_within(pfn)))
12225ad333ebSAndy Whitcroft 				break;
12235ad333ebSAndy Whitcroft 
12245ad333ebSAndy Whitcroft 			cursor_page = pfn_to_page(pfn);
12254f98a2feSRik van Riel 
12265ad333ebSAndy Whitcroft 			/* Check that we have not crossed a zone boundary. */
12275ad333ebSAndy Whitcroft 			if (unlikely(page_zone_id(cursor_page) != zone_id))
122808fc468fSKOSAKI Motohiro 				break;
1229de2e7567SMinchan Kim 
1230de2e7567SMinchan Kim 			/*
1231de2e7567SMinchan Kim 			 * If we don't have enough swap space, reclaiming of
1232de2e7567SMinchan Kim 			 * anon pages which don't already have a swap slot is
1233de2e7567SMinchan Kim 			 * pointless.
1234de2e7567SMinchan Kim 			 */
1235043bcbe5SHugh Dickins 			if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
1236de2e7567SMinchan Kim 			    !PageSwapCache(cursor_page))
123708fc468fSKOSAKI Motohiro 				break;
1238de2e7567SMinchan Kim 
1239ee993b13SKAMEZAWA Hiroyuki 			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
124050134731SAndrea Arcangeli 				unsigned int isolated_pages;
124150134731SAndrea Arcangeli 
1242925b7673SJohannes Weiner 				mem_cgroup_lru_del(cursor_page);
12435ad333ebSAndy Whitcroft 				list_move(&cursor_page->lru, dst);
124450134731SAndrea Arcangeli 				isolated_pages = hpage_nr_pages(cursor_page);
124550134731SAndrea Arcangeli 				nr_taken += isolated_pages;
124650134731SAndrea Arcangeli 				nr_lumpy_taken += isolated_pages;
1247a8a94d15SMel Gorman 				if (PageDirty(cursor_page))
124850134731SAndrea Arcangeli 					nr_lumpy_dirty += isolated_pages;
12495ad333ebSAndy Whitcroft 				scan++;
125050134731SAndrea Arcangeli 				pfn += isolated_pages - 1;
1251a8a94d15SMel Gorman 			} else {
1252d179e84bSAndrea Arcangeli 				/*
1253d179e84bSAndrea Arcangeli 				 * Check if the page is freed already.
1254d179e84bSAndrea Arcangeli 				 *
1255d179e84bSAndrea Arcangeli 				 * We can't use page_count() as that
1256d179e84bSAndrea Arcangeli 				 * requires compound_head and we don't
1257d179e84bSAndrea Arcangeli 				 * have a pin on the page here. If a
1258d179e84bSAndrea Arcangeli 				 * page is tail, we may or may not
1259d179e84bSAndrea Arcangeli 				 * have isolated the head, so assume
1260d179e84bSAndrea Arcangeli 				 * it's not free, it'd be tricky to
1261d179e84bSAndrea Arcangeli 				 * track the head status without a
1262d179e84bSAndrea Arcangeli 				 * page pin.
1263d179e84bSAndrea Arcangeli 				 */
1264d179e84bSAndrea Arcangeli 				if (!PageTail(cursor_page) &&
1265d179e84bSAndrea Arcangeli 				    !atomic_read(&cursor_page->_count))
126608fc468fSKOSAKI Motohiro 					continue;
126708fc468fSKOSAKI Motohiro 				break;
126808fc468fSKOSAKI Motohiro 			}
126908fc468fSKOSAKI Motohiro 		}
127008fc468fSKOSAKI Motohiro 
127108fc468fSKOSAKI Motohiro 		/* If we break out of the loop above, lumpy reclaim failed */
127208fc468fSKOSAKI Motohiro 		if (pfn < end_pfn)
1273a8a94d15SMel Gorman 			nr_lumpy_failed++;
12745ad333ebSAndy Whitcroft 	}
12751da177e4SLinus Torvalds 
1276f626012dSHugh Dickins 	*nr_scanned = scan;
1277a8a94d15SMel Gorman 
1278fe2c2a10SRik van Riel 	trace_mm_vmscan_lru_isolate(sc->order,
1279a8a94d15SMel Gorman 			nr_to_scan, scan,
1280a8a94d15SMel Gorman 			nr_taken,
1281a8a94d15SMel Gorman 			nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
1282ea4d349fSTao Ma 			mode, file);
12831da177e4SLinus Torvalds 	return nr_taken;
12841da177e4SLinus Torvalds }
12851da177e4SLinus Torvalds 
128662695a84SNick Piggin /**
128762695a84SNick Piggin  * isolate_lru_page - tries to isolate a page from its LRU list
128862695a84SNick Piggin  * @page: page to isolate from its LRU list
128962695a84SNick Piggin  *
129062695a84SNick Piggin  * Isolates a @page from an LRU list, clears PageLRU and adjusts the
129162695a84SNick Piggin  * vmstat statistic corresponding to whatever LRU list the page was on.
129262695a84SNick Piggin  *
129362695a84SNick Piggin  * Returns 0 if the page was removed from an LRU list.
129462695a84SNick Piggin  * Returns -EBUSY if the page was not on an LRU list.
129562695a84SNick Piggin  *
129662695a84SNick Piggin  * The returned page will have PageLRU() cleared.  If it was found on
1297894bc310SLee Schermerhorn  * the active list, it will have PageActive set.  If it was found on
1298894bc310SLee Schermerhorn  * the unevictable list, it will have the PageUnevictable bit set. That flag
1299894bc310SLee Schermerhorn  * may need to be cleared by the caller before letting the page go.
130062695a84SNick Piggin  *
130162695a84SNick Piggin  * The vmstat statistic corresponding to the list on which the page was
130262695a84SNick Piggin  * found will be decremented.
130362695a84SNick Piggin  *
130462695a84SNick Piggin  * Restrictions:
130562695a84SNick Piggin  * (1) Must be called with an elevated refcount on the page. This is a
130662695a84SNick Piggin  *     fundamental difference from isolate_lru_pages (which is called
130762695a84SNick Piggin  *     without a stable reference).
130862695a84SNick Piggin  * (2) the lru_lock must not be held.
130962695a84SNick Piggin  * (3) interrupts must be enabled.
131062695a84SNick Piggin  */
131162695a84SNick Piggin int isolate_lru_page(struct page *page)
131262695a84SNick Piggin {
131362695a84SNick Piggin 	int ret = -EBUSY;
131462695a84SNick Piggin 
13150c917313SKonstantin Khlebnikov 	VM_BUG_ON(!page_count(page));
13160c917313SKonstantin Khlebnikov 
131762695a84SNick Piggin 	if (PageLRU(page)) {
131862695a84SNick Piggin 		struct zone *zone = page_zone(page);
131962695a84SNick Piggin 
132062695a84SNick Piggin 		spin_lock_irq(&zone->lru_lock);
13210c917313SKonstantin Khlebnikov 		if (PageLRU(page)) {
1322894bc310SLee Schermerhorn 			int lru = page_lru(page);
132362695a84SNick Piggin 			ret = 0;
13240c917313SKonstantin Khlebnikov 			get_page(page);
132562695a84SNick Piggin 			ClearPageLRU(page);
13264f98a2feSRik van Riel 
13274f98a2feSRik van Riel 			del_page_from_lru_list(zone, page, lru);
132862695a84SNick Piggin 		}
132962695a84SNick Piggin 		spin_unlock_irq(&zone->lru_lock);
133062695a84SNick Piggin 	}
133162695a84SNick Piggin 	return ret;
133262695a84SNick Piggin }
133362695a84SNick Piggin 
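/*
 * Editorial sketch, not part of the original file: a hypothetical caller
 * that already holds a reference on @page (restriction (1) above), for
 * example from get_user_pages(), and wants the page temporarily off its
 * LRU list.  The helper name is illustrative only.
 */
static int __maybe_unused example_isolate_and_putback(struct page *page)
{
	int err = isolate_lru_page(page);	/* takes its own page ref */

	if (err)
		return err;			/* -EBUSY: not on any LRU */

	/* PageLRU is clear here, so reclaim cannot take the page from us */

	putback_lru_page(page);	/* re-adds to an LRU, drops isolation ref */
	return 0;
}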
13345ad333ebSAndy Whitcroft /*
133535cd7815SRik van Riel  * Are there way too many processes in the direct reclaim path already?
133635cd7815SRik van Riel  */
133735cd7815SRik van Riel static int too_many_isolated(struct zone *zone, int file,
133835cd7815SRik van Riel 		struct scan_control *sc)
133935cd7815SRik van Riel {
134035cd7815SRik van Riel 	unsigned long inactive, isolated;
134135cd7815SRik van Riel 
134235cd7815SRik van Riel 	if (current_is_kswapd())
134335cd7815SRik van Riel 		return 0;
134435cd7815SRik van Riel 
134589b5fae5SJohannes Weiner 	if (!global_reclaim(sc))
134635cd7815SRik van Riel 		return 0;
134735cd7815SRik van Riel 
134835cd7815SRik van Riel 	if (file) {
134935cd7815SRik van Riel 		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
135035cd7815SRik van Riel 		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
135135cd7815SRik van Riel 	} else {
135235cd7815SRik van Riel 		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
135335cd7815SRik van Riel 		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
135435cd7815SRik van Riel 	}
135535cd7815SRik van Riel 
135635cd7815SRik van Riel 	return isolated > inactive;
135735cd7815SRik van Riel }
135835cd7815SRik van Riel 
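/*
 * Editorial worked example: with 2000 pages on NR_INACTIVE_FILE and 2500
 * already accounted in NR_ISOLATED_FILE, too_many_isolated() returns
 * true and a direct reclaimer backs off in congestion_wait() (see
 * shrink_inactive_list() below) instead of isolating even more pages.
 */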
135966635629SMel Gorman static noinline_for_stack void
13603f79768fSHugh Dickins putback_inactive_pages(struct mem_cgroup_zone *mz,
136166635629SMel Gorman 		       struct list_head *page_list)
136266635629SMel Gorman {
1363f16015fbSJohannes Weiner 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
13643f79768fSHugh Dickins 	struct zone *zone = mz->zone;
13653f79768fSHugh Dickins 	LIST_HEAD(pages_to_free);
136666635629SMel Gorman 
136766635629SMel Gorman 	/*
136866635629SMel Gorman 	 * Put back any unfreeable pages.
136966635629SMel Gorman 	 */
137066635629SMel Gorman 	while (!list_empty(page_list)) {
13713f79768fSHugh Dickins 		struct page *page = lru_to_page(page_list);
137266635629SMel Gorman 		int lru;
13733f79768fSHugh Dickins 
137466635629SMel Gorman 		VM_BUG_ON(PageLRU(page));
137566635629SMel Gorman 		list_del(&page->lru);
137666635629SMel Gorman 		if (unlikely(!page_evictable(page, NULL))) {
137766635629SMel Gorman 			spin_unlock_irq(&zone->lru_lock);
137866635629SMel Gorman 			putback_lru_page(page);
137966635629SMel Gorman 			spin_lock_irq(&zone->lru_lock);
138066635629SMel Gorman 			continue;
138166635629SMel Gorman 		}
13827a608572SLinus Torvalds 		SetPageLRU(page);
138366635629SMel Gorman 		lru = page_lru(page);
13847a608572SLinus Torvalds 		add_page_to_lru_list(zone, page, lru);
138566635629SMel Gorman 		if (is_active_lru(lru)) {
138666635629SMel Gorman 			int file = is_file_lru(lru);
13879992af10SRik van Riel 			int numpages = hpage_nr_pages(page);
13889992af10SRik van Riel 			reclaim_stat->recent_rotated[file] += numpages;
138966635629SMel Gorman 		}
13902bcf8879SHugh Dickins 		if (put_page_testzero(page)) {
13912bcf8879SHugh Dickins 			__ClearPageLRU(page);
13922bcf8879SHugh Dickins 			__ClearPageActive(page);
13932bcf8879SHugh Dickins 			del_page_from_lru_list(zone, page, lru);
13942bcf8879SHugh Dickins 
13952bcf8879SHugh Dickins 			if (unlikely(PageCompound(page))) {
139666635629SMel Gorman 				spin_unlock_irq(&zone->lru_lock);
13972bcf8879SHugh Dickins 				(*get_compound_page_dtor(page))(page);
139866635629SMel Gorman 				spin_lock_irq(&zone->lru_lock);
13992bcf8879SHugh Dickins 			} else
14002bcf8879SHugh Dickins 				list_add(&page->lru, &pages_to_free);
140166635629SMel Gorman 		}
140266635629SMel Gorman 	}
140366635629SMel Gorman 
14043f79768fSHugh Dickins 	/*
14053f79768fSHugh Dickins 	 * To save our caller's stack, now use input list for pages to free.
14063f79768fSHugh Dickins 	 */
14073f79768fSHugh Dickins 	list_splice(&pages_to_free, page_list);
140866635629SMel Gorman }
140966635629SMel Gorman 
1410f16015fbSJohannes Weiner static noinline_for_stack void
1411f16015fbSJohannes Weiner update_isolated_counts(struct mem_cgroup_zone *mz,
14123f79768fSHugh Dickins 		       struct list_head *page_list,
14131489fa14SMel Gorman 		       unsigned long *nr_anon,
14143f79768fSHugh Dickins 		       unsigned long *nr_file)
14151489fa14SMel Gorman {
14163f79768fSHugh Dickins 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
1417f16015fbSJohannes Weiner 	struct zone *zone = mz->zone;
14181489fa14SMel Gorman 	unsigned int count[NR_LRU_LISTS] = { 0, };
14193f79768fSHugh Dickins 	unsigned long nr_active = 0;
14203f79768fSHugh Dickins 	struct page *page;
14213f79768fSHugh Dickins 	int lru;
14221489fa14SMel Gorman 
14233f79768fSHugh Dickins 	/*
14243f79768fSHugh Dickins 	 * Count pages and clear active flags
14253f79768fSHugh Dickins 	 */
14263f79768fSHugh Dickins 	list_for_each_entry(page, page_list, lru) {
14273f79768fSHugh Dickins 		int numpages = hpage_nr_pages(page);
14283f79768fSHugh Dickins 		lru = page_lru_base_type(page);
14293f79768fSHugh Dickins 		if (PageActive(page)) {
14303f79768fSHugh Dickins 			lru += LRU_ACTIVE;
14313f79768fSHugh Dickins 			ClearPageActive(page);
14323f79768fSHugh Dickins 			nr_active += numpages;
14333f79768fSHugh Dickins 		}
14343f79768fSHugh Dickins 		count[lru] += numpages;
14353f79768fSHugh Dickins 	}
14363f79768fSHugh Dickins 
14371489fa14SMel Gorman 	__count_vm_events(PGDEACTIVATE, nr_active);
14381489fa14SMel Gorman 
14391489fa14SMel Gorman 	__mod_zone_page_state(zone, NR_ACTIVE_FILE,
14401489fa14SMel Gorman 			      -count[LRU_ACTIVE_FILE]);
14411489fa14SMel Gorman 	__mod_zone_page_state(zone, NR_INACTIVE_FILE,
14421489fa14SMel Gorman 			      -count[LRU_INACTIVE_FILE]);
14431489fa14SMel Gorman 	__mod_zone_page_state(zone, NR_ACTIVE_ANON,
14441489fa14SMel Gorman 			      -count[LRU_ACTIVE_ANON]);
14451489fa14SMel Gorman 	__mod_zone_page_state(zone, NR_INACTIVE_ANON,
14461489fa14SMel Gorman 			      -count[LRU_INACTIVE_ANON]);
14471489fa14SMel Gorman 
14481489fa14SMel Gorman 	*nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
14491489fa14SMel Gorman 	*nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
14501489fa14SMel Gorman 
14511489fa14SMel Gorman 	reclaim_stat->recent_scanned[0] += *nr_anon;
14521489fa14SMel Gorman 	reclaim_stat->recent_scanned[1] += *nr_file;
14531489fa14SMel Gorman }
14541489fa14SMel Gorman 
145566635629SMel Gorman /*
1456a18bba06SMel Gorman  * Returns true if a direct reclaim should wait on pages under writeback.
1457e31f3698SWu Fengguang  *
1458e31f3698SWu Fengguang  * If we are direct reclaiming for contiguous pages and we do not reclaim
1459e31f3698SWu Fengguang  * everything in the list, try again and wait for writeback IO to complete.
1460e31f3698SWu Fengguang  * This will stall high-order allocations noticeably. Only do that when we
1461e31f3698SWu Fengguang  * really need to free the pages under high memory pressure.
1462e31f3698SWu Fengguang  */
1463e31f3698SWu Fengguang static inline bool should_reclaim_stall(unsigned long nr_taken,
1464e31f3698SWu Fengguang 					unsigned long nr_freed,
1465e31f3698SWu Fengguang 					int priority,
1466e31f3698SWu Fengguang 					struct scan_control *sc)
1467e31f3698SWu Fengguang {
1468e31f3698SWu Fengguang 	int lumpy_stall_priority;
1469e31f3698SWu Fengguang 
1470e31f3698SWu Fengguang 	/* kswapd should not stall on sync IO */
1471e31f3698SWu Fengguang 	if (current_is_kswapd())
1472e31f3698SWu Fengguang 		return false;
1473e31f3698SWu Fengguang 
1474e31f3698SWu Fengguang 	/* Only stall on lumpy reclaim */
1475f3a310bcSMel Gorman 	if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
1476e31f3698SWu Fengguang 		return false;
1477e31f3698SWu Fengguang 
147881d66c70SJustin P. Mattock 	/* If we have reclaimed everything on the isolated list, no stall */
1479e31f3698SWu Fengguang 	if (nr_freed == nr_taken)
1480e31f3698SWu Fengguang 		return false;
1481e31f3698SWu Fengguang 
1482e31f3698SWu Fengguang 	/*
1483e31f3698SWu Fengguang 	 * For high-order allocations, there are two stall thresholds.
1484e31f3698SWu Fengguang 	 * High-cost allocations stall immediately, whereas lower
1485e31f3698SWu Fengguang 	 * order allocations such as stacks require the scanning
1486e31f3698SWu Fengguang 	 * priority to be much higher before stalling.
1487e31f3698SWu Fengguang 	 */
1488e31f3698SWu Fengguang 	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1489e31f3698SWu Fengguang 		lumpy_stall_priority = DEF_PRIORITY;
1490e31f3698SWu Fengguang 	else
1491e31f3698SWu Fengguang 		lumpy_stall_priority = DEF_PRIORITY / 3;
1492e31f3698SWu Fengguang 
1493e31f3698SWu Fengguang 	return priority <= lumpy_stall_priority;
1494e31f3698SWu Fengguang }
1495e31f3698SWu Fengguang 
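/*
 * Editorial worked example, assuming DEF_PRIORITY == 12 and
 * PAGE_ALLOC_COSTLY_ORDER == 3: an order-5 (high-cost) lumpy reclaim
 * that failed to free everything it isolated stalls at any priority,
 * since priority <= 12 always holds; an order-2 request (e.g. a kernel
 * stack) only stalls once the scan priority has dropped to
 * 12 / 3 == 4 or below.
 */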
1496e31f3698SWu Fengguang /*
14971742f19fSAndrew Morton  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
14981742f19fSAndrew Morton  * of reclaimed pages
14991da177e4SLinus Torvalds  */
150066635629SMel Gorman static noinline_for_stack unsigned long
1501f16015fbSJohannes Weiner shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
150266635629SMel Gorman 		     struct scan_control *sc, int priority, int file)
15031da177e4SLinus Torvalds {
15041da177e4SLinus Torvalds 	LIST_HEAD(page_list);
1505e247dbceSKOSAKI Motohiro 	unsigned long nr_scanned;
150605ff5137SAndrew Morton 	unsigned long nr_reclaimed = 0;
1507e247dbceSKOSAKI Motohiro 	unsigned long nr_taken;
1508e247dbceSKOSAKI Motohiro 	unsigned long nr_anon;
1509e247dbceSKOSAKI Motohiro 	unsigned long nr_file;
151092df3a72SMel Gorman 	unsigned long nr_dirty = 0;
151192df3a72SMel Gorman 	unsigned long nr_writeback = 0;
151261317289SHillf Danton 	isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
1513f16015fbSJohannes Weiner 	struct zone *zone = mz->zone;
151478dc583dSKOSAKI Motohiro 
151535cd7815SRik van Riel 	while (unlikely(too_many_isolated(zone, file, sc))) {
151658355c78SKOSAKI Motohiro 		congestion_wait(BLK_RW_ASYNC, HZ/10);
151735cd7815SRik van Riel 
151835cd7815SRik van Riel 		/* We are about to die and free our memory. Return now. */
151935cd7815SRik van Riel 		if (fatal_signal_pending(current))
152035cd7815SRik van Riel 			return SWAP_CLUSTER_MAX;
152135cd7815SRik van Riel 	}
152235cd7815SRik van Riel 
1523f3a310bcSMel Gorman 	set_reclaim_mode(priority, sc, false);
15244356f21dSMinchan Kim 	if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
152561317289SHillf Danton 		isolate_mode |= ISOLATE_ACTIVE;
15264356f21dSMinchan Kim 
15271da177e4SLinus Torvalds 	lru_add_drain();
1528f80c0673SMinchan Kim 
1529f80c0673SMinchan Kim 	if (!sc->may_unmap)
153061317289SHillf Danton 		isolate_mode |= ISOLATE_UNMAPPED;
1531f80c0673SMinchan Kim 	if (!sc->may_writepage)
153261317289SHillf Danton 		isolate_mode |= ISOLATE_CLEAN;
1533f80c0673SMinchan Kim 
15341da177e4SLinus Torvalds 	spin_lock_irq(&zone->lru_lock);
15351da177e4SLinus Torvalds 
1536fe2c2a10SRik van Riel 	nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned,
1537fe2c2a10SRik van Riel 				     sc, isolate_mode, 0, file);
153889b5fae5SJohannes Weiner 	if (global_reclaim(sc)) {
1539e247dbceSKOSAKI Motohiro 		zone->pages_scanned += nr_scanned;
1540b35ea17bSKOSAKI Motohiro 		if (current_is_kswapd())
1541b35ea17bSKOSAKI Motohiro 			__count_zone_vm_events(PGSCAN_KSWAPD, zone,
1542e247dbceSKOSAKI Motohiro 					       nr_scanned);
1543b35ea17bSKOSAKI Motohiro 		else
1544b35ea17bSKOSAKI Motohiro 			__count_zone_vm_events(PGSCAN_DIRECT, zone,
1545e247dbceSKOSAKI Motohiro 					       nr_scanned);
1546b35ea17bSKOSAKI Motohiro 	}
1547b35ea17bSKOSAKI Motohiro 
154866635629SMel Gorman 	if (nr_taken == 0) {
154966635629SMel Gorman 		spin_unlock_irq(&zone->lru_lock);
155066635629SMel Gorman 		return 0;
155166635629SMel Gorman 	}
1552b35ea17bSKOSAKI Motohiro 
15533f79768fSHugh Dickins 	update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
15543f79768fSHugh Dickins 
15553f79768fSHugh Dickins 	__mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
15563f79768fSHugh Dickins 	__mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
15573e2f41f1SKOSAKI Motohiro 
15581da177e4SLinus Torvalds 	spin_unlock_irq(&zone->lru_lock);
15591da177e4SLinus Torvalds 
1560f16015fbSJohannes Weiner 	nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
156192df3a72SMel Gorman 						&nr_dirty, &nr_writeback);
1562c661b078SAndy Whitcroft 
1563e31f3698SWu Fengguang 	/* Check if we should synchronously wait for writeback */
1564e31f3698SWu Fengguang 	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
1565f3a310bcSMel Gorman 		set_reclaim_mode(priority, sc, true);
1566f16015fbSJohannes Weiner 		nr_reclaimed += shrink_page_list(&page_list, mz, sc,
156792df3a72SMel Gorman 					priority, &nr_dirty, &nr_writeback);
1568c661b078SAndy Whitcroft 	}
1569c661b078SAndy Whitcroft 
15703f79768fSHugh Dickins 	spin_lock_irq(&zone->lru_lock);
15713f79768fSHugh Dickins 
1572b35ea17bSKOSAKI Motohiro 	if (current_is_kswapd())
1573e247dbceSKOSAKI Motohiro 		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
1574e247dbceSKOSAKI Motohiro 	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
1575a74609faSNick Piggin 
15763f79768fSHugh Dickins 	putback_inactive_pages(mz, &page_list);
15773f79768fSHugh Dickins 
15783f79768fSHugh Dickins 	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
15793f79768fSHugh Dickins 	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
15803f79768fSHugh Dickins 
15813f79768fSHugh Dickins 	spin_unlock_irq(&zone->lru_lock);
15823f79768fSHugh Dickins 
15833f79768fSHugh Dickins 	free_hot_cold_page_list(&page_list, 1);
1584e11da5b4SMel Gorman 
158592df3a72SMel Gorman 	/*
158692df3a72SMel Gorman 	 * If reclaim is isolating dirty pages under writeback, it implies
158792df3a72SMel Gorman 	 * that the long-lived page allocation rate is exceeding the page
158892df3a72SMel Gorman 	 * laundering rate. Either the global limits are not being effective
158992df3a72SMel Gorman 	 * at throttling processes due to the page distribution throughout
159092df3a72SMel Gorman 	 * zones or there is heavy usage of a slow backing device. The
159192df3a72SMel Gorman 	 * only option is to throttle from reclaim context which is not ideal
159292df3a72SMel Gorman 	 * as there is no guarantee the dirtying process is throttled in the
159392df3a72SMel Gorman 	 * same way balance_dirty_pages() manages.
159492df3a72SMel Gorman 	 *
159592df3a72SMel Gorman 	 * This scales the number of dirty pages that must be under writeback
159692df3a72SMel Gorman 	 * before throttling depending on priority. It is a simple backoff
159792df3a72SMel Gorman 	 * function that has the most effect in the range DEF_PRIORITY to
159892df3a72SMel Gorman 	 * DEF_PRIORITY-2, which is the priority at which reclaim is
159992df3a72SMel Gorman 	 * considered to be in trouble.
160092df3a72SMel Gorman 	 *
160192df3a72SMel Gorman 	 * DEF_PRIORITY   100% isolated pages must be PageWriteback to throttle
160292df3a72SMel Gorman 	 * DEF_PRIORITY-1  50% must be PageWriteback
160392df3a72SMel Gorman 	 * DEF_PRIORITY-2  25% must be PageWriteback, kswapd in trouble
160492df3a72SMel Gorman 	 * ...
160592df3a72SMel Gorman 	 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
160692df3a72SMel Gorman 	 *                     isolated page is PageWriteback
160792df3a72SMel Gorman 	 */
160892df3a72SMel Gorman 	if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
160992df3a72SMel Gorman 		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
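	/*
	 * Editorial worked example: with DEF_PRIORITY == 12, priority == 10
	 * and nr_taken == SWAP_CLUSTER_MAX (32), the threshold above is
	 * 32 >> (12 - 10) == 8, so the reclaimer throttles once a quarter
	 * of the isolated pages were found under writeback, matching the
	 * DEF_PRIORITY-2 row of the table above.
	 */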
161092df3a72SMel Gorman 
1611e11da5b4SMel Gorman 	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1612e11da5b4SMel Gorman 		zone_idx(zone),
1613e11da5b4SMel Gorman 		nr_scanned, nr_reclaimed,
1614e11da5b4SMel Gorman 		priority,
1615f3a310bcSMel Gorman 		trace_shrink_flags(file, sc->reclaim_mode));
161605ff5137SAndrew Morton 	return nr_reclaimed;
16171da177e4SLinus Torvalds }
16181da177e4SLinus Torvalds 
16193bb1a852SMartin Bligh /*
16201cfb419bSKAMEZAWA Hiroyuki  * This moves pages from the active list to the inactive list.
16211cfb419bSKAMEZAWA Hiroyuki  *
16221cfb419bSKAMEZAWA Hiroyuki  * We move them the other way if the page is referenced by one or more
16231cfb419bSKAMEZAWA Hiroyuki  * processes, from rmap.
16241cfb419bSKAMEZAWA Hiroyuki  *
16251cfb419bSKAMEZAWA Hiroyuki  * If the pages are mostly unmapped, the processing is fast and it is
16261cfb419bSKAMEZAWA Hiroyuki  * appropriate to hold zone->lru_lock across the whole operation.  But if
16271cfb419bSKAMEZAWA Hiroyuki  * the pages are mapped, the processing is slow (page_referenced()) so we
16281cfb419bSKAMEZAWA Hiroyuki  * should drop zone->lru_lock around each page.  It's impossible to balance
16291cfb419bSKAMEZAWA Hiroyuki  * this, so instead we remove the pages from the LRU while processing them.
16301cfb419bSKAMEZAWA Hiroyuki  * It is safe to rely on PG_active against the non-LRU pages in here because
16311cfb419bSKAMEZAWA Hiroyuki  * nobody will play with that bit on a non-LRU page.
16321cfb419bSKAMEZAWA Hiroyuki  *
16331cfb419bSKAMEZAWA Hiroyuki  * The downside is that we have to touch page->_count against each page.
16341cfb419bSKAMEZAWA Hiroyuki  * But we had to alter page->flags anyway.
16351cfb419bSKAMEZAWA Hiroyuki  */
16361cfb419bSKAMEZAWA Hiroyuki 
16373eb4140fSWu Fengguang static void move_active_pages_to_lru(struct zone *zone,
16383eb4140fSWu Fengguang 				     struct list_head *list,
16392bcf8879SHugh Dickins 				     struct list_head *pages_to_free,
16403eb4140fSWu Fengguang 				     enum lru_list lru)
16413eb4140fSWu Fengguang {
16423eb4140fSWu Fengguang 	unsigned long pgmoved = 0;
16433eb4140fSWu Fengguang 	struct page *page;
16443eb4140fSWu Fengguang 
16452bcf8879SHugh Dickins 	if (buffer_heads_over_limit) {
16462bcf8879SHugh Dickins 		spin_unlock_irq(&zone->lru_lock);
16472bcf8879SHugh Dickins 		list_for_each_entry(page, list, lru) {
16482bcf8879SHugh Dickins 			if (page_has_private(page) && trylock_page(page)) {
16492bcf8879SHugh Dickins 				if (page_has_private(page))
16502bcf8879SHugh Dickins 					try_to_release_page(page, 0);
16512bcf8879SHugh Dickins 				unlock_page(page);
16522bcf8879SHugh Dickins 			}
16532bcf8879SHugh Dickins 		}
16542bcf8879SHugh Dickins 		spin_lock_irq(&zone->lru_lock);
16552bcf8879SHugh Dickins 	}
16563eb4140fSWu Fengguang 
16573eb4140fSWu Fengguang 	while (!list_empty(list)) {
1658925b7673SJohannes Weiner 		struct lruvec *lruvec;
1659925b7673SJohannes Weiner 
16603eb4140fSWu Fengguang 		page = lru_to_page(list);
16613eb4140fSWu Fengguang 
16623eb4140fSWu Fengguang 		VM_BUG_ON(PageLRU(page));
16633eb4140fSWu Fengguang 		SetPageLRU(page);
16643eb4140fSWu Fengguang 
1665925b7673SJohannes Weiner 		lruvec = mem_cgroup_lru_add_list(zone, page, lru);
1666925b7673SJohannes Weiner 		list_move(&page->lru, &lruvec->lists[lru]);
16672c888cfbSRik van Riel 		pgmoved += hpage_nr_pages(page);
16683eb4140fSWu Fengguang 
16692bcf8879SHugh Dickins 		if (put_page_testzero(page)) {
16702bcf8879SHugh Dickins 			__ClearPageLRU(page);
16712bcf8879SHugh Dickins 			__ClearPageActive(page);
16722bcf8879SHugh Dickins 			del_page_from_lru_list(zone, page, lru);
16732bcf8879SHugh Dickins 
16742bcf8879SHugh Dickins 			if (unlikely(PageCompound(page))) {
16753eb4140fSWu Fengguang 				spin_unlock_irq(&zone->lru_lock);
16762bcf8879SHugh Dickins 				(*get_compound_page_dtor(page))(page);
16773eb4140fSWu Fengguang 				spin_lock_irq(&zone->lru_lock);
16782bcf8879SHugh Dickins 			} else
16792bcf8879SHugh Dickins 				list_add(&page->lru, pages_to_free);
16803eb4140fSWu Fengguang 		}
16813eb4140fSWu Fengguang 	}
16823eb4140fSWu Fengguang 	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
16833eb4140fSWu Fengguang 	if (!is_active_lru(lru))
16843eb4140fSWu Fengguang 		__count_vm_events(PGDEACTIVATE, pgmoved);
16853eb4140fSWu Fengguang }
16861cfb419bSKAMEZAWA Hiroyuki 
1687f626012dSHugh Dickins static void shrink_active_list(unsigned long nr_to_scan,
1688f16015fbSJohannes Weiner 			       struct mem_cgroup_zone *mz,
1689f16015fbSJohannes Weiner 			       struct scan_control *sc,
1690f16015fbSJohannes Weiner 			       int priority, int file)
16911cfb419bSKAMEZAWA Hiroyuki {
169244c241f1SKOSAKI Motohiro 	unsigned long nr_taken;
1693f626012dSHugh Dickins 	unsigned long nr_scanned;
16946fe6b7e3SWu Fengguang 	unsigned long vm_flags;
16951cfb419bSKAMEZAWA Hiroyuki 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
16968cab4754SWu Fengguang 	LIST_HEAD(l_active);
1697b69408e8SChristoph Lameter 	LIST_HEAD(l_inactive);
16981cfb419bSKAMEZAWA Hiroyuki 	struct page *page;
1699f16015fbSJohannes Weiner 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
170044c241f1SKOSAKI Motohiro 	unsigned long nr_rotated = 0;
170161317289SHillf Danton 	isolate_mode_t isolate_mode = ISOLATE_ACTIVE;
1702f16015fbSJohannes Weiner 	struct zone *zone = mz->zone;
17031cfb419bSKAMEZAWA Hiroyuki 
17041da177e4SLinus Torvalds 	lru_add_drain();
1705f80c0673SMinchan Kim 
1706f80c0673SMinchan Kim 	if (!sc->may_unmap)
170761317289SHillf Danton 		isolate_mode |= ISOLATE_UNMAPPED;
1708f80c0673SMinchan Kim 	if (!sc->may_writepage)
170961317289SHillf Danton 		isolate_mode |= ISOLATE_CLEAN;
1710f80c0673SMinchan Kim 
17111da177e4SLinus Torvalds 	spin_lock_irq(&zone->lru_lock);
1712925b7673SJohannes Weiner 
1713fe2c2a10SRik van Riel 	nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc,
171461317289SHillf Danton 				     isolate_mode, 1, file);
171589b5fae5SJohannes Weiner 	if (global_reclaim(sc))
1716f626012dSHugh Dickins 		zone->pages_scanned += nr_scanned;
171789b5fae5SJohannes Weiner 
1718b7c46d15SJohannes Weiner 	reclaim_stat->recent_scanned[file] += nr_taken;
17191cfb419bSKAMEZAWA Hiroyuki 
1720f626012dSHugh Dickins 	__count_zone_vm_events(PGREFILL, zone, nr_scanned);
17214f98a2feSRik van Riel 	if (file)
172244c241f1SKOSAKI Motohiro 		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
17234f98a2feSRik van Riel 	else
172444c241f1SKOSAKI Motohiro 		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
1725a731286dSKOSAKI Motohiro 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
17261da177e4SLinus Torvalds 	spin_unlock_irq(&zone->lru_lock);
17271da177e4SLinus Torvalds 
17281da177e4SLinus Torvalds 	while (!list_empty(&l_hold)) {
17291da177e4SLinus Torvalds 		cond_resched();
17301da177e4SLinus Torvalds 		page = lru_to_page(&l_hold);
17311da177e4SLinus Torvalds 		list_del(&page->lru);
17327e9cd484SRik van Riel 
1733894bc310SLee Schermerhorn 		if (unlikely(!page_evictable(page, NULL))) {
1734894bc310SLee Schermerhorn 			putback_lru_page(page);
1735894bc310SLee Schermerhorn 			continue;
1736894bc310SLee Schermerhorn 		}
1737894bc310SLee Schermerhorn 
1738f16015fbSJohannes Weiner 		if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
17399992af10SRik van Riel 			nr_rotated += hpage_nr_pages(page);
17408cab4754SWu Fengguang 			/*
17418cab4754SWu Fengguang 			 * Identify referenced, file-backed active pages and
17428cab4754SWu Fengguang 			 * give them one more trip around the active list. So
17438cab4754SWu Fengguang 			 * that executable code gets better chances to stay in
17448cab4754SWu Fengguang 			 * memory under moderate memory pressure.  Anon pages
17458cab4754SWu Fengguang 			 * are not likely to be evicted by use-once streaming
17468cab4754SWu Fengguang 			 * IO, plus JVM can create lots of anon VM_EXEC pages,
17478cab4754SWu Fengguang 			 * so we ignore them here.
17488cab4754SWu Fengguang 			 */
174941e20983SWu Fengguang 			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
17508cab4754SWu Fengguang 				list_add(&page->lru, &l_active);
17518cab4754SWu Fengguang 				continue;
17528cab4754SWu Fengguang 			}
17538cab4754SWu Fengguang 		}
17547e9cd484SRik van Riel 
17555205e56eSKOSAKI Motohiro 		ClearPageActive(page);	/* we are de-activating */
17561da177e4SLinus Torvalds 		list_add(&page->lru, &l_inactive);
17571da177e4SLinus Torvalds 	}
17581da177e4SLinus Torvalds 
1759b555749aSAndrew Morton 	/*
17608cab4754SWu Fengguang 	 * Move pages back to the lru list.
1761b555749aSAndrew Morton 	 */
17622a1dc509SJohannes Weiner 	spin_lock_irq(&zone->lru_lock);
17634f98a2feSRik van Riel 	/*
17648cab4754SWu Fengguang 	 * Count referenced pages from currently used mappings as rotated,
17658cab4754SWu Fengguang 	 * even though only some of them are actually re-activated.  This
17668cab4754SWu Fengguang 	 * helps balance scan pressure between file and anonymous pages in
17678cab4754SWu Fengguang 	 * get_scan_ratio.
1768556adecbSRik van Riel 	 */
1769b7c46d15SJohannes Weiner 	reclaim_stat->recent_rotated[file] += nr_rotated;
1770556adecbSRik van Riel 
17712bcf8879SHugh Dickins 	move_active_pages_to_lru(zone, &l_active, &l_hold,
17723eb4140fSWu Fengguang 						LRU_ACTIVE + file * LRU_FILE);
17732bcf8879SHugh Dickins 	move_active_pages_to_lru(zone, &l_inactive, &l_hold,
17743eb4140fSWu Fengguang 						LRU_BASE   + file * LRU_FILE);
1775a731286dSKOSAKI Motohiro 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1776f8891e5eSChristoph Lameter 	spin_unlock_irq(&zone->lru_lock);
17772bcf8879SHugh Dickins 
17782bcf8879SHugh Dickins 	free_hot_cold_page_list(&l_hold, 1);
17791da177e4SLinus Torvalds }
17801da177e4SLinus Torvalds 
178174e3f3c3SMinchan Kim #ifdef CONFIG_SWAP
178214797e23SKOSAKI Motohiro static int inactive_anon_is_low_global(struct zone *zone)
1783f89eb90eSKOSAKI Motohiro {
1784f89eb90eSKOSAKI Motohiro 	unsigned long active, inactive;
1785f89eb90eSKOSAKI Motohiro 
1786f89eb90eSKOSAKI Motohiro 	active = zone_page_state(zone, NR_ACTIVE_ANON);
1787f89eb90eSKOSAKI Motohiro 	inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1788f89eb90eSKOSAKI Motohiro 
1789f89eb90eSKOSAKI Motohiro 	if (inactive * zone->inactive_ratio < active)
1790f89eb90eSKOSAKI Motohiro 		return 1;
1791f89eb90eSKOSAKI Motohiro 
1792f89eb90eSKOSAKI Motohiro 	return 0;
1793f89eb90eSKOSAKI Motohiro }
1794f89eb90eSKOSAKI Motohiro 
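/*
 * Editorial worked example: zone->inactive_ratio scales with zone size
 * (roughly 3 for a 1GB zone).  With inactive_ratio == 3, 50000 inactive
 * anon pages and 200000 active anon pages, 50000 * 3 < 200000, so the
 * inactive list is considered low and some active anon pages should be
 * deactivated.
 */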
179514797e23SKOSAKI Motohiro /**
179614797e23SKOSAKI Motohiro  * inactive_anon_is_low - check if anonymous pages need to be deactivated
179714797e23SKOSAKI Motohiro  * @zone: zone to check
179814797e23SKOSAKI Motohiro  * @sc:   scan control of this context
179914797e23SKOSAKI Motohiro  *
180014797e23SKOSAKI Motohiro  * Returns true if the zone does not have enough inactive anon pages,
180114797e23SKOSAKI Motohiro  * meaning some active anon pages need to be deactivated.
180214797e23SKOSAKI Motohiro  */
1803f16015fbSJohannes Weiner static int inactive_anon_is_low(struct mem_cgroup_zone *mz)
180414797e23SKOSAKI Motohiro {
180574e3f3c3SMinchan Kim 	/*
180674e3f3c3SMinchan Kim 	 * If we don't have swap space, anonymous page deactivation
180774e3f3c3SMinchan Kim 	 * is pointless.
180874e3f3c3SMinchan Kim 	 */
180974e3f3c3SMinchan Kim 	if (!total_swap_pages)
181074e3f3c3SMinchan Kim 		return 0;
181174e3f3c3SMinchan Kim 
1812f16015fbSJohannes Weiner 	if (!scanning_global_lru(mz))
1813f16015fbSJohannes Weiner 		return mem_cgroup_inactive_anon_is_low(mz->mem_cgroup,
1814f16015fbSJohannes Weiner 						       mz->zone);
1815f16015fbSJohannes Weiner 
1816f16015fbSJohannes Weiner 	return inactive_anon_is_low_global(mz->zone);
181714797e23SKOSAKI Motohiro }
181874e3f3c3SMinchan Kim #else
1819f16015fbSJohannes Weiner static inline int inactive_anon_is_low(struct mem_cgroup_zone *mz)
182074e3f3c3SMinchan Kim {
182174e3f3c3SMinchan Kim 	return 0;
182274e3f3c3SMinchan Kim }
182374e3f3c3SMinchan Kim #endif
182414797e23SKOSAKI Motohiro 
182556e49d21SRik van Riel static int inactive_file_is_low_global(struct zone *zone)
182656e49d21SRik van Riel {
182756e49d21SRik van Riel 	unsigned long active, inactive;
182856e49d21SRik van Riel 
182956e49d21SRik van Riel 	active = zone_page_state(zone, NR_ACTIVE_FILE);
183056e49d21SRik van Riel 	inactive = zone_page_state(zone, NR_INACTIVE_FILE);
183156e49d21SRik van Riel 
183256e49d21SRik van Riel 	return (active > inactive);
183356e49d21SRik van Riel }
183456e49d21SRik van Riel 
183556e49d21SRik van Riel /**
183656e49d21SRik van Riel  * inactive_file_is_low - check if file pages need to be deactivated
1837f16015fbSJohannes Weiner  * @mz: memory cgroup and zone to check
183856e49d21SRik van Riel  *
183956e49d21SRik van Riel  * When the system is doing streaming IO, memory pressure here
184056e49d21SRik van Riel  * ensures that active file pages get deactivated, until more
184156e49d21SRik van Riel  * than half of the file pages are on the inactive list.
184256e49d21SRik van Riel  *
184356e49d21SRik van Riel  * Once we get to that situation, protect the system's working
184456e49d21SRik van Riel  * set from being evicted by disabling active file page aging.
184556e49d21SRik van Riel  *
184656e49d21SRik van Riel  * This uses a different ratio than the anonymous pages, because
184756e49d21SRik van Riel  * the page cache uses a use-once replacement algorithm.
184856e49d21SRik van Riel  */
1849f16015fbSJohannes Weiner static int inactive_file_is_low(struct mem_cgroup_zone *mz)
185056e49d21SRik van Riel {
1851f16015fbSJohannes Weiner 	if (!scanning_global_lru(mz))
1852f16015fbSJohannes Weiner 		return mem_cgroup_inactive_file_is_low(mz->mem_cgroup,
1853f16015fbSJohannes Weiner 						       mz->zone);
185456e49d21SRik van Riel 
1855f16015fbSJohannes Weiner 	return inactive_file_is_low_global(mz->zone);
185656e49d21SRik van Riel }
185756e49d21SRik van Riel 
1858f16015fbSJohannes Weiner static int inactive_list_is_low(struct mem_cgroup_zone *mz, int file)
1859b39415b2SRik van Riel {
1860b39415b2SRik van Riel 	if (file)
1861f16015fbSJohannes Weiner 		return inactive_file_is_low(mz);
1862b39415b2SRik van Riel 	else
1863f16015fbSJohannes Weiner 		return inactive_anon_is_low(mz);
1864b39415b2SRik van Riel }
1865b39415b2SRik van Riel 
18664f98a2feSRik van Riel static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1867f16015fbSJohannes Weiner 				 struct mem_cgroup_zone *mz,
1868f16015fbSJohannes Weiner 				 struct scan_control *sc, int priority)
1869b69408e8SChristoph Lameter {
18704f98a2feSRik van Riel 	int file = is_file_lru(lru);
18714f98a2feSRik van Riel 
1872b39415b2SRik van Riel 	if (is_active_lru(lru)) {
1873f16015fbSJohannes Weiner 		if (inactive_list_is_low(mz, file))
1874f16015fbSJohannes Weiner 			shrink_active_list(nr_to_scan, mz, sc, priority, file);
1875556adecbSRik van Riel 		return 0;
1876556adecbSRik van Riel 	}
1877556adecbSRik van Riel 
1878f16015fbSJohannes Weiner 	return shrink_inactive_list(nr_to_scan, mz, sc, priority, file);
1879b69408e8SChristoph Lameter }
1880b69408e8SChristoph Lameter 
1881f16015fbSJohannes Weiner static int vmscan_swappiness(struct mem_cgroup_zone *mz,
1882f16015fbSJohannes Weiner 			     struct scan_control *sc)
18831f4c025bSKAMEZAWA Hiroyuki {
188489b5fae5SJohannes Weiner 	if (global_reclaim(sc))
18851f4c025bSKAMEZAWA Hiroyuki 		return vm_swappiness;
1886f16015fbSJohannes Weiner 	return mem_cgroup_swappiness(mz->mem_cgroup);
18871f4c025bSKAMEZAWA Hiroyuki }
18881f4c025bSKAMEZAWA Hiroyuki 
18891da177e4SLinus Torvalds /*
18904f98a2feSRik van Riel  * Determine how aggressively the anon and file LRU lists should be
18914f98a2feSRik van Riel  * scanned.  The relative value of each set of LRU lists is determined
18924f98a2feSRik van Riel  * by looking at the fraction of the pages scanned we did rotate back
18934f98a2feSRik van Riel  * onto the active list instead of evicting.
18944f98a2feSRik van Riel  *
189576a33fc3SShaohua Li  * nr[0] = anon pages to scan; nr[1] = file pages to scan
18964f98a2feSRik van Riel  */
1897f16015fbSJohannes Weiner static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
189876a33fc3SShaohua Li 			   unsigned long *nr, int priority)
18994f98a2feSRik van Riel {
19004f98a2feSRik van Riel 	unsigned long anon, file, free;
19014f98a2feSRik van Riel 	unsigned long anon_prio, file_prio;
19024f98a2feSRik van Riel 	unsigned long ap, fp;
1903f16015fbSJohannes Weiner 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
190476a33fc3SShaohua Li 	u64 fraction[2], denominator;
19054111304dSHugh Dickins 	enum lru_list lru;
190676a33fc3SShaohua Li 	int noswap = 0;
1907a4d3e9e7SJohannes Weiner 	bool force_scan = false;
1908246e87a9SKAMEZAWA Hiroyuki 
1909f11c0ca5SJohannes Weiner 	/*
1910f11c0ca5SJohannes Weiner 	 * If the zone or memcg is small, nr[l] can be 0.  This
1911f11c0ca5SJohannes Weiner 	 * results in no scanning on this priority and a potential
1912f11c0ca5SJohannes Weiner 	 * priority drop.  Global direct reclaim can go to the next
1913f11c0ca5SJohannes Weiner 	 * zone and tends to have no problems. Global kswapd is for
1914f11c0ca5SJohannes Weiner 	 * zone balancing and it needs to scan a minimum amount. When
1915f11c0ca5SJohannes Weiner 	 * reclaiming for a memcg, a priority drop can cause high
1916f11c0ca5SJohannes Weiner 	 * latencies, so it's better to scan a minimum amount there as
1917f11c0ca5SJohannes Weiner 	 * well.
1918f11c0ca5SJohannes Weiner 	 */
1919b95a2f2dSJohannes Weiner 	if (current_is_kswapd() && mz->zone->all_unreclaimable)
1920a4d3e9e7SJohannes Weiner 		force_scan = true;
192189b5fae5SJohannes Weiner 	if (!global_reclaim(sc))
1922a4d3e9e7SJohannes Weiner 		force_scan = true;
192376a33fc3SShaohua Li 
192476a33fc3SShaohua Li 	/* If we have no swap space, do not bother scanning anon pages. */
192576a33fc3SShaohua Li 	if (!sc->may_swap || (nr_swap_pages <= 0)) {
192676a33fc3SShaohua Li 		noswap = 1;
192776a33fc3SShaohua Li 		fraction[0] = 0;
192876a33fc3SShaohua Li 		fraction[1] = 1;
192976a33fc3SShaohua Li 		denominator = 1;
193076a33fc3SShaohua Li 		goto out;
193176a33fc3SShaohua Li 	}
19324f98a2feSRik van Riel 
1933f16015fbSJohannes Weiner 	anon  = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) +
1934f16015fbSJohannes Weiner 		zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
1935f16015fbSJohannes Weiner 	file  = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) +
1936f16015fbSJohannes Weiner 		zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
1937a4d3e9e7SJohannes Weiner 
193889b5fae5SJohannes Weiner 	if (global_reclaim(sc)) {
1939f16015fbSJohannes Weiner 		free  = zone_page_state(mz->zone, NR_FREE_PAGES);
1940eeee9a8cSKOSAKI Motohiro 		/* If we have very few page cache pages,
1941eeee9a8cSKOSAKI Motohiro 		   force-scan anon pages. */
1942f16015fbSJohannes Weiner 		if (unlikely(file + free <= high_wmark_pages(mz->zone))) {
194376a33fc3SShaohua Li 			fraction[0] = 1;
194476a33fc3SShaohua Li 			fraction[1] = 0;
194576a33fc3SShaohua Li 			denominator = 1;
194676a33fc3SShaohua Li 			goto out;
19474f98a2feSRik van Riel 		}
1948eeee9a8cSKOSAKI Motohiro 	}
19494f98a2feSRik van Riel 
19504f98a2feSRik van Riel 	/*
195158c37f6eSKOSAKI Motohiro 	 * With swappiness at 100, anonymous and file have the same priority.
195258c37f6eSKOSAKI Motohiro 	 * This scanning priority is essentially the inverse of IO cost.
195358c37f6eSKOSAKI Motohiro 	 */
1954f16015fbSJohannes Weiner 	anon_prio = vmscan_swappiness(mz, sc);
1955f16015fbSJohannes Weiner 	file_prio = 200 - vmscan_swappiness(mz, sc);
195658c37f6eSKOSAKI Motohiro 
195758c37f6eSKOSAKI Motohiro 	/*
19584f98a2feSRik van Riel 	 * OK, so we have swap space and a fair amount of page cache
19594f98a2feSRik van Riel 	 * pages.  We use the recently rotated / recently scanned
19604f98a2feSRik van Riel 	 * ratios to determine how valuable each cache is.
19614f98a2feSRik van Riel 	 *
19624f98a2feSRik van Riel 	 * Because workloads change over time (and to avoid overflow)
19634f98a2feSRik van Riel 	 * we keep these statistics as a floating average, which ends
19644f98a2feSRik van Riel 	 * up weighing recent references more than old ones.
19654f98a2feSRik van Riel 	 *
19664f98a2feSRik van Riel 	 * anon in [0], file in [1]
19674f98a2feSRik van Riel 	 */
1968f16015fbSJohannes Weiner 	spin_lock_irq(&mz->zone->lru_lock);
196958c37f6eSKOSAKI Motohiro 	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
19706e901571SKOSAKI Motohiro 		reclaim_stat->recent_scanned[0] /= 2;
19716e901571SKOSAKI Motohiro 		reclaim_stat->recent_rotated[0] /= 2;
19724f98a2feSRik van Riel 	}
19734f98a2feSRik van Riel 
19746e901571SKOSAKI Motohiro 	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
19756e901571SKOSAKI Motohiro 		reclaim_stat->recent_scanned[1] /= 2;
19766e901571SKOSAKI Motohiro 		reclaim_stat->recent_rotated[1] /= 2;
19774f98a2feSRik van Riel 	}
19784f98a2feSRik van Riel 
19794f98a2feSRik van Riel 	/*
198000d8089cSRik van Riel 	 * The amount of pressure on anon vs file pages is inversely
198100d8089cSRik van Riel 	 * proportional to the fraction of recently scanned pages on
198200d8089cSRik van Riel 	 * each list that were recently referenced and in active use.
19834f98a2feSRik van Riel 	 */
19846e901571SKOSAKI Motohiro 	ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
19856e901571SKOSAKI Motohiro 	ap /= reclaim_stat->recent_rotated[0] + 1;
19864f98a2feSRik van Riel 
19876e901571SKOSAKI Motohiro 	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
19886e901571SKOSAKI Motohiro 	fp /= reclaim_stat->recent_rotated[1] + 1;
1989f16015fbSJohannes Weiner 	spin_unlock_irq(&mz->zone->lru_lock);
19904f98a2feSRik van Riel 
199176a33fc3SShaohua Li 	fraction[0] = ap;
199276a33fc3SShaohua Li 	fraction[1] = fp;
199376a33fc3SShaohua Li 	denominator = ap + fp + 1;
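	/*
	 * Editorial worked example, with the default swappiness of 60:
	 * anon_prio == 60 and file_prio == 140.  If recent_scanned/rotated
	 * are 1000/500 for anon and 1000/100 for file, then
	 * ap == 61 * 1001 / 501 == 121 and fp == 141 * 1001 / 101 == 1397,
	 * so anon receives 121 / (121 + 1397 + 1), roughly 8%, of the scan
	 * pressure.
	 */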
199476a33fc3SShaohua Li out:
19954111304dSHugh Dickins 	for_each_evictable_lru(lru) {
19964111304dSHugh Dickins 		int file = is_file_lru(lru);
199776a33fc3SShaohua Li 		unsigned long scan;
199876a33fc3SShaohua Li 
19994111304dSHugh Dickins 		scan = zone_nr_lru_pages(mz, lru);
200076a33fc3SShaohua Li 		if (priority || noswap) {
200176a33fc3SShaohua Li 			scan >>= priority;
2002f11c0ca5SJohannes Weiner 			if (!scan && force_scan)
2003f11c0ca5SJohannes Weiner 				scan = SWAP_CLUSTER_MAX;
200476a33fc3SShaohua Li 			scan = div64_u64(scan * fraction[file], denominator);
20054f98a2feSRik van Riel 		}
20064111304dSHugh Dickins 		nr[lru] = scan;
200776a33fc3SShaohua Li 	}
20086e08a369SWu Fengguang }
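/*
 * Editorial worked example of the final scaling in get_scan_count():
 * an inactive file list of 2^20 pages at priority 12 starts from
 * scan == 2^20 >> 12 == 256; with the fractions from the example above
 * (fraction[1] == 1397, denominator == 1519) this becomes
 * 256 * 1397 / 1519 == 235 pages in nr[LRU_INACTIVE_FILE].
 */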
20094f98a2feSRik van Riel 
20104f98a2feSRik van Riel /*
20113e7d3449SMel Gorman  * Reclaim/compaction depends on a number of pages being freed. To avoid
20123e7d3449SMel Gorman  * disruption to the system, a small number of order-0 pages continue to be
20133e7d3449SMel Gorman  * rotated and reclaimed in the normal fashion. However, by the time we get
20143e7d3449SMel Gorman  * back to the allocator and call try_to_compact_pages(), we ensure that
20153e7d3449SMel Gorman  * there are enough free pages for it to be likely successful.
20163e7d3449SMel Gorman  */
2017f16015fbSJohannes Weiner static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
20183e7d3449SMel Gorman 					unsigned long nr_reclaimed,
20193e7d3449SMel Gorman 					unsigned long nr_scanned,
20203e7d3449SMel Gorman 					struct scan_control *sc)
20213e7d3449SMel Gorman {
20223e7d3449SMel Gorman 	unsigned long pages_for_compaction;
20233e7d3449SMel Gorman 	unsigned long inactive_lru_pages;
20243e7d3449SMel Gorman 
20253e7d3449SMel Gorman 	/* If not in reclaim/compaction mode, stop */
2026f3a310bcSMel Gorman 	if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
20273e7d3449SMel Gorman 		return false;
20283e7d3449SMel Gorman 
20292876592fSMel Gorman 	/* Consider stopping depending on scan and reclaim activity */
20302876592fSMel Gorman 	if (sc->gfp_mask & __GFP_REPEAT) {
20313e7d3449SMel Gorman 		/*
20322876592fSMel Gorman 		 * For __GFP_REPEAT allocations, stop reclaiming if the
20332876592fSMel Gorman 		 * full LRU list has been scanned and we are still failing
20342876592fSMel Gorman 		 * to reclaim pages. This full LRU scan is potentially
20352876592fSMel Gorman 		 * expensive but a __GFP_REPEAT caller really wants to succeed
20363e7d3449SMel Gorman 		 */
20373e7d3449SMel Gorman 		if (!nr_reclaimed && !nr_scanned)
20383e7d3449SMel Gorman 			return false;
20392876592fSMel Gorman 	} else {
20402876592fSMel Gorman 		/*
20412876592fSMel Gorman 		 * For non-__GFP_REPEAT allocations which can presumably
20422876592fSMel Gorman 		 * fail without consequence, stop if we failed to reclaim
20432876592fSMel Gorman 		 * any pages from the last SWAP_CLUSTER_MAX number of
20442876592fSMel Gorman 		 * pages that were scanned. This will return to the
20452876592fSMel Gorman 		 * caller faster at the risk that reclaim/compaction and
20462876592fSMel Gorman 		 * the resulting allocation attempt fail.
20472876592fSMel Gorman 		 */
20482876592fSMel Gorman 		if (!nr_reclaimed)
20492876592fSMel Gorman 			return false;
20502876592fSMel Gorman 	}
20513e7d3449SMel Gorman 
20523e7d3449SMel Gorman 	/*
20533e7d3449SMel Gorman 	 * If we have not reclaimed enough pages for compaction and the
20543e7d3449SMel Gorman 	 * inactive lists are large enough, continue reclaiming
20553e7d3449SMel Gorman 	 */
20563e7d3449SMel Gorman 	pages_for_compaction = (2UL << sc->order);
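	/*
	 * Editorial example: for an order-9 request (a 2MB THP with 4KB
	 * pages), pages_for_compaction == 2UL << 9 == 1024 pages, so
	 * reclaim continues until roughly 4MB worth of order-0 pages have
	 * been freed, provided the inactive lists are still large enough.
	 */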
2057f16015fbSJohannes Weiner 	inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
205886cfd3a4SMinchan Kim 	if (nr_swap_pages > 0)
2059f16015fbSJohannes Weiner 		inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
20603e7d3449SMel Gorman 	if (sc->nr_reclaimed < pages_for_compaction &&
20613e7d3449SMel Gorman 			inactive_lru_pages > pages_for_compaction)
20623e7d3449SMel Gorman 		return true;
20633e7d3449SMel Gorman 
20643e7d3449SMel Gorman 	/* If compaction would go ahead or the allocation would succeed, stop */
2065f16015fbSJohannes Weiner 	switch (compaction_suitable(mz->zone, sc->order)) {
20663e7d3449SMel Gorman 	case COMPACT_PARTIAL:
20673e7d3449SMel Gorman 	case COMPACT_CONTINUE:
20683e7d3449SMel Gorman 		return false;
20693e7d3449SMel Gorman 	default:
20703e7d3449SMel Gorman 		return true;
20713e7d3449SMel Gorman 	}
20723e7d3449SMel Gorman }
20733e7d3449SMel Gorman 
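/*
 * Minimal standalone sketch of the threshold test above (illustrative,
 * not part of vmscan.c; the names are invented). For an order-9 THP
 * request, pages_for_compaction = 2UL << 9 = 1024 pages, i.e. 4MB with
 * 4K pages: reclaim keeps going while fewer than 1024 pages have been
 * reclaimed and more than 1024 inactive pages remain to scan.
 */
static inline bool sketch_should_continue(unsigned long nr_reclaimed,
					  unsigned long inactive_lru_pages,
					  int order)
{
	unsigned long pages_for_compaction = 2UL << order;

	return nr_reclaimed < pages_for_compaction &&
	       inactive_lru_pages > pages_for_compaction;
}
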
20743e7d3449SMel Gorman /*
20751da177e4SLinus Torvalds  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
20761da177e4SLinus Torvalds  */
2077f16015fbSJohannes Weiner static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
207869e05944SAndrew Morton 				   struct scan_control *sc)
20791da177e4SLinus Torvalds {
2080b69408e8SChristoph Lameter 	unsigned long nr[NR_LRU_LISTS];
20818695949aSChristoph Lameter 	unsigned long nr_to_scan;
20824111304dSHugh Dickins 	enum lru_list lru;
2083f0fdc5e8SJohannes Weiner 	unsigned long nr_reclaimed, nr_scanned;
208422fba335SKOSAKI Motohiro 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
20853da367c3SShaohua Li 	struct blk_plug plug;
20861da177e4SLinus Torvalds 
20873e7d3449SMel Gorman restart:
20883e7d3449SMel Gorman 	nr_reclaimed = 0;
2089f0fdc5e8SJohannes Weiner 	nr_scanned = sc->nr_scanned;
2090f16015fbSJohannes Weiner 	get_scan_count(mz, sc, nr, priority);
20911cfb419bSKAMEZAWA Hiroyuki 
20923da367c3SShaohua Li 	blk_start_plug(&plug);
2093556adecbSRik van Riel 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2094556adecbSRik van Riel 					nr[LRU_INACTIVE_FILE]) {
20954111304dSHugh Dickins 		for_each_evictable_lru(lru) {
20964111304dSHugh Dickins 			if (nr[lru]) {
2097ece74b2eSKOSAKI Motohiro 				nr_to_scan = min_t(unsigned long,
20984111304dSHugh Dickins 						   nr[lru], SWAP_CLUSTER_MAX);
20994111304dSHugh Dickins 				nr[lru] -= nr_to_scan;
2100b69408e8SChristoph Lameter 
21014111304dSHugh Dickins 				nr_reclaimed += shrink_list(lru, nr_to_scan,
2102f16015fbSJohannes Weiner 							    mz, sc, priority);
21031da177e4SLinus Torvalds 			}
21041da177e4SLinus Torvalds 		}
2105a79311c1SRik van Riel 		/*
2106a79311c1SRik van Riel 		 * On large memory systems, scan >> priority can become
2107a79311c1SRik van Riel 		 * really large. This is fine for the starting priority;
2108a79311c1SRik van Riel 		 * we want to put equal scanning pressure on each zone.
2109a79311c1SRik van Riel 		 * However, if the VM has a harder time of freeing pages,
2110a79311c1SRik van Riel 		 * with multiple processes reclaiming pages, the total
2111a79311c1SRik van Riel 		 * freeing target can get unreasonably large.
2112a79311c1SRik van Riel 		 */
2113c38446ccSHillf Danton 		if (nr_reclaimed >= nr_to_reclaim)
2114c38446ccSHillf Danton 			nr_to_reclaim = 0;
2115c38446ccSHillf Danton 		else
2116c38446ccSHillf Danton 			nr_to_reclaim -= nr_reclaimed;
2117c38446ccSHillf Danton 
2118c38446ccSHillf Danton 		if (!nr_to_reclaim && priority < DEF_PRIORITY)
2119a79311c1SRik van Riel 			break;
21201da177e4SLinus Torvalds 	}
21213da367c3SShaohua Li 	blk_finish_plug(&plug);
21223e7d3449SMel Gorman 	sc->nr_reclaimed += nr_reclaimed;
212301dbe5c9SKOSAKI Motohiro 
2124556adecbSRik van Riel 	/*
2125556adecbSRik van Riel 	 * Even if we did not try to evict anon pages at all, we want to
2126556adecbSRik van Riel 	 * rebalance the anon lru active/inactive ratio.
2127556adecbSRik van Riel 	 */
2128f16015fbSJohannes Weiner 	if (inactive_anon_is_low(mz))
2129f16015fbSJohannes Weiner 		shrink_active_list(SWAP_CLUSTER_MAX, mz, sc, priority, 0);
2130556adecbSRik van Riel 
21313e7d3449SMel Gorman 	/* reclaim/compaction might need reclaim to continue */
2132f16015fbSJohannes Weiner 	if (should_continue_reclaim(mz, nr_reclaimed,
21333e7d3449SMel Gorman 					sc->nr_scanned - nr_scanned, sc))
21343e7d3449SMel Gorman 		goto restart;
21353e7d3449SMel Gorman 
2136232ea4d6SAndrew Morton 	throttle_vm_writeout(sc->gfp_mask);
21371da177e4SLinus Torvalds }
21381da177e4SLinus Torvalds 
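/*
 * Illustrative sketch of the batching pattern above (not part of
 * vmscan.c; SKETCH_BATCH stands in for SWAP_CLUSTER_MAX, which is 32
 * in this era). Each evictable LRU list is drained round-robin in
 * small chunks so no single list monopolizes a reclaim pass and the
 * bail-out check runs frequently.
 */
#define SKETCH_BATCH 32UL

static void sketch_round_robin(unsigned long nr[], int nr_lists)
{
	bool progress;
	int i;

	do {
		progress = false;
		for (i = 0; i < nr_lists; i++) {
			unsigned long step = min(nr[i], SKETCH_BATCH);

			if (!step)
				continue;
			nr[i] -= step;
			progress = true;
			/* shrink_list(i, step, ...) would run here */
		}
	} while (progress);
}
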
2139f16015fbSJohannes Weiner static void shrink_zone(int priority, struct zone *zone,
2140f16015fbSJohannes Weiner 			struct scan_control *sc)
2141f16015fbSJohannes Weiner {
21425660048cSJohannes Weiner 	struct mem_cgroup *root = sc->target_mem_cgroup;
21435660048cSJohannes Weiner 	struct mem_cgroup_reclaim_cookie reclaim = {
21445660048cSJohannes Weiner 		.zone = zone,
21455660048cSJohannes Weiner 		.priority = priority,
21465660048cSJohannes Weiner 	};
21475660048cSJohannes Weiner 	struct mem_cgroup *memcg;
21485660048cSJohannes Weiner 
21495660048cSJohannes Weiner 	memcg = mem_cgroup_iter(root, NULL, &reclaim);
21505660048cSJohannes Weiner 	do {
21515660048cSJohannes Weiner 		struct mem_cgroup_zone mz = {
21525660048cSJohannes Weiner 			.mem_cgroup = memcg,
21535660048cSJohannes Weiner 			.zone = zone,
21545660048cSJohannes Weiner 		};
21555660048cSJohannes Weiner 
21565660048cSJohannes Weiner 		shrink_mem_cgroup_zone(priority, &mz, sc);
21575660048cSJohannes Weiner 		/*
21585660048cSJohannes Weiner 		 * Limit reclaim has historically picked one memcg and
21595660048cSJohannes Weiner 		 * scanned it with decreasing priority levels until
21605660048cSJohannes Weiner 		 * nr_to_reclaim had been reclaimed.  This priority
21615660048cSJohannes Weiner 		 * cycle is thus over after a single memcg.
2162b95a2f2dSJohannes Weiner 		 *
2163b95a2f2dSJohannes Weiner 		 * Direct reclaim and kswapd, on the other hand, have
2164b95a2f2dSJohannes Weiner 		 * to scan all memory cgroups to fulfill the overall
2165b95a2f2dSJohannes Weiner 		 * scan target for the zone.
21665660048cSJohannes Weiner 		 */
21675660048cSJohannes Weiner 		if (!global_reclaim(sc)) {
21685660048cSJohannes Weiner 			mem_cgroup_iter_break(root, memcg);
21695660048cSJohannes Weiner 			break;
21705660048cSJohannes Weiner 		}
21715660048cSJohannes Weiner 		memcg = mem_cgroup_iter(root, memcg, &reclaim);
21725660048cSJohannes Weiner 	} while (memcg);
2173f16015fbSJohannes Weiner }
2174f16015fbSJohannes Weiner 
2175fe4b1b24SMel Gorman /* Returns true if compaction should go ahead for a high-order request */
2176fe4b1b24SMel Gorman static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2177fe4b1b24SMel Gorman {
2178fe4b1b24SMel Gorman 	unsigned long balance_gap, watermark;
2179fe4b1b24SMel Gorman 	bool watermark_ok;
2180fe4b1b24SMel Gorman 
2181fe4b1b24SMel Gorman 	/* Do not consider compaction for orders reclaim is meant to satisfy */
2182fe4b1b24SMel Gorman 	if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
2183fe4b1b24SMel Gorman 		return false;
2184fe4b1b24SMel Gorman 
2185fe4b1b24SMel Gorman 	/*
2186fe4b1b24SMel Gorman 	 * Compaction takes time to run and there are potentially other
2187fe4b1b24SMel Gorman 	 * callers using the pages just freed. Continue reclaiming until
2188fe4b1b24SMel Gorman 	 * there is a buffer of free pages available to give compaction
2189fe4b1b24SMel Gorman 	 * a reasonable chance of completing and allocating the page
2190fe4b1b24SMel Gorman 	 */
2191fe4b1b24SMel Gorman 	balance_gap = min(low_wmark_pages(zone),
2192fe4b1b24SMel Gorman 		(zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
2193fe4b1b24SMel Gorman 			KSWAPD_ZONE_BALANCE_GAP_RATIO);
2194fe4b1b24SMel Gorman 	watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
2195fe4b1b24SMel Gorman 	watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
2196fe4b1b24SMel Gorman 
2197fe4b1b24SMel Gorman 	/*
2198fe4b1b24SMel Gorman 	 * If compaction is deferred, reclaim up to a point where
2199fe4b1b24SMel Gorman 	 * compaction will have a chance of success when re-enabled
2200fe4b1b24SMel Gorman 	 */
2201fe4b1b24SMel Gorman 	if (compaction_deferred(zone))
2202fe4b1b24SMel Gorman 		return watermark_ok;
2203fe4b1b24SMel Gorman 
2204fe4b1b24SMel Gorman 	/* If compaction is not ready to start, keep reclaiming */
2205fe4b1b24SMel Gorman 	if (!compaction_suitable(zone, sc->order))
2206fe4b1b24SMel Gorman 		return false;
2207fe4b1b24SMel Gorman 
2208fe4b1b24SMel Gorman 	return watermark_ok;
2209fe4b1b24SMel Gorman }
2210fe4b1b24SMel Gorman 
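/*
 * Worked example of the watermark arithmetic above (illustrative
 * numbers, assuming KSWAPD_ZONE_BALANCE_GAP_RATIO is 100 as in this
 * era): for a 1GB zone of 262144 4K pages with low_wmark = 4096,
 * balance_gap = min(4096, (262144 + 99) / 100) = 2622. For an order-9
 * request the buffer compaction waits for is then
 * high_wmark + 2622 + (2UL << 9) = high_wmark + 3646 pages.
 */
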
22111da177e4SLinus Torvalds /*
22121da177e4SLinus Torvalds  * This is the direct reclaim path, for page-allocating processes.  We only
22131da177e4SLinus Torvalds  * try to reclaim pages from zones which will satisfy the caller's allocation
22141da177e4SLinus Torvalds  * request.
22151da177e4SLinus Torvalds  *
221641858966SMel Gorman  * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
221741858966SMel Gorman  * Because:
22181da177e4SLinus Torvalds  * a) The caller may be trying to free *extra* pages to satisfy a higher-order
22191da177e4SLinus Torvalds  *    allocation or
222041858966SMel Gorman  * b) The target zone may be at high_wmark_pages(zone) but the lower zones
222141858966SMel Gorman  *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
222241858966SMel Gorman  *    zone defense algorithm.
22231da177e4SLinus Torvalds  *
22241da177e4SLinus Torvalds  * If a zone is deemed to be full of pinned pages then just give it a light
22251da177e4SLinus Torvalds  * scan and then give up on it.
2226e0c23279SMel Gorman  *
2227e0c23279SMel Gorman  * This function returns true if a zone is being reclaimed for a costly
2228fe4b1b24SMel Gorman  * high-order allocation and compaction is ready to begin. This indicates to
22290cee34fdSMel Gorman  * the caller that it should consider retrying the allocation instead of
22300cee34fdSMel Gorman  * further reclaim.
22311da177e4SLinus Torvalds  */
2232e0c23279SMel Gorman static bool shrink_zones(int priority, struct zonelist *zonelist,
223369e05944SAndrew Morton 					struct scan_control *sc)
22341da177e4SLinus Torvalds {
2235dd1a239fSMel Gorman 	struct zoneref *z;
223654a6eb5cSMel Gorman 	struct zone *zone;
2237d149e3b2SYing Han 	unsigned long nr_soft_reclaimed;
2238d149e3b2SYing Han 	unsigned long nr_soft_scanned;
22390cee34fdSMel Gorman 	bool aborted_reclaim = false;
22401cfb419bSKAMEZAWA Hiroyuki 
2241d4debc66SMel Gorman 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
2242d4debc66SMel Gorman 					gfp_zone(sc->gfp_mask), sc->nodemask) {
2243f3fe6512SCon Kolivas 		if (!populated_zone(zone))
22441da177e4SLinus Torvalds 			continue;
22451cfb419bSKAMEZAWA Hiroyuki 		/*
22461cfb419bSKAMEZAWA Hiroyuki 		 * Take care that memory controller reclaiming has only a
22471cfb419bSKAMEZAWA Hiroyuki 		 * small influence on the global LRU.
22481cfb419bSKAMEZAWA Hiroyuki 		 */
224989b5fae5SJohannes Weiner 		if (global_reclaim(sc)) {
225002a0e53dSPaul Jackson 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
22511da177e4SLinus Torvalds 				continue;
225293e4a89aSKOSAKI Motohiro 			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
22531da177e4SLinus Torvalds 				continue;	/* Let kswapd poll it */
2254e0887c19SRik van Riel 			if (COMPACTION_BUILD) {
2255e0887c19SRik van Riel 				/*
2256e0c23279SMel Gorman 				 * If we already have plenty of memory free for
2257e0c23279SMel Gorman 				 * compaction in this zone, don't free any more.
2258e0c23279SMel Gorman 				 * Even though compaction is invoked for any
2259e0c23279SMel Gorman 				 * non-zero order, only frequent costly order
2260e0c23279SMel Gorman 				 * reclamation is disruptive enough to become a
2261e0c23279SMel Gorman 				 * noticeable problem, like transparent huge page
2262e0c23279SMel Gorman 				 * allocations.
2263e0887c19SRik van Riel 				 */
2264fe4b1b24SMel Gorman 				if (compaction_ready(zone, sc)) {
22650cee34fdSMel Gorman 					aborted_reclaim = true;
2266e0887c19SRik van Riel 					continue;
2267e0887c19SRik van Riel 				}
2268e0c23279SMel Gorman 			}
2269ac34a1a3SKAMEZAWA Hiroyuki 			/*
2270ac34a1a3SKAMEZAWA Hiroyuki 			 * This steals pages from memory cgroups over their soft limit
2271ac34a1a3SKAMEZAWA Hiroyuki 			 * and returns the number of reclaimed pages and
2272ac34a1a3SKAMEZAWA Hiroyuki 			 * scanned pages. This works for global memory pressure
2273ac34a1a3SKAMEZAWA Hiroyuki 			 * and balancing, not for a memcg's limit.
2274ac34a1a3SKAMEZAWA Hiroyuki 			 */
2275d149e3b2SYing Han 			nr_soft_scanned = 0;
2276d149e3b2SYing Han 			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2277d149e3b2SYing Han 						sc->order, sc->gfp_mask,
2278d149e3b2SYing Han 						&nr_soft_scanned);
2279d149e3b2SYing Han 			sc->nr_reclaimed += nr_soft_reclaimed;
2280ac34a1a3SKAMEZAWA Hiroyuki 			sc->nr_scanned += nr_soft_scanned;
2281ac34a1a3SKAMEZAWA Hiroyuki 			/* some check is needed to avoid more shrink_zone() calls */
2282ac34a1a3SKAMEZAWA Hiroyuki 		}
2283d149e3b2SYing Han 
2284a79311c1SRik van Riel 		shrink_zone(priority, zone, sc);
22851da177e4SLinus Torvalds 	}
2286e0c23279SMel Gorman 
22870cee34fdSMel Gorman 	return aborted_reclaim;
2288d1908362SMinchan Kim }
2289d1908362SMinchan Kim 
2290d1908362SMinchan Kim static bool zone_reclaimable(struct zone *zone)
2291d1908362SMinchan Kim {
2292d1908362SMinchan Kim 	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
2293d1908362SMinchan Kim }
2294d1908362SMinchan Kim 
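/*
 * Illustrative reading of the heuristic above: with 10000 reclaimable
 * pages in the zone, it is still treated as reclaimable until 60000
 * pages have been scanned, i.e. roughly six full passes over its
 * reclaimable pages since pages were last freed back to it.
 */
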
2295929bea7cSKOSAKI Motohiro /* All zones in zonelist are unreclaimable? */
2296d1908362SMinchan Kim static bool all_unreclaimable(struct zonelist *zonelist,
2297d1908362SMinchan Kim 		struct scan_control *sc)
2298d1908362SMinchan Kim {
2299d1908362SMinchan Kim 	struct zoneref *z;
2300d1908362SMinchan Kim 	struct zone *zone;
2301d1908362SMinchan Kim 
2302d1908362SMinchan Kim 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
2303d1908362SMinchan Kim 			gfp_zone(sc->gfp_mask), sc->nodemask) {
2304d1908362SMinchan Kim 		if (!populated_zone(zone))
2305d1908362SMinchan Kim 			continue;
2306d1908362SMinchan Kim 		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2307d1908362SMinchan Kim 			continue;
2308929bea7cSKOSAKI Motohiro 		if (!zone->all_unreclaimable)
2309929bea7cSKOSAKI Motohiro 			return false;
2310d1908362SMinchan Kim 	}
2311d1908362SMinchan Kim 
2312929bea7cSKOSAKI Motohiro 	return true;
23131da177e4SLinus Torvalds }
23141da177e4SLinus Torvalds 
23151da177e4SLinus Torvalds /*
23161da177e4SLinus Torvalds  * This is the main entry point to direct page reclaim.
23171da177e4SLinus Torvalds  *
23181da177e4SLinus Torvalds  * If a full scan of the inactive list fails to free enough memory then we
23191da177e4SLinus Torvalds  * are "out of memory" and something needs to be killed.
23201da177e4SLinus Torvalds  *
23211da177e4SLinus Torvalds  * If the caller is !__GFP_FS then the probability of a failure is reasonably
23221da177e4SLinus Torvalds  * high - the zone may be full of dirty or under-writeback pages, which this
23235b0830cbSJens Axboe  * caller can't do much about.  We kick the writeback threads and take explicit
23245b0830cbSJens Axboe  * naps in the hope that some of these pages can be written.  But if the
23255b0830cbSJens Axboe  * allocating task holds filesystem locks which prevent writeout this might not
23265b0830cbSJens Axboe  * work, and the allocation attempt will fail.
2327a41f24eaSNishanth Aravamudan  *
2328a41f24eaSNishanth Aravamudan  * returns:	0, if no pages reclaimed
2329a41f24eaSNishanth Aravamudan  * 		else, the number of pages reclaimed
23301da177e4SLinus Torvalds  */
2331dac1d27bSMel Gorman static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2332a09ed5e0SYing Han 					struct scan_control *sc,
2333a09ed5e0SYing Han 					struct shrink_control *shrink)
23341da177e4SLinus Torvalds {
23351da177e4SLinus Torvalds 	int priority;
233669e05944SAndrew Morton 	unsigned long total_scanned = 0;
23371da177e4SLinus Torvalds 	struct reclaim_state *reclaim_state = current->reclaim_state;
2338dd1a239fSMel Gorman 	struct zoneref *z;
233954a6eb5cSMel Gorman 	struct zone *zone;
234022fba335SKOSAKI Motohiro 	unsigned long writeback_threshold;
23410cee34fdSMel Gorman 	bool aborted_reclaim;
23421da177e4SLinus Torvalds 
2343c0ff7453SMiao Xie 	get_mems_allowed();
2344873b4771SKeika Kobayashi 	delayacct_freepages_start();
2345873b4771SKeika Kobayashi 
234689b5fae5SJohannes Weiner 	if (global_reclaim(sc))
2347f8891e5eSChristoph Lameter 		count_vm_event(ALLOCSTALL);
23481da177e4SLinus Torvalds 
23491da177e4SLinus Torvalds 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
235066e1707bSBalbir Singh 		sc->nr_scanned = 0;
2351f7b7fd8fSRik van Riel 		if (!priority)
2352f16015fbSJohannes Weiner 			disable_swap_token(sc->target_mem_cgroup);
23530cee34fdSMel Gorman 		aborted_reclaim = shrink_zones(priority, zonelist, sc);
2354e0c23279SMel Gorman 
235566e1707bSBalbir Singh 		/*
235666e1707bSBalbir Singh 		 * Don't shrink slabs when reclaiming memory from
235766e1707bSBalbir Singh 		 * over limit cgroups
235866e1707bSBalbir Singh 		 */
235989b5fae5SJohannes Weiner 		if (global_reclaim(sc)) {
2360c6a8a8c5SKOSAKI Motohiro 			unsigned long lru_pages = 0;
2361d4debc66SMel Gorman 			for_each_zone_zonelist(zone, z, zonelist,
2362d4debc66SMel Gorman 					gfp_zone(sc->gfp_mask)) {
2363c6a8a8c5SKOSAKI Motohiro 				if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2364c6a8a8c5SKOSAKI Motohiro 					continue;
2365c6a8a8c5SKOSAKI Motohiro 
2366c6a8a8c5SKOSAKI Motohiro 				lru_pages += zone_reclaimable_pages(zone);
2367c6a8a8c5SKOSAKI Motohiro 			}
2368c6a8a8c5SKOSAKI Motohiro 
23691495f230SYing Han 			shrink_slab(shrink, sc->nr_scanned, lru_pages);
23701da177e4SLinus Torvalds 			if (reclaim_state) {
2371a79311c1SRik van Riel 				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
23721da177e4SLinus Torvalds 				reclaim_state->reclaimed_slab = 0;
23731da177e4SLinus Torvalds 			}
237491a45470SKAMEZAWA Hiroyuki 		}
237566e1707bSBalbir Singh 		total_scanned += sc->nr_scanned;
2376bb21c7ceSKOSAKI Motohiro 		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
23771da177e4SLinus Torvalds 			goto out;
23781da177e4SLinus Torvalds 
23791da177e4SLinus Torvalds 		/*
23801da177e4SLinus Torvalds 		 * Try to write back as many pages as we just scanned.  This
23811da177e4SLinus Torvalds 		 * tends to cause slow streaming writers to write data to the
23821da177e4SLinus Torvalds 		 * disk smoothly, at the dirtying rate, which is nice.   But
23831da177e4SLinus Torvalds 		 * that's undesirable in laptop mode, where we *want* lumpy
23841da177e4SLinus Torvalds 		 * writeout.  So in laptop mode, write out the whole world.
23851da177e4SLinus Torvalds 		 */
238622fba335SKOSAKI Motohiro 		writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
238722fba335SKOSAKI Motohiro 		if (total_scanned > writeback_threshold) {
23880e175a18SCurt Wohlgemuth 			wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
23890e175a18SCurt Wohlgemuth 						WB_REASON_TRY_TO_FREE_PAGES);
239066e1707bSBalbir Singh 			sc->may_writepage = 1;
23911da177e4SLinus Torvalds 		}
23921da177e4SLinus Torvalds 
23931da177e4SLinus Torvalds 		/* Take a nap, wait for some writeback to complete */
23947b51755cSKOSAKI Motohiro 		if (!sc->hibernation_mode && sc->nr_scanned &&
23950e093d99SMel Gorman 		    priority < DEF_PRIORITY - 2) {
23960e093d99SMel Gorman 			struct zone *preferred_zone;
23970e093d99SMel Gorman 
23980e093d99SMel Gorman 			first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
2399f33261d7SDavid Rientjes 						&cpuset_current_mems_allowed,
2400f33261d7SDavid Rientjes 						&preferred_zone);
24010e093d99SMel Gorman 			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
24020e093d99SMel Gorman 		}
24031da177e4SLinus Torvalds 	}
2404bb21c7ceSKOSAKI Motohiro 
24051da177e4SLinus Torvalds out:
2406873b4771SKeika Kobayashi 	delayacct_freepages_end();
2407c0ff7453SMiao Xie 	put_mems_allowed();
2408873b4771SKeika Kobayashi 
2409bb21c7ceSKOSAKI Motohiro 	if (sc->nr_reclaimed)
2410bb21c7ceSKOSAKI Motohiro 		return sc->nr_reclaimed;
2411bb21c7ceSKOSAKI Motohiro 
2412929bea7cSKOSAKI Motohiro 	/*
2413929bea7cSKOSAKI Motohiro 	 * While hibernation is in progress, kswapd is frozen so that it can't
2414929bea7cSKOSAKI Motohiro 	 * mark the zone all_unreclaimable. Thus we bypass the
2415929bea7cSKOSAKI Motohiro 	 * all_unreclaimable check.
2416929bea7cSKOSAKI Motohiro 	 */
2417929bea7cSKOSAKI Motohiro 	if (oom_killer_disabled)
2418929bea7cSKOSAKI Motohiro 		return 0;
2419929bea7cSKOSAKI Motohiro 
24200cee34fdSMel Gorman 	/* Aborted reclaim to try compaction? don't OOM, then */
24210cee34fdSMel Gorman 	if (aborted_reclaim)
24227335084dSMel Gorman 		return 1;
24237335084dSMel Gorman 
2424bb21c7ceSKOSAKI Motohiro 	/* top priority shrink_zones still had more to do? don't OOM, then */
242589b5fae5SJohannes Weiner 	if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
2426bb21c7ceSKOSAKI Motohiro 		return 1;
2427bb21c7ceSKOSAKI Motohiro 
2428bb21c7ceSKOSAKI Motohiro 	return 0;
24291da177e4SLinus Torvalds }
24301da177e4SLinus Torvalds 
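/*
 * Worked example of the writeback trigger in the loop above
 * (illustrative, assuming SWAP_CLUSTER_MAX is 32): with nr_to_reclaim
 * = 32, writeback_threshold = 32 + 32/2 = 48. Once total_scanned
 * exceeds 48 pages, the flusher threads are woken to write back
 * total_scanned pages (or everything, in laptop mode) and
 * may_writepage is forced on.
 */
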
2431dac1d27bSMel Gorman unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2432327c0e96SKAMEZAWA Hiroyuki 				gfp_t gfp_mask, nodemask_t *nodemask)
243366e1707bSBalbir Singh {
243433906bc5SMel Gorman 	unsigned long nr_reclaimed;
243566e1707bSBalbir Singh 	struct scan_control sc = {
243666e1707bSBalbir Singh 		.gfp_mask = gfp_mask,
243766e1707bSBalbir Singh 		.may_writepage = !laptop_mode,
243822fba335SKOSAKI Motohiro 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
2439a6dc60f8SJohannes Weiner 		.may_unmap = 1,
24402e2e4259SKOSAKI Motohiro 		.may_swap = 1,
244166e1707bSBalbir Singh 		.order = order,
2442f16015fbSJohannes Weiner 		.target_mem_cgroup = NULL,
2443327c0e96SKAMEZAWA Hiroyuki 		.nodemask = nodemask,
244466e1707bSBalbir Singh 	};
2445a09ed5e0SYing Han 	struct shrink_control shrink = {
2446a09ed5e0SYing Han 		.gfp_mask = sc.gfp_mask,
2447a09ed5e0SYing Han 	};
244866e1707bSBalbir Singh 
244933906bc5SMel Gorman 	trace_mm_vmscan_direct_reclaim_begin(order,
245033906bc5SMel Gorman 				sc.may_writepage,
245133906bc5SMel Gorman 				gfp_mask);
245233906bc5SMel Gorman 
2453a09ed5e0SYing Han 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
245433906bc5SMel Gorman 
245533906bc5SMel Gorman 	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
245633906bc5SMel Gorman 
245733906bc5SMel Gorman 	return nr_reclaimed;
245866e1707bSBalbir Singh }
245966e1707bSBalbir Singh 
246000f0b825SBalbir Singh #ifdef CONFIG_CGROUP_MEM_RES_CTLR
246166e1707bSBalbir Singh 
246272835c86SJohannes Weiner unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
24634e416953SBalbir Singh 						gfp_t gfp_mask, bool noswap,
24640ae5e89cSYing Han 						struct zone *zone,
24650ae5e89cSYing Han 						unsigned long *nr_scanned)
24664e416953SBalbir Singh {
24674e416953SBalbir Singh 	struct scan_control sc = {
24680ae5e89cSYing Han 		.nr_scanned = 0,
2469b8f5c566SKOSAKI Motohiro 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
24704e416953SBalbir Singh 		.may_writepage = !laptop_mode,
24714e416953SBalbir Singh 		.may_unmap = 1,
24724e416953SBalbir Singh 		.may_swap = !noswap,
24734e416953SBalbir Singh 		.order = 0,
247472835c86SJohannes Weiner 		.target_mem_cgroup = memcg,
24754e416953SBalbir Singh 	};
24765660048cSJohannes Weiner 	struct mem_cgroup_zone mz = {
247772835c86SJohannes Weiner 		.mem_cgroup = memcg,
24785660048cSJohannes Weiner 		.zone = zone,
24795660048cSJohannes Weiner 	};
24800ae5e89cSYing Han 
24814e416953SBalbir Singh 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
24824e416953SBalbir Singh 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2483bdce6d9eSKOSAKI Motohiro 
2484bdce6d9eSKOSAKI Motohiro 	trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
2485bdce6d9eSKOSAKI Motohiro 						      sc.may_writepage,
2486bdce6d9eSKOSAKI Motohiro 						      sc.gfp_mask);
2487bdce6d9eSKOSAKI Motohiro 
24884e416953SBalbir Singh 	/*
24894e416953SBalbir Singh 	 * NOTE: Although we can get the priority field, using it
24904e416953SBalbir Singh 	 * here is not a good idea, since it limits the pages we can scan.
24914e416953SBalbir Singh 	 * If we don't reclaim here, the shrink_zone from balance_pgdat
24924e416953SBalbir Singh 	 * will pick up pages from other mem cgroups as well. We hack
24934e416953SBalbir Singh 	 * the priority and make it zero.
24944e416953SBalbir Singh 	 */
24955660048cSJohannes Weiner 	shrink_mem_cgroup_zone(0, &mz, &sc);
2496bdce6d9eSKOSAKI Motohiro 
2497bdce6d9eSKOSAKI Motohiro 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2498bdce6d9eSKOSAKI Motohiro 
24990ae5e89cSYing Han 	*nr_scanned = sc.nr_scanned;
25004e416953SBalbir Singh 	return sc.nr_reclaimed;
25014e416953SBalbir Singh }
25024e416953SBalbir Singh 
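/*
 * Illustrative reading of the gfp_mask composition above: bits in
 * GFP_RECLAIM_MASK (__GFP_IO, __GFP_FS, __GFP_WAIT and friends) come
 * from the caller and control how aggressively reclaim may behave,
 * while the remaining placement bits are taken from
 * GFP_HIGHUSER_MOVABLE, since memcg reclaim does not care where the
 * freed pages physically live.
 */
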
250372835c86SJohannes Weiner unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
25048c7c6e34SKAMEZAWA Hiroyuki 					   gfp_t gfp_mask,
2505185efc0fSJohannes Weiner 					   bool noswap)
250666e1707bSBalbir Singh {
25074e416953SBalbir Singh 	struct zonelist *zonelist;
2508bdce6d9eSKOSAKI Motohiro 	unsigned long nr_reclaimed;
2509889976dbSYing Han 	int nid;
251066e1707bSBalbir Singh 	struct scan_control sc = {
251166e1707bSBalbir Singh 		.may_writepage = !laptop_mode,
2512a6dc60f8SJohannes Weiner 		.may_unmap = 1,
25132e2e4259SKOSAKI Motohiro 		.may_swap = !noswap,
251422fba335SKOSAKI Motohiro 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
251566e1707bSBalbir Singh 		.order = 0,
251672835c86SJohannes Weiner 		.target_mem_cgroup = memcg,
2517327c0e96SKAMEZAWA Hiroyuki 		.nodemask = NULL, /* we don't care the placement */
2518a09ed5e0SYing Han 		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2519a09ed5e0SYing Han 				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2520a09ed5e0SYing Han 	};
2521a09ed5e0SYing Han 	struct shrink_control shrink = {
2522a09ed5e0SYing Han 		.gfp_mask = sc.gfp_mask,
252366e1707bSBalbir Singh 	};
252466e1707bSBalbir Singh 
2525889976dbSYing Han 	/*
2526889976dbSYing Han 	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
2527889976dbSYing Han 	 * care where the pages come from. So the node where we start the
2528889976dbSYing Han 	 * scan does not need to be the current node.
2529889976dbSYing Han 	 */
253072835c86SJohannes Weiner 	nid = mem_cgroup_select_victim_node(memcg);
2531889976dbSYing Han 
2532889976dbSYing Han 	zonelist = NODE_DATA(nid)->node_zonelists;
2533bdce6d9eSKOSAKI Motohiro 
2534bdce6d9eSKOSAKI Motohiro 	trace_mm_vmscan_memcg_reclaim_begin(0,
2535bdce6d9eSKOSAKI Motohiro 					    sc.may_writepage,
2536bdce6d9eSKOSAKI Motohiro 					    sc.gfp_mask);
2537bdce6d9eSKOSAKI Motohiro 
2538a09ed5e0SYing Han 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2539bdce6d9eSKOSAKI Motohiro 
2540bdce6d9eSKOSAKI Motohiro 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2541bdce6d9eSKOSAKI Motohiro 
2542bdce6d9eSKOSAKI Motohiro 	return nr_reclaimed;
254366e1707bSBalbir Singh }
254466e1707bSBalbir Singh #endif
254566e1707bSBalbir Singh 
2546f16015fbSJohannes Weiner static void age_active_anon(struct zone *zone, struct scan_control *sc,
2547f16015fbSJohannes Weiner 			    int priority)
2548f16015fbSJohannes Weiner {
2549b95a2f2dSJohannes Weiner 	struct mem_cgroup *memcg;
2550b95a2f2dSJohannes Weiner 
2551b95a2f2dSJohannes Weiner 	if (!total_swap_pages)
2552b95a2f2dSJohannes Weiner 		return;
2553b95a2f2dSJohannes Weiner 
2554b95a2f2dSJohannes Weiner 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
2555b95a2f2dSJohannes Weiner 	do {
2556f16015fbSJohannes Weiner 		struct mem_cgroup_zone mz = {
2557b95a2f2dSJohannes Weiner 			.mem_cgroup = memcg,
2558f16015fbSJohannes Weiner 			.zone = zone,
2559f16015fbSJohannes Weiner 		};
2560f16015fbSJohannes Weiner 
2561f16015fbSJohannes Weiner 		if (inactive_anon_is_low(&mz))
2562b95a2f2dSJohannes Weiner 			shrink_active_list(SWAP_CLUSTER_MAX, &mz,
2563b95a2f2dSJohannes Weiner 					   sc, priority, 0);
2564b95a2f2dSJohannes Weiner 
2565b95a2f2dSJohannes Weiner 		memcg = mem_cgroup_iter(NULL, memcg, NULL);
2566b95a2f2dSJohannes Weiner 	} while (memcg);
2567f16015fbSJohannes Weiner }
2568f16015fbSJohannes Weiner 
25691741c877SMel Gorman /*
25701741c877SMel Gorman  * pgdat_balanced is used when checking if a node is balanced for high-order
25711741c877SMel Gorman  * allocations. Only zones that meet watermarks and are in a zone allowed
25721741c877SMel Gorman  * by the caller's classzone_idx are added to balanced_pages. The total of
25731741c877SMel Gorman  * balanced pages must be at least 25% of the zones allowed by classzone_idx
25741741c877SMel Gorman  * for the node to be considered balanced. Forcing all zones to be balanced
25751741c877SMel Gorman  * for high orders can cause excessive reclaim when there are imbalanced zones.
25761741c877SMel Gorman  * The choice of 25% is due to
25771741c877SMel Gorman  *   o a 16M DMA zone that is balanced will not balance a zone on any
25781741c877SMel Gorman  *     reasonably sized machine
25791741c877SMel Gorman  *   o On all other machines, the top zone must be at least a reasonable
258025985edcSLucas De Marchi  *     percentage of the middle zones. For example, on 32-bit x86, highmem
25811741c877SMel Gorman  *     would need to be at least 256M for it to balance a whole node.
25821741c877SMel Gorman  *     Similarly, on x86-64 the Normal zone would need to be at least 1G
25831741c877SMel Gorman  *     to balance a node on its own. These seemed like reasonable ratios.
25841741c877SMel Gorman  */
25851741c877SMel Gorman static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
25861741c877SMel Gorman 						int classzone_idx)
25871741c877SMel Gorman {
25881741c877SMel Gorman 	unsigned long present_pages = 0;
25891741c877SMel Gorman 	int i;
25901741c877SMel Gorman 
25911741c877SMel Gorman 	for (i = 0; i <= classzone_idx; i++)
25921741c877SMel Gorman 		present_pages += pgdat->node_zones[i].present_pages;
25931741c877SMel Gorman 
25944746efdeSShaohua Li 	/* A special case: if the zone has no pages, we consider it balanced */
25954746efdeSShaohua Li 	return balanced_pages >= (present_pages >> 2);
25961741c877SMel Gorman }
25971741c877SMel Gorman 
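/*
 * Illustrative arithmetic for the 25% rule above: on a node with
 * 1000000 present pages at or below classzone_idx,
 * present_pages >> 2 = 250000, so the node counts as balanced for a
 * high-order wakeup once balanced zones cover at least 250000 pages.
 */
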
2598f50de2d3SMel Gorman /* is kswapd sleeping prematurely? */
2599dc83edd9SMel Gorman static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
2600dc83edd9SMel Gorman 					int classzone_idx)
2601f50de2d3SMel Gorman {
2602bb3ab596SKOSAKI Motohiro 	int i;
26031741c877SMel Gorman 	unsigned long balanced = 0;
26041741c877SMel Gorman 	bool all_zones_ok = true;
2605f50de2d3SMel Gorman 
2606f50de2d3SMel Gorman 	/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
2607f50de2d3SMel Gorman 	if (remaining)
2608dc83edd9SMel Gorman 		return true;
2609f50de2d3SMel Gorman 
26100abdee2bSMel Gorman 	/* Check the watermark levels */
261108951e54SMel Gorman 	for (i = 0; i <= classzone_idx; i++) {
2612bb3ab596SKOSAKI Motohiro 		struct zone *zone = pgdat->node_zones + i;
2613bb3ab596SKOSAKI Motohiro 
2614bb3ab596SKOSAKI Motohiro 		if (!populated_zone(zone))
2615bb3ab596SKOSAKI Motohiro 			continue;
2616bb3ab596SKOSAKI Motohiro 
2617355b09c4SMel Gorman 		/*
2618355b09c4SMel Gorman 		 * balance_pgdat() skips over all_unreclaimable after
2619355b09c4SMel Gorman 		 * DEF_PRIORITY. Effectively, it considers them balanced so
2620355b09c4SMel Gorman 		 * they must be considered balanced here as well if kswapd
2621355b09c4SMel Gorman 		 * is to sleep
2622355b09c4SMel Gorman 		 */
2623355b09c4SMel Gorman 		if (zone->all_unreclaimable) {
2624355b09c4SMel Gorman 			balanced += zone->present_pages;
2625de3fab39SKOSAKI Motohiro 			continue;
2626355b09c4SMel Gorman 		}
2627de3fab39SKOSAKI Motohiro 
262888f5acf8SMel Gorman 		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
2629da175d06SMel Gorman 							i, 0))
26301741c877SMel Gorman 			all_zones_ok = false;
26311741c877SMel Gorman 		else
26321741c877SMel Gorman 			balanced += zone->present_pages;
2633bb3ab596SKOSAKI Motohiro 	}
2634f50de2d3SMel Gorman 
26351741c877SMel Gorman 	/*
26361741c877SMel Gorman 	 * For high-order requests, the balanced zones must contain at least
26371741c877SMel Gorman 	 * 25% of the node's pages for kswapd to sleep. For order-0, all zones
26381741c877SMel Gorman 	 * must be balanced
26391741c877SMel Gorman 	 */
26401741c877SMel Gorman 	if (order)
2641afc7e326SJohannes Weiner 		return !pgdat_balanced(pgdat, balanced, classzone_idx);
26421741c877SMel Gorman 	else
26431741c877SMel Gorman 		return !all_zones_ok;
2644f50de2d3SMel Gorman }
2645f50de2d3SMel Gorman 
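/*
 * Worked example of the premature-sleep test above (illustrative): if
 * a direct reclaimer wakes kswapd during its HZ/10 trial nap,
 * schedule_timeout() returns a nonzero remaining time and the sleep is
 * judged premature immediately, before any watermark is examined.
 */
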
26461da177e4SLinus Torvalds /*
26471da177e4SLinus Torvalds  * For kswapd, balance_pgdat() will work across all this node's zones until
264841858966SMel Gorman  * they are all at high_wmark_pages(zone).
26491da177e4SLinus Torvalds  *
26500abdee2bSMel Gorman  * Returns the final order kswapd was reclaiming at
26511da177e4SLinus Torvalds  *
26521da177e4SLinus Torvalds  * There is special handling here for zones which are full of pinned pages.
26531da177e4SLinus Torvalds  * This can happen if the pages are all mlocked, or if they are all used by
26541da177e4SLinus Torvalds  * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
26551da177e4SLinus Torvalds  * What we do is to detect the case where all pages in the zone have been
26561da177e4SLinus Torvalds  * scanned twice and there has been zero successful reclaim.  Mark the zone as
26571da177e4SLinus Torvalds  * dead and from now on, only perform a short scan.  Basically we're polling
26581da177e4SLinus Torvalds  * the zone for when the problem goes away.
26591da177e4SLinus Torvalds  *
26601da177e4SLinus Torvalds  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
266141858966SMel Gorman  * zones which have free_pages > high_wmark_pages(zone), but once a zone is
266241858966SMel Gorman  * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
266341858966SMel Gorman  * lower zones regardless of the number of free pages in the lower zones. This
266441858966SMel Gorman  * interoperates with the page allocator fallback scheme to ensure that aging
266541858966SMel Gorman  * of pages is balanced across the zones.
26661da177e4SLinus Torvalds  */
266799504748SMel Gorman static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2668dc83edd9SMel Gorman 							int *classzone_idx)
26691da177e4SLinus Torvalds {
26701da177e4SLinus Torvalds 	int all_zones_ok;
26711741c877SMel Gorman 	unsigned long balanced;
26721da177e4SLinus Torvalds 	int priority;
26731da177e4SLinus Torvalds 	int i;
267499504748SMel Gorman 	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
267569e05944SAndrew Morton 	unsigned long total_scanned;
26761da177e4SLinus Torvalds 	struct reclaim_state *reclaim_state = current->reclaim_state;
26770ae5e89cSYing Han 	unsigned long nr_soft_reclaimed;
26780ae5e89cSYing Han 	unsigned long nr_soft_scanned;
2679179e9639SAndrew Morton 	struct scan_control sc = {
2680179e9639SAndrew Morton 		.gfp_mask = GFP_KERNEL,
2681a6dc60f8SJohannes Weiner 		.may_unmap = 1,
26822e2e4259SKOSAKI Motohiro 		.may_swap = 1,
268322fba335SKOSAKI Motohiro 		/*
268422fba335SKOSAKI Motohiro 		 * kswapd doesn't want to be bailed out while reclaiming, because
268522fba335SKOSAKI Motohiro 		 * we want to put equal scanning pressure on each zone.
268622fba335SKOSAKI Motohiro 		 */
268722fba335SKOSAKI Motohiro 		.nr_to_reclaim = ULONG_MAX,
26885ad333ebSAndy Whitcroft 		.order = order,
2689f16015fbSJohannes Weiner 		.target_mem_cgroup = NULL,
2690179e9639SAndrew Morton 	};
2691a09ed5e0SYing Han 	struct shrink_control shrink = {
2692a09ed5e0SYing Han 		.gfp_mask = sc.gfp_mask,
2693a09ed5e0SYing Han 	};
26941da177e4SLinus Torvalds loop_again:
26951da177e4SLinus Torvalds 	total_scanned = 0;
2696a79311c1SRik van Riel 	sc.nr_reclaimed = 0;
2697c0bbbc73SChristoph Lameter 	sc.may_writepage = !laptop_mode;
2698f8891e5eSChristoph Lameter 	count_vm_event(PAGEOUTRUN);
26991da177e4SLinus Torvalds 
27001da177e4SLinus Torvalds 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
27011da177e4SLinus Torvalds 		unsigned long lru_pages = 0;
2702bb3ab596SKOSAKI Motohiro 		int has_under_min_watermark_zone = 0;
27031da177e4SLinus Torvalds 
2704f7b7fd8fSRik van Riel 		/* The swap token gets in the way of swapout... */
2705f7b7fd8fSRik van Riel 		if (!priority)
2706a433658cSKOSAKI Motohiro 			disable_swap_token(NULL);
2707f7b7fd8fSRik van Riel 
27081da177e4SLinus Torvalds 		all_zones_ok = 1;
27091741c877SMel Gorman 		balanced = 0;
27101da177e4SLinus Torvalds 
27111da177e4SLinus Torvalds 		/*
27121da177e4SLinus Torvalds 		 * Scan in the highmem->dma direction for the highest
27131da177e4SLinus Torvalds 		 * zone which needs scanning
27141da177e4SLinus Torvalds 		 */
27151da177e4SLinus Torvalds 		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
27161da177e4SLinus Torvalds 			struct zone *zone = pgdat->node_zones + i;
27171da177e4SLinus Torvalds 
2718f3fe6512SCon Kolivas 			if (!populated_zone(zone))
27191da177e4SLinus Torvalds 				continue;
27201da177e4SLinus Torvalds 
272193e4a89aSKOSAKI Motohiro 			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
27221da177e4SLinus Torvalds 				continue;
27231da177e4SLinus Torvalds 
2724556adecbSRik van Riel 			/*
2725556adecbSRik van Riel 			 * Do some background aging of the anon list, to give
2726556adecbSRik van Riel 			 * pages a chance to be referenced before reclaiming.
2727556adecbSRik van Riel 			 */
2728f16015fbSJohannes Weiner 			age_active_anon(zone, &sc, priority);
2729556adecbSRik van Riel 
273088f5acf8SMel Gorman 			if (!zone_watermark_ok_safe(zone, order,
273141858966SMel Gorman 					high_wmark_pages(zone), 0, 0)) {
27321da177e4SLinus Torvalds 				end_zone = i;
2733e1dbeda6SAndrew Morton 				break;
2734439423f6SShaohua Li 			} else {
2735439423f6SShaohua Li 				/* If balanced, clear the congested flag */
2736439423f6SShaohua Li 				zone_clear_flag(zone, ZONE_CONGESTED);
27371da177e4SLinus Torvalds 			}
27381da177e4SLinus Torvalds 		}
2739e1dbeda6SAndrew Morton 		if (i < 0)
27401da177e4SLinus Torvalds 			goto out;
2741e1dbeda6SAndrew Morton 
27421da177e4SLinus Torvalds 		for (i = 0; i <= end_zone; i++) {
27431da177e4SLinus Torvalds 			struct zone *zone = pgdat->node_zones + i;
27441da177e4SLinus Torvalds 
2745adea02a1SWu Fengguang 			lru_pages += zone_reclaimable_pages(zone);
27461da177e4SLinus Torvalds 		}
27471da177e4SLinus Torvalds 
27481da177e4SLinus Torvalds 		/*
27491da177e4SLinus Torvalds 		 * Now scan the zone in the dma->highmem direction, stopping
27501da177e4SLinus Torvalds 		 * at the last zone which needs scanning.
27511da177e4SLinus Torvalds 		 *
27521da177e4SLinus Torvalds 		 * We do this because the page allocator works in the opposite
27531da177e4SLinus Torvalds 		 * direction.  This prevents the page allocator from allocating
27541da177e4SLinus Torvalds 		 * pages behind kswapd's direction of progress, which would
27551da177e4SLinus Torvalds 		 * cause too much scanning of the lower zones.
27561da177e4SLinus Torvalds 		 */
27571da177e4SLinus Torvalds 		for (i = 0; i <= end_zone; i++) {
27581da177e4SLinus Torvalds 			struct zone *zone = pgdat->node_zones + i;
2759fe2c2a10SRik van Riel 			int nr_slab, testorder;
27608afdceceSMel Gorman 			unsigned long balance_gap;
27611da177e4SLinus Torvalds 
2762f3fe6512SCon Kolivas 			if (!populated_zone(zone))
27631da177e4SLinus Torvalds 				continue;
27641da177e4SLinus Torvalds 
276593e4a89aSKOSAKI Motohiro 			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
27661da177e4SLinus Torvalds 				continue;
27671da177e4SLinus Torvalds 
27681da177e4SLinus Torvalds 			sc.nr_scanned = 0;
27694e416953SBalbir Singh 
27700ae5e89cSYing Han 			nr_soft_scanned = 0;
27714e416953SBalbir Singh 			/*
27724e416953SBalbir Singh 			 * Call soft limit reclaim before calling shrink_zone.
27734e416953SBalbir Singh 			 */
27740ae5e89cSYing Han 			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
27750ae5e89cSYing Han 							order, sc.gfp_mask,
27760ae5e89cSYing Han 							&nr_soft_scanned);
27770ae5e89cSYing Han 			sc.nr_reclaimed += nr_soft_reclaimed;
27780ae5e89cSYing Han 			total_scanned += nr_soft_scanned;
277900918b6aSKOSAKI Motohiro 
278032a4330dSRik van Riel 			/*
27818afdceceSMel Gorman 			 * We put equal pressure on every zone, unless
27828afdceceSMel Gorman 			 * one zone has way too many pages free
27838afdceceSMel Gorman 			 * already. The "too many pages" is defined
27848afdceceSMel Gorman 			 * as the high wmark plus a "gap" where the
27858afdceceSMel Gorman 			 * gap is either the low watermark or 1%
27868afdceceSMel Gorman 			 * of the zone, whichever is smaller.
278732a4330dSRik van Riel 			 */
27888afdceceSMel Gorman 			balance_gap = min(low_wmark_pages(zone),
27898afdceceSMel Gorman 				(zone->present_pages +
27908afdceceSMel Gorman 					KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
27918afdceceSMel Gorman 				KSWAPD_ZONE_BALANCE_GAP_RATIO);
2792fe2c2a10SRik van Riel 			/*
2793fe2c2a10SRik van Riel 			 * Kswapd reclaims only single pages with compaction
2794fe2c2a10SRik van Riel 			 * enabled. Trying too hard to reclaim until contiguous
2795fe2c2a10SRik van Riel 			 * free pages have become available can hurt performance
2796fe2c2a10SRik van Riel 			 * by evicting too much useful data from memory.
2797fe2c2a10SRik van Riel 			 * Do not reclaim more than needed for compaction.
2798fe2c2a10SRik van Riel 			 */
2799fe2c2a10SRik van Riel 			testorder = order;
2800fe2c2a10SRik van Riel 			if (COMPACTION_BUILD && order &&
2801fe2c2a10SRik van Riel 					compaction_suitable(zone, order) !=
2802fe2c2a10SRik van Riel 						COMPACT_SKIPPED)
2803fe2c2a10SRik van Riel 				testorder = 0;
2804fe2c2a10SRik van Riel 
2805fe2c2a10SRik van Riel 			if (!zone_watermark_ok_safe(zone, testorder,
28068afdceceSMel Gorman 					high_wmark_pages(zone) + balance_gap,
2807d7868daeSMel Gorman 					end_zone, 0)) {
2808a79311c1SRik van Riel 				shrink_zone(priority, zone, &sc);
2809d7868daeSMel Gorman 
28101da177e4SLinus Torvalds 				reclaim_state->reclaimed_slab = 0;
28111495f230SYing Han 				nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
2812a79311c1SRik van Riel 				sc.nr_reclaimed += reclaim_state->reclaimed_slab;
28131da177e4SLinus Torvalds 				total_scanned += sc.nr_scanned;
28145a03b051SAndrea Arcangeli 
2815d7868daeSMel Gorman 				if (nr_slab == 0 && !zone_reclaimable(zone))
281693e4a89aSKOSAKI Motohiro 					zone->all_unreclaimable = 1;
2817d7868daeSMel Gorman 			}
2818d7868daeSMel Gorman 
28191da177e4SLinus Torvalds 			/*
28201da177e4SLinus Torvalds 			 * If we've done a decent amount of scanning and
28211da177e4SLinus Torvalds 			 * the reclaim ratio is low, start doing writepage
28221da177e4SLinus Torvalds 			 * even in laptop mode
28231da177e4SLinus Torvalds 			 */
28241da177e4SLinus Torvalds 			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
2825a79311c1SRik van Riel 			    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
28261da177e4SLinus Torvalds 				sc.may_writepage = 1;
2827bb3ab596SKOSAKI Motohiro 
2828215ddd66SMel Gorman 			if (zone->all_unreclaimable) {
2829215ddd66SMel Gorman 				if (end_zone && end_zone == i)
2830215ddd66SMel Gorman 					end_zone--;
2831d7868daeSMel Gorman 				continue;
2832215ddd66SMel Gorman 			}
2833d7868daeSMel Gorman 
2834fe2c2a10SRik van Riel 			if (!zone_watermark_ok_safe(zone, testorder,
283545973d74SMinchan Kim 					high_wmark_pages(zone), end_zone, 0)) {
283645973d74SMinchan Kim 				all_zones_ok = 0;
2837bb3ab596SKOSAKI Motohiro 				/*
283845973d74SMinchan Kim 				 * We are still under the min watermark. This
283945973d74SMinchan Kim 				 * means that we risk GFP_ATOMIC allocation
284045973d74SMinchan Kim 				 * failure. Hurry up!
2841bb3ab596SKOSAKI Motohiro 				 */
284288f5acf8SMel Gorman 				if (!zone_watermark_ok_safe(zone, order,
284345973d74SMinchan Kim 					    min_wmark_pages(zone), end_zone, 0))
2844bb3ab596SKOSAKI Motohiro 					has_under_min_watermark_zone = 1;
28450e093d99SMel Gorman 			} else {
28460e093d99SMel Gorman 				/*
28470e093d99SMel Gorman 				 * If a zone reaches its high watermark,
28480e093d99SMel Gorman 				 * consider it to be no longer congested. It's
28490e093d99SMel Gorman 				 * possible there are dirty pages backed by
28500e093d99SMel Gorman 				 * congested BDIs but as pressure is relieved,
28510e093d99SMel Gorman 				 * speculatively avoid congestion waits
28520e093d99SMel Gorman 				 */
28530e093d99SMel Gorman 				zone_clear_flag(zone, ZONE_CONGESTED);
2854dc83edd9SMel Gorman 				if (i <= *classzone_idx)
28551741c877SMel Gorman 					balanced += zone->present_pages;
285645973d74SMinchan Kim 			}
2857bb3ab596SKOSAKI Motohiro 
28581da177e4SLinus Torvalds 		}
2859dc83edd9SMel Gorman 		if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
28601da177e4SLinus Torvalds 			break;		/* kswapd: all done */
28611da177e4SLinus Torvalds 		/*
28621da177e4SLinus Torvalds 		 * OK, kswapd is getting into trouble.  Take a nap, then take
28631da177e4SLinus Torvalds 		 * another pass across the zones.
28641da177e4SLinus Torvalds 		 */
2865bb3ab596SKOSAKI Motohiro 		if (total_scanned && (priority < DEF_PRIORITY - 2)) {
2866bb3ab596SKOSAKI Motohiro 			if (has_under_min_watermark_zone)
2867bb3ab596SKOSAKI Motohiro 				count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2868bb3ab596SKOSAKI Motohiro 			else
28698aa7e847SJens Axboe 				congestion_wait(BLK_RW_ASYNC, HZ/10);
2870bb3ab596SKOSAKI Motohiro 		}
28711da177e4SLinus Torvalds 
28721da177e4SLinus Torvalds 		/*
28731da177e4SLinus Torvalds 		 * We do this so kswapd doesn't build up large priorities for
28741da177e4SLinus Torvalds 		 * example when it is freeing in parallel with allocators. It
28751da177e4SLinus Torvalds 		 * matches the direct reclaim path behaviour in terms of impact
28761da177e4SLinus Torvalds 		 * on zone->*_priority.
28771da177e4SLinus Torvalds 		 */
2878a79311c1SRik van Riel 		if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
28791da177e4SLinus Torvalds 			break;
28801da177e4SLinus Torvalds 	}
28811da177e4SLinus Torvalds out:
288299504748SMel Gorman 
288399504748SMel Gorman 	/*
288499504748SMel Gorman 	 * order-0: All zones must meet high watermark for a balanced node
28851741c877SMel Gorman 	 * high-order: Balanced zones must make up at least 25% of the node
28861741c877SMel Gorman 	 *             for the node to be balanced
288799504748SMel Gorman 	 */
2888dc83edd9SMel Gorman 	if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
28891da177e4SLinus Torvalds 		cond_resched();
28908357376dSRafael J. Wysocki 
28918357376dSRafael J. Wysocki 		try_to_freeze();
28928357376dSRafael J. Wysocki 
289373ce02e9SKOSAKI Motohiro 		/*
289473ce02e9SKOSAKI Motohiro 		 * Fragmentation may mean that the system cannot be
289573ce02e9SKOSAKI Motohiro 		 * rebalanced for high-order allocations in all zones.
289673ce02e9SKOSAKI Motohiro 		 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
289773ce02e9SKOSAKI Motohiro 		 * it means the zones have been fully scanned and are still
289873ce02e9SKOSAKI Motohiro 		 * not balanced. For high-order allocations, there is
289973ce02e9SKOSAKI Motohiro 		 * little point trying all over again as kswapd may
290073ce02e9SKOSAKI Motohiro 		 * loop infinitely.
290173ce02e9SKOSAKI Motohiro 		 *
290273ce02e9SKOSAKI Motohiro 		 * Instead, recheck all watermarks at order-0 as they
290373ce02e9SKOSAKI Motohiro 		 * are the most important. If watermarks are ok, kswapd will go
290473ce02e9SKOSAKI Motohiro 		 * back to sleep. High-order users can still perform direct
290573ce02e9SKOSAKI Motohiro 		 * reclaim if they wish.
290673ce02e9SKOSAKI Motohiro 		 */
290773ce02e9SKOSAKI Motohiro 		if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
290873ce02e9SKOSAKI Motohiro 			order = sc.order = 0;
290973ce02e9SKOSAKI Motohiro 
29101da177e4SLinus Torvalds 		goto loop_again;
29111da177e4SLinus Torvalds 	}
29121da177e4SLinus Torvalds 
291399504748SMel Gorman 	/*
291499504748SMel Gorman 	 * If kswapd was reclaiming at a higher order, it has the option of
291599504748SMel Gorman 	 * sleeping without all zones being balanced. Before it does, it must
291699504748SMel Gorman 	 * ensure that the watermarks for order-0 on *all* zones are met and
291799504748SMel Gorman 	 * that the congestion flags are cleared. The congestion flag must
291899504748SMel Gorman 	 * be cleared as kswapd is the only mechanism that clears the flag
291999504748SMel Gorman 	 * and it is potentially going to sleep here.
292099504748SMel Gorman 	 */
292199504748SMel Gorman 	if (order) {
2922*7be62de9SRik van Riel 		int zones_need_compaction = 1;
2923*7be62de9SRik van Riel 
292499504748SMel Gorman 		for (i = 0; i <= end_zone; i++) {
292599504748SMel Gorman 			struct zone *zone = pgdat->node_zones + i;
292699504748SMel Gorman 
292799504748SMel Gorman 			if (!populated_zone(zone))
292899504748SMel Gorman 				continue;
292999504748SMel Gorman 
293099504748SMel Gorman 			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
293199504748SMel Gorman 				continue;
293299504748SMel Gorman 
2933fe2c2a10SRik van Riel 			/* Would compaction fail due to lack of free memory? */
2934fe2c2a10SRik van Riel 			if (compaction_suitable(zone, order) == COMPACT_SKIPPED)
2935fe2c2a10SRik van Riel 				goto loop_again;
2936fe2c2a10SRik van Riel 
293799504748SMel Gorman 			/* Confirm the zone is balanced for order-0 */
293899504748SMel Gorman 			if (!zone_watermark_ok(zone, 0,
293999504748SMel Gorman 					high_wmark_pages(zone), 0, 0)) {
294099504748SMel Gorman 				order = sc.order = 0;
294199504748SMel Gorman 				goto loop_again;
294299504748SMel Gorman 			}
294399504748SMel Gorman 
2944*7be62de9SRik van Riel 			/* Check if the memory needs to be defragmented. */
2945*7be62de9SRik van Riel 			if (zone_watermark_ok(zone, order,
2946*7be62de9SRik van Riel 				    low_wmark_pages(zone), *classzone_idx, 0))
2947*7be62de9SRik van Riel 				zones_need_compaction = 0;
2948*7be62de9SRik van Riel 
294999504748SMel Gorman 			/* If balanced, clear the congested flag */
295099504748SMel Gorman 			zone_clear_flag(zone, ZONE_CONGESTED);
295199504748SMel Gorman 		}
2952*7be62de9SRik van Riel 
2953*7be62de9SRik van Riel 		if (zones_need_compaction)
2954*7be62de9SRik van Riel 			compact_pgdat(pgdat, order);
295599504748SMel Gorman 	}
295699504748SMel Gorman 
29570abdee2bSMel Gorman 	/*
29580abdee2bSMel Gorman 	 * Return the order we were reclaiming at so sleeping_prematurely()
29590abdee2bSMel Gorman 	 * makes a decision on the order we were last reclaiming at. However,
29600abdee2bSMel Gorman 	 * if another caller entered the allocator slow path while kswapd
29610abdee2bSMel Gorman 	 * was awake, order will remain at the higher level
29620abdee2bSMel Gorman 	 */
2963dc83edd9SMel Gorman 	*classzone_idx = end_zone;
29640abdee2bSMel Gorman 	return order;
29651da177e4SLinus Torvalds }
29661da177e4SLinus Torvalds 
2967dc83edd9SMel Gorman static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
2968f0bc0a60SKOSAKI Motohiro {
2969f0bc0a60SKOSAKI Motohiro 	long remaining = 0;
2970f0bc0a60SKOSAKI Motohiro 	DEFINE_WAIT(wait);
2971f0bc0a60SKOSAKI Motohiro 
2972f0bc0a60SKOSAKI Motohiro 	if (freezing(current) || kthread_should_stop())
2973f0bc0a60SKOSAKI Motohiro 		return;
2974f0bc0a60SKOSAKI Motohiro 
2975f0bc0a60SKOSAKI Motohiro 	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2976f0bc0a60SKOSAKI Motohiro 
2977f0bc0a60SKOSAKI Motohiro 	/* Try to sleep for a short interval */
2978dc83edd9SMel Gorman 	if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
2979f0bc0a60SKOSAKI Motohiro 		remaining = schedule_timeout(HZ/10);
2980f0bc0a60SKOSAKI Motohiro 		finish_wait(&pgdat->kswapd_wait, &wait);
2981f0bc0a60SKOSAKI Motohiro 		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2982f0bc0a60SKOSAKI Motohiro 	}
2983f0bc0a60SKOSAKI Motohiro 
2984f0bc0a60SKOSAKI Motohiro 	/*
2985f0bc0a60SKOSAKI Motohiro 	 * After a short sleep, check if it was a premature sleep. If not, then
2986f0bc0a60SKOSAKI Motohiro 	 * go fully to sleep until explicitly woken up.
2987f0bc0a60SKOSAKI Motohiro 	 */
2988dc83edd9SMel Gorman 	if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
2989f0bc0a60SKOSAKI Motohiro 		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
2990f0bc0a60SKOSAKI Motohiro 
2991f0bc0a60SKOSAKI Motohiro 		/*
2992f0bc0a60SKOSAKI Motohiro 		 * vmstat counters are not perfectly accurate and the estimated
2993f0bc0a60SKOSAKI Motohiro 		 * value for counters such as NR_FREE_PAGES can deviate from the
2994f0bc0a60SKOSAKI Motohiro 		 * true value by nr_online_cpus * threshold. To avoid the zone
2995f0bc0a60SKOSAKI Motohiro 		 * watermarks being breached while under pressure, we reduce the
2996f0bc0a60SKOSAKI Motohiro 		 * per-cpu vmstat threshold while kswapd is awake and restore
2997f0bc0a60SKOSAKI Motohiro 		 * them before going back to sleep.
2998f0bc0a60SKOSAKI Motohiro 		 */
2999f0bc0a60SKOSAKI Motohiro 		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
3000f0bc0a60SKOSAKI Motohiro 		schedule();
3001f0bc0a60SKOSAKI Motohiro 		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
3002f0bc0a60SKOSAKI Motohiro 	} else {
3003f0bc0a60SKOSAKI Motohiro 		if (remaining)
3004f0bc0a60SKOSAKI Motohiro 			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
3005f0bc0a60SKOSAKI Motohiro 		else
3006f0bc0a60SKOSAKI Motohiro 			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
3007f0bc0a60SKOSAKI Motohiro 	}
3008f0bc0a60SKOSAKI Motohiro 	finish_wait(&pgdat->kswapd_wait, &wait);
3009f0bc0a60SKOSAKI Motohiro }
3010f0bc0a60SKOSAKI Motohiro 
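/*
 * Illustrative timeline for the two-phase sleep above: kswapd first
 * naps for HZ/10 jiffies; only if the node still looks balanced after
 * that trial nap does it relax the per-cpu vmstat thresholds and sleep
 * indefinitely until the next wakeup_kswapd().
 */
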
30111da177e4SLinus Torvalds /*
30121da177e4SLinus Torvalds  * The background pageout daemon, started as a kernel thread
30131da177e4SLinus Torvalds  * from the init process.
30141da177e4SLinus Torvalds  *
30151da177e4SLinus Torvalds  * This basically trickles out pages so that we have _some_
30161da177e4SLinus Torvalds  * free memory available even if there is no other activity
30171da177e4SLinus Torvalds  * that frees anything up. This is needed for things like routing
30181da177e4SLinus Torvalds  * etc, where we otherwise might have all activity going on in
30191da177e4SLinus Torvalds  * asynchronous contexts that cannot page things out.
30201da177e4SLinus Torvalds  *
30211da177e4SLinus Torvalds  * If there are applications that are active memory-allocators
30221da177e4SLinus Torvalds  * (most normal use), this basically shouldn't matter.
30231da177e4SLinus Torvalds  */
30241da177e4SLinus Torvalds static int kswapd(void *p)
30251da177e4SLinus Torvalds {
3026215ddd66SMel Gorman 	unsigned long order, new_order;
3027d2ebd0f6SAlex,Shi 	unsigned balanced_order;
3028215ddd66SMel Gorman 	int classzone_idx, new_classzone_idx;
3029d2ebd0f6SAlex,Shi 	int balanced_classzone_idx;
30301da177e4SLinus Torvalds 	pg_data_t *pgdat = (pg_data_t*)p;
30311da177e4SLinus Torvalds 	struct task_struct *tsk = current;
3032f0bc0a60SKOSAKI Motohiro 
30331da177e4SLinus Torvalds 	struct reclaim_state reclaim_state = {
30341da177e4SLinus Torvalds 		.reclaimed_slab = 0,
30351da177e4SLinus Torvalds 	};
3036a70f7302SRusty Russell 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
30371da177e4SLinus Torvalds 
3038cf40bd16SNick Piggin 	lockdep_set_current_reclaim_state(GFP_KERNEL);
3039cf40bd16SNick Piggin 
3040174596a0SRusty Russell 	if (!cpumask_empty(cpumask))
3041c5f59f08SMike Travis 		set_cpus_allowed_ptr(tsk, cpumask);
30421da177e4SLinus Torvalds 	current->reclaim_state = &reclaim_state;
30431da177e4SLinus Torvalds 
30441da177e4SLinus Torvalds 	/*
30451da177e4SLinus Torvalds 	 * Tell the memory management that we're a "memory allocator",
30461da177e4SLinus Torvalds 	 * and that if we need more memory we should get access to it
30471da177e4SLinus Torvalds 	 * regardless (see "__alloc_pages()"). "kswapd" should
30481da177e4SLinus Torvalds 	 * never get caught in the normal page freeing logic.
30491da177e4SLinus Torvalds 	 *
30501da177e4SLinus Torvalds 	 * (Kswapd normally doesn't need memory anyway, but sometimes
30511da177e4SLinus Torvalds 	 * you need a small amount of memory in order to be able to
30521da177e4SLinus Torvalds 	 * page out something else, and this flag essentially protects
30531da177e4SLinus Torvalds 	 * us from recursively trying to free more memory as we're
30541da177e4SLinus Torvalds 	 * trying to free the first piece of memory in the first place).
30551da177e4SLinus Torvalds 	 */
3056930d9152SChristoph Lameter 	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
305783144186SRafael J. Wysocki 	set_freezable();
30581da177e4SLinus Torvalds 
3059215ddd66SMel Gorman 	order = new_order = 0;
3060d2ebd0f6SAlex,Shi 	balanced_order = 0;
3061215ddd66SMel Gorman 	classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
3062d2ebd0f6SAlex,Shi 	balanced_classzone_idx = classzone_idx;
30631da177e4SLinus Torvalds 	for ( ; ; ) {
30648fe23e05SDavid Rientjes 		int ret;
30653e1d1d28SChristoph Lameter 
3066215ddd66SMel Gorman 		/*
3067215ddd66SMel Gorman 		 * If the last balance_pgdat was unsuccessful it's unlikely a
3068215ddd66SMel Gorman 		 * new request of a similar or harder type will succeed soon
3069215ddd66SMel Gorman 		 * so consider going to sleep on the basis of the order we reclaimed at
3070215ddd66SMel Gorman 		 */
3071d2ebd0f6SAlex,Shi 		if (balanced_classzone_idx >= new_classzone_idx &&
3072d2ebd0f6SAlex,Shi 					balanced_order == new_order) {
30731da177e4SLinus Torvalds 			new_order = pgdat->kswapd_max_order;
307499504748SMel Gorman 			new_classzone_idx = pgdat->classzone_idx;
30751da177e4SLinus Torvalds 			pgdat->kswapd_max_order =  0;
3076215ddd66SMel Gorman 			pgdat->classzone_idx = pgdat->nr_zones - 1;
3077215ddd66SMel Gorman 		}
3078215ddd66SMel Gorman 
307999504748SMel Gorman 		if (order < new_order || classzone_idx > new_classzone_idx) {
30801da177e4SLinus Torvalds 			/*
30811da177e4SLinus Torvalds 			 * Don't sleep if someone wants a larger 'order'
308299504748SMel Gorman 			 * allocation or has tighter zone constraints
30831da177e4SLinus Torvalds 			 */
30841da177e4SLinus Torvalds 			order = new_order;
308599504748SMel Gorman 			classzone_idx = new_classzone_idx;
30861da177e4SLinus Torvalds 		} else {
3087d2ebd0f6SAlex,Shi 			kswapd_try_to_sleep(pgdat, balanced_order,
3088d2ebd0f6SAlex,Shi 						balanced_classzone_idx);
30891da177e4SLinus Torvalds 			order = pgdat->kswapd_max_order;
309099504748SMel Gorman 			classzone_idx = pgdat->classzone_idx;
3091f0dfcde0SAlex,Shi 			new_order = order;
3092f0dfcde0SAlex,Shi 			new_classzone_idx = classzone_idx;
30934d40502eSMel Gorman 			pgdat->kswapd_max_order = 0;
3094215ddd66SMel Gorman 			pgdat->classzone_idx = pgdat->nr_zones - 1;
30951da177e4SLinus Torvalds 		}
30961da177e4SLinus Torvalds 
30978fe23e05SDavid Rientjes 		ret = try_to_freeze();
30988fe23e05SDavid Rientjes 		if (kthread_should_stop())
30998fe23e05SDavid Rientjes 			break;
31008fe23e05SDavid Rientjes 
31018fe23e05SDavid Rientjes 		/*
31028fe23e05SDavid Rientjes 		 * We can speed up thawing tasks if we don't call balance_pgdat
31038fe23e05SDavid Rientjes 		 * after returning from the refrigerator.
3104b1296cc4SRafael J. Wysocki 		 */
310533906bc5SMel Gorman 		if (!ret) {
310633906bc5SMel Gorman 			trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
3107d2ebd0f6SAlex,Shi 			balanced_classzone_idx = classzone_idx;
3108d2ebd0f6SAlex,Shi 			balanced_order = balance_pgdat(pgdat, order,
3109d2ebd0f6SAlex,Shi 						&balanced_classzone_idx);
31101da177e4SLinus Torvalds 		}
311133906bc5SMel Gorman 	}
31121da177e4SLinus Torvalds 	return 0;
31131da177e4SLinus Torvalds }
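/*
 * Editor's note: a minimal illustrative sketch, not part of vmscan.c, of
 * the PF_MEMALLOC pattern described in the comment inside kswapd() above.
 * The helper name is hypothetical. Setting the flag lets any allocation
 * made while reclaiming dip into reserves instead of recursing back into
 * reclaim; the previous state is restored on the way out.
 */
static inline void example_reclaim_side_work(void)
{
	struct task_struct *tsk = current;
	bool had_memalloc = tsk->flags & PF_MEMALLOC;

	tsk->flags |= PF_MEMALLOC;	/* allocations may use reserves */
	/* ... do work that may need small allocations to make progress ... */
	if (!had_memalloc)
		tsk->flags &= ~PF_MEMALLOC;	/* restore the caller's state */
}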
31141da177e4SLinus Torvalds 
31151da177e4SLinus Torvalds /*
31161da177e4SLinus Torvalds  * A zone is low on free memory, so wake its kswapd task to service it.
31171da177e4SLinus Torvalds  */
311899504748SMel Gorman void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
31191da177e4SLinus Torvalds {
31201da177e4SLinus Torvalds 	pg_data_t *pgdat;
31211da177e4SLinus Torvalds 
3122f3fe6512SCon Kolivas 	if (!populated_zone(zone))
31231da177e4SLinus Torvalds 		return;
31241da177e4SLinus Torvalds 
312502a0e53dSPaul Jackson 	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
31261da177e4SLinus Torvalds 		return;
312788f5acf8SMel Gorman 	pgdat = zone->zone_pgdat;
312899504748SMel Gorman 	if (pgdat->kswapd_max_order < order) {
312988f5acf8SMel Gorman 		pgdat->kswapd_max_order = order;
313099504748SMel Gorman 		pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
313199504748SMel Gorman 	}
31328d0986e2SCon Kolivas 	if (!waitqueue_active(&pgdat->kswapd_wait))
31331da177e4SLinus Torvalds 		return;
313488f5acf8SMel Gorman 	if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
313588f5acf8SMel Gorman 		return;
313688f5acf8SMel Gorman 
313788f5acf8SMel Gorman 	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
31388d0986e2SCon Kolivas 	wake_up_interruptible(&pgdat->kswapd_wait);
31391da177e4SLinus Torvalds }
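/*
 * Editor's note: an illustrative, hypothetical caller of wakeup_kswapd().
 * The page allocator's slow path (mm/page_alloc.c) wakes kswapd for each
 * candidate zone before resorting to direct reclaim; this sketch only
 * mirrors that shape and is not the real __alloc_pages code.
 */
static inline void example_wake_all_kswapds(struct zonelist *zonelist,
					    int order,
					    enum zone_type high_zoneidx)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		wakeup_kswapd(zone, order, high_zoneidx);
}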
31401da177e4SLinus Torvalds 
3141adea02a1SWu Fengguang /*
3142adea02a1SWu Fengguang  * The reclaimable count is a close estimate rather than an exact figure.
3143adea02a1SWu Fengguang  * The harder-to-reclaim pages it includes may be
3144adea02a1SWu Fengguang  * - mlocked pages, which will be moved to the unevictable list when encountered
3145adea02a1SWu Fengguang  * - mapped pages, which may require several passes to be reclaimed
3146adea02a1SWu Fengguang  * - dirty pages, which are not "instantly" reclaimable
3147adea02a1SWu Fengguang  */
3148adea02a1SWu Fengguang unsigned long global_reclaimable_pages(void)
31494f98a2feSRik van Riel {
3150adea02a1SWu Fengguang 	int nr;
3151adea02a1SWu Fengguang 
3152adea02a1SWu Fengguang 	nr = global_page_state(NR_ACTIVE_FILE) +
3153adea02a1SWu Fengguang 	     global_page_state(NR_INACTIVE_FILE);
3154adea02a1SWu Fengguang 
3155adea02a1SWu Fengguang 	if (nr_swap_pages > 0)
3156adea02a1SWu Fengguang 		nr += global_page_state(NR_ACTIVE_ANON) +
3157adea02a1SWu Fengguang 		      global_page_state(NR_INACTIVE_ANON);
3158adea02a1SWu Fengguang 
3159adea02a1SWu Fengguang 	return nr;
3160adea02a1SWu Fengguang }
3161adea02a1SWu Fengguang 
3162adea02a1SWu Fengguang unsigned long zone_reclaimable_pages(struct zone *zone)
3163adea02a1SWu Fengguang {
3164adea02a1SWu Fengguang 	int nr;
3165adea02a1SWu Fengguang 
3166adea02a1SWu Fengguang 	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
3167adea02a1SWu Fengguang 	     zone_page_state(zone, NR_INACTIVE_FILE);
3168adea02a1SWu Fengguang 
3169adea02a1SWu Fengguang 	if (nr_swap_pages > 0)
3170adea02a1SWu Fengguang 		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
3171adea02a1SWu Fengguang 		      zone_page_state(zone, NR_INACTIVE_ANON);
3172adea02a1SWu Fengguang 
3173adea02a1SWu Fengguang 	return nr;
31744f98a2feSRik van Riel }
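/*
 * Editor's note: illustrative only. Elsewhere in vmscan.c the kernel
 * treats a zone as still reclaimable while fewer pages have been scanned
 * than six times the reclaimable count; this hypothetical helper merely
 * restates that rule of thumb using the counter computed above.
 */
static inline bool example_zone_still_reclaimable(struct zone *zone)
{
	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
}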
31754f98a2feSRik van Riel 
3176c6f37f12SRafael J. Wysocki #ifdef CONFIG_HIBERNATION
31771da177e4SLinus Torvalds /*
31787b51755cSKOSAKI Motohiro  * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the
3179d6277db4SRafael J. Wysocki  * number of pages freed.
3180d6277db4SRafael J. Wysocki  *
3181d6277db4SRafael J. Wysocki  * Rather than trying to age LRUs, the aim is to preserve the overall
3182d6277db4SRafael J. Wysocki  * LRU order by reclaiming preferentially:
3183d6277db4SRafael J. Wysocki  * inactive > active > active referenced > active mapped
31841da177e4SLinus Torvalds  */
31857b51755cSKOSAKI Motohiro unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
31861da177e4SLinus Torvalds {
3187d6277db4SRafael J. Wysocki 	struct reclaim_state reclaim_state;
3188d6277db4SRafael J. Wysocki 	struct scan_control sc = {
31897b51755cSKOSAKI Motohiro 		.gfp_mask = GFP_HIGHUSER_MOVABLE,
31907b51755cSKOSAKI Motohiro 		.may_swap = 1,
31917b51755cSKOSAKI Motohiro 		.may_unmap = 1,
3192d6277db4SRafael J. Wysocki 		.may_writepage = 1,
31937b51755cSKOSAKI Motohiro 		.nr_to_reclaim = nr_to_reclaim,
31947b51755cSKOSAKI Motohiro 		.hibernation_mode = 1,
31957b51755cSKOSAKI Motohiro 		.order = 0,
31961da177e4SLinus Torvalds 	};
3197a09ed5e0SYing Han 	struct shrink_control shrink = {
3198a09ed5e0SYing Han 		.gfp_mask = sc.gfp_mask,
3199a09ed5e0SYing Han 	};
32007b51755cSKOSAKI Motohiro 	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
32017b51755cSKOSAKI Motohiro 	struct task_struct *p = current;
32027b51755cSKOSAKI Motohiro 	unsigned long nr_reclaimed;
32031da177e4SLinus Torvalds 
32047b51755cSKOSAKI Motohiro 	p->flags |= PF_MEMALLOC;
32057b51755cSKOSAKI Motohiro 	lockdep_set_current_reclaim_state(sc.gfp_mask);
3206d6277db4SRafael J. Wysocki 	reclaim_state.reclaimed_slab = 0;
32077b51755cSKOSAKI Motohiro 	p->reclaim_state = &reclaim_state;
3208d6277db4SRafael J. Wysocki 
3209a09ed5e0SYing Han 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
3210d6277db4SRafael J. Wysocki 
32117b51755cSKOSAKI Motohiro 	p->reclaim_state = NULL;
32127b51755cSKOSAKI Motohiro 	lockdep_clear_current_reclaim_state();
32137b51755cSKOSAKI Motohiro 	p->flags &= ~PF_MEMALLOC;
3214d6277db4SRafael J. Wysocki 
32157b51755cSKOSAKI Motohiro 	return nr_reclaimed;
32161da177e4SLinus Torvalds }
3217c6f37f12SRafael J. Wysocki #endif /* CONFIG_HIBERNATION */
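/*
 * Editor's note: a hedged sketch of how the hibernation core might use
 * shrink_all_memory(); the real caller lives in kernel/power/, and the
 * helper name and the 50% target below are made up for illustration.
 */
static inline void example_free_pages_for_hibernation(void)
{
#ifdef CONFIG_HIBERNATION
	unsigned long to_free = totalram_pages / 2;	/* hypothetical goal */
	unsigned long freed = shrink_all_memory(to_free);

	pr_info("hibernation sketch: freed %lu of %lu requested pages\n",
		freed, to_free);
#endif
}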
32181da177e4SLinus Torvalds 
32191da177e4SLinus Torvalds /* It's optimal to keep kswapds on the same CPUs as their memory, but
32201da177e4SLinus Torvalds    not required for correctness.  So if the last CPU in a node goes
32211da177e4SLinus Torvalds    away, kswapd is allowed to run anywhere; when the first CPU of that
32221da177e4SLinus Torvalds    node comes back, its CPU binding is restored. */
32239c7b216dSChandra Seetharaman static int __devinit cpu_callback(struct notifier_block *nfb,
322469e05944SAndrew Morton 				  unsigned long action, void *hcpu)
32251da177e4SLinus Torvalds {
322658c0a4a7SYasunori Goto 	int nid;
32271da177e4SLinus Torvalds 
32288bb78442SRafael J. Wysocki 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
322958c0a4a7SYasunori Goto 		for_each_node_state(nid, N_HIGH_MEMORY) {
3230c5f59f08SMike Travis 			pg_data_t *pgdat = NODE_DATA(nid);
3231a70f7302SRusty Russell 			const struct cpumask *mask;
3232a70f7302SRusty Russell 
3233a70f7302SRusty Russell 			mask = cpumask_of_node(pgdat->node_id);
3234c5f59f08SMike Travis 
32353e597945SRusty Russell 			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
32361da177e4SLinus Torvalds 				/* One of our CPUs online: restore mask */
3237c5f59f08SMike Travis 				set_cpus_allowed_ptr(pgdat->kswapd, mask);
32381da177e4SLinus Torvalds 		}
32391da177e4SLinus Torvalds 	}
32401da177e4SLinus Torvalds 	return NOTIFY_OK;
32411da177e4SLinus Torvalds }
32421da177e4SLinus Torvalds 
32433218ae14SYasunori Goto /*
32443218ae14SYasunori Goto  * This kswapd start function will be called by init and node-hot-add.
32453218ae14SYasunori Goto  * On node hot-add, kswapd will be moved to the proper CPUs once those CPUs are hot-added.
32463218ae14SYasunori Goto  */
32473218ae14SYasunori Goto int kswapd_run(int nid)
32483218ae14SYasunori Goto {
32493218ae14SYasunori Goto 	pg_data_t *pgdat = NODE_DATA(nid);
32503218ae14SYasunori Goto 	int ret = 0;
32513218ae14SYasunori Goto 
32523218ae14SYasunori Goto 	if (pgdat->kswapd)
32533218ae14SYasunori Goto 		return 0;
32543218ae14SYasunori Goto 
32553218ae14SYasunori Goto 	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
32563218ae14SYasunori Goto 	if (IS_ERR(pgdat->kswapd)) {
32573218ae14SYasunori Goto 		/* failure at boot is fatal */
32583218ae14SYasunori Goto 		BUG_ON(system_state == SYSTEM_BOOTING);
32593218ae14SYasunori Goto 		printk(KERN_ERR "Failed to start kswapd on node %d\n", nid);
32603218ae14SYasunori Goto 		ret = -1;
32613218ae14SYasunori Goto 	}
32623218ae14SYasunori Goto 	return ret;
32633218ae14SYasunori Goto }
32643218ae14SYasunori Goto 
32658fe23e05SDavid Rientjes /*
32668fe23e05SDavid Rientjes  * Called by memory hotplug when all memory in a node is offlined.
32678fe23e05SDavid Rientjes  */
32688fe23e05SDavid Rientjes void kswapd_stop(int nid)
32698fe23e05SDavid Rientjes {
32708fe23e05SDavid Rientjes 	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
32718fe23e05SDavid Rientjes 
32728fe23e05SDavid Rientjes 	if (kswapd)
32738fe23e05SDavid Rientjes 		kthread_stop(kswapd);
32748fe23e05SDavid Rientjes }
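/*
 * Editor's note: an illustrative pairing of kswapd_run()/kswapd_stop()
 * in the way memory hotplug uses them (the real callers are in
 * mm/memory_hotplug.c); this simplified helper is hypothetical.
 */
static inline int example_node_memory_event(int nid, bool onlining)
{
	if (onlining)
		return kswapd_run(nid);	/* node gained memory: start kswapd */
	kswapd_stop(nid);		/* node fully offlined: stop kswapd */
	return 0;
}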
32758fe23e05SDavid Rientjes 
32761da177e4SLinus Torvalds static int __init kswapd_init(void)
32771da177e4SLinus Torvalds {
32783218ae14SYasunori Goto 	int nid;
327969e05944SAndrew Morton 
32801da177e4SLinus Torvalds 	swap_setup();
32819422ffbaSChristoph Lameter 	for_each_node_state(nid, N_HIGH_MEMORY)
32823218ae14SYasunori Goto 		kswapd_run(nid);
32831da177e4SLinus Torvalds 	hotcpu_notifier(cpu_callback, 0);
32841da177e4SLinus Torvalds 	return 0;
32851da177e4SLinus Torvalds }
32861da177e4SLinus Torvalds 
32871da177e4SLinus Torvalds module_init(kswapd_init)
32889eeff239SChristoph Lameter 
32899eeff239SChristoph Lameter #ifdef CONFIG_NUMA
32909eeff239SChristoph Lameter /*
32919eeff239SChristoph Lameter  * Zone reclaim mode
32929eeff239SChristoph Lameter  *
32939eeff239SChristoph Lameter  * If non-zero, call zone_reclaim() when the number of free pages falls below
32949eeff239SChristoph Lameter  * the watermarks.
32959eeff239SChristoph Lameter  */
32969eeff239SChristoph Lameter int zone_reclaim_mode __read_mostly;
32979eeff239SChristoph Lameter 
32981b2ffb78SChristoph Lameter #define RECLAIM_OFF 0
32997d03431cSFernando Luis Vazquez Cao #define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
33001b2ffb78SChristoph Lameter #define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
33011b2ffb78SChristoph Lameter #define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */
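/*
 * Editor's note: an illustrative table, not part of vmscan.c, of how the
 * bits above combine into zone_reclaim_mode sysctl values (a hedged
 * restatement of Documentation/sysctl/vm.txt).
 */
static const int example_zone_reclaim_modes[] __maybe_unused = {
	RECLAIM_OFF,				/* 0: zone reclaim disabled */
	RECLAIM_ZONE,				/* 1: reclaim clean page cache */
	RECLAIM_ZONE | RECLAIM_WRITE,		/* 3: may also write dirty pages */
	RECLAIM_ZONE | RECLAIM_WRITE | RECLAIM_SWAP, /* 7: may also swap pages */
};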
33021b2ffb78SChristoph Lameter 
33039eeff239SChristoph Lameter /*
3304a92f7126SChristoph Lameter  * Priority for ZONE_RECLAIM. This determines the fraction of a zone's
3305a92f7126SChristoph Lameter  * pages considered on each zone_reclaim pass. A priority of 4 scans
3306a92f7126SChristoph Lameter  * 1/16th of a zone.
3307a92f7126SChristoph Lameter  */
3308a92f7126SChristoph Lameter #define ZONE_RECLAIM_PRIORITY 4
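/*
 * Editor's note: a worked example of the priority arithmetic, assuming
 * the per-pass scan target scales as lru_pages >> priority (which is how
 * get_scan_count() sizes its scan batches). With ZONE_RECLAIM_PRIORITY
 * == 4, the first pass covers about 1/16th of the zone; each lower
 * priority doubles the coverage.
 */
static inline unsigned long example_scan_target(unsigned long lru_pages,
						int priority)
{
	return lru_pages >> priority;	/* priority 4 -> lru_pages / 16 */
}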
3309a92f7126SChristoph Lameter 
33109eeff239SChristoph Lameter /*
33119614634fSChristoph Lameter  * Percentage of pages in a zone that must be unmapped for zone_reclaim to
33129614634fSChristoph Lameter  * occur.
33139614634fSChristoph Lameter  */
33149614634fSChristoph Lameter int sysctl_min_unmapped_ratio = 1;
33159614634fSChristoph Lameter 
33169614634fSChristoph Lameter /*
33170ff38490SChristoph Lameter  * If the number of slab pages in a zone grows beyond this percentage then
33180ff38490SChristoph Lameter  * slab reclaim needs to occur.
33190ff38490SChristoph Lameter  */
33200ff38490SChristoph Lameter int sysctl_min_slab_ratio = 5;
33210ff38490SChristoph Lameter 
332290afa5deSMel Gorman static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
332390afa5deSMel Gorman {
332490afa5deSMel Gorman 	unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
332590afa5deSMel Gorman 	unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
332690afa5deSMel Gorman 		zone_page_state(zone, NR_ACTIVE_FILE);
332790afa5deSMel Gorman 
332890afa5deSMel Gorman 	/*
332990afa5deSMel Gorman 	 * It's possible for there to be more file mapped pages than
333090afa5deSMel Gorman 	 * accounted for by the pages on the file LRU lists because
333190afa5deSMel Gorman 	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
333290afa5deSMel Gorman 	 */
333390afa5deSMel Gorman 	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
333490afa5deSMel Gorman }
333590afa5deSMel Gorman 
333690afa5deSMel Gorman /* Work out how many page cache pages we can reclaim in this reclaim_mode */
333790afa5deSMel Gorman static long zone_pagecache_reclaimable(struct zone *zone)
333890afa5deSMel Gorman {
333990afa5deSMel Gorman 	long nr_pagecache_reclaimable;
334090afa5deSMel Gorman 	long delta = 0;
334190afa5deSMel Gorman 
334290afa5deSMel Gorman 	/*
334390afa5deSMel Gorman 	 * If RECLAIM_SWAP is set, then all file pages are considered
334490afa5deSMel Gorman 	 * potentially reclaimable. Otherwise, we have to worry about
334590afa5deSMel Gorman 	 * pages like swapcache, and zone_unmapped_file_pages() provides
334690afa5deSMel Gorman 	 * a better estimate.
334790afa5deSMel Gorman 	 */
334890afa5deSMel Gorman 	if (zone_reclaim_mode & RECLAIM_SWAP)
334990afa5deSMel Gorman 		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
335090afa5deSMel Gorman 	else
335190afa5deSMel Gorman 		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
335290afa5deSMel Gorman 
335390afa5deSMel Gorman 	/* If we can't clean pages, remove dirty pages from consideration */
335490afa5deSMel Gorman 	if (!(zone_reclaim_mode & RECLAIM_WRITE))
335590afa5deSMel Gorman 		delta += zone_page_state(zone, NR_FILE_DIRTY);
335690afa5deSMel Gorman 
335790afa5deSMel Gorman 	/* Watch for any possible underflows due to delta */
335890afa5deSMel Gorman 	if (unlikely(delta > nr_pagecache_reclaimable))
335990afa5deSMel Gorman 		delta = nr_pagecache_reclaimable;
336090afa5deSMel Gorman 
336190afa5deSMel Gorman 	return nr_pagecache_reclaimable - delta;
336290afa5deSMel Gorman }
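/*
 * Editor's note: a worked example with made-up numbers. Suppose
 * zone_reclaim_mode == RECLAIM_ZONE, the file LRUs hold 1000 pages,
 * 300 of them mapped and 200 dirty. RECLAIM_SWAP is clear, so
 * zone_unmapped_file_pages() yields 1000 - 300 = 700; RECLAIM_WRITE is
 * clear, so the 200 dirty pages are subtracted, and
 * zone_pagecache_reclaimable() reports 500 pages.
 */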
336390afa5deSMel Gorman 
33640ff38490SChristoph Lameter /*
33659eeff239SChristoph Lameter  * Try to free up some pages from this zone through reclaim.
33669eeff239SChristoph Lameter  */
3367179e9639SAndrew Morton static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
33689eeff239SChristoph Lameter {
33697fb2d46dSChristoph Lameter 	/* Minimum pages needed in order to stay on node */
337069e05944SAndrew Morton 	const unsigned long nr_pages = 1 << order;
33719eeff239SChristoph Lameter 	struct task_struct *p = current;
33729eeff239SChristoph Lameter 	struct reclaim_state reclaim_state;
33738695949aSChristoph Lameter 	int priority;
3374179e9639SAndrew Morton 	struct scan_control sc = {
3375179e9639SAndrew Morton 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
3376a6dc60f8SJohannes Weiner 		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
33772e2e4259SKOSAKI Motohiro 		.may_swap = 1,
337822fba335SKOSAKI Motohiro 		.nr_to_reclaim = max_t(unsigned long, nr_pages,
337922fba335SKOSAKI Motohiro 				       SWAP_CLUSTER_MAX),
3380179e9639SAndrew Morton 		.gfp_mask = gfp_mask,
3381bd2f6199SJohannes Weiner 		.order = order,
3382179e9639SAndrew Morton 	};
3383a09ed5e0SYing Han 	struct shrink_control shrink = {
3384a09ed5e0SYing Han 		.gfp_mask = sc.gfp_mask,
3385a09ed5e0SYing Han 	};
338615748048SKOSAKI Motohiro 	unsigned long nr_slab_pages0, nr_slab_pages1;
33879eeff239SChristoph Lameter 
33889eeff239SChristoph Lameter 	cond_resched();
3389d4f7796eSChristoph Lameter 	/*
3390d4f7796eSChristoph Lameter 	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
3391d4f7796eSChristoph Lameter 	 * and we also need to be able to write out pages for RECLAIM_WRITE
3392d4f7796eSChristoph Lameter 	 * and RECLAIM_SWAP.
3393d4f7796eSChristoph Lameter 	 */
3394d4f7796eSChristoph Lameter 	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
339576ca542dSKOSAKI Motohiro 	lockdep_set_current_reclaim_state(gfp_mask);
33969eeff239SChristoph Lameter 	reclaim_state.reclaimed_slab = 0;
33979eeff239SChristoph Lameter 	p->reclaim_state = &reclaim_state;
3398c84db23cSChristoph Lameter 
339990afa5deSMel Gorman 	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
3400a92f7126SChristoph Lameter 		/*
34010ff38490SChristoph Lameter 		 * Free memory by calling shrink zone with increasing
34020ff38490SChristoph Lameter 		 * priorities until we have enough memory freed.
3403a92f7126SChristoph Lameter 		 */
34048695949aSChristoph Lameter 		priority = ZONE_RECLAIM_PRIORITY;
3405a92f7126SChristoph Lameter 		do {
3406a79311c1SRik van Riel 			shrink_zone(priority, zone, &sc);
34078695949aSChristoph Lameter 			priority--;
3408a79311c1SRik van Riel 		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
34090ff38490SChristoph Lameter 	}
3410a92f7126SChristoph Lameter 
341115748048SKOSAKI Motohiro 	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
341215748048SKOSAKI Motohiro 	if (nr_slab_pages0 > zone->min_slab_pages) {
34132a16e3f4SChristoph Lameter 		/*
34147fb2d46dSChristoph Lameter 		 * shrink_slab() does not currently allow us to determine how
34150ff38490SChristoph Lameter 		 * many pages were freed in this zone. So we take the current
34160ff38490SChristoph Lameter 		 * number of slab pages and shake the slab until it is reduced
34170ff38490SChristoph Lameter 		 * by the same nr_pages that we used for reclaiming unmapped
34180ff38490SChristoph Lameter 		 * pages.
34192a16e3f4SChristoph Lameter 		 *
34200ff38490SChristoph Lameter 		 * Note that shrink_slab will free memory on all zones and may
34210ff38490SChristoph Lameter 		 * take a long time.
34222a16e3f4SChristoph Lameter 		 */
34234dc4b3d9SKOSAKI Motohiro 		for (;;) {
34244dc4b3d9SKOSAKI Motohiro 			unsigned long lru_pages = zone_reclaimable_pages(zone);
34254dc4b3d9SKOSAKI Motohiro 
34264dc4b3d9SKOSAKI Motohiro 			/* No reclaimable slab or very low memory pressure */
34271495f230SYing Han 			if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
34284dc4b3d9SKOSAKI Motohiro 				break;
34294dc4b3d9SKOSAKI Motohiro 
34304dc4b3d9SKOSAKI Motohiro 			/* Freed enough memory */
34314dc4b3d9SKOSAKI Motohiro 			nr_slab_pages1 = zone_page_state(zone,
34324dc4b3d9SKOSAKI Motohiro 							NR_SLAB_RECLAIMABLE);
34334dc4b3d9SKOSAKI Motohiro 			if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
34344dc4b3d9SKOSAKI Motohiro 				break;
34354dc4b3d9SKOSAKI Motohiro 		}
343683e33a47SChristoph Lameter 
343783e33a47SChristoph Lameter 		/*
343883e33a47SChristoph Lameter 		 * Update nr_reclaimed by the number of slab pages we
343983e33a47SChristoph Lameter 		 * reclaimed from this zone.
344083e33a47SChristoph Lameter 		 */
344115748048SKOSAKI Motohiro 		nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
344215748048SKOSAKI Motohiro 		if (nr_slab_pages1 < nr_slab_pages0)
344315748048SKOSAKI Motohiro 			sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
34442a16e3f4SChristoph Lameter 	}
34452a16e3f4SChristoph Lameter 
34469eeff239SChristoph Lameter 	p->reclaim_state = NULL;
3447d4f7796eSChristoph Lameter 	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
344876ca542dSKOSAKI Motohiro 	lockdep_clear_current_reclaim_state();
3449a79311c1SRik van Riel 	return sc.nr_reclaimed >= nr_pages;
34509eeff239SChristoph Lameter }
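/*
 * Editor's note: a worked example of the slab accounting above, with
 * made-up numbers. If NR_SLAB_RECLAIMABLE stood at 1000 pages before the
 * shrink loop (nr_slab_pages0) and at 940 afterwards (nr_slab_pages1),
 * the 60-page difference is credited to sc.nr_reclaimed, because
 * shrink_slab() does not itself report how many pages it freed in this
 * zone.
 */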
3451179e9639SAndrew Morton 
3452179e9639SAndrew Morton int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3453179e9639SAndrew Morton {
3454179e9639SAndrew Morton 	int node_id;
3455d773ed6bSDavid Rientjes 	int ret;
3456179e9639SAndrew Morton 
3457179e9639SAndrew Morton 	/*
34580ff38490SChristoph Lameter 	 * Zone reclaim reclaims unmapped file backed pages and
34590ff38490SChristoph Lameter 	 * slab pages if we are over the defined limits.
346034aa1330SChristoph Lameter 	 *
34619614634fSChristoph Lameter 	 * A small portion of unmapped file backed pages is needed for
34629614634fSChristoph Lameter 	 * file I/O; otherwise pages read by file I/O will be immediately
34639614634fSChristoph Lameter 	 * thrown out if the zone is overallocated. So we do not reclaim
34649614634fSChristoph Lameter 	 * if less than a specified percentage of the zone is used by
34659614634fSChristoph Lameter 	 * unmapped file backed pages.
3466179e9639SAndrew Morton 	 */
346790afa5deSMel Gorman 	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
346890afa5deSMel Gorman 	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
3469fa5e084eSMel Gorman 		return ZONE_RECLAIM_FULL;
3470179e9639SAndrew Morton 
347193e4a89aSKOSAKI Motohiro 	if (zone->all_unreclaimable)
3472fa5e084eSMel Gorman 		return ZONE_RECLAIM_FULL;
3473d773ed6bSDavid Rientjes 
3474179e9639SAndrew Morton 	/*
3475d773ed6bSDavid Rientjes 	 * Do not scan if the allocation should not be delayed.
3476179e9639SAndrew Morton 	 */
3477d773ed6bSDavid Rientjes 	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
3478fa5e084eSMel Gorman 		return ZONE_RECLAIM_NOSCAN;
3479179e9639SAndrew Morton 
3480179e9639SAndrew Morton 	/*
3481179e9639SAndrew Morton 	 * Only run zone reclaim on the local zone or on zones that do not
3482179e9639SAndrew Morton 	 * have associated processors. This will favor the local processor
3483179e9639SAndrew Morton 	 * over remote processors and spread off node memory allocations
3484179e9639SAndrew Morton 	 * as wide as possible.
3485179e9639SAndrew Morton 	 */
348689fa3024SChristoph Lameter 	node_id = zone_to_nid(zone);
348737c0708dSChristoph Lameter 	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
3488fa5e084eSMel Gorman 		return ZONE_RECLAIM_NOSCAN;
3489d773ed6bSDavid Rientjes 
3490d773ed6bSDavid Rientjes 	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
3491fa5e084eSMel Gorman 		return ZONE_RECLAIM_NOSCAN;
3492fa5e084eSMel Gorman 
3493d773ed6bSDavid Rientjes 	ret = __zone_reclaim(zone, gfp_mask, order);
3494d773ed6bSDavid Rientjes 	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
3495d773ed6bSDavid Rientjes 
349624cf7251SMel Gorman 	if (!ret)
349724cf7251SMel Gorman 		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
349824cf7251SMel Gorman 
3499d773ed6bSDavid Rientjes 	return ret;
3500179e9639SAndrew Morton }
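/*
 * Editor's note: a hedged sketch of how the page allocator consumes
 * zone_reclaim()'s return value; the real logic lives in
 * mm/page_alloc.c:get_page_from_freelist() and this helper is a
 * simplified, hypothetical restatement.
 */
static inline bool example_zone_usable_after_reclaim(struct zone *zone,
						     gfp_t gfp_mask,
						     unsigned int order,
						     unsigned long mark)
{
	switch (zone_reclaim(zone, gfp_mask, order)) {
	case ZONE_RECLAIM_NOSCAN:	/* did not scan: try another zone */
	case ZONE_RECLAIM_FULL:		/* scanned but freed too little */
		return false;
	default:
		/* did we reclaim enough to pass the watermark? */
		return zone_watermark_ok(zone, order, mark, 0, 0);
	}
}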
35019eeff239SChristoph Lameter #endif
3502894bc310SLee Schermerhorn 
3503894bc310SLee Schermerhorn /*
3504894bc310SLee Schermerhorn  * page_evictable - test whether a page is evictable
3505894bc310SLee Schermerhorn  * @page: the page to test
3506894bc310SLee Schermerhorn  * @vma: the VMA in which the page is or will be mapped, may be NULL
3507894bc310SLee Schermerhorn  *
3508894bc310SLee Schermerhorn  * Test whether page is evictable--i.e., should be placed on active/inactive
3509b291f000SNick Piggin  * lists vs unevictable list.  The vma argument is !NULL when called from the
3510b291f000SNick Piggin  * fault path to determine how to instantiate a new page.
3511894bc310SLee Schermerhorn  *
3512894bc310SLee Schermerhorn  * Reasons page might not be evictable:
3513ba9ddf49SLee Schermerhorn  * (1) page's mapping marked unevictable
3514b291f000SNick Piggin  * (2) page is part of an mlocked VMA
3515ba9ddf49SLee Schermerhorn  *
3516894bc310SLee Schermerhorn  */
3517894bc310SLee Schermerhorn int page_evictable(struct page *page, struct vm_area_struct *vma)
3518894bc310SLee Schermerhorn {
3519894bc310SLee Schermerhorn 
3520ba9ddf49SLee Schermerhorn 	if (mapping_unevictable(page_mapping(page)))
3521ba9ddf49SLee Schermerhorn 		return 0;
3522ba9ddf49SLee Schermerhorn 
3523b291f000SNick Piggin 	if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
3524b291f000SNick Piggin 		return 0;
3525894bc310SLee Schermerhorn 
3526894bc310SLee Schermerhorn 	return 1;
3527894bc310SLee Schermerhorn }
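/*
 * Editor's note: illustrative use of page_evictable() with a NULL vma,
 * as the reclaim and LRU-putback paths do; the helper is hypothetical.
 */
static inline enum lru_list example_choose_lru(struct page *page)
{
	if (!page_evictable(page, NULL))
		return LRU_UNEVICTABLE;
	return page_lru_base_type(page);	/* inactive anon or file list */
}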
352889e004eaSLee Schermerhorn 
352985046579SHugh Dickins #ifdef CONFIG_SHMEM
353089e004eaSLee Schermerhorn /**
353124513264SHugh Dickins  * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
353224513264SHugh Dickins  * @pages:	array of pages to check
353324513264SHugh Dickins  * @nr_pages:	number of pages to check
353489e004eaSLee Schermerhorn  *
353524513264SHugh Dickins  * Checks pages for evictability and moves them to the appropriate lru list.
353685046579SHugh Dickins  *
353785046579SHugh Dickins  * This function is only used for SysV IPC SHM_UNLOCK.
353889e004eaSLee Schermerhorn  */
353924513264SHugh Dickins void check_move_unevictable_pages(struct page **pages, int nr_pages)
354089e004eaSLee Schermerhorn {
3541925b7673SJohannes Weiner 	struct lruvec *lruvec;
354224513264SHugh Dickins 	struct zone *zone = NULL;
354324513264SHugh Dickins 	int pgscanned = 0;
354424513264SHugh Dickins 	int pgrescued = 0;
354589e004eaSLee Schermerhorn 	int i;
354689e004eaSLee Schermerhorn 
354724513264SHugh Dickins 	for (i = 0; i < nr_pages; i++) {
354824513264SHugh Dickins 		struct page *page = pages[i];
354924513264SHugh Dickins 		struct zone *pagezone;
355089e004eaSLee Schermerhorn 
355124513264SHugh Dickins 		pgscanned++;
355224513264SHugh Dickins 		pagezone = page_zone(page);
355389e004eaSLee Schermerhorn 		if (pagezone != zone) {
355489e004eaSLee Schermerhorn 			if (zone)
355589e004eaSLee Schermerhorn 				spin_unlock_irq(&zone->lru_lock);
355689e004eaSLee Schermerhorn 			zone = pagezone;
355789e004eaSLee Schermerhorn 			spin_lock_irq(&zone->lru_lock);
355889e004eaSLee Schermerhorn 		}
355989e004eaSLee Schermerhorn 
356024513264SHugh Dickins 		if (!PageLRU(page) || !PageUnevictable(page))
356124513264SHugh Dickins 			continue;
356289e004eaSLee Schermerhorn 
356324513264SHugh Dickins 		if (page_evictable(page, NULL)) {
356424513264SHugh Dickins 			enum lru_list lru = page_lru_base_type(page);
356524513264SHugh Dickins 
356624513264SHugh Dickins 			VM_BUG_ON(PageActive(page));
356724513264SHugh Dickins 			ClearPageUnevictable(page);
356824513264SHugh Dickins 			__dec_zone_state(zone, NR_UNEVICTABLE);
356924513264SHugh Dickins 			lruvec = mem_cgroup_lru_move_lists(zone, page,
357024513264SHugh Dickins 						LRU_UNEVICTABLE, lru);
357124513264SHugh Dickins 			list_move(&page->lru, &lruvec->lists[lru]);
357224513264SHugh Dickins 			__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
357324513264SHugh Dickins 			pgrescued++;
357489e004eaSLee Schermerhorn 		}
357589e004eaSLee Schermerhorn 	}
357624513264SHugh Dickins 
357724513264SHugh Dickins 	if (zone) {
357824513264SHugh Dickins 		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
357924513264SHugh Dickins 		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
358024513264SHugh Dickins 		spin_unlock_irq(&zone->lru_lock);
358124513264SHugh Dickins 	}
358285046579SHugh Dickins }
358385046579SHugh Dickins #endif /* CONFIG_SHMEM */
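/*
 * Editor's note: a hedged sketch of the SysV SHM_UNLOCK caller noted in
 * the kerneldoc above. The real code in ipc/shm.c batches pages from the
 * segment's mapping; here a caller-supplied array stands in for that
 * batching, and the helper name is hypothetical.
 */
static inline void example_shm_unlock_rescue(struct page **pages, int nr)
{
#ifdef CONFIG_SHMEM
	/* move any now-evictable pages back to the normal LRU lists */
	check_move_unevictable_pages(pages, nr);
#endif
}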
3584af936a16SLee Schermerhorn 
3585264e56d8SJohannes Weiner static void warn_scan_unevictable_pages(void)
3586af936a16SLee Schermerhorn {
3587264e56d8SJohannes Weiner 	printk_once(KERN_WARNING
358825bd91bdSKOSAKI Motohiro 		    "%s: The scan_unevictable_pages sysctl/node-interface has been "
3589264e56d8SJohannes Weiner 		    "disabled for lack of a legitimate use case.  If you have "
359025bd91bdSKOSAKI Motohiro 		    "one, please send an email to linux-mm@kvack.org.\n",
359125bd91bdSKOSAKI Motohiro 		    current->comm);
3592af936a16SLee Schermerhorn }
3593af936a16SLee Schermerhorn 
3594af936a16SLee Schermerhorn /*
3595af936a16SLee Schermerhorn  * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
3596af936a16SLee Schermerhorn  * all nodes' unevictable lists for evictable pages
3597af936a16SLee Schermerhorn  */
3598af936a16SLee Schermerhorn unsigned long scan_unevictable_pages;
3599af936a16SLee Schermerhorn 
3600af936a16SLee Schermerhorn int scan_unevictable_handler(struct ctl_table *table, int write,
36018d65af78SAlexey Dobriyan 			   void __user *buffer,
3602af936a16SLee Schermerhorn 			   size_t *length, loff_t *ppos)
3603af936a16SLee Schermerhorn {
3604264e56d8SJohannes Weiner 	warn_scan_unevictable_pages();
36058d65af78SAlexey Dobriyan 	proc_doulongvec_minmax(table, write, buffer, length, ppos);
3606af936a16SLee Schermerhorn 	scan_unevictable_pages = 0;
3607af936a16SLee Schermerhorn 	return 0;
3608af936a16SLee Schermerhorn }
3609af936a16SLee Schermerhorn 
3610e4455abbSThadeu Lima de Souza Cascardo #ifdef CONFIG_NUMA
3611af936a16SLee Schermerhorn /*
3612af936a16SLee Schermerhorn  * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
3613af936a16SLee Schermerhorn  * a specified node's per zone unevictable lists for evictable pages.
3614af936a16SLee Schermerhorn  */
3615af936a16SLee Schermerhorn 
361610fbcf4cSKay Sievers static ssize_t read_scan_unevictable_node(struct device *dev,
361710fbcf4cSKay Sievers 					  struct device_attribute *attr,
3618af936a16SLee Schermerhorn 					  char *buf)
3619af936a16SLee Schermerhorn {
3620264e56d8SJohannes Weiner 	warn_scan_unevictable_pages();
3621af936a16SLee Schermerhorn 	return sprintf(buf, "0\n");	/* always zero; should fit... */
3622af936a16SLee Schermerhorn }
3623af936a16SLee Schermerhorn 
362410fbcf4cSKay Sievers static ssize_t write_scan_unevictable_node(struct device *dev,
362510fbcf4cSKay Sievers 					   struct device_attribute *attr,
3626af936a16SLee Schermerhorn 					const char *buf, size_t count)
3627af936a16SLee Schermerhorn {
3628264e56d8SJohannes Weiner 	warn_scan_unevictable_pages();
3629af936a16SLee Schermerhorn 	return 1;
3630af936a16SLee Schermerhorn }
3631af936a16SLee Schermerhorn 
3632af936a16SLee Schermerhorn 
363310fbcf4cSKay Sievers static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
3634af936a16SLee Schermerhorn 			read_scan_unevictable_node,
3635af936a16SLee Schermerhorn 			write_scan_unevictable_node);
3636af936a16SLee Schermerhorn 
3637af936a16SLee Schermerhorn int scan_unevictable_register_node(struct node *node)
3638af936a16SLee Schermerhorn {
363910fbcf4cSKay Sievers 	return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
3640af936a16SLee Schermerhorn }
3641af936a16SLee Schermerhorn 
3642af936a16SLee Schermerhorn void scan_unevictable_unregister_node(struct node *node)
3643af936a16SLee Schermerhorn {
364410fbcf4cSKay Sievers 	device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
3645af936a16SLee Schermerhorn }
3646e4455abbSThadeu Lima de Souza Cascardo #endif
3647