xref: /openbmc/linux/mm/vmscan.c (revision bc57e00f5e0b2480ef222c775c49552d3a930db7)
1 /*
2  *  linux/mm/vmscan.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  *
6  *  Swap reorganised 29.12.95, Stephen Tweedie.
7  *  kswapd added: 7.1.96  sct
8  *  Removed kswapd_ctl limits, and swap out as many pages as needed
9  *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10  *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11  *  Multiqueue VM started 5.8.00, Rik van Riel.
12  */
13 
14 #include <linux/mm.h>
15 #include <linux/module.h>
16 #include <linux/gfp.h>
17 #include <linux/kernel_stat.h>
18 #include <linux/swap.h>
19 #include <linux/pagemap.h>
20 #include <linux/init.h>
21 #include <linux/highmem.h>
22 #include <linux/vmstat.h>
23 #include <linux/file.h>
24 #include <linux/writeback.h>
25 #include <linux/blkdev.h>
26 #include <linux/buffer_head.h>	/* for try_to_release_page(),
27 					buffer_heads_over_limit */
28 #include <linux/mm_inline.h>
29 #include <linux/pagevec.h>
30 #include <linux/backing-dev.h>
31 #include <linux/rmap.h>
32 #include <linux/topology.h>
33 #include <linux/cpu.h>
34 #include <linux/cpuset.h>
35 #include <linux/notifier.h>
36 #include <linux/rwsem.h>
37 #include <linux/delay.h>
38 #include <linux/kthread.h>
39 #include <linux/freezer.h>
40 #include <linux/memcontrol.h>
41 #include <linux/delayacct.h>
42 #include <linux/sysctl.h>
43 
44 #include <asm/tlbflush.h>
45 #include <asm/div64.h>
46 
47 #include <linux/swapops.h>
48 
49 #include "internal.h"
50 
51 #define CREATE_TRACE_POINTS
52 #include <trace/events/vmscan.h>
53 
54 struct scan_control {
55 	/* Incremented by the number of inactive pages that were scanned */
56 	unsigned long nr_scanned;
57 
58 	/* Number of pages freed so far during a call to shrink_zones() */
59 	unsigned long nr_reclaimed;
60 
61 	/* How many pages shrink_list() should reclaim */
62 	unsigned long nr_to_reclaim;
63 
64 	unsigned long hibernation_mode;	/* set when reclaiming for hibernation */
65 
66 	/* This context's GFP mask */
67 	gfp_t gfp_mask;
68 
69 	int may_writepage;	/* Can dirty pages be written back? */
70 
71 	/* Can mapped pages be reclaimed? */
72 	int may_unmap;
73 
74 	/* Can pages be swapped as part of reclaim? */
75 	int may_swap;
76 
77 	int swappiness;		/* Swappiness: 0..100, anon vs file bias */
78 
79 	int order;		/* Allocation order that triggered reclaim */
80 
81 	/*
82 	 * Intended to reclaim enough contiguous memory rather than just
83 	 * enough memory, i.e. the mode for high-order allocations.
84 	 */
85 	bool lumpy_reclaim_mode;
86 
87 	/* Which cgroup do we reclaim from */
88 	struct mem_cgroup *mem_cgroup;
89 
90 	/*
91 	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
92 	 * are scanned.
93 	 */
94 	nodemask_t	*nodemask;
95 };
96 
97 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
98 
99 #ifdef ARCH_HAS_PREFETCH
100 #define prefetch_prev_lru_page(_page, _base, _field)			\
101 	do {								\
102 		if ((_page)->lru.prev != _base) {			\
103 			struct page *prev;				\
104 									\
105 			prev = lru_to_page(&(_page->lru));		\
106 			prefetch(&prev->_field);			\
107 		}							\
108 	} while (0)
109 #else
110 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
111 #endif
112 
113 #ifdef ARCH_HAS_PREFETCHW
114 #define prefetchw_prev_lru_page(_page, _base, _field)			\
115 	do {								\
116 		if ((_page)->lru.prev != _base) {			\
117 			struct page *prev;				\
118 									\
119 			prev = lru_to_page(&(_page->lru));		\
120 			prefetchw(&prev->_field);			\
121 		}							\
122 	} while (0)
123 #else
124 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
125 #endif
126 
127 /*
128  * From 0 .. 100.  Higher means more swappy.
129  */
130 int vm_swappiness = 60;
131 long vm_total_pages;	/* The total number of pages which the VM controls */
132 
133 static LIST_HEAD(shrinker_list);
134 static DECLARE_RWSEM(shrinker_rwsem);
135 
136 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
137 #define scanning_global_lru(sc)	(!(sc)->mem_cgroup)
138 #else
139 #define scanning_global_lru(sc)	(1)
140 #endif
141 
142 static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
143 						  struct scan_control *sc)
144 {
145 	if (!scanning_global_lru(sc))
146 		return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
147 
148 	return &zone->reclaim_stat;
149 }
150 
151 static unsigned long zone_nr_lru_pages(struct zone *zone,
152 				struct scan_control *sc, enum lru_list lru)
153 {
154 	if (!scanning_global_lru(sc))
155 		return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
156 
157 	return zone_page_state(zone, NR_LRU_BASE + lru);
158 }
159 
160 
161 /*
162  * Add a shrinker callback to be called from the vm
163  */
164 void register_shrinker(struct shrinker *shrinker)
165 {
166 	shrinker->nr = 0;
167 	down_write(&shrinker_rwsem);
168 	list_add_tail(&shrinker->list, &shrinker_list);
169 	up_write(&shrinker_rwsem);
170 }
171 EXPORT_SYMBOL(register_shrinker);
172 
173 /*
174  * Remove one
175  */
176 void unregister_shrinker(struct shrinker *shrinker)
177 {
178 	down_write(&shrinker_rwsem);
179 	list_del(&shrinker->list);
180 	up_write(&shrinker_rwsem);
181 }
182 EXPORT_SYMBOL(unregister_shrinker);
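
/*
 * A minimal usage sketch for the shrinker API above (illustrative only;
 * example_count_objects() and example_trim_cache() are hypothetical
 * stand-ins for a real cache's bookkeeping).
 */
#if 0
static int example_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask)
{
	/* nr_to_scan == 0 asks only for the current object count. */
	if (!nr_to_scan)
		return example_count_objects();

	/* Tell shrink_slab() to back off if we cannot shrink now. */
	if (!(gfp_mask & __GFP_FS))
		return -1;

	example_trim_cache(nr_to_scan);
	return example_count_objects();
}

static struct shrinker example_shrinker = {
	.shrink	= example_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* Call register_shrinker(&example_shrinker) at init time and
 * unregister_shrinker(&example_shrinker) on teardown. */
#endif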
183 
184 #define SHRINK_BATCH 128
185 /*
186  * Call the shrink functions to age shrinkable caches
187  *
188  * Here we assume it costs one seek to replace a lru page and that it also
189  * takes a seek to recreate a cache object.  With this in mind we age equal
190  * percentages of the lru and ageable caches.  This should balance the seeks
191  * generated by these structures.
192  *
193  * If the vm encountered mapped pages on the LRU, it increases the pressure on
194  * slab to avoid swapping.
195  *
196  * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
197  *
198  * `lru_pages' represents the number of on-LRU pages in all the zones which
199  * are eligible for the caller's allocation attempt.  It is used for balancing
200  * slab reclaim versus page reclaim.
201  *
202  * Returns the number of slab objects which we shrunk.
203  */
204 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
205 			unsigned long lru_pages)
206 {
207 	struct shrinker *shrinker;
208 	unsigned long ret = 0;
209 
210 	if (scanned == 0)
211 		scanned = SWAP_CLUSTER_MAX;
212 
213 	if (!down_read_trylock(&shrinker_rwsem))
214 		return 1;	/* Assume we'll be able to shrink next time */
215 
216 	list_for_each_entry(shrinker, &shrinker_list, list) {
217 		unsigned long long delta;
218 		unsigned long total_scan;
219 		unsigned long max_pass;
220 
221 		max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
222 		delta = (4 * scanned) / shrinker->seeks;
223 		delta *= max_pass;
224 		do_div(delta, lru_pages + 1);
225 		shrinker->nr += delta;
226 		if (shrinker->nr < 0) {
227 			printk(KERN_ERR "shrink_slab: %pF negative objects to "
228 			       "delete nr=%ld\n",
229 			       shrinker->shrink, shrinker->nr);
230 			shrinker->nr = max_pass;
231 		}
232 
233 		/*
234 		 * Avoid risking looping forever due to too large an nr value:
235 		 * never try to free more than twice the estimated number of
236 		 * freeable entries.
237 		 */
238 		if (shrinker->nr > max_pass * 2)
239 			shrinker->nr = max_pass * 2;
240 
241 		total_scan = shrinker->nr;
242 		shrinker->nr = 0;
243 
244 		while (total_scan >= SHRINK_BATCH) {
245 			long this_scan = SHRINK_BATCH;
246 			int shrink_ret;
247 			int nr_before;
248 
249 			nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
250 			shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
251 								gfp_mask);
252 			if (shrink_ret == -1)
253 				break;
254 			if (shrink_ret < nr_before)
255 				ret += nr_before - shrink_ret;
256 			count_vm_events(SLABS_SCANNED, this_scan);
257 			total_scan -= this_scan;
258 
259 			cond_resched();
260 		}
261 
262 		shrinker->nr += total_scan;
263 	}
264 	up_read(&shrinker_rwsem);
265 	return ret;
266 }
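
/*
 * Worked example for the aging math above (illustrative numbers): with
 * scanned = 1000 LRU pages out of lru_pages = 100000, and a cache whose
 * shrinker reports max_pass = 10000 objects at seeks = DEFAULT_SEEKS (2):
 *
 *	delta = (4 * 1000) / 2 = 2000
 *	delta = 2000 * 10000 / (100000 + 1) ~= 200
 *
 * so the cache is asked to scan ~200 of its 10000 objects (2%), twice
 * the 1% pressure the LRU itself saw (the 4/seeks factor with seeks == 2).
 */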
267 
268 static inline int is_page_cache_freeable(struct page *page)
269 {
270 	/*
271 	 * A freeable page cache page is referenced only by the caller
272 	 * that isolated the page, the page cache radix tree and
273 	 * optional buffer heads at page->private.
274 	 */
275 	return page_count(page) - page_has_private(page) == 2;
276 }
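
/*
 * Example (illustrative): for a clean pagecache page with buffer heads,
 * page_count() here is 3 -- the isolating caller's ref, the radix tree
 * ref and one for the buffers -- and page_has_private() is 1, so
 * 3 - 1 == 2 and the page counts as freeable.
 */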
277 
278 static int may_write_to_queue(struct backing_dev_info *bdi)
279 {
280 	if (current->flags & PF_SWAPWRITE)
281 		return 1;
282 	if (!bdi_write_congested(bdi))
283 		return 1;
284 	if (bdi == current->backing_dev_info)
285 		return 1;
286 	return 0;
287 }
288 
289 /*
290  * We detected a synchronous write error writing a page out.  Probably
291  * -ENOSPC.  We need to propagate that into the address_space for a subsequent
292  * fsync(), msync() or close().
293  *
294  * The tricky part is that after writepage we cannot touch the mapping: nothing
295  * prevents it from being freed up.  But we have a ref on the page and once
296  * that page is locked, the mapping is pinned.
297  *
298  * We're allowed to run sleeping lock_page() here because we know the caller has
299  * __GFP_FS.
300  */
301 static void handle_write_error(struct address_space *mapping,
302 				struct page *page, int error)
303 {
304 	lock_page_nosync(page);
305 	if (page_mapping(page) == mapping)
306 		mapping_set_error(mapping, error);
307 	unlock_page(page);
308 }
309 
310 /* Whether pageout should wait for writeback (sync) or not (async). */
311 enum pageout_io {
312 	PAGEOUT_IO_ASYNC,
313 	PAGEOUT_IO_SYNC,
314 };
315 
316 /* possible outcome of pageout() */
317 typedef enum {
318 	/* failed to write page out, page is locked */
319 	PAGE_KEEP,
320 	/* move page to the active list, page is locked */
321 	PAGE_ACTIVATE,
322 	/* page has been sent to the disk successfully, page is unlocked */
323 	PAGE_SUCCESS,
324 	/* page is clean and locked */
325 	PAGE_CLEAN,
326 } pageout_t;
327 
328 /*
329  * pageout is called by shrink_page_list() for each dirty page.
330  * Calls ->writepage().
331  */
332 static pageout_t pageout(struct page *page, struct address_space *mapping,
333 						enum pageout_io sync_writeback)
334 {
335 	/*
336 	 * If the page is dirty, only perform writeback if that write
337 	 * will be non-blocking, to prevent this allocation from being
338 	 * stalled by pagecache activity.  But note that there may be
339 	 * stalls if we need to run get_block().  We could test
340 	 * PagePrivate for that.
341 	 *
342 	 * If this process is currently in __generic_file_aio_write() against
343 	 * this page's queue, we can perform writeback even if that
344 	 * will block.
345 	 *
346 	 * If the page is swapcache, write it back even if that would
347 	 * block, for some throttling. This happens by accident, because
348 	 * swap_backing_dev_info is bust: it doesn't reflect the
349 	 * congestion state of the swapdevs.  Easy to fix, if needed.
350 	 */
351 	if (!is_page_cache_freeable(page))
352 		return PAGE_KEEP;
353 	if (!mapping) {
354 		/*
355 		 * Some data journaling orphaned pages can have
356 		 * page->mapping == NULL while being dirty with clean buffers.
357 		 */
358 		if (page_has_private(page)) {
359 			if (try_to_free_buffers(page)) {
360 				ClearPageDirty(page);
361 				printk("%s: orphaned page\n", __func__);
362 				return PAGE_CLEAN;
363 			}
364 		}
365 		return PAGE_KEEP;
366 	}
367 	if (mapping->a_ops->writepage == NULL)
368 		return PAGE_ACTIVATE;
369 	if (!may_write_to_queue(mapping->backing_dev_info))
370 		return PAGE_KEEP;
371 
372 	if (clear_page_dirty_for_io(page)) {
373 		int res;
374 		struct writeback_control wbc = {
375 			.sync_mode = WB_SYNC_NONE,
376 			.nr_to_write = SWAP_CLUSTER_MAX,
377 			.range_start = 0,
378 			.range_end = LLONG_MAX,
379 			.for_reclaim = 1,
380 		};
381 
382 		SetPageReclaim(page);
383 		res = mapping->a_ops->writepage(page, &wbc);
384 		if (res < 0)
385 			handle_write_error(mapping, page, res);
386 		if (res == AOP_WRITEPAGE_ACTIVATE) {
387 			ClearPageReclaim(page);
388 			return PAGE_ACTIVATE;
389 		}
390 
391 		/*
392 		 * Wait on writeback if requested to. This happens when
393 		 * direct reclaiming a large contiguous area and the
394 		 * first attempt to free a range of pages fails.
395 		 */
396 		if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
397 			wait_on_page_writeback(page);
398 
399 		if (!PageWriteback(page)) {
400 			/* synchronous write or broken a_ops? */
401 			ClearPageReclaim(page);
402 		}
403 		trace_mm_vmscan_writepage(page,
404 			trace_reclaim_flags(page, sync_writeback));
405 		inc_zone_page_state(page, NR_VMSCAN_WRITE);
406 		return PAGE_SUCCESS;
407 	}
408 
409 	return PAGE_CLEAN;
410 }
411 
412 /*
413  * Same as remove_mapping, but if the page is removed from the mapping, it
414  * gets returned with a refcount of 0.
415  */
416 static int __remove_mapping(struct address_space *mapping, struct page *page)
417 {
418 	BUG_ON(!PageLocked(page));
419 	BUG_ON(mapping != page_mapping(page));
420 
421 	spin_lock_irq(&mapping->tree_lock);
422 	/*
423 	 * The non-racy check for a busy page.
424 	 *
425 	 * Must be careful with the order of the tests. When someone has
426 	 * a ref to the page, it may be possible that they dirty it then
427 	 * drop the reference. So if PageDirty is tested before page_count
428 	 * here, then the following race may occur:
429 	 *
430 	 * get_user_pages(&page);
431 	 * [user mapping goes away]
432 	 * write_to(page);
433 	 *				!PageDirty(page)    [good]
434 	 * SetPageDirty(page);
435 	 * put_page(page);
436 	 *				!page_count(page)   [good, discard it]
437 	 *
438 	 * [oops, our write_to data is lost]
439 	 *
440 	 * Reversing the order of the tests ensures such a situation cannot
441 	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
442 	 * load is not satisfied before that of page->_count.
443 	 *
444 	 * Note that if SetPageDirty is always performed via set_page_dirty,
445 	 * and thus under tree_lock, then this ordering is not required.
446 	 */
447 	if (!page_freeze_refs(page, 2))
448 		goto cannot_free;
449 	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
450 	if (unlikely(PageDirty(page))) {
451 		page_unfreeze_refs(page, 2);
452 		goto cannot_free;
453 	}
454 
455 	if (PageSwapCache(page)) {
456 		swp_entry_t swap = { .val = page_private(page) };
457 		__delete_from_swap_cache(page);
458 		spin_unlock_irq(&mapping->tree_lock);
459 		swapcache_free(swap, page);
460 	} else {
461 		__remove_from_page_cache(page);
462 		spin_unlock_irq(&mapping->tree_lock);
463 		mem_cgroup_uncharge_cache_page(page);
464 	}
465 
466 	return 1;
467 
468 cannot_free:
469 	spin_unlock_irq(&mapping->tree_lock);
470 	return 0;
471 }
472 
473 /*
474  * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
475  * someone else has a ref on the page, abort and return 0.  If it was
476  * successfully detached, return 1.  Assumes the caller has a single ref on
477  * this page.
478  */
479 int remove_mapping(struct address_space *mapping, struct page *page)
480 {
481 	if (__remove_mapping(mapping, page)) {
482 		/*
483 		 * Unfreezing the refcount with 1 rather than 2 effectively
484 		 * drops the pagecache ref for us without requiring another
485 		 * atomic operation.
486 		 */
487 		page_unfreeze_refs(page, 1);
488 		return 1;
489 	}
490 	return 0;
491 }
492 
493 /**
494  * putback_lru_page - put previously isolated page onto appropriate LRU list
495  * @page: page to be put back to appropriate lru list
496  *
497  * Add previously isolated @page to appropriate LRU list.
498  * Page may still be unevictable for other reasons.
499  *
500  * lru_lock must not be held, interrupts must be enabled.
501  */
502 void putback_lru_page(struct page *page)
503 {
504 	int lru;
505 	int active = !!TestClearPageActive(page);
506 	int was_unevictable = PageUnevictable(page);
507 
508 	VM_BUG_ON(PageLRU(page));
509 
510 redo:
511 	ClearPageUnevictable(page);
512 
513 	if (page_evictable(page, NULL)) {
514 		/*
515 		 * For evictable pages, we can use the cache.
516 		 * In the event of a race, the worst case is we end up with
517 		 * an unevictable page on the [in]active list.
518 		 * We know how to handle that.
519 		 */
520 		lru = active + page_lru_base_type(page);
521 		lru_cache_add_lru(page, lru);
522 	} else {
523 		/*
524 		 * Put unevictable pages directly on zone's unevictable
525 		 * list.
526 		 */
527 		lru = LRU_UNEVICTABLE;
528 		add_page_to_unevictable_list(page);
529 		/*
530 		 * When racing with an mlock clearing (page is
531 		 * unlocked), make sure that if the other thread does
532 		 * not observe our setting of PG_lru and fails
533 		 * isolation, we see PG_mlocked cleared below and move
534 		 * the page back to the evictable list.
535 		 *
536 		 * The other side is TestClearPageMlocked().
537 		 */
538 		smp_mb();
539 	}
540 
541 	/*
542 	 * The page's status can change while we move it among LRUs. If an
543 	 * evictable page ends up on the unevictable list, it will never be
544 	 * freed. To avoid that, check again after adding it to the list.
545 	 */
546 	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
547 		if (!isolate_lru_page(page)) {
548 			put_page(page);
549 			goto redo;
550 		}
551 		/* This means someone else dropped this page from the LRU,
552 		 * so it will be freed or put back to the LRU again. There
553 		 * is nothing to do here.
554 		 */
555 	}
556 
557 	if (was_unevictable && lru != LRU_UNEVICTABLE)
558 		count_vm_event(UNEVICTABLE_PGRESCUED);
559 	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
560 		count_vm_event(UNEVICTABLE_PGCULLED);
561 
562 	put_page(page);		/* drop ref from isolate */
563 }
564 
565 enum page_references {
566 	PAGEREF_RECLAIM,
567 	PAGEREF_RECLAIM_CLEAN,
568 	PAGEREF_KEEP,
569 	PAGEREF_ACTIVATE,
570 };
571 
572 static enum page_references page_check_references(struct page *page,
573 						  struct scan_control *sc)
574 {
575 	int referenced_ptes, referenced_page;
576 	unsigned long vm_flags;
577 
578 	referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
579 	referenced_page = TestClearPageReferenced(page);
580 
581 	/* Lumpy reclaim - ignore references */
582 	if (sc->lumpy_reclaim_mode)
583 		return PAGEREF_RECLAIM;
584 
585 	/*
586 	 * Mlock lost the isolation race with us.  Let try_to_unmap()
587 	 * move the page to the unevictable list.
588 	 */
589 	if (vm_flags & VM_LOCKED)
590 		return PAGEREF_RECLAIM;
591 
592 	if (referenced_ptes) {
593 		if (PageAnon(page))
594 			return PAGEREF_ACTIVATE;
595 		/*
596 		 * All mapped pages start out with page table
597 		 * references from the instantiating fault, so we need
598 		 * to look twice if a mapped file page is used more
599 		 * than once.
600 		 *
601 		 * Mark it and spare it for another trip around the
602 		 * inactive list.  Another page table reference will
603 		 * lead to its activation.
604 		 *
605 		 * Note: the mark is set for activated pages as well
606 		 * so that recently deactivated but used pages are
607 		 * quickly recovered.
608 		 */
609 		SetPageReferenced(page);
610 
611 		if (referenced_page)
612 			return PAGEREF_ACTIVATE;
613 
614 		return PAGEREF_KEEP;
615 	}
616 
617 	/* Reclaim if clean, defer dirty pages to writeback */
618 	if (referenced_page)
619 		return PAGEREF_RECLAIM_CLEAN;
620 
621 	return PAGEREF_RECLAIM;
622 }
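
/*
 * Example walk-through (illustrative): a used-once mapped file page has
 * referenced_ptes == 1 from its instantiating fault but no PG_referenced,
 * so it is marked and kept (PAGEREF_KEEP).  If it is still unused on the
 * next scan, referenced_ptes == 0 while the mark is set, so it goes to
 * PAGEREF_RECLAIM_CLEAN.  Only a second page table reference while the
 * mark is set yields PAGEREF_ACTIVATE.
 */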
623 
624 static noinline_for_stack void free_page_list(struct list_head *free_pages)
625 {
626 	struct pagevec freed_pvec;
627 	struct page *page, *tmp;
628 
629 	pagevec_init(&freed_pvec, 1);
630 
631 	list_for_each_entry_safe(page, tmp, free_pages, lru) {
632 		list_del(&page->lru);
633 		if (!pagevec_add(&freed_pvec, page)) {
634 			__pagevec_free(&freed_pvec);
635 			pagevec_reinit(&freed_pvec);
636 		}
637 	}
638 
639 	pagevec_free(&freed_pvec);
640 }
641 
642 /*
643  * shrink_page_list() returns the number of reclaimed pages
644  */
645 static unsigned long shrink_page_list(struct list_head *page_list,
646 					struct scan_control *sc,
647 					enum pageout_io sync_writeback)
648 {
649 	LIST_HEAD(ret_pages);
650 	LIST_HEAD(free_pages);
651 	int pgactivate = 0;
652 	unsigned long nr_reclaimed = 0;
653 
654 	cond_resched();
655 
656 	while (!list_empty(page_list)) {
657 		enum page_references references;
658 		struct address_space *mapping;
659 		struct page *page;
660 		int may_enter_fs;
661 
662 		cond_resched();
663 
664 		page = lru_to_page(page_list);
665 		list_del(&page->lru);
666 
667 		if (!trylock_page(page))
668 			goto keep;
669 
670 		VM_BUG_ON(PageActive(page));
671 
672 		sc->nr_scanned++;
673 
674 		if (unlikely(!page_evictable(page, NULL)))
675 			goto cull_mlocked;
676 
677 		if (!sc->may_unmap && page_mapped(page))
678 			goto keep_locked;
679 
680 		/* Double the slab pressure for mapped and swapcache pages */
681 		if (page_mapped(page) || PageSwapCache(page))
682 			sc->nr_scanned++;
683 
684 		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
685 			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
686 
687 		if (PageWriteback(page)) {
688 			/*
689 			 * Synchronous reclaim is performed in two passes,
690 			 * first an asynchronous pass over the list to
691 			 * start parallel writeback, and a second synchronous
692 			 * pass to wait for the IO to complete.  Wait here
693 			 * for any page for which writeback has already
694 			 * started.
695 			 */
696 			if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
697 				wait_on_page_writeback(page);
698 			else
699 				goto keep_locked;
700 		}
701 
702 		references = page_check_references(page, sc);
703 		switch (references) {
704 		case PAGEREF_ACTIVATE:
705 			goto activate_locked;
706 		case PAGEREF_KEEP:
707 			goto keep_locked;
708 		case PAGEREF_RECLAIM:
709 		case PAGEREF_RECLAIM_CLEAN:
710 			; /* try to reclaim the page below */
711 		}
712 
713 		/*
714 		 * Anonymous process memory has backing store?
715 		 * Try to allocate it some swap space here.
716 		 */
717 		if (PageAnon(page) && !PageSwapCache(page)) {
718 			if (!(sc->gfp_mask & __GFP_IO))
719 				goto keep_locked;
720 			if (!add_to_swap(page))
721 				goto activate_locked;
722 			may_enter_fs = 1;
723 		}
724 
725 		mapping = page_mapping(page);
726 
727 		/*
728 		 * The page is mapped into the page tables of one or more
729 		 * processes. Try to unmap it here.
730 		 */
731 		if (page_mapped(page) && mapping) {
732 			switch (try_to_unmap(page, TTU_UNMAP)) {
733 			case SWAP_FAIL:
734 				goto activate_locked;
735 			case SWAP_AGAIN:
736 				goto keep_locked;
737 			case SWAP_MLOCK:
738 				goto cull_mlocked;
739 			case SWAP_SUCCESS:
740 				; /* try to free the page below */
741 			}
742 		}
743 
744 		if (PageDirty(page)) {
745 			if (references == PAGEREF_RECLAIM_CLEAN)
746 				goto keep_locked;
747 			if (!may_enter_fs)
748 				goto keep_locked;
749 			if (!sc->may_writepage)
750 				goto keep_locked;
751 
752 			/* Page is dirty, try to write it out here */
753 			switch (pageout(page, mapping, sync_writeback)) {
754 			case PAGE_KEEP:
755 				goto keep_locked;
756 			case PAGE_ACTIVATE:
757 				goto activate_locked;
758 			case PAGE_SUCCESS:
759 				if (PageWriteback(page) || PageDirty(page))
760 					goto keep;
761 				/*
762 				 * A synchronous write - probably a ramdisk.  Go
763 				 * ahead and try to reclaim the page.
764 				 */
765 				if (!trylock_page(page))
766 					goto keep;
767 				if (PageDirty(page) || PageWriteback(page))
768 					goto keep_locked;
769 				mapping = page_mapping(page);
770 			case PAGE_CLEAN:
771 				; /* try to free the page below */
772 			}
773 		}
774 
775 		/*
776 		 * If the page has buffers, try to free the buffer mappings
777 		 * associated with this page. If we succeed we try to free
778 		 * the page as well.
779 		 *
780 		 * We do this even if the page is PageDirty().
781 		 * try_to_release_page() does not perform I/O, but it is
782 		 * possible for a page to have PageDirty set, but it is actually
783 		 * clean (all its buffers are clean).  This happens if the
784 		 * buffers were written out directly, with submit_bh(). ext3
785 		 * will do this, as well as the blockdev mapping.
786 		 * try_to_release_page() will discover that cleanness and will
787 		 * drop the buffers and mark the page clean - it can be freed.
788 		 *
789 		 * Rarely, pages can have buffers and no ->mapping.  These are
790 		 * the pages which were not successfully invalidated in
791 		 * truncate_complete_page().  We try to drop those buffers here
792 		 * and if that worked, and the page is no longer mapped into
793 		 * process address space (page_count == 1) it can be freed.
794 		 * Otherwise, leave the page on the LRU so it is swappable.
795 		 */
796 		if (page_has_private(page)) {
797 			if (!try_to_release_page(page, sc->gfp_mask))
798 				goto activate_locked;
799 			if (!mapping && page_count(page) == 1) {
800 				unlock_page(page);
801 				if (put_page_testzero(page))
802 					goto free_it;
803 				else {
804 					/*
805 					 * Rare race with a speculative reference:
806 					 * the speculative reference will free
807 					 * this page shortly, so we may
808 					 * increment nr_reclaimed here (and
809 					 * leave it off the LRU).
810 					 */
811 					nr_reclaimed++;
812 					continue;
813 				}
814 			}
815 		}
816 
817 		if (!mapping || !__remove_mapping(mapping, page))
818 			goto keep_locked;
819 
820 		/*
821 		 * At this point, we have no other references and there is
822 		 * no way to pick any more up (removed from LRU, removed
823 		 * from pagecache). Can use non-atomic bitops now (and
824 		 * we obviously don't have to worry about waking up a process
825 		 * waiting on the page lock, because there are no references).
826 		 */
827 		__clear_page_locked(page);
828 free_it:
829 		nr_reclaimed++;
830 
831 		/*
832 		 * Is there a need to call free_page_list() periodically?
833 		 * It would appear not, as the counts should be low.
834 		 */
835 		list_add(&page->lru, &free_pages);
836 		continue;
837 
838 cull_mlocked:
839 		if (PageSwapCache(page))
840 			try_to_free_swap(page);
841 		unlock_page(page);
842 		putback_lru_page(page);
843 		continue;
844 
845 activate_locked:
846 		/* Not a candidate for swapping, so reclaim swap space. */
847 		if (PageSwapCache(page) && vm_swap_full())
848 			try_to_free_swap(page);
849 		VM_BUG_ON(PageActive(page));
850 		SetPageActive(page);
851 		pgactivate++;
852 keep_locked:
853 		unlock_page(page);
854 keep:
855 		list_add(&page->lru, &ret_pages);
856 		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
857 	}
858 
859 	free_page_list(&free_pages);
860 
861 	list_splice(&ret_pages, page_list);
862 	count_vm_events(PGACTIVATE, pgactivate);
863 	return nr_reclaimed;
864 }
865 
866 /*
867  * Attempt to remove the specified page from its LRU.  Only take this page
868  * if it is of the appropriate PageActive status.  Pages which are being
869  * freed elsewhere are also ignored.
870  *
871  * page:	page to consider
872  * mode:	one of the LRU isolation modes defined above
873  *
874  * returns 0 on success, -ve errno on failure.
875  */
876 int __isolate_lru_page(struct page *page, int mode, int file)
877 {
878 	int ret = -EINVAL;
879 
880 	/* Only take pages on the LRU. */
881 	if (!PageLRU(page))
882 		return ret;
883 
884 	/*
885 	 * When checking the active state, we need to be sure we are
886 	 * dealing with comparable boolean values.  Take the logical not
887 	 * of each.
888 	 */
889 	if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
890 		return ret;
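
	/*
	 * E.g. (illustrative): with mode == ISOLATE_ACTIVE (1), !mode == 0,
	 * so only active pages (!PageActive == 0) survive the check above;
	 * with ISOLATE_INACTIVE (0), !mode == 1 and only inactive pages do.
	 */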
891 
892 	if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
893 		return ret;
894 
895 	/*
896 	 * When this function is being called for lumpy reclaim, we
897 	 * initially look into all LRU pages, active, inactive and
898 	 * unevictable; only give shrink_page_list evictable pages.
899 	 */
900 	if (PageUnevictable(page))
901 		return ret;
902 
903 	ret = -EBUSY;
904 
905 	if (likely(get_page_unless_zero(page))) {
906 		/*
907 		 * Be careful not to clear PageLRU until after we're
908 		 * sure the page is not being freed elsewhere -- the
909 		 * page release code relies on it.
910 		 */
911 		ClearPageLRU(page);
912 		ret = 0;
913 	}
914 
915 	return ret;
916 }
917 
918 /*
919  * zone->lru_lock is heavily contended.  Some of the functions that
920  * shrink the lists perform better by taking out a batch of pages
921  * and working on them outside the LRU lock.
922  *
923  * For pagecache intensive workloads, this function is the hottest
924  * spot in the kernel (apart from copy_*_user functions).
925  *
926  * Appropriate locks must be held before calling this function.
927  *
928  * @nr_to_scan:	The number of pages to look through on the list.
929  * @src:	The LRU list to pull pages off.
930  * @dst:	The temp list to put pages on to.
931  * @scanned:	The number of pages that were scanned.
932  * @order:	The caller's attempted allocation order
933  * @mode:	One of the LRU isolation modes
934  * @file:	True [1] if isolating file [!anon] pages
935  *
936  * returns how many pages were moved onto *@dst.
937  */
938 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
939 		struct list_head *src, struct list_head *dst,
940 		unsigned long *scanned, int order, int mode, int file)
941 {
942 	unsigned long nr_taken = 0;
943 	unsigned long nr_lumpy_taken = 0;
944 	unsigned long nr_lumpy_dirty = 0;
945 	unsigned long nr_lumpy_failed = 0;
946 	unsigned long scan;
947 
948 	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
949 		struct page *page;
950 		unsigned long pfn;
951 		unsigned long end_pfn;
952 		unsigned long page_pfn;
953 		int zone_id;
954 
955 		page = lru_to_page(src);
956 		prefetchw_prev_lru_page(page, src, flags);
957 
958 		VM_BUG_ON(!PageLRU(page));
959 
960 		switch (__isolate_lru_page(page, mode, file)) {
961 		case 0:
962 			list_move(&page->lru, dst);
963 			mem_cgroup_del_lru(page);
964 			nr_taken++;
965 			break;
966 
967 		case -EBUSY:
968 			/* else it is being freed elsewhere */
969 			list_move(&page->lru, src);
970 			mem_cgroup_rotate_lru_list(page, page_lru(page));
971 			continue;
972 
973 		default:
974 			BUG();
975 		}
976 
977 		if (!order)
978 			continue;
979 
980 		/*
981 		 * Attempt to take all pages in the order aligned region
982 		 * surrounding the tag page.  Only take those pages of
983 		 * the same active state as that tag page.  We may safely
984 		 * round the target page pfn down to the requested order
985 		 * as the mem_map is guaranteed valid out to MAX_ORDER;
986 		 * where a page is in a different zone we will detect it
987 		 * from its zone id and abort this block scan.
988 		 */
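		/*
		 * Example (illustrative numbers): for order = 2 and
		 * page_pfn = 1013, pfn is rounded down to 1012 and
		 * end_pfn = 1016, so the block scan covers pfns 1012-1015.
		 */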
989 		zone_id = page_zone_id(page);
990 		page_pfn = page_to_pfn(page);
991 		pfn = page_pfn & ~((1 << order) - 1);
992 		end_pfn = pfn + (1 << order);
993 		for (; pfn < end_pfn; pfn++) {
994 			struct page *cursor_page;
995 
996 			/* The target page is in the block, ignore it. */
997 			if (unlikely(pfn == page_pfn))
998 				continue;
999 
1000 			/* Avoid holes within the zone. */
1001 			if (unlikely(!pfn_valid_within(pfn)))
1002 				break;
1003 
1004 			cursor_page = pfn_to_page(pfn);
1005 
1006 			/* Check that we have not crossed a zone boundary. */
1007 			if (unlikely(page_zone_id(cursor_page) != zone_id))
1008 				continue;
1009 
1010 			/*
1011 			 * If we don't have enough swap space, reclaiming
1012 			 * anon pages which don't already have a swap slot is
1013 			 * pointless.
1014 			 */
1015 			if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
1016 					!PageSwapCache(cursor_page))
1017 				continue;
1018 
1019 			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
1020 				list_move(&cursor_page->lru, dst);
1021 				mem_cgroup_del_lru(cursor_page);
1022 				nr_taken++;
1023 				nr_lumpy_taken++;
1024 				if (PageDirty(cursor_page))
1025 					nr_lumpy_dirty++;
1026 				scan++;
1027 			} else {
1028 				if (mode == ISOLATE_BOTH &&
1029 						page_count(cursor_page))
1030 					nr_lumpy_failed++;
1031 			}
1032 		}
1033 	}
1034 
1035 	*scanned = scan;
1036 
1037 	trace_mm_vmscan_lru_isolate(order,
1038 			nr_to_scan, scan,
1039 			nr_taken,
1040 			nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
1041 			mode);
1042 	return nr_taken;
1043 }
1044 
1045 static unsigned long isolate_pages_global(unsigned long nr,
1046 					struct list_head *dst,
1047 					unsigned long *scanned, int order,
1048 					int mode, struct zone *z,
1049 					int active, int file)
1050 {
1051 	int lru = LRU_BASE;
1052 	if (active)
1053 		lru += LRU_ACTIVE;
1054 	if (file)
1055 		lru += LRU_FILE;
1056 	return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
1057 								mode, file);
1058 }
1059 
1060 /*
1061  * clear_active_flags() is a helper for shrink_inactive_list(), clearing
1062  * any active bits from the pages in the list.
1063  */
1064 static unsigned long clear_active_flags(struct list_head *page_list,
1065 					unsigned int *count)
1066 {
1067 	int nr_active = 0;
1068 	int lru;
1069 	struct page *page;
1070 
1071 	list_for_each_entry(page, page_list, lru) {
1072 		lru = page_lru_base_type(page);
1073 		if (PageActive(page)) {
1074 			lru += LRU_ACTIVE;
1075 			ClearPageActive(page);
1076 			nr_active++;
1077 		}
1078 		if (count)
1079 			count[lru]++;
1080 	}
1081 
1082 	return nr_active;
1083 }
1084 
1085 /**
1086  * isolate_lru_page - tries to isolate a page from its LRU list
1087  * @page: page to isolate from its LRU list
1088  *
1089  * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1090  * vmstat statistic corresponding to whatever LRU list the page was on.
1091  *
1092  * Returns 0 if the page was removed from an LRU list.
1093  * Returns -EBUSY if the page was not on an LRU list.
1094  *
1095  * The returned page will have PageLRU() cleared.  If it was found on
1096  * the active list, it will have PageActive set.  If it was found on
1097  * the unevictable list, it will have the PageUnevictable bit set. That flag
1098  * may need to be cleared by the caller before letting the page go.
1099  *
1100  * The vmstat statistic corresponding to the list on which the page was
1101  * found will be decremented.
1102  *
1103  * Restrictions:
1104  * (1) Must be called with an elevated refcount on the page. This is a
1105  *     fundamental difference from isolate_lru_pages (which is called
1106  *     without a stable reference).
1107  * (2) the lru_lock must not be held.
1108  * (3) interrupts must be enabled.
1109  */
1110 int isolate_lru_page(struct page *page)
1111 {
1112 	int ret = -EBUSY;
1113 
1114 	if (PageLRU(page)) {
1115 		struct zone *zone = page_zone(page);
1116 
1117 		spin_lock_irq(&zone->lru_lock);
1118 		if (PageLRU(page) && get_page_unless_zero(page)) {
1119 			int lru = page_lru(page);
1120 			ret = 0;
1121 			ClearPageLRU(page);
1122 
1123 			del_page_from_lru_list(zone, page, lru);
1124 		}
1125 		spin_unlock_irq(&zone->lru_lock);
1126 	}
1127 	return ret;
1128 }
1129 
1130 /*
1131  * Are there way too many processes in the direct reclaim path already?
1132  */
1133 static int too_many_isolated(struct zone *zone, int file,
1134 		struct scan_control *sc)
1135 {
1136 	unsigned long inactive, isolated;
1137 
1138 	if (current_is_kswapd())
1139 		return 0;
1140 
1141 	if (!scanning_global_lru(sc))
1142 		return 0;
1143 
1144 	if (file) {
1145 		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1146 		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1147 	} else {
1148 		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1149 		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1150 	}
1151 
1152 	return isolated > inactive;
1153 }
1154 
1155 /*
1156  * TODO: Try merging with the migration version of putback_lru_pages
1157  */
1158 static noinline_for_stack void
1159 putback_lru_pages(struct zone *zone, struct scan_control *sc,
1160 				unsigned long nr_anon, unsigned long nr_file,
1161 				struct list_head *page_list)
1162 {
1163 	struct page *page;
1164 	struct pagevec pvec;
1165 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1166 
1167 	pagevec_init(&pvec, 1);
1168 
1169 	/*
1170 	 * Put back any unfreeable pages.
1171 	 */
1172 	spin_lock(&zone->lru_lock);
1173 	while (!list_empty(page_list)) {
1174 		int lru;
1175 		page = lru_to_page(page_list);
1176 		VM_BUG_ON(PageLRU(page));
1177 		list_del(&page->lru);
1178 		if (unlikely(!page_evictable(page, NULL))) {
1179 			spin_unlock_irq(&zone->lru_lock);
1180 			putback_lru_page(page);
1181 			spin_lock_irq(&zone->lru_lock);
1182 			continue;
1183 		}
1184 		SetPageLRU(page);
1185 		lru = page_lru(page);
1186 		add_page_to_lru_list(zone, page, lru);
1187 		if (is_active_lru(lru)) {
1188 			int file = is_file_lru(lru);
1189 			reclaim_stat->recent_rotated[file]++;
1190 		}
1191 		if (!pagevec_add(&pvec, page)) {
1192 			spin_unlock_irq(&zone->lru_lock);
1193 			__pagevec_release(&pvec);
1194 			spin_lock_irq(&zone->lru_lock);
1195 		}
1196 	}
1197 	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
1198 	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
1199 
1200 	spin_unlock_irq(&zone->lru_lock);
1201 	pagevec_release(&pvec);
1202 }
1203 
1204 static noinline_for_stack void update_isolated_counts(struct zone *zone,
1205 					struct scan_control *sc,
1206 					unsigned long *nr_anon,
1207 					unsigned long *nr_file,
1208 					struct list_head *isolated_list)
1209 {
1210 	unsigned long nr_active;
1211 	unsigned int count[NR_LRU_LISTS] = { 0, };
1212 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1213 
1214 	nr_active = clear_active_flags(isolated_list, count);
1215 	__count_vm_events(PGDEACTIVATE, nr_active);
1216 
1217 	__mod_zone_page_state(zone, NR_ACTIVE_FILE,
1218 			      -count[LRU_ACTIVE_FILE]);
1219 	__mod_zone_page_state(zone, NR_INACTIVE_FILE,
1220 			      -count[LRU_INACTIVE_FILE]);
1221 	__mod_zone_page_state(zone, NR_ACTIVE_ANON,
1222 			      -count[LRU_ACTIVE_ANON]);
1223 	__mod_zone_page_state(zone, NR_INACTIVE_ANON,
1224 			      -count[LRU_INACTIVE_ANON]);
1225 
1226 	*nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
1227 	*nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
1228 	__mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
1229 	__mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
1230 
1231 	reclaim_stat->recent_scanned[0] += *nr_anon;
1232 	reclaim_stat->recent_scanned[1] += *nr_file;
1233 }
1234 
1235 /*
1236  * Returns true if the caller should wait to clean dirty/writeback pages.
1237  *
1238  * If we are direct reclaiming for contiguous pages and we do not reclaim
1239  * everything in the list, try again and wait for writeback IO to complete.
1240  * This will stall high-order allocations noticeably. Only do that when we
1241  * really need to free the pages under high memory pressure.
1242  */
1243 static inline bool should_reclaim_stall(unsigned long nr_taken,
1244 					unsigned long nr_freed,
1245 					int priority,
1246 					struct scan_control *sc)
1247 {
1248 	int lumpy_stall_priority;
1249 
1250 	/* kswapd should not stall on sync IO */
1251 	if (current_is_kswapd())
1252 		return false;
1253 
1254 	/* Only stall on lumpy reclaim */
1255 	if (!sc->lumpy_reclaim_mode)
1256 		return false;
1257 
1258 	/* If we have reclaimed everything on the isolated list, no stall */
1259 	if (nr_freed == nr_taken)
1260 		return false;
1261 
1262 	/*
1263 	 * For high-order allocations, there are two stall thresholds.
1264 	 * High-cost allocations stall immediately, whereas lower-order
1265 	 * allocations such as stacks require the scanning
1266 	 * priority to be much higher before stalling.
1267 	 */
1268 	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1269 		lumpy_stall_priority = DEF_PRIORITY;
1270 	else
1271 		lumpy_stall_priority = DEF_PRIORITY / 3;
1272 
1273 	return priority <= lumpy_stall_priority;
1274 }
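
/*
 * Concretely (assuming the usual DEF_PRIORITY == 12 and
 * PAGE_ALLOC_COSTLY_ORDER == 3): an order-4 or larger allocation may
 * stall at any priority, while e.g. an order-1 stack allocation only
 * stalls once priority has dropped to DEF_PRIORITY / 3 == 4 or below.
 */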
1275 
1276 /*
1277  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
1278  * of reclaimed pages
1279  */
1280 static noinline_for_stack unsigned long
1281 shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
1282 			struct scan_control *sc, int priority, int file)
1283 {
1284 	LIST_HEAD(page_list);
1285 	unsigned long nr_scanned;
1286 	unsigned long nr_reclaimed = 0;
1287 	unsigned long nr_taken;
1288 	unsigned long nr_active;
1289 	unsigned long nr_anon;
1290 	unsigned long nr_file;
1291 
1292 	while (unlikely(too_many_isolated(zone, file, sc))) {
1293 		congestion_wait(BLK_RW_ASYNC, HZ/10);
1294 
1295 		/* We are about to die and free our memory. Return now. */
1296 		if (fatal_signal_pending(current))
1297 			return SWAP_CLUSTER_MAX;
1298 	}
1299 
1300 
1301 	lru_add_drain();
1302 	spin_lock_irq(&zone->lru_lock);
1303 
1304 	if (scanning_global_lru(sc)) {
1305 		nr_taken = isolate_pages_global(nr_to_scan,
1306 			&page_list, &nr_scanned, sc->order,
1307 			sc->lumpy_reclaim_mode ?
1308 				ISOLATE_BOTH : ISOLATE_INACTIVE,
1309 			zone, 0, file);
1310 		zone->pages_scanned += nr_scanned;
1311 		if (current_is_kswapd())
1312 			__count_zone_vm_events(PGSCAN_KSWAPD, zone,
1313 					       nr_scanned);
1314 		else
1315 			__count_zone_vm_events(PGSCAN_DIRECT, zone,
1316 					       nr_scanned);
1317 	} else {
1318 		nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
1319 			&page_list, &nr_scanned, sc->order,
1320 			sc->lumpy_reclaim_mode ?
1321 				ISOLATE_BOTH : ISOLATE_INACTIVE,
1322 			zone, sc->mem_cgroup,
1323 			0, file);
1324 		/*
1325 		 * mem_cgroup_isolate_pages() keeps track of
1326 		 * scanned pages on its own.
1327 		 */
1328 	}
1329 
1330 	if (nr_taken == 0) {
1331 		spin_unlock_irq(&zone->lru_lock);
1332 		return 0;
1333 	}
1334 
1335 	update_isolated_counts(zone, sc, &nr_anon, &nr_file, &page_list);
1336 
1337 	spin_unlock_irq(&zone->lru_lock);
1338 
1339 	nr_reclaimed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
1340 
1341 	/* Check if we should synchronously wait for writeback */
1342 	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
1343 		/*
1344 		 * The attempt at page out may have made some
1345 		 * of the pages active, mark them inactive again.
1346 		 */
1347 		nr_active = clear_active_flags(&page_list, NULL);
1348 		count_vm_events(PGDEACTIVATE, nr_active);
1349 
1350 		nr_reclaimed += shrink_page_list(&page_list, sc, PAGEOUT_IO_SYNC);
1351 	}
1352 
1353 	local_irq_disable();
1354 	if (current_is_kswapd())
1355 		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
1356 	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
1357 
1358 	putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
1359 
1360 	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1361 		zone_idx(zone),
1362 		nr_scanned, nr_reclaimed,
1363 		priority,
1364 		trace_shrink_flags(file, sc->lumpy_reclaim_mode));
1365 	return nr_reclaimed;
1366 }
1367 
1368 /*
1369  * This moves pages from the active list to the inactive list.
1370  *
1371  * We move them the other way if the page is referenced by one or more
1372  * processes, from rmap.
1373  *
1374  * If the pages are mostly unmapped, the processing is fast and it is
1375  * appropriate to hold zone->lru_lock across the whole operation.  But if
1376  * the pages are mapped, the processing is slow (page_referenced()) so we
1377  * should drop zone->lru_lock around each page.  It's impossible to balance
1378  * this, so instead we remove the pages from the LRU while processing them.
1379  * It is safe to rely on PG_active against the non-LRU pages in here because
1380  * nobody will play with that bit on a non-LRU page.
1381  *
1382  * The downside is that we have to touch page->_count against each page.
1383  * But we had to alter page->flags anyway.
1384  */
1385 
1386 static void move_active_pages_to_lru(struct zone *zone,
1387 				     struct list_head *list,
1388 				     enum lru_list lru)
1389 {
1390 	unsigned long pgmoved = 0;
1391 	struct pagevec pvec;
1392 	struct page *page;
1393 
1394 	pagevec_init(&pvec, 1);
1395 
1396 	while (!list_empty(list)) {
1397 		page = lru_to_page(list);
1398 
1399 		VM_BUG_ON(PageLRU(page));
1400 		SetPageLRU(page);
1401 
1402 		list_move(&page->lru, &zone->lru[lru].list);
1403 		mem_cgroup_add_lru_list(page, lru);
1404 		pgmoved++;
1405 
1406 		if (!pagevec_add(&pvec, page) || list_empty(list)) {
1407 			spin_unlock_irq(&zone->lru_lock);
1408 			if (buffer_heads_over_limit)
1409 				pagevec_strip(&pvec);
1410 			__pagevec_release(&pvec);
1411 			spin_lock_irq(&zone->lru_lock);
1412 		}
1413 	}
1414 	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1415 	if (!is_active_lru(lru))
1416 		__count_vm_events(PGDEACTIVATE, pgmoved);
1417 }
1418 
1419 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1420 			struct scan_control *sc, int priority, int file)
1421 {
1422 	unsigned long nr_taken;
1423 	unsigned long pgscanned;
1424 	unsigned long vm_flags;
1425 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
1426 	LIST_HEAD(l_active);
1427 	LIST_HEAD(l_inactive);
1428 	struct page *page;
1429 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1430 	unsigned long nr_rotated = 0;
1431 
1432 	lru_add_drain();
1433 	spin_lock_irq(&zone->lru_lock);
1434 	if (scanning_global_lru(sc)) {
1435 		nr_taken = isolate_pages_global(nr_pages, &l_hold,
1436 						&pgscanned, sc->order,
1437 						ISOLATE_ACTIVE, zone,
1438 						1, file);
1439 		zone->pages_scanned += pgscanned;
1440 	} else {
1441 		nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
1442 						&pgscanned, sc->order,
1443 						ISOLATE_ACTIVE, zone,
1444 						sc->mem_cgroup, 1, file);
1445 		/*
1446 		 * mem_cgroup_isolate_pages() keeps track of
1447 		 * scanned pages on its own.
1448 		 */
1449 	}
1450 
1451 	reclaim_stat->recent_scanned[file] += nr_taken;
1452 
1453 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
1454 	if (file)
1455 		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
1456 	else
1457 		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
1458 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1459 	spin_unlock_irq(&zone->lru_lock);
1460 
1461 	while (!list_empty(&l_hold)) {
1462 		cond_resched();
1463 		page = lru_to_page(&l_hold);
1464 		list_del(&page->lru);
1465 
1466 		if (unlikely(!page_evictable(page, NULL))) {
1467 			putback_lru_page(page);
1468 			continue;
1469 		}
1470 
1471 		if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
1472 			nr_rotated++;
1473 			/*
1474 			 * Identify referenced, file-backed active pages and
1475 			 * give them one more trip around the active list, so
1476 			 * that executable code gets a better chance to stay in
1477 			 * memory under moderate memory pressure.  Anon pages
1478 			 * are not likely to be evicted by use-once streaming
1479 			 * IO, plus JVM can create lots of anon VM_EXEC pages,
1480 			 * so we ignore them here.
1481 			 */
1482 			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
1483 				list_add(&page->lru, &l_active);
1484 				continue;
1485 			}
1486 		}
1487 
1488 		ClearPageActive(page);	/* we are de-activating */
1489 		list_add(&page->lru, &l_inactive);
1490 	}
1491 
1492 	/*
1493 	 * Move pages back to the lru list.
1494 	 */
1495 	spin_lock_irq(&zone->lru_lock);
1496 	/*
1497 	 * Count referenced pages from currently used mappings as rotated,
1498 	 * even though only some of them are actually re-activated.  This
1499 	 * helps balance scan pressure between file and anonymous pages in
1500 	 * get_scan_ratio.
1501 	 */
1502 	reclaim_stat->recent_rotated[file] += nr_rotated;
1503 
1504 	move_active_pages_to_lru(zone, &l_active,
1505 						LRU_ACTIVE + file * LRU_FILE);
1506 	move_active_pages_to_lru(zone, &l_inactive,
1507 						LRU_BASE   + file * LRU_FILE);
1508 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1509 	spin_unlock_irq(&zone->lru_lock);
1510 }
1511 
1512 #ifdef CONFIG_SWAP
1513 static int inactive_anon_is_low_global(struct zone *zone)
1514 {
1515 	unsigned long active, inactive;
1516 
1517 	active = zone_page_state(zone, NR_ACTIVE_ANON);
1518 	inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1519 
1520 	if (inactive * zone->inactive_ratio < active)
1521 		return 1;
1522 
1523 	return 0;
1524 }
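
/*
 * Example (illustrative numbers): with zone->inactive_ratio == 3
 * (roughly what a 1GB zone gets), 300MB of active anon versus 90MB of
 * inactive anon gives 90 * 3 = 270 < 300, so the inactive list is
 * considered too small and deactivation kicks in.
 */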
1525 
1526 /**
1527  * inactive_anon_is_low - check if anonymous pages need to be deactivated
1528  * @zone: zone to check
1529  * @sc:   scan control of this context
1530  *
1531  * Returns true if the zone does not have enough inactive anon pages,
1532  * meaning some active anon pages need to be deactivated.
1533  */
1534 static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
1535 {
1536 	int low;
1537 
1538 	/*
1539 	 * If we don't have swap space, anonymous page deactivation
1540 	 * is pointless.
1541 	 */
1542 	if (!total_swap_pages)
1543 		return 0;
1544 
1545 	if (scanning_global_lru(sc))
1546 		low = inactive_anon_is_low_global(zone);
1547 	else
1548 		low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
1549 	return low;
1550 }
1551 #else
1552 static inline int inactive_anon_is_low(struct zone *zone,
1553 					struct scan_control *sc)
1554 {
1555 	return 0;
1556 }
1557 #endif
1558 
1559 static int inactive_file_is_low_global(struct zone *zone)
1560 {
1561 	unsigned long active, inactive;
1562 
1563 	active = zone_page_state(zone, NR_ACTIVE_FILE);
1564 	inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1565 
1566 	return (active > inactive);
1567 }
1568 
1569 /**
1570  * inactive_file_is_low - check if file pages need to be deactivated
1571  * @zone: zone to check
1572  * @sc:   scan control of this context
1573  *
1574  * When the system is doing streaming IO, memory pressure here
1575  * ensures that active file pages get deactivated, until more
1576  * than half of the file pages are on the inactive list.
1577  *
1578  * Once we get to that situation, protect the system's working
1579  * set from being evicted by disabling active file page aging.
1580  *
1581  * This uses a different ratio than the anonymous pages, because
1582  * the page cache uses a use-once replacement algorithm.
1583  */
1584 static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
1585 {
1586 	int low;
1587 
1588 	if (scanning_global_lru(sc))
1589 		low = inactive_file_is_low_global(zone);
1590 	else
1591 		low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
1592 	return low;
1593 }
1594 
1595 static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
1596 				int file)
1597 {
1598 	if (file)
1599 		return inactive_file_is_low(zone, sc);
1600 	else
1601 		return inactive_anon_is_low(zone, sc);
1602 }
1603 
1604 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1605 	struct zone *zone, struct scan_control *sc, int priority)
1606 {
1607 	int file = is_file_lru(lru);
1608 
1609 	if (is_active_lru(lru)) {
1610 		if (inactive_list_is_low(zone, sc, file))
1611 		    shrink_active_list(nr_to_scan, zone, sc, priority, file);
1612 		return 0;
1613 	}
1614 
1615 	return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
1616 }
1617 
1618 /*
1619  * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
1620  * until we have collected SWAP_CLUSTER_MAX pages to scan.
1621  */
1622 static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
1623 				       unsigned long *nr_saved_scan)
1624 {
1625 	unsigned long nr;
1626 
1627 	*nr_saved_scan += nr_to_scan;
1628 	nr = *nr_saved_scan;
1629 
1630 	if (nr >= SWAP_CLUSTER_MAX)
1631 		*nr_saved_scan = 0;
1632 	else
1633 		nr = 0;
1634 
1635 	return nr;
1636 }
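
/*
 * Example (illustrative): with SWAP_CLUSTER_MAX == 32, four successive
 * calls with nr_to_scan = 10 return 0, 0, 0 and then 40: the first three
 * only accumulate into *nr_saved_scan (10, 20, 30), and the fourth
 * reaches 40 >= 32, so the whole batch is handed out and the counter
 * resets to 0.
 */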
1637 
1638 /*
1639  * Determine how aggressively the anon and file LRU lists should be
1640  * scanned.  The relative value of each set of LRU lists is determined
1641  * by looking at the fraction of the scanned pages that we rotated back
1642  * onto the active list instead of evicting.
1643  *
1644  * nr[0] = anon pages to scan; nr[1] = file pages to scan
1645  */
1646 static void get_scan_count(struct zone *zone, struct scan_control *sc,
1647 					unsigned long *nr, int priority)
1648 {
1649 	unsigned long anon, file, free;
1650 	unsigned long anon_prio, file_prio;
1651 	unsigned long ap, fp;
1652 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1653 	u64 fraction[2], denominator;
1654 	enum lru_list l;
1655 	int noswap = 0;
1656 
1657 	/* If we have no swap space, do not bother scanning anon pages. */
1658 	if (!sc->may_swap || (nr_swap_pages <= 0)) {
1659 		noswap = 1;
1660 		fraction[0] = 0;
1661 		fraction[1] = 1;
1662 		denominator = 1;
1663 		goto out;
1664 	}
1665 
1666 	anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
1667 		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
1668 	file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
1669 		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1670 
1671 	if (scanning_global_lru(sc)) {
1672 		free  = zone_page_state(zone, NR_FREE_PAGES);
1673 		/* If we have very few page cache pages,
1674 		   force-scan anon pages. */
1675 		if (unlikely(file + free <= high_wmark_pages(zone))) {
1676 			fraction[0] = 1;
1677 			fraction[1] = 0;
1678 			denominator = 1;
1679 			goto out;
1680 		}
1681 	}
1682 
1683 	/*
1684 	 * With swappiness at 100, anonymous and file have the same priority.
1685 	 * This scanning priority is essentially the inverse of IO cost.
1686 	 */
1687 	anon_prio = sc->swappiness;
1688 	file_prio = 200 - sc->swappiness;
1689 
1690 	/*
1691 	 * OK, so we have swap space and a fair amount of page cache
1692 	 * pages.  We use the recently rotated / recently scanned
1693 	 * ratios to determine how valuable each cache is.
1694 	 *
1695 	 * Because workloads change over time (and to avoid overflow)
1696 	 * we keep these statistics as a floating average, which ends
1697 	 * up weighing recent references more than old ones.
1698 	 *
1699 	 * anon in [0], file in [1]
1700 	 */
1701 	spin_lock_irq(&zone->lru_lock);
1702 	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1703 		reclaim_stat->recent_scanned[0] /= 2;
1704 		reclaim_stat->recent_rotated[0] /= 2;
1705 	}
1706 
1707 	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1708 		reclaim_stat->recent_scanned[1] /= 2;
1709 		reclaim_stat->recent_rotated[1] /= 2;
1710 	}
1711 
1712 	/*
1713 	 * The amount of pressure on anon vs file pages is inversely
1714 	 * proportional to the fraction of recently scanned pages on
1715 	 * each list that were recently referenced and in active use.
1716 	 */
1717 	ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
1718 	ap /= reclaim_stat->recent_rotated[0] + 1;
1719 
1720 	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
1721 	fp /= reclaim_stat->recent_rotated[1] + 1;
1722 	spin_unlock_irq(&zone->lru_lock);
1723 
1724 	fraction[0] = ap;
1725 	fraction[1] = fp;
1726 	denominator = ap + fp + 1;
1727 out:
1728 	for_each_evictable_lru(l) {
1729 		int file = is_file_lru(l);
1730 		unsigned long scan;
1731 
1732 		scan = zone_nr_lru_pages(zone, sc, l);
1733 		if (priority || noswap) {
1734 			scan >>= priority;
1735 			scan = div64_u64(scan * fraction[file], denominator);
1736 		}
1737 		nr[l] = nr_scan_try_batch(scan,
1738 					  &reclaim_stat->nr_saved_scan[l]);
1739 	}
1740 }
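
/*
 * Worked example (illustrative numbers): with sc->swappiness == 60,
 * anon_prio == 60 and file_prio == 140.  Suppose recent_scanned/rotated
 * are 100/50 for anon but 100/10 for file:
 *
 *	ap = 61 * 101 / 51  ~= 120
 *	fp = 141 * 101 / 11 ~= 1294
 *
 * giving anon roughly 120 / (120 + 1294 + 1) ~= 8% of the scan: file
 * pages rotated back far less often, so they are the cheaper pool to
 * reclaim from.
 */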
1741 
1742 static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
1743 {
1744 	/*
1745 	 * If we need a large contiguous chunk of memory, or have
1746 	 * trouble getting a small set of contiguous pages, we
1747 	 * will reclaim both active and inactive pages.
1748 	 */
1749 	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1750 		sc->lumpy_reclaim_mode = 1;
1751 	else if (sc->order && priority < DEF_PRIORITY - 2)
1752 		sc->lumpy_reclaim_mode = 1;
1753 	else
1754 		sc->lumpy_reclaim_mode = 0;
1755 }
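
/*
 * Concretely (assuming the usual DEF_PRIORITY == 12 and
 * PAGE_ALLOC_COSTLY_ORDER == 3): an order-5 request uses lumpy reclaim
 * from the start, an order-2 request only after priority has dropped
 * below 10, and order-0 reclaim never does.
 */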
1756 
1757 /*
1758  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
1759  */
1760 static void shrink_zone(int priority, struct zone *zone,
1761 				struct scan_control *sc)
1762 {
1763 	unsigned long nr[NR_LRU_LISTS];
1764 	unsigned long nr_to_scan;
1765 	enum lru_list l;
1766 	unsigned long nr_reclaimed = sc->nr_reclaimed;
1767 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
1768 
1769 	get_scan_count(zone, sc, nr, priority);
1770 
1771 	set_lumpy_reclaim_mode(priority, sc);
1772 
1773 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1774 					nr[LRU_INACTIVE_FILE]) {
1775 		for_each_evictable_lru(l) {
1776 			if (nr[l]) {
1777 				nr_to_scan = min_t(unsigned long,
1778 						   nr[l], SWAP_CLUSTER_MAX);
1779 				nr[l] -= nr_to_scan;
1780 
1781 				nr_reclaimed += shrink_list(l, nr_to_scan,
1782 							    zone, sc, priority);
1783 			}
1784 		}
1785 		/*
1786 		 * On large memory systems, scan >> priority can become
1787 		 * really large. This is fine for the starting priority;
1788 		 * we want to put equal scanning pressure on each zone.
1789 		 * However, if the VM has a harder time of freeing pages,
1790 		 * with multiple processes reclaiming pages, the total
1791 		 * freeing target can get unreasonably large.
1792 		 */
1793 		if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
1794 			break;
1795 	}
1796 
1797 	sc->nr_reclaimed = nr_reclaimed;
1798 
1799 	/*
1800 	 * Even if we did not try to evict anon pages at all, we want to
1801 	 * rebalance the anon lru active/inactive ratio.
1802 	 */
1803 	if (inactive_anon_is_low(zone, sc))
1804 		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1805 
1806 	throttle_vm_writeout(sc->gfp_mask);
1807 }
1808 
1809 /*
1810  * This is the direct reclaim path, for page-allocating processes.  We only
1811  * try to reclaim pages from zones which will satisfy the caller's allocation
1812  * request.
1813  *
1814  * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
1815  * Because:
1816  * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1817  *    allocation or
1818  * b) The target zone may be at high_wmark_pages(zone) but the lower zones
1819  *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
1820  *    zone defense algorithm.
1821  *
1822  * If a zone is deemed to be full of pinned pages then just give it a light
1823  * scan and then give up on it.
1824  */
1825 static void shrink_zones(int priority, struct zonelist *zonelist,
1826 					struct scan_control *sc)
1827 {
1828 	struct zoneref *z;
1829 	struct zone *zone;
1830 
1831 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1832 					gfp_zone(sc->gfp_mask), sc->nodemask) {
1833 		if (!populated_zone(zone))
1834 			continue;
1835 		/*
1836 		 * Take care that memory controller reclaim has only a small
1837 		 * influence on the global LRU.
1838 		 */
1839 		if (scanning_global_lru(sc)) {
1840 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1841 				continue;
1842 			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
1843 				continue;	/* Let kswapd poll it */
1844 		}
1845 
1846 		shrink_zone(priority, zone, sc);
1847 	}
1848 }
1849 
1850 static bool zone_reclaimable(struct zone *zone)
1851 {
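	/*
	 * A zone counts as reclaimable until roughly six times its
	 * reclaimable pages have been scanned without anything getting
	 * freed (the page allocator resets zone->pages_scanned whenever
	 * it frees pages back to the zone).
	 */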
1852 	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
1853 }
1854 
1855 /*
1856  * While hibernation is in progress, kswapd is frozen and cannot mark
1857  * zones all_unreclaimable, so OOM during hibernation would go unhandled.
1858  * Hence, check for unreclaimable zones in direct reclaim as well as in kswapd.
1859  */
1860 static bool all_unreclaimable(struct zonelist *zonelist,
1861 		struct scan_control *sc)
1862 {
1863 	struct zoneref *z;
1864 	struct zone *zone;
1865 	bool all_unreclaimable = true;
1866 
1867 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1868 			gfp_zone(sc->gfp_mask), sc->nodemask) {
1869 		if (!populated_zone(zone))
1870 			continue;
1871 		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1872 			continue;
1873 		if (zone_reclaimable(zone)) {
1874 			all_unreclaimable = false;
1875 			break;
1876 		}
1877 	}
1878 
1879 	return all_unreclaimable;
1880 }
1881 
1882 /*
1883  * This is the main entry point to direct page reclaim.
1884  *
1885  * If a full scan of the inactive list fails to free enough memory then we
1886  * are "out of memory" and something needs to be killed.
1887  *
1888  * If the caller is !__GFP_FS then the probability of a failure is reasonably
1889  * high - the zone may be full of dirty or under-writeback pages, which this
1890  * caller can't do much about.  We kick the writeback threads and take explicit
1891  * naps in the hope that some of these pages can be written.  But if the
1892  * allocating task holds filesystem locks which prevent writeout this might not
1893  * work, and the allocation attempt will fail.
1894  *
1895  * returns:	0, if no pages reclaimed
1896  * 		else, the number of pages reclaimed
1897  */
1898 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1899 					struct scan_control *sc)
1900 {
1901 	int priority;
1902 	unsigned long total_scanned = 0;
1903 	struct reclaim_state *reclaim_state = current->reclaim_state;
1904 	struct zoneref *z;
1905 	struct zone *zone;
1906 	unsigned long writeback_threshold;
1907 
1908 	get_mems_allowed();
1909 	delayacct_freepages_start();
1910 
1911 	if (scanning_global_lru(sc))
1912 		count_vm_event(ALLOCSTALL);
1913 
1914 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1915 		sc->nr_scanned = 0;
1916 		if (!priority)
1917 			disable_swap_token();
1918 		shrink_zones(priority, zonelist, sc);
1919 		/*
1920 		 * Don't shrink slabs when reclaiming memory from
1921 		 * over-limit cgroups
1922 		 */
1923 		if (scanning_global_lru(sc)) {
1924 			unsigned long lru_pages = 0;
1925 			for_each_zone_zonelist(zone, z, zonelist,
1926 					gfp_zone(sc->gfp_mask)) {
1927 				if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1928 					continue;
1929 
1930 				lru_pages += zone_reclaimable_pages(zone);
1931 			}
1932 
1933 			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
1934 			if (reclaim_state) {
1935 				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
1936 				reclaim_state->reclaimed_slab = 0;
1937 			}
1938 		}
1939 		total_scanned += sc->nr_scanned;
1940 		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
1941 			goto out;
1942 
1943 		/*
1944 		 * Try to write back as many pages as we just scanned.  This
1945 		 * tends to cause slow streaming writers to write data to the
1946 		 * disk smoothly, at the dirtying rate, which is nice.   But
1947 		 * that's undesirable in laptop mode, where we *want* lumpy
1948 		 * writeout.  So in laptop mode, write out the whole world.
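		 *
		 * (With the default target of SWAP_CLUSTER_MAX == 32 pages,
		 * this kicks the flusher threads once roughly 48 pages have
		 * been scanned without meeting the reclaim target.)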
1949 		 */
1950 		writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
1951 		if (total_scanned > writeback_threshold) {
1952 			wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
1953 			sc->may_writepage = 1;
1954 		}
1955 
1956 		/* Take a nap, wait for some writeback to complete */
1957 		if (!sc->hibernation_mode && sc->nr_scanned &&
1958 		    priority < DEF_PRIORITY - 2)
1959 			congestion_wait(BLK_RW_ASYNC, HZ/10);
1960 	}
1961 
1962 out:
1963 	delayacct_freepages_end();
1964 	put_mems_allowed();
1965 
1966 	if (sc->nr_reclaimed)
1967 		return sc->nr_reclaimed;
1968 
1969 	/* top priority shrink_zones still had more to do? don't OOM, then */
1970 	if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
1971 		return 1;
1972 
1973 	return 0;
1974 }
1975 
1976 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1977 				gfp_t gfp_mask, nodemask_t *nodemask)
1978 {
1979 	unsigned long nr_reclaimed;
1980 	struct scan_control sc = {
1981 		.gfp_mask = gfp_mask,
1982 		.may_writepage = !laptop_mode,
1983 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
1984 		.may_unmap = 1,
1985 		.may_swap = 1,
1986 		.swappiness = vm_swappiness,
1987 		.order = order,
1988 		.mem_cgroup = NULL,
1989 		.nodemask = nodemask,
1990 	};
1991 
1992 	trace_mm_vmscan_direct_reclaim_begin(order,
1993 				sc.may_writepage,
1994 				gfp_mask);
1995 
1996 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
1997 
1998 	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
1999 
2000 	return nr_reclaimed;
2001 }
2002 
2003 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
2004 
2005 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2006 						gfp_t gfp_mask, bool noswap,
2007 						unsigned int swappiness,
2008 						struct zone *zone)
2009 {
2010 	struct scan_control sc = {
2011 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
2012 		.may_writepage = !laptop_mode,
2013 		.may_unmap = 1,
2014 		.may_swap = !noswap,
2015 		.swappiness = swappiness,
2016 		.order = 0,
2017 		.mem_cgroup = mem,
2018 	};
2019 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2020 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2021 
2022 	trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
2023 						      sc.may_writepage,
2024 						      sc.gfp_mask);
2025 
2026 	/*
2027 	 * NOTE: Although we can obtain the priority field, using it
2028 	 * here is not a good idea, since it limits the pages we can scan.
2029 	 * If we don't reclaim here, the shrink_zone from balance_pgdat
2030 	 * will pick up pages from other mem cgroups as well. So we hack
2031 	 * the priority to zero.
2032 	 */
2033 	shrink_zone(0, zone, &sc);
2034 
2035 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2036 
2037 	return sc.nr_reclaimed;
2038 }
2039 
2040 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
2041 					   gfp_t gfp_mask,
2042 					   bool noswap,
2043 					   unsigned int swappiness)
2044 {
2045 	struct zonelist *zonelist;
2046 	unsigned long nr_reclaimed;
2047 	struct scan_control sc = {
2048 		.may_writepage = !laptop_mode,
2049 		.may_unmap = 1,
2050 		.may_swap = !noswap,
2051 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
2052 		.swappiness = swappiness,
2053 		.order = 0,
2054 		.mem_cgroup = mem_cont,
2055 		.nodemask = NULL, /* we don't care about placement */
2056 	};
2057 
2058 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2059 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2060 	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
2061 
2062 	trace_mm_vmscan_memcg_reclaim_begin(0,
2063 					    sc.may_writepage,
2064 					    sc.gfp_mask);
2065 
2066 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2067 
2068 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2069 
2070 	return nr_reclaimed;
2071 }
2072 #endif
2073 
2074 /* is kswapd sleeping prematurely? */
2075 static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
2076 {
2077 	int i;
2078 
2079 	/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
2080 	if (remaining)
2081 		return 1;
2082 
2083 	/* If after HZ/10, a zone is below the high mark, it's premature */
2084 	for (i = 0; i < pgdat->nr_zones; i++) {
2085 		struct zone *zone = pgdat->node_zones + i;
2086 
2087 		if (!populated_zone(zone))
2088 			continue;
2089 
2090 		if (zone->all_unreclaimable)
2091 			continue;
2092 
2093 		if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
2094 								0, 0))
2095 			return 1;
2096 	}
2097 
2098 	return 0;
2099 }
2100 
2101 /*
2102  * For kswapd, balance_pgdat() will work across all this node's zones until
2103  * they are all at high_wmark_pages(zone).
2104  *
2105  * Returns the number of pages which were actually freed.
2106  *
2107  * There is special handling here for zones which are full of pinned pages.
2108  * This can happen if the pages are all mlocked, or if they are all used by
2109  * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
2110  * What we do is to detect the case where all pages in the zone have been
2111  * scanned twice and there has been zero successful reclaim.  Mark the zone as
2112  * dead and from now on, only perform a short scan.  Basically we're polling
2113  * the zone for when the problem goes away.
2114  *
2115  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
2116  * zones which have free_pages > high_wmark_pages(zone), but once a zone is
2117  * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
2118  * lower zones regardless of the number of free pages in the lower zones. This
2119  * interoperates with the page allocator fallback scheme to ensure that aging
2120  * of pages is balanced across the zones.
2121  */
2122 static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
2123 {
2124 	int all_zones_ok;
2125 	int priority;
2126 	int i;
2127 	unsigned long total_scanned;
2128 	struct reclaim_state *reclaim_state = current->reclaim_state;
2129 	struct scan_control sc = {
2130 		.gfp_mask = GFP_KERNEL,
2131 		.may_unmap = 1,
2132 		.may_swap = 1,
2133 		/*
2134 		 * kswapd doesn't want to bail out of reclaim early, because
2135 		 * we want to put equal scanning pressure on each zone.
2136 		 */
2137 		.nr_to_reclaim = ULONG_MAX,
2138 		.swappiness = vm_swappiness,
2139 		.order = order,
2140 		.mem_cgroup = NULL,
2141 	};
2142 loop_again:
2143 	total_scanned = 0;
2144 	sc.nr_reclaimed = 0;
2145 	sc.may_writepage = !laptop_mode;
2146 	count_vm_event(PAGEOUTRUN);
2147 
2148 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
2149 		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
2150 		unsigned long lru_pages = 0;
2151 		int has_under_min_watermark_zone = 0;
2152 
2153 		/* The swap token gets in the way of swapout... */
2154 		if (!priority)
2155 			disable_swap_token();
2156 
2157 		all_zones_ok = 1;
2158 
2159 		/*
2160 		 * Scan in the highmem->dma direction for the highest
2161 		 * zone which needs scanning
2162 		 */
2163 		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
2164 			struct zone *zone = pgdat->node_zones + i;
2165 
2166 			if (!populated_zone(zone))
2167 				continue;
2168 
2169 			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2170 				continue;
2171 
2172 			/*
2173 			 * Do some background aging of the anon list, to give
2174 			 * pages a chance to be referenced before reclaiming.
2175 			 */
2176 			if (inactive_anon_is_low(zone, &sc))
2177 				shrink_active_list(SWAP_CLUSTER_MAX, zone,
2178 							&sc, priority, 0);
2179 
2180 			if (!zone_watermark_ok(zone, order,
2181 					high_wmark_pages(zone), 0, 0)) {
2182 				end_zone = i;
2183 				break;
2184 			}
2185 		}
2186 		if (i < 0)
2187 			goto out;
2188 
2189 		for (i = 0; i <= end_zone; i++) {
2190 			struct zone *zone = pgdat->node_zones + i;
2191 
2192 			lru_pages += zone_reclaimable_pages(zone);
2193 		}
2194 
2195 		/*
2196 		 * Now scan the zones in the dma->highmem direction, stopping
2197 		 * at the last zone which needs scanning.
2198 		 *
2199 		 * We do this because the page allocator works in the opposite
2200 		 * direction.  This prevents the page allocator from allocating
2201 		 * pages behind kswapd's direction of progress, which would
2202 		 * cause too much scanning of the lower zones.
2203 		 */
2204 		for (i = 0; i <= end_zone; i++) {
2205 			struct zone *zone = pgdat->node_zones + i;
2206 			int nr_slab;
2207 
2208 			if (!populated_zone(zone))
2209 				continue;
2210 
2211 			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2212 				continue;
2213 
2214 			sc.nr_scanned = 0;
2215 
2216 			/*
2217 			 * Call soft limit reclaim before calling shrink_zone.
2218 			 * For now we ignore the return value
2219 			 * For now we ignore the return value.
2220 			mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
2221 
2222 			/*
2223 			 * We put equal pressure on every zone, unless one
2224 			 * zone has way too many pages free already.
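			 *
			 * ("Way too many" here means free pages above eight
			 * times the high watermark, checked at the classzone
			 * index of the highest unbalanced zone.)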
2225 			 */
2226 			if (!zone_watermark_ok(zone, order,
2227 					8*high_wmark_pages(zone), end_zone, 0))
2228 				shrink_zone(priority, zone, &sc);
2229 			reclaim_state->reclaimed_slab = 0;
2230 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
2231 						lru_pages);
2232 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2233 			total_scanned += sc.nr_scanned;
2234 			if (zone->all_unreclaimable)
2235 				continue;
2236 			if (nr_slab == 0 && !zone_reclaimable(zone))
2237 				zone->all_unreclaimable = 1;
2238 			/*
2239 			 * If we've done a decent amount of scanning and
2240 			 * the reclaim ratio is low, start doing writepage
2241 			 * even in laptop mode
2242 			 */
2243 			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
2244 			    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
2245 				sc.may_writepage = 1;
2246 
2247 			if (!zone_watermark_ok(zone, order,
2248 					high_wmark_pages(zone), end_zone, 0)) {
2249 				all_zones_ok = 0;
2250 				/*
2251 				 * We are still under the min watermark.  This
2252 				 * means that GFP_ATOMIC allocations are at
2253 				 * risk of failing. Hurry up!
2254 				 */
2255 				if (!zone_watermark_ok(zone, order,
2256 					    min_wmark_pages(zone), end_zone, 0))
2257 					has_under_min_watermark_zone = 1;
2258 			}
2259 
2260 		}
2261 		if (all_zones_ok)
2262 			break;		/* kswapd: all done */
2263 		/*
2264 		 * OK, kswapd is getting into trouble.  Take a nap, then take
2265 		 * another pass across the zones.
2266 		 */
2267 		if (total_scanned && (priority < DEF_PRIORITY - 2)) {
2268 			if (has_under_min_watermark_zone)
2269 				count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2270 			else
2271 				congestion_wait(BLK_RW_ASYNC, HZ/10);
2272 		}
2273 
2274 		/*
2275 		 * We do this so kswapd doesn't build up large priorities, for
2276 		 * example when it is freeing in parallel with allocators. It
2277 		 * matches the direct reclaim path behaviour in terms of impact
2278 		 * on zone->*_priority.
2279 		 */
2280 		if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
2281 			break;
2282 	}
2283 out:
2284 	if (!all_zones_ok) {
2285 		cond_resched();
2286 
2287 		try_to_freeze();
2288 
2289 		/*
2290 		 * Fragmentation may mean that the system cannot be
2291 		 * rebalanced for high-order allocations in all zones.
2292 		 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
2293 		 * it means the zones have been fully scanned and are still
2294 		 * not balanced. For high-order allocations, there is
2295 		 * little point in trying all over again, as kswapd may
2296 		 * loop indefinitely.
2297 		 *
2298 		 * Instead, recheck all watermarks at order-0 as they
2299 		 * are the most important. If watermarks are ok, kswapd will go
2300 		 * back to sleep. High-order users can still perform direct
2301 		 * reclaim if they wish.
2302 		 */
2303 		if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
2304 			order = sc.order = 0;
2305 
2306 		goto loop_again;
2307 	}
2308 
2309 	return sc.nr_reclaimed;
2310 }
2311 
2312 /*
2313  * The background pageout daemon, started as a kernel thread
2314  * from the init process.
2315  *
2316  * This basically trickles out pages so that we have _some_
2317  * free memory available even if there is no other activity
2318  * that frees anything up. This is needed for things like routing
2319  * etc, where we otherwise might have all activity going on in
2320  * asynchronous contexts that cannot page things out.
2321  *
2322  * If there are applications that are active memory-allocators
2323  * (most normal use), this basically shouldn't matter.
2324  */
2325 static int kswapd(void *p)
2326 {
2327 	unsigned long order;
2328 	pg_data_t *pgdat = (pg_data_t*)p;
2329 	struct task_struct *tsk = current;
2330 	DEFINE_WAIT(wait);
2331 	struct reclaim_state reclaim_state = {
2332 		.reclaimed_slab = 0,
2333 	};
2334 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2335 
2336 	lockdep_set_current_reclaim_state(GFP_KERNEL);
2337 
2338 	if (!cpumask_empty(cpumask))
2339 		set_cpus_allowed_ptr(tsk, cpumask);
2340 	current->reclaim_state = &reclaim_state;
2341 
2342 	/*
2343 	 * Tell the memory management that we're a "memory allocator",
2344 	 * and that if we need more memory we should get access to it
2345 	 * regardless (see "__alloc_pages()"). "kswapd" should
2346 	 * never get caught in the normal page freeing logic.
2347 	 *
2348 	 * (Kswapd normally doesn't need memory anyway, but sometimes
2349 	 * you need a small amount of memory in order to be able to
2350 	 * page out something else, and this flag essentially protects
2351 	 * us from recursively trying to free more memory as we're
2352 	 * trying to free the first piece of memory in the first place).
2353 	 */
2354 	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2355 	set_freezable();
2356 
2357 	order = 0;
2358 	for ( ; ; ) {
2359 		unsigned long new_order;
2360 		int ret;
2361 
2362 		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2363 		new_order = pgdat->kswapd_max_order;
2364 		pgdat->kswapd_max_order = 0;
2365 		if (order < new_order) {
2366 			/*
2367 			 * Don't sleep if someone wants a larger 'order'
2368 			 * allocation
2369 			 */
2370 			order = new_order;
2371 		} else {
2372 			if (!freezing(current) && !kthread_should_stop()) {
2373 				long remaining = 0;
2374 
2375 				/* Try to sleep for a short interval */
2376 				if (!sleeping_prematurely(pgdat, order, remaining)) {
2377 					remaining = schedule_timeout(HZ/10);
2378 					finish_wait(&pgdat->kswapd_wait, &wait);
2379 					prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2380 				}
2381 
2382 				/*
2383 				 * After a short sleep, check if it was a
2384 				 * premature sleep. If not, then go fully
2385 				 * to sleep until explicitly woken up
2386 				 */
2387 				if (!sleeping_prematurely(pgdat, order, remaining)) {
2388 					trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
2389 					schedule();
2390 				} else {
2391 					if (remaining)
2392 						count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
2393 					else
2394 						count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
2395 				}
2396 			}
2397 
2398 			order = pgdat->kswapd_max_order;
2399 		}
2400 		finish_wait(&pgdat->kswapd_wait, &wait);
2401 
2402 		ret = try_to_freeze();
2403 		if (kthread_should_stop())
2404 			break;
2405 
2406 		/*
2407 		 * We can speed up thawing tasks if we don't call balance_pgdat
2408 		 * after returning from the refrigerator
2409 		 */
2410 		if (!ret) {
2411 			trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
2412 			balance_pgdat(pgdat, order);
2413 		}
2414 	}
2415 	return 0;
2416 }
2417 
2418 /*
2419  * A zone is low on free memory, so wake its kswapd task to service it.
2420  */
2421 void wakeup_kswapd(struct zone *zone, int order)
2422 {
2423 	pg_data_t *pgdat;
2424 
2425 	if (!populated_zone(zone))
2426 		return;
2427 
2428 	pgdat = zone->zone_pgdat;
2429 	if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
2430 		return;
2431 	if (pgdat->kswapd_max_order < order)
2432 		pgdat->kswapd_max_order = order;
2433 	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
2434 	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2435 		return;
2436 	if (!waitqueue_active(&pgdat->kswapd_wait))
2437 		return;
2438 	wake_up_interruptible(&pgdat->kswapd_wait);
2439 }
2440 
2441 /*
2442  * The reclaimable count should be mostly accurate.
2443  * The less reclaimable pages may be
2444  * - mlocked pages, which will be moved to the unevictable list when encountered
2445  * - mapped pages, which may require several passes to be reclaimed
2446  * - dirty pages, which are not "instantly" reclaimable
2447  */
2448 unsigned long global_reclaimable_pages(void)
2449 {
2450 	int nr;
2451 
2452 	nr = global_page_state(NR_ACTIVE_FILE) +
2453 	     global_page_state(NR_INACTIVE_FILE);
2454 
2455 	if (nr_swap_pages > 0)
2456 		nr += global_page_state(NR_ACTIVE_ANON) +
2457 		      global_page_state(NR_INACTIVE_ANON);
2458 
2459 	return nr;
2460 }
2461 
2462 unsigned long zone_reclaimable_pages(struct zone *zone)
2463 {
2464 	int nr;
2465 
2466 	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
2467 	     zone_page_state(zone, NR_INACTIVE_FILE);
2468 
2469 	if (nr_swap_pages > 0)
2470 		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
2471 		      zone_page_state(zone, NR_INACTIVE_ANON);
2472 
2473 	return nr;
2474 }
2475 
2476 #ifdef CONFIG_HIBERNATION
2477 /*
2478  * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the
2479  * number of freed pages.
2480  *
2481  * Rather than trying to age LRUs, the aim is to preserve the overall
2482  * LRU order by reclaiming preferentially
2483  * inactive > active > active referenced > active mapped
2484  */
2485 unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
2486 {
2487 	struct reclaim_state reclaim_state;
2488 	struct scan_control sc = {
2489 		.gfp_mask = GFP_HIGHUSER_MOVABLE,
2490 		.may_swap = 1,
2491 		.may_unmap = 1,
2492 		.may_writepage = 1,
2493 		.nr_to_reclaim = nr_to_reclaim,
2494 		.hibernation_mode = 1,
2495 		.swappiness = vm_swappiness,
2496 		.order = 0,
2497 	};
2498 	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
2499 	struct task_struct *p = current;
2500 	unsigned long nr_reclaimed;
2501 
2502 	p->flags |= PF_MEMALLOC;
2503 	lockdep_set_current_reclaim_state(sc.gfp_mask);
2504 	reclaim_state.reclaimed_slab = 0;
2505 	p->reclaim_state = &reclaim_state;
2506 
2507 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2508 
2509 	p->reclaim_state = NULL;
2510 	lockdep_clear_current_reclaim_state();
2511 	p->flags &= ~PF_MEMALLOC;
2512 
2513 	return nr_reclaimed;
2514 }
2515 #endif /* CONFIG_HIBERNATION */
2516 
2517 /* It's optimal to keep kswapds on the same CPUs as their memory, but
2518    not required for correctness.  So if the last cpu in a node goes
2519    away, we get changed to run anywhere: as the first one comes back,
2520    restore their cpu bindings. */
2521 static int __devinit cpu_callback(struct notifier_block *nfb,
2522 				  unsigned long action, void *hcpu)
2523 {
2524 	int nid;
2525 
2526 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
2527 		for_each_node_state(nid, N_HIGH_MEMORY) {
2528 			pg_data_t *pgdat = NODE_DATA(nid);
2529 			const struct cpumask *mask;
2530 
2531 			mask = cpumask_of_node(pgdat->node_id);
2532 
2533 			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2534 				/* One of our CPUs online: restore mask */
2535 				set_cpus_allowed_ptr(pgdat->kswapd, mask);
2536 		}
2537 	}
2538 	return NOTIFY_OK;
2539 }
2540 
2541 /*
2542  * This kswapd start function will be called by init and node-hot-add.
2543  * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
2544  */
2545 int kswapd_run(int nid)
2546 {
2547 	pg_data_t *pgdat = NODE_DATA(nid);
2548 	int ret = 0;
2549 
2550 	if (pgdat->kswapd)
2551 		return 0;
2552 
2553 	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
2554 	if (IS_ERR(pgdat->kswapd)) {
2555 		/* failure at boot is fatal */
2556 		BUG_ON(system_state == SYSTEM_BOOTING);
2557 		printk(KERN_ERR "Failed to start kswapd on node %d\n", nid);
2558 		ret = -1;
2559 	}
2560 	return ret;
2561 }
2562 
2563 /*
2564  * Called by memory hotplug when all memory in a node is offlined.
2565  */
2566 void kswapd_stop(int nid)
2567 {
2568 	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
2569 
2570 	if (kswapd)
2571 		kthread_stop(kswapd);
2572 }
2573 
2574 static int __init kswapd_init(void)
2575 {
2576 	int nid;
2577 
2578 	swap_setup();
2579 	for_each_node_state(nid, N_HIGH_MEMORY)
2580 		kswapd_run(nid);
2581 	hotcpu_notifier(cpu_callback, 0);
2582 	return 0;
2583 }
2584 
2585 module_init(kswapd_init)
2586 
2587 #ifdef CONFIG_NUMA
2588 /*
2589  * Zone reclaim mode
2590  *
2591  * If non-zero, call zone_reclaim() when the number of free pages falls below
2592  * the watermarks.
2593  */
2594 int zone_reclaim_mode __read_mostly;
2595 
2596 #define RECLAIM_OFF 0
2597 #define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
2598 #define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
2599 #define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */
2600 
2601 /*
2602  * Priority for ZONE_RECLAIM. This determines the fraction of pages
2603  * of a node considered for each zone_reclaim. A priority of 4 scans
2604  * 1/16th of a zone.
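 *
 * (Each priority level halves the scan window: get_scan_count() does
 * scan >>= priority, so priority 4 yields 1/2^4 == 1/16 of the zone.)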
2605  */
2606 #define ZONE_RECLAIM_PRIORITY 4
2607 
2608 /*
2609  * Percentage of pages in a zone that must be unmapped for zone_reclaim to
2610  * occur.
2611  */
2612 int sysctl_min_unmapped_ratio = 1;
2613 
2614 /*
2615  * If the number of slab pages in a zone grows beyond this percentage then
2616  * slab reclaim needs to occur.
2617  */
2618 int sysctl_min_slab_ratio = 5;
2619 
2620 static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
2621 {
2622 	unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
2623 	unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
2624 		zone_page_state(zone, NR_ACTIVE_FILE);
2625 
2626 	/*
2627 	 * It's possible for there to be more file mapped pages than
2628 	 * accounted for by the pages on the file LRU lists because
2629 	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
2630 	 */
2631 	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
2632 }
2633 
2634 /* Work out how many page cache pages we can reclaim in this reclaim_mode */
2635 static long zone_pagecache_reclaimable(struct zone *zone)
2636 {
2637 	long nr_pagecache_reclaimable;
2638 	long delta = 0;
2639 
2640 	/*
2641 	 * If RECLAIM_SWAP is set, then all file pages are considered
2642 	 * potentially reclaimable. Otherwise, we have to worry about
2643 	 * pages like swapcache, and zone_unmapped_file_pages() provides
2644 	 * a better estimate.
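	 *
	 * (Illustrative case: with RECLAIM_WRITE clear, a zone whose
	 * unmapped file pages are all dirty will report close to zero
	 * reclaimable pagecache here.)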
2645 	 */
2646 	if (zone_reclaim_mode & RECLAIM_SWAP)
2647 		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
2648 	else
2649 		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
2650 
2651 	/* If we can't clean pages, remove dirty pages from consideration */
2652 	if (!(zone_reclaim_mode & RECLAIM_WRITE))
2653 		delta += zone_page_state(zone, NR_FILE_DIRTY);
2654 
2655 	/* Watch for any possible underflows due to delta */
2656 	if (unlikely(delta > nr_pagecache_reclaimable))
2657 		delta = nr_pagecache_reclaimable;
2658 
2659 	return nr_pagecache_reclaimable - delta;
2660 }
2661 
2662 /*
2663  * Try to free up some pages from this zone through reclaim.
2664  */
2665 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2666 {
2667 	/* Minimum pages needed in order to stay on node */
2668 	const unsigned long nr_pages = 1 << order;
2669 	struct task_struct *p = current;
2670 	struct reclaim_state reclaim_state;
2671 	int priority;
2672 	struct scan_control sc = {
2673 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
2674 		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
2675 		.may_swap = 1,
2676 		.nr_to_reclaim = max_t(unsigned long, nr_pages,
2677 				       SWAP_CLUSTER_MAX),
2678 		.gfp_mask = gfp_mask,
2679 		.swappiness = vm_swappiness,
2680 		.order = order,
2681 	};
2682 	unsigned long nr_slab_pages0, nr_slab_pages1;
2683 
2684 	cond_resched();
2685 	/*
2686 	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
2687 	 * and we also need to be able to write out pages for RECLAIM_WRITE
2688 	 * and RECLAIM_SWAP.
2689 	 */
2690 	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
2691 	lockdep_set_current_reclaim_state(gfp_mask);
2692 	reclaim_state.reclaimed_slab = 0;
2693 	p->reclaim_state = &reclaim_state;
2694 
2695 	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
2696 		/*
2697 		 * Free memory by calling shrink_zone() with increasing
2698 		 * priorities until we have enough memory freed.
2699 		 */
2700 		priority = ZONE_RECLAIM_PRIORITY;
2701 		do {
2702 			shrink_zone(priority, zone, &sc);
2703 			priority--;
2704 		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
2705 	}
2706 
2707 	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2708 	if (nr_slab_pages0 > zone->min_slab_pages) {
2709 		/*
2710 		 * shrink_slab() does not currently allow us to determine how
2711 		 * many pages were freed in this zone. So we take the current
2712 		 * number of slab pages and shake the slab until it is reduced
2713 		 * by the same nr_pages that we used for reclaiming unmapped
2714 		 * pages.
2715 		 *
2716 		 * Note that shrink_slab will free memory on all zones and may
2717 		 * take a long time.
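		 *
		 * (For example, an order-2 request has nr_pages == 4, so we
		 * keep calling shrink_slab() until NR_SLAB_RECLAIMABLE has
		 * dropped by at least four pages or nothing more can be
		 * freed.)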
2718 		 */
2719 		for (;;) {
2720 			unsigned long lru_pages = zone_reclaimable_pages(zone);
2721 
2722 			/* No reclaimable slab or very low memory pressure */
2723 			if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages))
2724 				break;
2725 
2726 			/* Freed enough memory */
2727 			nr_slab_pages1 = zone_page_state(zone,
2728 							NR_SLAB_RECLAIMABLE);
2729 			if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
2730 				break;
2731 		}
2732 
2733 		/*
2734 		 * Update nr_reclaimed by the number of slab pages we
2735 		 * reclaimed from this zone.
2736 		 */
2737 		nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2738 		if (nr_slab_pages1 < nr_slab_pages0)
2739 			sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
2740 	}
2741 
2742 	p->reclaim_state = NULL;
2743 	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
2744 	lockdep_clear_current_reclaim_state();
2745 	return sc.nr_reclaimed >= nr_pages;
2746 }
2747 
2748 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2749 {
2750 	int node_id;
2751 	int ret;
2752 
2753 	/*
2754 	 * Zone reclaim reclaims unmapped file backed pages and
2755 	 * slab pages if we are over the defined limits.
2756 	 *
2757 	 * A small portion of unmapped file backed pages is needed for
2758 	 * file I/O; otherwise pages read by file I/O will be immediately
2759 	 * thrown out if the zone is overallocated. So we do not reclaim
2760 	 * if less than a specified percentage of the zone is used by
2761 	 * unmapped file backed pages.
2762 	 */
2763 	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
2764 	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
2765 		return ZONE_RECLAIM_FULL;
2766 
2767 	if (zone->all_unreclaimable)
2768 		return ZONE_RECLAIM_FULL;
2769 
2770 	/*
2771 	 * Do not scan if the allocation should not be delayed.
2772 	 */
2773 	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
2774 		return ZONE_RECLAIM_NOSCAN;
2775 
2776 	/*
2777 	 * Only run zone reclaim on the local zone or on zones that do not
2778 	 * have associated processors. This will favor the local processor
2779 	 * over remote processors and spread off node memory allocations
2780 	 * as widely as possible.
2781 	 */
2782 	node_id = zone_to_nid(zone);
2783 	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
2784 		return ZONE_RECLAIM_NOSCAN;
2785 
2786 	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
2787 		return ZONE_RECLAIM_NOSCAN;
2788 
2789 	ret = __zone_reclaim(zone, gfp_mask, order);
2790 	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
2791 
2792 	if (!ret)
2793 		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
2794 
2795 	return ret;
2796 }
2797 #endif
2798 
2799 /*
2800  * page_evictable - test whether a page is evictable
2801  * @page: the page to test
2802  * @vma: the VMA in which the page is or will be mapped, may be NULL
2803  *
2804  * Test whether page is evictable--i.e., should be placed on active/inactive
2805  * lists vs unevictable list.  The vma argument is !NULL when called from the
2806  * fault path to determine how to instantiate a new page.
2807  *
2808  * Reasons page might not be evictable:
2809  * (1) page's mapping marked unevictable
2810  * (2) page is part of an mlocked VMA
2811  *
2812  */
2813 int page_evictable(struct page *page, struct vm_area_struct *vma)
2814 {
2815 
2816 	if (mapping_unevictable(page_mapping(page)))
2817 		return 0;
2818 
2819 	if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
2820 		return 0;
2821 
2822 	return 1;
2823 }
2824 
2825 /**
2826  * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
2827  * @page: page to check evictability and move to appropriate lru list
2828  * @zone: zone page is in
2829  *
2830  * Checks a page for evictability and moves the page to the appropriate
2831  * zone lru list.
2832  *
2833  * Restrictions: zone->lru_lock must be held, page must be on LRU and must
2834  * have PageUnevictable set.
2835  */
2836 static void check_move_unevictable_page(struct page *page, struct zone *zone)
2837 {
2838 	VM_BUG_ON(PageActive(page));
2839 
2840 retry:
2841 	ClearPageUnevictable(page);
2842 	if (page_evictable(page, NULL)) {
2843 		enum lru_list l = page_lru_base_type(page);
2844 
2845 		__dec_zone_state(zone, NR_UNEVICTABLE);
2846 		list_move(&page->lru, &zone->lru[l].list);
2847 		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
2848 		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
2849 		__count_vm_event(UNEVICTABLE_PGRESCUED);
2850 	} else {
2851 		/*
2852 		 * rotate unevictable list
2853 		 */
2854 		SetPageUnevictable(page);
2855 		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
2856 		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
2857 		if (page_evictable(page, NULL))
2858 			goto retry;
2859 	}
2860 }
2861 
2862 /**
2863  * scan_mapping_unevictable_pages - scan an address space for evictable pages
2864  * @mapping: struct address_space to scan for evictable pages
2865  *
2866  * Scan all pages in mapping.  Check unevictable pages for
2867  * evictability and move them to the appropriate zone lru list.
2868  */
2869 void scan_mapping_unevictable_pages(struct address_space *mapping)
2870 {
2871 	pgoff_t next = 0;
2872 	pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
2873 			 PAGE_CACHE_SHIFT;
2874 	struct zone *zone;
2875 	struct pagevec pvec;
2876 
2877 	if (mapping->nrpages == 0)
2878 		return;
2879 
2880 	pagevec_init(&pvec, 0);
2881 	while (next < end &&
2882 		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
2883 		int i;
2884 		int pg_scanned = 0;
2885 
2886 		zone = NULL;
2887 
2888 		for (i = 0; i < pagevec_count(&pvec); i++) {
2889 			struct page *page = pvec.pages[i];
2890 			pgoff_t page_index = page->index;
2891 			struct zone *pagezone = page_zone(page);
2892 
2893 			pg_scanned++;
2894 			if (page_index > next)
2895 				next = page_index;
2896 			next++;
2897 
2898 			if (pagezone != zone) {
2899 				if (zone)
2900 					spin_unlock_irq(&zone->lru_lock);
2901 				zone = pagezone;
2902 				spin_lock_irq(&zone->lru_lock);
2903 			}
2904 
2905 			if (PageLRU(page) && PageUnevictable(page))
2906 				check_move_unevictable_page(page, zone);
2907 		}
2908 		if (zone)
2909 			spin_unlock_irq(&zone->lru_lock);
2910 		pagevec_release(&pvec);
2911 
2912 		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
2913 	}
2914 
2915 }
2916 
2917 /**
2918  * scan_zone_unevictable_pages - check unevictable list for evictable pages
2919  * @zone: zone whose unevictable list is to be scanned
2920  *
2921  * Scan @zone's unevictable LRU lists to check for pages that have become
2922  * evictable.  Move those that have to @zone's inactive list where they
2923  * become candidates for reclaim, unless shrink_inactive_zone() decides
2924  * to reactivate them.  Pages that are still unevictable are rotated
2925  * back onto @zone's unevictable list.
2926  */
2927 #define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
2928 static void scan_zone_unevictable_pages(struct zone *zone)
2929 {
2930 	struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
2931 	unsigned long scan;
2932 	unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
2933 
2934 	while (nr_to_scan > 0) {
2935 		unsigned long batch_size = min(nr_to_scan,
2936 						SCAN_UNEVICTABLE_BATCH_SIZE);
2937 
2938 		spin_lock_irq(&zone->lru_lock);
2939 		for (scan = 0;  scan < batch_size; scan++) {
2940 			struct page *page = lru_to_page(l_unevictable);
2941 
2942 			if (!trylock_page(page))
2943 				continue;
2944 
2945 			prefetchw_prev_lru_page(page, l_unevictable, flags);
2946 
2947 			if (likely(PageLRU(page) && PageUnevictable(page)))
2948 				check_move_unevictable_page(page, zone);
2949 
2950 			unlock_page(page);
2951 		}
2952 		spin_unlock_irq(&zone->lru_lock);
2953 
2954 		nr_to_scan -= batch_size;
2955 	}
2956 }
2957 
2958 
2959 /**
2960  * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
2961  *
2962  * A really big hammer:  scan all zones' unevictable LRU lists to check for
2963  * pages that have become evictable.  Move those back to the zones'
2964  * inactive list where they become candidates for reclaim.
2965  * This occurs when, e.g., we have unswappable pages on the unevictable lists,
2966  * and we add swap to the system.  As such, it runs in the context of a task
2967  * that has possibly/probably made some previously unevictable pages
2968  * evictable.
2969  */
2970 static void scan_all_zones_unevictable_pages(void)
2971 {
2972 	struct zone *zone;
2973 
2974 	for_each_zone(zone) {
2975 		scan_zone_unevictable_pages(zone);
2976 	}
2977 }
2978 
2979 /*
2980  * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
2981  * all nodes' unevictable lists for evictable pages
2982  */
2983 unsigned long scan_unevictable_pages;
2984 
2985 int scan_unevictable_handler(struct ctl_table *table, int write,
2986 			   void __user *buffer,
2987 			   size_t *length, loff_t *ppos)
2988 {
2989 	proc_doulongvec_minmax(table, write, buffer, length, ppos);
2990 
2991 	if (write && *(unsigned long *)table->data)
2992 		scan_all_zones_unevictable_pages();
2993 
2994 	scan_unevictable_pages = 0;
2995 	return 0;
2996 }
2997 
2998 #ifdef CONFIG_NUMA
2999 /*
3000  * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
3001  * a specified node's per zone unevictable lists for evictable pages.
3002  */
3003 
3004 static ssize_t read_scan_unevictable_node(struct sys_device *dev,
3005 					  struct sysdev_attribute *attr,
3006 					  char *buf)
3007 {
3008 	return sprintf(buf, "0\n");	/* always zero; should fit... */
3009 }
3010 
3011 static ssize_t write_scan_unevictable_node(struct sys_device *dev,
3012 					   struct sysdev_attribute *attr,
3013 					const char *buf, size_t count)
3014 {
3015 	struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
3016 	struct zone *zone;
3017 	unsigned long res;
3018 	int err = strict_strtoul(buf, 10, &res);
3019 
3020 	if (err || !res)
3021 		return 1;	/* invalid input or zero is a no-op */
3022 
3023 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
3024 		if (!populated_zone(zone))
3025 			continue;
3026 		scan_zone_unevictable_pages(zone);
3027 	}
3028 	return 1;
3029 }
3030 
3031 
3032 static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
3033 			read_scan_unevictable_node,
3034 			write_scan_unevictable_node);
3035 
3036 int scan_unevictable_register_node(struct node *node)
3037 {
3038 	return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
3039 }
3040 
3041 void scan_unevictable_unregister_node(struct node *node)
3042 {
3043 	sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
3044 }
3045 #endif
3046