xref: /openbmc/linux/mm/compaction.c (revision e0b9daeb453e602a95ea43853dc12d385558ce1f)
1748446bbSMel Gorman /*
2748446bbSMel Gorman  * linux/mm/compaction.c
3748446bbSMel Gorman  *
4748446bbSMel Gorman  * Memory compaction for the reduction of external fragmentation. Note that
5748446bbSMel Gorman  * this heavily depends upon page migration to do all the real heavy
6748446bbSMel Gorman  * lifting.
7748446bbSMel Gorman  *
8748446bbSMel Gorman  * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
9748446bbSMel Gorman  */
10748446bbSMel Gorman #include <linux/swap.h>
11748446bbSMel Gorman #include <linux/migrate.h>
12748446bbSMel Gorman #include <linux/compaction.h>
13748446bbSMel Gorman #include <linux/mm_inline.h>
14748446bbSMel Gorman #include <linux/backing-dev.h>
1576ab0f53SMel Gorman #include <linux/sysctl.h>
16ed4a6d7fSMel Gorman #include <linux/sysfs.h>
17bf6bddf1SRafael Aquini #include <linux/balloon_compaction.h>
18194159fbSMinchan Kim #include <linux/page-isolation.h>
19748446bbSMel Gorman #include "internal.h"
20748446bbSMel Gorman 
21010fc29aSMinchan Kim #ifdef CONFIG_COMPACTION
22010fc29aSMinchan Kim static inline void count_compact_event(enum vm_event_item item)
23010fc29aSMinchan Kim {
24010fc29aSMinchan Kim 	count_vm_event(item);
25010fc29aSMinchan Kim }
26010fc29aSMinchan Kim 
27010fc29aSMinchan Kim static inline void count_compact_events(enum vm_event_item item, long delta)
28010fc29aSMinchan Kim {
29010fc29aSMinchan Kim 	count_vm_events(item, delta);
30010fc29aSMinchan Kim }
31010fc29aSMinchan Kim #else
32010fc29aSMinchan Kim #define count_compact_event(item) do { } while (0)
33010fc29aSMinchan Kim #define count_compact_events(item, delta) do { } while (0)
34010fc29aSMinchan Kim #endif
35010fc29aSMinchan Kim 
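/*
 * Illustrative sketch, not part of this file: the pair above is the standard
 * kernel idiom for compiling instrumentation away. When the config option is
 * off, the do { } while (0) macros make the calls vanish while still parsing
 * as a single statement after an if. A standalone userspace analogue (the
 * FEATURE_STATS switch, bump() and counter are invented names):
 */
#include <stdio.h>

#define FEATURE_STATS 1			/* flip to 0 to compile the calls away */

#if FEATURE_STATS
static long counter;
static inline void bump(long delta) { counter += delta; }
#else
#define bump(delta) do { } while (0)	/* still swallows the trailing semicolon */
#endif

int main(void)
{
	bump(3);
	bump(2);
#if FEATURE_STATS
	printf("counter = %ld\n", counter);	/* prints: counter = 5 */
#endif
	return 0;
}
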
36ff9543fdSMichal Nazarewicz #if defined CONFIG_COMPACTION || defined CONFIG_CMA
37ff9543fdSMichal Nazarewicz 
38b7aba698SMel Gorman #define CREATE_TRACE_POINTS
39b7aba698SMel Gorman #include <trace/events/compaction.h>
40b7aba698SMel Gorman 
41748446bbSMel Gorman static unsigned long release_freepages(struct list_head *freelist)
42748446bbSMel Gorman {
43748446bbSMel Gorman 	struct page *page, *next;
44748446bbSMel Gorman 	unsigned long count = 0;
45748446bbSMel Gorman 
46748446bbSMel Gorman 	list_for_each_entry_safe(page, next, freelist, lru) {
47748446bbSMel Gorman 		list_del(&page->lru);
48748446bbSMel Gorman 		__free_page(page);
49748446bbSMel Gorman 		count++;
50748446bbSMel Gorman 	}
51748446bbSMel Gorman 
52748446bbSMel Gorman 	return count;
53748446bbSMel Gorman }
54748446bbSMel Gorman 
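/*
 * Illustrative sketch, not kernel code: release_freepages() must use the
 * _safe list iterator because each node is freed mid-walk; a plain iterator
 * would read page->lru.next out of freed memory. The same save-the-next
 * discipline on a minimal singly linked list (struct node and release_all()
 * are invented for the demo):
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static unsigned long release_all(struct node *head)
{
	struct node *n, *next;
	unsigned long count = 0;

	for (n = head; n; n = next) {
		next = n->next;		/* saved first: n is gone after free() */
		free(n);
		count++;
	}
	return count;
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));
		n->next = head;
		head = n;
	}
	printf("freed %lu nodes\n", release_all(head));	/* freed 4 nodes */
	return 0;
}
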
55ff9543fdSMichal Nazarewicz static void map_pages(struct list_head *list)
56ff9543fdSMichal Nazarewicz {
57ff9543fdSMichal Nazarewicz 	struct page *page;
58ff9543fdSMichal Nazarewicz 
59ff9543fdSMichal Nazarewicz 	list_for_each_entry(page, list, lru) {
60ff9543fdSMichal Nazarewicz 		arch_alloc_page(page, 0);
61ff9543fdSMichal Nazarewicz 		kernel_map_pages(page, 1, 1);
62ff9543fdSMichal Nazarewicz 	}
63ff9543fdSMichal Nazarewicz }
64ff9543fdSMichal Nazarewicz 
6547118af0SMichal Nazarewicz static inline bool migrate_async_suitable(int migratetype)
6647118af0SMichal Nazarewicz {
6747118af0SMichal Nazarewicz 	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
6847118af0SMichal Nazarewicz }
6947118af0SMichal Nazarewicz 
70bb13ffebSMel Gorman #ifdef CONFIG_COMPACTION
71bb13ffebSMel Gorman /* Returns true if the pageblock should be scanned for pages to isolate. */
72bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc,
73bb13ffebSMel Gorman 					struct page *page)
74bb13ffebSMel Gorman {
75bb13ffebSMel Gorman 	if (cc->ignore_skip_hint)
76bb13ffebSMel Gorman 		return true;
77bb13ffebSMel Gorman 
78bb13ffebSMel Gorman 	return !get_pageblock_skip(page);
79bb13ffebSMel Gorman }
80bb13ffebSMel Gorman 
81bb13ffebSMel Gorman /*
82bb13ffebSMel Gorman  * This function is called to clear all cached information on pageblocks that
83bb13ffebSMel Gorman  * should be skipped for page isolation when the migrate and free page scanner
84bb13ffebSMel Gorman  * meet.
85bb13ffebSMel Gorman  */
8662997027SMel Gorman static void __reset_isolation_suitable(struct zone *zone)
87bb13ffebSMel Gorman {
88bb13ffebSMel Gorman 	unsigned long start_pfn = zone->zone_start_pfn;
89108bcc96SCody P Schafer 	unsigned long end_pfn = zone_end_pfn(zone);
90bb13ffebSMel Gorman 	unsigned long pfn;
91bb13ffebSMel Gorman 
9235979ef3SDavid Rientjes 	zone->compact_cached_migrate_pfn[0] = start_pfn;
9335979ef3SDavid Rientjes 	zone->compact_cached_migrate_pfn[1] = start_pfn;
94c89511abSMel Gorman 	zone->compact_cached_free_pfn = end_pfn;
9562997027SMel Gorman 	zone->compact_blockskip_flush = false;
96bb13ffebSMel Gorman 
97bb13ffebSMel Gorman 	/* Walk the zone and mark every pageblock as suitable for isolation */
98bb13ffebSMel Gorman 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
99bb13ffebSMel Gorman 		struct page *page;
100bb13ffebSMel Gorman 
101bb13ffebSMel Gorman 		cond_resched();
102bb13ffebSMel Gorman 
103bb13ffebSMel Gorman 		if (!pfn_valid(pfn))
104bb13ffebSMel Gorman 			continue;
105bb13ffebSMel Gorman 
106bb13ffebSMel Gorman 		page = pfn_to_page(pfn);
107bb13ffebSMel Gorman 		if (zone != page_zone(page))
108bb13ffebSMel Gorman 			continue;
109bb13ffebSMel Gorman 
110bb13ffebSMel Gorman 		clear_pageblock_skip(page);
111bb13ffebSMel Gorman 	}
112bb13ffebSMel Gorman }
113bb13ffebSMel Gorman 
11462997027SMel Gorman void reset_isolation_suitable(pg_data_t *pgdat)
11562997027SMel Gorman {
11662997027SMel Gorman 	int zoneid;
11762997027SMel Gorman 
11862997027SMel Gorman 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
11962997027SMel Gorman 		struct zone *zone = &pgdat->node_zones[zoneid];
12062997027SMel Gorman 		if (!populated_zone(zone))
12162997027SMel Gorman 			continue;
12262997027SMel Gorman 
12362997027SMel Gorman 		/* Only flush if a full compaction finished recently */
12462997027SMel Gorman 		if (zone->compact_blockskip_flush)
12562997027SMel Gorman 			__reset_isolation_suitable(zone);
12662997027SMel Gorman 	}
12762997027SMel Gorman }
12862997027SMel Gorman 
129bb13ffebSMel Gorman /*
130bb13ffebSMel Gorman  * If no pages were isolated then mark this pageblock to be skipped in the
13162997027SMel Gorman  * future. The information is later cleared by __reset_isolation_suitable().
132bb13ffebSMel Gorman  */
133c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc,
134c89511abSMel Gorman 			struct page *page, unsigned long nr_isolated,
13535979ef3SDavid Rientjes 			bool set_unsuitable, bool migrate_scanner)
136bb13ffebSMel Gorman {
137c89511abSMel Gorman 	struct zone *zone = cc->zone;
13835979ef3SDavid Rientjes 	unsigned long pfn;
1396815bf3fSJoonsoo Kim 
1406815bf3fSJoonsoo Kim 	if (cc->ignore_skip_hint)
1416815bf3fSJoonsoo Kim 		return;
1426815bf3fSJoonsoo Kim 
143bb13ffebSMel Gorman 	if (!page)
144bb13ffebSMel Gorman 		return;
145bb13ffebSMel Gorman 
14635979ef3SDavid Rientjes 	if (nr_isolated)
14735979ef3SDavid Rientjes 		return;
14835979ef3SDavid Rientjes 
14935979ef3SDavid Rientjes 	/*
15035979ef3SDavid Rientjes 	 * Only skip pageblocks when all forms of compaction are known to
15135979ef3SDavid Rientjes 	 * fail in the near future.
15235979ef3SDavid Rientjes 	 */
15335979ef3SDavid Rientjes 	if (set_unsuitable)
154bb13ffebSMel Gorman 		set_pageblock_skip(page);
155c89511abSMel Gorman 
15635979ef3SDavid Rientjes 	pfn = page_to_pfn(page);
15735979ef3SDavid Rientjes 
15835979ef3SDavid Rientjes 	/* Update where async and sync compaction should restart */
159c89511abSMel Gorman 	if (migrate_scanner) {
16035979ef3SDavid Rientjes 		if (cc->finished_update_migrate)
16135979ef3SDavid Rientjes 			return;
16235979ef3SDavid Rientjes 		if (pfn > zone->compact_cached_migrate_pfn[0])
16335979ef3SDavid Rientjes 			zone->compact_cached_migrate_pfn[0] = pfn;
164*e0b9daebSDavid Rientjes 		if (cc->mode != MIGRATE_ASYNC &&
165*e0b9daebSDavid Rientjes 		    pfn > zone->compact_cached_migrate_pfn[1])
16635979ef3SDavid Rientjes 			zone->compact_cached_migrate_pfn[1] = pfn;
167c89511abSMel Gorman 	} else {
16835979ef3SDavid Rientjes 		if (cc->finished_update_free)
16935979ef3SDavid Rientjes 			return;
17035979ef3SDavid Rientjes 		if (pfn < zone->compact_cached_free_pfn)
171c89511abSMel Gorman 			zone->compact_cached_free_pfn = pfn;
172c89511abSMel Gorman 	}
173c89511abSMel Gorman }
174bb13ffebSMel Gorman #else
175bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc,
176bb13ffebSMel Gorman 					struct page *page)
177bb13ffebSMel Gorman {
178bb13ffebSMel Gorman 	return true;
179bb13ffebSMel Gorman }
180bb13ffebSMel Gorman 
181c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc,
182c89511abSMel Gorman 			struct page *page, unsigned long nr_isolated,
18335979ef3SDavid Rientjes 			bool set_unsuitable, bool migrate_scanner)
184bb13ffebSMel Gorman {
185bb13ffebSMel Gorman }
186bb13ffebSMel Gorman #endif /* CONFIG_COMPACTION */
187bb13ffebSMel Gorman 
1882a1402aaSMel Gorman static inline bool should_release_lock(spinlock_t *lock)
1892a1402aaSMel Gorman {
1902a1402aaSMel Gorman 	return need_resched() || spin_is_contended(lock);
1912a1402aaSMel Gorman }
1922a1402aaSMel Gorman 
19385aa125fSMichal Nazarewicz /*
194c67fe375SMel Gorman  * Compaction requires the taking of some coarse locks that are potentially
195c67fe375SMel Gorman  * very heavily contended. Check if the process needs to be scheduled or
196c67fe375SMel Gorman  * if the lock is contended. For async compaction, back out in the event
197c67fe375SMel Gorman  * if the lock is contended. For async compaction, back out in the event
198c67fe375SMel Gorman  * of severe contention. For sync compaction, schedule.
199c67fe375SMel Gorman  * Returns true if the lock is held.
200c67fe375SMel Gorman  * Returns false if the lock is released and compaction should abort.
201c67fe375SMel Gorman  */
202c67fe375SMel Gorman static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
203c67fe375SMel Gorman 				      bool locked, struct compact_control *cc)
204c67fe375SMel Gorman {
2052a1402aaSMel Gorman 	if (should_release_lock(lock)) {
206c67fe375SMel Gorman 		if (locked) {
207c67fe375SMel Gorman 			spin_unlock_irqrestore(lock, *flags);
208c67fe375SMel Gorman 			locked = false;
209c67fe375SMel Gorman 		}
210c67fe375SMel Gorman 
211c67fe375SMel Gorman 		/* async aborts if taking too long or contended */
212*e0b9daebSDavid Rientjes 		if (cc->mode == MIGRATE_ASYNC) {
213e64c5237SShaohua Li 			cc->contended = true;
214c67fe375SMel Gorman 			return false;
215c67fe375SMel Gorman 		}
216c67fe375SMel Gorman 
217c67fe375SMel Gorman 		cond_resched();
218c67fe375SMel Gorman 	}
219c67fe375SMel Gorman 
220c67fe375SMel Gorman 	if (!locked)
221c67fe375SMel Gorman 		spin_lock_irqsave(lock, *flags);
222c67fe375SMel Gorman 	return true;
223c67fe375SMel Gorman }
224c67fe375SMel Gorman 
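/*
 * Illustrative sketch, not kernel code: the helper above encodes a
 * contention-backoff pattern around a coarse lock. A rough userspace
 * analogue with pthreads (contended() is a stand-in for
 * need_resched()/spin_is_contended(), and there is no irqsave equivalent):
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static bool contended(void) { return false; }	/* stand-in predicate */

/* Returns true with the lock held, false if an async caller backed out. */
static bool checklock(bool *locked, bool async, bool *aborted)
{
	if (contended()) {
		if (*locked) {
			pthread_mutex_unlock(&lock);
			*locked = false;
		}
		if (async) {		/* async gives up instead of waiting */
			*aborted = true;
			return false;
		}
		sched_yield();		/* the cond_resched() analogue */
	}
	if (!*locked) {
		pthread_mutex_lock(&lock);
		*locked = true;
	}
	return true;
}

int main(void)
{
	bool locked = false, aborted = false;

	if (checklock(&locked, true, &aborted))
		printf("lock held, aborted=%d\n", aborted);	/* aborted=0 */
	if (locked)
		pthread_mutex_unlock(&lock);
	return 0;
}
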
225f40d1e42SMel Gorman /* Returns true if the page is within a block suitable for migration to */
226f40d1e42SMel Gorman static bool suitable_migration_target(struct page *page)
227f40d1e42SMel Gorman {
2287d348b9eSJoonsoo Kim 	/* If the page is a large free page, then disallow migration */
229f40d1e42SMel Gorman 	if (PageBuddy(page) && page_order(page) >= pageblock_order)
2307d348b9eSJoonsoo Kim 		return false;
231f40d1e42SMel Gorman 
232f40d1e42SMel Gorman 	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
2337d348b9eSJoonsoo Kim 	if (migrate_async_suitable(get_pageblock_migratetype(page)))
234f40d1e42SMel Gorman 		return true;
235f40d1e42SMel Gorman 
236f40d1e42SMel Gorman 	/* Otherwise skip the block */
237f40d1e42SMel Gorman 	return false;
238f40d1e42SMel Gorman }
239f40d1e42SMel Gorman 
240c67fe375SMel Gorman /*
2419e4be470SJerome Marchand  * Isolate free pages onto a private freelist. If @strict is true, abort and
2429e4be470SJerome Marchand  * return 0 on any invalid PFNs or non-free pages inside the pageblock
2439e4be470SJerome Marchand  * (even though it may still end up isolating some pages).
24485aa125fSMichal Nazarewicz  */
245f40d1e42SMel Gorman static unsigned long isolate_freepages_block(struct compact_control *cc,
246f40d1e42SMel Gorman 				unsigned long blockpfn,
24785aa125fSMichal Nazarewicz 				unsigned long end_pfn,
24885aa125fSMichal Nazarewicz 				struct list_head *freelist,
24985aa125fSMichal Nazarewicz 				bool strict)
250748446bbSMel Gorman {
251b7aba698SMel Gorman 	int nr_scanned = 0, total_isolated = 0;
252bb13ffebSMel Gorman 	struct page *cursor, *valid_page = NULL;
253f40d1e42SMel Gorman 	unsigned long flags;
254f40d1e42SMel Gorman 	bool locked = false;
25501ead534SJoonsoo Kim 	bool checked_pageblock = false;
256748446bbSMel Gorman 
257748446bbSMel Gorman 	cursor = pfn_to_page(blockpfn);
258748446bbSMel Gorman 
259f40d1e42SMel Gorman 	/* Isolate free pages. */
260748446bbSMel Gorman 	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
261748446bbSMel Gorman 		int isolated, i;
262748446bbSMel Gorman 		struct page *page = cursor;
263748446bbSMel Gorman 
264b7aba698SMel Gorman 		nr_scanned++;
265f40d1e42SMel Gorman 		if (!pfn_valid_within(blockpfn))
2662af120bcSLaura Abbott 			goto isolate_fail;
2672af120bcSLaura Abbott 
268bb13ffebSMel Gorman 		if (!valid_page)
269bb13ffebSMel Gorman 			valid_page = page;
270f40d1e42SMel Gorman 		if (!PageBuddy(page))
2712af120bcSLaura Abbott 			goto isolate_fail;
272f40d1e42SMel Gorman 
273f40d1e42SMel Gorman 		/*
274f40d1e42SMel Gorman 		 * The zone lock must be held to isolate freepages.
275f40d1e42SMel Gorman 		 * Unfortunately this is a very coarse lock and can be
276f40d1e42SMel Gorman 		 * heavily contended if there are parallel allocations
277f40d1e42SMel Gorman 		 * or parallel compactions. For async compaction we do not
278f40d1e42SMel Gorman 		 * spin on the lock, and we acquire it as late as
279f40d1e42SMel Gorman 		 * possible.
280f40d1e42SMel Gorman 		 */
281f40d1e42SMel Gorman 		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
282f40d1e42SMel Gorman 								locked, cc);
283f40d1e42SMel Gorman 		if (!locked)
284f40d1e42SMel Gorman 			break;
285f40d1e42SMel Gorman 
286f40d1e42SMel Gorman 		/* Recheck this is a suitable migration target under lock */
28701ead534SJoonsoo Kim 		if (!strict && !checked_pageblock) {
28801ead534SJoonsoo Kim 			/*
28901ead534SJoonsoo Kim 			 * We need to check suitability of the pageblock only
29001ead534SJoonsoo Kim 			 * once, and isolate_freepages_block() is called within
29101ead534SJoonsoo Kim 			 * one pageblock range, so checking once is sufficient.
29201ead534SJoonsoo Kim 			 */
29301ead534SJoonsoo Kim 			checked_pageblock = true;
29401ead534SJoonsoo Kim 			if (!suitable_migration_target(page))
295f40d1e42SMel Gorman 				break;
29601ead534SJoonsoo Kim 		}
297f40d1e42SMel Gorman 
298f40d1e42SMel Gorman 		/* Recheck this is a buddy page under lock */
299f40d1e42SMel Gorman 		if (!PageBuddy(page))
3002af120bcSLaura Abbott 			goto isolate_fail;
301748446bbSMel Gorman 
302748446bbSMel Gorman 		/* Found a free page, break it into order-0 pages */
303748446bbSMel Gorman 		isolated = split_free_page(page);
304748446bbSMel Gorman 		total_isolated += isolated;
305748446bbSMel Gorman 		for (i = 0; i < isolated; i++) {
306748446bbSMel Gorman 			list_add(&page->lru, freelist);
307748446bbSMel Gorman 			page++;
308748446bbSMel Gorman 		}
309748446bbSMel Gorman 
310748446bbSMel Gorman 		/* If a page was split, advance to the end of it */
311748446bbSMel Gorman 		if (isolated) {
312748446bbSMel Gorman 			blockpfn += isolated - 1;
313748446bbSMel Gorman 			cursor += isolated - 1;
3142af120bcSLaura Abbott 			continue;
315748446bbSMel Gorman 		}
3162af120bcSLaura Abbott 
3172af120bcSLaura Abbott isolate_fail:
3182af120bcSLaura Abbott 		if (strict)
3192af120bcSLaura Abbott 			break;
3202af120bcSLaura Abbott 		else
3212af120bcSLaura Abbott 			continue;
3222af120bcSLaura Abbott 
323748446bbSMel Gorman 	}
324748446bbSMel Gorman 
325b7aba698SMel Gorman 	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
326f40d1e42SMel Gorman 
327f40d1e42SMel Gorman 	/*
328f40d1e42SMel Gorman 	 * If strict isolation is requested by CMA then check that all the
329f40d1e42SMel Gorman 	 * pages requested were isolated. If there were any failures, 0 is
330f40d1e42SMel Gorman 	 * returned and CMA will fail.
331f40d1e42SMel Gorman 	 */
3322af120bcSLaura Abbott 	if (strict && blockpfn < end_pfn)
333f40d1e42SMel Gorman 		total_isolated = 0;
334f40d1e42SMel Gorman 
335f40d1e42SMel Gorman 	if (locked)
336f40d1e42SMel Gorman 		spin_unlock_irqrestore(&cc->zone->lock, flags);
337f40d1e42SMel Gorman 
338bb13ffebSMel Gorman 	/* Update the pageblock-skip if the whole pageblock was scanned */
339bb13ffebSMel Gorman 	if (blockpfn == end_pfn)
34035979ef3SDavid Rientjes 		update_pageblock_skip(cc, valid_page, total_isolated, true,
34135979ef3SDavid Rientjes 				      false);
342bb13ffebSMel Gorman 
343010fc29aSMinchan Kim 	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
344397487dbSMel Gorman 	if (total_isolated)
345010fc29aSMinchan Kim 		count_compact_events(COMPACTISOLATED, total_isolated);
346748446bbSMel Gorman 	return total_isolated;
347748446bbSMel Gorman }
348748446bbSMel Gorman 
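/*
 * Illustrative sketch, not kernel code: after split_free_page() hands back
 * 1 << order base pages, the loop above advances blockpfn and cursor by
 * isolated - 1 so the pages just taken are never rescanned (the loop
 * increment supplies the final +1). The skip-ahead arithmetic on a synthetic
 * 16-page block (orders[] is invented data; -1 marks a non-head page):
 */
#include <stdio.h>

int main(void)
{
	int orders[16] = {  2, -1, -1, -1,  0,  3, -1, -1,
			   -1, -1, -1, -1, -1,  0,  1, -1 };
	unsigned long pfn, isolated, total = 0;

	for (pfn = 0; pfn < 16; pfn++) {
		if (orders[pfn] < 0)
			continue;
		isolated = 1UL << orders[pfn];
		total += isolated;
		pfn += isolated - 1;	/* skip the pages just isolated */
	}
	printf("isolated %lu of 16 pages\n", total);	/* isolated 16 of 16 */
	return 0;
}
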
34985aa125fSMichal Nazarewicz /**
35085aa125fSMichal Nazarewicz  * isolate_freepages_range() - isolate free pages.
35185aa125fSMichal Nazarewicz  * @start_pfn: The first PFN to start isolating.
35285aa125fSMichal Nazarewicz  * @end_pfn:   The one-past-last PFN.
35385aa125fSMichal Nazarewicz  *
35485aa125fSMichal Nazarewicz  * Non-free pages, invalid PFNs, or zone boundaries within the
35585aa125fSMichal Nazarewicz  * [start_pfn, end_pfn) range are considered errors and cause the function to
35685aa125fSMichal Nazarewicz  * undo its actions and return zero.
35785aa125fSMichal Nazarewicz  *
35885aa125fSMichal Nazarewicz  * Otherwise, the function returns the one-past-the-last PFN of the isolated
35985aa125fSMichal Nazarewicz  * pages (which may be greater than end_pfn if the end fell in the middle of
36085aa125fSMichal Nazarewicz  * a free page).
36185aa125fSMichal Nazarewicz  */
362ff9543fdSMichal Nazarewicz unsigned long
363bb13ffebSMel Gorman isolate_freepages_range(struct compact_control *cc,
364bb13ffebSMel Gorman 			unsigned long start_pfn, unsigned long end_pfn)
36585aa125fSMichal Nazarewicz {
366f40d1e42SMel Gorman 	unsigned long isolated, pfn, block_end_pfn;
36785aa125fSMichal Nazarewicz 	LIST_HEAD(freelist);
36885aa125fSMichal Nazarewicz 
36985aa125fSMichal Nazarewicz 	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
370bb13ffebSMel Gorman 		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
37185aa125fSMichal Nazarewicz 			break;
37285aa125fSMichal Nazarewicz 
37385aa125fSMichal Nazarewicz 		/*
37485aa125fSMichal Nazarewicz 		 * On subsequent iterations ALIGN() is actually not needed,
37585aa125fSMichal Nazarewicz 		 * but we keep it so as not to complicate the code.
37685aa125fSMichal Nazarewicz 		 */
37785aa125fSMichal Nazarewicz 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
37885aa125fSMichal Nazarewicz 		block_end_pfn = min(block_end_pfn, end_pfn);
37985aa125fSMichal Nazarewicz 
380bb13ffebSMel Gorman 		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
38185aa125fSMichal Nazarewicz 						   &freelist, true);
38285aa125fSMichal Nazarewicz 
38385aa125fSMichal Nazarewicz 		/*
38485aa125fSMichal Nazarewicz 		 * In strict mode, isolate_freepages_block() returns 0 if
38585aa125fSMichal Nazarewicz 		 * there are any holes in the block (ie. invalid PFNs or
38685aa125fSMichal Nazarewicz 		 * non-free pages).
38785aa125fSMichal Nazarewicz 		 */
38885aa125fSMichal Nazarewicz 		if (!isolated)
38985aa125fSMichal Nazarewicz 			break;
39085aa125fSMichal Nazarewicz 
39185aa125fSMichal Nazarewicz 		/*
39285aa125fSMichal Nazarewicz 		 * If we managed to isolate pages, it is always (1 << n) *
39385aa125fSMichal Nazarewicz 		 * pageblock_nr_pages for some non-negative n.  (Max order
39485aa125fSMichal Nazarewicz 		 * page may span two pageblocks).
39585aa125fSMichal Nazarewicz 		 */
39685aa125fSMichal Nazarewicz 	}
39785aa125fSMichal Nazarewicz 
39885aa125fSMichal Nazarewicz 	/* split_free_page does not map the pages */
39985aa125fSMichal Nazarewicz 	map_pages(&freelist);
40085aa125fSMichal Nazarewicz 
40185aa125fSMichal Nazarewicz 	if (pfn < end_pfn) {
40285aa125fSMichal Nazarewicz 		/* Loop terminated early, cleanup. */
40385aa125fSMichal Nazarewicz 		release_freepages(&freelist);
40485aa125fSMichal Nazarewicz 		return 0;
40585aa125fSMichal Nazarewicz 	}
40685aa125fSMichal Nazarewicz 
40785aa125fSMichal Nazarewicz 	/* We don't use freelists for anything. */
40885aa125fSMichal Nazarewicz 	return pfn;
40985aa125fSMichal Nazarewicz }
41085aa125fSMichal Nazarewicz 
411748446bbSMel Gorman /* Update the number of anon and file isolated pages in the zone */
412c67fe375SMel Gorman static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
413748446bbSMel Gorman {
414748446bbSMel Gorman 	struct page *page;
415b9e84ac1SMinchan Kim 	unsigned int count[2] = { 0, };
416748446bbSMel Gorman 
417b9e84ac1SMinchan Kim 	list_for_each_entry(page, &cc->migratepages, lru)
418b9e84ac1SMinchan Kim 		count[!!page_is_file_cache(page)]++;
419748446bbSMel Gorman 
420c67fe375SMel Gorman 	/* If locked we can use the interrupt unsafe versions */
421c67fe375SMel Gorman 	if (locked) {
422b9e84ac1SMinchan Kim 		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
423b9e84ac1SMinchan Kim 		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
424c67fe375SMel Gorman 	} else {
425c67fe375SMel Gorman 		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
426c67fe375SMel Gorman 		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
427c67fe375SMel Gorman 	}
428748446bbSMel Gorman }
429748446bbSMel Gorman 
430748446bbSMel Gorman /* Similar to reclaim, but different enough that they don't share logic */
431748446bbSMel Gorman static bool too_many_isolated(struct zone *zone)
432748446bbSMel Gorman {
433bc693045SMinchan Kim 	unsigned long active, inactive, isolated;
434748446bbSMel Gorman 
435748446bbSMel Gorman 	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
436748446bbSMel Gorman 					zone_page_state(zone, NR_INACTIVE_ANON);
437bc693045SMinchan Kim 	active = zone_page_state(zone, NR_ACTIVE_FILE) +
438bc693045SMinchan Kim 					zone_page_state(zone, NR_ACTIVE_ANON);
439748446bbSMel Gorman 	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
440748446bbSMel Gorman 					zone_page_state(zone, NR_ISOLATED_ANON);
441748446bbSMel Gorman 
442bc693045SMinchan Kim 	return isolated > (inactive + active) / 2;
443748446bbSMel Gorman }
444748446bbSMel Gorman 
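/*
 * Illustrative sketch, not kernel code: the throttle above backs off once
 * more pages sit isolated off the LRU than half of what remains on it. With
 * invented counts, 6000 active + 4000 inactive gives a budget of 5000
 * concurrently isolated pages:
 */
#include <stdbool.h>
#include <stdio.h>

static bool too_many(unsigned long active, unsigned long inactive,
		     unsigned long isolated)
{
	return isolated > (inactive + active) / 2;
}

int main(void)
{
	printf("%d\n", too_many(6000, 4000, 5100));	/* 1: throttle */
	printf("%d\n", too_many(6000, 4000, 4900));	/* 0: proceed  */
	return 0;
}
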
4452fe86e00SMichal Nazarewicz /**
4462fe86e00SMichal Nazarewicz  * isolate_migratepages_range() - isolate all migrate-able pages in range.
4472fe86e00SMichal Nazarewicz  * @zone:	Zone pages are in.
4482fe86e00SMichal Nazarewicz  * @cc:		Compaction control structure.
4492fe86e00SMichal Nazarewicz  * @low_pfn:	The first PFN of the range.
4502fe86e00SMichal Nazarewicz  * @end_pfn:	The one-past-the-last PFN of the range.
451e46a2879SMinchan Kim  * @unevictable: true if it is allowed to isolate unevictable pages
4522fe86e00SMichal Nazarewicz  *
4532fe86e00SMichal Nazarewicz  * Isolate all pages that can be migrated from the range specified by
4542fe86e00SMichal Nazarewicz  * [low_pfn, end_pfn).  Returns zero if there is a fatal signal
4552fe86e00SMichal Nazarewicz  * pending, otherwise the PFN of the first page that was not scanned
4562fe86e00SMichal Nazarewicz  * (which may be less than, equal to, or greater than end_pfn).
4572fe86e00SMichal Nazarewicz  *
4582fe86e00SMichal Nazarewicz  * Assumes that cc->migratepages is empty and cc->nr_migratepages is
4592fe86e00SMichal Nazarewicz  * zero.
4602fe86e00SMichal Nazarewicz  *
4612fe86e00SMichal Nazarewicz  * Apart from cc->migratepages and cc->nr_migratepages this function
4622fe86e00SMichal Nazarewicz  * does not modify any of cc's fields, in particular it does not modify
4632fe86e00SMichal Nazarewicz  * (or read for that matter) cc->migrate_pfn.
464748446bbSMel Gorman  */
465ff9543fdSMichal Nazarewicz unsigned long
4662fe86e00SMichal Nazarewicz isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
467e46a2879SMinchan Kim 		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
468748446bbSMel Gorman {
4699927af74SMel Gorman 	unsigned long last_pageblock_nr = 0, pageblock_nr;
470b7aba698SMel Gorman 	unsigned long nr_scanned = 0, nr_isolated = 0;
471748446bbSMel Gorman 	struct list_head *migratelist = &cc->migratepages;
472fa9add64SHugh Dickins 	struct lruvec *lruvec;
473c67fe375SMel Gorman 	unsigned long flags;
4742a1402aaSMel Gorman 	bool locked = false;
475bb13ffebSMel Gorman 	struct page *page = NULL, *valid_page = NULL;
47635979ef3SDavid Rientjes 	bool set_unsuitable = true;
477*e0b9daebSDavid Rientjes 	const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
478*e0b9daebSDavid Rientjes 					ISOLATE_ASYNC_MIGRATE : 0) |
479da1c67a7SDavid Rientjes 				    (unevictable ? ISOLATE_UNEVICTABLE : 0);
480748446bbSMel Gorman 
481748446bbSMel Gorman 	/*
482748446bbSMel Gorman 	 * Ensure that there are not too many pages isolated from the LRU
483748446bbSMel Gorman 	 * list by either parallel reclaimers or compaction. If there are,
484748446bbSMel Gorman 	 * delay for some time until fewer pages are isolated
485748446bbSMel Gorman 	 */
486748446bbSMel Gorman 	while (unlikely(too_many_isolated(zone))) {
487f9e35b3bSMel Gorman 		/* async migration should just abort */
488*e0b9daebSDavid Rientjes 		if (cc->mode == MIGRATE_ASYNC)
4892fe86e00SMichal Nazarewicz 			return 0;
490f9e35b3bSMel Gorman 
491748446bbSMel Gorman 		congestion_wait(BLK_RW_ASYNC, HZ/10);
492748446bbSMel Gorman 
493748446bbSMel Gorman 		if (fatal_signal_pending(current))
4942fe86e00SMichal Nazarewicz 			return 0;
495748446bbSMel Gorman 	}
496748446bbSMel Gorman 
497748446bbSMel Gorman 	/* Time to isolate some pages for migration */
498b2eef8c0SAndrea Arcangeli 	cond_resched();
499748446bbSMel Gorman 	for (; low_pfn < end_pfn; low_pfn++) {
500b2eef8c0SAndrea Arcangeli 		/* give a chance to irqs before checking need_resched() */
501be1aa03bSJoonsoo Kim 		if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
5022a1402aaSMel Gorman 			if (should_release_lock(&zone->lru_lock)) {
503c67fe375SMel Gorman 				spin_unlock_irqrestore(&zone->lru_lock, flags);
504b2eef8c0SAndrea Arcangeli 				locked = false;
505b2eef8c0SAndrea Arcangeli 			}
5062a1402aaSMel Gorman 		}
507b2eef8c0SAndrea Arcangeli 
5080bf380bcSMel Gorman 		/*
5090bf380bcSMel Gorman 		 * migrate_pfn does not necessarily start aligned to a
5100bf380bcSMel Gorman 		 * pageblock. Ensure that pfn_valid is called when moving
5110bf380bcSMel Gorman 		 * into a new MAX_ORDER_NR_PAGES range in case of large
5120bf380bcSMel Gorman 		 * memory holes within the zone
5130bf380bcSMel Gorman 		 */
5140bf380bcSMel Gorman 		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
5150bf380bcSMel Gorman 			if (!pfn_valid(low_pfn)) {
5160bf380bcSMel Gorman 				low_pfn += MAX_ORDER_NR_PAGES - 1;
5170bf380bcSMel Gorman 				continue;
5180bf380bcSMel Gorman 			}
5190bf380bcSMel Gorman 		}
5200bf380bcSMel Gorman 
521748446bbSMel Gorman 		if (!pfn_valid_within(low_pfn))
522748446bbSMel Gorman 			continue;
523b7aba698SMel Gorman 		nr_scanned++;
524748446bbSMel Gorman 
525dc908600SMel Gorman 		/*
526dc908600SMel Gorman 		 * Get the page and ensure the page is within the same zone.
527dc908600SMel Gorman 		 * See the comment in isolate_freepages about overlapping
528dc908600SMel Gorman 		 * nodes. It is deliberate that the new zone lock is not taken
529dc908600SMel Gorman 		 * as memory compaction should not move pages between nodes.
530dc908600SMel Gorman 		 */
531748446bbSMel Gorman 		page = pfn_to_page(low_pfn);
532dc908600SMel Gorman 		if (page_zone(page) != zone)
533dc908600SMel Gorman 			continue;
534dc908600SMel Gorman 
535bb13ffebSMel Gorman 		if (!valid_page)
536bb13ffebSMel Gorman 			valid_page = page;
537bb13ffebSMel Gorman 
538bb13ffebSMel Gorman 		/* If isolation recently failed, do not retry */
539bb13ffebSMel Gorman 		pageblock_nr = low_pfn >> pageblock_order;
540c122b208SJoonsoo Kim 		if (last_pageblock_nr != pageblock_nr) {
541c122b208SJoonsoo Kim 			int mt;
542c122b208SJoonsoo Kim 
543c122b208SJoonsoo Kim 			last_pageblock_nr = pageblock_nr;
544bb13ffebSMel Gorman 			if (!isolation_suitable(cc, page))
545bb13ffebSMel Gorman 				goto next_pageblock;
546bb13ffebSMel Gorman 
5476c14466cSMel Gorman 			/*
548c122b208SJoonsoo Kim 			 * For async migration, also only scan in MOVABLE
549c122b208SJoonsoo Kim 			 * blocks. Async migration optimistically checks whether
550c122b208SJoonsoo Kim 			 * the minimum amount of work satisfies the allocation.
551c122b208SJoonsoo Kim 			 */
552c122b208SJoonsoo Kim 			mt = get_pageblock_migratetype(page);
553*e0b9daebSDavid Rientjes 			if (cc->mode == MIGRATE_ASYNC &&
554*e0b9daebSDavid Rientjes 			    !migrate_async_suitable(mt)) {
55535979ef3SDavid Rientjes 				set_unsuitable = false;
556c122b208SJoonsoo Kim 				goto next_pageblock;
557c122b208SJoonsoo Kim 			}
558c122b208SJoonsoo Kim 		}
559c122b208SJoonsoo Kim 
560c122b208SJoonsoo Kim 		/*
5616c14466cSMel Gorman 		 * Skip if free. page_order cannot be used without zone->lock
5626c14466cSMel Gorman 		 * as nothing prevents parallel allocations or buddy merging.
5636c14466cSMel Gorman 		 */
564748446bbSMel Gorman 		if (PageBuddy(page))
565748446bbSMel Gorman 			continue;
566748446bbSMel Gorman 
5679927af74SMel Gorman 		/*
568bf6bddf1SRafael Aquini 		 * Check may be lockless but that's ok as we recheck later.
569bf6bddf1SRafael Aquini 		 * It's possible to migrate LRU pages and balloon pages;
570bf6bddf1SRafael Aquini 		 * skip any other type of page.
571bf6bddf1SRafael Aquini 		 */
572bf6bddf1SRafael Aquini 		if (!PageLRU(page)) {
573bf6bddf1SRafael Aquini 			if (unlikely(balloon_page_movable(page))) {
574bf6bddf1SRafael Aquini 				if (locked && balloon_page_isolate(page)) {
575bf6bddf1SRafael Aquini 					/* Successfully isolated */
576b6c75016SJoonsoo Kim 					goto isolate_success;
577bf6bddf1SRafael Aquini 				}
578bf6bddf1SRafael Aquini 			}
579bc835011SAndrea Arcangeli 			continue;
580bf6bddf1SRafael Aquini 		}
581bc835011SAndrea Arcangeli 
582bc835011SAndrea Arcangeli 		/*
5832a1402aaSMel Gorman 		 * PageLRU is set. lru_lock normally excludes isolation
5842a1402aaSMel Gorman 		 * splitting and collapsing (collapsing has already happened
5852a1402aaSMel Gorman 		 * if PageLRU is set) but the lock is not necessarily taken
5862a1402aaSMel Gorman 		 * here and it is wasteful to take it just to check transhuge.
5872a1402aaSMel Gorman 		 * Check TransHuge without lock and skip the whole pageblock if
5882a1402aaSMel Gorman 		 * it's either a transhuge or hugetlbfs page, as calling
5892a1402aaSMel Gorman 		 * compound_order() without preventing THP from splitting the
5902a1402aaSMel Gorman 		 * page underneath us may return surprising results.
591bc835011SAndrea Arcangeli 		 */
592bc835011SAndrea Arcangeli 		if (PageTransHuge(page)) {
5932a1402aaSMel Gorman 			if (!locked)
5942a1402aaSMel Gorman 				goto next_pageblock;
5952a1402aaSMel Gorman 			low_pfn += (1 << compound_order(page)) - 1;
5962a1402aaSMel Gorman 			continue;
5972a1402aaSMel Gorman 		}
5982a1402aaSMel Gorman 
599119d6d59SDavid Rientjes 		/*
600119d6d59SDavid Rientjes 		 * Migration will fail if an anonymous page is pinned in memory,
601119d6d59SDavid Rientjes 		 * so avoid taking lru_lock and isolating it unnecessarily in an
602119d6d59SDavid Rientjes 		 * admittedly racy check.
603119d6d59SDavid Rientjes 		 */
604119d6d59SDavid Rientjes 		if (!page_mapping(page) &&
605119d6d59SDavid Rientjes 		    page_count(page) > page_mapcount(page))
606119d6d59SDavid Rientjes 			continue;
607119d6d59SDavid Rientjes 
6082a1402aaSMel Gorman 		/* Check if it is ok to still hold the lock */
6092a1402aaSMel Gorman 		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
6102a1402aaSMel Gorman 								locked, cc);
6112a1402aaSMel Gorman 		if (!locked || fatal_signal_pending(current))
6122a1402aaSMel Gorman 			break;
6132a1402aaSMel Gorman 
6142a1402aaSMel Gorman 		/* Recheck PageLRU and PageTransHuge under lock */
6152a1402aaSMel Gorman 		if (!PageLRU(page))
6162a1402aaSMel Gorman 			continue;
6172a1402aaSMel Gorman 		if (PageTransHuge(page)) {
618bc835011SAndrea Arcangeli 			low_pfn += (1 << compound_order(page)) - 1;
619bc835011SAndrea Arcangeli 			continue;
620bc835011SAndrea Arcangeli 		}
621bc835011SAndrea Arcangeli 
622fa9add64SHugh Dickins 		lruvec = mem_cgroup_page_lruvec(page, zone);
623fa9add64SHugh Dickins 
624748446bbSMel Gorman 		/* Try isolate the page */
625f3fd4a61SKonstantin Khlebnikov 		if (__isolate_lru_page(page, mode) != 0)
626748446bbSMel Gorman 			continue;
627748446bbSMel Gorman 
628309381feSSasha Levin 		VM_BUG_ON_PAGE(PageTransCompound(page), page);
629bc835011SAndrea Arcangeli 
630748446bbSMel Gorman 		/* Successfully isolated */
631fa9add64SHugh Dickins 		del_page_from_lru_list(page, lruvec, page_lru(page));
632b6c75016SJoonsoo Kim 
633b6c75016SJoonsoo Kim isolate_success:
634b6c75016SJoonsoo Kim 		cc->finished_update_migrate = true;
635748446bbSMel Gorman 		list_add(&page->lru, migratelist);
636748446bbSMel Gorman 		cc->nr_migratepages++;
637b7aba698SMel Gorman 		nr_isolated++;
638748446bbSMel Gorman 
639748446bbSMel Gorman 		/* Avoid isolating too much */
64031b8384aSHillf Danton 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
64131b8384aSHillf Danton 			++low_pfn;
642748446bbSMel Gorman 			break;
643748446bbSMel Gorman 		}
6442a1402aaSMel Gorman 
6452a1402aaSMel Gorman 		continue;
6462a1402aaSMel Gorman 
6472a1402aaSMel Gorman next_pageblock:
648a9aacbccSMel Gorman 		low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
64931b8384aSHillf Danton 	}
650748446bbSMel Gorman 
651c67fe375SMel Gorman 	acct_isolated(zone, locked, cc);
652748446bbSMel Gorman 
653c67fe375SMel Gorman 	if (locked)
654c67fe375SMel Gorman 		spin_unlock_irqrestore(&zone->lru_lock, flags);
655748446bbSMel Gorman 
65650b5b094SVlastimil Babka 	/*
65750b5b094SVlastimil Babka 	 * Update the pageblock-skip information and cached scanner pfn,
65850b5b094SVlastimil Babka 	 * if the whole pageblock was scanned without isolating any page.
65950b5b094SVlastimil Babka 	 */
66035979ef3SDavid Rientjes 	if (low_pfn == end_pfn)
66135979ef3SDavid Rientjes 		update_pageblock_skip(cc, valid_page, nr_isolated,
66235979ef3SDavid Rientjes 				      set_unsuitable, true);
663bb13ffebSMel Gorman 
664b7aba698SMel Gorman 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
665b7aba698SMel Gorman 
666010fc29aSMinchan Kim 	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
667397487dbSMel Gorman 	if (nr_isolated)
668010fc29aSMinchan Kim 		count_compact_events(COMPACTISOLATED, nr_isolated);
669397487dbSMel Gorman 
6702fe86e00SMichal Nazarewicz 	return low_pfn;
6712fe86e00SMichal Nazarewicz }
6722fe86e00SMichal Nazarewicz 
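/*
 * Illustrative sketch, not kernel code: the scanners above round pfns to
 * pageblock boundaries with power-of-two arithmetic. ALIGN(x, n) rounds up,
 * x & ~(n - 1) rounds down, and ALIGN(low_pfn + 1, n) - 1 lands on the last
 * pfn of the current block so the loop's ++ enters the next one. A demo
 * assuming a 512-page pageblock (order 9, as on x86 with 4K pages):
 */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long nr = 512;		/* assumed pageblock_nr_pages */
	unsigned long pfn = 1000;

	printf("%lu\n", ALIGN(pfn, nr));		/* 1024: round up   */
	printf("%lu\n", pfn & ~(nr - 1));		/* 512:  round down */
	printf("%lu\n", ALIGN(pfn + 1, nr) - 1);	/* 1023: block end  */
	return 0;
}
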
673ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION || CONFIG_CMA */
674ff9543fdSMichal Nazarewicz #ifdef CONFIG_COMPACTION
675ff9543fdSMichal Nazarewicz /*
676ff9543fdSMichal Nazarewicz  * Based on information in the current compact_control, find blocks
677ff9543fdSMichal Nazarewicz  * suitable for isolating free pages from and then isolate them.
678ff9543fdSMichal Nazarewicz  */
679ff9543fdSMichal Nazarewicz static void isolate_freepages(struct zone *zone,
680ff9543fdSMichal Nazarewicz 				struct compact_control *cc)
681ff9543fdSMichal Nazarewicz {
682ff9543fdSMichal Nazarewicz 	struct page *page;
683c96b9e50SVlastimil Babka 	unsigned long block_start_pfn;	/* start of current pageblock */
684c96b9e50SVlastimil Babka 	unsigned long block_end_pfn;	/* end of current pageblock */
685c96b9e50SVlastimil Babka 	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
686c96b9e50SVlastimil Babka 	unsigned long next_free_pfn; /* start pfn for scanning at next round */
687ff9543fdSMichal Nazarewicz 	int nr_freepages = cc->nr_freepages;
688ff9543fdSMichal Nazarewicz 	struct list_head *freelist = &cc->freepages;
6892fe86e00SMichal Nazarewicz 
690ff9543fdSMichal Nazarewicz 	/*
691ff9543fdSMichal Nazarewicz 	 * Initialise the free scanner. The starting point is the zone-cached
69249e068f0SVlastimil Babka 	 * value of where we last successfully isolated from, or the end of the
69349e068f0SVlastimil Babka 	 * zone when isolating for the first time. We need this aligned to
694c96b9e50SVlastimil Babka 	 * the pageblock boundary, because we do
695c96b9e50SVlastimil Babka 	 * block_start_pfn -= pageblock_nr_pages in the for loop.
696c96b9e50SVlastimil Babka 	 * For the ending point, take care when isolating in the last pageblock
697c96b9e50SVlastimil Babka 	 * of a zone which ends in the middle of a pageblock.
69849e068f0SVlastimil Babka 	 * The low boundary is the end of the pageblock the migration scanner
69949e068f0SVlastimil Babka 	 * is using.
700ff9543fdSMichal Nazarewicz 	 */
701c96b9e50SVlastimil Babka 	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
702c96b9e50SVlastimil Babka 	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
703c96b9e50SVlastimil Babka 						zone_end_pfn(zone));
7047ed695e0SVlastimil Babka 	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
7052fe86e00SMichal Nazarewicz 
706ff9543fdSMichal Nazarewicz 	/*
707c96b9e50SVlastimil Babka 	 * If no pages are isolated, the block_start_pfn < low_pfn check
708c96b9e50SVlastimil Babka 	 * will kick in.
709ff9543fdSMichal Nazarewicz 	 */
710c96b9e50SVlastimil Babka 	next_free_pfn = 0;
711ff9543fdSMichal Nazarewicz 
712ff9543fdSMichal Nazarewicz 	/*
713ff9543fdSMichal Nazarewicz 	 * Isolate free pages until enough are available to migrate the
714ff9543fdSMichal Nazarewicz 	 * pages on cc->migratepages. We stop searching if the migrate
715ff9543fdSMichal Nazarewicz 	 * and free page scanners meet or enough free pages are isolated.
716ff9543fdSMichal Nazarewicz 	 */
717c96b9e50SVlastimil Babka 	for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
718c96b9e50SVlastimil Babka 				block_end_pfn = block_start_pfn,
719c96b9e50SVlastimil Babka 				block_start_pfn -= pageblock_nr_pages) {
720ff9543fdSMichal Nazarewicz 		unsigned long isolated;
721ff9543fdSMichal Nazarewicz 
722f6ea3adbSDavid Rientjes 		/*
723f6ea3adbSDavid Rientjes 		 * This can iterate a massively long zone without finding any
724f6ea3adbSDavid Rientjes 		 * suitable migration targets, so periodically check if we need
725f6ea3adbSDavid Rientjes 		 * to schedule.
726f6ea3adbSDavid Rientjes 		 */
727f6ea3adbSDavid Rientjes 		cond_resched();
728f6ea3adbSDavid Rientjes 
729c96b9e50SVlastimil Babka 		if (!pfn_valid(block_start_pfn))
730ff9543fdSMichal Nazarewicz 			continue;
731ff9543fdSMichal Nazarewicz 
732ff9543fdSMichal Nazarewicz 		/*
733ff9543fdSMichal Nazarewicz 		 * Check for overlapping nodes/zones. It's possible on some
734ff9543fdSMichal Nazarewicz 		 * configurations to have a setup like
735ff9543fdSMichal Nazarewicz 		 * node0 node1 node0
736ff9543fdSMichal Nazarewicz 		 * i.e. it's possible that all pages within a zones range of
737ff9543fdSMichal Nazarewicz 		 * pages do not belong to a single zone.
738ff9543fdSMichal Nazarewicz 		 */
739c96b9e50SVlastimil Babka 		page = pfn_to_page(block_start_pfn);
740ff9543fdSMichal Nazarewicz 		if (page_zone(page) != zone)
741ff9543fdSMichal Nazarewicz 			continue;
742ff9543fdSMichal Nazarewicz 
743ff9543fdSMichal Nazarewicz 		/* Check the block is suitable for migration */
74468e3e926SLinus Torvalds 		if (!suitable_migration_target(page))
745ff9543fdSMichal Nazarewicz 			continue;
74668e3e926SLinus Torvalds 
747bb13ffebSMel Gorman 		/* If isolation recently failed, do not retry */
748bb13ffebSMel Gorman 		if (!isolation_suitable(cc, page))
749bb13ffebSMel Gorman 			continue;
750bb13ffebSMel Gorman 
751f40d1e42SMel Gorman 		/* Found a block suitable for isolating free pages from */
752c96b9e50SVlastimil Babka 		isolated = isolate_freepages_block(cc, block_start_pfn,
753c96b9e50SVlastimil Babka 					block_end_pfn, freelist, false);
754ff9543fdSMichal Nazarewicz 		nr_freepages += isolated;
755ff9543fdSMichal Nazarewicz 
756ff9543fdSMichal Nazarewicz 		/*
757ff9543fdSMichal Nazarewicz 		 * Record the highest PFN we isolated pages from. When next
758ff9543fdSMichal Nazarewicz 		 * looking for free pages, the search will restart here as
759ff9543fdSMichal Nazarewicz 		 * page migration may have returned some pages to the allocator
760ff9543fdSMichal Nazarewicz 		 */
761c96b9e50SVlastimil Babka 		if (isolated && next_free_pfn == 0) {
762c89511abSMel Gorman 			cc->finished_update_free = true;
763c96b9e50SVlastimil Babka 			next_free_pfn = block_start_pfn;
764ff9543fdSMichal Nazarewicz 		}
765c89511abSMel Gorman 	}
766ff9543fdSMichal Nazarewicz 
767ff9543fdSMichal Nazarewicz 	/* split_free_page does not map the pages */
768ff9543fdSMichal Nazarewicz 	map_pages(freelist);
769ff9543fdSMichal Nazarewicz 
7707ed695e0SVlastimil Babka 	/*
7717ed695e0SVlastimil Babka 	 * If we crossed the migrate scanner, we want to keep it that way
7727ed695e0SVlastimil Babka 	 * so that compact_finished() may detect this
7737ed695e0SVlastimil Babka 	 */
774c96b9e50SVlastimil Babka 	if (block_start_pfn < low_pfn)
775c96b9e50SVlastimil Babka 		next_free_pfn = cc->migrate_pfn;
776c96b9e50SVlastimil Babka 
777c96b9e50SVlastimil Babka 	cc->free_pfn = next_free_pfn;
778ff9543fdSMichal Nazarewicz 	cc->nr_freepages = nr_freepages;
779748446bbSMel Gorman }
780748446bbSMel Gorman 
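/*
 * Illustrative sketch, not kernel code: the free scanner above walks
 * pageblocks from the top of the zone downward, seeding each block's end
 * from the previous block's start, until it reaches the block the migrate
 * scanner is using. The descending walk with invented bounds:
 */
#include <stdio.h>

int main(void)
{
	unsigned long nr = 512;		/* assumed pageblock size     */
	unsigned long low_pfn = 2048;	/* migrate scanner's boundary */
	unsigned long start, end;

	for (start = 4096, end = start + nr;
	     start >= low_pfn;
	     end = start, start -= nr)
		printf("scan free pages in [%lu, %lu)\n", start, end);
	return 0;
}
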
781748446bbSMel Gorman /*
782748446bbSMel Gorman  * This is a migrate-callback that "allocates" freepages by taking pages
783748446bbSMel Gorman  * from the isolated freelists in the block we are migrating to.
784748446bbSMel Gorman  */
785748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage,
786748446bbSMel Gorman 					unsigned long data,
787748446bbSMel Gorman 					int **result)
788748446bbSMel Gorman {
789748446bbSMel Gorman 	struct compact_control *cc = (struct compact_control *)data;
790748446bbSMel Gorman 	struct page *freepage;
791748446bbSMel Gorman 
792748446bbSMel Gorman 	/* Isolate free pages if necessary */
793748446bbSMel Gorman 	if (list_empty(&cc->freepages)) {
794748446bbSMel Gorman 		isolate_freepages(cc->zone, cc);
795748446bbSMel Gorman 
796748446bbSMel Gorman 		if (list_empty(&cc->freepages))
797748446bbSMel Gorman 			return NULL;
798748446bbSMel Gorman 	}
799748446bbSMel Gorman 
800748446bbSMel Gorman 	freepage = list_entry(cc->freepages.next, struct page, lru);
801748446bbSMel Gorman 	list_del(&freepage->lru);
802748446bbSMel Gorman 	cc->nr_freepages--;
803748446bbSMel Gorman 
804748446bbSMel Gorman 	return freepage;
805748446bbSMel Gorman }
806748446bbSMel Gorman 
807748446bbSMel Gorman /*
808d53aea3dSDavid Rientjes  * This is a migrate-callback that "frees" freepages back to the isolated
809d53aea3dSDavid Rientjes  * freelist.  All pages on the freelist are from the same zone, so there is no
810d53aea3dSDavid Rientjes  * special handling needed for NUMA.
811d53aea3dSDavid Rientjes  */
812d53aea3dSDavid Rientjes static void compaction_free(struct page *page, unsigned long data)
813d53aea3dSDavid Rientjes {
814d53aea3dSDavid Rientjes 	struct compact_control *cc = (struct compact_control *)data;
815d53aea3dSDavid Rientjes 
816d53aea3dSDavid Rientjes 	list_add(&page->lru, &cc->freepages);
817d53aea3dSDavid Rientjes 	cc->nr_freepages++;
818d53aea3dSDavid Rientjes }
819d53aea3dSDavid Rientjes 
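/*
 * Illustrative sketch, not kernel code: compaction_alloc()/compaction_free()
 * are the get/put callback pair that migrate_pages() drives, with the opaque
 * data argument smuggling the compact_control through, and failed migrations
 * handing their target straight back. The same shape in a standalone demo
 * (struct pool, migrate_all() and the failure rule are all invented):
 */
#include <stdio.h>

struct pool {
	int free[8];
	int n;
};

static int pool_get(void *data)
{
	struct pool *p = data;

	return p->n ? p->free[--p->n] : -1;
}

static void pool_put(int slot, void *data)
{
	struct pool *p = data;

	p->free[p->n++] = slot;		/* failed target returns to the pool */
}

/* Driver in the style of migrate_pages(): callbacks plus a cookie. */
static void migrate_all(int nr, int (*get)(void *),
			void (*put)(int, void *), void *data)
{
	for (int i = 0; i < nr; i++) {
		int slot = get(data);

		if (slot < 0)
			break;
		if (i & 1)		/* pretend odd migrations fail */
			put(slot, data);
		else
			printf("moved item %d into slot %d\n", i, slot);
	}
}

int main(void)
{
	struct pool p = { .free = { 7, 6, 5, 4 }, .n = 4 };

	migrate_all(4, pool_get, pool_put, &p);
	return 0;
}
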
820d53aea3dSDavid Rientjes /*
821d53aea3dSDavid Rientjes  * We cannot control nr_migratepages fully when migration is running as
822d53aea3dSDavid Rientjes  * migrate_pages() has no knowledge of compact_control.  When migration is
823d53aea3dSDavid Rientjes  * complete, we count the number of pages on the list by hand.
824748446bbSMel Gorman  */
825748446bbSMel Gorman static void update_nr_listpages(struct compact_control *cc)
826748446bbSMel Gorman {
827748446bbSMel Gorman 	int nr_migratepages = 0;
828748446bbSMel Gorman 	struct page *page;
829748446bbSMel Gorman 
830748446bbSMel Gorman 	list_for_each_entry(page, &cc->migratepages, lru)
831748446bbSMel Gorman 		nr_migratepages++;
832748446bbSMel Gorman 
833748446bbSMel Gorman 	cc->nr_migratepages = nr_migratepages;
834748446bbSMel Gorman }
835748446bbSMel Gorman 
836ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */
837ff9543fdSMichal Nazarewicz typedef enum {
838ff9543fdSMichal Nazarewicz 	ISOLATE_ABORT,		/* Abort compaction now */
839ff9543fdSMichal Nazarewicz 	ISOLATE_NONE,		/* No pages isolated, continue scanning */
840ff9543fdSMichal Nazarewicz 	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
841ff9543fdSMichal Nazarewicz } isolate_migrate_t;
842ff9543fdSMichal Nazarewicz 
843ff9543fdSMichal Nazarewicz /*
844ff9543fdSMichal Nazarewicz  * Isolate all pages that can be migrated from the block pointed to by
845ff9543fdSMichal Nazarewicz  * the migrate scanner within compact_control.
846ff9543fdSMichal Nazarewicz  */
847ff9543fdSMichal Nazarewicz static isolate_migrate_t isolate_migratepages(struct zone *zone,
848ff9543fdSMichal Nazarewicz 					struct compact_control *cc)
849ff9543fdSMichal Nazarewicz {
850ff9543fdSMichal Nazarewicz 	unsigned long low_pfn, end_pfn;
851ff9543fdSMichal Nazarewicz 
852ff9543fdSMichal Nazarewicz 	/* Do not scan outside zone boundaries */
853ff9543fdSMichal Nazarewicz 	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
854ff9543fdSMichal Nazarewicz 
855ff9543fdSMichal Nazarewicz 	/* Only scan within a pageblock boundary */
856a9aacbccSMel Gorman 	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);
857ff9543fdSMichal Nazarewicz 
858ff9543fdSMichal Nazarewicz 	/* Do not cross the free scanner or scan within a memory hole */
859ff9543fdSMichal Nazarewicz 	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
860ff9543fdSMichal Nazarewicz 		cc->migrate_pfn = end_pfn;
861ff9543fdSMichal Nazarewicz 		return ISOLATE_NONE;
862ff9543fdSMichal Nazarewicz 	}
863ff9543fdSMichal Nazarewicz 
864ff9543fdSMichal Nazarewicz 	/* Perform the isolation */
865e46a2879SMinchan Kim 	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
866e64c5237SShaohua Li 	if (!low_pfn || cc->contended)
867ff9543fdSMichal Nazarewicz 		return ISOLATE_ABORT;
868ff9543fdSMichal Nazarewicz 
869ff9543fdSMichal Nazarewicz 	cc->migrate_pfn = low_pfn;
870ff9543fdSMichal Nazarewicz 
871ff9543fdSMichal Nazarewicz 	return ISOLATE_SUCCESS;
872ff9543fdSMichal Nazarewicz }
873ff9543fdSMichal Nazarewicz 
874748446bbSMel Gorman static int compact_finished(struct zone *zone,
875748446bbSMel Gorman 			    struct compact_control *cc)
876748446bbSMel Gorman {
8778fb74b9fSMel Gorman 	unsigned int order;
8785a03b051SAndrea Arcangeli 	unsigned long watermark;
87956de7263SMel Gorman 
880748446bbSMel Gorman 	if (fatal_signal_pending(current))
881748446bbSMel Gorman 		return COMPACT_PARTIAL;
882748446bbSMel Gorman 
883753341a4SMel Gorman 	/* Compaction run completes if the migrate and free scanners meet */
884bb13ffebSMel Gorman 	if (cc->free_pfn <= cc->migrate_pfn) {
88555b7c4c9SVlastimil Babka 		/* Let the next compaction start anew. */
88635979ef3SDavid Rientjes 		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
88735979ef3SDavid Rientjes 		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
88855b7c4c9SVlastimil Babka 		zone->compact_cached_free_pfn = zone_end_pfn(zone);
88955b7c4c9SVlastimil Babka 
89062997027SMel Gorman 		/*
89162997027SMel Gorman 		 * Mark that the PG_migrate_skip information should be cleared
89262997027SMel Gorman 		 * by kswapd when it goes to sleep. kswapd does not set the
89362997027SMel Gorman 		 * flag itself as the decision to be clear should be directly
89462997027SMel Gorman 		 * based on an allocation request.
89562997027SMel Gorman 		 */
89662997027SMel Gorman 		if (!current_is_kswapd())
89762997027SMel Gorman 			zone->compact_blockskip_flush = true;
89862997027SMel Gorman 
899748446bbSMel Gorman 		return COMPACT_COMPLETE;
900bb13ffebSMel Gorman 	}
901748446bbSMel Gorman 
90282478fb7SJohannes Weiner 	/*
90382478fb7SJohannes Weiner 	 * order == -1 is expected when compacting via
90482478fb7SJohannes Weiner 	 * /proc/sys/vm/compact_memory
90582478fb7SJohannes Weiner 	 */
90656de7263SMel Gorman 	if (cc->order == -1)
90756de7263SMel Gorman 		return COMPACT_CONTINUE;
90856de7263SMel Gorman 
9093957c776SMichal Hocko 	/* Compaction run is not finished if the watermark is not met */
9103957c776SMichal Hocko 	watermark = low_wmark_pages(zone);
9113957c776SMichal Hocko 	watermark += (1 << cc->order);
9123957c776SMichal Hocko 
9133957c776SMichal Hocko 	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
9143957c776SMichal Hocko 		return COMPACT_CONTINUE;
9153957c776SMichal Hocko 
91656de7263SMel Gorman 	/* Direct compactor: Is a suitable page free? */
91756de7263SMel Gorman 	for (order = cc->order; order < MAX_ORDER; order++) {
9188fb74b9fSMel Gorman 		struct free_area *area = &zone->free_area[order];
9198fb74b9fSMel Gorman 
92056de7263SMel Gorman 		/* Job done if page is free of the right migratetype */
9211fb3f8caSMel Gorman 		if (!list_empty(&area->free_list[cc->migratetype]))
92256de7263SMel Gorman 			return COMPACT_PARTIAL;
92356de7263SMel Gorman 
92456de7263SMel Gorman 		/* Job done if allocation would set block type */
9251fb3f8caSMel Gorman 		if (cc->order >= pageblock_order && area->nr_free)
92656de7263SMel Gorman 			return COMPACT_PARTIAL;
92756de7263SMel Gorman 	}
92856de7263SMel Gorman 
929748446bbSMel Gorman 	return COMPACT_CONTINUE;
930748446bbSMel Gorman }
931748446bbSMel Gorman 
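/*
 * Illustrative sketch, not kernel code: termination works like a two-pointer
 * sweep. The migrate scanner climbs from the zone start, the free scanner
 * descends from the end, and compact_finished() declares COMPACT_COMPLETE
 * once free_pfn <= migrate_pfn (all values below are invented):
 */
#include <stdio.h>

int main(void)
{
	unsigned long migrate_pfn = 0, free_pfn = 4096, step = 512;

	while (free_pfn > migrate_pfn) {
		migrate_pfn += step;	/* migrate scanner moves up */
		free_pfn -= step;	/* free scanner moves down  */
	}
	printf("scanners met at %lu/%lu: COMPACT_COMPLETE\n",
	       migrate_pfn, free_pfn);	/* met at 2048/2048 */
	return 0;
}
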
9323e7d3449SMel Gorman /*
9333e7d3449SMel Gorman  * compaction_suitable: Is this suitable to run compaction on this zone now?
9343e7d3449SMel Gorman  * Returns
9353e7d3449SMel Gorman  *   COMPACT_SKIPPED  - If there are too few free pages for compaction
9363e7d3449SMel Gorman  *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
9373e7d3449SMel Gorman  *   COMPACT_CONTINUE - If compaction should run now
9383e7d3449SMel Gorman  */
9393e7d3449SMel Gorman unsigned long compaction_suitable(struct zone *zone, int order)
9403e7d3449SMel Gorman {
9413e7d3449SMel Gorman 	int fragindex;
9423e7d3449SMel Gorman 	unsigned long watermark;
9433e7d3449SMel Gorman 
9443e7d3449SMel Gorman 	/*
9453957c776SMichal Hocko 	 * order == -1 is expected when compacting via
9463957c776SMichal Hocko 	 * /proc/sys/vm/compact_memory
9473957c776SMichal Hocko 	 */
9483957c776SMichal Hocko 	if (order == -1)
9493957c776SMichal Hocko 		return COMPACT_CONTINUE;
9503957c776SMichal Hocko 
9513957c776SMichal Hocko 	/*
9523e7d3449SMel Gorman 	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
9533e7d3449SMel Gorman 	 * This is because during migration, copies of pages need to be
9543e7d3449SMel Gorman 	 * allocated and for a short time, the footprint is higher
9553e7d3449SMel Gorman 	 */
9563e7d3449SMel Gorman 	watermark = low_wmark_pages(zone) + (2UL << order);
9573e7d3449SMel Gorman 	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
9583e7d3449SMel Gorman 		return COMPACT_SKIPPED;
9593e7d3449SMel Gorman 
9603e7d3449SMel Gorman 	/*
9613e7d3449SMel Gorman 	 * fragmentation index determines if allocation failures are due to
9623e7d3449SMel Gorman 	 * low memory or external fragmentation
9633e7d3449SMel Gorman 	 *
964a582a738SShaohua Li 	 * index of -1000 implies allocations might succeed depending on
965a582a738SShaohua Li 	 * watermarks
9663e7d3449SMel Gorman 	 * index towards 0 implies failure is due to lack of memory
9673e7d3449SMel Gorman 	 * index towards 1000 implies failure is due to fragmentation
9683e7d3449SMel Gorman 	 *
9693e7d3449SMel Gorman 	 * Only compact if a failure would be due to fragmentation.
9703e7d3449SMel Gorman 	 */
9713e7d3449SMel Gorman 	fragindex = fragmentation_index(zone, order);
9723e7d3449SMel Gorman 	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
9733e7d3449SMel Gorman 		return COMPACT_SKIPPED;
9743e7d3449SMel Gorman 
975a582a738SShaohua Li 	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
976a582a738SShaohua Li 	    0, 0))
9773e7d3449SMel Gorman 		return COMPACT_PARTIAL;
9783e7d3449SMel Gorman 
9793e7d3449SMel Gorman 	return COMPACT_CONTINUE;
9803e7d3449SMel Gorman }
9813e7d3449SMel Gorman 
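/*
 * Illustrative sketch, not kernel code: the 2UL << order headroom above
 * exists because migration briefly keeps source and destination pages alive
 * at once, and the fragmentation index separates "not enough memory" (skip)
 * from "enough memory, too fragmented" (compact). A worked check with
 * invented numbers; 500 is the sysctl_extfrag_threshold default:
 */
#include <stdio.h>

int main(void)
{
	unsigned long low_wmark = 1000, free = 1200;
	int order = 4, fragindex = 650, threshold = 500;
	unsigned long watermark = low_wmark + (2UL << order);	/* 1032 */

	printf("need %lu free pages, have %lu -> %s\n", watermark, free,
	       free >= watermark ? "proceed" : "COMPACT_SKIPPED");
	/* skip when 0 <= fragindex <= threshold: failure is lack of memory */
	printf("fragindex %d vs %d -> %s\n", fragindex, threshold,
	       fragindex > threshold ? "compact" : "COMPACT_SKIPPED");
	return 0;
}
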
982748446bbSMel Gorman static int compact_zone(struct zone *zone, struct compact_control *cc)
983748446bbSMel Gorman {
984748446bbSMel Gorman 	int ret;
985c89511abSMel Gorman 	unsigned long start_pfn = zone->zone_start_pfn;
986108bcc96SCody P Schafer 	unsigned long end_pfn = zone_end_pfn(zone);
987*e0b9daebSDavid Rientjes 	const bool sync = cc->mode != MIGRATE_ASYNC;
988748446bbSMel Gorman 
9893e7d3449SMel Gorman 	ret = compaction_suitable(zone, cc->order);
9903e7d3449SMel Gorman 	switch (ret) {
9913e7d3449SMel Gorman 	case COMPACT_PARTIAL:
9923e7d3449SMel Gorman 	case COMPACT_SKIPPED:
9933e7d3449SMel Gorman 		/* Compaction is likely to fail */
9943e7d3449SMel Gorman 		return ret;
9953e7d3449SMel Gorman 	case COMPACT_CONTINUE:
9963e7d3449SMel Gorman 		/* Fall through to compaction */
9973e7d3449SMel Gorman 		;
9983e7d3449SMel Gorman 	}
9993e7d3449SMel Gorman 
1000c89511abSMel Gorman 	/*
1001d3132e4bSVlastimil Babka 	 * Clear pageblock skip if there were failures recently and compaction
1002d3132e4bSVlastimil Babka 	 * is about to be retried after being deferred. kswapd does not do
1003d3132e4bSVlastimil Babka 	 * this reset as it'll reset the cached information when going to sleep.
1004d3132e4bSVlastimil Babka 	 */
1005d3132e4bSVlastimil Babka 	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
1006d3132e4bSVlastimil Babka 		__reset_isolation_suitable(zone);
1007d3132e4bSVlastimil Babka 
1008d3132e4bSVlastimil Babka 	/*
1009c89511abSMel Gorman 	 * Setup to move all movable pages to the end of the zone. Used cached
1010c89511abSMel Gorman 	 * information on where the scanners should start but check that it
1011c89511abSMel Gorman 	 * is initialised by ensuring the values are within zone boundaries.
1012c89511abSMel Gorman 	 */
1013*e0b9daebSDavid Rientjes 	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1014c89511abSMel Gorman 	cc->free_pfn = zone->compact_cached_free_pfn;
1015c89511abSMel Gorman 	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
1016c89511abSMel Gorman 		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
1017c89511abSMel Gorman 		zone->compact_cached_free_pfn = cc->free_pfn;
1018c89511abSMel Gorman 	}
1019c89511abSMel Gorman 	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
1020c89511abSMel Gorman 		cc->migrate_pfn = start_pfn;
102135979ef3SDavid Rientjes 		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
102235979ef3SDavid Rientjes 		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1023c89511abSMel Gorman 	}
1024748446bbSMel Gorman 
10250eb927c0SMel Gorman 	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
10260eb927c0SMel Gorman 
1027748446bbSMel Gorman 	migrate_prep_local();
1028748446bbSMel Gorman 
1029748446bbSMel Gorman 	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
1030748446bbSMel Gorman 		unsigned long nr_migrate, nr_remaining;
10319d502c1cSMinchan Kim 		int err;
1032748446bbSMel Gorman 
1033f9e35b3bSMel Gorman 		switch (isolate_migratepages(zone, cc)) {
1034f9e35b3bSMel Gorman 		case ISOLATE_ABORT:
1035f9e35b3bSMel Gorman 			ret = COMPACT_PARTIAL;
10365733c7d1SRafael Aquini 			putback_movable_pages(&cc->migratepages);
1037e64c5237SShaohua Li 			cc->nr_migratepages = 0;
1038f9e35b3bSMel Gorman 			goto out;
1039f9e35b3bSMel Gorman 		case ISOLATE_NONE:
1040748446bbSMel Gorman 			continue;
1041f9e35b3bSMel Gorman 		case ISOLATE_SUCCESS:
1042f9e35b3bSMel Gorman 			;
1043f9e35b3bSMel Gorman 		}
1044748446bbSMel Gorman 
1045748446bbSMel Gorman 		nr_migrate = cc->nr_migratepages;
1046d53aea3dSDavid Rientjes 		err = migrate_pages(&cc->migratepages, compaction_alloc,
1047*e0b9daebSDavid Rientjes 				compaction_free, (unsigned long)cc, cc->mode,
10487b2a2d4aSMel Gorman 				MR_COMPACTION);
1049748446bbSMel Gorman 		update_nr_listpages(cc);
1050748446bbSMel Gorman 		nr_remaining = cc->nr_migratepages;
1051748446bbSMel Gorman 
1052b7aba698SMel Gorman 		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
1053b7aba698SMel Gorman 						nr_remaining);
1054748446bbSMel Gorman 
10555733c7d1SRafael Aquini 		/* Release isolated pages not migrated */
10569d502c1cSMinchan Kim 		if (err) {
10575733c7d1SRafael Aquini 			putback_movable_pages(&cc->migratepages);
1058748446bbSMel Gorman 			cc->nr_migratepages = 0;
10597ed695e0SVlastimil Babka 			/*
10607ed695e0SVlastimil Babka 			 * migrate_pages() may return -ENOMEM when scanners meet
10617ed695e0SVlastimil Babka 			 * and we want compact_finished() to detect it
10627ed695e0SVlastimil Babka 			 */
10637ed695e0SVlastimil Babka 			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
10644bf2bba3SDavid Rientjes 				ret = COMPACT_PARTIAL;
10654bf2bba3SDavid Rientjes 				goto out;
1066748446bbSMel Gorman 			}
10674bf2bba3SDavid Rientjes 		}
1068748446bbSMel Gorman 	}
1069748446bbSMel Gorman 
1070f9e35b3bSMel Gorman out:
1071748446bbSMel Gorman 	/* Release free pages and check accounting */
1072748446bbSMel Gorman 	cc->nr_freepages -= release_freepages(&cc->freepages);
1073748446bbSMel Gorman 	VM_BUG_ON(cc->nr_freepages != 0);
1074748446bbSMel Gorman 
10750eb927c0SMel Gorman 	trace_mm_compaction_end(ret);
10760eb927c0SMel Gorman 
1077748446bbSMel Gorman 	return ret;
1078748446bbSMel Gorman }
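
A note on the scanner setup at the top of compact_zone(): the free-scanner
position is re-derived with end_pfn & ~(pageblock_nr_pages-1), which rounds
down to a pageblock boundary. A minimal standalone sketch of that arithmetic,
assuming order-9 pageblocks (512 pages) as on typical x86 configurations; the
values are illustrative, not taken from the kernel:

	/* Hypothetical values; only the mask arithmetic is the point. */
	unsigned long pageblock_nr_pages = 512;	/* assumed order-9 pageblock */
	unsigned long end_pfn = 0x12345;
	unsigned long free_start;

	/* Clear the low nine bits: 0x12345 & ~0x1ff == 0x12200 */
	free_start = end_pfn & ~(pageblock_nr_pages - 1);

This guarantees the free scanner begins on a pageblock boundary even when the
zone end itself is not aligned.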
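The migrate_pages() call in the loop above supplies a matched pair of
callbacks: compaction_alloc() hands out target pages from cc->freepages, and
compaction_free() returns unused targets to that list rather than to the buddy
allocator. A sketch of the callback shapes this era of the migrate API
expects; treat the exact typedefs as an assumption about
include/linux/migrate.h rather than a quotation:

	/* Assumed callback signatures for migrate_pages() at this era: */
	typedef struct page *new_page_t(struct page *page,
					unsigned long private, int **result);
	typedef void free_page_t(struct page *page, unsigned long private);

Compaction passes the compact_control as the 'private' cookie, so both
callbacks can reach the per-compaction freelist without any global state.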
107976ab0f53SMel Gorman 
1080*e0b9daebSDavid Rientjes static unsigned long compact_zone_order(struct zone *zone, int order,
1081*e0b9daebSDavid Rientjes 		gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
108256de7263SMel Gorman {
1083e64c5237SShaohua Li 	unsigned long ret;
108456de7263SMel Gorman 	struct compact_control cc = {
108556de7263SMel Gorman 		.nr_freepages = 0,
108656de7263SMel Gorman 		.nr_migratepages = 0,
108756de7263SMel Gorman 		.order = order,
108856de7263SMel Gorman 		.migratetype = allocflags_to_migratetype(gfp_mask),
108956de7263SMel Gorman 		.zone = zone,
1090*e0b9daebSDavid Rientjes 		.mode = mode,
109156de7263SMel Gorman 	};
109256de7263SMel Gorman 	INIT_LIST_HEAD(&cc.freepages);
109356de7263SMel Gorman 	INIT_LIST_HEAD(&cc.migratepages);
109456de7263SMel Gorman 
1095e64c5237SShaohua Li 	ret = compact_zone(zone, &cc);
1096e64c5237SShaohua Li 
1097e64c5237SShaohua Li 	VM_BUG_ON(!list_empty(&cc.freepages));
1098e64c5237SShaohua Li 	VM_BUG_ON(!list_empty(&cc.migratepages));
1099e64c5237SShaohua Li 
1100e64c5237SShaohua Li 	*contended = cc.contended;
1101e64c5237SShaohua Li 	return ret;
110256de7263SMel Gorman }
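
Note how compact_zone_order() propagates cc.contended through the *contended
out-parameter: the scanners set that flag when they abort because a zone lock
is contended (or, for async compaction, because a reschedule is due), which
lets the direct-compaction caller back off rather than spin.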
110356de7263SMel Gorman 
11045e771905SMel Gorman int sysctl_extfrag_threshold = 500;
11055e771905SMel Gorman 
110656de7263SMel Gorman /**
110756de7263SMel Gorman  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
110856de7263SMel Gorman  * @zonelist: The zonelist used for the current allocation
110956de7263SMel Gorman  * @order: The order of the current allocation
111056de7263SMel Gorman  * @gfp_mask: The GFP mask of the current allocation
111156de7263SMel Gorman  * @nodemask: The allowed nodes to allocate from
1112*e0b9daebSDavid Rientjes  * @mode: The migration mode for async, sync light, or sync migration
1113661c4cb9SMel Gorman  * @contended: Return value that is true if compaction was aborted due to lock contention
111556de7263SMel Gorman  *
111656de7263SMel Gorman  * This is the main entry point for direct page compaction.
111756de7263SMel Gorman  */
111856de7263SMel Gorman unsigned long try_to_compact_pages(struct zonelist *zonelist,
111977f1fe6bSMel Gorman 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
1120*e0b9daebSDavid Rientjes 			enum migrate_mode mode, bool *contended)
112156de7263SMel Gorman {
112256de7263SMel Gorman 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
112356de7263SMel Gorman 	int may_enter_fs = gfp_mask & __GFP_FS;
112456de7263SMel Gorman 	int may_perform_io = gfp_mask & __GFP_IO;
112556de7263SMel Gorman 	struct zoneref *z;
112656de7263SMel Gorman 	struct zone *zone;
112756de7263SMel Gorman 	int rc = COMPACT_SKIPPED;
1128d95ea5d1SBartlomiej Zolnierkiewicz 	int alloc_flags = 0;
112956de7263SMel Gorman 
11304ffb6335SMel Gorman 	/* Check that the request is high-order and the GFP flags allow compaction */
1131c5a73c3dSAndrea Arcangeli 	if (!order || !may_enter_fs || !may_perform_io)
113256de7263SMel Gorman 		return rc;
113356de7263SMel Gorman 
1134010fc29aSMinchan Kim 	count_compact_event(COMPACTSTALL);
113556de7263SMel Gorman 
1136d95ea5d1SBartlomiej Zolnierkiewicz #ifdef CONFIG_CMA
1137d95ea5d1SBartlomiej Zolnierkiewicz 	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
1138d95ea5d1SBartlomiej Zolnierkiewicz 		alloc_flags |= ALLOC_CMA;
1139d95ea5d1SBartlomiej Zolnierkiewicz #endif
114056de7263SMel Gorman 	/* Compact each zone in the list */
114156de7263SMel Gorman 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
114256de7263SMel Gorman 								nodemask) {
114356de7263SMel Gorman 		int status;
114456de7263SMel Gorman 
1145*e0b9daebSDavid Rientjes 		status = compact_zone_order(zone, order, gfp_mask, mode,
11468fb74b9fSMel Gorman 						contended);
114756de7263SMel Gorman 		rc = max(status, rc);
114856de7263SMel Gorman 
11493e7d3449SMel Gorman 		/* If a normal allocation would succeed, stop compacting */
1150d95ea5d1SBartlomiej Zolnierkiewicz 		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
1151d95ea5d1SBartlomiej Zolnierkiewicz 				      alloc_flags))
115256de7263SMel Gorman 			break;
115356de7263SMel Gorman 	}
115456de7263SMel Gorman 
115556de7263SMel Gorman 	return rc;
115656de7263SMel Gorman }
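
For orientation, a hedged sketch of how the allocator slow path might drive
this entry point; the variable names and branch logic are illustrative
assumptions, not the exact mm/page_alloc.c code:

	/* Hypothetical caller, loosely modelled on direct compaction. */
	bool contended = false;
	unsigned long rc;

	rc = try_to_compact_pages(zonelist, order, gfp_mask, nodemask,
				  MIGRATE_ASYNC, &contended);
	if (rc > COMPACT_SKIPPED) {
		/* A zone may now satisfy the allocation; retry it. */
	} else if (contended) {
		/* Compaction aborted on lock contention; back off. */
	}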
115756de7263SMel Gorman 
115856de7263SMel Gorman 
115976ab0f53SMel Gorman /* Compact all zones within a node */
11607103f16dSAndrew Morton static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
116176ab0f53SMel Gorman {
116276ab0f53SMel Gorman 	int zoneid;
116376ab0f53SMel Gorman 	struct zone *zone;
116476ab0f53SMel Gorman 
116576ab0f53SMel Gorman 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
116676ab0f53SMel Gorman 
116776ab0f53SMel Gorman 		zone = &pgdat->node_zones[zoneid];
116876ab0f53SMel Gorman 		if (!populated_zone(zone))
116976ab0f53SMel Gorman 			continue;
117076ab0f53SMel Gorman 
11717be62de9SRik van Riel 		cc->nr_freepages = 0;
11727be62de9SRik van Riel 		cc->nr_migratepages = 0;
11737be62de9SRik van Riel 		cc->zone = zone;
11747be62de9SRik van Riel 		INIT_LIST_HEAD(&cc->freepages);
11757be62de9SRik van Riel 		INIT_LIST_HEAD(&cc->migratepages);
117676ab0f53SMel Gorman 
1177aad6ec37SDan Carpenter 		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
11787be62de9SRik van Riel 			compact_zone(zone, cc);
117976ab0f53SMel Gorman 
1180aff62249SRik van Riel 		if (cc->order > 0) {
1181de6c60a6SVlastimil Babka 			if (zone_watermark_ok(zone, cc->order,
1182de6c60a6SVlastimil Babka 						low_wmark_pages(zone), 0, 0))
1183de6c60a6SVlastimil Babka 				compaction_defer_reset(zone, cc->order, false);
1184aff62249SRik van Riel 		}
1185aff62249SRik van Riel 
11867be62de9SRik van Riel 		VM_BUG_ON(!list_empty(&cc->freepages));
11877be62de9SRik van Riel 		VM_BUG_ON(!list_empty(&cc->migratepages));
118876ab0f53SMel Gorman 	}
118976ab0f53SMel Gorman }
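
Throughout __compact_pgdat(), cc->order == -1 acts as a sentinel for an
explicit, full compaction request (the /proc and sysfs triggers below): it
bypasses compaction_deferred() and skips the watermark-based defer reset. A
hypothetical helper spells the convention out; this revision open-codes the
comparison instead:

	/* Hypothetical helper, not present at this revision. */
	static inline bool is_explicit_compaction(int order)
	{
		return order == -1;	/* request came from proc or sysfs */
	}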
119076ab0f53SMel Gorman 
11917103f16dSAndrew Morton void compact_pgdat(pg_data_t *pgdat, int order)
11927be62de9SRik van Riel {
11937be62de9SRik van Riel 	struct compact_control cc = {
11947be62de9SRik van Riel 		.order = order,
1195*e0b9daebSDavid Rientjes 		.mode = MIGRATE_ASYNC,
11967be62de9SRik van Riel 	};
11977be62de9SRik van Riel 
11983a7200afSMel Gorman 	if (!order)
11993a7200afSMel Gorman 		return;
12003a7200afSMel Gorman 
12017103f16dSAndrew Morton 	__compact_pgdat(pgdat, &cc);
12027be62de9SRik van Riel }
12037be62de9SRik van Riel 
12047103f16dSAndrew Morton static void compact_node(int nid)
12057be62de9SRik van Riel {
12067be62de9SRik van Riel 	struct compact_control cc = {
12077be62de9SRik van Riel 		.order = -1,
1208*e0b9daebSDavid Rientjes 		.mode = MIGRATE_SYNC,
120991ca9186SDavid Rientjes 		.ignore_skip_hint = true,
12107be62de9SRik van Riel 	};
12117be62de9SRik van Riel 
12127103f16dSAndrew Morton 	__compact_pgdat(NODE_DATA(nid), &cc);
12137be62de9SRik van Riel }
12147be62de9SRik van Riel 
121576ab0f53SMel Gorman /* Compact all nodes in the system */
12167964c06dSJason Liu static void compact_nodes(void)
121776ab0f53SMel Gorman {
121876ab0f53SMel Gorman 	int nid;
121976ab0f53SMel Gorman 
12208575ec29SHugh Dickins 	/* Flush pending updates to the LRU lists */
12218575ec29SHugh Dickins 	lru_add_drain_all();
12228575ec29SHugh Dickins 
122376ab0f53SMel Gorman 	for_each_online_node(nid)
122476ab0f53SMel Gorman 		compact_node(nid);
122576ab0f53SMel Gorman }
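
The lru_add_drain_all() call above is not incidental: pages still sitting in
per-CPU pagevecs are not yet on an LRU list, and the migration scanner only
isolates pages it finds on an LRU, so flushing the pagevecs first widens the
pool of movable pages the scan can see.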
122676ab0f53SMel Gorman 
122776ab0f53SMel Gorman /* The written value is actually unused; any write compacts all memory */
122876ab0f53SMel Gorman int sysctl_compact_memory;
122976ab0f53SMel Gorman 
123076ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */
123176ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write,
123276ab0f53SMel Gorman 			void __user *buffer, size_t *length, loff_t *ppos)
123376ab0f53SMel Gorman {
123476ab0f53SMel Gorman 	if (write)
12357964c06dSJason Liu 		compact_nodes();
123676ab0f53SMel Gorman 
123776ab0f53SMel Gorman 	return 0;
123876ab0f53SMel Gorman }
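
Writing any value to /proc/sys/vm/compact_memory lands in this handler, and
the value itself is ignored. A minimal userspace sketch of the trigger (error
handling trimmed for brevity):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* Any write triggers compaction of all online nodes. */
		int fd = open("/proc/sys/vm/compact_memory", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "1", 1);
		close(fd);
		return 0;
	}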
1239ed4a6d7fSMel Gorman 
12405e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write,
12415e771905SMel Gorman 			void __user *buffer, size_t *length, loff_t *ppos)
12425e771905SMel Gorman {
12435e771905SMel Gorman 	proc_dointvec_minmax(table, write, buffer, length, ppos);
12445e771905SMel Gorman 
12455e771905SMel Gorman 	return 0;
12465e771905SMel Gorman }
12475e771905SMel Gorman 
1248ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
124974e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev,
125010fbcf4cSKay Sievers 			struct device_attribute *attr,
1251ed4a6d7fSMel Gorman 			const char *buf, size_t count)
1252ed4a6d7fSMel Gorman {
12538575ec29SHugh Dickins 	int nid = dev->id;
12548575ec29SHugh Dickins 
12558575ec29SHugh Dickins 	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
12568575ec29SHugh Dickins 		/* Flush pending updates to the LRU lists */
12578575ec29SHugh Dickins 		lru_add_drain_all();
12588575ec29SHugh Dickins 
12598575ec29SHugh Dickins 		compact_node(nid);
12608575ec29SHugh Dickins 	}
1261ed4a6d7fSMel Gorman 
1262ed4a6d7fSMel Gorman 	return count;
1263ed4a6d7fSMel Gorman }
126410fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1265ed4a6d7fSMel Gorman 
1266ed4a6d7fSMel Gorman int compaction_register_node(struct node *node)
1267ed4a6d7fSMel Gorman {
126810fbcf4cSKay Sievers 	return device_create_file(&node->dev, &dev_attr_compact);
1269ed4a6d7fSMel Gorman }
1270ed4a6d7fSMel Gorman 
1271ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node)
1272ed4a6d7fSMel Gorman {
127310fbcf4cSKay Sievers 	return device_remove_file(&node->dev, &dev_attr_compact);
1274ed4a6d7fSMel Gorman }
1275ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */
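
The sysfs attribute above gives the same explicit trigger per node: with
CONFIG_SYSFS and CONFIG_NUMA, a write to /sys/devices/system/node/nodeN/compact
drains the LRU pagevecs and runs compact_node() for that node alone.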
1276ff9543fdSMichal Nazarewicz 
1277ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */
1278