xref: /openbmc/linux/mm/compaction.c (revision be9765722e6b7ece8263cbab857490332339bd6f)
1748446bbSMel Gorman /*
2748446bbSMel Gorman  * linux/mm/compaction.c
3748446bbSMel Gorman  *
4748446bbSMel Gorman  * Memory compaction for the reduction of external fragmentation. Note that
5748446bbSMel Gorman  * this heavily depends upon page migration to do all the real heavy
6748446bbSMel Gorman  * lifting
7748446bbSMel Gorman  *
8748446bbSMel Gorman  * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
9748446bbSMel Gorman  */
10748446bbSMel Gorman #include <linux/swap.h>
11748446bbSMel Gorman #include <linux/migrate.h>
12748446bbSMel Gorman #include <linux/compaction.h>
13748446bbSMel Gorman #include <linux/mm_inline.h>
14748446bbSMel Gorman #include <linux/backing-dev.h>
1576ab0f53SMel Gorman #include <linux/sysctl.h>
16ed4a6d7fSMel Gorman #include <linux/sysfs.h>
17bf6bddf1SRafael Aquini #include <linux/balloon_compaction.h>
18194159fbSMinchan Kim #include <linux/page-isolation.h>
19748446bbSMel Gorman #include "internal.h"
20748446bbSMel Gorman 
21010fc29aSMinchan Kim #ifdef CONFIG_COMPACTION
22010fc29aSMinchan Kim static inline void count_compact_event(enum vm_event_item item)
23010fc29aSMinchan Kim {
24010fc29aSMinchan Kim 	count_vm_event(item);
25010fc29aSMinchan Kim }
26010fc29aSMinchan Kim 
27010fc29aSMinchan Kim static inline void count_compact_events(enum vm_event_item item, long delta)
28010fc29aSMinchan Kim {
29010fc29aSMinchan Kim 	count_vm_events(item, delta);
30010fc29aSMinchan Kim }
31010fc29aSMinchan Kim #else
32010fc29aSMinchan Kim #define count_compact_event(item) do { } while (0)
33010fc29aSMinchan Kim #define count_compact_events(item, delta) do { } while (0)
34010fc29aSMinchan Kim #endif
35010fc29aSMinchan Kim 
36ff9543fdSMichal Nazarewicz #if defined CONFIG_COMPACTION || defined CONFIG_CMA
37ff9543fdSMichal Nazarewicz 
38b7aba698SMel Gorman #define CREATE_TRACE_POINTS
39b7aba698SMel Gorman #include <trace/events/compaction.h>
40b7aba698SMel Gorman 
41748446bbSMel Gorman static unsigned long release_freepages(struct list_head *freelist)
42748446bbSMel Gorman {
43748446bbSMel Gorman 	struct page *page, *next;
44748446bbSMel Gorman 	unsigned long count = 0;
45748446bbSMel Gorman 
46748446bbSMel Gorman 	list_for_each_entry_safe(page, next, freelist, lru) {
47748446bbSMel Gorman 		list_del(&page->lru);
48748446bbSMel Gorman 		__free_page(page);
49748446bbSMel Gorman 		count++;
50748446bbSMel Gorman 	}
51748446bbSMel Gorman 
52748446bbSMel Gorman 	return count;
53748446bbSMel Gorman }
54748446bbSMel Gorman 
55ff9543fdSMichal Nazarewicz static void map_pages(struct list_head *list)
56ff9543fdSMichal Nazarewicz {
57ff9543fdSMichal Nazarewicz 	struct page *page;
58ff9543fdSMichal Nazarewicz 
59ff9543fdSMichal Nazarewicz 	list_for_each_entry(page, list, lru) {
60ff9543fdSMichal Nazarewicz 		arch_alloc_page(page, 0);
61ff9543fdSMichal Nazarewicz 		kernel_map_pages(page, 1, 1);
62ff9543fdSMichal Nazarewicz 	}
63ff9543fdSMichal Nazarewicz }
64ff9543fdSMichal Nazarewicz 
6547118af0SMichal Nazarewicz static inline bool migrate_async_suitable(int migratetype)
6647118af0SMichal Nazarewicz {
6747118af0SMichal Nazarewicz 	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
6847118af0SMichal Nazarewicz }
6947118af0SMichal Nazarewicz 
70bb13ffebSMel Gorman #ifdef CONFIG_COMPACTION
71bb13ffebSMel Gorman /* Returns true if the pageblock should be scanned for pages to isolate. */
72bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc,
73bb13ffebSMel Gorman 					struct page *page)
74bb13ffebSMel Gorman {
75bb13ffebSMel Gorman 	if (cc->ignore_skip_hint)
76bb13ffebSMel Gorman 		return true;
77bb13ffebSMel Gorman 
78bb13ffebSMel Gorman 	return !get_pageblock_skip(page);
79bb13ffebSMel Gorman }
80bb13ffebSMel Gorman 
81bb13ffebSMel Gorman /*
82bb13ffebSMel Gorman  * This function is called to clear all cached information on pageblocks that
83bb13ffebSMel Gorman  * should be skipped for page isolation when the migrate and free page scanner
84bb13ffebSMel Gorman  * meet.
85bb13ffebSMel Gorman  */
8662997027SMel Gorman static void __reset_isolation_suitable(struct zone *zone)
87bb13ffebSMel Gorman {
88bb13ffebSMel Gorman 	unsigned long start_pfn = zone->zone_start_pfn;
89108bcc96SCody P Schafer 	unsigned long end_pfn = zone_end_pfn(zone);
90bb13ffebSMel Gorman 	unsigned long pfn;
91bb13ffebSMel Gorman 
9235979ef3SDavid Rientjes 	zone->compact_cached_migrate_pfn[0] = start_pfn;
9335979ef3SDavid Rientjes 	zone->compact_cached_migrate_pfn[1] = start_pfn;
94c89511abSMel Gorman 	zone->compact_cached_free_pfn = end_pfn;
9562997027SMel Gorman 	zone->compact_blockskip_flush = false;
96bb13ffebSMel Gorman 
97bb13ffebSMel Gorman 	/* Walk the zone and mark every pageblock as suitable for isolation */
98bb13ffebSMel Gorman 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
99bb13ffebSMel Gorman 		struct page *page;
100bb13ffebSMel Gorman 
101bb13ffebSMel Gorman 		cond_resched();
102bb13ffebSMel Gorman 
103bb13ffebSMel Gorman 		if (!pfn_valid(pfn))
104bb13ffebSMel Gorman 			continue;
105bb13ffebSMel Gorman 
106bb13ffebSMel Gorman 		page = pfn_to_page(pfn);
107bb13ffebSMel Gorman 		if (zone != page_zone(page))
108bb13ffebSMel Gorman 			continue;
109bb13ffebSMel Gorman 
110bb13ffebSMel Gorman 		clear_pageblock_skip(page);
111bb13ffebSMel Gorman 	}
112bb13ffebSMel Gorman }
113bb13ffebSMel Gorman 
11462997027SMel Gorman void reset_isolation_suitable(pg_data_t *pgdat)
11562997027SMel Gorman {
11662997027SMel Gorman 	int zoneid;
11762997027SMel Gorman 
11862997027SMel Gorman 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
11962997027SMel Gorman 		struct zone *zone = &pgdat->node_zones[zoneid];
12062997027SMel Gorman 		if (!populated_zone(zone))
12162997027SMel Gorman 			continue;
12262997027SMel Gorman 
12362997027SMel Gorman 		/* Only flush if a full compaction finished recently */
12462997027SMel Gorman 		if (zone->compact_blockskip_flush)
12562997027SMel Gorman 			__reset_isolation_suitable(zone);
12662997027SMel Gorman 	}
12762997027SMel Gorman }
12862997027SMel Gorman 
129bb13ffebSMel Gorman /*
130bb13ffebSMel Gorman  * If no pages were isolated then mark this pageblock to be skipped in the
13162997027SMel Gorman  * future. The information is later cleared by __reset_isolation_suitable().
132bb13ffebSMel Gorman  */
133c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc,
134c89511abSMel Gorman 			struct page *page, unsigned long nr_isolated,
13535979ef3SDavid Rientjes 			bool set_unsuitable, bool migrate_scanner)
136bb13ffebSMel Gorman {
137c89511abSMel Gorman 	struct zone *zone = cc->zone;
13835979ef3SDavid Rientjes 	unsigned long pfn;
1396815bf3fSJoonsoo Kim 
1406815bf3fSJoonsoo Kim 	if (cc->ignore_skip_hint)
1416815bf3fSJoonsoo Kim 		return;
1426815bf3fSJoonsoo Kim 
143bb13ffebSMel Gorman 	if (!page)
144bb13ffebSMel Gorman 		return;
145bb13ffebSMel Gorman 
14635979ef3SDavid Rientjes 	if (nr_isolated)
14735979ef3SDavid Rientjes 		return;
14835979ef3SDavid Rientjes 
14935979ef3SDavid Rientjes 	/*
15035979ef3SDavid Rientjes 	 * Only skip pageblocks when all forms of compaction are known to
15135979ef3SDavid Rientjes 	 * fail in the near future.
15235979ef3SDavid Rientjes 	 */
15335979ef3SDavid Rientjes 	if (set_unsuitable)
154bb13ffebSMel Gorman 		set_pageblock_skip(page);
155c89511abSMel Gorman 
15635979ef3SDavid Rientjes 	pfn = page_to_pfn(page);
15735979ef3SDavid Rientjes 
15835979ef3SDavid Rientjes 	/* Update where async and sync compaction should restart */
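	/*
	 * Note: the migrate scanner's cached pfn only ever moves forward
	 * (towards the zone end) and the free scanner's only ever moves
	 * backward, matching the directions the two scanners walk the zone.
	 */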
159c89511abSMel Gorman 	if (migrate_scanner) {
16035979ef3SDavid Rientjes 		if (cc->finished_update_migrate)
16135979ef3SDavid Rientjes 			return;
16235979ef3SDavid Rientjes 		if (pfn > zone->compact_cached_migrate_pfn[0])
16335979ef3SDavid Rientjes 			zone->compact_cached_migrate_pfn[0] = pfn;
164e0b9daebSDavid Rientjes 		if (cc->mode != MIGRATE_ASYNC &&
165e0b9daebSDavid Rientjes 		    pfn > zone->compact_cached_migrate_pfn[1])
16635979ef3SDavid Rientjes 			zone->compact_cached_migrate_pfn[1] = pfn;
167c89511abSMel Gorman 	} else {
16835979ef3SDavid Rientjes 		if (cc->finished_update_free)
16935979ef3SDavid Rientjes 			return;
17035979ef3SDavid Rientjes 		if (pfn < zone->compact_cached_free_pfn)
171c89511abSMel Gorman 			zone->compact_cached_free_pfn = pfn;
172c89511abSMel Gorman 	}
173c89511abSMel Gorman }
174bb13ffebSMel Gorman #else
175bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc,
176bb13ffebSMel Gorman 					struct page *page)
177bb13ffebSMel Gorman {
178bb13ffebSMel Gorman 	return true;
179bb13ffebSMel Gorman }
180bb13ffebSMel Gorman 
181c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc,
182c89511abSMel Gorman 			struct page *page, unsigned long nr_isolated,
18335979ef3SDavid Rientjes 			bool set_unsuitable, bool migrate_scanner)
184bb13ffebSMel Gorman {
185bb13ffebSMel Gorman }
186bb13ffebSMel Gorman #endif /* CONFIG_COMPACTION */
187bb13ffebSMel Gorman 
1882a1402aaSMel Gorman static inline bool should_release_lock(spinlock_t *lock)
1892a1402aaSMel Gorman {
1902a1402aaSMel Gorman 	return need_resched() || spin_is_contended(lock);
1912a1402aaSMel Gorman }
1922a1402aaSMel Gorman 
19385aa125fSMichal Nazarewicz /*
194c67fe375SMel Gorman  * Compaction requires the taking of some coarse locks that are potentially
195c67fe375SMel Gorman  * very heavily contended. Check if the process needs to be scheduled or
196c67fe375SMel Gorman  * if the lock is contended. For async compaction, back out if contention
197c67fe375SMel Gorman  * is severe. For sync compaction, schedule.
198c67fe375SMel Gorman  *
199c67fe375SMel Gorman  * Returns true if the lock is held.
200c67fe375SMel Gorman  * Returns false if the lock is released and compaction should abort
201c67fe375SMel Gorman  */
202c67fe375SMel Gorman static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
203c67fe375SMel Gorman 				      bool locked, struct compact_control *cc)
204c67fe375SMel Gorman {
2052a1402aaSMel Gorman 	if (should_release_lock(lock)) {
206c67fe375SMel Gorman 		if (locked) {
207c67fe375SMel Gorman 			spin_unlock_irqrestore(lock, *flags);
208c67fe375SMel Gorman 			locked = false;
209c67fe375SMel Gorman 		}
210c67fe375SMel Gorman 
211c67fe375SMel Gorman 		/* async aborts if taking too long or contended */
212e0b9daebSDavid Rientjes 		if (cc->mode == MIGRATE_ASYNC) {
213e64c5237SShaohua Li 			cc->contended = true;
214c67fe375SMel Gorman 			return false;
215c67fe375SMel Gorman 		}
216c67fe375SMel Gorman 
217c67fe375SMel Gorman 		cond_resched();
218c67fe375SMel Gorman 	}
219c67fe375SMel Gorman 
220c67fe375SMel Gorman 	if (!locked)
221c67fe375SMel Gorman 		spin_lock_irqsave(lock, *flags);
222c67fe375SMel Gorman 	return true;
223c67fe375SMel Gorman }
224c67fe375SMel Gorman 
225*be976572SVlastimil Babka /*
226*be976572SVlastimil Babka  * Aside from avoiding lock contention, compaction also periodically checks
227*be976572SVlastimil Babka  * need_resched() and either schedules in sync compaction or aborts async
228*be976572SVlastimil Babka  * compaction. This is similar to what compact_checklock_irqsave() does, but
229*be976572SVlastimil Babka  * is used where no lock is concerned.
230*be976572SVlastimil Babka  *
231*be976572SVlastimil Babka  * Returns false when no scheduling was needed, or sync compaction scheduled.
232*be976572SVlastimil Babka  * Returns true when async compaction should abort.
233*be976572SVlastimil Babka  */
234*be976572SVlastimil Babka static inline bool compact_should_abort(struct compact_control *cc)
235*be976572SVlastimil Babka {
236*be976572SVlastimil Babka 	/* async compaction aborts if contended */
237*be976572SVlastimil Babka 	if (need_resched()) {
238*be976572SVlastimil Babka 		if (cc->mode == MIGRATE_ASYNC) {
239*be976572SVlastimil Babka 			cc->contended = true;
240*be976572SVlastimil Babka 			return true;
241*be976572SVlastimil Babka 		}
242*be976572SVlastimil Babka 
243*be976572SVlastimil Babka 		cond_resched();
244*be976572SVlastimil Babka 	}
245*be976572SVlastimil Babka 
246*be976572SVlastimil Babka 	return false;
247*be976572SVlastimil Babka }
248*be976572SVlastimil Babka 
249f40d1e42SMel Gorman /* Returns true if the page is within a block suitable for migration to */
250f40d1e42SMel Gorman static bool suitable_migration_target(struct page *page)
251f40d1e42SMel Gorman {
2527d348b9eSJoonsoo Kim 	/* If the page is a large free page, then disallow migration */
253f40d1e42SMel Gorman 	if (PageBuddy(page) && page_order(page) >= pageblock_order)
2547d348b9eSJoonsoo Kim 		return false;
255f40d1e42SMel Gorman 
256f40d1e42SMel Gorman 	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
2577d348b9eSJoonsoo Kim 	if (migrate_async_suitable(get_pageblock_migratetype(page)))
258f40d1e42SMel Gorman 		return true;
259f40d1e42SMel Gorman 
260f40d1e42SMel Gorman 	/* Otherwise skip the block */
261f40d1e42SMel Gorman 	return false;
262f40d1e42SMel Gorman }
263f40d1e42SMel Gorman 
264c67fe375SMel Gorman /*
2659e4be470SJerome Marchand  * Isolate free pages onto a private freelist. If @strict is true, will abort
2669e4be470SJerome Marchand  * returning 0 on any invalid PFNs or non-free pages inside the pageblock
2679e4be470SJerome Marchand  * (even though it may still end up isolating some pages).
26885aa125fSMichal Nazarewicz  */
269f40d1e42SMel Gorman static unsigned long isolate_freepages_block(struct compact_control *cc,
270f40d1e42SMel Gorman 				unsigned long blockpfn,
27185aa125fSMichal Nazarewicz 				unsigned long end_pfn,
27285aa125fSMichal Nazarewicz 				struct list_head *freelist,
27385aa125fSMichal Nazarewicz 				bool strict)
274748446bbSMel Gorman {
275b7aba698SMel Gorman 	int nr_scanned = 0, total_isolated = 0;
276bb13ffebSMel Gorman 	struct page *cursor, *valid_page = NULL;
277f40d1e42SMel Gorman 	unsigned long flags;
278f40d1e42SMel Gorman 	bool locked = false;
27901ead534SJoonsoo Kim 	bool checked_pageblock = false;
280748446bbSMel Gorman 
281748446bbSMel Gorman 	cursor = pfn_to_page(blockpfn);
282748446bbSMel Gorman 
283f40d1e42SMel Gorman 	/* Isolate free pages. */
284748446bbSMel Gorman 	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
285748446bbSMel Gorman 		int isolated, i;
286748446bbSMel Gorman 		struct page *page = cursor;
287748446bbSMel Gorman 
288b7aba698SMel Gorman 		nr_scanned++;
289f40d1e42SMel Gorman 		if (!pfn_valid_within(blockpfn))
2902af120bcSLaura Abbott 			goto isolate_fail;
2912af120bcSLaura Abbott 
292bb13ffebSMel Gorman 		if (!valid_page)
293bb13ffebSMel Gorman 			valid_page = page;
294f40d1e42SMel Gorman 		if (!PageBuddy(page))
2952af120bcSLaura Abbott 			goto isolate_fail;
296f40d1e42SMel Gorman 
297f40d1e42SMel Gorman 		/*
298f40d1e42SMel Gorman 		 * The zone lock must be held to isolate freepages.
299f40d1e42SMel Gorman 		 * Unfortunately this is a very coarse lock and can be
300f40d1e42SMel Gorman 		 * heavily contended if there are parallel allocations
301f40d1e42SMel Gorman 		 * or parallel compactions. For async compaction, do not
302f40d1e42SMel Gorman 		 * spin on the lock; acquire it as late as
303f40d1e42SMel Gorman 		 * possible.
304f40d1e42SMel Gorman 		 */
305f40d1e42SMel Gorman 		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
306f40d1e42SMel Gorman 								locked, cc);
307f40d1e42SMel Gorman 		if (!locked)
308f40d1e42SMel Gorman 			break;
309f40d1e42SMel Gorman 
310f40d1e42SMel Gorman 		/* Recheck this is a suitable migration target under lock */
31101ead534SJoonsoo Kim 		if (!strict && !checked_pageblock) {
31201ead534SJoonsoo Kim 			/*
31301ead534SJoonsoo Kim 			 * We need to check the suitability of the pageblock only
31401ead534SJoonsoo Kim 			 * once; since isolate_freepages_block() is called for a
31501ead534SJoonsoo Kim 			 * single pageblock range, checking once is sufficient.
31601ead534SJoonsoo Kim 			 */
31701ead534SJoonsoo Kim 			checked_pageblock = true;
31801ead534SJoonsoo Kim 			if (!suitable_migration_target(page))
319f40d1e42SMel Gorman 				break;
32001ead534SJoonsoo Kim 		}
321f40d1e42SMel Gorman 
322f40d1e42SMel Gorman 		/* Recheck this is a buddy page under lock */
323f40d1e42SMel Gorman 		if (!PageBuddy(page))
3242af120bcSLaura Abbott 			goto isolate_fail;
325748446bbSMel Gorman 
326748446bbSMel Gorman 		/* Found a free page, break it into order-0 pages */
327748446bbSMel Gorman 		isolated = split_free_page(page);
328748446bbSMel Gorman 		total_isolated += isolated;
329748446bbSMel Gorman 		for (i = 0; i < isolated; i++) {
330748446bbSMel Gorman 			list_add(&page->lru, freelist);
331748446bbSMel Gorman 			page++;
332748446bbSMel Gorman 		}
333748446bbSMel Gorman 
334748446bbSMel Gorman 		/* If a page was split, advance to the end of it */
335748446bbSMel Gorman 		if (isolated) {
336748446bbSMel Gorman 			blockpfn += isolated - 1;
337748446bbSMel Gorman 			cursor += isolated - 1;
3382af120bcSLaura Abbott 			continue;
339748446bbSMel Gorman 		}
3402af120bcSLaura Abbott 
3412af120bcSLaura Abbott isolate_fail:
3422af120bcSLaura Abbott 		if (strict)
3432af120bcSLaura Abbott 			break;
3442af120bcSLaura Abbott 		else
3452af120bcSLaura Abbott 			continue;
3462af120bcSLaura Abbott 
347748446bbSMel Gorman 	}
348748446bbSMel Gorman 
349b7aba698SMel Gorman 	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
350f40d1e42SMel Gorman 
351f40d1e42SMel Gorman 	/*
352f40d1e42SMel Gorman 	 * If strict isolation is requested by CMA then check that all the
353f40d1e42SMel Gorman 	 * pages requested were isolated. If there were any failures, 0 is
354f40d1e42SMel Gorman 	 * returned and CMA will fail.
355f40d1e42SMel Gorman 	 */
3562af120bcSLaura Abbott 	if (strict && blockpfn < end_pfn)
357f40d1e42SMel Gorman 		total_isolated = 0;
358f40d1e42SMel Gorman 
359f40d1e42SMel Gorman 	if (locked)
360f40d1e42SMel Gorman 		spin_unlock_irqrestore(&cc->zone->lock, flags);
361f40d1e42SMel Gorman 
362bb13ffebSMel Gorman 	/* Update the pageblock-skip if the whole pageblock was scanned */
363bb13ffebSMel Gorman 	if (blockpfn == end_pfn)
36435979ef3SDavid Rientjes 		update_pageblock_skip(cc, valid_page, total_isolated, true,
36535979ef3SDavid Rientjes 				      false);
366bb13ffebSMel Gorman 
367010fc29aSMinchan Kim 	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
368397487dbSMel Gorman 	if (total_isolated)
369010fc29aSMinchan Kim 		count_compact_events(COMPACTISOLATED, total_isolated);
370748446bbSMel Gorman 	return total_isolated;
371748446bbSMel Gorman }
372748446bbSMel Gorman 
37385aa125fSMichal Nazarewicz /**
37485aa125fSMichal Nazarewicz  * isolate_freepages_range() - isolate free pages.
37585aa125fSMichal Nazarewicz  * @start_pfn: The first PFN to start isolating.
37685aa125fSMichal Nazarewicz  * @end_pfn:   The one-past-last PFN.
37785aa125fSMichal Nazarewicz  *
37885aa125fSMichal Nazarewicz  * Non-free pages, invalid PFNs, or zone boundaries within the
37985aa125fSMichal Nazarewicz  * [start_pfn, end_pfn) range are considered errors and cause the function to
38085aa125fSMichal Nazarewicz  * undo its actions and return zero.
38185aa125fSMichal Nazarewicz  *
38285aa125fSMichal Nazarewicz  * Otherwise, the function returns the one-past-the-last PFN of the isolated
38385aa125fSMichal Nazarewicz  * pages (which may be greater than end_pfn if the end fell in the middle of
38485aa125fSMichal Nazarewicz  * a free page).
38585aa125fSMichal Nazarewicz  */
386ff9543fdSMichal Nazarewicz unsigned long
387bb13ffebSMel Gorman isolate_freepages_range(struct compact_control *cc,
388bb13ffebSMel Gorman 			unsigned long start_pfn, unsigned long end_pfn)
38985aa125fSMichal Nazarewicz {
390f40d1e42SMel Gorman 	unsigned long isolated, pfn, block_end_pfn;
39185aa125fSMichal Nazarewicz 	LIST_HEAD(freelist);
39285aa125fSMichal Nazarewicz 
39385aa125fSMichal Nazarewicz 	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
394bb13ffebSMel Gorman 		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
39585aa125fSMichal Nazarewicz 			break;
39685aa125fSMichal Nazarewicz 
39785aa125fSMichal Nazarewicz 		/*
39885aa125fSMichal Nazarewicz 		 * On subsequent iterations ALIGN() is actually not needed,
40085aa125fSMichal Nazarewicz 		 * but we keep it so as not to complicate the code.
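		 * For example, with pageblock_nr_pages == 512 (2MB pageblocks
		 * with 4K base pages), a start pfn of 1000 gives
		 * ALIGN(1001, 512) == 1024, the end of the enclosing pageblock.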
40085aa125fSMichal Nazarewicz 		 */
40185aa125fSMichal Nazarewicz 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
40285aa125fSMichal Nazarewicz 		block_end_pfn = min(block_end_pfn, end_pfn);
40385aa125fSMichal Nazarewicz 
404bb13ffebSMel Gorman 		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
40585aa125fSMichal Nazarewicz 						   &freelist, true);
40685aa125fSMichal Nazarewicz 
40785aa125fSMichal Nazarewicz 		/*
40885aa125fSMichal Nazarewicz 		 * In strict mode, isolate_freepages_block() returns 0 if
40985aa125fSMichal Nazarewicz 		 * there are any holes in the block (ie. invalid PFNs or
41085aa125fSMichal Nazarewicz 		 * non-free pages).
41185aa125fSMichal Nazarewicz 		 */
41285aa125fSMichal Nazarewicz 		if (!isolated)
41385aa125fSMichal Nazarewicz 			break;
41485aa125fSMichal Nazarewicz 
41585aa125fSMichal Nazarewicz 		/*
41685aa125fSMichal Nazarewicz 		 * If we managed to isolate pages, it is always (1 << n) *
41785aa125fSMichal Nazarewicz 		 * pageblock_nr_pages for some non-negative n.  (Max order
41885aa125fSMichal Nazarewicz 		 * page may span two pageblocks).
41985aa125fSMichal Nazarewicz 		 */
42085aa125fSMichal Nazarewicz 	}
42185aa125fSMichal Nazarewicz 
42285aa125fSMichal Nazarewicz 	/* split_free_page does not map the pages */
42385aa125fSMichal Nazarewicz 	map_pages(&freelist);
42485aa125fSMichal Nazarewicz 
42585aa125fSMichal Nazarewicz 	if (pfn < end_pfn) {
42685aa125fSMichal Nazarewicz 		/* Loop terminated early, cleanup. */
42785aa125fSMichal Nazarewicz 		release_freepages(&freelist);
42885aa125fSMichal Nazarewicz 		return 0;
42985aa125fSMichal Nazarewicz 	}
43085aa125fSMichal Nazarewicz 
43185aa125fSMichal Nazarewicz 	/* We don't use freelists for anything. */
43285aa125fSMichal Nazarewicz 	return pfn;
43385aa125fSMichal Nazarewicz }
43485aa125fSMichal Nazarewicz 
435748446bbSMel Gorman /* Update the number of anon and file isolated pages in the zone */
436c67fe375SMel Gorman static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
437748446bbSMel Gorman {
438748446bbSMel Gorman 	struct page *page;
439b9e84ac1SMinchan Kim 	unsigned int count[2] = { 0, };
440748446bbSMel Gorman 
441b9e84ac1SMinchan Kim 	list_for_each_entry(page, &cc->migratepages, lru)
442b9e84ac1SMinchan Kim 		count[!!page_is_file_cache(page)]++;
443748446bbSMel Gorman 
444c67fe375SMel Gorman 	/* If locked we can use the interrupt unsafe versions */
445c67fe375SMel Gorman 	if (locked) {
446b9e84ac1SMinchan Kim 		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
447b9e84ac1SMinchan Kim 		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
448c67fe375SMel Gorman 	} else {
449c67fe375SMel Gorman 		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
450c67fe375SMel Gorman 		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
451c67fe375SMel Gorman 	}
452748446bbSMel Gorman }
453748446bbSMel Gorman 
454748446bbSMel Gorman /* Similar to reclaim, but different enough that they don't share logic */
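/*
 * An illustrative example: with 3000 inactive and 1000 active LRU pages in
 * the zone, more than (3000 + 1000) / 2 == 2000 already-isolated pages counts
 * as too many, and the caller backs off (async compaction aborts instead of
 * waiting).
 */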
455748446bbSMel Gorman static bool too_many_isolated(struct zone *zone)
456748446bbSMel Gorman {
457bc693045SMinchan Kim 	unsigned long active, inactive, isolated;
458748446bbSMel Gorman 
459748446bbSMel Gorman 	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
460748446bbSMel Gorman 					zone_page_state(zone, NR_INACTIVE_ANON);
461bc693045SMinchan Kim 	active = zone_page_state(zone, NR_ACTIVE_FILE) +
462bc693045SMinchan Kim 					zone_page_state(zone, NR_ACTIVE_ANON);
463748446bbSMel Gorman 	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
464748446bbSMel Gorman 					zone_page_state(zone, NR_ISOLATED_ANON);
465748446bbSMel Gorman 
466bc693045SMinchan Kim 	return isolated > (inactive + active) / 2;
467748446bbSMel Gorman }
468748446bbSMel Gorman 
4692fe86e00SMichal Nazarewicz /**
4702fe86e00SMichal Nazarewicz  * isolate_migratepages_range() - isolate all migrate-able pages in range.
4712fe86e00SMichal Nazarewicz  * @zone:	Zone pages are in.
4722fe86e00SMichal Nazarewicz  * @cc:		Compaction control structure.
4732fe86e00SMichal Nazarewicz  * @low_pfn:	The first PFN of the range.
4742fe86e00SMichal Nazarewicz  * @end_pfn:	The one-past-the-last PFN of the range.
475e46a2879SMinchan Kim  * @unevictable: true if unevictable pages may be isolated
4762fe86e00SMichal Nazarewicz  *
4772fe86e00SMichal Nazarewicz  * Isolate all pages that can be migrated from the range specified by
4782fe86e00SMichal Nazarewicz  * [low_pfn, end_pfn).  Returns zero if there is a fatal signal
4792fe86e00SMichal Nazarewicz  * pending, otherwise the PFN of the first page that was not scanned
4802fe86e00SMichal Nazarewicz  * (which may be less than, equal to, or greater than end_pfn).
4812fe86e00SMichal Nazarewicz  *
4822fe86e00SMichal Nazarewicz  * Assumes that cc->migratepages is empty and cc->nr_migratepages is
4832fe86e00SMichal Nazarewicz  * zero.
4842fe86e00SMichal Nazarewicz  *
4852fe86e00SMichal Nazarewicz  * Apart from cc->migratepages and cc->nr_migratepages this function
4862fe86e00SMichal Nazarewicz  * does not modify any cc's fields, in particular it does not modify
4872fe86e00SMichal Nazarewicz  * (or read for that matter) cc->migrate_pfn.
488748446bbSMel Gorman  */
489ff9543fdSMichal Nazarewicz unsigned long
4902fe86e00SMichal Nazarewicz isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
491e46a2879SMinchan Kim 		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
492748446bbSMel Gorman {
4939927af74SMel Gorman 	unsigned long last_pageblock_nr = 0, pageblock_nr;
494b7aba698SMel Gorman 	unsigned long nr_scanned = 0, nr_isolated = 0;
495748446bbSMel Gorman 	struct list_head *migratelist = &cc->migratepages;
496fa9add64SHugh Dickins 	struct lruvec *lruvec;
497c67fe375SMel Gorman 	unsigned long flags;
4982a1402aaSMel Gorman 	bool locked = false;
499bb13ffebSMel Gorman 	struct page *page = NULL, *valid_page = NULL;
50035979ef3SDavid Rientjes 	bool set_unsuitable = true;
501e0b9daebSDavid Rientjes 	const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
502e0b9daebSDavid Rientjes 					ISOLATE_ASYNC_MIGRATE : 0) |
503da1c67a7SDavid Rientjes 				    (unevictable ? ISOLATE_UNEVICTABLE : 0);
504748446bbSMel Gorman 
505748446bbSMel Gorman 	/*
506748446bbSMel Gorman 	 * Ensure that there are not too many pages isolated from the LRU
507748446bbSMel Gorman 	 * list by either parallel reclaimers or compaction. If there are,
508748446bbSMel Gorman 	 * delay for some time until fewer pages are isolated
509748446bbSMel Gorman 	 */
510748446bbSMel Gorman 	while (unlikely(too_many_isolated(zone))) {
511f9e35b3bSMel Gorman 		/* async migration should just abort */
512e0b9daebSDavid Rientjes 		if (cc->mode == MIGRATE_ASYNC)
5132fe86e00SMichal Nazarewicz 			return 0;
514f9e35b3bSMel Gorman 
515748446bbSMel Gorman 		congestion_wait(BLK_RW_ASYNC, HZ/10);
516748446bbSMel Gorman 
517748446bbSMel Gorman 		if (fatal_signal_pending(current))
5182fe86e00SMichal Nazarewicz 			return 0;
519748446bbSMel Gorman 	}
520748446bbSMel Gorman 
521*be976572SVlastimil Babka 	if (compact_should_abort(cc))
522aeef4b83SDavid Rientjes 		return 0;
523aeef4b83SDavid Rientjes 
524748446bbSMel Gorman 	/* Time to isolate some pages for migration */
525748446bbSMel Gorman 	for (; low_pfn < end_pfn; low_pfn++) {
526b2eef8c0SAndrea Arcangeli 		/* give a chance to irqs before checking need_resched() */
527be1aa03bSJoonsoo Kim 		if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
5282a1402aaSMel Gorman 			if (should_release_lock(&zone->lru_lock)) {
529c67fe375SMel Gorman 				spin_unlock_irqrestore(&zone->lru_lock, flags);
530b2eef8c0SAndrea Arcangeli 				locked = false;
531b2eef8c0SAndrea Arcangeli 			}
5322a1402aaSMel Gorman 		}
533b2eef8c0SAndrea Arcangeli 
5340bf380bcSMel Gorman 		/*
5350bf380bcSMel Gorman 		 * migrate_pfn does not necessarily start aligned to a
5360bf380bcSMel Gorman 		 * pageblock. Ensure that pfn_valid is called when moving
5370bf380bcSMel Gorman 		 * into a new MAX_ORDER_NR_PAGES range in case of large
5380bf380bcSMel Gorman 		 * memory holes within the zone
5390bf380bcSMel Gorman 		 */
5400bf380bcSMel Gorman 		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
5410bf380bcSMel Gorman 			if (!pfn_valid(low_pfn)) {
5420bf380bcSMel Gorman 				low_pfn += MAX_ORDER_NR_PAGES - 1;
5430bf380bcSMel Gorman 				continue;
5440bf380bcSMel Gorman 			}
5450bf380bcSMel Gorman 		}
5460bf380bcSMel Gorman 
547748446bbSMel Gorman 		if (!pfn_valid_within(low_pfn))
548748446bbSMel Gorman 			continue;
549b7aba698SMel Gorman 		nr_scanned++;
550748446bbSMel Gorman 
551dc908600SMel Gorman 		/*
552dc908600SMel Gorman 		 * Get the page and ensure the page is within the same zone.
553dc908600SMel Gorman 		 * See the comment in isolate_freepages about overlapping
554dc908600SMel Gorman 		 * nodes. It is deliberate that the new zone lock is not taken
555dc908600SMel Gorman 		 * as memory compaction should not move pages between nodes.
556dc908600SMel Gorman 		 */
557748446bbSMel Gorman 		page = pfn_to_page(low_pfn);
558dc908600SMel Gorman 		if (page_zone(page) != zone)
559dc908600SMel Gorman 			continue;
560dc908600SMel Gorman 
561bb13ffebSMel Gorman 		if (!valid_page)
562bb13ffebSMel Gorman 			valid_page = page;
563bb13ffebSMel Gorman 
564bb13ffebSMel Gorman 		/* If isolation recently failed, do not retry */
565bb13ffebSMel Gorman 		pageblock_nr = low_pfn >> pageblock_order;
566c122b208SJoonsoo Kim 		if (last_pageblock_nr != pageblock_nr) {
567c122b208SJoonsoo Kim 			int mt;
568c122b208SJoonsoo Kim 
569c122b208SJoonsoo Kim 			last_pageblock_nr = pageblock_nr;
570bb13ffebSMel Gorman 			if (!isolation_suitable(cc, page))
571bb13ffebSMel Gorman 				goto next_pageblock;
572bb13ffebSMel Gorman 
5736c14466cSMel Gorman 			/*
574c122b208SJoonsoo Kim 			 * For async migration, also only scan in MOVABLE
575c122b208SJoonsoo Kim 			 * blocks. Async migration optimistically checks whether
576c122b208SJoonsoo Kim 			 * the minimum amount of work satisfies the allocation.
577c122b208SJoonsoo Kim 			 */
578c122b208SJoonsoo Kim 			mt = get_pageblock_migratetype(page);
579e0b9daebSDavid Rientjes 			if (cc->mode == MIGRATE_ASYNC &&
580e0b9daebSDavid Rientjes 			    !migrate_async_suitable(mt)) {
58135979ef3SDavid Rientjes 				set_unsuitable = false;
582c122b208SJoonsoo Kim 				goto next_pageblock;
583c122b208SJoonsoo Kim 			}
584c122b208SJoonsoo Kim 		}
585c122b208SJoonsoo Kim 
586c122b208SJoonsoo Kim 		/*
5876c14466cSMel Gorman 		 * Skip if free. page_order cannot be used without zone->lock
5886c14466cSMel Gorman 		 * as nothing prevents parallel allocations or buddy merging.
5896c14466cSMel Gorman 		 */
590748446bbSMel Gorman 		if (PageBuddy(page))
591748446bbSMel Gorman 			continue;
592748446bbSMel Gorman 
5939927af74SMel Gorman 		/*
594bf6bddf1SRafael Aquini 		 * Check may be lockless but that's ok as we recheck later.
595bf6bddf1SRafael Aquini 		 * It's possible to migrate LRU pages and balloon pages;
596bf6bddf1SRafael Aquini 		 * skip any other type of page.
597bf6bddf1SRafael Aquini 		 */
598bf6bddf1SRafael Aquini 		if (!PageLRU(page)) {
599bf6bddf1SRafael Aquini 			if (unlikely(balloon_page_movable(page))) {
600bf6bddf1SRafael Aquini 				if (locked && balloon_page_isolate(page)) {
601bf6bddf1SRafael Aquini 					/* Successfully isolated */
602b6c75016SJoonsoo Kim 					goto isolate_success;
603bf6bddf1SRafael Aquini 				}
604bf6bddf1SRafael Aquini 			}
605bc835011SAndrea Arcangeli 			continue;
606bf6bddf1SRafael Aquini 		}
607bc835011SAndrea Arcangeli 
608bc835011SAndrea Arcangeli 		/*
6092a1402aaSMel Gorman 		 * PageLRU is set. lru_lock normally excludes isolation
6102a1402aaSMel Gorman 		 * splitting and collapsing (collapsing has already happened
6112a1402aaSMel Gorman 		 * if PageLRU is set) but the lock is not necessarily taken
6122a1402aaSMel Gorman 		 * here and it is wasteful to take it just to check transhuge.
6132a1402aaSMel Gorman 		 * Check TransHuge without lock and skip the whole pageblock if
6142a1402aaSMel Gorman 		 * it's either a transhuge or hugetlbfs page, as calling
6152a1402aaSMel Gorman 		 * compound_order() without preventing THP from splitting the
6162a1402aaSMel Gorman 		 * page underneath us may return surprising results.
617bc835011SAndrea Arcangeli 		 */
618bc835011SAndrea Arcangeli 		if (PageTransHuge(page)) {
6192a1402aaSMel Gorman 			if (!locked)
6202a1402aaSMel Gorman 				goto next_pageblock;
6212a1402aaSMel Gorman 			low_pfn += (1 << compound_order(page)) - 1;
6222a1402aaSMel Gorman 			continue;
6232a1402aaSMel Gorman 		}
6242a1402aaSMel Gorman 
625119d6d59SDavid Rientjes 		/*
626119d6d59SDavid Rientjes 		 * Migration will fail if an anonymous page is pinned in memory,
627119d6d59SDavid Rientjes 		 * so avoid taking lru_lock and isolating it unnecessarily in an
628119d6d59SDavid Rientjes 		 * admittedly racy check.
629119d6d59SDavid Rientjes 		 */
630119d6d59SDavid Rientjes 		if (!page_mapping(page) &&
631119d6d59SDavid Rientjes 		    page_count(page) > page_mapcount(page))
632119d6d59SDavid Rientjes 			continue;
633119d6d59SDavid Rientjes 
6342a1402aaSMel Gorman 		/* Check if it is ok to still hold the lock */
6352a1402aaSMel Gorman 		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
6362a1402aaSMel Gorman 								locked, cc);
6372a1402aaSMel Gorman 		if (!locked || fatal_signal_pending(current))
6382a1402aaSMel Gorman 			break;
6392a1402aaSMel Gorman 
6402a1402aaSMel Gorman 		/* Recheck PageLRU and PageTransHuge under lock */
6412a1402aaSMel Gorman 		if (!PageLRU(page))
6422a1402aaSMel Gorman 			continue;
6432a1402aaSMel Gorman 		if (PageTransHuge(page)) {
644bc835011SAndrea Arcangeli 			low_pfn += (1 << compound_order(page)) - 1;
645bc835011SAndrea Arcangeli 			continue;
646bc835011SAndrea Arcangeli 		}
647bc835011SAndrea Arcangeli 
648fa9add64SHugh Dickins 		lruvec = mem_cgroup_page_lruvec(page, zone);
649fa9add64SHugh Dickins 
650748446bbSMel Gorman 		/* Try isolate the page */
651f3fd4a61SKonstantin Khlebnikov 		if (__isolate_lru_page(page, mode) != 0)
652748446bbSMel Gorman 			continue;
653748446bbSMel Gorman 
654309381feSSasha Levin 		VM_BUG_ON_PAGE(PageTransCompound(page), page);
655bc835011SAndrea Arcangeli 
656748446bbSMel Gorman 		/* Successfully isolated */
657fa9add64SHugh Dickins 		del_page_from_lru_list(page, lruvec, page_lru(page));
658b6c75016SJoonsoo Kim 
659b6c75016SJoonsoo Kim isolate_success:
660b6c75016SJoonsoo Kim 		cc->finished_update_migrate = true;
661748446bbSMel Gorman 		list_add(&page->lru, migratelist);
662748446bbSMel Gorman 		cc->nr_migratepages++;
663b7aba698SMel Gorman 		nr_isolated++;
664748446bbSMel Gorman 
665748446bbSMel Gorman 		/* Avoid isolating too much */
66631b8384aSHillf Danton 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
66731b8384aSHillf Danton 			++low_pfn;
668748446bbSMel Gorman 			break;
669748446bbSMel Gorman 		}
6702a1402aaSMel Gorman 
6712a1402aaSMel Gorman 		continue;
6722a1402aaSMel Gorman 
6732a1402aaSMel Gorman next_pageblock:
674a9aacbccSMel Gorman 		low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
67531b8384aSHillf Danton 	}
676748446bbSMel Gorman 
677c67fe375SMel Gorman 	acct_isolated(zone, locked, cc);
678748446bbSMel Gorman 
679c67fe375SMel Gorman 	if (locked)
680c67fe375SMel Gorman 		spin_unlock_irqrestore(&zone->lru_lock, flags);
681748446bbSMel Gorman 
68250b5b094SVlastimil Babka 	/*
68350b5b094SVlastimil Babka 	 * Update the pageblock-skip information and cached scanner pfn,
68450b5b094SVlastimil Babka 	 * if the whole pageblock was scanned without isolating any page.
68550b5b094SVlastimil Babka 	 */
68635979ef3SDavid Rientjes 	if (low_pfn == end_pfn)
68735979ef3SDavid Rientjes 		update_pageblock_skip(cc, valid_page, nr_isolated,
68835979ef3SDavid Rientjes 				      set_unsuitable, true);
689bb13ffebSMel Gorman 
690b7aba698SMel Gorman 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
691b7aba698SMel Gorman 
692010fc29aSMinchan Kim 	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
693397487dbSMel Gorman 	if (nr_isolated)
694010fc29aSMinchan Kim 		count_compact_events(COMPACTISOLATED, nr_isolated);
695397487dbSMel Gorman 
6962fe86e00SMichal Nazarewicz 	return low_pfn;
6972fe86e00SMichal Nazarewicz }
6982fe86e00SMichal Nazarewicz 
699ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION || CONFIG_CMA */
700ff9543fdSMichal Nazarewicz #ifdef CONFIG_COMPACTION
701ff9543fdSMichal Nazarewicz /*
702ff9543fdSMichal Nazarewicz  * Based on information in the current compact_control, find blocks
703ff9543fdSMichal Nazarewicz  * suitable for isolating free pages from and then isolate them.
704ff9543fdSMichal Nazarewicz  */
705ff9543fdSMichal Nazarewicz static void isolate_freepages(struct zone *zone,
706ff9543fdSMichal Nazarewicz 				struct compact_control *cc)
707ff9543fdSMichal Nazarewicz {
708ff9543fdSMichal Nazarewicz 	struct page *page;
709c96b9e50SVlastimil Babka 	unsigned long block_start_pfn;	/* start of current pageblock */
710c96b9e50SVlastimil Babka 	unsigned long block_end_pfn;	/* end of current pageblock */
711c96b9e50SVlastimil Babka 	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
712ff9543fdSMichal Nazarewicz 	int nr_freepages = cc->nr_freepages;
713ff9543fdSMichal Nazarewicz 	struct list_head *freelist = &cc->freepages;
7142fe86e00SMichal Nazarewicz 
715ff9543fdSMichal Nazarewicz 	/*
716ff9543fdSMichal Nazarewicz 	 * Initialise the free scanner. The starting point is the zone-cached
71749e068f0SVlastimil Babka 	 * value of where we last successfully isolated from, or the end of the
71849e068f0SVlastimil Babka 	 * zone when isolating for the first time. We need this aligned to
719c96b9e50SVlastimil Babka 	 * the pageblock boundary, because we do
720c96b9e50SVlastimil Babka 	 * block_start_pfn -= pageblock_nr_pages in the for loop.
721c96b9e50SVlastimil Babka 	 * For the ending point, take care when isolating in the last pageblock
722c96b9e50SVlastimil Babka 	 * of a zone which ends in the middle of a pageblock.
72349e068f0SVlastimil Babka 	 * The low boundary is the end of the pageblock the migration scanner
72449e068f0SVlastimil Babka 	 * is using.
725ff9543fdSMichal Nazarewicz 	 */
726c96b9e50SVlastimil Babka 	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
727c96b9e50SVlastimil Babka 	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
728c96b9e50SVlastimil Babka 						zone_end_pfn(zone));
7297ed695e0SVlastimil Babka 	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
7302fe86e00SMichal Nazarewicz 
731ff9543fdSMichal Nazarewicz 	/*
732ff9543fdSMichal Nazarewicz 	 * Isolate free pages until enough are available to migrate the
733ff9543fdSMichal Nazarewicz 	 * pages on cc->migratepages. We stop searching if the migrate
734ff9543fdSMichal Nazarewicz 	 * and free page scanners meet or enough free pages are isolated.
735ff9543fdSMichal Nazarewicz 	 */
736c96b9e50SVlastimil Babka 	for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
737c96b9e50SVlastimil Babka 				block_end_pfn = block_start_pfn,
738c96b9e50SVlastimil Babka 				block_start_pfn -= pageblock_nr_pages) {
739ff9543fdSMichal Nazarewicz 		unsigned long isolated;
740ff9543fdSMichal Nazarewicz 
741f6ea3adbSDavid Rientjes 		/*
742f6ea3adbSDavid Rientjes 		 * This can iterate a massively long zone without finding any
743f6ea3adbSDavid Rientjes 		 * suitable migration targets, so periodically check if we need
744*be976572SVlastimil Babka 		 * to schedule, or even abort async compaction.
745f6ea3adbSDavid Rientjes 		 */
746*be976572SVlastimil Babka 		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
747*be976572SVlastimil Babka 						&& compact_should_abort(cc))
748*be976572SVlastimil Babka 			break;
749f6ea3adbSDavid Rientjes 
750c96b9e50SVlastimil Babka 		if (!pfn_valid(block_start_pfn))
751ff9543fdSMichal Nazarewicz 			continue;
752ff9543fdSMichal Nazarewicz 
753ff9543fdSMichal Nazarewicz 		/*
754ff9543fdSMichal Nazarewicz 		 * Check for overlapping nodes/zones. It's possible on some
755ff9543fdSMichal Nazarewicz 		 * configurations to have a setup like
756ff9543fdSMichal Nazarewicz 		 * node0 node1 node0
757ff9543fdSMichal Nazarewicz 		 * i.e. it's possible that all pages within a zone's range of
758ff9543fdSMichal Nazarewicz 		 * pages do not belong to a single zone.
759ff9543fdSMichal Nazarewicz 		 */
760c96b9e50SVlastimil Babka 		page = pfn_to_page(block_start_pfn);
761ff9543fdSMichal Nazarewicz 		if (page_zone(page) != zone)
762ff9543fdSMichal Nazarewicz 			continue;
763ff9543fdSMichal Nazarewicz 
764ff9543fdSMichal Nazarewicz 		/* Check the block is suitable for migration */
76568e3e926SLinus Torvalds 		if (!suitable_migration_target(page))
766ff9543fdSMichal Nazarewicz 			continue;
76768e3e926SLinus Torvalds 
768bb13ffebSMel Gorman 		/* If isolation recently failed, do not retry */
769bb13ffebSMel Gorman 		if (!isolation_suitable(cc, page))
770bb13ffebSMel Gorman 			continue;
771bb13ffebSMel Gorman 
772f40d1e42SMel Gorman 		/* Found a block suitable for isolating free pages from */
773e9ade569SVlastimil Babka 		cc->free_pfn = block_start_pfn;
774c96b9e50SVlastimil Babka 		isolated = isolate_freepages_block(cc, block_start_pfn,
775c96b9e50SVlastimil Babka 					block_end_pfn, freelist, false);
776ff9543fdSMichal Nazarewicz 		nr_freepages += isolated;
777ff9543fdSMichal Nazarewicz 
778ff9543fdSMichal Nazarewicz 		/*
779e9ade569SVlastimil Babka 		 * Set a flag that we successfully isolated in this pageblock.
780e9ade569SVlastimil Babka 		 * In the next loop iteration, zone->compact_cached_free_pfn
781e9ade569SVlastimil Babka 		 * will not be updated and thus it will effectively contain the
782e9ade569SVlastimil Babka 		 * highest pageblock we isolated pages from.
783ff9543fdSMichal Nazarewicz 		 */
784e9ade569SVlastimil Babka 		if (isolated)
785c89511abSMel Gorman 			cc->finished_update_free = true;
786*be976572SVlastimil Babka 
787*be976572SVlastimil Babka 		/*
788*be976572SVlastimil Babka 		 * isolate_freepages_block() might have aborted due to async
789*be976572SVlastimil Babka 		 * compaction being contended
790*be976572SVlastimil Babka 		 */
791*be976572SVlastimil Babka 		if (cc->contended)
792*be976572SVlastimil Babka 			break;
793c89511abSMel Gorman 	}
794ff9543fdSMichal Nazarewicz 
795ff9543fdSMichal Nazarewicz 	/* split_free_page does not map the pages */
796ff9543fdSMichal Nazarewicz 	map_pages(freelist);
797ff9543fdSMichal Nazarewicz 
7987ed695e0SVlastimil Babka 	/*
7997ed695e0SVlastimil Babka 	 * If we crossed the migrate scanner, we want to keep it that way
8007ed695e0SVlastimil Babka 	 * so that compact_finished() may detect this
8017ed695e0SVlastimil Babka 	 */
802c96b9e50SVlastimil Babka 	if (block_start_pfn < low_pfn)
803e9ade569SVlastimil Babka 		cc->free_pfn = cc->migrate_pfn;
804c96b9e50SVlastimil Babka 
805ff9543fdSMichal Nazarewicz 	cc->nr_freepages = nr_freepages;
806748446bbSMel Gorman }
807748446bbSMel Gorman 
808748446bbSMel Gorman /*
809748446bbSMel Gorman  * This is a migrate-callback that "allocates" freepages by taking pages
810748446bbSMel Gorman  * from the isolated freelists in the block we are migrating to.
811748446bbSMel Gorman  */
812748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage,
813748446bbSMel Gorman 					unsigned long data,
814748446bbSMel Gorman 					int **result)
815748446bbSMel Gorman {
816748446bbSMel Gorman 	struct compact_control *cc = (struct compact_control *)data;
817748446bbSMel Gorman 	struct page *freepage;
818748446bbSMel Gorman 
819*be976572SVlastimil Babka 	/*
820*be976572SVlastimil Babka 	 * Isolate free pages if necessary, and if we are not aborting due to
821*be976572SVlastimil Babka 	 * contention.
822*be976572SVlastimil Babka 	 */
823748446bbSMel Gorman 	if (list_empty(&cc->freepages)) {
824*be976572SVlastimil Babka 		if (!cc->contended)
825748446bbSMel Gorman 			isolate_freepages(cc->zone, cc);
826748446bbSMel Gorman 
827748446bbSMel Gorman 		if (list_empty(&cc->freepages))
828748446bbSMel Gorman 			return NULL;
829748446bbSMel Gorman 	}
830748446bbSMel Gorman 
831748446bbSMel Gorman 	freepage = list_entry(cc->freepages.next, struct page, lru);
832748446bbSMel Gorman 	list_del(&freepage->lru);
833748446bbSMel Gorman 	cc->nr_freepages--;
834748446bbSMel Gorman 
835748446bbSMel Gorman 	return freepage;
836748446bbSMel Gorman }
837748446bbSMel Gorman 
838748446bbSMel Gorman /*
839d53aea3dSDavid Rientjes  * This is a migrate-callback that "frees" freepages back to the isolated
840d53aea3dSDavid Rientjes  * freelist.  All pages on the freelist are from the same zone, so there is no
841d53aea3dSDavid Rientjes  * special handling needed for NUMA.
842d53aea3dSDavid Rientjes  */
843d53aea3dSDavid Rientjes static void compaction_free(struct page *page, unsigned long data)
844d53aea3dSDavid Rientjes {
845d53aea3dSDavid Rientjes 	struct compact_control *cc = (struct compact_control *)data;
846d53aea3dSDavid Rientjes 
847d53aea3dSDavid Rientjes 	list_add(&page->lru, &cc->freepages);
848d53aea3dSDavid Rientjes 	cc->nr_freepages++;
849d53aea3dSDavid Rientjes }
850d53aea3dSDavid Rientjes 
851ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */
852ff9543fdSMichal Nazarewicz typedef enum {
853ff9543fdSMichal Nazarewicz 	ISOLATE_ABORT,		/* Abort compaction now */
854ff9543fdSMichal Nazarewicz 	ISOLATE_NONE,		/* No pages isolated, continue scanning */
855ff9543fdSMichal Nazarewicz 	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
856ff9543fdSMichal Nazarewicz } isolate_migrate_t;
857ff9543fdSMichal Nazarewicz 
858ff9543fdSMichal Nazarewicz /*
859ff9543fdSMichal Nazarewicz  * Isolate all pages that can be migrated from the block pointed to by
860ff9543fdSMichal Nazarewicz  * the migrate scanner within compact_control.
861ff9543fdSMichal Nazarewicz  */
862ff9543fdSMichal Nazarewicz static isolate_migrate_t isolate_migratepages(struct zone *zone,
863ff9543fdSMichal Nazarewicz 					struct compact_control *cc)
864ff9543fdSMichal Nazarewicz {
865ff9543fdSMichal Nazarewicz 	unsigned long low_pfn, end_pfn;
866ff9543fdSMichal Nazarewicz 
867ff9543fdSMichal Nazarewicz 	/* Do not scan outside zone boundaries */
868ff9543fdSMichal Nazarewicz 	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
869ff9543fdSMichal Nazarewicz 
870ff9543fdSMichal Nazarewicz 	/* Only scan within a pageblock boundary */
871a9aacbccSMel Gorman 	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);
872ff9543fdSMichal Nazarewicz 
873ff9543fdSMichal Nazarewicz 	/* Do not cross the free scanner or scan within a memory hole */
874ff9543fdSMichal Nazarewicz 	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
875ff9543fdSMichal Nazarewicz 		cc->migrate_pfn = end_pfn;
876ff9543fdSMichal Nazarewicz 		return ISOLATE_NONE;
877ff9543fdSMichal Nazarewicz 	}
878ff9543fdSMichal Nazarewicz 
879ff9543fdSMichal Nazarewicz 	/* Perform the isolation */
880e46a2879SMinchan Kim 	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
881e64c5237SShaohua Li 	if (!low_pfn || cc->contended)
882ff9543fdSMichal Nazarewicz 		return ISOLATE_ABORT;
883ff9543fdSMichal Nazarewicz 
884ff9543fdSMichal Nazarewicz 	cc->migrate_pfn = low_pfn;
885ff9543fdSMichal Nazarewicz 
886ff9543fdSMichal Nazarewicz 	return ISOLATE_SUCCESS;
887ff9543fdSMichal Nazarewicz }
888ff9543fdSMichal Nazarewicz 
889748446bbSMel Gorman static int compact_finished(struct zone *zone,
890748446bbSMel Gorman 			    struct compact_control *cc)
891748446bbSMel Gorman {
8928fb74b9fSMel Gorman 	unsigned int order;
8935a03b051SAndrea Arcangeli 	unsigned long watermark;
89456de7263SMel Gorman 
895*be976572SVlastimil Babka 	if (cc->contended || fatal_signal_pending(current))
896748446bbSMel Gorman 		return COMPACT_PARTIAL;
897748446bbSMel Gorman 
898753341a4SMel Gorman 	/* Compaction run completes if the migrate and free scanner meet */
899bb13ffebSMel Gorman 	if (cc->free_pfn <= cc->migrate_pfn) {
90055b7c4c9SVlastimil Babka 		/* Let the next compaction start anew. */
90135979ef3SDavid Rientjes 		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
90235979ef3SDavid Rientjes 		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
90355b7c4c9SVlastimil Babka 		zone->compact_cached_free_pfn = zone_end_pfn(zone);
90455b7c4c9SVlastimil Babka 
90562997027SMel Gorman 		/*
90662997027SMel Gorman 		 * Mark that the PG_migrate_skip information should be cleared
90762997027SMel Gorman 		 * by kswapd when it goes to sleep. kswapd does not set the
90862997027SMel Gorman 		 * flag itself as the decision to be clear should be directly
90962997027SMel Gorman 		 * based on an allocation request.
91062997027SMel Gorman 		 */
91162997027SMel Gorman 		if (!current_is_kswapd())
91262997027SMel Gorman 			zone->compact_blockskip_flush = true;
91362997027SMel Gorman 
914748446bbSMel Gorman 		return COMPACT_COMPLETE;
915bb13ffebSMel Gorman 	}
916748446bbSMel Gorman 
91782478fb7SJohannes Weiner 	/*
91882478fb7SJohannes Weiner 	 * order == -1 is expected when compacting via
91982478fb7SJohannes Weiner 	 * /proc/sys/vm/compact_memory
92082478fb7SJohannes Weiner 	 */
92156de7263SMel Gorman 	if (cc->order == -1)
92256de7263SMel Gorman 		return COMPACT_CONTINUE;
92356de7263SMel Gorman 
9243957c776SMichal Hocko 	/* Compaction run is not finished if the watermark is not met */
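	/*
	 * e.g. an order-3 request raises the watermark target by
	 * 1 << 3 == 8 pages above the zone's low watermark.
	 */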
9253957c776SMichal Hocko 	watermark = low_wmark_pages(zone);
9263957c776SMichal Hocko 	watermark += (1 << cc->order);
9273957c776SMichal Hocko 
9283957c776SMichal Hocko 	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
9293957c776SMichal Hocko 		return COMPACT_CONTINUE;
9303957c776SMichal Hocko 
93156de7263SMel Gorman 	/* Direct compactor: Is a suitable page free? */
93256de7263SMel Gorman 	for (order = cc->order; order < MAX_ORDER; order++) {
9338fb74b9fSMel Gorman 		struct free_area *area = &zone->free_area[order];
9348fb74b9fSMel Gorman 
93556de7263SMel Gorman 		/* Job done if page is free of the right migratetype */
9361fb3f8caSMel Gorman 		if (!list_empty(&area->free_list[cc->migratetype]))
93756de7263SMel Gorman 			return COMPACT_PARTIAL;
93856de7263SMel Gorman 
93956de7263SMel Gorman 		/* Job done if allocation would set block type */
9401fb3f8caSMel Gorman 		if (cc->order >= pageblock_order && area->nr_free)
94156de7263SMel Gorman 			return COMPACT_PARTIAL;
94256de7263SMel Gorman 	}
94356de7263SMel Gorman 
944748446bbSMel Gorman 	return COMPACT_CONTINUE;
945748446bbSMel Gorman }
946748446bbSMel Gorman 
9473e7d3449SMel Gorman /*
9483e7d3449SMel Gorman  * compaction_suitable: Is this suitable to run compaction on this zone now?
9493e7d3449SMel Gorman  * Returns
9503e7d3449SMel Gorman  *   COMPACT_SKIPPED  - If there are too few free pages for compaction
9513e7d3449SMel Gorman  *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
9523e7d3449SMel Gorman  *   COMPACT_CONTINUE - If compaction should run now
9533e7d3449SMel Gorman  */
9543e7d3449SMel Gorman unsigned long compaction_suitable(struct zone *zone, int order)
9553e7d3449SMel Gorman {
9563e7d3449SMel Gorman 	int fragindex;
9573e7d3449SMel Gorman 	unsigned long watermark;
9583e7d3449SMel Gorman 
9593e7d3449SMel Gorman 	/*
9603957c776SMichal Hocko 	 * order == -1 is expected when compacting via
9613957c776SMichal Hocko 	 * /proc/sys/vm/compact_memory
9623957c776SMichal Hocko 	 */
9633957c776SMichal Hocko 	if (order == -1)
9643957c776SMichal Hocko 		return COMPACT_CONTINUE;
9653957c776SMichal Hocko 
9663957c776SMichal Hocko 	/*
9673e7d3449SMel Gorman 	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
9683e7d3449SMel Gorman 	 * This is because during migration, copies of pages need to be
9693e7d3449SMel Gorman 	 * allocated and for a short time, the footprint is higher
9703e7d3449SMel Gorman 	 */
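	 * For example, an order-9 request (a 2MB huge page with 4K base pages)
	 * adds 2UL << 9 == 1024 order-0 pages on top of the low watermark.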
9713e7d3449SMel Gorman 	watermark = low_wmark_pages(zone) + (2UL << order);
9723e7d3449SMel Gorman 	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
9733e7d3449SMel Gorman 		return COMPACT_SKIPPED;
9743e7d3449SMel Gorman 
9753e7d3449SMel Gorman 	/*
9763e7d3449SMel Gorman 	 * fragmentation index determines if allocation failures are due to
9773e7d3449SMel Gorman 	 * low memory or external fragmentation
9783e7d3449SMel Gorman 	 *
979a582a738SShaohua Li 	 * index of -1000 implies allocations might succeed depending on
980a582a738SShaohua Li 	 * watermarks
9813e7d3449SMel Gorman 	 * index towards 0 implies failure is due to lack of memory
9823e7d3449SMel Gorman 	 * index towards 1000 implies failure is due to fragmentation
9833e7d3449SMel Gorman 	 *
9843e7d3449SMel Gorman 	 * Only compact if a failure would be due to fragmentation.
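	 * For example, with the default sysctl_extfrag_threshold of 500, an
	 * index of 200 (mostly a memory shortage) skips compaction, while an
	 * index of 800 (mostly fragmentation) lets compaction continue.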
9853e7d3449SMel Gorman 	 */
9863e7d3449SMel Gorman 	fragindex = fragmentation_index(zone, order);
9873e7d3449SMel Gorman 	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
9883e7d3449SMel Gorman 		return COMPACT_SKIPPED;
9893e7d3449SMel Gorman 
990a582a738SShaohua Li 	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
991a582a738SShaohua Li 	    0, 0))
9923e7d3449SMel Gorman 		return COMPACT_PARTIAL;
9933e7d3449SMel Gorman 
9943e7d3449SMel Gorman 	return COMPACT_CONTINUE;
9953e7d3449SMel Gorman }
9963e7d3449SMel Gorman 
997748446bbSMel Gorman static int compact_zone(struct zone *zone, struct compact_control *cc)
998748446bbSMel Gorman {
999748446bbSMel Gorman 	int ret;
1000c89511abSMel Gorman 	unsigned long start_pfn = zone->zone_start_pfn;
1001108bcc96SCody P Schafer 	unsigned long end_pfn = zone_end_pfn(zone);
1002e0b9daebSDavid Rientjes 	const bool sync = cc->mode != MIGRATE_ASYNC;
1003748446bbSMel Gorman 
10043e7d3449SMel Gorman 	ret = compaction_suitable(zone, cc->order);
10053e7d3449SMel Gorman 	switch (ret) {
10063e7d3449SMel Gorman 	case COMPACT_PARTIAL:
10073e7d3449SMel Gorman 	case COMPACT_SKIPPED:
10083e7d3449SMel Gorman 		/* Compaction is likely to fail */
10093e7d3449SMel Gorman 		return ret;
10103e7d3449SMel Gorman 	case COMPACT_CONTINUE:
10113e7d3449SMel Gorman 		/* Fall through to compaction */
10123e7d3449SMel Gorman 		;
10133e7d3449SMel Gorman 	}
10143e7d3449SMel Gorman 
1015c89511abSMel Gorman 	/*
1016d3132e4bSVlastimil Babka 	 * Clear pageblock skip if there were failures recently and compaction
1017d3132e4bSVlastimil Babka 	 * is about to be retried after being deferred. kswapd does not do
1018d3132e4bSVlastimil Babka 	 * this reset as it'll reset the cached information when going to sleep.
1019d3132e4bSVlastimil Babka 	 */
1020d3132e4bSVlastimil Babka 	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
1021d3132e4bSVlastimil Babka 		__reset_isolation_suitable(zone);
1022d3132e4bSVlastimil Babka 
1023d3132e4bSVlastimil Babka 	/*
1024c89511abSMel Gorman 	 * Set up to move all movable pages to the end of the zone. Use cached
1025c89511abSMel Gorman 	 * information on where the scanners should start but check that it
1026c89511abSMel Gorman 	 * is initialised by ensuring the values are within zone boundaries.
1027c89511abSMel Gorman 	 */
1028e0b9daebSDavid Rientjes 	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1029c89511abSMel Gorman 	cc->free_pfn = zone->compact_cached_free_pfn;
1030c89511abSMel Gorman 	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
1031c89511abSMel Gorman 		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
1032c89511abSMel Gorman 		zone->compact_cached_free_pfn = cc->free_pfn;
1033c89511abSMel Gorman 	}
1034c89511abSMel Gorman 	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
1035c89511abSMel Gorman 		cc->migrate_pfn = start_pfn;
103635979ef3SDavid Rientjes 		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
103735979ef3SDavid Rientjes 		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1038c89511abSMel Gorman 	}
1039748446bbSMel Gorman 
10400eb927c0SMel Gorman 	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
10410eb927c0SMel Gorman 
1042748446bbSMel Gorman 	migrate_prep_local();
1043748446bbSMel Gorman 
1044748446bbSMel Gorman 	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
10459d502c1cSMinchan Kim 		int err;
1046748446bbSMel Gorman 
1047f9e35b3bSMel Gorman 		switch (isolate_migratepages(zone, cc)) {
1048f9e35b3bSMel Gorman 		case ISOLATE_ABORT:
1049f9e35b3bSMel Gorman 			ret = COMPACT_PARTIAL;
10505733c7d1SRafael Aquini 			putback_movable_pages(&cc->migratepages);
1051e64c5237SShaohua Li 			cc->nr_migratepages = 0;
1052f9e35b3bSMel Gorman 			goto out;
1053f9e35b3bSMel Gorman 		case ISOLATE_NONE:
1054748446bbSMel Gorman 			continue;
1055f9e35b3bSMel Gorman 		case ISOLATE_SUCCESS:
1056f9e35b3bSMel Gorman 			;
1057f9e35b3bSMel Gorman 		}
1058748446bbSMel Gorman 
1059f8c9301fSVlastimil Babka 		if (!cc->nr_migratepages)
1060f8c9301fSVlastimil Babka 			continue;
1061f8c9301fSVlastimil Babka 
1062d53aea3dSDavid Rientjes 		err = migrate_pages(&cc->migratepages, compaction_alloc,
1063e0b9daebSDavid Rientjes 				compaction_free, (unsigned long)cc, cc->mode,
10647b2a2d4aSMel Gorman 				MR_COMPACTION);
1065748446bbSMel Gorman 
1066f8c9301fSVlastimil Babka 		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1067f8c9301fSVlastimil Babka 							&cc->migratepages);
1068748446bbSMel Gorman 
1069f8c9301fSVlastimil Babka 		/* All pages were either migrated or will be released */
1070f8c9301fSVlastimil Babka 		cc->nr_migratepages = 0;
10719d502c1cSMinchan Kim 		if (err) {
10725733c7d1SRafael Aquini 			putback_movable_pages(&cc->migratepages);
10737ed695e0SVlastimil Babka 			/*
10747ed695e0SVlastimil Babka 			 * migrate_pages() may return -ENOMEM when scanners meet
10757ed695e0SVlastimil Babka 			 * and we want compact_finished() to detect it
10767ed695e0SVlastimil Babka 			 */
10777ed695e0SVlastimil Babka 			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
10784bf2bba3SDavid Rientjes 				ret = COMPACT_PARTIAL;
10794bf2bba3SDavid Rientjes 				goto out;
1080748446bbSMel Gorman 			}
10814bf2bba3SDavid Rientjes 		}
1082748446bbSMel Gorman 	}
1083748446bbSMel Gorman 
1084f9e35b3bSMel Gorman out:
1085748446bbSMel Gorman 	/* Release free pages and check accounting */
1086748446bbSMel Gorman 	cc->nr_freepages -= release_freepages(&cc->freepages);
1087748446bbSMel Gorman 	VM_BUG_ON(cc->nr_freepages != 0);
1088748446bbSMel Gorman 
10890eb927c0SMel Gorman 	trace_mm_compaction_end(ret);
10900eb927c0SMel Gorman 
1091748446bbSMel Gorman 	return ret;
1092748446bbSMel Gorman }
109376ab0f53SMel Gorman 
1094e0b9daebSDavid Rientjes static unsigned long compact_zone_order(struct zone *zone, int order,
1095e0b9daebSDavid Rientjes 		gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
109656de7263SMel Gorman {
1097e64c5237SShaohua Li 	unsigned long ret;
109856de7263SMel Gorman 	struct compact_control cc = {
109956de7263SMel Gorman 		.nr_freepages = 0,
110056de7263SMel Gorman 		.nr_migratepages = 0,
110156de7263SMel Gorman 		.order = order,
110256de7263SMel Gorman 		.migratetype = allocflags_to_migratetype(gfp_mask),
110356de7263SMel Gorman 		.zone = zone,
1104e0b9daebSDavid Rientjes 		.mode = mode,
110556de7263SMel Gorman 	};
110656de7263SMel Gorman 	INIT_LIST_HEAD(&cc.freepages);
110756de7263SMel Gorman 	INIT_LIST_HEAD(&cc.migratepages);
110856de7263SMel Gorman 
1109e64c5237SShaohua Li 	ret = compact_zone(zone, &cc);
1110e64c5237SShaohua Li 
1111e64c5237SShaohua Li 	VM_BUG_ON(!list_empty(&cc.freepages));
1112e64c5237SShaohua Li 	VM_BUG_ON(!list_empty(&cc.migratepages));
1113e64c5237SShaohua Li 
1114e64c5237SShaohua Li 	*contended = cc.contended;
1115e64c5237SShaohua Li 	return ret;
111656de7263SMel Gorman }
111756de7263SMel Gorman 
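/*
 * Tunable through /proc/sys/vm/extfrag_threshold. compaction_suitable()
 * compares the zone's fragmentation index against it: a low index means an
 * allocation is failing because memory is genuinely short rather than
 * fragmented, in which case compaction is skipped as unlikely to help.
 */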
11185e771905SMel Gorman int sysctl_extfrag_threshold = 500;
11195e771905SMel Gorman 
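/*
 * In this kernel tree, try_to_compact_pages() is reached from the page
 * allocator's direct-compaction path (__alloc_pages_direct_compact() in
 * mm/page_alloc.c) when a high-order allocation cannot be satisfied from
 * the free lists.
 */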
112056de7263SMel Gorman /**
112156de7263SMel Gorman  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
112256de7263SMel Gorman  * @zonelist: The zonelist used for the current allocation
112356de7263SMel Gorman  * @order: The order of the current allocation
112456de7263SMel Gorman  * @gfp_mask: The GFP mask of the current allocation
112556de7263SMel Gorman  * @nodemask: The allowed nodes to allocate from
1126e0b9daebSDavid Rientjes  * @mode: The migration mode for async, sync light, or sync migration
1127661c4cb9SMel Gorman  * @contended: Return value that is true if compaction was aborted due to lock contention
112956de7263SMel Gorman  *
113056de7263SMel Gorman  * This is the main entry point for direct page compaction.
113156de7263SMel Gorman  */
113256de7263SMel Gorman unsigned long try_to_compact_pages(struct zonelist *zonelist,
113377f1fe6bSMel Gorman 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
1134e0b9daebSDavid Rientjes 			enum migrate_mode mode, bool *contended)
113556de7263SMel Gorman {
113656de7263SMel Gorman 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
113756de7263SMel Gorman 	int may_enter_fs = gfp_mask & __GFP_FS;
113856de7263SMel Gorman 	int may_perform_io = gfp_mask & __GFP_IO;
113956de7263SMel Gorman 	struct zoneref *z;
114056de7263SMel Gorman 	struct zone *zone;
114156de7263SMel Gorman 	int rc = COMPACT_SKIPPED;
1142d95ea5d1SBartlomiej Zolnierkiewicz 	int alloc_flags = 0;
114356de7263SMel Gorman 
11444ffb6335SMel Gorman 	/* Check if the GFP flags allow compaction */
1145c5a73c3dSAndrea Arcangeli 	if (!order || !may_enter_fs || !may_perform_io)
114656de7263SMel Gorman 		return rc;
114756de7263SMel Gorman 
1148010fc29aSMinchan Kim 	count_compact_event(COMPACTSTALL);
114956de7263SMel Gorman 
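	/*
	 * For movable allocations the watermark check further down may also
	 * count free CMA pages, so mirror the allocator by passing ALLOC_CMA.
	 */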
1150d95ea5d1SBartlomiej Zolnierkiewicz #ifdef CONFIG_CMA
1151d95ea5d1SBartlomiej Zolnierkiewicz 	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
1152d95ea5d1SBartlomiej Zolnierkiewicz 		alloc_flags |= ALLOC_CMA;
1153d95ea5d1SBartlomiej Zolnierkiewicz #endif
115456de7263SMel Gorman 	/* Compact each zone in the list */
115556de7263SMel Gorman 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
115656de7263SMel Gorman 								nodemask) {
115756de7263SMel Gorman 		int status;
115856de7263SMel Gorman 
1159e0b9daebSDavid Rientjes 		status = compact_zone_order(zone, order, gfp_mask, mode,
11608fb74b9fSMel Gorman 						contended);
116156de7263SMel Gorman 		rc = max(status, rc);
116256de7263SMel Gorman 
11633e7d3449SMel Gorman 		/* If a normal allocation would succeed, stop compacting */
1164d95ea5d1SBartlomiej Zolnierkiewicz 		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
1165d95ea5d1SBartlomiej Zolnierkiewicz 				      alloc_flags))
116656de7263SMel Gorman 			break;
116756de7263SMel Gorman 	}
116856de7263SMel Gorman 
116956de7263SMel Gorman 	return rc;
117056de7263SMel Gorman }
117156de7263SMel Gorman 
117256de7263SMel Gorman 
117376ab0f53SMel Gorman /* Compact all zones within a node */
11747103f16dSAndrew Morton static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
117576ab0f53SMel Gorman {
117676ab0f53SMel Gorman 	int zoneid;
117776ab0f53SMel Gorman 	struct zone *zone;
117876ab0f53SMel Gorman 
117976ab0f53SMel Gorman 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
118076ab0f53SMel Gorman 
118176ab0f53SMel Gorman 		zone = &pgdat->node_zones[zoneid];
118276ab0f53SMel Gorman 		if (!populated_zone(zone))
118376ab0f53SMel Gorman 			continue;
118476ab0f53SMel Gorman 
11857be62de9SRik van Riel 		cc->nr_freepages = 0;
11867be62de9SRik van Riel 		cc->nr_migratepages = 0;
11877be62de9SRik van Riel 		cc->zone = zone;
11887be62de9SRik van Riel 		INIT_LIST_HEAD(&cc->freepages);
11897be62de9SRik van Riel 		INIT_LIST_HEAD(&cc->migratepages);
119076ab0f53SMel Gorman 
1191aad6ec37SDan Carpenter 		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
11927be62de9SRik van Riel 			compact_zone(zone, cc);
119376ab0f53SMel Gorman 
1194aff62249SRik van Riel 		if (cc->order > 0) {
1195de6c60a6SVlastimil Babka 			if (zone_watermark_ok(zone, cc->order,
1196de6c60a6SVlastimil Babka 						low_wmark_pages(zone), 0, 0))
1197de6c60a6SVlastimil Babka 				compaction_defer_reset(zone, cc->order, false);
1198aff62249SRik van Riel 		}
1199aff62249SRik van Riel 
12007be62de9SRik van Riel 		VM_BUG_ON(!list_empty(&cc->freepages));
12017be62de9SRik van Riel 		VM_BUG_ON(!list_empty(&cc->migratepages));
120276ab0f53SMel Gorman 	}
120376ab0f53SMel Gorman }
120476ab0f53SMel Gorman 
12057103f16dSAndrew Morton void compact_pgdat(pg_data_t *pgdat, int order)
12067be62de9SRik van Riel {
12077be62de9SRik van Riel 	struct compact_control cc = {
12087be62de9SRik van Riel 		.order = order,
1209e0b9daebSDavid Rientjes 		.mode = MIGRATE_ASYNC,
12107be62de9SRik van Riel 	};
12117be62de9SRik van Riel 
12123a7200afSMel Gorman 	if (!order)
12133a7200afSMel Gorman 		return;
12143a7200afSMel Gorman 
12157103f16dSAndrew Morton 	__compact_pgdat(pgdat, &cc);
12167be62de9SRik van Riel }
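/*
 * compact_pgdat() is the light, asynchronous variant used by kswapd once it
 * has reclaimed on behalf of a high-order allocation; compact_node() below
 * is the heavyweight, synchronous variant behind the explicit proc and
 * sysfs triggers.
 */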
12177be62de9SRik van Riel 
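/*
 * order == -1 requests full compaction of the node: __compact_pgdat() skips
 * the deferred-compaction check for it, and compact_zone() keeps going until
 * the migrate and free scanners meet rather than stopping at a watermark.
 */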
12187103f16dSAndrew Morton static void compact_node(int nid)
12197be62de9SRik van Riel {
12207be62de9SRik van Riel 	struct compact_control cc = {
12217be62de9SRik van Riel 		.order = -1,
1222e0b9daebSDavid Rientjes 		.mode = MIGRATE_SYNC,
122391ca9186SDavid Rientjes 		.ignore_skip_hint = true,
12247be62de9SRik van Riel 	};
12257be62de9SRik van Riel 
12267103f16dSAndrew Morton 	__compact_pgdat(NODE_DATA(nid), &cc);
12277be62de9SRik van Riel }
12287be62de9SRik van Riel 
122976ab0f53SMel Gorman /* Compact all nodes in the system */
12307964c06dSJason Liu static void compact_nodes(void)
123176ab0f53SMel Gorman {
123276ab0f53SMel Gorman 	int nid;
123376ab0f53SMel Gorman 
12348575ec29SHugh Dickins 	/* Flush pending updates to the LRU lists */
12358575ec29SHugh Dickins 	lru_add_drain_all();
12368575ec29SHugh Dickins 
123776ab0f53SMel Gorman 	for_each_online_node(nid)
123876ab0f53SMel Gorman 		compact_node(nid);
123976ab0f53SMel Gorman }
124076ab0f53SMel Gorman 
124176ab0f53SMel Gorman /* The written value is actually unused; all memory is compacted */
124276ab0f53SMel Gorman int sysctl_compact_memory;
124376ab0f53SMel Gorman 
124476ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */
124576ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write,
124676ab0f53SMel Gorman 			void __user *buffer, size_t *length, loff_t *ppos)
124776ab0f53SMel Gorman {
124876ab0f53SMel Gorman 	if (write)
12497964c06dSJason Liu 		compact_nodes();
125076ab0f53SMel Gorman 
125176ab0f53SMel Gorman 	return 0;
125276ab0f53SMel Gorman }
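/*
 * Typical usage, as root:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 */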
1253ed4a6d7fSMel Gorman 
12545e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write,
12555e771905SMel Gorman 			void __user *buffer, size_t *length, loff_t *ppos)
12565e771905SMel Gorman {
12575e771905SMel Gorman 	proc_dointvec_minmax(table, write, buffer, length, ppos);
12585e771905SMel Gorman 
12595e771905SMel Gorman 	return 0;
12605e771905SMel Gorman }
12615e771905SMel Gorman 
1262ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
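/*
 * Per-node trigger, exposed as a write-only "compact" attribute of each
 * memory node (typically /sys/devices/system/node/nodeN/compact):
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */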
126374e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev,
126410fbcf4cSKay Sievers 			struct device_attribute *attr,
1265ed4a6d7fSMel Gorman 			const char *buf, size_t count)
1266ed4a6d7fSMel Gorman {
12678575ec29SHugh Dickins 	int nid = dev->id;
12688575ec29SHugh Dickins 
12698575ec29SHugh Dickins 	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
12708575ec29SHugh Dickins 		/* Flush pending updates to the LRU lists */
12718575ec29SHugh Dickins 		lru_add_drain_all();
12728575ec29SHugh Dickins 
12738575ec29SHugh Dickins 		compact_node(nid);
12748575ec29SHugh Dickins 	}
1275ed4a6d7fSMel Gorman 
1276ed4a6d7fSMel Gorman 	return count;
1277ed4a6d7fSMel Gorman }
127810fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1279ed4a6d7fSMel Gorman 
1280ed4a6d7fSMel Gorman int compaction_register_node(struct node *node)
1281ed4a6d7fSMel Gorman {
128210fbcf4cSKay Sievers 	return device_create_file(&node->dev, &dev_attr_compact);
1283ed4a6d7fSMel Gorman }
1284ed4a6d7fSMel Gorman 
1285ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node)
1286ed4a6d7fSMel Gorman {
128710fbcf4cSKay Sievers 	return device_remove_file(&node->dev, &dev_attr_compact);
1288ed4a6d7fSMel Gorman }
1289ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */
1290ff9543fdSMichal Nazarewicz 
1291ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */
1292