xref: /openbmc/linux/mm/compaction.c (revision 1f9efdef4f3f1d2a073e524113fd0038af636f2b)
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting.
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migrate or free compaction scanner. The scanners then need to use
 * only pfn_valid_within() checks for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_page(start_pfn);

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}
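
/*
 * Illustrative sketch (editor's note, not from the original file): both
 * scanners validate a pageblock like this before scanning it; "pfn" and
 * "block_end_pfn" are assumed to be the block bounds kept by the caller:
 *
 *	page = pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone);
 *	if (!page)
 *		continue;	(pfn hole or zone boundary - skip the block)
 */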

#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page
 * scanners meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_cached_migrate_pfn[0] = start_pfn;
	zone->compact_cached_migrate_pfn[1] = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (cc->finished_update_migrate)
			return;
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (cc->finished_update_free)
			return;
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
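
/*
 * Worked example (editor's illustration): the migrate scanner walks
 * towards higher pfns, so its cached restart point only ever moves up;
 * the free scanner walks towards lower pfns, so compact_cached_free_pfn
 * only ever moves down. E.g. if an async migrate scan isolates nothing
 * from the pageblock starting at a hypothetical pfn 0x80000, the next
 * async compaction resumes from compact_cached_migrate_pfn[0] == 0x80000
 * instead of rescanning from the zone start.
 */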
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

static int should_release_lock(spinlock_t *lock)
{
	/*
	 * Sched contention has higher priority here as we may potentially
	 * have to abort the whole compaction ASAP. Returning with lock
	 * contention means we will try another zone, and further decisions
	 * are influenced only when all zones are lock contended. That means
	 * potentially missing a lock contention is less critical.
	 */
	if (need_resched())
		return COMPACT_CONTENDED_SCHED;
	else if (spin_is_contended(lock))
		return COMPACT_CONTENDED_LOCK;

	return COMPACT_CONTENDED_NONE;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out if the
 * contention is severe. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort.
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	int contended = should_release_lock(lock);

	if (contended) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = contended;
			return false;
		}

		cond_resched();
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);
	return true;
}

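/*
 * Usage sketch (editor's note, mirroring the callers below in this file):
 * scanners keep a "locked" flag and re-check it on each attempt:
 *
 *	locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
 *					   locked, cc);
 *	if (!locked)
 *		break;	(async compaction contended - abort the scan)
 *
 * When true is returned the lock is held and must eventually be dropped
 * with spin_unlock_irqrestore().
 */
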
/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_checklock_irqsave() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or when sync compaction
 * was rescheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}

		cond_resched();
	}

	return false;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return false;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, it aborts
 * and returns 0 on any invalid PFN or non-free page inside the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags;
	bool locked = false;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction we do not
		 * spin on the lock, and we acquire the lock as late as
		 * possible.
		 */
		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
								locked, cc);
		if (!locked)
			break;

		/* Recheck this is a buddy page under lock */
		if (!PageBuddy(page))
			goto isolate_fail;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

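/*
 * Behaviour example (editor's note): with strict == true, as CMA uses it,
 * a single hole fails the whole call:
 *
 *	isolated = isolate_freepages_block(cc, blockpfn, end_pfn,
 *					   &freelist, true);
 *	(isolated == 0 if any pfn in the block was invalid or not free)
 *
 * With strict == false, as the compaction free scanner uses it, failed
 * pages are simply skipped and whatever could be isolated is returned.
 */
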
/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn += isolated,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
						   &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n.  (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

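/*
 * Usage sketch (editor's note, modelled on the alloc_contig_range() caller
 * in mm/page_alloc.c; variable names assumed from there):
 *
 *	outer_end = isolate_freepages_range(&cc, outer_start, end);
 *	if (!outer_end)
 *		return -EBUSY;	(a hole or busy page failed the isolation)
 *
 * On success the caller frees back the [end, outer_end) tail that was
 * isolated beyond what it actually asked for.
 */
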
/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	if (list_empty(&cc->migratepages))
		return;

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

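/*
 * Worked example (editor's illustration): with 300 inactive, 500 active
 * and 450 isolated pages in the zone, 450 > (300 + 500) / 2 == 400 holds,
 * so the migrate scanner waits for parallel reclaim or compaction to put
 * some pages back before isolating more.
 */
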
/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise the PFN of the
 * first page that was not scanned (which may be less than, equal to or
 * greater than end_pfn).
 *
 * The pages are isolated on the cc->migratepages list (which is not required
 * to be empty), and cc->nr_migratepages is updated accordingly. The
 * cc->migrate_pfn field is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated.
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		/* give a chance to irqs before checking need_resched() */
		if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
			if (should_release_lock(&zone->lru_lock)) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. page_order cannot be used without zone->lock
		 * as nothing prevents parallel allocations or buddy merging.
		 */
		if (PageBuddy(page))
			continue;

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages;
		 * skip any other type of page.
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (locked && balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				low_pfn = ALIGN(low_pfn + 1,
						pageblock_nr_pages) - 1;
			else
				low_pfn += (1 << compound_order(page)) - 1;

			continue;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked || fatal_signal_pending(current))
			break;

		/* Recheck PageLRU and PageTransHuge under lock */
		if (!PageLRU(page))
			continue;
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try to isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		cc->finished_update_migrate = true;
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. a pending signal.
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		/*
		 * In case of fatal failure, release everything that might
		 * have been isolated in the previous iteration, and signal
		 * the failure back to the caller.
		 */
		if (!pfn) {
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			break;
		}
	}
	acct_isolated(cc->zone, cc);

	return pfn;
}

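/*
 * Usage sketch (editor's note, modelled on the CMA caller
 * __alloc_contig_migrate_range() in mm/page_alloc.c):
 *
 *	pfn = isolate_migratepages_range(cc, pfn, end);
 *	if (!pfn)
 *		return -EINTR;	(fatal signal - pages were put back)
 *
 * The isolated pages on cc->migratepages are then fed to migrate_pages().
 */
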
#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, the zone-cached value, or the end of
	 * the zone when isolating for the first time. We need this aligned
	 * to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For the ending point, take care when isolating in the last
	 * pageblock of a zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from */
		cc->free_pfn = block_start_pfn;
		isolated = isolate_freepages_block(cc, block_start_pfn,
					block_end_pfn, freelist, false);
		nr_freepages += isolated;

		/*
		 * Set a flag that we successfully isolated in this pageblock.
		 * In the next loop iteration, zone->compact_cached_free_pfn
		 * will not be updated and thus it will effectively contain the
		 * highest pageblock we isolated pages from.
		 */
		if (isolated)
			cc->finished_update_free = true;

		/*
		 * isolate_freepages_block() might have aborted due to async
		 * compaction being contended.
		 */
		if (cc->contended)
			break;
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * If we crossed the migrate scanner, we want to keep it that way
	 * so that compact_finished() may detect this.
	 */
	if (block_start_pfn < low_pfn)
		cc->free_pfn = cc->migrate_pfn;

	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist.  All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

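/*
 * Editor's note: the two callbacks above are paired in compact_zone(),
 * which (later in this file) invokes roughly:
 *
 *	err = migrate_pages(&cc->migratepages, compaction_alloc,
 *			compaction_free, (unsigned long)cc, cc->mode,
 *			MR_COMPACTION);
 *
 * compaction_alloc() hands out targets from cc->freepages, and pages of
 * failed or aborted migrations come back through compaction_free().
 */
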
/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone()
	 */
	low_pfn = cc->migrate_pfn;

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; end_pfn <= cc->free_pfn;
			low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
		if (!page)
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/*
		 * For async compaction, also only scan in MOVABLE blocks.
		 * Async compaction is optimistic to see if the minimum amount
		 * of work satisfies the allocation.
		 */
		if (cc->mode == MIGRATE_ASYNC &&
		    !migrate_async_suitable(get_pageblock_migratetype(page)))
			continue;

		/* Perform the isolation */
		low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
								isolate_mode);

		if (!low_pfn || cc->contended)
			return ISOLATE_ABORT;

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	acct_isolated(zone, cc);
	/* Record where migration scanner will be restarted */
	cc->migrate_pfn = low_pfn;

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (cc->contended || fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanners meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/* Let the next compaction start anew. */
		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
		zone->compact_cached_free_pfn = zone_end_pfn(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to clear it should be directly
		 * based on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (cc->order >= pageblock_order && area->nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

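/*
 * Worked example (editor's illustration): for an order-3 direct compaction
 * the run continues until the zone holds low_wmark + (1 << 3) == low_wmark
 * + 8 free pages, and it returns COMPACT_PARTIAL as soon as a free_area of
 * order >= 3 has a page on cc->migratetype's free list, since the caller's
 * allocation would then succeed without further work.
 */
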
10123e7d3449SMel Gorman /*
10133e7d3449SMel Gorman  * compaction_suitable: Is this suitable to run compaction on this zone now?
10143e7d3449SMel Gorman  * Returns
10153e7d3449SMel Gorman  *   COMPACT_SKIPPED  - If there are too few free pages for compaction
10163e7d3449SMel Gorman  *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
10173e7d3449SMel Gorman  *   COMPACT_CONTINUE - If compaction should run now
10183e7d3449SMel Gorman  */
10193e7d3449SMel Gorman unsigned long compaction_suitable(struct zone *zone, int order)
10203e7d3449SMel Gorman {
10213e7d3449SMel Gorman 	int fragindex;
10223e7d3449SMel Gorman 	unsigned long watermark;
10233e7d3449SMel Gorman 
10243e7d3449SMel Gorman 	/*
10253957c776SMichal Hocko 	 * order == -1 is expected when compacting via
10263957c776SMichal Hocko 	 * /proc/sys/vm/compact_memory
10273957c776SMichal Hocko 	 */
10283957c776SMichal Hocko 	if (order == -1)
10293957c776SMichal Hocko 		return COMPACT_CONTINUE;
10303957c776SMichal Hocko 
10313957c776SMichal Hocko 	/*
10323e7d3449SMel Gorman 	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
10333e7d3449SMel Gorman 	 * This is because during migration, copies of pages need to be
10343e7d3449SMel Gorman 	 * allocated and for a short time, the footprint is higher
10353e7d3449SMel Gorman 	 */
10363e7d3449SMel Gorman 	watermark = low_wmark_pages(zone) + (2UL << order);
10373e7d3449SMel Gorman 	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
10383e7d3449SMel Gorman 		return COMPACT_SKIPPED;
10393e7d3449SMel Gorman 
10403e7d3449SMel Gorman 	/*
10413e7d3449SMel Gorman 	 * fragmentation index determines if allocation failures are due to
10423e7d3449SMel Gorman 	 * low memory or external fragmentation
10433e7d3449SMel Gorman 	 *
1044a582a738SShaohua Li 	 * index of -1000 implies allocations might succeed depending on
1045a582a738SShaohua Li 	 * watermarks
10463e7d3449SMel Gorman 	 * index towards 0 implies failure is due to lack of memory
10473e7d3449SMel Gorman 	 * index towards 1000 implies failure is due to fragmentation
10483e7d3449SMel Gorman 	 *
10493e7d3449SMel Gorman 	 * Only compact if a failure would be due to fragmentation.
10503e7d3449SMel Gorman 	 */
10513e7d3449SMel Gorman 	fragindex = fragmentation_index(zone, order);
10523e7d3449SMel Gorman 	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
10533e7d3449SMel Gorman 		return COMPACT_SKIPPED;
10543e7d3449SMel Gorman 
1055a582a738SShaohua Li 	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
1056a582a738SShaohua Li 	    0, 0))
10573e7d3449SMel Gorman 		return COMPACT_PARTIAL;
10583e7d3449SMel Gorman 
10593e7d3449SMel Gorman 	return COMPACT_CONTINUE;
10603e7d3449SMel Gorman }
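
/*
 * Illustrative arithmetic (assuming 4KB base pages): for an order-9
 * request, e.g. a 2MB transparent huge page on x86-64, the order-0 check
 * above demands low_wmark_pages(zone) + (2UL << 9), i.e. the low
 * watermark plus 1024 free pages (~4MB), because migration briefly holds
 * both the source and the destination copy of each page being moved.
 */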
10613e7d3449SMel Gorman 
1062748446bbSMel Gorman static int compact_zone(struct zone *zone, struct compact_control *cc)
1063748446bbSMel Gorman {
1064748446bbSMel Gorman 	int ret;
1065c89511abSMel Gorman 	unsigned long start_pfn = zone->zone_start_pfn;
1066108bcc96SCody P Schafer 	unsigned long end_pfn = zone_end_pfn(zone);
1067e0b9daebSDavid Rientjes 	const bool sync = cc->mode != MIGRATE_ASYNC;
1068748446bbSMel Gorman 
10693e7d3449SMel Gorman 	ret = compaction_suitable(zone, cc->order);
10703e7d3449SMel Gorman 	switch (ret) {
10713e7d3449SMel Gorman 	case COMPACT_PARTIAL:
10723e7d3449SMel Gorman 	case COMPACT_SKIPPED:
10733e7d3449SMel Gorman 		/* Compaction is likely to fail */
10743e7d3449SMel Gorman 		return ret;
10753e7d3449SMel Gorman 	case COMPACT_CONTINUE:
10763e7d3449SMel Gorman 		/* Fall through to compaction */
10773e7d3449SMel Gorman 		;
10783e7d3449SMel Gorman 	}
10793e7d3449SMel Gorman 
1080c89511abSMel Gorman 	/*
1081d3132e4bSVlastimil Babka 	 * Clear pageblock skip if there were failures recently and compaction
1082d3132e4bSVlastimil Babka 	 * is about to be retried after being deferred. kswapd does not do
1083d3132e4bSVlastimil Babka 	 * this reset as it'll reset the cached information when going to sleep.
1084d3132e4bSVlastimil Babka 	 */
1085d3132e4bSVlastimil Babka 	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
1086d3132e4bSVlastimil Babka 		__reset_isolation_suitable(zone);
1087d3132e4bSVlastimil Babka 
1088d3132e4bSVlastimil Babka 	/*
1089c89511abSMel Gorman 	 * Set up to move all movable pages to the end of the zone. Use cached
1090c89511abSMel Gorman 	 * information on where the scanners should start, but check that it
1091c89511abSMel Gorman 	 * is initialised by ensuring the values are within zone boundaries.
1092c89511abSMel Gorman 	 */
1093e0b9daebSDavid Rientjes 	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1094c89511abSMel Gorman 	cc->free_pfn = zone->compact_cached_free_pfn;
1095c89511abSMel Gorman 	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
1096c89511abSMel Gorman 		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
1097c89511abSMel Gorman 		zone->compact_cached_free_pfn = cc->free_pfn;
1098c89511abSMel Gorman 	}
1099c89511abSMel Gorman 	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
1100c89511abSMel Gorman 		cc->migrate_pfn = start_pfn;
110135979ef3SDavid Rientjes 		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
110235979ef3SDavid Rientjes 		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1103c89511abSMel Gorman 	}
1104748446bbSMel Gorman 
11050eb927c0SMel Gorman 	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
11060eb927c0SMel Gorman 
1107748446bbSMel Gorman 	migrate_prep_local();
1108748446bbSMel Gorman 
1109748446bbSMel Gorman 	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
11109d502c1cSMinchan Kim 		int err;
1111748446bbSMel Gorman 
1112f9e35b3bSMel Gorman 		switch (isolate_migratepages(zone, cc)) {
1113f9e35b3bSMel Gorman 		case ISOLATE_ABORT:
1114f9e35b3bSMel Gorman 			ret = COMPACT_PARTIAL;
11155733c7d1SRafael Aquini 			putback_movable_pages(&cc->migratepages);
1116e64c5237SShaohua Li 			cc->nr_migratepages = 0;
1117f9e35b3bSMel Gorman 			goto out;
1118f9e35b3bSMel Gorman 		case ISOLATE_NONE:
1119748446bbSMel Gorman 			continue;
1120f9e35b3bSMel Gorman 		case ISOLATE_SUCCESS:
1121f9e35b3bSMel Gorman 			;
1122f9e35b3bSMel Gorman 		}
1123748446bbSMel Gorman 
1124d53aea3dSDavid Rientjes 		err = migrate_pages(&cc->migratepages, compaction_alloc,
1125e0b9daebSDavid Rientjes 				compaction_free, (unsigned long)cc, cc->mode,
11267b2a2d4aSMel Gorman 				MR_COMPACTION);
1127748446bbSMel Gorman 
1128f8c9301fSVlastimil Babka 		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1129f8c9301fSVlastimil Babka 							&cc->migratepages);
1130748446bbSMel Gorman 
1131f8c9301fSVlastimil Babka 		/* All pages were either migrated or will be released */
1132f8c9301fSVlastimil Babka 		cc->nr_migratepages = 0;
11339d502c1cSMinchan Kim 		if (err) {
11345733c7d1SRafael Aquini 			putback_movable_pages(&cc->migratepages);
11357ed695e0SVlastimil Babka 			/*
11367ed695e0SVlastimil Babka 			 * migrate_pages() may return -ENOMEM when scanners meet
11377ed695e0SVlastimil Babka 			 * and we want compact_finished() to detect it
11387ed695e0SVlastimil Babka 			 */
11397ed695e0SVlastimil Babka 			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
11404bf2bba3SDavid Rientjes 				ret = COMPACT_PARTIAL;
11414bf2bba3SDavid Rientjes 				goto out;
1142748446bbSMel Gorman 			}
11434bf2bba3SDavid Rientjes 		}
1144748446bbSMel Gorman 	}
1145748446bbSMel Gorman 
1146f9e35b3bSMel Gorman out:
1147748446bbSMel Gorman 	/* Release free pages and check accounting */
1148748446bbSMel Gorman 	cc->nr_freepages -= release_freepages(&cc->freepages);
1149748446bbSMel Gorman 	VM_BUG_ON(cc->nr_freepages != 0);
1150748446bbSMel Gorman 
11510eb927c0SMel Gorman 	trace_mm_compaction_end(ret);
11520eb927c0SMel Gorman 
1153748446bbSMel Gorman 	return ret;
1154748446bbSMel Gorman }
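
/*
 * Summary of the loop above (editorial note): compact_zone() alternates
 * isolate_migratepages() and migrate_pages() until compact_finished()
 * reports either that the two scanners have met or that a page of the
 * requested order has become available; migration failures put the
 * isolated pages back, and -ENOMEM aborts the run early unless the
 * scanners have already met, in which case compact_finished() handles it.
 */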
115576ab0f53SMel Gorman 
1156e0b9daebSDavid Rientjes static unsigned long compact_zone_order(struct zone *zone, int order,
1157*1f9efdefSVlastimil Babka 		gfp_t gfp_mask, enum migrate_mode mode, int *contended)
115856de7263SMel Gorman {
1159e64c5237SShaohua Li 	unsigned long ret;
116056de7263SMel Gorman 	struct compact_control cc = {
116156de7263SMel Gorman 		.nr_freepages = 0,
116256de7263SMel Gorman 		.nr_migratepages = 0,
116356de7263SMel Gorman 		.order = order,
116456de7263SMel Gorman 		.migratetype = allocflags_to_migratetype(gfp_mask),
116556de7263SMel Gorman 		.zone = zone,
1166e0b9daebSDavid Rientjes 		.mode = mode,
116756de7263SMel Gorman 	};
116856de7263SMel Gorman 	INIT_LIST_HEAD(&cc.freepages);
116956de7263SMel Gorman 	INIT_LIST_HEAD(&cc.migratepages);
117056de7263SMel Gorman 
1171e64c5237SShaohua Li 	ret = compact_zone(zone, &cc);
1172e64c5237SShaohua Li 
1173e64c5237SShaohua Li 	VM_BUG_ON(!list_empty(&cc.freepages));
1174e64c5237SShaohua Li 	VM_BUG_ON(!list_empty(&cc.migratepages));
1175e64c5237SShaohua Li 
1176e64c5237SShaohua Li 	*contended = cc.contended;
1177e64c5237SShaohua Li 	return ret;
117856de7263SMel Gorman }
117956de7263SMel Gorman 
11805e771905SMel Gorman int sysctl_extfrag_threshold = 500;
11815e771905SMel Gorman 
118256de7263SMel Gorman /**
118356de7263SMel Gorman  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
118456de7263SMel Gorman  * @zonelist: The zonelist used for the current allocation
118556de7263SMel Gorman  * @order: The order of the current allocation
118656de7263SMel Gorman  * @gfp_mask: The GFP mask of the current allocation
118756de7263SMel Gorman  * @nodemask: The allowed nodes to allocate from
1188e0b9daebSDavid Rientjes  * @mode: The migration mode for async, sync light, or sync migration
1189*1f9efdefSVlastimil Babka  * @contended: Return value that determines if compaction was aborted due to
1190*1f9efdefSVlastimil Babka  *	       need_resched() or lock contention
119153853e2dSVlastimil Babka  * @candidate_zone: Return the zone where we think allocation should succeed
119256de7263SMel Gorman  *
119356de7263SMel Gorman  * This is the main entry point for direct page compaction.
119456de7263SMel Gorman  */
119556de7263SMel Gorman unsigned long try_to_compact_pages(struct zonelist *zonelist,
119677f1fe6bSMel Gorman 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
1197*1f9efdefSVlastimil Babka 			enum migrate_mode mode, int *contended,
119853853e2dSVlastimil Babka 			struct zone **candidate_zone)
119956de7263SMel Gorman {
120056de7263SMel Gorman 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
120156de7263SMel Gorman 	int may_enter_fs = gfp_mask & __GFP_FS;
120256de7263SMel Gorman 	int may_perform_io = gfp_mask & __GFP_IO;
120356de7263SMel Gorman 	struct zoneref *z;
120456de7263SMel Gorman 	struct zone *zone;
120553853e2dSVlastimil Babka 	int rc = COMPACT_DEFERRED;
1206d95ea5d1SBartlomiej Zolnierkiewicz 	int alloc_flags = 0;
1207*1f9efdefSVlastimil Babka 	int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */
1208*1f9efdefSVlastimil Babka 
1209*1f9efdefSVlastimil Babka 	*contended = COMPACT_CONTENDED_NONE;
121056de7263SMel Gorman 
12114ffb6335SMel Gorman 	/* Check if the GFP flags allow compaction */
1212c5a73c3dSAndrea Arcangeli 	if (!order || !may_enter_fs || !may_perform_io)
121353853e2dSVlastimil Babka 		return COMPACT_SKIPPED;
121456de7263SMel Gorman 
1215d95ea5d1SBartlomiej Zolnierkiewicz #ifdef CONFIG_CMA
1216d95ea5d1SBartlomiej Zolnierkiewicz 	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
1217d95ea5d1SBartlomiej Zolnierkiewicz 		alloc_flags |= ALLOC_CMA;
1218d95ea5d1SBartlomiej Zolnierkiewicz #endif
121956de7263SMel Gorman 	/* Compact each zone in the list */
122056de7263SMel Gorman 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
122156de7263SMel Gorman 								nodemask) {
122256de7263SMel Gorman 		int status;
1223*1f9efdefSVlastimil Babka 		int zone_contended;
122456de7263SMel Gorman 
122553853e2dSVlastimil Babka 		if (compaction_deferred(zone, order))
122653853e2dSVlastimil Babka 			continue;
122753853e2dSVlastimil Babka 
1228e0b9daebSDavid Rientjes 		status = compact_zone_order(zone, order, gfp_mask, mode,
1229*1f9efdefSVlastimil Babka 							&zone_contended);
123056de7263SMel Gorman 		rc = max(status, rc);
1231*1f9efdefSVlastimil Babka 		/*
1232*1f9efdefSVlastimil Babka 		 * It takes at least one zone that wasn't lock contended
1233*1f9efdefSVlastimil Babka 		 * to clear all_zones_contended.
1234*1f9efdefSVlastimil Babka 		 */
1235*1f9efdefSVlastimil Babka 		all_zones_contended &= zone_contended;
123656de7263SMel Gorman 
12373e7d3449SMel Gorman 		/* If a normal allocation would succeed, stop compacting */
1238d95ea5d1SBartlomiej Zolnierkiewicz 		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
123953853e2dSVlastimil Babka 				      alloc_flags)) {
124053853e2dSVlastimil Babka 			*candidate_zone = zone;
124153853e2dSVlastimil Babka 			/*
124253853e2dSVlastimil Babka 			 * We think the allocation will succeed in this zone,
124353853e2dSVlastimil Babka 			 * but it is not certain, hence the false. The caller
124453853e2dSVlastimil Babka 			 * will repeat this with true if allocation indeed
124553853e2dSVlastimil Babka 			 * succeeds in this zone.
124653853e2dSVlastimil Babka 			 */
124753853e2dSVlastimil Babka 			compaction_defer_reset(zone, order, false);
1248*1f9efdefSVlastimil Babka 			/*
1249*1f9efdefSVlastimil Babka 			 * It is possible that async compaction aborted due to
1250*1f9efdefSVlastimil Babka 			 * need_resched() and the watermarks were ok thanks to
1251*1f9efdefSVlastimil Babka 			 * somebody else freeing memory. The allocation can
1252*1f9efdefSVlastimil Babka 			 * however still fail, so we had better signal the
1253*1f9efdefSVlastimil Babka 			 * need_resched() contention anyway (this will not
1254*1f9efdefSVlastimil Babka 			 * prevent the allocation attempt).
1255*1f9efdefSVlastimil Babka 			 */
1256*1f9efdefSVlastimil Babka 			if (zone_contended == COMPACT_CONTENDED_SCHED)
1257*1f9efdefSVlastimil Babka 				*contended = COMPACT_CONTENDED_SCHED;
1258*1f9efdefSVlastimil Babka 
1259*1f9efdefSVlastimil Babka 			goto break_loop;
1260*1f9efdefSVlastimil Babka 		}
1261*1f9efdefSVlastimil Babka 
1262*1f9efdefSVlastimil Babka 		if (mode != MIGRATE_ASYNC) {
126353853e2dSVlastimil Babka 			/*
126453853e2dSVlastimil Babka 			 * We think that allocation won't succeed in this zone
126553853e2dSVlastimil Babka 			 * so we defer compaction there. If it ends up
126653853e2dSVlastimil Babka 			 * succeeding after all, it will be reset.
126753853e2dSVlastimil Babka 			 */
126853853e2dSVlastimil Babka 			defer_compaction(zone, order);
126953853e2dSVlastimil Babka 		}
1270*1f9efdefSVlastimil Babka 
1271*1f9efdefSVlastimil Babka 		/*
1272*1f9efdefSVlastimil Babka 		 * We might have stopped compacting due to need_resched() in
1273*1f9efdefSVlastimil Babka 		 * async compaction, or because a fatal signal was detected. In that
1274*1f9efdefSVlastimil Babka 		 * case do not try further zones and signal need_resched()
1275*1f9efdefSVlastimil Babka 		 * contention.
1276*1f9efdefSVlastimil Babka 		 */
1277*1f9efdefSVlastimil Babka 		if ((zone_contended == COMPACT_CONTENDED_SCHED)
1278*1f9efdefSVlastimil Babka 					|| fatal_signal_pending(current)) {
1279*1f9efdefSVlastimil Babka 			*contended = COMPACT_CONTENDED_SCHED;
1280*1f9efdefSVlastimil Babka 			goto break_loop;
128156de7263SMel Gorman 		}
128256de7263SMel Gorman 
1283*1f9efdefSVlastimil Babka 		continue;
1284*1f9efdefSVlastimil Babka break_loop:
1285*1f9efdefSVlastimil Babka 		/*
1286*1f9efdefSVlastimil Babka 		 * We might not have tried all the zones, so be conservative
1287*1f9efdefSVlastimil Babka 		 * and assume they are not all lock contended.
1288*1f9efdefSVlastimil Babka 		 */
1289*1f9efdefSVlastimil Babka 		all_zones_contended = 0;
1290*1f9efdefSVlastimil Babka 		break;
1291*1f9efdefSVlastimil Babka 	}
1292*1f9efdefSVlastimil Babka 
1293*1f9efdefSVlastimil Babka 	/*
1294*1f9efdefSVlastimil Babka 	 * If at least one zone wasn't deferred or skipped, we report whether all
1295*1f9efdefSVlastimil Babka 	 * zones that were tried were lock contended.
1296*1f9efdefSVlastimil Babka 	 */
1297*1f9efdefSVlastimil Babka 	if (rc > COMPACT_SKIPPED && all_zones_contended)
1298*1f9efdefSVlastimil Babka 		*contended = COMPACT_CONTENDED_LOCK;
1299*1f9efdefSVlastimil Babka 
130056de7263SMel Gorman 	return rc;
130156de7263SMel Gorman }
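
/*
 * Contention accounting sketch (assuming COMPACT_CONTENDED_NONE is zero,
 * which the "init for &= op" initialisation above relies on): because
 * all_zones_contended is and-ed with every zone's result, one zone that
 * finished without lock contention clears it, so callers see
 * COMPACT_CONTENDED_LOCK only when every zone that was actually tried
 * aborted on a contended lock.
 */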
130256de7263SMel Gorman 
130356de7263SMel Gorman 
130476ab0f53SMel Gorman /* Compact all zones within a node */
13057103f16dSAndrew Morton static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
130676ab0f53SMel Gorman {
130776ab0f53SMel Gorman 	int zoneid;
130876ab0f53SMel Gorman 	struct zone *zone;
130976ab0f53SMel Gorman 
131076ab0f53SMel Gorman 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
131176ab0f53SMel Gorman 
131276ab0f53SMel Gorman 		zone = &pgdat->node_zones[zoneid];
131376ab0f53SMel Gorman 		if (!populated_zone(zone))
131476ab0f53SMel Gorman 			continue;
131576ab0f53SMel Gorman 
13167be62de9SRik van Riel 		cc->nr_freepages = 0;
13177be62de9SRik van Riel 		cc->nr_migratepages = 0;
13187be62de9SRik van Riel 		cc->zone = zone;
13197be62de9SRik van Riel 		INIT_LIST_HEAD(&cc->freepages);
13207be62de9SRik van Riel 		INIT_LIST_HEAD(&cc->migratepages);
132176ab0f53SMel Gorman 
1322aad6ec37SDan Carpenter 		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
13237be62de9SRik van Riel 			compact_zone(zone, cc);
132476ab0f53SMel Gorman 
1325aff62249SRik van Riel 		if (cc->order > 0) {
1326de6c60a6SVlastimil Babka 			if (zone_watermark_ok(zone, cc->order,
1327de6c60a6SVlastimil Babka 						low_wmark_pages(zone), 0, 0))
1328de6c60a6SVlastimil Babka 				compaction_defer_reset(zone, cc->order, false);
1329aff62249SRik van Riel 		}
1330aff62249SRik van Riel 
13317be62de9SRik van Riel 		VM_BUG_ON(!list_empty(&cc->freepages));
13327be62de9SRik van Riel 		VM_BUG_ON(!list_empty(&cc->migratepages));
133376ab0f53SMel Gorman 	}
133476ab0f53SMel Gorman }
133576ab0f53SMel Gorman 
13367103f16dSAndrew Morton void compact_pgdat(pg_data_t *pgdat, int order)
13377be62de9SRik van Riel {
13387be62de9SRik van Riel 	struct compact_control cc = {
13397be62de9SRik van Riel 		.order = order,
1340e0b9daebSDavid Rientjes 		.mode = MIGRATE_ASYNC,
13417be62de9SRik van Riel 	};
13427be62de9SRik van Riel 
13433a7200afSMel Gorman 	if (!order)
13443a7200afSMel Gorman 		return;
13453a7200afSMel Gorman 
13467103f16dSAndrew Morton 	__compact_pgdat(pgdat, &cc);
13477be62de9SRik van Riel }
13487be62de9SRik van Riel 
13497103f16dSAndrew Morton static void compact_node(int nid)
13507be62de9SRik van Riel {
13517be62de9SRik van Riel 	struct compact_control cc = {
13527be62de9SRik van Riel 		.order = -1,
1353e0b9daebSDavid Rientjes 		.mode = MIGRATE_SYNC,
135491ca9186SDavid Rientjes 		.ignore_skip_hint = true,
13557be62de9SRik van Riel 	};
13567be62de9SRik van Riel 
13577103f16dSAndrew Morton 	__compact_pgdat(NODE_DATA(nid), &cc);
13587be62de9SRik van Riel }
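
/*
 * Note on the two helpers above (editorial): compact_pgdat(), used by
 * kswapd, compacts for a specific order in cheap MIGRATE_ASYNC mode and
 * honours per-zone deferral, while compact_node() runs with order == -1,
 * MIGRATE_SYNC and ignore_skip_hint so that explicitly requested
 * compaction sweeps the whole node unconditionally.
 */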
13597be62de9SRik van Riel 
136076ab0f53SMel Gorman /* Compact all nodes in the system */
13617964c06dSJason Liu static void compact_nodes(void)
136276ab0f53SMel Gorman {
136376ab0f53SMel Gorman 	int nid;
136476ab0f53SMel Gorman 
13658575ec29SHugh Dickins 	/* Flush pending updates to the LRU lists */
13668575ec29SHugh Dickins 	lru_add_drain_all();
13678575ec29SHugh Dickins 
136876ab0f53SMel Gorman 	for_each_online_node(nid)
136976ab0f53SMel Gorman 		compact_node(nid);
137076ab0f53SMel Gorman }
137176ab0f53SMel Gorman 
137276ab0f53SMel Gorman /* The written value is actually unused; any write compacts all memory */
137376ab0f53SMel Gorman int sysctl_compact_memory;
137476ab0f53SMel Gorman 
137576ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */
137676ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write,
137776ab0f53SMel Gorman 			void __user *buffer, size_t *length, loff_t *ppos)
137876ab0f53SMel Gorman {
137976ab0f53SMel Gorman 	if (write)
13807964c06dSJason Liu 		compact_nodes();
138176ab0f53SMel Gorman 
138276ab0f53SMel Gorman 	return 0;
138376ab0f53SMel Gorman }
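
/*
 * Usage sketch (documented procfs interface, not defined here): writing
 * any value compacts all online nodes, e.g.:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 */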
1384ed4a6d7fSMel Gorman 
13855e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write,
13865e771905SMel Gorman 			void __user *buffer, size_t *length, loff_t *ppos)
13875e771905SMel Gorman {
13885e771905SMel Gorman 	proc_dointvec_minmax(table, write, buffer, length, ppos);
13895e771905SMel Gorman 
13905e771905SMel Gorman 	return 0;
13915e771905SMel Gorman }
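
/*
 * Usage sketch: this handler backs /proc/sys/vm/extfrag_threshold, the
 * value that compaction_suitable() compares against fragmentation_index()
 * (range 0-1000, default 500 as set above), e.g.:
 *
 *	echo 400 > /proc/sys/vm/extfrag_threshold
 */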
13925e771905SMel Gorman 
1393ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
139474e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev,
139510fbcf4cSKay Sievers 			struct device_attribute *attr,
1396ed4a6d7fSMel Gorman 			const char *buf, size_t count)
1397ed4a6d7fSMel Gorman {
13988575ec29SHugh Dickins 	int nid = dev->id;
13998575ec29SHugh Dickins 
14008575ec29SHugh Dickins 	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
14018575ec29SHugh Dickins 		/* Flush pending updates to the LRU lists */
14028575ec29SHugh Dickins 		lru_add_drain_all();
14038575ec29SHugh Dickins 
14048575ec29SHugh Dickins 		compact_node(nid);
14058575ec29SHugh Dickins 	}
1406ed4a6d7fSMel Gorman 
1407ed4a6d7fSMel Gorman 	return count;
1408ed4a6d7fSMel Gorman }
140910fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
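
/*
 * Usage sketch for the attribute above: on NUMA systems with sysfs, a
 * single node can be compacted explicitly, e.g.:
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */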
1410ed4a6d7fSMel Gorman 
1411ed4a6d7fSMel Gorman int compaction_register_node(struct node *node)
1412ed4a6d7fSMel Gorman {
141310fbcf4cSKay Sievers 	return device_create_file(&node->dev, &dev_attr_compact);
1414ed4a6d7fSMel Gorman }
1415ed4a6d7fSMel Gorman 
1416ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node)
1417ed4a6d7fSMel Gorman {
141810fbcf4cSKay Sievers 	return device_remove_file(&node->dev, &dev_attr_compact);
1419ed4a6d7fSMel Gorman }
1420ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */
1421ff9543fdSMichal Nazarewicz 
1422ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */
1423