xref: /openbmc/linux/mm/compaction.c (revision 110e6f26)
1 /*
2  * linux/mm/compaction.c
3  *
4  * Memory compaction for the reduction of external fragmentation. Note that
5  * this heavily depends upon page migration to do all the real heavy
6  * lifting
7  *
8  * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
9  */
10 #include <linux/cpu.h>
11 #include <linux/swap.h>
12 #include <linux/migrate.h>
13 #include <linux/compaction.h>
14 #include <linux/mm_inline.h>
15 #include <linux/backing-dev.h>
16 #include <linux/sysctl.h>
17 #include <linux/sysfs.h>
18 #include <linux/balloon_compaction.h>
19 #include <linux/page-isolation.h>
20 #include <linux/kasan.h>
21 #include <linux/kthread.h>
22 #include <linux/freezer.h>
23 #include "internal.h"
24 
25 #ifdef CONFIG_COMPACTION
26 static inline void count_compact_event(enum vm_event_item item)
27 {
28 	count_vm_event(item);
29 }
30 
31 static inline void count_compact_events(enum vm_event_item item, long delta)
32 {
33 	count_vm_events(item, delta);
34 }
35 #else
36 #define count_compact_event(item) do { } while (0)
37 #define count_compact_events(item, delta) do { } while (0)
38 #endif
39 
40 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
41 
42 #define CREATE_TRACE_POINTS
43 #include <trace/events/compaction.h>
44 
45 static unsigned long release_freepages(struct list_head *freelist)
46 {
47 	struct page *page, *next;
48 	unsigned long high_pfn = 0;
49 
50 	list_for_each_entry_safe(page, next, freelist, lru) {
51 		unsigned long pfn = page_to_pfn(page);
52 		list_del(&page->lru);
53 		__free_page(page);
54 		if (pfn > high_pfn)
55 			high_pfn = pfn;
56 	}
57 
58 	return high_pfn;
59 }
60 
61 static void map_pages(struct list_head *list)
62 {
63 	struct page *page;
64 
65 	list_for_each_entry(page, list, lru) {
66 		arch_alloc_page(page, 0);
67 		kernel_map_pages(page, 1, 1);
68 		kasan_alloc_pages(page, 0);
69 	}
70 }
71 
72 static inline bool migrate_async_suitable(int migratetype)
73 {
74 	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
75 }
76 
77 #ifdef CONFIG_COMPACTION
78 
79 /* Do not skip compaction more than 64 times */
80 #define COMPACT_MAX_DEFER_SHIFT 6
81 
82 /*
83  * Compaction is deferred when compaction fails to result in a page
84  * allocation success. 1 << compact_defer_limit compactions are skipped up
85  * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
86  */
87 void defer_compaction(struct zone *zone, int order)
88 {
89 	zone->compact_considered = 0;
90 	zone->compact_defer_shift++;
91 
92 	if (order < zone->compact_order_failed)
93 		zone->compact_order_failed = order;
94 
95 	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
96 		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
97 
98 	trace_mm_compaction_defer_compaction(zone, order);
99 }
100 
101 /* Returns true if compaction should be skipped this time */
102 bool compaction_deferred(struct zone *zone, int order)
103 {
104 	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
105 
106 	if (order < zone->compact_order_failed)
107 		return false;
108 
109 	/* Avoid possible overflow */
110 	if (++zone->compact_considered > defer_limit)
111 		zone->compact_considered = defer_limit;
112 
113 	if (zone->compact_considered >= defer_limit)
114 		return false;
115 
116 	trace_mm_compaction_deferred(zone, order);
117 
118 	return true;
119 }
120 
121 /*
122  * Update defer tracking counters after successful compaction of given order,
123  * which means an allocation either succeeded (alloc_success == true) or is
124  * expected to succeed.
125  */
126 void compaction_defer_reset(struct zone *zone, int order,
127 		bool alloc_success)
128 {
129 	if (alloc_success) {
130 		zone->compact_considered = 0;
131 		zone->compact_defer_shift = 0;
132 	}
133 	if (order >= zone->compact_order_failed)
134 		zone->compact_order_failed = order + 1;
135 
136 	trace_mm_compaction_defer_reset(zone, order);
137 }
138 
139 /* Returns true if restarting compaction after many failures */
140 bool compaction_restarting(struct zone *zone, int order)
141 {
142 	if (order < zone->compact_order_failed)
143 		return false;
144 
145 	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
146 		zone->compact_considered >= 1UL << zone->compact_defer_shift;
147 }
148 
149 /* Returns true if the pageblock should be scanned for pages to isolate. */
150 static inline bool isolation_suitable(struct compact_control *cc,
151 					struct page *page)
152 {
153 	if (cc->ignore_skip_hint)
154 		return true;
155 
156 	return !get_pageblock_skip(page);
157 }
158 
159 static void reset_cached_positions(struct zone *zone)
160 {
161 	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
162 	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
163 	zone->compact_cached_free_pfn =
164 			round_down(zone_end_pfn(zone) - 1, pageblock_nr_pages);
165 }
166 
167 /*
168  * This function is called to clear all cached information on pageblocks that
169  * should be skipped for page isolation when the migrate and free page scanner
170  * meet.
171  */
172 static void __reset_isolation_suitable(struct zone *zone)
173 {
174 	unsigned long start_pfn = zone->zone_start_pfn;
175 	unsigned long end_pfn = zone_end_pfn(zone);
176 	unsigned long pfn;
177 
178 	zone->compact_blockskip_flush = false;
179 
180 	/* Walk the zone and mark every pageblock as suitable for isolation */
181 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
182 		struct page *page;
183 
184 		cond_resched();
185 
186 		if (!pfn_valid(pfn))
187 			continue;
188 
189 		page = pfn_to_page(pfn);
190 		if (zone != page_zone(page))
191 			continue;
192 
193 		clear_pageblock_skip(page);
194 	}
195 
196 	reset_cached_positions(zone);
197 }
198 
199 void reset_isolation_suitable(pg_data_t *pgdat)
200 {
201 	int zoneid;
202 
203 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
204 		struct zone *zone = &pgdat->node_zones[zoneid];
205 		if (!populated_zone(zone))
206 			continue;
207 
208 		/* Only flush if a full compaction finished recently */
209 		if (zone->compact_blockskip_flush)
210 			__reset_isolation_suitable(zone);
211 	}
212 }
213 
214 /*
215  * If no pages were isolated then mark this pageblock to be skipped in the
216  * future. The information is later cleared by __reset_isolation_suitable().
217  */
218 static void update_pageblock_skip(struct compact_control *cc,
219 			struct page *page, unsigned long nr_isolated,
220 			bool migrate_scanner)
221 {
222 	struct zone *zone = cc->zone;
223 	unsigned long pfn;
224 
225 	if (cc->ignore_skip_hint)
226 		return;
227 
228 	if (!page)
229 		return;
230 
231 	if (nr_isolated)
232 		return;
233 
234 	set_pageblock_skip(page);
235 
236 	pfn = page_to_pfn(page);
237 
238 	/* Update where async and sync compaction should restart */
239 	if (migrate_scanner) {
240 		if (pfn > zone->compact_cached_migrate_pfn[0])
241 			zone->compact_cached_migrate_pfn[0] = pfn;
242 		if (cc->mode != MIGRATE_ASYNC &&
243 		    pfn > zone->compact_cached_migrate_pfn[1])
244 			zone->compact_cached_migrate_pfn[1] = pfn;
245 	} else {
246 		if (pfn < zone->compact_cached_free_pfn)
247 			zone->compact_cached_free_pfn = pfn;
248 	}
249 }
250 #else
251 static inline bool isolation_suitable(struct compact_control *cc,
252 					struct page *page)
253 {
254 	return true;
255 }
256 
257 static void update_pageblock_skip(struct compact_control *cc,
258 			struct page *page, unsigned long nr_isolated,
259 			bool migrate_scanner)
260 {
261 }
262 #endif /* CONFIG_COMPACTION */
263 
264 /*
265  * Compaction requires the taking of some coarse locks that are potentially
266  * very heavily contended. For async compaction, back out if the lock cannot
267  * be taken immediately. For sync compaction, spin on the lock if needed.
268  *
269  * Returns true if the lock is held
270  * Returns false if the lock is not held and compaction should abort
271  */
272 static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
273 						struct compact_control *cc)
274 {
275 	if (cc->mode == MIGRATE_ASYNC) {
276 		if (!spin_trylock_irqsave(lock, *flags)) {
277 			cc->contended = COMPACT_CONTENDED_LOCK;
278 			return false;
279 		}
280 	} else {
281 		spin_lock_irqsave(lock, *flags);
282 	}
283 
284 	return true;
285 }
286 
287 /*
288  * Compaction requires the taking of some coarse locks that are potentially
289  * very heavily contended. The lock should be periodically unlocked to avoid
290  * having disabled IRQs for a long time, even when there is nobody waiting on
291  * the lock. It might also be that allowing the IRQs will result in
292  * need_resched() becoming true. If scheduling is needed, async compaction
293  * aborts. Sync compaction schedules.
294  * Either compaction type will also abort if a fatal signal is pending.
295  * In either case if the lock was locked, it is dropped and not regained.
296  *
297  * Returns true if compaction should abort due to fatal signal pending, or
298  *		async compaction due to need_resched()
299  * Returns false when compaction can continue (sync compaction might have
300  *		scheduled)
301  */
302 static bool compact_unlock_should_abort(spinlock_t *lock,
303 		unsigned long flags, bool *locked, struct compact_control *cc)
304 {
305 	if (*locked) {
306 		spin_unlock_irqrestore(lock, flags);
307 		*locked = false;
308 	}
309 
310 	if (fatal_signal_pending(current)) {
311 		cc->contended = COMPACT_CONTENDED_SCHED;
312 		return true;
313 	}
314 
315 	if (need_resched()) {
316 		if (cc->mode == MIGRATE_ASYNC) {
317 			cc->contended = COMPACT_CONTENDED_SCHED;
318 			return true;
319 		}
320 		cond_resched();
321 	}
322 
323 	return false;
324 }
325 
326 /*
327  * Aside from avoiding lock contention, compaction also periodically checks
328  * need_resched() and either schedules in sync compaction or aborts async
329  * compaction. This is similar to what compact_unlock_should_abort() does, but
330  * is used where no lock is concerned.
331  *
332  * Returns false when no scheduling was needed, or sync compaction scheduled.
333  * Returns true when async compaction should abort.
334  */
335 static inline bool compact_should_abort(struct compact_control *cc)
336 {
337 	/* async compaction aborts if contended */
338 	if (need_resched()) {
339 		if (cc->mode == MIGRATE_ASYNC) {
340 			cc->contended = COMPACT_CONTENDED_SCHED;
341 			return true;
342 		}
343 
344 		cond_resched();
345 	}
346 
347 	return false;
348 }
349 
350 /*
351  * Isolate free pages onto a private freelist. If @strict is true, will abort
352  * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
353  * (even though it may still end up isolating some pages).
354  */
355 static unsigned long isolate_freepages_block(struct compact_control *cc,
356 				unsigned long *start_pfn,
357 				unsigned long end_pfn,
358 				struct list_head *freelist,
359 				bool strict)
360 {
361 	int nr_scanned = 0, total_isolated = 0;
362 	struct page *cursor, *valid_page = NULL;
363 	unsigned long flags = 0;
364 	bool locked = false;
365 	unsigned long blockpfn = *start_pfn;
366 
367 	cursor = pfn_to_page(blockpfn);
368 
369 	/* Isolate free pages. */
370 	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
371 		int isolated, i;
372 		struct page *page = cursor;
373 
374 		/*
375 		 * Periodically drop the lock (if held) regardless of its
376 		 * contention, to give chance to IRQs. Abort if fatal signal
377 		 * pending or async compaction detects need_resched()
378 		 */
379 		if (!(blockpfn % SWAP_CLUSTER_MAX)
380 		    && compact_unlock_should_abort(&cc->zone->lock, flags,
381 								&locked, cc))
382 			break;
383 
384 		nr_scanned++;
385 		if (!pfn_valid_within(blockpfn))
386 			goto isolate_fail;
387 
388 		if (!valid_page)
389 			valid_page = page;
390 
391 		/*
392 		 * For compound pages such as THP and hugetlbfs, we can save
393 		 * potentially a lot of iterations if we skip them at once.
394 		 * The check is racy, but we can consider only valid values
395 		 * and the only danger is skipping too much.
396 		 */
397 		if (PageCompound(page)) {
398 			unsigned int comp_order = compound_order(page);
399 
400 			if (likely(comp_order < MAX_ORDER)) {
401 				blockpfn += (1UL << comp_order) - 1;
402 				cursor += (1UL << comp_order) - 1;
403 			}
404 
405 			goto isolate_fail;
406 		}
407 
408 		if (!PageBuddy(page))
409 			goto isolate_fail;
410 
411 		/*
412 		 * If we already hold the lock, we can skip some rechecking.
413 		 * Note that if we hold the lock now, checked_pageblock was
414 		 * already set in some previous iteration (or strict is true),
415 		 * so it is correct to skip the suitable migration target
416 		 * recheck as well.
417 		 */
418 		if (!locked) {
419 			/*
420 			 * The zone lock must be held to isolate freepages.
421 			 * Unfortunately this is a very coarse lock and can be
422 			 * heavily contended if there are parallel allocations
423 			 * or parallel compactions. For async compaction do not
424 			 * spin on the lock and we acquire the lock as late as
425 			 * possible.
426 			 */
427 			locked = compact_trylock_irqsave(&cc->zone->lock,
428 								&flags, cc);
429 			if (!locked)
430 				break;
431 
432 			/* Recheck this is a buddy page under lock */
433 			if (!PageBuddy(page))
434 				goto isolate_fail;
435 		}
436 
437 		/* Found a free page, break it into order-0 pages */
438 		isolated = split_free_page(page);
439 		total_isolated += isolated;
440 		for (i = 0; i < isolated; i++) {
441 			list_add(&page->lru, freelist);
442 			page++;
443 		}
444 
445 		/* If a page was split, advance to the end of it */
446 		if (isolated) {
447 			cc->nr_freepages += isolated;
448 			if (!strict &&
449 				cc->nr_migratepages <= cc->nr_freepages) {
450 				blockpfn += isolated;
451 				break;
452 			}
453 
454 			blockpfn += isolated - 1;
455 			cursor += isolated - 1;
456 			continue;
457 		}
458 
459 isolate_fail:
460 		if (strict)
461 			break;
462 		else
463 			continue;
464 
465 	}
466 
467 	/*
468 	 * There is a tiny chance that we have read bogus compound_order(),
469 	 * so be careful to not go outside of the pageblock.
470 	 */
471 	if (unlikely(blockpfn > end_pfn))
472 		blockpfn = end_pfn;
473 
474 	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
475 					nr_scanned, total_isolated);
476 
477 	/* Record how far we have got within the block */
478 	*start_pfn = blockpfn;
479 
480 	/*
481 	 * If strict isolation is requested by CMA then check that all the
482 	 * pages requested were isolated. If there were any failures, 0 is
483 	 * returned and CMA will fail.
484 	 */
485 	if (strict && blockpfn < end_pfn)
486 		total_isolated = 0;
487 
488 	if (locked)
489 		spin_unlock_irqrestore(&cc->zone->lock, flags);
490 
491 	/* Update the pageblock-skip if the whole pageblock was scanned */
492 	if (blockpfn == end_pfn)
493 		update_pageblock_skip(cc, valid_page, total_isolated, false);
494 
495 	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
496 	if (total_isolated)
497 		count_compact_events(COMPACTISOLATED, total_isolated);
498 	return total_isolated;
499 }
500 
501 /**
502  * isolate_freepages_range() - isolate free pages.
503  * @start_pfn: The first PFN to start isolating.
504  * @end_pfn:   The one-past-last PFN.
505  *
506  * Non-free pages, invalid PFNs, or zone boundaries within the
507  * [start_pfn, end_pfn) range are considered errors, cause function to
508  * undo its actions and return zero.
509  *
510  * Otherwise, function returns one-past-the-last PFN of isolated page
511  * (which may be greater then end_pfn if end fell in a middle of
512  * a free page).
513  */
514 unsigned long
515 isolate_freepages_range(struct compact_control *cc,
516 			unsigned long start_pfn, unsigned long end_pfn)
517 {
518 	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
519 	LIST_HEAD(freelist);
520 
521 	pfn = start_pfn;
522 	block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
523 	if (block_start_pfn < cc->zone->zone_start_pfn)
524 		block_start_pfn = cc->zone->zone_start_pfn;
525 	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
526 
527 	for (; pfn < end_pfn; pfn += isolated,
528 				block_start_pfn = block_end_pfn,
529 				block_end_pfn += pageblock_nr_pages) {
530 		/* Protect pfn from changing by isolate_freepages_block */
531 		unsigned long isolate_start_pfn = pfn;
532 
533 		block_end_pfn = min(block_end_pfn, end_pfn);
534 
535 		/*
536 		 * pfn could pass the block_end_pfn if isolated freepage
537 		 * is more than pageblock order. In this case, we adjust
538 		 * scanning range to right one.
539 		 */
540 		if (pfn >= block_end_pfn) {
541 			block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
542 			block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
543 			block_end_pfn = min(block_end_pfn, end_pfn);
544 		}
545 
546 		if (!pageblock_pfn_to_page(block_start_pfn,
547 					block_end_pfn, cc->zone))
548 			break;
549 
550 		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
551 						block_end_pfn, &freelist, true);
552 
553 		/*
554 		 * In strict mode, isolate_freepages_block() returns 0 if
555 		 * there are any holes in the block (ie. invalid PFNs or
556 		 * non-free pages).
557 		 */
558 		if (!isolated)
559 			break;
560 
561 		/*
562 		 * If we managed to isolate pages, it is always (1 << n) *
563 		 * pageblock_nr_pages for some non-negative n.  (Max order
564 		 * page may span two pageblocks).
565 		 */
566 	}
567 
568 	/* split_free_page does not map the pages */
569 	map_pages(&freelist);
570 
571 	if (pfn < end_pfn) {
572 		/* Loop terminated early, cleanup. */
573 		release_freepages(&freelist);
574 		return 0;
575 	}
576 
577 	/* We don't use freelists for anything. */
578 	return pfn;
579 }
580 
581 /* Update the number of anon and file isolated pages in the zone */
582 static void acct_isolated(struct zone *zone, struct compact_control *cc)
583 {
584 	struct page *page;
585 	unsigned int count[2] = { 0, };
586 
587 	if (list_empty(&cc->migratepages))
588 		return;
589 
590 	list_for_each_entry(page, &cc->migratepages, lru)
591 		count[!!page_is_file_cache(page)]++;
592 
593 	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
594 	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
595 }
596 
597 /* Similar to reclaim, but different enough that they don't share logic */
598 static bool too_many_isolated(struct zone *zone)
599 {
600 	unsigned long active, inactive, isolated;
601 
602 	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
603 					zone_page_state(zone, NR_INACTIVE_ANON);
604 	active = zone_page_state(zone, NR_ACTIVE_FILE) +
605 					zone_page_state(zone, NR_ACTIVE_ANON);
606 	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
607 					zone_page_state(zone, NR_ISOLATED_ANON);
608 
609 	return isolated > (inactive + active) / 2;
610 }
611 
612 /**
613  * isolate_migratepages_block() - isolate all migrate-able pages within
614  *				  a single pageblock
615  * @cc:		Compaction control structure.
616  * @low_pfn:	The first PFN to isolate
617  * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
618  * @isolate_mode: Isolation mode to be used.
619  *
620  * Isolate all pages that can be migrated from the range specified by
621  * [low_pfn, end_pfn). The range is expected to be within same pageblock.
622  * Returns zero if there is a fatal signal pending, otherwise PFN of the
623  * first page that was not scanned (which may be both less, equal to or more
624  * than end_pfn).
625  *
626  * The pages are isolated on cc->migratepages list (not required to be empty),
627  * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
628  * is neither read nor updated.
629  */
630 static unsigned long
631 isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
632 			unsigned long end_pfn, isolate_mode_t isolate_mode)
633 {
634 	struct zone *zone = cc->zone;
635 	unsigned long nr_scanned = 0, nr_isolated = 0;
636 	struct list_head *migratelist = &cc->migratepages;
637 	struct lruvec *lruvec;
638 	unsigned long flags = 0;
639 	bool locked = false;
640 	struct page *page = NULL, *valid_page = NULL;
641 	unsigned long start_pfn = low_pfn;
642 
643 	/*
644 	 * Ensure that there are not too many pages isolated from the LRU
645 	 * list by either parallel reclaimers or compaction. If there are,
646 	 * delay for some time until fewer pages are isolated
647 	 */
648 	while (unlikely(too_many_isolated(zone))) {
649 		/* async migration should just abort */
650 		if (cc->mode == MIGRATE_ASYNC)
651 			return 0;
652 
653 		congestion_wait(BLK_RW_ASYNC, HZ/10);
654 
655 		if (fatal_signal_pending(current))
656 			return 0;
657 	}
658 
659 	if (compact_should_abort(cc))
660 		return 0;
661 
662 	/* Time to isolate some pages for migration */
663 	for (; low_pfn < end_pfn; low_pfn++) {
664 		bool is_lru;
665 
666 		/*
667 		 * Periodically drop the lock (if held) regardless of its
668 		 * contention, to give chance to IRQs. Abort async compaction
669 		 * if contended.
670 		 */
671 		if (!(low_pfn % SWAP_CLUSTER_MAX)
672 		    && compact_unlock_should_abort(&zone->lru_lock, flags,
673 								&locked, cc))
674 			break;
675 
676 		if (!pfn_valid_within(low_pfn))
677 			continue;
678 		nr_scanned++;
679 
680 		page = pfn_to_page(low_pfn);
681 
682 		if (!valid_page)
683 			valid_page = page;
684 
685 		/*
686 		 * Skip if free. We read page order here without zone lock
687 		 * which is generally unsafe, but the race window is small and
688 		 * the worst thing that can happen is that we skip some
689 		 * potential isolation targets.
690 		 */
691 		if (PageBuddy(page)) {
692 			unsigned long freepage_order = page_order_unsafe(page);
693 
694 			/*
695 			 * Without lock, we cannot be sure that what we got is
696 			 * a valid page order. Consider only values in the
697 			 * valid order range to prevent low_pfn overflow.
698 			 */
699 			if (freepage_order > 0 && freepage_order < MAX_ORDER)
700 				low_pfn += (1UL << freepage_order) - 1;
701 			continue;
702 		}
703 
704 		/*
705 		 * Check may be lockless but that's ok as we recheck later.
706 		 * It's possible to migrate LRU pages and balloon pages
707 		 * Skip any other type of page
708 		 */
709 		is_lru = PageLRU(page);
710 		if (!is_lru) {
711 			if (unlikely(balloon_page_movable(page))) {
712 				if (balloon_page_isolate(page)) {
713 					/* Successfully isolated */
714 					goto isolate_success;
715 				}
716 			}
717 		}
718 
719 		/*
720 		 * Regardless of being on LRU, compound pages such as THP and
721 		 * hugetlbfs are not to be compacted. We can potentially save
722 		 * a lot of iterations if we skip them at once. The check is
723 		 * racy, but we can consider only valid values and the only
724 		 * danger is skipping too much.
725 		 */
726 		if (PageCompound(page)) {
727 			unsigned int comp_order = compound_order(page);
728 
729 			if (likely(comp_order < MAX_ORDER))
730 				low_pfn += (1UL << comp_order) - 1;
731 
732 			continue;
733 		}
734 
735 		if (!is_lru)
736 			continue;
737 
738 		/*
739 		 * Migration will fail if an anonymous page is pinned in memory,
740 		 * so avoid taking lru_lock and isolating it unnecessarily in an
741 		 * admittedly racy check.
742 		 */
743 		if (!page_mapping(page) &&
744 		    page_count(page) > page_mapcount(page))
745 			continue;
746 
747 		/* If we already hold the lock, we can skip some rechecking */
748 		if (!locked) {
749 			locked = compact_trylock_irqsave(&zone->lru_lock,
750 								&flags, cc);
751 			if (!locked)
752 				break;
753 
754 			/* Recheck PageLRU and PageCompound under lock */
755 			if (!PageLRU(page))
756 				continue;
757 
758 			/*
759 			 * Page become compound since the non-locked check,
760 			 * and it's on LRU. It can only be a THP so the order
761 			 * is safe to read and it's 0 for tail pages.
762 			 */
763 			if (unlikely(PageCompound(page))) {
764 				low_pfn += (1UL << compound_order(page)) - 1;
765 				continue;
766 			}
767 		}
768 
769 		lruvec = mem_cgroup_page_lruvec(page, zone);
770 
771 		/* Try isolate the page */
772 		if (__isolate_lru_page(page, isolate_mode) != 0)
773 			continue;
774 
775 		VM_BUG_ON_PAGE(PageCompound(page), page);
776 
777 		/* Successfully isolated */
778 		del_page_from_lru_list(page, lruvec, page_lru(page));
779 
780 isolate_success:
781 		list_add(&page->lru, migratelist);
782 		cc->nr_migratepages++;
783 		nr_isolated++;
784 
785 		/* Avoid isolating too much */
786 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
787 			++low_pfn;
788 			break;
789 		}
790 	}
791 
792 	/*
793 	 * The PageBuddy() check could have potentially brought us outside
794 	 * the range to be scanned.
795 	 */
796 	if (unlikely(low_pfn > end_pfn))
797 		low_pfn = end_pfn;
798 
799 	if (locked)
800 		spin_unlock_irqrestore(&zone->lru_lock, flags);
801 
802 	/*
803 	 * Update the pageblock-skip information and cached scanner pfn,
804 	 * if the whole pageblock was scanned without isolating any page.
805 	 */
806 	if (low_pfn == end_pfn)
807 		update_pageblock_skip(cc, valid_page, nr_isolated, true);
808 
809 	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
810 						nr_scanned, nr_isolated);
811 
812 	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
813 	if (nr_isolated)
814 		count_compact_events(COMPACTISOLATED, nr_isolated);
815 
816 	return low_pfn;
817 }
818 
819 /**
820  * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
821  * @cc:        Compaction control structure.
822  * @start_pfn: The first PFN to start isolating.
823  * @end_pfn:   The one-past-last PFN.
824  *
825  * Returns zero if isolation fails fatally due to e.g. pending signal.
826  * Otherwise, function returns one-past-the-last PFN of isolated page
827  * (which may be greater than end_pfn if end fell in a middle of a THP page).
828  */
829 unsigned long
830 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
831 							unsigned long end_pfn)
832 {
833 	unsigned long pfn, block_start_pfn, block_end_pfn;
834 
835 	/* Scan block by block. First and last block may be incomplete */
836 	pfn = start_pfn;
837 	block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
838 	if (block_start_pfn < cc->zone->zone_start_pfn)
839 		block_start_pfn = cc->zone->zone_start_pfn;
840 	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
841 
842 	for (; pfn < end_pfn; pfn = block_end_pfn,
843 				block_start_pfn = block_end_pfn,
844 				block_end_pfn += pageblock_nr_pages) {
845 
846 		block_end_pfn = min(block_end_pfn, end_pfn);
847 
848 		if (!pageblock_pfn_to_page(block_start_pfn,
849 					block_end_pfn, cc->zone))
850 			continue;
851 
852 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
853 							ISOLATE_UNEVICTABLE);
854 
855 		/*
856 		 * In case of fatal failure, release everything that might
857 		 * have been isolated in the previous iteration, and signal
858 		 * the failure back to caller.
859 		 */
860 		if (!pfn) {
861 			putback_movable_pages(&cc->migratepages);
862 			cc->nr_migratepages = 0;
863 			break;
864 		}
865 
866 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
867 			break;
868 	}
869 	acct_isolated(cc->zone, cc);
870 
871 	return pfn;
872 }
873 
874 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
875 #ifdef CONFIG_COMPACTION
876 
877 /* Returns true if the page is within a block suitable for migration to */
878 static bool suitable_migration_target(struct page *page)
879 {
880 	/* If the page is a large free page, then disallow migration */
881 	if (PageBuddy(page)) {
882 		/*
883 		 * We are checking page_order without zone->lock taken. But
884 		 * the only small danger is that we skip a potentially suitable
885 		 * pageblock, so it's not worth to check order for valid range.
886 		 */
887 		if (page_order_unsafe(page) >= pageblock_order)
888 			return false;
889 	}
890 
891 	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
892 	if (migrate_async_suitable(get_pageblock_migratetype(page)))
893 		return true;
894 
895 	/* Otherwise skip the block */
896 	return false;
897 }
898 
899 /*
900  * Test whether the free scanner has reached the same or lower pageblock than
901  * the migration scanner, and compaction should thus terminate.
902  */
903 static inline bool compact_scanners_met(struct compact_control *cc)
904 {
905 	return (cc->free_pfn >> pageblock_order)
906 		<= (cc->migrate_pfn >> pageblock_order);
907 }
908 
909 /*
910  * Based on information in the current compact_control, find blocks
911  * suitable for isolating free pages from and then isolate them.
912  */
913 static void isolate_freepages(struct compact_control *cc)
914 {
915 	struct zone *zone = cc->zone;
916 	struct page *page;
917 	unsigned long block_start_pfn;	/* start of current pageblock */
918 	unsigned long isolate_start_pfn; /* exact pfn we start at */
919 	unsigned long block_end_pfn;	/* end of current pageblock */
920 	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
921 	struct list_head *freelist = &cc->freepages;
922 
923 	/*
924 	 * Initialise the free scanner. The starting point is where we last
925 	 * successfully isolated from, zone-cached value, or the end of the
926 	 * zone when isolating for the first time. For looping we also need
927 	 * this pfn aligned down to the pageblock boundary, because we do
928 	 * block_start_pfn -= pageblock_nr_pages in the for loop.
929 	 * For ending point, take care when isolating in last pageblock of a
930 	 * a zone which ends in the middle of a pageblock.
931 	 * The low boundary is the end of the pageblock the migration scanner
932 	 * is using.
933 	 */
934 	isolate_start_pfn = cc->free_pfn;
935 	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
936 	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
937 						zone_end_pfn(zone));
938 	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
939 
940 	/*
941 	 * Isolate free pages until enough are available to migrate the
942 	 * pages on cc->migratepages. We stop searching if the migrate
943 	 * and free page scanners meet or enough free pages are isolated.
944 	 */
945 	for (; block_start_pfn >= low_pfn;
946 				block_end_pfn = block_start_pfn,
947 				block_start_pfn -= pageblock_nr_pages,
948 				isolate_start_pfn = block_start_pfn) {
949 
950 		/*
951 		 * This can iterate a massively long zone without finding any
952 		 * suitable migration targets, so periodically check if we need
953 		 * to schedule, or even abort async compaction.
954 		 */
955 		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
956 						&& compact_should_abort(cc))
957 			break;
958 
959 		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
960 									zone);
961 		if (!page)
962 			continue;
963 
964 		/* Check the block is suitable for migration */
965 		if (!suitable_migration_target(page))
966 			continue;
967 
968 		/* If isolation recently failed, do not retry */
969 		if (!isolation_suitable(cc, page))
970 			continue;
971 
972 		/* Found a block suitable for isolating free pages from. */
973 		isolate_freepages_block(cc, &isolate_start_pfn,
974 					block_end_pfn, freelist, false);
975 
976 		/*
977 		 * If we isolated enough freepages, or aborted due to async
978 		 * compaction being contended, terminate the loop.
979 		 * Remember where the free scanner should restart next time,
980 		 * which is where isolate_freepages_block() left off.
981 		 * But if it scanned the whole pageblock, isolate_start_pfn
982 		 * now points at block_end_pfn, which is the start of the next
983 		 * pageblock.
984 		 * In that case we will however want to restart at the start
985 		 * of the previous pageblock.
986 		 */
987 		if ((cc->nr_freepages >= cc->nr_migratepages)
988 							|| cc->contended) {
989 			if (isolate_start_pfn >= block_end_pfn)
990 				isolate_start_pfn =
991 					block_start_pfn - pageblock_nr_pages;
992 			break;
993 		} else {
994 			/*
995 			 * isolate_freepages_block() should not terminate
996 			 * prematurely unless contended, or isolated enough
997 			 */
998 			VM_BUG_ON(isolate_start_pfn < block_end_pfn);
999 		}
1000 	}
1001 
1002 	/* split_free_page does not map the pages */
1003 	map_pages(freelist);
1004 
1005 	/*
1006 	 * Record where the free scanner will restart next time. Either we
1007 	 * broke from the loop and set isolate_start_pfn based on the last
1008 	 * call to isolate_freepages_block(), or we met the migration scanner
1009 	 * and the loop terminated due to isolate_start_pfn < low_pfn
1010 	 */
1011 	cc->free_pfn = isolate_start_pfn;
1012 }
1013 
1014 /*
1015  * This is a migrate-callback that "allocates" freepages by taking pages
1016  * from the isolated freelists in the block we are migrating to.
1017  */
1018 static struct page *compaction_alloc(struct page *migratepage,
1019 					unsigned long data,
1020 					int **result)
1021 {
1022 	struct compact_control *cc = (struct compact_control *)data;
1023 	struct page *freepage;
1024 
1025 	/*
1026 	 * Isolate free pages if necessary, and if we are not aborting due to
1027 	 * contention.
1028 	 */
1029 	if (list_empty(&cc->freepages)) {
1030 		if (!cc->contended)
1031 			isolate_freepages(cc);
1032 
1033 		if (list_empty(&cc->freepages))
1034 			return NULL;
1035 	}
1036 
1037 	freepage = list_entry(cc->freepages.next, struct page, lru);
1038 	list_del(&freepage->lru);
1039 	cc->nr_freepages--;
1040 
1041 	return freepage;
1042 }
1043 
1044 /*
1045  * This is a migrate-callback that "frees" freepages back to the isolated
1046  * freelist.  All pages on the freelist are from the same zone, so there is no
1047  * special handling needed for NUMA.
1048  */
1049 static void compaction_free(struct page *page, unsigned long data)
1050 {
1051 	struct compact_control *cc = (struct compact_control *)data;
1052 
1053 	list_add(&page->lru, &cc->freepages);
1054 	cc->nr_freepages++;
1055 }
1056 
1057 /* possible outcome of isolate_migratepages */
1058 typedef enum {
1059 	ISOLATE_ABORT,		/* Abort compaction now */
1060 	ISOLATE_NONE,		/* No pages isolated, continue scanning */
1061 	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
1062 } isolate_migrate_t;
1063 
1064 /*
1065  * Allow userspace to control policy on scanning the unevictable LRU for
1066  * compactable pages.
1067  */
1068 int sysctl_compact_unevictable_allowed __read_mostly = 1;
1069 
1070 /*
1071  * Isolate all pages that can be migrated from the first suitable block,
1072  * starting at the block pointed to by the migrate scanner pfn within
1073  * compact_control.
1074  */
1075 static isolate_migrate_t isolate_migratepages(struct zone *zone,
1076 					struct compact_control *cc)
1077 {
1078 	unsigned long block_start_pfn;
1079 	unsigned long block_end_pfn;
1080 	unsigned long low_pfn;
1081 	unsigned long isolate_start_pfn;
1082 	struct page *page;
1083 	const isolate_mode_t isolate_mode =
1084 		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
1085 		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);
1086 
1087 	/*
1088 	 * Start at where we last stopped, or beginning of the zone as
1089 	 * initialized by compact_zone()
1090 	 */
1091 	low_pfn = cc->migrate_pfn;
1092 	block_start_pfn = cc->migrate_pfn & ~(pageblock_nr_pages - 1);
1093 	if (block_start_pfn < zone->zone_start_pfn)
1094 		block_start_pfn = zone->zone_start_pfn;
1095 
1096 	/* Only scan within a pageblock boundary */
1097 	block_end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);
1098 
1099 	/*
1100 	 * Iterate over whole pageblocks until we find the first suitable.
1101 	 * Do not cross the free scanner.
1102 	 */
1103 	for (; block_end_pfn <= cc->free_pfn;
1104 			low_pfn = block_end_pfn,
1105 			block_start_pfn = block_end_pfn,
1106 			block_end_pfn += pageblock_nr_pages) {
1107 
1108 		/*
1109 		 * This can potentially iterate a massively long zone with
1110 		 * many pageblocks unsuitable, so periodically check if we
1111 		 * need to schedule, or even abort async compaction.
1112 		 */
1113 		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1114 						&& compact_should_abort(cc))
1115 			break;
1116 
1117 		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1118 									zone);
1119 		if (!page)
1120 			continue;
1121 
1122 		/* If isolation recently failed, do not retry */
1123 		if (!isolation_suitable(cc, page))
1124 			continue;
1125 
1126 		/*
1127 		 * For async compaction, also only scan in MOVABLE blocks.
1128 		 * Async compaction is optimistic to see if the minimum amount
1129 		 * of work satisfies the allocation.
1130 		 */
1131 		if (cc->mode == MIGRATE_ASYNC &&
1132 		    !migrate_async_suitable(get_pageblock_migratetype(page)))
1133 			continue;
1134 
1135 		/* Perform the isolation */
1136 		isolate_start_pfn = low_pfn;
1137 		low_pfn = isolate_migratepages_block(cc, low_pfn,
1138 						block_end_pfn, isolate_mode);
1139 
1140 		if (!low_pfn || cc->contended) {
1141 			acct_isolated(zone, cc);
1142 			return ISOLATE_ABORT;
1143 		}
1144 
1145 		/*
1146 		 * Record where we could have freed pages by migration and not
1147 		 * yet flushed them to buddy allocator.
1148 		 * - this is the lowest page that could have been isolated and
1149 		 * then freed by migration.
1150 		 */
1151 		if (cc->nr_migratepages && !cc->last_migrated_pfn)
1152 			cc->last_migrated_pfn = isolate_start_pfn;
1153 
1154 		/*
1155 		 * Either we isolated something and proceed with migration. Or
1156 		 * we failed and compact_zone should decide if we should
1157 		 * continue or not.
1158 		 */
1159 		break;
1160 	}
1161 
1162 	acct_isolated(zone, cc);
1163 	/* Record where migration scanner will be restarted. */
1164 	cc->migrate_pfn = low_pfn;
1165 
1166 	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1167 }
1168 
1169 /*
1170  * order == -1 is expected when compacting via
1171  * /proc/sys/vm/compact_memory
1172  */
1173 static inline bool is_via_compact_memory(int order)
1174 {
1175 	return order == -1;
1176 }
1177 
1178 static int __compact_finished(struct zone *zone, struct compact_control *cc,
1179 			    const int migratetype)
1180 {
1181 	unsigned int order;
1182 	unsigned long watermark;
1183 
1184 	if (cc->contended || fatal_signal_pending(current))
1185 		return COMPACT_CONTENDED;
1186 
1187 	/* Compaction run completes if the migrate and free scanner meet */
1188 	if (compact_scanners_met(cc)) {
1189 		/* Let the next compaction start anew. */
1190 		reset_cached_positions(zone);
1191 
1192 		/*
1193 		 * Mark that the PG_migrate_skip information should be cleared
1194 		 * by kswapd when it goes to sleep. kcompactd does not set the
1195 		 * flag itself as the decision to be clear should be directly
1196 		 * based on an allocation request.
1197 		 */
1198 		if (cc->direct_compaction)
1199 			zone->compact_blockskip_flush = true;
1200 
1201 		return COMPACT_COMPLETE;
1202 	}
1203 
1204 	if (is_via_compact_memory(cc->order))
1205 		return COMPACT_CONTINUE;
1206 
1207 	/* Compaction run is not finished if the watermark is not met */
1208 	watermark = low_wmark_pages(zone);
1209 
1210 	if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
1211 							cc->alloc_flags))
1212 		return COMPACT_CONTINUE;
1213 
1214 	/* Direct compactor: Is a suitable page free? */
1215 	for (order = cc->order; order < MAX_ORDER; order++) {
1216 		struct free_area *area = &zone->free_area[order];
1217 		bool can_steal;
1218 
1219 		/* Job done if page is free of the right migratetype */
1220 		if (!list_empty(&area->free_list[migratetype]))
1221 			return COMPACT_PARTIAL;
1222 
1223 #ifdef CONFIG_CMA
1224 		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
1225 		if (migratetype == MIGRATE_MOVABLE &&
1226 			!list_empty(&area->free_list[MIGRATE_CMA]))
1227 			return COMPACT_PARTIAL;
1228 #endif
1229 		/*
1230 		 * Job done if allocation would steal freepages from
1231 		 * other migratetype buddy lists.
1232 		 */
1233 		if (find_suitable_fallback(area, order, migratetype,
1234 						true, &can_steal) != -1)
1235 			return COMPACT_PARTIAL;
1236 	}
1237 
1238 	return COMPACT_NO_SUITABLE_PAGE;
1239 }
1240 
1241 static int compact_finished(struct zone *zone, struct compact_control *cc,
1242 			    const int migratetype)
1243 {
1244 	int ret;
1245 
1246 	ret = __compact_finished(zone, cc, migratetype);
1247 	trace_mm_compaction_finished(zone, cc->order, ret);
1248 	if (ret == COMPACT_NO_SUITABLE_PAGE)
1249 		ret = COMPACT_CONTINUE;
1250 
1251 	return ret;
1252 }
1253 
1254 /*
1255  * compaction_suitable: Is this suitable to run compaction on this zone now?
1256  * Returns
1257  *   COMPACT_SKIPPED  - If there are too few free pages for compaction
1258  *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
1259  *   COMPACT_CONTINUE - If compaction should run now
1260  */
1261 static unsigned long __compaction_suitable(struct zone *zone, int order,
1262 					int alloc_flags, int classzone_idx)
1263 {
1264 	int fragindex;
1265 	unsigned long watermark;
1266 
1267 	if (is_via_compact_memory(order))
1268 		return COMPACT_CONTINUE;
1269 
1270 	watermark = low_wmark_pages(zone);
1271 	/*
1272 	 * If watermarks for high-order allocation are already met, there
1273 	 * should be no need for compaction at all.
1274 	 */
1275 	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1276 								alloc_flags))
1277 		return COMPACT_PARTIAL;
1278 
1279 	/*
1280 	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
1281 	 * This is because during migration, copies of pages need to be
1282 	 * allocated and for a short time, the footprint is higher
1283 	 */
1284 	watermark += (2UL << order);
1285 	if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags))
1286 		return COMPACT_SKIPPED;
1287 
1288 	/*
1289 	 * fragmentation index determines if allocation failures are due to
1290 	 * low memory or external fragmentation
1291 	 *
1292 	 * index of -1000 would imply allocations might succeed depending on
1293 	 * watermarks, but we already failed the high-order watermark check
1294 	 * index towards 0 implies failure is due to lack of memory
1295 	 * index towards 1000 implies failure is due to fragmentation
1296 	 *
1297 	 * Only compact if a failure would be due to fragmentation.
1298 	 */
1299 	fragindex = fragmentation_index(zone, order);
1300 	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
1301 		return COMPACT_NOT_SUITABLE_ZONE;
1302 
1303 	return COMPACT_CONTINUE;
1304 }
1305 
1306 unsigned long compaction_suitable(struct zone *zone, int order,
1307 					int alloc_flags, int classzone_idx)
1308 {
1309 	unsigned long ret;
1310 
1311 	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx);
1312 	trace_mm_compaction_suitable(zone, order, ret);
1313 	if (ret == COMPACT_NOT_SUITABLE_ZONE)
1314 		ret = COMPACT_SKIPPED;
1315 
1316 	return ret;
1317 }
1318 
1319 static int compact_zone(struct zone *zone, struct compact_control *cc)
1320 {
1321 	int ret;
1322 	unsigned long start_pfn = zone->zone_start_pfn;
1323 	unsigned long end_pfn = zone_end_pfn(zone);
1324 	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
1325 	const bool sync = cc->mode != MIGRATE_ASYNC;
1326 
1327 	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
1328 							cc->classzone_idx);
1329 	switch (ret) {
1330 	case COMPACT_PARTIAL:
1331 	case COMPACT_SKIPPED:
1332 		/* Compaction is likely to fail */
1333 		return ret;
1334 	case COMPACT_CONTINUE:
1335 		/* Fall through to compaction */
1336 		;
1337 	}
1338 
1339 	/*
1340 	 * Clear pageblock skip if there were failures recently and compaction
1341 	 * is about to be retried after being deferred.
1342 	 */
1343 	if (compaction_restarting(zone, cc->order))
1344 		__reset_isolation_suitable(zone);
1345 
1346 	/*
1347 	 * Setup to move all movable pages to the end of the zone. Used cached
1348 	 * information on where the scanners should start but check that it
1349 	 * is initialised by ensuring the values are within zone boundaries.
1350 	 */
1351 	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1352 	cc->free_pfn = zone->compact_cached_free_pfn;
1353 	if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
1354 		cc->free_pfn = round_down(end_pfn - 1, pageblock_nr_pages);
1355 		zone->compact_cached_free_pfn = cc->free_pfn;
1356 	}
1357 	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
1358 		cc->migrate_pfn = start_pfn;
1359 		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
1360 		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1361 	}
1362 	cc->last_migrated_pfn = 0;
1363 
1364 	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
1365 				cc->free_pfn, end_pfn, sync);
1366 
1367 	migrate_prep_local();
1368 
1369 	while ((ret = compact_finished(zone, cc, migratetype)) ==
1370 						COMPACT_CONTINUE) {
1371 		int err;
1372 
1373 		switch (isolate_migratepages(zone, cc)) {
1374 		case ISOLATE_ABORT:
1375 			ret = COMPACT_CONTENDED;
1376 			putback_movable_pages(&cc->migratepages);
1377 			cc->nr_migratepages = 0;
1378 			goto out;
1379 		case ISOLATE_NONE:
1380 			/*
1381 			 * We haven't isolated and migrated anything, but
1382 			 * there might still be unflushed migrations from
1383 			 * previous cc->order aligned block.
1384 			 */
1385 			goto check_drain;
1386 		case ISOLATE_SUCCESS:
1387 			;
1388 		}
1389 
1390 		err = migrate_pages(&cc->migratepages, compaction_alloc,
1391 				compaction_free, (unsigned long)cc, cc->mode,
1392 				MR_COMPACTION);
1393 
1394 		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1395 							&cc->migratepages);
1396 
1397 		/* All pages were either migrated or will be released */
1398 		cc->nr_migratepages = 0;
1399 		if (err) {
1400 			putback_movable_pages(&cc->migratepages);
1401 			/*
1402 			 * migrate_pages() may return -ENOMEM when scanners meet
1403 			 * and we want compact_finished() to detect it
1404 			 */
1405 			if (err == -ENOMEM && !compact_scanners_met(cc)) {
1406 				ret = COMPACT_CONTENDED;
1407 				goto out;
1408 			}
1409 		}
1410 
1411 check_drain:
1412 		/*
1413 		 * Has the migration scanner moved away from the previous
1414 		 * cc->order aligned block where we migrated from? If yes,
1415 		 * flush the pages that were freed, so that they can merge and
1416 		 * compact_finished() can detect immediately if allocation
1417 		 * would succeed.
1418 		 */
1419 		if (cc->order > 0 && cc->last_migrated_pfn) {
1420 			int cpu;
1421 			unsigned long current_block_start =
1422 				cc->migrate_pfn & ~((1UL << cc->order) - 1);
1423 
1424 			if (cc->last_migrated_pfn < current_block_start) {
1425 				cpu = get_cpu();
1426 				lru_add_drain_cpu(cpu);
1427 				drain_local_pages(zone);
1428 				put_cpu();
1429 				/* No more flushing until we migrate again */
1430 				cc->last_migrated_pfn = 0;
1431 			}
1432 		}
1433 
1434 	}
1435 
1436 out:
1437 	/*
1438 	 * Release free pages and update where the free scanner should restart,
1439 	 * so we don't leave any returned pages behind in the next attempt.
1440 	 */
1441 	if (cc->nr_freepages > 0) {
1442 		unsigned long free_pfn = release_freepages(&cc->freepages);
1443 
1444 		cc->nr_freepages = 0;
1445 		VM_BUG_ON(free_pfn == 0);
1446 		/* The cached pfn is always the first in a pageblock */
1447 		free_pfn &= ~(pageblock_nr_pages-1);
1448 		/*
1449 		 * Only go back, not forward. The cached pfn might have been
1450 		 * already reset to zone end in compact_finished()
1451 		 */
1452 		if (free_pfn > zone->compact_cached_free_pfn)
1453 			zone->compact_cached_free_pfn = free_pfn;
1454 	}
1455 
1456 	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
1457 				cc->free_pfn, end_pfn, sync, ret);
1458 
1459 	if (ret == COMPACT_CONTENDED)
1460 		ret = COMPACT_PARTIAL;
1461 
1462 	return ret;
1463 }
1464 
1465 static unsigned long compact_zone_order(struct zone *zone, int order,
1466 		gfp_t gfp_mask, enum migrate_mode mode, int *contended,
1467 		int alloc_flags, int classzone_idx)
1468 {
1469 	unsigned long ret;
1470 	struct compact_control cc = {
1471 		.nr_freepages = 0,
1472 		.nr_migratepages = 0,
1473 		.order = order,
1474 		.gfp_mask = gfp_mask,
1475 		.zone = zone,
1476 		.mode = mode,
1477 		.alloc_flags = alloc_flags,
1478 		.classzone_idx = classzone_idx,
1479 		.direct_compaction = true,
1480 	};
1481 	INIT_LIST_HEAD(&cc.freepages);
1482 	INIT_LIST_HEAD(&cc.migratepages);
1483 
1484 	ret = compact_zone(zone, &cc);
1485 
1486 	VM_BUG_ON(!list_empty(&cc.freepages));
1487 	VM_BUG_ON(!list_empty(&cc.migratepages));
1488 
1489 	*contended = cc.contended;
1490 	return ret;
1491 }
1492 
1493 int sysctl_extfrag_threshold = 500;
1494 
1495 /**
1496  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
1497  * @gfp_mask: The GFP mask of the current allocation
1498  * @order: The order of the current allocation
1499  * @alloc_flags: The allocation flags of the current allocation
1500  * @ac: The context of current allocation
1501  * @mode: The migration mode for async, sync light, or sync migration
1502  * @contended: Return value that determines if compaction was aborted due to
1503  *	       need_resched() or lock contention
1504  *
1505  * This is the main entry point for direct page compaction.
1506  */
1507 unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1508 			int alloc_flags, const struct alloc_context *ac,
1509 			enum migrate_mode mode, int *contended)
1510 {
1511 	int may_enter_fs = gfp_mask & __GFP_FS;
1512 	int may_perform_io = gfp_mask & __GFP_IO;
1513 	struct zoneref *z;
1514 	struct zone *zone;
1515 	int rc = COMPACT_DEFERRED;
1516 	int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */
1517 
1518 	*contended = COMPACT_CONTENDED_NONE;
1519 
1520 	/* Check if the GFP flags allow compaction */
1521 	if (!order || !may_enter_fs || !may_perform_io)
1522 		return COMPACT_SKIPPED;
1523 
1524 	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);
1525 
1526 	/* Compact each zone in the list */
1527 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1528 								ac->nodemask) {
1529 		int status;
1530 		int zone_contended;
1531 
1532 		if (compaction_deferred(zone, order))
1533 			continue;
1534 
1535 		status = compact_zone_order(zone, order, gfp_mask, mode,
1536 				&zone_contended, alloc_flags,
1537 				ac->classzone_idx);
1538 		rc = max(status, rc);
1539 		/*
1540 		 * It takes at least one zone that wasn't lock contended
1541 		 * to clear all_zones_contended.
1542 		 */
1543 		all_zones_contended &= zone_contended;
1544 
1545 		/* If a normal allocation would succeed, stop compacting */
1546 		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
1547 					ac->classzone_idx, alloc_flags)) {
1548 			/*
1549 			 * We think the allocation will succeed in this zone,
1550 			 * but it is not certain, hence the false. The caller
1551 			 * will repeat this with true if allocation indeed
1552 			 * succeeds in this zone.
1553 			 */
1554 			compaction_defer_reset(zone, order, false);
1555 			/*
1556 			 * It is possible that async compaction aborted due to
1557 			 * need_resched() and the watermarks were ok thanks to
1558 			 * somebody else freeing memory. The allocation can
1559 			 * however still fail so we better signal the
1560 			 * need_resched() contention anyway (this will not
1561 			 * prevent the allocation attempt).
1562 			 */
1563 			if (zone_contended == COMPACT_CONTENDED_SCHED)
1564 				*contended = COMPACT_CONTENDED_SCHED;
1565 
1566 			goto break_loop;
1567 		}
1568 
1569 		if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
1570 			/*
1571 			 * We think that allocation won't succeed in this zone
1572 			 * so we defer compaction there. If it ends up
1573 			 * succeeding after all, it will be reset.
1574 			 */
1575 			defer_compaction(zone, order);
1576 		}
1577 
1578 		/*
1579 		 * We might have stopped compacting due to need_resched() in
1580 		 * async compaction, or due to a fatal signal detected. In that
1581 		 * case do not try further zones and signal need_resched()
1582 		 * contention.
1583 		 */
1584 		if ((zone_contended == COMPACT_CONTENDED_SCHED)
1585 					|| fatal_signal_pending(current)) {
1586 			*contended = COMPACT_CONTENDED_SCHED;
1587 			goto break_loop;
1588 		}
1589 
1590 		continue;
1591 break_loop:
1592 		/*
1593 		 * We might not have tried all the zones, so  be conservative
1594 		 * and assume they are not all lock contended.
1595 		 */
1596 		all_zones_contended = 0;
1597 		break;
1598 	}
1599 
1600 	/*
1601 	 * If at least one zone wasn't deferred or skipped, we report if all
1602 	 * zones that were tried were lock contended.
1603 	 */
1604 	if (rc > COMPACT_SKIPPED && all_zones_contended)
1605 		*contended = COMPACT_CONTENDED_LOCK;
1606 
1607 	return rc;
1608 }
1609 
1610 
1611 /* Compact all zones within a node */
1612 static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
1613 {
1614 	int zoneid;
1615 	struct zone *zone;
1616 
1617 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
1618 
1619 		zone = &pgdat->node_zones[zoneid];
1620 		if (!populated_zone(zone))
1621 			continue;
1622 
1623 		cc->nr_freepages = 0;
1624 		cc->nr_migratepages = 0;
1625 		cc->zone = zone;
1626 		INIT_LIST_HEAD(&cc->freepages);
1627 		INIT_LIST_HEAD(&cc->migratepages);
1628 
1629 		/*
1630 		 * When called via /proc/sys/vm/compact_memory
1631 		 * this makes sure we compact the whole zone regardless of
1632 		 * cached scanner positions.
1633 		 */
1634 		if (is_via_compact_memory(cc->order))
1635 			__reset_isolation_suitable(zone);
1636 
1637 		if (is_via_compact_memory(cc->order) ||
1638 				!compaction_deferred(zone, cc->order))
1639 			compact_zone(zone, cc);
1640 
1641 		VM_BUG_ON(!list_empty(&cc->freepages));
1642 		VM_BUG_ON(!list_empty(&cc->migratepages));
1643 
1644 		if (is_via_compact_memory(cc->order))
1645 			continue;
1646 
1647 		if (zone_watermark_ok(zone, cc->order,
1648 				low_wmark_pages(zone), 0, 0))
1649 			compaction_defer_reset(zone, cc->order, false);
1650 	}
1651 }
1652 
1653 void compact_pgdat(pg_data_t *pgdat, int order)
1654 {
1655 	struct compact_control cc = {
1656 		.order = order,
1657 		.mode = MIGRATE_ASYNC,
1658 	};
1659 
1660 	if (!order)
1661 		return;
1662 
1663 	__compact_pgdat(pgdat, &cc);
1664 }
1665 
1666 static void compact_node(int nid)
1667 {
1668 	struct compact_control cc = {
1669 		.order = -1,
1670 		.mode = MIGRATE_SYNC,
1671 		.ignore_skip_hint = true,
1672 	};
1673 
1674 	__compact_pgdat(NODE_DATA(nid), &cc);
1675 }
1676 
1677 /* Compact all nodes in the system */
1678 static void compact_nodes(void)
1679 {
1680 	int nid;
1681 
1682 	/* Flush pending updates to the LRU lists */
1683 	lru_add_drain_all();
1684 
1685 	for_each_online_node(nid)
1686 		compact_node(nid);
1687 }
1688 
1689 /* The written value is actually unused, all memory is compacted */
1690 int sysctl_compact_memory;
1691 
1692 /*
1693  * This is the entry point for compacting all nodes via
1694  * /proc/sys/vm/compact_memory
1695  */
1696 int sysctl_compaction_handler(struct ctl_table *table, int write,
1697 			void __user *buffer, size_t *length, loff_t *ppos)
1698 {
1699 	if (write)
1700 		compact_nodes();
1701 
1702 	return 0;
1703 }
1704 
1705 int sysctl_extfrag_handler(struct ctl_table *table, int write,
1706 			void __user *buffer, size_t *length, loff_t *ppos)
1707 {
1708 	proc_dointvec_minmax(table, write, buffer, length, ppos);
1709 
1710 	return 0;
1711 }
1712 
1713 #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
1714 static ssize_t sysfs_compact_node(struct device *dev,
1715 			struct device_attribute *attr,
1716 			const char *buf, size_t count)
1717 {
1718 	int nid = dev->id;
1719 
1720 	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
1721 		/* Flush pending updates to the LRU lists */
1722 		lru_add_drain_all();
1723 
1724 		compact_node(nid);
1725 	}
1726 
1727 	return count;
1728 }
1729 static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1730 
1731 int compaction_register_node(struct node *node)
1732 {
1733 	return device_create_file(&node->dev, &dev_attr_compact);
1734 }
1735 
1736 void compaction_unregister_node(struct node *node)
1737 {
1738 	return device_remove_file(&node->dev, &dev_attr_compact);
1739 }
1740 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
1741 
1742 static inline bool kcompactd_work_requested(pg_data_t *pgdat)
1743 {
1744 	return pgdat->kcompactd_max_order > 0;
1745 }
1746 
1747 static bool kcompactd_node_suitable(pg_data_t *pgdat)
1748 {
1749 	int zoneid;
1750 	struct zone *zone;
1751 	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
1752 
1753 	for (zoneid = 0; zoneid < classzone_idx; zoneid++) {
1754 		zone = &pgdat->node_zones[zoneid];
1755 
1756 		if (!populated_zone(zone))
1757 			continue;
1758 
1759 		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
1760 					classzone_idx) == COMPACT_CONTINUE)
1761 			return true;
1762 	}
1763 
1764 	return false;
1765 }
1766 
1767 static void kcompactd_do_work(pg_data_t *pgdat)
1768 {
1769 	/*
1770 	 * With no special task, compact all zones so that a page of requested
1771 	 * order is allocatable.
1772 	 */
1773 	int zoneid;
1774 	struct zone *zone;
1775 	struct compact_control cc = {
1776 		.order = pgdat->kcompactd_max_order,
1777 		.classzone_idx = pgdat->kcompactd_classzone_idx,
1778 		.mode = MIGRATE_SYNC_LIGHT,
1779 		.ignore_skip_hint = true,
1780 
1781 	};
1782 	bool success = false;
1783 
1784 	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
1785 							cc.classzone_idx);
1786 	count_vm_event(KCOMPACTD_WAKE);
1787 
1788 	for (zoneid = 0; zoneid < cc.classzone_idx; zoneid++) {
1789 		int status;
1790 
1791 		zone = &pgdat->node_zones[zoneid];
1792 		if (!populated_zone(zone))
1793 			continue;
1794 
1795 		if (compaction_deferred(zone, cc.order))
1796 			continue;
1797 
1798 		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
1799 							COMPACT_CONTINUE)
1800 			continue;
1801 
1802 		cc.nr_freepages = 0;
1803 		cc.nr_migratepages = 0;
1804 		cc.zone = zone;
1805 		INIT_LIST_HEAD(&cc.freepages);
1806 		INIT_LIST_HEAD(&cc.migratepages);
1807 
1808 		status = compact_zone(zone, &cc);
1809 
1810 		if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
1811 						cc.classzone_idx, 0)) {
1812 			success = true;
1813 			compaction_defer_reset(zone, cc.order, false);
1814 		} else if (status == COMPACT_COMPLETE) {
1815 			/*
1816 			 * We use sync migration mode here, so we defer like
1817 			 * sync direct compaction does.
1818 			 */
1819 			defer_compaction(zone, cc.order);
1820 		}
1821 
1822 		VM_BUG_ON(!list_empty(&cc.freepages));
1823 		VM_BUG_ON(!list_empty(&cc.migratepages));
1824 	}
1825 
1826 	/*
1827 	 * Regardless of success, we are done until woken up next. But remember
1828 	 * the requested order/classzone_idx in case it was higher/tighter than
1829 	 * our current ones
1830 	 */
1831 	if (pgdat->kcompactd_max_order <= cc.order)
1832 		pgdat->kcompactd_max_order = 0;
1833 	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
1834 		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
1835 }
1836 
1837 void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
1838 {
1839 	if (!order)
1840 		return;
1841 
1842 	if (pgdat->kcompactd_max_order < order)
1843 		pgdat->kcompactd_max_order = order;
1844 
1845 	if (pgdat->kcompactd_classzone_idx > classzone_idx)
1846 		pgdat->kcompactd_classzone_idx = classzone_idx;
1847 
1848 	if (!waitqueue_active(&pgdat->kcompactd_wait))
1849 		return;
1850 
1851 	if (!kcompactd_node_suitable(pgdat))
1852 		return;
1853 
1854 	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
1855 							classzone_idx);
1856 	wake_up_interruptible(&pgdat->kcompactd_wait);
1857 }
1858 
1859 /*
1860  * The background compaction daemon, started as a kernel thread
1861  * from the init process.
1862  */
1863 static int kcompactd(void *p)
1864 {
1865 	pg_data_t *pgdat = (pg_data_t*)p;
1866 	struct task_struct *tsk = current;
1867 
1868 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1869 
1870 	if (!cpumask_empty(cpumask))
1871 		set_cpus_allowed_ptr(tsk, cpumask);
1872 
1873 	set_freezable();
1874 
1875 	pgdat->kcompactd_max_order = 0;
1876 	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
1877 
1878 	while (!kthread_should_stop()) {
1879 		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
1880 		wait_event_freezable(pgdat->kcompactd_wait,
1881 				kcompactd_work_requested(pgdat));
1882 
1883 		kcompactd_do_work(pgdat);
1884 	}
1885 
1886 	return 0;
1887 }
1888 
1889 /*
1890  * This kcompactd start function will be called by init and node-hot-add.
1891  * On node-hot-add, kcompactd will moved to proper cpus if cpus are hot-added.
1892  */
1893 int kcompactd_run(int nid)
1894 {
1895 	pg_data_t *pgdat = NODE_DATA(nid);
1896 	int ret = 0;
1897 
1898 	if (pgdat->kcompactd)
1899 		return 0;
1900 
1901 	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
1902 	if (IS_ERR(pgdat->kcompactd)) {
1903 		pr_err("Failed to start kcompactd on node %d\n", nid);
1904 		ret = PTR_ERR(pgdat->kcompactd);
1905 		pgdat->kcompactd = NULL;
1906 	}
1907 	return ret;
1908 }
1909 
1910 /*
1911  * Called by memory hotplug when all memory in a node is offlined. Caller must
1912  * hold mem_hotplug_begin/end().
1913  */
1914 void kcompactd_stop(int nid)
1915 {
1916 	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
1917 
1918 	if (kcompactd) {
1919 		kthread_stop(kcompactd);
1920 		NODE_DATA(nid)->kcompactd = NULL;
1921 	}
1922 }
1923 
1924 /*
1925  * It's optimal to keep kcompactd on the same CPUs as their memory, but
1926  * not required for correctness. So if the last cpu in a node goes
1927  * away, we get changed to run anywhere: as the first one comes back,
1928  * restore their cpu bindings.
1929  */
1930 static int cpu_callback(struct notifier_block *nfb, unsigned long action,
1931 			void *hcpu)
1932 {
1933 	int nid;
1934 
1935 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
1936 		for_each_node_state(nid, N_MEMORY) {
1937 			pg_data_t *pgdat = NODE_DATA(nid);
1938 			const struct cpumask *mask;
1939 
1940 			mask = cpumask_of_node(pgdat->node_id);
1941 
1942 			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
1943 				/* One of our CPUs online: restore mask */
1944 				set_cpus_allowed_ptr(pgdat->kcompactd, mask);
1945 		}
1946 	}
1947 	return NOTIFY_OK;
1948 }
1949 
1950 static int __init kcompactd_init(void)
1951 {
1952 	int nid;
1953 
1954 	for_each_node_state(nid, N_MEMORY)
1955 		kcompactd_run(nid);
1956 	hotcpu_notifier(cpu_callback, 0);
1957 	return 0;
1958 }
1959 subsys_initcall(kcompactd_init)
1960 
1961 #endif /* CONFIG_COMPACTION */
1962