/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting.
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	bool sync;			/* Synchronous migration */

	int order;			/* order a direct compactor needs */
	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
	struct zone *zone;
};
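
/*
 * Illustrative sketch (the PFN values are invented for the example, not
 * taken from any real configuration): the two scanners start at opposite
 * ends of the zone and walk towards each other.
 *
 *	migrate_pfn: 0x10000 -> 0x10200 -> 0x10400 ...  (scans upward,
 *		     isolating in-use movable pages)
 *	free_pfn:    0x20000 -> 0x1fe00 -> 0x1fc00 ...  (scans downward,
 *		     isolating free pages)
 *
 * The run completes once free_pfn <= migrate_pfn, i.e. when the scanners
 * meet somewhere in the middle of the zone.
 */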

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

/* Isolate free pages onto a private freelist. Must hold zone->lock */
static unsigned long isolate_freepages_block(struct zone *zone,
				unsigned long blockpfn,
				struct list_head *freelist)
{
	unsigned long zone_end_pfn, end_pfn;
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;

	/* Get the last PFN we should scan for free pages at */
	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);

	/* Find the first usable PFN in the block to initialise the page cursor */
	for (; blockpfn < end_pfn; blockpfn++) {
		if (pfn_valid_within(blockpfn))
			break;
	}
	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn))
			continue;
		nr_scanned++;

		if (!PageBuddy(page))
			continue;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
	return total_isolated;
}
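
/*
 * Worked example (invented PFNs): if the scan above hits a free order-2
 * buddy page at pfn 0x1004, split_free_page() breaks it into 4 order-0
 * pages, all four go onto the private freelist, and blockpfn/cursor
 * advance by isolated - 1 so the next loop iteration continues at
 * pfn 0x1008, just past the split page.
 */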

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE, allow migration */
	if (migratetype == MIGRATE_MOVABLE)
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/*
		 * Found a block suitable for isolating free pages from. Take
		 * the zone lock with IRQs disabled, double check the block
		 * is still suitable and isolate the pages. The recheck
		 * minimises the time IRQs are disabled.
		 */
		isolated = 0;
		spin_lock_irqsave(&zone->lock, flags);
		if (suitable_migration_target(page)) {
			isolated = isolate_freepages_block(zone, pfn, freelist);
			nr_freepages += isolated;
		}
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator.
		 */
		if (isolated)
			high_pfn = max(high_pfn, pfn);
	}

	/* split_free_page does not map the pages */
	list_for_each_entry(page, freelist, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	/* !!page_is_file_cache() yields 0 for anon pages, 1 for file pages */
	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
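
/*
 * Worked example (invented counters): with 8000 inactive, 4000 active and
 * 7000 isolated pages in a zone, 7000 > (8000 + 4000) / 2 = 6000, so
 * too_many_isolated() reports true and the migrate scanner backs off until
 * parallel reclaim or compaction puts pages back on the LRU lists.
 */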

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any of cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
static unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated.
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	spin_lock_irq(&zone->lru_lock);
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;
		bool locked = true;

		/* give a chance to irqs before checking need_resched() */
		if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			spin_unlock_irq(&zone->lru_lock);
			locked = false;
		}
		if (need_resched() || spin_is_contended(&zone->lru_lock)) {
			if (locked)
				spin_unlock_irq(&zone->lru_lock);
			cond_resched();
			spin_lock_irq(&zone->lru_lock);
			if (fatal_signal_pending(current))
				break;
		} else if (!locked)
			spin_lock_irq(&zone->lru_lock);

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone.
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		/* Skip if free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks. Async
		 * migration is optimistic about whether the minimum amount of
		 * work satisfies the allocation.
		 */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
				get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
			low_pfn += pageblock_nr_pages;
			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
			last_pageblock_nr = pageblock_nr;
			continue;
		}

		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set, and lru_lock excludes isolation,
		 * splitting and collapsing (collapsing has already
		 * happened if PageLRU is set).
		 */
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (!cc->sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode, 0) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		del_page_from_lru_list(zone, page, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	acct_isolated(zone, cc);

	spin_unlock_irq(&zone->lru_lock);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	return low_pfn;
}
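
/*
 * Hypothetical usage sketch (the function is real, the caller below is
 * invented for illustration): a driver of this helper isolates one chunk
 * at a time and uses the returned PFN to resume, e.g.
 *
 *	pfn = isolate_migratepages_range(zone, cc, start_pfn, end_pfn);
 *	if (!pfn)
 *		goto abort;	fatal signal or aborted async scan
 *	start_pfn = pfn;	continue from the first unscanned page
 *
 * isolate_migratepages() below is the in-tree caller, working one
 * pageblock at a time.
 */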

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
	if (!low_pfn)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}
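
/*
 * Sketch of how the callback above is consumed (see compact_zone() below
 * for the real call site): migrate_pages() invokes compaction_alloc() once
 * per source page to obtain a destination page, passing cc back through
 * the private data argument:
 *
 *	err = migrate_pages(&cc->migratepages, compaction_alloc,
 *			(unsigned long)cc, false, MIGRATE_ASYNC);
 *
 * A NULL return tells migrate_pages() no destination pages are left; the
 * remaining source pages are then put back on the LRU by the caller.
 */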

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn)
		return COMPACT_COMPLETE;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		/* Job done if page is free of the right migratetype */
		if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (order >= pageblock_order && zone->free_area[order].nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}
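
/*
 * Example of the completion check above (invented values): a direct
 * compactor with cc->order = 2 and cc->migratetype = MIGRATE_MOVABLE keeps
 * getting COMPACT_CONTINUE until either the scanners meet or one of
 * free_area[2..MAX_ORDER-1] gains a page on its MOVABLE free list, at
 * which point COMPACT_PARTIAL signals that the pending allocation should
 * now succeed.
 */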

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher.
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}
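
/*
 * Worked example (numbers invented): for order = 3 with
 * low_wmark_pages(zone) = 1000, the order-0 watermark check needs
 * 1000 + (2UL << 3) = 1016 free pages before compaction is attempted.
 * If fragmentation_index() then returns 400 while sysctl_extfrag_threshold
 * sits at its default of 500, the potential failure is attributed to low
 * memory rather than fragmentation and COMPACT_SKIPPED is returned.
 */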

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/* Setup to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;
	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
	cc->free_pfn &= ~(pageblock_nr_pages-1);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, false,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release LRU pages not migrated */
		if (err) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
		}
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 bool sync)
{
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	return compact_zone(zone, &cc);
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;

	/*
	 * Check whether it is worth even starting compaction. The order check
	 * is made because we assume the page allocator can satisfy the
	 * "cheaper" orders without taking special steps.
	 */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
			break;
	}

	return rc;
}
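
/*
 * Illustrative caller sketch (assumption: this paraphrases the page
 * allocator's direct-compaction slow path; only try_to_compact_pages()
 * itself is from this file):
 *
 *	rc = try_to_compact_pages(zonelist, order, gfp_mask, nodemask, sync);
 *	if (rc != COMPACT_SKIPPED)
 *		retry the allocation from the freelists;
 *
 * A result above COMPACT_SKIPPED means at least one zone made progress,
 * so the high-order allocation is worth retrying before falling back to
 * reclaim.
 */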

/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			int ok = zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0);
			if (ok && cc->order > zone->compact_order_failed)
				zone->compact_order_failed = cc->order + 1;
			/* Currently async compaction is never deferred. */
			else if (!ok && cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}

	return 0;
}

int compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
	};

	return __compact_pgdat(pgdat, &cc);
}

static int compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
	};

	return __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}
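
/*
 * Usage note: any write triggers whole-system compaction, e.g.
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * The value written is ignored (see sysctl_compact_memory above).
 */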

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
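
/*
 * Usage note: each online NUMA node exposes a write-only trigger, e.g.
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 *
 * compacts every zone of node 0; as with the sysctl, the written value
 * is ignored.
 */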

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */