xref: /openbmc/linux/mm/compaction.c (revision b7aba6984dc048503b69c2a885098cdd430832bf)
1748446bbSMel Gorman /*
2748446bbSMel Gorman  * linux/mm/compaction.c
3748446bbSMel Gorman  *
4748446bbSMel Gorman  * Memory compaction for the reduction of external fragmentation. Note that
5748446bbSMel Gorman  * this heavily depends upon page migration to do all the real heavy
6748446bbSMel Gorman  * lifting
7748446bbSMel Gorman  *
8748446bbSMel Gorman  * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
9748446bbSMel Gorman  */
10748446bbSMel Gorman #include <linux/swap.h>
11748446bbSMel Gorman #include <linux/migrate.h>
12748446bbSMel Gorman #include <linux/compaction.h>
13748446bbSMel Gorman #include <linux/mm_inline.h>
14748446bbSMel Gorman #include <linux/backing-dev.h>
1576ab0f53SMel Gorman #include <linux/sysctl.h>
16ed4a6d7fSMel Gorman #include <linux/sysfs.h>
17748446bbSMel Gorman #include "internal.h"
18748446bbSMel Gorman 
19*b7aba698SMel Gorman #define CREATE_TRACE_POINTS
20*b7aba698SMel Gorman #include <trace/events/compaction.h>
21*b7aba698SMel Gorman 
22748446bbSMel Gorman /*
23748446bbSMel Gorman  * compact_control is used to track pages being migrated and the free pages
24748446bbSMel Gorman  * they are being migrated to during memory compaction. The free_pfn starts
25748446bbSMel Gorman  * at the end of a zone and migrate_pfn begins at the start. Movable pages
26748446bbSMel Gorman  * are moved to the end of a zone during a compaction run and the run
27748446bbSMel Gorman  * completes when free_pfn <= migrate_pfn
28748446bbSMel Gorman  */
29748446bbSMel Gorman struct compact_control {
30748446bbSMel Gorman 	struct list_head freepages;	/* List of free pages to migrate to */
31748446bbSMel Gorman 	struct list_head migratepages;	/* List of pages being migrated */
32748446bbSMel Gorman 	unsigned long nr_freepages;	/* Number of isolated free pages */
33748446bbSMel Gorman 	unsigned long nr_migratepages;	/* Number of pages to migrate */
34748446bbSMel Gorman 	unsigned long free_pfn;		/* isolate_freepages search base */
35748446bbSMel Gorman 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
36748446bbSMel Gorman 
37748446bbSMel Gorman 	/* Account for isolated anon and file pages */
38748446bbSMel Gorman 	unsigned long nr_anon;
39748446bbSMel Gorman 	unsigned long nr_file;
40748446bbSMel Gorman 
4156de7263SMel Gorman 	unsigned int order;		/* order a direct compactor needs */
4256de7263SMel Gorman 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
43748446bbSMel Gorman 	struct zone *zone;
44748446bbSMel Gorman };
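/*
 * Illustrative sketch of how the fields above are used: compact_zone()
 * below seeds the two scanners at opposite ends of the zone and a run
 * finishes once they meet, roughly:
 *
 *	cc->migrate_pfn = zone->zone_start_pfn;
 *	cc->free_pfn = (zone->zone_start_pfn + zone->spanned_pages) &
 *					~(pageblock_nr_pages - 1);
 *	...
 *	if (cc->free_pfn <= cc->migrate_pfn)
 *		return COMPACT_COMPLETE;	(see compact_finished())
 */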
45748446bbSMel Gorman 
46748446bbSMel Gorman static unsigned long release_freepages(struct list_head *freelist)
47748446bbSMel Gorman {
48748446bbSMel Gorman 	struct page *page, *next;
49748446bbSMel Gorman 	unsigned long count = 0;
50748446bbSMel Gorman 
51748446bbSMel Gorman 	list_for_each_entry_safe(page, next, freelist, lru) {
52748446bbSMel Gorman 		list_del(&page->lru);
53748446bbSMel Gorman 		__free_page(page);
54748446bbSMel Gorman 		count++;
55748446bbSMel Gorman 	}
56748446bbSMel Gorman 
57748446bbSMel Gorman 	return count;
58748446bbSMel Gorman }
59748446bbSMel Gorman 
60748446bbSMel Gorman /* Isolate free pages onto a private freelist. Must hold zone->lock */
61748446bbSMel Gorman static unsigned long isolate_freepages_block(struct zone *zone,
62748446bbSMel Gorman 				unsigned long blockpfn,
63748446bbSMel Gorman 				struct list_head *freelist)
64748446bbSMel Gorman {
65748446bbSMel Gorman 	unsigned long zone_end_pfn, end_pfn;
66*b7aba698SMel Gorman 	int nr_scanned = 0, total_isolated = 0;
67748446bbSMel Gorman 	struct page *cursor;
68748446bbSMel Gorman 
69748446bbSMel Gorman 	/* Get the last PFN we should scan for free pages at */
70748446bbSMel Gorman 	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
71748446bbSMel Gorman 	end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
72748446bbSMel Gorman 
73748446bbSMel Gorman 	/* Find the first usable PFN in the block to initialise the page cursor */
74748446bbSMel Gorman 	for (; blockpfn < end_pfn; blockpfn++) {
75748446bbSMel Gorman 		if (pfn_valid_within(blockpfn))
76748446bbSMel Gorman 			break;
77748446bbSMel Gorman 	}
78748446bbSMel Gorman 	cursor = pfn_to_page(blockpfn);
79748446bbSMel Gorman 
80748446bbSMel Gorman 	/* Isolate free pages. This assumes the block is valid */
81748446bbSMel Gorman 	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
82748446bbSMel Gorman 		int isolated, i;
83748446bbSMel Gorman 		struct page *page = cursor;
84748446bbSMel Gorman 
85748446bbSMel Gorman 		if (!pfn_valid_within(blockpfn))
86748446bbSMel Gorman 			continue;
87*b7aba698SMel Gorman 		nr_scanned++;
88748446bbSMel Gorman 
89748446bbSMel Gorman 		if (!PageBuddy(page))
90748446bbSMel Gorman 			continue;
91748446bbSMel Gorman 
92748446bbSMel Gorman 		/* Found a free page, break it into order-0 pages */
93748446bbSMel Gorman 		isolated = split_free_page(page);
94748446bbSMel Gorman 		total_isolated += isolated;
95748446bbSMel Gorman 		for (i = 0; i < isolated; i++) {
96748446bbSMel Gorman 			list_add(&page->lru, freelist);
97748446bbSMel Gorman 			page++;
98748446bbSMel Gorman 		}
99748446bbSMel Gorman 
100748446bbSMel Gorman 		/* If a page was split, advance to the end of it */
101748446bbSMel Gorman 		if (isolated) {
102748446bbSMel Gorman 			blockpfn += isolated - 1;
103748446bbSMel Gorman 			cursor += isolated - 1;
104748446bbSMel Gorman 		}
105748446bbSMel Gorman 	}
106748446bbSMel Gorman 
107*b7aba698SMel Gorman 	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
108748446bbSMel Gorman 	return total_isolated;
109748446bbSMel Gorman }
110748446bbSMel Gorman 
111748446bbSMel Gorman /* Returns true if the page is within a block suitable for migration to */
112748446bbSMel Gorman static bool suitable_migration_target(struct page *page)
113748446bbSMel Gorman {
114748446bbSMel Gorman 
115748446bbSMel Gorman 	int migratetype = get_pageblock_migratetype(page);
116748446bbSMel Gorman 
117748446bbSMel Gorman 	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
118748446bbSMel Gorman 	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
119748446bbSMel Gorman 		return false;
120748446bbSMel Gorman 
121748446bbSMel Gorman 	/* If the page is a large free page, then allow migration */
122748446bbSMel Gorman 	if (PageBuddy(page) && page_order(page) >= pageblock_order)
123748446bbSMel Gorman 		return true;
124748446bbSMel Gorman 
125748446bbSMel Gorman 	/* If the block is MIGRATE_MOVABLE, allow migration */
126748446bbSMel Gorman 	if (migratetype == MIGRATE_MOVABLE)
127748446bbSMel Gorman 		return true;
128748446bbSMel Gorman 
129748446bbSMel Gorman 	/* Otherwise skip the block */
130748446bbSMel Gorman 	return false;
131748446bbSMel Gorman }
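/*
 * Summary of the checks above (for reference):
 *
 *	pageblock state				suitable target?
 *	MIGRATE_ISOLATE or MIGRATE_RESERVE	no
 *	free page of order >= pageblock_order	yes
 *	MIGRATE_MOVABLE				yes
 *	anything else				no
 */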
132748446bbSMel Gorman 
133748446bbSMel Gorman /*
134748446bbSMel Gorman  * Based on information in the current compact_control, find blocks
135748446bbSMel Gorman  * suitable for isolating free pages from and then isolate them.
136748446bbSMel Gorman  */
137748446bbSMel Gorman static void isolate_freepages(struct zone *zone,
138748446bbSMel Gorman 				struct compact_control *cc)
139748446bbSMel Gorman {
140748446bbSMel Gorman 	struct page *page;
141748446bbSMel Gorman 	unsigned long high_pfn, low_pfn, pfn;
142748446bbSMel Gorman 	unsigned long flags;
143748446bbSMel Gorman 	int nr_freepages = cc->nr_freepages;
144748446bbSMel Gorman 	struct list_head *freelist = &cc->freepages;
145748446bbSMel Gorman 
146748446bbSMel Gorman 	pfn = cc->free_pfn;
147748446bbSMel Gorman 	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
148748446bbSMel Gorman 	high_pfn = low_pfn;
149748446bbSMel Gorman 
150748446bbSMel Gorman 	/*
151748446bbSMel Gorman 	 * Isolate free pages until enough are available to migrate the
152748446bbSMel Gorman 	 * pages on cc->migratepages. We stop searching if the migrate
153748446bbSMel Gorman 	 * and free page scanners meet or enough free pages are isolated.
154748446bbSMel Gorman 	 */
155748446bbSMel Gorman 	spin_lock_irqsave(&zone->lock, flags);
156748446bbSMel Gorman 	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
157748446bbSMel Gorman 					pfn -= pageblock_nr_pages) {
158748446bbSMel Gorman 		unsigned long isolated;
159748446bbSMel Gorman 
160748446bbSMel Gorman 		if (!pfn_valid(pfn))
161748446bbSMel Gorman 			continue;
162748446bbSMel Gorman 
163748446bbSMel Gorman 		/*
164748446bbSMel Gorman 		 * Check for overlapping nodes/zones. It's possible on some
165748446bbSMel Gorman 		 * configurations to have a setup like
166748446bbSMel Gorman 		 * node0 node1 node0
167748446bbSMel Gorman 		 * i.e. it's possible that not all pages within a zone's range
168748446bbSMel Gorman 		 * of pages belong to a single zone.
169748446bbSMel Gorman 		 */
170748446bbSMel Gorman 		page = pfn_to_page(pfn);
171748446bbSMel Gorman 		if (page_zone(page) != zone)
172748446bbSMel Gorman 			continue;
173748446bbSMel Gorman 
174748446bbSMel Gorman 		/* Check the block is suitable for migration */
175748446bbSMel Gorman 		if (!suitable_migration_target(page))
176748446bbSMel Gorman 			continue;
177748446bbSMel Gorman 
178748446bbSMel Gorman 		/* Found a block suitable for isolating free pages from */
179748446bbSMel Gorman 		isolated = isolate_freepages_block(zone, pfn, freelist);
180748446bbSMel Gorman 		nr_freepages += isolated;
181748446bbSMel Gorman 
182748446bbSMel Gorman 		/*
183748446bbSMel Gorman 		 * Record the highest PFN we isolated pages from. When next
184748446bbSMel Gorman 		 * looking for free pages, the search will restart here as
185748446bbSMel Gorman 		 * page migration may have returned some pages to the allocator
186748446bbSMel Gorman 		 */
187748446bbSMel Gorman 		if (isolated)
188748446bbSMel Gorman 			high_pfn = max(high_pfn, pfn);
189748446bbSMel Gorman 	}
190748446bbSMel Gorman 	spin_unlock_irqrestore(&zone->lock, flags);
191748446bbSMel Gorman 
192748446bbSMel Gorman 	/* split_free_page does not map the pages */
193748446bbSMel Gorman 	list_for_each_entry(page, freelist, lru) {
194748446bbSMel Gorman 		arch_alloc_page(page, 0);
195748446bbSMel Gorman 		kernel_map_pages(page, 1, 1);
196748446bbSMel Gorman 	}
197748446bbSMel Gorman 
198748446bbSMel Gorman 	cc->free_pfn = high_pfn;
199748446bbSMel Gorman 	cc->nr_freepages = nr_freepages;
200748446bbSMel Gorman }
201748446bbSMel Gorman 
202748446bbSMel Gorman /* Update the number of anon and file isolated pages in the zone */
203748446bbSMel Gorman static void acct_isolated(struct zone *zone, struct compact_control *cc)
204748446bbSMel Gorman {
205748446bbSMel Gorman 	struct page *page;
206748446bbSMel Gorman 	unsigned int count[NR_LRU_LISTS] = { 0, };
207748446bbSMel Gorman 
208748446bbSMel Gorman 	list_for_each_entry(page, &cc->migratepages, lru) {
209748446bbSMel Gorman 		int lru = page_lru_base_type(page);
210748446bbSMel Gorman 		count[lru]++;
211748446bbSMel Gorman 	}
212748446bbSMel Gorman 
213748446bbSMel Gorman 	cc->nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
214748446bbSMel Gorman 	cc->nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
215748446bbSMel Gorman 	__mod_zone_page_state(zone, NR_ISOLATED_ANON, cc->nr_anon);
216748446bbSMel Gorman 	__mod_zone_page_state(zone, NR_ISOLATED_FILE, cc->nr_file);
217748446bbSMel Gorman }
218748446bbSMel Gorman 
219748446bbSMel Gorman /* Similar to reclaim, but different enough that they don't share logic */
220748446bbSMel Gorman static bool too_many_isolated(struct zone *zone)
221748446bbSMel Gorman {
222bc693045SMinchan Kim 	unsigned long active, inactive, isolated;
223748446bbSMel Gorman 
224748446bbSMel Gorman 	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
225748446bbSMel Gorman 					zone_page_state(zone, NR_INACTIVE_ANON);
226bc693045SMinchan Kim 	active = zone_page_state(zone, NR_ACTIVE_FILE) +
227bc693045SMinchan Kim 					zone_page_state(zone, NR_ACTIVE_ANON);
228748446bbSMel Gorman 	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
229748446bbSMel Gorman 					zone_page_state(zone, NR_ISOLATED_ANON);
230748446bbSMel Gorman 
231bc693045SMinchan Kim 	return isolated > (inactive + active) / 2;
232748446bbSMel Gorman }
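/*
 * Worked example (illustrative numbers): with 600 inactive, 400 active
 * and 501 isolated LRU pages in the zone, 501 > (600 + 400) / 2, so
 * isolate_migratepages() below backs off and waits for the isolated
 * pages to drain before taking more pages off the LRU.
 */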
233748446bbSMel Gorman 
234748446bbSMel Gorman /*
235748446bbSMel Gorman  * Isolate all pages that can be migrated from the block pointed to by
236748446bbSMel Gorman  * the migrate scanner within compact_control.
237748446bbSMel Gorman  */
238748446bbSMel Gorman static unsigned long isolate_migratepages(struct zone *zone,
239748446bbSMel Gorman 					struct compact_control *cc)
240748446bbSMel Gorman {
241748446bbSMel Gorman 	unsigned long low_pfn, end_pfn;
242*b7aba698SMel Gorman 	unsigned long nr_scanned = 0, nr_isolated = 0;
243748446bbSMel Gorman 	struct list_head *migratelist = &cc->migratepages;
244748446bbSMel Gorman 
245748446bbSMel Gorman 	/* Do not scan outside zone boundaries */
246748446bbSMel Gorman 	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
247748446bbSMel Gorman 
248748446bbSMel Gorman 	/* Only scan within a pageblock boundary */
249748446bbSMel Gorman 	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
250748446bbSMel Gorman 
251748446bbSMel Gorman 	/* Do not cross the free scanner or scan within a memory hole */
252748446bbSMel Gorman 	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
253748446bbSMel Gorman 		cc->migrate_pfn = end_pfn;
254748446bbSMel Gorman 		return 0;
255748446bbSMel Gorman 	}
256748446bbSMel Gorman 
257748446bbSMel Gorman 	/*
258748446bbSMel Gorman 	 * Ensure that there are not too many pages isolated from the LRU
259748446bbSMel Gorman 	 * list by either parallel reclaimers or compaction. If there are,
260748446bbSMel Gorman 	 * delay for some time until fewer pages are isolated
261748446bbSMel Gorman 	 */
262748446bbSMel Gorman 	while (unlikely(too_many_isolated(zone))) {
263748446bbSMel Gorman 		congestion_wait(BLK_RW_ASYNC, HZ/10);
264748446bbSMel Gorman 
265748446bbSMel Gorman 		if (fatal_signal_pending(current))
266748446bbSMel Gorman 			return 0;
267748446bbSMel Gorman 	}
268748446bbSMel Gorman 
269748446bbSMel Gorman 	/* Time to isolate some pages for migration */
270748446bbSMel Gorman 	spin_lock_irq(&zone->lru_lock);
271748446bbSMel Gorman 	for (; low_pfn < end_pfn; low_pfn++) {
272748446bbSMel Gorman 		struct page *page;
273748446bbSMel Gorman 		if (!pfn_valid_within(low_pfn))
274748446bbSMel Gorman 			continue;
275*b7aba698SMel Gorman 		nr_scanned++;
276748446bbSMel Gorman 
277748446bbSMel Gorman 		/* Get the page and skip if free */
278748446bbSMel Gorman 		page = pfn_to_page(low_pfn);
279748446bbSMel Gorman 		if (PageBuddy(page))
280748446bbSMel Gorman 			continue;
281748446bbSMel Gorman 
282748446bbSMel Gorman 		/* Try to isolate the page */
283748446bbSMel Gorman 		if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
284748446bbSMel Gorman 			continue;
285748446bbSMel Gorman 
286748446bbSMel Gorman 		/* Successfully isolated */
287748446bbSMel Gorman 		del_page_from_lru_list(zone, page, page_lru(page));
288748446bbSMel Gorman 		list_add(&page->lru, migratelist);
289748446bbSMel Gorman 		cc->nr_migratepages++;
290*b7aba698SMel Gorman 		nr_isolated++;
291748446bbSMel Gorman 
292748446bbSMel Gorman 		/* Avoid isolating too much */
293748446bbSMel Gorman 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
294748446bbSMel Gorman 			break;
295748446bbSMel Gorman 	}
296748446bbSMel Gorman 
297748446bbSMel Gorman 	acct_isolated(zone, cc);
298748446bbSMel Gorman 
299748446bbSMel Gorman 	spin_unlock_irq(&zone->lru_lock);
300748446bbSMel Gorman 	cc->migrate_pfn = low_pfn;
301748446bbSMel Gorman 
302*b7aba698SMel Gorman 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
303*b7aba698SMel Gorman 
304748446bbSMel Gorman 	return cc->nr_migratepages;
305748446bbSMel Gorman }
306748446bbSMel Gorman 
307748446bbSMel Gorman /*
308748446bbSMel Gorman  * This is a migrate-callback that "allocates" freepages by taking pages
309748446bbSMel Gorman  * from the isolated freelists in the block we are migrating to.
310748446bbSMel Gorman  */
311748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage,
312748446bbSMel Gorman 					unsigned long data,
313748446bbSMel Gorman 					int **result)
314748446bbSMel Gorman {
315748446bbSMel Gorman 	struct compact_control *cc = (struct compact_control *)data;
316748446bbSMel Gorman 	struct page *freepage;
317748446bbSMel Gorman 
318748446bbSMel Gorman 	/* Isolate free pages if necessary */
319748446bbSMel Gorman 	if (list_empty(&cc->freepages)) {
320748446bbSMel Gorman 		isolate_freepages(cc->zone, cc);
321748446bbSMel Gorman 
322748446bbSMel Gorman 		if (list_empty(&cc->freepages))
323748446bbSMel Gorman 			return NULL;
324748446bbSMel Gorman 	}
325748446bbSMel Gorman 
326748446bbSMel Gorman 	freepage = list_entry(cc->freepages.next, struct page, lru);
327748446bbSMel Gorman 	list_del(&freepage->lru);
328748446bbSMel Gorman 	cc->nr_freepages--;
329748446bbSMel Gorman 
330748446bbSMel Gorman 	return freepage;
331748446bbSMel Gorman }
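/*
 * Illustrative sketch: compaction_alloc() is not called directly; it is
 * handed to migrate_pages() as the "get new page" callback with the
 * compact_control passed through the private data argument, as
 * compact_zone() does below:
 *
 *	migrate_pages(&cc->migratepages, compaction_alloc,
 *					(unsigned long)cc, 0);
 */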
332748446bbSMel Gorman 
333748446bbSMel Gorman /*
334748446bbSMel Gorman  * We cannot control nr_migratepages and nr_freepages fully when migration is
335748446bbSMel Gorman  * running as migrate_pages() has no knowledge of compact_control. When
336748446bbSMel Gorman  * migration is complete, we count the number of pages on the lists by hand.
337748446bbSMel Gorman  */
338748446bbSMel Gorman static void update_nr_listpages(struct compact_control *cc)
339748446bbSMel Gorman {
340748446bbSMel Gorman 	int nr_migratepages = 0;
341748446bbSMel Gorman 	int nr_freepages = 0;
342748446bbSMel Gorman 	struct page *page;
343748446bbSMel Gorman 
344748446bbSMel Gorman 	list_for_each_entry(page, &cc->migratepages, lru)
345748446bbSMel Gorman 		nr_migratepages++;
346748446bbSMel Gorman 	list_for_each_entry(page, &cc->freepages, lru)
347748446bbSMel Gorman 		nr_freepages++;
348748446bbSMel Gorman 
349748446bbSMel Gorman 	cc->nr_migratepages = nr_migratepages;
350748446bbSMel Gorman 	cc->nr_freepages = nr_freepages;
351748446bbSMel Gorman }
352748446bbSMel Gorman 
353748446bbSMel Gorman static int compact_finished(struct zone *zone,
354748446bbSMel Gorman 						struct compact_control *cc)
355748446bbSMel Gorman {
35656de7263SMel Gorman 	unsigned int order;
35756de7263SMel Gorman 	unsigned long watermark = low_wmark_pages(zone) + (1 << cc->order);
35856de7263SMel Gorman 
359748446bbSMel Gorman 	if (fatal_signal_pending(current))
360748446bbSMel Gorman 		return COMPACT_PARTIAL;
361748446bbSMel Gorman 
362748446bbSMel Gorman 	/* Compaction run completes if the migrate and free scanner meet */
363748446bbSMel Gorman 	if (cc->free_pfn <= cc->migrate_pfn)
364748446bbSMel Gorman 		return COMPACT_COMPLETE;
365748446bbSMel Gorman 
36656de7263SMel Gorman 	/* Compaction run is not finished if the watermark is not met */
36756de7263SMel Gorman 	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
36856de7263SMel Gorman 		return COMPACT_CONTINUE;
36956de7263SMel Gorman 
37056de7263SMel Gorman 	if (cc->order == -1)
37156de7263SMel Gorman 		return COMPACT_CONTINUE;
37256de7263SMel Gorman 
37356de7263SMel Gorman 	/* Direct compactor: Is a suitable page free? */
37456de7263SMel Gorman 	for (order = cc->order; order < MAX_ORDER; order++) {
37556de7263SMel Gorman 		/* Job done if page is free of the right migratetype */
37656de7263SMel Gorman 		if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
37756de7263SMel Gorman 			return COMPACT_PARTIAL;
37856de7263SMel Gorman 
37956de7263SMel Gorman 		/* Job done if allocation would set block type */
38056de7263SMel Gorman 		if (order >= pageblock_order && zone->free_area[order].nr_free)
38156de7263SMel Gorman 			return COMPACT_PARTIAL;
38256de7263SMel Gorman 	}
38356de7263SMel Gorman 
384748446bbSMel Gorman 	return COMPACT_CONTINUE;
385748446bbSMel Gorman }
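/*
 * Return values used above, from <linux/compaction.h>:
 *	COMPACT_CONTINUE - keep compacting this zone
 *	COMPACT_PARTIAL  - compaction stopped early, either because a
 *			   suitable page is already free or a fatal signal
 *			   is pending
 *	COMPACT_COMPLETE - the free and migrate scanners met; the whole
 *			   zone has been scanned
 */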
386748446bbSMel Gorman 
387748446bbSMel Gorman static int compact_zone(struct zone *zone, struct compact_control *cc)
388748446bbSMel Gorman {
389748446bbSMel Gorman 	int ret;
390748446bbSMel Gorman 
391748446bbSMel Gorman 	/* Setup to move all movable pages to the end of the zone */
392748446bbSMel Gorman 	cc->migrate_pfn = zone->zone_start_pfn;
393748446bbSMel Gorman 	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
394748446bbSMel Gorman 	cc->free_pfn &= ~(pageblock_nr_pages-1);
395748446bbSMel Gorman 
396748446bbSMel Gorman 	migrate_prep_local();
397748446bbSMel Gorman 
398748446bbSMel Gorman 	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
399748446bbSMel Gorman 		unsigned long nr_migrate, nr_remaining;
400748446bbSMel Gorman 
401748446bbSMel Gorman 		if (!isolate_migratepages(zone, cc))
402748446bbSMel Gorman 			continue;
403748446bbSMel Gorman 
404748446bbSMel Gorman 		nr_migrate = cc->nr_migratepages;
405748446bbSMel Gorman 		migrate_pages(&cc->migratepages, compaction_alloc,
406748446bbSMel Gorman 						(unsigned long)cc, 0);
407748446bbSMel Gorman 		update_nr_listpages(cc);
408748446bbSMel Gorman 		nr_remaining = cc->nr_migratepages;
409748446bbSMel Gorman 
410748446bbSMel Gorman 		count_vm_event(COMPACTBLOCKS);
411748446bbSMel Gorman 		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
412748446bbSMel Gorman 		if (nr_remaining)
413748446bbSMel Gorman 			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
414*b7aba698SMel Gorman 		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
415*b7aba698SMel Gorman 						nr_remaining);
416748446bbSMel Gorman 
417748446bbSMel Gorman 		/* Release LRU pages not migrated */
418748446bbSMel Gorman 		if (!list_empty(&cc->migratepages)) {
419748446bbSMel Gorman 			putback_lru_pages(&cc->migratepages);
420748446bbSMel Gorman 			cc->nr_migratepages = 0;
421748446bbSMel Gorman 		}
422748446bbSMel Gorman 
423748446bbSMel Gorman 	}
424748446bbSMel Gorman 
425748446bbSMel Gorman 	/* Release free pages and check accounting */
426748446bbSMel Gorman 	cc->nr_freepages -= release_freepages(&cc->freepages);
427748446bbSMel Gorman 	VM_BUG_ON(cc->nr_freepages != 0);
428748446bbSMel Gorman 
429748446bbSMel Gorman 	return ret;
430748446bbSMel Gorman }
43176ab0f53SMel Gorman 
43256de7263SMel Gorman static unsigned long compact_zone_order(struct zone *zone,
43356de7263SMel Gorman 						int order, gfp_t gfp_mask)
43456de7263SMel Gorman {
43556de7263SMel Gorman 	struct compact_control cc = {
43656de7263SMel Gorman 		.nr_freepages = 0,
43756de7263SMel Gorman 		.nr_migratepages = 0,
43856de7263SMel Gorman 		.order = order,
43956de7263SMel Gorman 		.migratetype = allocflags_to_migratetype(gfp_mask),
44056de7263SMel Gorman 		.zone = zone,
44156de7263SMel Gorman 	};
44256de7263SMel Gorman 	INIT_LIST_HEAD(&cc.freepages);
44356de7263SMel Gorman 	INIT_LIST_HEAD(&cc.migratepages);
44456de7263SMel Gorman 
44556de7263SMel Gorman 	return compact_zone(zone, &cc);
44656de7263SMel Gorman }
44756de7263SMel Gorman 
4485e771905SMel Gorman int sysctl_extfrag_threshold = 500;
4495e771905SMel Gorman 
45056de7263SMel Gorman /**
45156de7263SMel Gorman  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
45256de7263SMel Gorman  * @zonelist: The zonelist used for the current allocation
45356de7263SMel Gorman  * @order: The order of the current allocation
45456de7263SMel Gorman  * @gfp_mask: The GFP mask of the current allocation
45556de7263SMel Gorman  * @nodemask: The allowed nodes to allocate from
45656de7263SMel Gorman  *
45756de7263SMel Gorman  * This is the main entry point for direct page compaction.
45856de7263SMel Gorman  */
45956de7263SMel Gorman unsigned long try_to_compact_pages(struct zonelist *zonelist,
46056de7263SMel Gorman 			int order, gfp_t gfp_mask, nodemask_t *nodemask)
46156de7263SMel Gorman {
46256de7263SMel Gorman 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
46356de7263SMel Gorman 	int may_enter_fs = gfp_mask & __GFP_FS;
46456de7263SMel Gorman 	int may_perform_io = gfp_mask & __GFP_IO;
46556de7263SMel Gorman 	unsigned long watermark;
46656de7263SMel Gorman 	struct zoneref *z;
46756de7263SMel Gorman 	struct zone *zone;
46856de7263SMel Gorman 	int rc = COMPACT_SKIPPED;
46956de7263SMel Gorman 
47056de7263SMel Gorman 	/*
47156de7263SMel Gorman 	 * Check whether it is worth even starting compaction. The order check is
47256de7263SMel Gorman 	 * made because we assume the page allocator can satisfy the "cheaper"
47356de7263SMel Gorman 	 * orders without taking special steps.
47456de7263SMel Gorman 	 */
47556de7263SMel Gorman 	if (order <= PAGE_ALLOC_COSTLY_ORDER || !may_enter_fs || !may_perform_io)
47656de7263SMel Gorman 		return rc;
47756de7263SMel Gorman 
47856de7263SMel Gorman 	count_vm_event(COMPACTSTALL);
47956de7263SMel Gorman 
48056de7263SMel Gorman 	/* Compact each zone in the list */
48156de7263SMel Gorman 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
48256de7263SMel Gorman 								nodemask) {
48356de7263SMel Gorman 		int fragindex;
48456de7263SMel Gorman 		int status;
48556de7263SMel Gorman 
48656de7263SMel Gorman 		/*
48756de7263SMel Gorman 		 * Watermarks for order-0 must be met for compaction. Note
48856de7263SMel Gorman 		 * the 2UL: during migration, copies of pages must be
48956de7263SMel Gorman 		 * allocated, so for a short time the memory footprint
49056de7263SMel Gorman 		 * is higher.
49156de7263SMel Gorman 		 */
49256de7263SMel Gorman 		watermark = low_wmark_pages(zone) + (2UL << order);
49356de7263SMel Gorman 		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
49456de7263SMel Gorman 			continue;
49556de7263SMel Gorman 
49656de7263SMel Gorman 		/*
49756de7263SMel Gorman 		 * fragmentation index determines if allocation failures are
49856de7263SMel Gorman 		 * due to low memory or external fragmentation
49956de7263SMel Gorman 		 *
50056de7263SMel Gorman 		 * index of -1 implies allocations might succeed depending
50156de7263SMel Gorman 		 * 	on watermarks
50256de7263SMel Gorman 		 * index towards 0 implies failure is due to lack of memory
50356de7263SMel Gorman 		 * index towards 1000 implies failure is due to fragmentation
50456de7263SMel Gorman 		 *
50556de7263SMel Gorman 		 * Only compact if a failure would be due to fragmentation.
50656de7263SMel Gorman 		 */
50756de7263SMel Gorman 		fragindex = fragmentation_index(zone, order);
5085e771905SMel Gorman 		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
50956de7263SMel Gorman 			continue;
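		/*
		 * Example (illustrative): with the default extfrag threshold
		 * of 500, fragindex == 250 is treated as a genuine shortage
		 * of memory and the zone is skipped, while fragindex == 800
		 * points at fragmentation and compaction goes ahead.
		 */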
51056de7263SMel Gorman 
51156de7263SMel Gorman 		if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) {
51256de7263SMel Gorman 			rc = COMPACT_PARTIAL;
51356de7263SMel Gorman 			break;
51456de7263SMel Gorman 		}
51556de7263SMel Gorman 
51656de7263SMel Gorman 		status = compact_zone_order(zone, order, gfp_mask);
51756de7263SMel Gorman 		rc = max(status, rc);
51856de7263SMel Gorman 
51956de7263SMel Gorman 		if (zone_watermark_ok(zone, order, watermark, 0, 0))
52056de7263SMel Gorman 			break;
52156de7263SMel Gorman 	}
52256de7263SMel Gorman 
52356de7263SMel Gorman 	return rc;
52456de7263SMel Gorman }
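/*
 * Illustrative sketch of a caller (assumed; not in this file): in this
 * series the page allocator's slow path (__alloc_pages_direct_compact()
 * in mm/page_alloc.c) invokes direct compaction roughly like
 *
 *	rc = try_to_compact_pages(zonelist, order, gfp_mask, nodemask);
 *	if (rc != COMPACT_SKIPPED)
 *		...retry the high-order allocation before direct reclaim...
 */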
52556de7263SMel Gorman 
52656de7263SMel Gorman 
52776ab0f53SMel Gorman /* Compact all zones within a node */
52876ab0f53SMel Gorman static int compact_node(int nid)
52976ab0f53SMel Gorman {
53076ab0f53SMel Gorman 	int zoneid;
53176ab0f53SMel Gorman 	pg_data_t *pgdat;
53276ab0f53SMel Gorman 	struct zone *zone;
53376ab0f53SMel Gorman 
53476ab0f53SMel Gorman 	if (nid < 0 || nid >= nr_node_ids || !node_online(nid))
53576ab0f53SMel Gorman 		return -EINVAL;
53676ab0f53SMel Gorman 	pgdat = NODE_DATA(nid);
53776ab0f53SMel Gorman 
53876ab0f53SMel Gorman 	/* Flush pending updates to the LRU lists */
53976ab0f53SMel Gorman 	lru_add_drain_all();
54076ab0f53SMel Gorman 
54176ab0f53SMel Gorman 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
54276ab0f53SMel Gorman 		struct compact_control cc = {
54376ab0f53SMel Gorman 			.nr_freepages = 0,
54476ab0f53SMel Gorman 			.nr_migratepages = 0,
54556de7263SMel Gorman 			.order = -1,
54676ab0f53SMel Gorman 		};
54776ab0f53SMel Gorman 
54876ab0f53SMel Gorman 		zone = &pgdat->node_zones[zoneid];
54976ab0f53SMel Gorman 		if (!populated_zone(zone))
55076ab0f53SMel Gorman 			continue;
55176ab0f53SMel Gorman 
55276ab0f53SMel Gorman 		cc.zone = zone;
55376ab0f53SMel Gorman 		INIT_LIST_HEAD(&cc.freepages);
55476ab0f53SMel Gorman 		INIT_LIST_HEAD(&cc.migratepages);
55576ab0f53SMel Gorman 
55676ab0f53SMel Gorman 		compact_zone(zone, &cc);
55776ab0f53SMel Gorman 
55876ab0f53SMel Gorman 		VM_BUG_ON(!list_empty(&cc.freepages));
55976ab0f53SMel Gorman 		VM_BUG_ON(!list_empty(&cc.migratepages));
56076ab0f53SMel Gorman 	}
56176ab0f53SMel Gorman 
56276ab0f53SMel Gorman 	return 0;
56376ab0f53SMel Gorman }
56476ab0f53SMel Gorman 
56576ab0f53SMel Gorman /* Compact all nodes in the system */
56676ab0f53SMel Gorman static int compact_nodes(void)
56776ab0f53SMel Gorman {
56876ab0f53SMel Gorman 	int nid;
56976ab0f53SMel Gorman 
57076ab0f53SMel Gorman 	for_each_online_node(nid)
57176ab0f53SMel Gorman 		compact_node(nid);
57276ab0f53SMel Gorman 
57376ab0f53SMel Gorman 	return COMPACT_COMPLETE;
57476ab0f53SMel Gorman }
57576ab0f53SMel Gorman 
57676ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */
57776ab0f53SMel Gorman int sysctl_compact_memory;
57876ab0f53SMel Gorman 
57976ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */
58076ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write,
58176ab0f53SMel Gorman 			void __user *buffer, size_t *length, loff_t *ppos)
58276ab0f53SMel Gorman {
58376ab0f53SMel Gorman 	if (write)
58476ab0f53SMel Gorman 		return compact_nodes();
58576ab0f53SMel Gorman 
58676ab0f53SMel Gorman 	return 0;
58776ab0f53SMel Gorman }
588ed4a6d7fSMel Gorman 
5895e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write,
5905e771905SMel Gorman 			void __user *buffer, size_t *length, loff_t *ppos)
5915e771905SMel Gorman {
5925e771905SMel Gorman 	proc_dointvec_minmax(table, write, buffer, length, ppos);
5935e771905SMel Gorman 
5945e771905SMel Gorman 	return 0;
5955e771905SMel Gorman }
5965e771905SMel Gorman 
597ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
598ed4a6d7fSMel Gorman ssize_t sysfs_compact_node(struct sys_device *dev,
599ed4a6d7fSMel Gorman 			struct sysdev_attribute *attr,
600ed4a6d7fSMel Gorman 			const char *buf, size_t count)
601ed4a6d7fSMel Gorman {
602ed4a6d7fSMel Gorman 	compact_node(dev->id);
603ed4a6d7fSMel Gorman 
604ed4a6d7fSMel Gorman 	return count;
605ed4a6d7fSMel Gorman }
606ed4a6d7fSMel Gorman static SYSDEV_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
607ed4a6d7fSMel Gorman 
608ed4a6d7fSMel Gorman int compaction_register_node(struct node *node)
609ed4a6d7fSMel Gorman {
610ed4a6d7fSMel Gorman 	return sysdev_create_file(&node->sysdev, &attr_compact);
611ed4a6d7fSMel Gorman }
612ed4a6d7fSMel Gorman 
613ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node)
614ed4a6d7fSMel Gorman {
615ed4a6d7fSMel Gorman 	return sysdev_remove_file(&node->sysdev, &attr_compact);
616ed4a6d7fSMel Gorman }
617ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */
618