/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting.
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */

	/* Account for isolated anon and file pages */
	unsigned long nr_anon;
	unsigned long nr_file;

	unsigned int order;		/* order a direct compactor needs */
	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
	struct zone *zone;
};
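
/*
 * Worked example of how the two scanners converge (illustrative numbers,
 * not taken from any real zone): for a zone spanning PFNs 0x10000-0x20000,
 * compact_zone() starts with migrate_pfn = 0x10000 and free_pfn = 0x20000.
 * Each pass isolates movable pages at migrate_pfn, isolates free pages
 * scanning backwards from free_pfn, and migrates the former into the
 * latter. migrate_pfn only grows and free_pfn only shrinks, so once
 * free_pfn <= migrate_pfn every movable page below free_pfn has had a
 * chance to move and compact_finished() reports COMPACT_COMPLETE.
 */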

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

/* Isolate free pages onto a private freelist. Must hold zone->lock */
static unsigned long isolate_freepages_block(struct zone *zone,
				unsigned long blockpfn,
				struct list_head *freelist)
{
	unsigned long zone_end_pfn, end_pfn;
	int total_isolated = 0;
	struct page *cursor;

	/* Get the last PFN we should scan for free pages at */
	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);

	/* Find the first usable PFN in the block to initialise the page cursor */
	for (; blockpfn < end_pfn; blockpfn++) {
		if (pfn_valid_within(blockpfn))
			break;
	}
	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn))
			continue;

		if (!PageBuddy(page))
			continue;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	return total_isolated;
}
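
/*
 * Example of the split arithmetic above (hypothetical PFNs): if the cursor
 * hits an order-2 buddy page at pfn P, split_free_page() breaks it into
 * four order-0 pages, so isolated == 4 and pfns P..P+3 are all added to
 * the private freelist. The "isolated - 1" adjustment then moves the scan
 * to P+3, and the loop increment advances it past the split page to P+4.
 */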

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE, allow migration */
	if (migratetype == MIGRATE_MOVABLE)
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
	high_pfn = low_pfn;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = isolate_freepages_block(zone, pfn, freelist);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated)
			high_pfn = max(high_pfn, pfn);
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	/* split_free_page does not map the pages */
	list_for_each_entry(page, freelist, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[NR_LRU_LISTS] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru) {
		int lru = page_lru_base_type(page);
		count[lru]++;
	}

	cc->nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
	cc->nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, cc->nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, cc->nr_file);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > inactive;
}
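
/*
 * Example (illustrative numbers): with 8000 pages on the zone's inactive
 * lists and 8500 already isolated by parallel reclaimers and compactors,
 * too_many_isolated() returns true and isolate_migratepages() below backs
 * off in congestion_wait() rather than pulling yet more pages off the LRU.
 */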

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static unsigned long isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;
	struct list_head *migratelist = &cc->migratepages;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return 0;
	}

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	spin_lock_irq(&zone->lru_lock);
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;
		if (!pfn_valid_within(low_pfn))
			continue;

		/* Get the page and skip if free */
		page = pfn_to_page(low_pfn);
		if (PageBuddy(page))
			continue;

		/* Try to isolate the page */
		if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
			continue;

		/* Successfully isolated */
		del_page_from_lru_list(zone, page, page_lru(page));
		list_add(&page->lru, migratelist);
		mem_cgroup_del_lru(page);
		cc->nr_migratepages++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}

	acct_isolated(zone, cc);

	spin_unlock_irq(&zone->lru_lock);
	cc->migrate_pfn = low_pfn;

	return cc->nr_migratepages;
}
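
/*
 * Migration is done in batches: the scan above stops once
 * COMPACT_CLUSTER_MAX pages are isolated (an assumption based on
 * contemporary headers is that this is SWAP_CLUSTER_MAX, i.e. 32 pages;
 * see linux/compaction.h), bounding how many pages sit withdrawn from the
 * LRU while migrate_pages() works on them.
 */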

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}
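
/*
 * Sketch of how this callback is consumed (simplified; the real call is in
 * compact_zone() below):
 *
 *	migrate_pages(&cc->migratepages, compaction_alloc,
 *			(unsigned long)cc, 0);
 *
 * migrate_pages() invokes compaction_alloc() once per page it migrates,
 * passing the compact_control back through the opaque "data" cookie so
 * the callback can refill cc->freepages on demand.
 */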

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

static int compact_finished(struct zone *zone,
						struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanners meet */
	if (cc->free_pfn <= cc->migrate_pfn)
		return COMPACT_COMPLETE;

	/*
	 * order == -1 means "compact the whole zone" (set by the sysctl
	 * path). There is no allocation to satisfy, and shifting by a
	 * negative order below would be undefined, so keep compacting
	 * until the scanners meet.
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone) + (1 << cc->order);
	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		/* Job done if page is free of the right migratetype */
		if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (order >= pageblock_order && zone->free_area[order].nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}
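
/*
 * Worked example of the watermark test above (illustrative numbers): for a
 * direct compactor with cc->order == 3, watermark is low_wmark_pages(zone)
 * plus 1 << 3 = 8 pages of headroom. Compaction keeps running until the
 * zone is above that level *and* a free page of order >= 3 with the right
 * migratetype exists, at which point COMPACT_PARTIAL tells the caller its
 * allocation should now succeed.
 */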

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	/* Set up to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;
	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
	cc->free_pfn &= ~(pageblock_nr_pages-1);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;

		if (!isolate_migratepages(zone, cc))
			continue;

		nr_migrate = cc->nr_migratepages;
		migrate_pages(&cc->migratepages, compaction_alloc,
						(unsigned long)cc, 0);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);

		/* Release LRU pages not migrated */
		if (!list_empty(&cc->migratepages)) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
		}
	}

	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
						int order, gfp_t gfp_mask)
{
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	return compact_zone(zone, &cc);
}

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	unsigned long watermark;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;

	/*
	 * Check whether it is worth even starting compaction. The order
	 * check is made because we assume the page allocator can satisfy
	 * the "cheaper" orders without taking special steps.
	 */
	if (order <= PAGE_ALLOC_COSTLY_ORDER || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int fragindex;
		int status;

		/*
		 * Watermarks for order-0 must be met for compaction. Note
		 * the 2UL. This is because during migration, copies of
		 * pages need to be allocated and for a short time, the
		 * footprint is higher
		 */
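		/*
		 * Worked example (illustrative): for an order-9 request
		 * (a 2MB huge page with 4K pages), the headroom is
		 * 2UL << 9 = 1024 pages, i.e. twice the allocation, to
		 * cover both the original pages and their copies while
		 * migration is in flight.
		 */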
		watermark = low_wmark_pages(zone) + (2UL << order);
		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
			continue;

		/*
		 * fragmentation index determines if allocation failures are
		 * due to low memory or external fragmentation
		 *
		 * index of -1 implies allocations might succeed depending
		 *	on watermarks
		 * index towards 0 implies failure is due to lack of memory
		 * index towards 1000 implies failure is due to fragmentation
		 *
		 * Only compact if a failure would be due to fragmentation.
		 */
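		/*
		 * Example (illustrative values): a fragindex of 700 means a
		 * failure would mostly be down to external fragmentation, so
		 * this zone is compacted; a fragindex of 300 means the zone
		 * is simply short of memory, so the check below skips it and
		 * leaves the problem to reclaim.
		 */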
		fragindex = fragmentation_index(zone, order);
		if (fragindex >= 0 && fragindex <= 500)
			continue;

		if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) {
			rc = COMPACT_PARTIAL;
			break;
		}

		status = compact_zone_order(zone, order, gfp_mask);
		rc = max(status, rc);

		if (zone_watermark_ok(zone, order, watermark, 0, 0))
			break;
	}

	return rc;
}
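
/*
 * Sketch of a caller (simplified, with hypothetical surrounding variables;
 * the real direct-compaction call site is in the page allocator's slow
 * path in mm/page_alloc.c):
 *
 *	unsigned long rc;
 *
 *	rc = try_to_compact_pages(zonelist, order, gfp_mask, nodemask);
 *	if (rc != COMPACT_SKIPPED) {
 *		// Compaction did some work; retry the allocation once.
 *		page = get_page_from_freelist(...);
 *		if (page)
 *			return page;
 *	}
 */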

/* Compact all zones within a node */
static int compact_node(int nid)
{
	int zoneid;
	pg_data_t *pgdat;
	struct zone *zone;

	if (nid < 0 || nid >= nr_node_ids || !node_online(nid))
		return -EINVAL;
	pgdat = NODE_DATA(nid);

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct compact_control cc = {
			.nr_freepages = 0,
			.nr_migratepages = 0,
			.order = -1,
		};

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		compact_zone(zone, &cc);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}

	return 0;
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}
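
/*
 * Usage example: with this handler wired up to the compact_memory sysctl,
 * writing any value triggers a full compaction of every online node, e.g.:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 */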

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct sys_device *dev,
			struct sysdev_attribute *attr,
			const char *buf, size_t count)
{
	compact_node(dev->id);

	return count;
}
static SYSDEV_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
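
/*
 * Usage example: once compaction_register_node() below has run for a node,
 * that single node can be compacted from userspace, e.g.:
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */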

int compaction_register_node(struct node *node)
{
	return sysdev_create_file(&node->sysdev, &attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return sysdev_remove_file(&node->sysdev, &attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */