xref: /openbmc/linux/mm/compaction.c (revision 76ab0f530e4a01d4dc20cdc1d5e87753c579dc18)
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting.
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include "internal.h"

/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */

	/* Account for isolated anon and file pages */
	unsigned long nr_anon;
	unsigned long nr_file;

	struct zone *zone;
};

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}
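
/*
 * Note: the count returned above is used by compact_zone() below to balance
 * cc->nr_freepages back to zero once a compaction run finishes; see the
 * VM_BUG_ON() there.
 */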

/* Isolate free pages onto a private freelist. Must hold zone->lock */
static unsigned long isolate_freepages_block(struct zone *zone,
				unsigned long blockpfn,
				struct list_head *freelist)
{
	unsigned long zone_end_pfn, end_pfn;
	int total_isolated = 0;
	struct page *cursor;

	/* Get the last PFN we should scan for free pages at */
	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);

	/* Find the first usable PFN in the block to initialise the page cursor */
	for (; blockpfn < end_pfn; blockpfn++) {
		if (pfn_valid_within(blockpfn))
			break;
	}
	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn))
			continue;

		if (!PageBuddy(page))
			continue;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	return total_isolated;
}
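
/*
 * A note on split_free_page() (mm/page_alloc.c): as used above, it removes
 * a free page from the buddy lists and splits it into order-0 pages,
 * returning the number of base pages obtained (0 on failure). That is why
 * the caller links 'isolated' consecutive struct pages onto the freelist
 * and advances the scan cursor past them.
 */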

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE, allow migration */
	if (migratetype == MIGRATE_MOVABLE)
		return true;

	/* Otherwise skip the block */
	return false;
}
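
/*
 * In effect, MIGRATE_UNMOVABLE and MIGRATE_RECLAIMABLE pageblocks only
 * qualify as migration targets when the block is entirely free (a buddy
 * page of at least pageblock_order), so populating it with movable pages
 * cannot worsen fragmentation within a partially-used block.
 */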

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
	high_pfn = low_pfn;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = isolate_freepages_block(zone, pfn, freelist);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator.
		 */
		if (isolated)
			high_pfn = max(high_pfn, pfn);
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	/* split_free_page does not map the pages */
	list_for_each_entry(page, freelist, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}
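
/*
 * The arch_alloc_page()/kernel_map_pages() pass above re-applies the hooks
 * the page allocator would normally run when handing out pages (for
 * example, re-establishing the kernel mapping under CONFIG_DEBUG_PAGEALLOC),
 * since these pages left the buddy lists without going through the regular
 * allocation path.
 */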

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[NR_LRU_LISTS] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru) {
		int lru = page_lru_base_type(page);
		count[lru]++;
	}

	cc->nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
	cc->nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, cc->nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, cc->nr_file);
}
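
/*
 * Keeping NR_ISOLATED_ANON/NR_ISOLATED_FILE accurate matters beyond this
 * file: too_many_isolated() below reads them back, as does the similar
 * throttling check in the reclaim path.
 */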

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > inactive;
}

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static unsigned long isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;
	struct list_head *migratelist = &cc->migratepages;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return 0;
	}

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated.
	 */
	while (unlikely(too_many_isolated(zone))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	spin_lock_irq(&zone->lru_lock);
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;
		if (!pfn_valid_within(low_pfn))
			continue;

		/* Get the page and skip it if free */
		page = pfn_to_page(low_pfn);
		if (PageBuddy(page))
			continue;

		/* Try to isolate the page */
		if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
			continue;

		/* Successfully isolated */
		del_page_from_lru_list(zone, page, page_lru(page));
		list_add(&page->lru, migratelist);
		mem_cgroup_del_lru(page);
		cc->nr_migratepages++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}

	acct_isolated(zone, cc);

	spin_unlock_irq(&zone->lru_lock);
	cc->migrate_pfn = low_pfn;

	return cc->nr_migratepages;
}
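
/*
 * COMPACT_CLUSTER_MAX caps how many pages one call isolates so that
 * zone->lru_lock is not held for too long at a stretch; it is a small
 * batch size (believed to be defined alongside SWAP_CLUSTER_MAX in
 * mm/internal.h at this point in the tree).
 */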

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}
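
/*
 * compaction_alloc() is handed to migrate_pages() as its page-allocation
 * callback (see compact_zone() below), with 'data' carrying the
 * compact_control pointer cast to an unsigned long per the callback
 * convention.
 */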

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

static int compact_finished(struct zone *zone,
						struct compact_control *cc)
{
	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanners meet */
	if (cc->free_pfn <= cc->migrate_pfn)
		return COMPACT_COMPLETE;

	return COMPACT_CONTINUE;
}
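
/*
 * The return codes are the COMPACT_* values from linux/compaction.h:
 * COMPACT_CONTINUE keeps compact_zone()'s loop running, while both
 * COMPACT_PARTIAL (fatal signal pending) and COMPACT_COMPLETE (the two
 * scanners met) terminate it.
 */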

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	/* Set up to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;
	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
	cc->free_pfn &= ~(pageblock_nr_pages-1);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;

		if (!isolate_migratepages(zone, cc))
			continue;

		nr_migrate = cc->nr_migratepages;
		migrate_pages(&cc->migratepages, compaction_alloc,
						(unsigned long)cc, 0);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);

		/* Release LRU pages not migrated */
		if (!list_empty(&cc->migratepages)) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
		}
	}

	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}
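
/*
 * In summary: compact_zone() starts the migrate scanner at the bottom of
 * the zone and the free scanner at the top (rounded down to a pageblock
 * boundary), then repeatedly isolates up to a pageblock's worth of movable
 * pages and migrates them towards the end of the zone until
 * compact_finished() reports that the scanners have met.
 */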

/* Compact all zones within a node */
static int compact_node(int nid)
{
	int zoneid;
	pg_data_t *pgdat;
	struct zone *zone;

	if (nid < 0 || nid >= nr_node_ids || !node_online(nid))
		return -EINVAL;
	pgdat = NODE_DATA(nid);

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct compact_control cc = {
			.nr_freepages = 0,
			.nr_migratepages = 0,
		};

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		compact_zone(zone, &cc);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}

	return 0;
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused; any write compacts all memory */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}
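
/*
 * Usage sketch, assuming the ctl_table entry registered elsewhere exposes
 * this handler as /proc/sys/vm/compact_memory:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * Any written value triggers compaction of every online node; reading the
 * file has no side effects.
 */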