xref: /openbmc/linux/mm/migrate.c (revision 4c28f811)
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/swapops.h>

#include "internal.h"

/* The maximum number of pages to take off the LRU for migration */
#define MIGRATE_CHUNK_SIZE 256

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * Isolate one page from the LRU lists. If successful, put it onto
 * the indicated list with an elevated page count.
 *
 * Result:
 *  -EBUSY: page not on LRU list
 *  0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			ret = 0;
			get_page(page);
			ClearPageLRU(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
			list_add_tail(&page->lru, pagelist);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * migrate_prep() needs to be called after we have compiled the list of pages
 * to be migrated using isolate_lru_page() but before we begin a series of calls
 * to migrate_pages().
 */
int migrate_prep(void)
{
	/* Must have swap device for migration */
	if (nr_swap_pages <= 0)
		return -ENODEV;

	/*
	 * Drain the per-cpu LRU-add caches so that pages can be found on
	 * the LRU lists and isolated. Note that pages may be moved off the
	 * LRU after we have drained them. Those pages will fail to migrate
	 * like other pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

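/*
 * Illustrative sketch (not part of the original source, kept out of the
 * build): how a caller is expected to combine the helpers above --
 * isolate_lru_page() to build the list, migrate_prep() before migration,
 * migrate_pages_to() (defined further down in this file) to do the work,
 * and putback_lru_pages() to return leftovers to the LRU. The function
 * name and its arguments are hypothetical.
 */
#if 0
static int example_move_page_to_node(struct page *page, int dest_node)
{
	LIST_HEAD(pagelist);
	int err;

	/* Take the page off the LRU; it is now held with an extra reference. */
	err = isolate_lru_page(page, &pagelist);
	if (err)
		return err;

	/* Drain per-cpu LRU caches and check that swap is available. */
	err = migrate_prep();
	if (err) {
		putback_lru_pages(&pagelist);
		return err;
	}

	/*
	 * Allocate target pages on dest_node and migrate the list.
	 * Returns the number of pages not migrated or an error code.
	 */
	err = migrate_pages_to(&pagelist, NULL, dest_node);

	/* Anything still on the list failed; give it back to the LRU. */
	putback_lru_pages(&pagelist);
	return err;
}
#endif
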
static inline void move_to_lru(struct page *page)
{
	list_del(&page->lru);
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else {
		lru_cache_add(page);
	}
	put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * Returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		move_to_lru(page);
		count++;
	}
	return count;
}

/*
 * Non-migratable page.
 */
int fail_migrate_page(struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Swap out a single page.
 * The page is locked upon entry and unlocked on exit.
 */
static int swap_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (page_mapped(page) && mapping)
		if (try_to_unmap(page, 1) != SWAP_SUCCESS)
			goto unlock_retry;

	if (PageDirty(page)) {
		/* Page is dirty, try to write it out here */
		switch (pageout(page, mapping)) {
		case PAGE_KEEP:
		case PAGE_ACTIVATE:
			goto unlock_retry;

		case PAGE_SUCCESS:
			goto retry;

		case PAGE_CLEAN:
			; /* try to free the page below */
		}
	}

	if (PagePrivate(page)) {
		if (!try_to_release_page(page, GFP_KERNEL) ||
		    (!mapping && page_count(page) == 1))
			goto unlock_retry;
	}

	if (remove_mapping(mapping, page)) {
		/* Success */
		unlock_page(page);
		return 0;
	}

unlock_retry:
	unlock_page(page);

retry:
	return -EAGAIN;
}

/*
 * Remove references for a page and establish the new page with the correct
 * basic settings to be able to stop accesses to the page.
 */
int migrate_page_remove_references(struct page *newpage,
				struct page *page, int nr_refs)
{
	struct address_space *mapping = page_mapping(page);
	struct page **radix_pointer;

	/*
	 * Avoid doing any of the following work if the page count
	 * indicates that the page is in use or truncate has removed
	 * the page.
	 */
	if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
		return -EAGAIN;

	/*
	 * Establish swap ptes for anonymous pages or destroy pte
	 * maps for files.
	 *
	 * In order to reestablish file backed mappings the fault handlers
	 * will take the radix tree_lock which may then be used to stop
	 * processes from accessing this page until the new page is ready.
	 *
	 * A process accessing via a swap pte (an anonymous page) will take a
	 * page_lock on the old page which will block the process until the
	 * migration attempt is complete. At that time the PageSwapCache bit
	 * will be examined. If the page was migrated then the PageSwapCache
	 * bit will be clear and the operation to retrieve the page will be
	 * retried which will find the new page in the radix tree. Then a new
	 * direct mapping may be generated based on the radix tree contents.
	 *
	 * If the page was not migrated then the PageSwapCache bit
	 * is still set and the operation may continue.
	 */
	if (try_to_unmap(page, 1) == SWAP_FAIL)
		/* A vma has VM_LOCKED set -> permanent failure */
		return -EPERM;

	/*
	 * Give up if we were unable to remove all mappings.
	 */
	if (page_mapcount(page))
		return -EAGAIN;

	write_lock_irq(&mapping->tree_lock);

	radix_pointer = (struct page **)radix_tree_lookup_slot(
						&mapping->page_tree,
						page_index(page));

	if (!page_mapping(page) || page_count(page) != nr_refs ||
			*radix_pointer != page) {
		write_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 *
	 * Certain minimal information about a page must be available
	 * in order for other subsystems to properly handle the page if they
	 * find it through the radix tree update before we are finished
	 * copying the page.
	 */
	get_page(newpage);
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	*radix_pointer = newpage;
	__put_page(page);
	write_unlock_irq(&mapping->tree_lock);

	return 0;
}
EXPORT_SYMBOL(migrate_page_remove_references);

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		set_page_dirty(newpage);
	}

	ClearPageSwapCache(page);
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}
EXPORT_SYMBOL(migrate_page_copy);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

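	/*
	 * The two references expected here (nr_refs == 2) are the radix
	 * tree (page/swap cache) entry and the reference taken when the
	 * page was isolated from the LRU by the migration caller.
	 */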
	rc = migrate_page_remove_references(newpage, page, 2);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);

	/*
	 * Remove auxiliary swap entries and replace
	 * them with real ptes.
	 *
	 * Note that a real pte entry will allow processes that are not
	 * waiting on the page lock to use the new page via the page tables
	 * before the new page is unlocked.
	 */
	remove_from_swap(newpage);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

/*
 * migrate_pages
 *
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the isolated pages
 * can be moved to. If the second list is NULL then all
 * pages are swapped out instead (see the sketch after this
 * function for that form of the call).
 *
 * The function returns after 10 passes, when the "to" list has
 * become empty, or when no retryable pages remain.
 *
 * Return: The number of pages that failed permanently plus the
 * number that were still awaiting retry when we gave up.
 */
int migrate_pages(struct list_head *from, struct list_head *to,
		  struct list_head *moved, struct list_head *failed)
{
	int retry;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

redo:
	retry = 0;

	list_for_each_entry_safe(page, page2, from, lru) {
		struct page *newpage = NULL;
		struct address_space *mapping;

		cond_resched();

		rc = 0;
		if (page_count(page) == 1)
			/* page was freed from under us. So we are done. */
			goto next;

		if (to && list_empty(to))
			break;

		/*
		 * Skip locked pages during the first few passes to give the
		 * functions holding the lock time to release the page. Later we
		 * use lock_page() to have a higher chance of acquiring the
		 * lock.
		 */
		rc = -EAGAIN;
		if (pass > 2)
			lock_page(page);
		else
			if (TestSetPageLocked(page))
				goto next;

		/*
		 * Only wait on writeback if we have already done a pass where
		 * we may have triggered writeouts for lots of pages.
		 */
		if (pass > 0) {
			wait_on_page_writeback(page);
		} else {
			if (PageWriteback(page))
				goto unlock_page;
		}

		/*
		 * Anonymous pages must have swap cache references otherwise
		 * the information contained in the page maps cannot be
		 * preserved.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!add_to_swap(page, GFP_KERNEL)) {
				rc = -ENOMEM;
				goto unlock_page;
			}
		}

		if (!to) {
			rc = swap_page(page);
			goto next;
		}

		newpage = lru_to_page(to);
		lock_page(newpage);

		/*
		 * Pages are properly locked and writeback is complete.
		 * Try to migrate the page.
		 */
		mapping = page_mapping(page);
		if (!mapping)
			goto unlock_both;

		if (mapping->a_ops->migratepage) {
			/*
			 * Most pages have a mapping and most filesystems
			 * should provide a migration function. Anonymous
			 * pages are part of swap space which also has its
			 * own migration function. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migratepage(newpage, page);
			goto unlock_both;
		}

		/* Make sure the dirty bit is up to date */
		if (try_to_unmap(page, 1) == SWAP_FAIL) {
			rc = -EPERM;
			goto unlock_both;
		}

		if (page_mapcount(page)) {
			rc = -EAGAIN;
			goto unlock_both;
		}

		/*
		 * Default handling if a filesystem does not provide
		 * a migration function. We can only migrate clean
		 * pages so try to write out any dirty pages first.
		 */
		if (PageDirty(page)) {
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
			case PAGE_ACTIVATE:
				goto unlock_both;

			case PAGE_SUCCESS:
				unlock_page(newpage);
				goto next;

			case PAGE_CLEAN:
				; /* try to migrate the page below */
			}
		}

		/*
		 * Buffers are managed in a filesystem specific way.
		 * We must have no buffers or drop them.
		 */
		if (!page_has_buffers(page) ||
		    try_to_release_page(page, GFP_KERNEL)) {
			rc = migrate_page(newpage, page);
			goto unlock_both;
		}

		/*
		 * On early passes with mapped pages simply retry. There
		 * may be a lock held for some buffers that may go away.
		 * On later passes swap the page out instead.
		 */
		if (pass > 4) {
			/*
			 * Persistently unable to drop buffers. As a
			 * measure of last resort we fall back to
			 * swap_page().
			 */
			unlock_page(newpage);
			newpage = NULL;
			rc = swap_page(page);
			goto next;
		}

unlock_both:
		unlock_page(newpage);

unlock_page:
		unlock_page(page);

next:
		if (rc == -EAGAIN) {
			retry++;
		} else if (rc) {
			/* Permanent failure */
			list_move(&page->lru, failed);
			nr_failed++;
		} else {
			if (newpage) {
				/* Successful migration. Return page to LRU */
				move_to_lru(newpage);
			}
			list_move(&page->lru, moved);
		}
	}
	if (retry && pass++ < 10)
		goto redo;

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return nr_failed + retry;
}

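/*
 * Illustrative sketch (not part of the original source, kept out of the
 * build): the swap-out form of migrate_pages() mentioned in the comment
 * above -- passing a NULL "to" list swaps the isolated pages out instead
 * of moving them. The function name is hypothetical.
 */
#if 0
static int example_swap_out_list(struct list_head *isolated)
{
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int left;

	/* With to == NULL every page on "isolated" is swapped out. */
	left = migrate_pages(isolated, NULL, &moved, &failed);

	/* Return all pages, whatever their fate, to the LRU. */
	putback_lru_pages(&moved);
	putback_lru_pages(&failed);
	putback_lru_pages(isolated);

	return left;
}
#endif
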
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct page *newpage, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct buffer_head *bh, *head;
	int rc;

	if (!mapping)
		return -EAGAIN;

	if (!page_has_buffers(page))
		return migrate_page(newpage, page);

	head = page_buffers(page);

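	/*
	 * nr_refs == 3: as in migrate_page() the radix tree entry and the
	 * LRU isolation reference, plus the extra page reference held by
	 * the buffer_heads attached through page_private.
	 */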
	rc = migrate_page_remove_references(newpage, page, 3);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);

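/*
 * Illustrative sketch (not part of the original source, kept out of the
 * build): how an address_space selects its migration method. A filesystem
 * whose pages carry buffer_heads would typically point ->migratepage at
 * buffer_migrate_page(); one whose pages have no private data can use
 * migrate_page() directly; one that must never have its pages moved can
 * use fail_migrate_page(). The structure name below is hypothetical.
 */
#if 0
static struct address_space_operations example_aops = {
	/* ... the filesystem's usual readpage/writepage methods ... */
	.migratepage	= buffer_migrate_page,
};
#endif
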
/*
 * Migrate the list 'pagelist' of pages to a certain destination.
 *
 * Specify the destination with either a non-NULL vma or dest >= 0.
 * Return the number of pages not migrated or an error code.
 */
int migrate_pages_to(struct list_head *pagelist,
			struct vm_area_struct *vma, int dest)
{
	LIST_HEAD(newlist);
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int err = 0;
	unsigned long offset = 0;
	int nr_pages;
	struct page *page;
	struct list_head *p;

redo:
	nr_pages = 0;
	list_for_each(p, pagelist) {
		if (vma) {
			/*
			 * The address passed to alloc_page_vma is used to
			 * generate the proper interleave behavior. We fake
			 * the address here by an increasing offset in order
			 * to get the proper distribution of pages.
			 *
			 * No decision has been made as to which page
			 * a certain old page is moved to so we cannot
			 * specify the correct address.
			 */
			page = alloc_page_vma(GFP_HIGHUSER, vma,
					offset + vma->vm_start);
			offset += PAGE_SIZE;
		} else
			page = alloc_pages_node(dest, GFP_HIGHUSER, 0);

		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		list_add_tail(&page->lru, &newlist);
		nr_pages++;
		if (nr_pages > MIGRATE_CHUNK_SIZE)
			break;
	}
	err = migrate_pages(pagelist, &newlist, &moved, &failed);

	putback_lru_pages(&moved);	/* Call release pages instead ?? */

	if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
		goto redo;
out:
	/* Return leftover allocated pages */
	while (!list_empty(&newlist)) {
		page = list_entry(newlist.next, struct page, lru);
		list_del(&page->lru);
		__free_page(page);
	}
	list_splice(&failed, pagelist);
	if (err < 0)
		return err;

	/* Calculate number of leftover pages */
	nr_pages = 0;
	list_for_each(p, pagelist)
		nr_pages++;
	return nr_pages;
}