/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/gfp.h>

#include <asm/tlbflush.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}
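
/*
 * Editor's sketch (hypothetical helper, not part of the original file):
 * a caller typically runs migrate_prep() once, then isolates each page
 * before handing the list to migrate_pages().  This mirrors what
 * do_move_page_to_node_array() does further down in this file.
 */
static int __maybe_unused example_isolate_for_migration(struct page *page,
					struct list_head *pagelist)
{
	/* isolate_lru_page() returns 0 on success and takes a page ref */
	int err = isolate_lru_page(page);

	if (!err) {
		list_add_tail(&page->lru, pagelist);
		inc_zone_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_cache(page));
	}
	return err;
}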

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = &mm->page_table_lock;
	} else {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto out;

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud))
			goto out;

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd))
			goto out;

		ptep = pte_offset_map(pmd, addr);

		if (!is_swap_pte(*ptep)) {
			pte_unmap(ptep);
			goto out;
		}

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new))
		pte = pte_mkhuge(pte);
#endif
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}
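
/*
 * For orientation (editor's note): the migration entry removed above is
 * installed by try_to_unmap() with TTU_MIGRATION, roughly (see
 * try_to_unmap_one() in mm/rmap.c):
 *
 *	entry = make_migration_entry(page, pte_write(pteval));
 *	set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 *
 * so that faults on the address wait in migration_entry_wait() below
 * until remove_migration_pte() re-establishes a real pte.
 */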

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement step of page migration has
	 * started, page_count *must* be zero. And, we don't want to call
	 * wait_on_page_locked() against a page without get_page().
	 * So we use get_page_unless_zero() here. Even if it fails, the
	 * page fault will occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}
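
/*
 * Editor's sketch of the caller side: do_swap_page() in mm/memory.c
 * checks for a migration entry before treating the pte as real swap,
 * approximately:
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry))
 *			migration_entry_wait(mm, pmd, address);
 *		...
 *	}
 *
 * The faulting thread sleeps on the old page's lock and retries the
 * fault once migration unlocks it.
 */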

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
			(struct page *)radix_tree_deref_slot(pslot) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);
	/*
	 * Drop cache reference from old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}
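
/*
 * Worked example (editor's note): by the time the function above runs,
 * try_to_unmap() has already replaced every pte, so the only references
 * left on a pagecache page are the radix tree's, the isolating
 * caller's, and possibly PagePrivate's buffer heads:
 * expected_count = 2 + page_has_private(page).  Any extra reference,
 * e.g. a concurrent pagecache lookup or O_DIRECT in flight, makes
 * page_freeze_refs() fail and the attempt is retried via -EAGAIN.
 */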

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
	    (struct page *)radix_tree_deref_slot(pslot) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);

	__put_page(page);

	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);

	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
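
/*
 * Editor's sketch: a filesystem whose pages carry no private data can
 * opt in to migration by pointing its address_space_operations at
 * migrate_page() directly (the ex_* names are hypothetical):
 *
 *	static const struct address_space_operations ex_aops = {
 *		.readpage	= ex_readpage,
 *		.writepage	= ex_writepage,
 *		.migratepage	= migrate_page,
 *	};
 *
 * Without a .migratepage method, such pages would take the slower
 * fallback_migrate_page() path below.
 */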

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif
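
/*
 * Editor's note: block-backed filesystems that attach buffer heads
 * point .migratepage at buffer_migrate_page() instead; ext2, for
 * example, sets
 *
 *	.migratepage	= buffer_migrate_page,
 *
 * in its address_space_operations.
 */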

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
						int remap_swapcache)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems
		 * should provide a migration function. Anonymous
		 * pages are part of swap space which also has its
		 * own migration function. This is the most common
		 * path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page);
	else
		rc = fallback_migrate_page(mapping, newpage, page);

	if (rc) {
		newpage->mapping = NULL;
	} else {
		if (remap_swapcache)
			remove_migration_ptes(page, newpage);
	}

	unlock_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, int offlining)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int remap_swapcache = 1;
	int rcu_locked = 0;
	int charge = 0;
	struct mem_cgroup *mem = NULL;
	struct anon_vma *anon_vma = NULL;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto move_newpage;
	}

	/* prepare cgroup just returns 0 or -ENOMEM */
	rc = -EAGAIN;

	if (!trylock_page(page)) {
		if (!force)
			goto move_newpage;
		lock_page(page);
	}

	/*
	 * Only memory hotplug's offline_pages() caller has locked out KSM,
	 * and can safely migrate a KSM page.  The other cases have skipped
	 * PageKsm along with PageReserved - but it is only now when we have
	 * the page lock that we can be certain it will not go KSM beneath us
	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
	 * its pagecount raised, but only here do we take the page lock which
	 * serializes that).
	 */
	if (PageKsm(page) && !offlining) {
		rc = -EBUSY;
		goto unlock;
	}

	/* charge against new page */
	charge = mem_cgroup_prepare_migration(page, newpage, &mem);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto unlock;
	}
	BUG_ON(charge);

	if (PageWriteback(page)) {
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * try_to_unmap() drops page->mapcount to 0 here. In this case,
	 * we cannot notice that the anon_vma is freed while we migrate
	 * a page. This rcu_read_lock() delays freeing the anon_vma
	 * pointer until the end of migration. File cache pages are no
	 * problem because of page_lock(). File caches may use
	 * writepage() or lock_page() in migration, so only anon pages
	 * need care here.
	 */
	if (PageAnon(page)) {
		rcu_read_lock();
		rcu_locked = 1;

		/* Determine how to safely use anon_vma */
		if (!page_mapped(page)) {
			if (!PageSwapCache(page))
				goto rcu_unlock;

			/*
			 * We cannot be sure that the anon_vma of an unmapped
			 * swapcache page is safe to use because we don't
			 * know in advance if the VMA that this page belonged
			 * to still exists. If the VMA and others sharing the
			 * data have been freed, then the anon_vma could
			 * already be invalid.
			 *
			 * To avoid this possibility, swapcache pages get
			 * migrated but are not remapped when migration
			 * completes
			 */
			remap_swapcache = 0;
		} else {
			/*
			 * Take a reference count on the anon_vma if the
			 * page is mapped so that it is guaranteed to
			 * exist when the page is remapped later
			 */
			anon_vma = page_anon_vma(page);
			get_anon_vma(anon_vma);
		}
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		if (!PageAnon(page) && page_has_private(page)) {
			/*
			 * Go direct to try_to_free_buffers() here because
			 * a) that's what try_to_release_page() would do anyway
			 * b) we may be under rcu_read_lock() here, so we can't
			 *    use GFP_KERNEL which is what try_to_release_page()
			 *    needs to be effective.
			 */
			try_to_free_buffers(page);
			goto rcu_unlock;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache);

	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);
rcu_unlock:

	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		drop_anon_vma(anon_vma);

	if (rcu_locked)
		rcu_read_unlock();
uncharge:
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage);
unlock:
	unlock_page(page);

	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}

move_newpage:

	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference count of the head page is 512 and a
 * bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				unsigned long private, struct page *hpage,
				int force, int offlining)
{
	int rc = 0;
	int *result = NULL;
	struct page *new_hpage = get_new_page(hpage, private, &result);
	int rcu_locked = 0;
	struct anon_vma *anon_vma = NULL;

	if (!new_hpage)
		return -ENOMEM;

	rc = -EAGAIN;

	if (!trylock_page(hpage)) {
		if (!force)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage)) {
		rcu_read_lock();
		rcu_locked = 1;

		if (page_mapped(hpage)) {
			anon_vma = page_anon_vma(hpage);
			atomic_inc(&anon_vma->external_refcount);
		}
	}

	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, 1);

	if (rc)
		remove_migration_ptes(hpage, hpage);

	if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount,
					    &anon_vma->lock)) {
		int empty = list_empty(&anon_vma->head);
		spin_unlock(&anon_vma->lock);
		if (empty)
			anon_vma_free(anon_vma);
	}

	if (rcu_locked)
		rcu_read_unlock();
out:
	unlock_page(hpage);

	if (rc != -EAGAIN) {
		list_del(&hpage->lru);
		put_page(hpage);
	}

	put_page(new_hpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that, given a page to be migrated and the private data, determines
 * the target of the move and allocates the new page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore.
 * Caller should call putback_lru_pages to return pages to the LRU
 * or free list.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, int offlining)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2, offlining);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	if (rc)
		return rc;

	return nr_failed + retry;
}
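
/*
 * Editor's sketch of a minimal get_new_page callback and call site
 * (the ex_* names are hypothetical).  migrate_pages() passes 'private'
 * through to the callback unchanged; pages it could not migrate stay
 * on the list, so the caller puts them back on failure, as
 * do_move_page_to_node_array() does below.
 */
static struct page * __maybe_unused ex_new_page(struct page *page,
					unsigned long private, int **result)
{
	int nid = (int)private;		/* target node chosen by the caller */

	return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}

static int __maybe_unused ex_migrate_list_to_node(struct list_head *pagelist,
						  int nid)
{
	int err = migrate_pages(pagelist, ex_new_page, (unsigned long)nid, 0);

	if (err)
		putback_lru_pages(pagelist);
	return err;
}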

int migrate_huge_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, int offlining)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int rc;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move_huge_page(get_new_page,
					private, page, pass > 2, offlining);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:

	list_for_each_entry_safe(page, page2, from, lru)
		put_page(page);

	if (rc)
		return rc;

	return nr_failed + retry;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_exact_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		/* Use PageReserved to check for zero page */
		if (PageReserved(page) || PageKsm(page))
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm, 0);
		if (err)
			putback_lru_pages(&pagelist);
	}

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status values.
 */
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	nodemask_t task_nodes;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	task_nodes = cpuset_mems_allowed(task);

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of the page_to_node array in a page,
	 * but keep the last one as a marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_HIGH_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker for this chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}

/*
 * Determine the nodes of an array of pages and store them in an
 * array of status values.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page) || PageKsm(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store them in a
 * user array of status values.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	if (nodes) {
		err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
				    flags);
	} else {
		err = do_pages_stat(mm, nr_pages, pages, status);
	}

out:
	mmput(mm);
	return err;
}
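
/*
 * Editor's sketch: userspace reaches this syscall through the
 * move_pages(2) wrapper in libnuma's <numaif.h>.  A minimal
 * (hypothetical) caller that moves one of its own pages to node 1 and
 * reads back the result:
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	// pid 0 means the calling process; passing nodes == NULL
 *	// instead would only query placement into status[].
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * On return each status[i] holds the page's node or a negative errno,
 * exactly as filled in by do_pages_move()/do_pages_stat() above.
 */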

/*
 * Call the migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
#endif