// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"

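/*
 * isolate_movable_page() - try to isolate a non-LRU movable page.
 * @page: page to isolate
 * @mode: isolation mode passed on to the driver's ->isolate_page()
 *
 * Takes a reference and the page lock, verifies the page is still movable
 * and not already isolated, and asks the owning driver to isolate it.
 *
 * Returns true if the page was isolated, false otherwise.
 */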
bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct folio *folio = folio_get_nontail_page(page);
	const struct movable_operations *mops;

	/*
	 * Avoid burning cycles with pages that are still under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount, preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of releasing
	 * this page, thus avoiding a nasty leakage.
	 */
	if (!folio)
		goto out;

	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
	smp_rmb();
	/*
	 * Check the movable flag before taking the page lock because
	 * we use non-atomic bitops on newly allocated page flags, so
	 * unconditionally grabbing the lock ruins the page owner's side.
	 */
	if (unlikely(!__folio_test_movable(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
	smp_rmb();
	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;

	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against releasing a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!folio_trylock(folio)))
		goto out_putfolio;

	if (!folio_test_movable(folio) || folio_test_isolated(folio))
		goto out_no_isolated;

	mops = folio_movable_ops(folio);
	VM_BUG_ON_FOLIO(!mops, folio);

	if (!mops->isolate_page(&folio->page, mode))
		goto out_no_isolated;

	/* Drivers shouldn't use the PG_isolated bit of page->flags */
	WARN_ON_ONCE(folio_test_isolated(folio));
	folio_set_isolated(folio);
	folio_unlock(folio);

	return true;

out_no_isolated:
	folio_unlock(folio);
out_putfolio:
	folio_put(folio);
out:
	return false;
}

static void putback_movable_folio(struct folio *folio)
{
	const struct movable_operations *mops = folio_movable_ops(folio);

	mops->putback_page(&folio->page);
	folio_clear_isolated(folio);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
	struct folio *folio;
	struct folio *folio2;

	list_for_each_entry_safe(folio, folio2, l, lru) {
		if (unlikely(folio_test_hugetlb(folio))) {
			folio_putback_active_hugetlb(folio);
			continue;
		}
		list_del(&folio->lru);
		/*
		 * We isolated a non-LRU movable folio, so here we can use
		 * __PageMovable because an LRU folio's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__folio_test_movable(folio))) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
			folio_lock(folio);
			if (folio_test_movable(folio))
				putback_movable_folio(folio);
			else
				folio_clear_isolated(folio);
			folio_unlock(folio);
			folio_put(folio);
		} else {
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -folio_nr_pages(folio));
			folio_putback_lru(folio);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *old)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		rmap_t rmap_flags = RMAP_NONE;
		pte_t old_pte;
		pte_t pte;
		swp_entry_t entry;
		struct page *new;
		unsigned long idx = 0;

		/* pgoff is invalid for ksm pages, but they are never large */
		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
		new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		folio_get(folio);
		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
		old_pte = ptep_get(pvmw.pte);
		if (pte_swp_soft_dirty(old_pte))
			pte = pte_mksoft_dirty(pte);

		entry = pte_to_swp_entry(old_pte);
		if (!is_migration_entry_young(entry))
			pte = pte_mkold(pte);
		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
			pte = pte_mkdirty(pte);
		if (is_writable_migration_entry(entry))
			pte = pte_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(old_pte))
			pte = pte_mkuffd_wp(pte);

		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(old_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(old_pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (folio_test_hugetlb(folio)) {
			struct hstate *h = hstate_vma(vma);
			unsigned int shift = huge_page_shift(h);
			unsigned long psize = huge_page_size(h);

			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (folio_test_anon(folio))
				hugepage_add_anon_rmap(new, vma, pvmw.address,
						       rmap_flags);
			else
				page_dup_file_rmap(new, true);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
					psize);
		} else
#endif
		{
			if (folio_test_anon(folio))
				page_add_anon_rmap(new, vma, pvmw.address,
						   rmap_flags);
			else
				page_add_file_rmap(new, vma, false);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();

		trace_remove_migration_pte(pvmw.address, pte_val(pte),
					   compound_order(new));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = src,
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
			  unsigned long address)
{
	spinlock_t *ptl;
	pte_t *ptep;
	pte_t pte;
	swp_entry_t entry;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		return;

	pte = ptep_get(ptep);
	pte_unmap(ptep);

	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptl);
	return;
out:
	spin_unlock(ptl);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The vma read lock must be held upon entry. Holding that lock prevents either
 * the pte or the ptl from being freed.
 *
 * This function will release the vma lock before returning.
 */
void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
	pte_t pte;

	hugetlb_vma_assert_locked(vma);
	spin_lock(ptl);
	pte = huge_ptep_get(ptep);

	if (unlikely(!is_hugetlb_entry_migration(pte))) {
		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
	} else {
		/*
		 * If a migration entry existed, it is safe to release the vma
		 * lock here because the pgtable page won't be freed without
		 * the pgtable lock being released. See the comment right above
		 * the pgtable lock release in migration_entry_wait_on_locked().
		 */
		hugetlb_vma_unlock_read(vma);
		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
	}
}
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

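/*
 * Number of references the migration code expects to hold on a folio that
 * is ready to be migrated: one baseline reference, plus one per page for
 * the page cache (if there is a mapping), plus one more if private data
 * (e.g. buffer heads) is attached.
 */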
static int folio_expected_refs(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;
	if (!mapping)
		return refs;

	refs += folio_nr_pages(folio);
	if (folio_test_private(folio))
		refs++;

	return refs;
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);
	long entries, i;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
		entries = nr;
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
		entries = 1;
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	/* Swap cache still stores N entries instead of a high-order entry */
	for (i = 0; i < entries; i++) {
		xas_store(&xas, newfolio);
		xas_next(&xas);
	}

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);

			if (folio_test_pmd_mappable(folio)) {
				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
			}
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct folio *dst, struct folio *src)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(src));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + folio_has_private(src);
	if (!folio_ref_freeze(src, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	dst->index = src->index;
	dst->mapping = src->mapping;

	folio_get(dst);

	xas_store(&xas, dst);

	folio_ref_unfreeze(src, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	/*
	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
	 * migration entries. We can still have PG_anon_exclusive set on the
	 * effectively unmapped and unreferenced first sub-page of an
	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
	 */
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(&folio->page, -1);
	/*
	 * In memory tiering mode, when a page migrates between the slow and
	 * fast memory nodes, reset cpupid, because it is used to record the
	 * page access time in the slow memory node.
	 */
	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
		bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
		bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));

		if (f_toptier != t_toptier)
			cpupid = -1;
	}
	page_cpupid_xchg_last(&newfolio->page, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
	 * bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	if (!folio_test_hugetlb(folio))
		mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

/************************************************************
 * Migration functions
 ***********************************************************/

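/*
 * Like migrate_folio(), but @extra_count lets a caller that is known to
 * hold additional references on @src (such as the device-private
 * migration code) account for them when the reference count is checked.
 */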
int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode, int extra_count)
{
	int rc;

	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, dst, src, extra_count);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not use PagePrivate/PagePrivate2.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	return migrate_folio_extra(mapping, dst, src, mode, 0);
}
EXPORT_SYMBOL(migrate_folio);
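
/*
 * Illustrative example (hypothetical aops, not taken from this file): a
 * filesystem whose folios never carry private data can use migrate_folio()
 * directly as its migration callback:
 *
 *	static const struct address_space_operations example_aops = {
 *		.migrate_folio	= migrate_folio,
 *		...
 *	};
 */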

#ifdef CONFIG_BUFFER_HEAD
/* Returns true if all buffers are successfully locked */
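/*
 * In MIGRATE_ASYNC mode we only trylock each buffer; in MIGRATE_SYNC_LIGHT
 * mode we block on a buffer lock only if the buffer is already uptodate,
 * so we never end up waiting for read I/O; the fully synchronous modes
 * block unconditionally.
 */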
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
		enum migrate_mode mode)
{
	struct buffer_head *bh = head;
	struct buffer_head *failed_bh;

	do {
		if (!trylock_buffer(bh)) {
			if (mode == MIGRATE_ASYNC)
				goto unlock;
			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
				goto unlock;
			lock_buffer(bh);
		}

		bh = bh->b_this_page;
	} while (bh != head);

	return true;

unlock:
	/* We failed to lock the buffer and cannot stall. */
	failed_bh = bh;
	bh = head;
	while (bh != failed_bh) {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	}

	return false;
}

static int __buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	head = folio_buffers(src);
	if (!head)
		return migrate_folio(mapping, dst, src, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = folio_expected_refs(mapping, src);
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	folio_attach_private(dst, folio_detach_private(src));

	bh = head;
	do {
		folio_set_bh(bh, dst, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	return rc;
}

/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
#endif /* CONFIG_BUFFER_HEAD */

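/*
 * Migration function for mappings that store filesystem-private data in
 * folio->private: the mapping entry and the private data are moved over to
 * the destination folio, and the contents are copied unless the migration
 * mode is MIGRATE_SYNC_NO_COPY.
 */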
int filemap_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int ret;

	ret = folio_migrate_mapping(mapping, dst, src, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_get_private(src))
		folio_attach_private(dst, folio_detach_private(src));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);

/*
 * Writeback a folio to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!folio_clear_dirty_for_io(folio))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty folio may imply that the underlying filesystem has
	 * the folio on some queue. So the folio must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * folio state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		folio_lock(folio);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	if (folio_test_dirty(src)) {
		/* Only writeback folios in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, src);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (!filemap_release_folio(src, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_folio(mapping, dst, src, mode);
}

9411d8b85ccSChristoph Lameter /*
942e24f0b8fSChristoph Lameter * Move a page to a newly allocated page
943e24f0b8fSChristoph Lameter * The page is locked and all ptes have been successfully removed.
944b20a3503SChristoph Lameter *
945e24f0b8fSChristoph Lameter * The new page will have replaced the old page if this function
946e24f0b8fSChristoph Lameter * is successful.
947894bc310SLee Schermerhorn *
948894bc310SLee Schermerhorn * Return value:
949894bc310SLee Schermerhorn * < 0 - error code
95078bd5209SRafael Aquini * MIGRATEPAGE_SUCCESS - success
951b20a3503SChristoph Lameter */
move_to_new_folio(struct folio * dst,struct folio * src,enum migrate_mode mode)952e7e3ffebSMatthew Wilcox (Oracle) static int move_to_new_folio(struct folio *dst, struct folio *src,
9535c3f9a67SHugh Dickins enum migrate_mode mode)
954b20a3503SChristoph Lameter {
955bda807d4SMinchan Kim int rc = -EAGAIN;
956e7e3ffebSMatthew Wilcox (Oracle) bool is_lru = !__PageMovable(&src->page);
957b20a3503SChristoph Lameter
958e7e3ffebSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
959e7e3ffebSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
960b20a3503SChristoph Lameter
961bda807d4SMinchan Kim if (likely(is_lru)) {
96268f2736aSMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(src);
96368f2736aSMatthew Wilcox (Oracle)
964b20a3503SChristoph Lameter if (!mapping)
96554184650SMatthew Wilcox (Oracle) rc = migrate_folio(mapping, dst, src, mode);
9665490da4fSMatthew Wilcox (Oracle) else if (mapping->a_ops->migrate_folio)
967b20a3503SChristoph Lameter /*
9685490da4fSMatthew Wilcox (Oracle) * Most folios have a mapping and most filesystems
9695490da4fSMatthew Wilcox (Oracle) * provide a migrate_folio callback. Anonymous folios
970bda807d4SMinchan Kim * are part of swap space which also has its own
9715490da4fSMatthew Wilcox (Oracle) * migrate_folio callback. This is the most common path
972bda807d4SMinchan Kim * for page migration.
973b20a3503SChristoph Lameter */
9745490da4fSMatthew Wilcox (Oracle) rc = mapping->a_ops->migrate_folio(mapping, dst, src,
9755490da4fSMatthew Wilcox (Oracle) mode);
9768351a6e4SChristoph Lameter else
9778faa8ef5SMatthew Wilcox (Oracle) rc = fallback_migrate_folio(mapping, dst, src, mode);
978bda807d4SMinchan Kim } else {
97968f2736aSMatthew Wilcox (Oracle) const struct movable_operations *mops;
98068f2736aSMatthew Wilcox (Oracle)
981bda807d4SMinchan Kim /*
982bda807d4SMinchan Kim * In case of non-lru page, it could be released after
983bda807d4SMinchan Kim * isolation step. In that case, we shouldn't try migration.
984bda807d4SMinchan Kim */
985e7e3ffebSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
986e7e3ffebSMatthew Wilcox (Oracle) if (!folio_test_movable(src)) {
987bda807d4SMinchan Kim rc = MIGRATEPAGE_SUCCESS;
988e7e3ffebSMatthew Wilcox (Oracle) folio_clear_isolated(src);
989bda807d4SMinchan Kim goto out;
990bda807d4SMinchan Kim }
991bda807d4SMinchan Kim
992da707a6dSVishal Moola (Oracle) mops = folio_movable_ops(src);
99368f2736aSMatthew Wilcox (Oracle) rc = mops->migrate_page(&dst->page, &src->page, mode);
994bda807d4SMinchan Kim WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
995e7e3ffebSMatthew Wilcox (Oracle) !folio_test_isolated(src));
996bda807d4SMinchan Kim }
997b20a3503SChristoph Lameter
9985c3f9a67SHugh Dickins /*
999e7e3ffebSMatthew Wilcox (Oracle) * When successful, old pagecache src->mapping must be cleared before
1000e7e3ffebSMatthew Wilcox (Oracle) * src is freed; but stats require that PageAnon be left as PageAnon.
10015c3f9a67SHugh Dickins */
10025c3f9a67SHugh Dickins if (rc == MIGRATEPAGE_SUCCESS) {
1003e7e3ffebSMatthew Wilcox (Oracle) if (__PageMovable(&src->page)) {
1004e7e3ffebSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1005bda807d4SMinchan Kim
1006bda807d4SMinchan Kim /*
1007bda807d4SMinchan Kim * We clear PG_movable under page_lock so any compactor
1008bda807d4SMinchan Kim * cannot try to migrate this page.
1009bda807d4SMinchan Kim */
1010e7e3ffebSMatthew Wilcox (Oracle) folio_clear_isolated(src);
1011bda807d4SMinchan Kim }
1012bda807d4SMinchan Kim
1013bda807d4SMinchan Kim /*
1014e7e3ffebSMatthew Wilcox (Oracle) * Anonymous and movable src->mapping will be cleared by
1015bda807d4SMinchan Kim * free_pages_prepare(), so don't reset it here; leaving it
1016bda807d4SMinchan Kim * set keeps the type usable for checks such as PageAnon.
1017bda807d4SMinchan Kim */
1018e7e3ffebSMatthew Wilcox (Oracle) if (!folio_mapping_flags(src))
1019e7e3ffebSMatthew Wilcox (Oracle) src->mapping = NULL;
1020d2b2c6ddSLars Persson
1021e7e3ffebSMatthew Wilcox (Oracle) if (likely(!folio_is_zone_device(dst)))
1022e7e3ffebSMatthew Wilcox (Oracle) flush_dcache_folio(dst);
10233fe2011fSMel Gorman }
1024bda807d4SMinchan Kim out:
1025e24f0b8fSChristoph Lameter return rc;
1026e24f0b8fSChristoph Lameter }
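
/*
 * In short: move_to_new_folio() dispatches LRU folios through the
 * filesystem path (migrate_folio(), the mapping's ->migrate_folio()
 * callback, or fallback_migrate_folio()), and hands non-LRU movable
 * folios to their driver's movable_operations ->migrate_page() callback.
 */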
1027e24f0b8fSChristoph Lameter
102864c8902eSHuang Ying /*
10299128bfbcSBaolin Wang * To record some information during migration, we use the otherwise
10309128bfbcSBaolin Wang * unused private field of the newly allocated destination folio.
10319128bfbcSBaolin Wang * This is safe because nobody is using it except us.
103264c8902eSHuang Ying */
10339d23fab8SBaolin Wang enum {
10349d23fab8SBaolin Wang PAGE_WAS_MAPPED = BIT(0),
10359d23fab8SBaolin Wang PAGE_WAS_MLOCKED = BIT(1),
10369128bfbcSBaolin Wang PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
10379d23fab8SBaolin Wang };
10389d23fab8SBaolin Wang
__migrate_folio_record(struct folio * dst,int old_page_state,struct anon_vma * anon_vma)103964c8902eSHuang Ying static void __migrate_folio_record(struct folio *dst,
10409128bfbcSBaolin Wang int old_page_state,
104164c8902eSHuang Ying struct anon_vma *anon_vma)
1042e24f0b8fSChristoph Lameter {
10439128bfbcSBaolin Wang dst->private = (void *)anon_vma + old_page_state;
104464c8902eSHuang Ying }
104564c8902eSHuang Ying
__migrate_folio_extract(struct folio * dst,int * old_page_state,struct anon_vma ** anon_vmap)104664c8902eSHuang Ying static void __migrate_folio_extract(struct folio *dst,
10479d23fab8SBaolin Wang int *old_page_state,
104864c8902eSHuang Ying struct anon_vma **anon_vmap)
104964c8902eSHuang Ying {
10509128bfbcSBaolin Wang unsigned long private = (unsigned long)dst->private;
10519128bfbcSBaolin Wang
10529128bfbcSBaolin Wang *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
10539128bfbcSBaolin Wang *old_page_state = private & PAGE_OLD_STATES;
105464c8902eSHuang Ying dst->private = NULL;
105564c8902eSHuang Ying }
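
/*
 * A minimal sketch of the packing used above (illustrative only): the
 * anon_vma pointer and the PAGE_WAS_* bits share dst->private, assuming,
 * as the helpers above do, that anon_vma pointers are at least 4-byte
 * aligned so the two low bits are always free:
 *
 *	dst->private = (void *)((unsigned long)anon_vma | old_page_state);
 *	...
 *	unsigned long private = (unsigned long)dst->private;
 *	anon_vma = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
 *	old_page_state = private & PAGE_OLD_STATES;
 */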
105664c8902eSHuang Ying
10575dfab109SHuang Ying /* Restore the source folio to the original state upon failure */
migrate_folio_undo_src(struct folio * src,int page_was_mapped,struct anon_vma * anon_vma,bool locked,struct list_head * ret)10585dfab109SHuang Ying static void migrate_folio_undo_src(struct folio *src,
10595dfab109SHuang Ying int page_was_mapped,
10605dfab109SHuang Ying struct anon_vma *anon_vma,
1061ebe75e47SHuang Ying bool locked,
10625dfab109SHuang Ying struct list_head *ret)
10635dfab109SHuang Ying {
10645dfab109SHuang Ying if (page_was_mapped)
10655dfab109SHuang Ying remove_migration_ptes(src, src, false);
10665dfab109SHuang Ying /* Drop an anon_vma reference if we took one */
10675dfab109SHuang Ying if (anon_vma)
10685dfab109SHuang Ying put_anon_vma(anon_vma);
1069ebe75e47SHuang Ying if (locked)
10705dfab109SHuang Ying folio_unlock(src);
1071ebe75e47SHuang Ying if (ret)
10725dfab109SHuang Ying list_move_tail(&src->lru, ret);
10735dfab109SHuang Ying }
10745dfab109SHuang Ying
10755dfab109SHuang Ying /* Restore the destination folio to the original state upon failure */
migrate_folio_undo_dst(struct folio * dst,bool locked,free_folio_t put_new_folio,unsigned long private)10764e096ae1SMatthew Wilcox (Oracle) static void migrate_folio_undo_dst(struct folio *dst, bool locked,
10774e096ae1SMatthew Wilcox (Oracle) free_folio_t put_new_folio, unsigned long private)
10785dfab109SHuang Ying {
1079ebe75e47SHuang Ying if (locked)
10805dfab109SHuang Ying folio_unlock(dst);
10814e096ae1SMatthew Wilcox (Oracle) if (put_new_folio)
10824e096ae1SMatthew Wilcox (Oracle) put_new_folio(dst, private);
10835dfab109SHuang Ying else
10845dfab109SHuang Ying folio_put(dst);
10855dfab109SHuang Ying }
10865dfab109SHuang Ying
108764c8902eSHuang Ying /* Cleanup src folio upon migration success */
migrate_folio_done(struct folio * src,enum migrate_reason reason)108864c8902eSHuang Ying static void migrate_folio_done(struct folio *src,
108964c8902eSHuang Ying enum migrate_reason reason)
109064c8902eSHuang Ying {
109164c8902eSHuang Ying /*
109264c8902eSHuang Ying * Compaction can also migrate non-LRU pages, which are
109364c8902eSHuang Ying * not accounted to NR_ISOLATED_*. They can be recognized
109464c8902eSHuang Ying * as __PageMovable.
109564c8902eSHuang Ying */
1096*1a49b96cSGregory Price if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION)
109764c8902eSHuang Ying mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
109864c8902eSHuang Ying folio_is_file_lru(src), -folio_nr_pages(src));
109964c8902eSHuang Ying
110064c8902eSHuang Ying if (reason != MR_MEMORY_FAILURE)
110164c8902eSHuang Ying /* We release the page in page_handle_poison. */
110264c8902eSHuang Ying folio_put(src);
110364c8902eSHuang Ying }
110464c8902eSHuang Ying
1105ebe75e47SHuang Ying /* Obtain the lock on page, remove all ptes. */
migrate_folio_unmap(new_folio_t get_new_folio,free_folio_t put_new_folio,unsigned long private,struct folio * src,struct folio ** dstp,enum migrate_mode mode,enum migrate_reason reason,struct list_head * ret)11064e096ae1SMatthew Wilcox (Oracle) static int migrate_folio_unmap(new_folio_t get_new_folio,
11074e096ae1SMatthew Wilcox (Oracle) free_folio_t put_new_folio, unsigned long private,
11084e096ae1SMatthew Wilcox (Oracle) struct folio *src, struct folio **dstp, enum migrate_mode mode,
11092ef7dbb2SHuang Ying enum migrate_reason reason, struct list_head *ret)
1110e24f0b8fSChristoph Lameter {
1111ebe75e47SHuang Ying struct folio *dst;
11120dabec93SMinchan Kim int rc = -EAGAIN;
11139d23fab8SBaolin Wang int old_page_state = 0;
11143f6c8272SMel Gorman struct anon_vma *anon_vma = NULL;
1115682a71a1SMatthew Wilcox (Oracle) bool is_lru = !__PageMovable(&src->page);
1116ebe75e47SHuang Ying bool locked = false;
1117ebe75e47SHuang Ying bool dst_locked = false;
1118ebe75e47SHuang Ying
1119ebe75e47SHuang Ying if (folio_ref_count(src) == 1) {
1120ebe75e47SHuang Ying /* Folio was freed from under us. So we are done. */
1121ebe75e47SHuang Ying folio_clear_active(src);
1122ebe75e47SHuang Ying folio_clear_unevictable(src);
1123ebe75e47SHuang Ying /* free_pages_prepare() will clear PG_isolated. */
1124ebe75e47SHuang Ying list_del(&src->lru);
1125ebe75e47SHuang Ying migrate_folio_done(src, reason);
1126ebe75e47SHuang Ying return MIGRATEPAGE_SUCCESS;
1127ebe75e47SHuang Ying }
1128ebe75e47SHuang Ying
11294e096ae1SMatthew Wilcox (Oracle) dst = get_new_folio(src, private);
11304e096ae1SMatthew Wilcox (Oracle) if (!dst)
1131ebe75e47SHuang Ying return -ENOMEM;
1132ebe75e47SHuang Ying *dstp = dst;
1133ebe75e47SHuang Ying
1134ebe75e47SHuang Ying dst->private = NULL;
113595a402c3SChristoph Lameter
1136682a71a1SMatthew Wilcox (Oracle) if (!folio_trylock(src)) {
11372ef7dbb2SHuang Ying if (mode == MIGRATE_ASYNC)
11380dabec93SMinchan Kim goto out;
11393e7d3449SMel Gorman
11403e7d3449SMel Gorman /*
11413e7d3449SMel Gorman * It's not safe for direct compaction to call lock_page.
11423e7d3449SMel Gorman * For example, during page readahead pages are added locked
11433e7d3449SMel Gorman * to the LRU. Later, when the IO completes the pages are
11443e7d3449SMel Gorman * marked uptodate and unlocked. However, the queueing
11453e7d3449SMel Gorman * could be merging multiple pages for one bio (e.g.
1146d4388340SMatthew Wilcox (Oracle) * mpage_readahead). If an allocation happens for the
11473e7d3449SMel Gorman * second or third page, the process can end up locking
11483e7d3449SMel Gorman * the same page twice and deadlocking. Rather than
11493e7d3449SMel Gorman * trying to be clever about what pages can be locked,
11503e7d3449SMel Gorman * avoid the use of lock_page for direct compaction
11513e7d3449SMel Gorman * altogether.
11523e7d3449SMel Gorman */
11533e7d3449SMel Gorman if (current->flags & PF_MEMALLOC)
11540dabec93SMinchan Kim goto out;
11553e7d3449SMel Gorman
11564bb6dc79SDouglas Anderson /*
11574bb6dc79SDouglas Anderson * In "light" mode, we can wait for transient locks (e.g.
11584bb6dc79SDouglas Anderson * inserting a page into the page table), but it's not
11594bb6dc79SDouglas Anderson * worth waiting for I/O.
11604bb6dc79SDouglas Anderson */
11614bb6dc79SDouglas Anderson if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
11624bb6dc79SDouglas Anderson goto out;
11634bb6dc79SDouglas Anderson
1164682a71a1SMatthew Wilcox (Oracle) folio_lock(src);
1165e24f0b8fSChristoph Lameter }
1166ebe75e47SHuang Ying locked = true;
11679d23fab8SBaolin Wang if (folio_test_mlocked(src))
11689d23fab8SBaolin Wang old_page_state |= PAGE_WAS_MLOCKED;
1169e24f0b8fSChristoph Lameter
1170682a71a1SMatthew Wilcox (Oracle) if (folio_test_writeback(src)) {
117111bc82d6SAndrea Arcangeli /*
1172fed5b64aSJianguo Wu * Only in the case of a full synchronous migration is it
1173a6bc32b8SMel Gorman * necessary to wait for PageWriteback. In the async case,
1174a6bc32b8SMel Gorman * the retry loop is too short and in the sync-light case,
1175a6bc32b8SMel Gorman * the overhead of stalling is too much.
117611bc82d6SAndrea Arcangeli */
11772916ecc0SJérôme Glisse switch (mode) {
11782916ecc0SJérôme Glisse case MIGRATE_SYNC:
11792916ecc0SJérôme Glisse case MIGRATE_SYNC_NO_COPY:
11802916ecc0SJérôme Glisse break;
11812916ecc0SJérôme Glisse default:
118211bc82d6SAndrea Arcangeli rc = -EBUSY;
1183ebe75e47SHuang Ying goto out;
118411bc82d6SAndrea Arcangeli }
1185682a71a1SMatthew Wilcox (Oracle) folio_wait_writeback(src);
1186e24f0b8fSChristoph Lameter }
118703f15c86SHugh Dickins
1188e24f0b8fSChristoph Lameter /*
1189682a71a1SMatthew Wilcox (Oracle) * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1190682a71a1SMatthew Wilcox (Oracle) * we cannot notice that anon_vma is freed while we migrate a page.
11911ce82b69SHugh Dickins * This folio_get_anon_vma() delays freeing the anon_vma pointer until the
1192dc386d4dSKAMEZAWA Hiroyuki * end of migration. File cache pages are no problem because of page_lock():
1193989f89c5SKAMEZAWA Hiroyuki * file caches may use writepage() or lock_page() during migration, so
1194989f89c5SKAMEZAWA Hiroyuki * only anonymous pages need this care here.
11953fe2011fSMel Gorman *
119629eea9b5SMatthew Wilcox (Oracle) * Only folio_get_anon_vma() understands the subtleties of
119703f15c86SHugh Dickins * getting a hold on an anon_vma from outside one of its mms.
119803f15c86SHugh Dickins * But if we cannot get anon_vma, then we won't need it anyway,
119903f15c86SHugh Dickins * because that implies that the anon page is no longer mapped
120003f15c86SHugh Dickins * (and cannot be remapped so long as we hold the page lock).
12013fe2011fSMel Gorman */
1202682a71a1SMatthew Wilcox (Oracle) if (folio_test_anon(src) && !folio_test_ksm(src))
120329eea9b5SMatthew Wilcox (Oracle) anon_vma = folio_get_anon_vma(src);
120462e1c553SShaohua Li
12057db7671fSHugh Dickins /*
12067db7671fSHugh Dickins * Block others from accessing the new page when we get around to
12077db7671fSHugh Dickins * establishing additional references. We are usually the only one
1208682a71a1SMatthew Wilcox (Oracle) * holding a reference to dst at this point. We used to have a BUG
1209682a71a1SMatthew Wilcox (Oracle) * here if folio_trylock(dst) fails, but would like to allow for
1210682a71a1SMatthew Wilcox (Oracle) * cases where there might be a race with the previous use of dst.
12117db7671fSHugh Dickins * This is much like races on refcount of oldpage: just don't BUG().
12127db7671fSHugh Dickins */
1213682a71a1SMatthew Wilcox (Oracle) if (unlikely(!folio_trylock(dst)))
1214ebe75e47SHuang Ying goto out;
1215ebe75e47SHuang Ying dst_locked = true;
12167db7671fSHugh Dickins
1217bda807d4SMinchan Kim if (unlikely(!is_lru)) {
12189d23fab8SBaolin Wang __migrate_folio_record(dst, old_page_state, anon_vma);
121964c8902eSHuang Ying return MIGRATEPAGE_UNMAP;
1220bda807d4SMinchan Kim }
1221bda807d4SMinchan Kim
1222dc386d4dSKAMEZAWA Hiroyuki /*
122362e1c553SShaohua Li * Corner case handling:
122462e1c553SShaohua Li * 1. When a new swap-cache page is read in, it is added to the LRU
122562e1c553SShaohua Li * and treated as swapcache but it has no rmap yet.
1226682a71a1SMatthew Wilcox (Oracle) * Calling try_to_unmap() against a src->mapping==NULL page will
122762e1c553SShaohua Li * trigger a BUG. So handle it here.
1228d12b8951SYang Shi * 2. An orphaned page (see truncate_cleanup_page) might have
122962e1c553SShaohua Li * fs-private metadata. The page can be picked up due to memory
123062e1c553SShaohua Li * offlining. Everywhere else except page reclaim, the page is
123162e1c553SShaohua Li * invisible to the vm, so the page cannot be migrated. So try to
123262e1c553SShaohua Li * free the metadata so that the page can be freed.
1233dc386d4dSKAMEZAWA Hiroyuki */
1234682a71a1SMatthew Wilcox (Oracle) if (!src->mapping) {
1235682a71a1SMatthew Wilcox (Oracle) if (folio_test_private(src)) {
1236682a71a1SMatthew Wilcox (Oracle) try_to_free_buffers(src);
1237ebe75e47SHuang Ying goto out;
123862e1c553SShaohua Li }
1239682a71a1SMatthew Wilcox (Oracle) } else if (folio_mapped(src)) {
12407db7671fSHugh Dickins /* Establish migration ptes */
1241682a71a1SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1242682a71a1SMatthew Wilcox (Oracle) !folio_test_ksm(src) && !anon_vma, src);
1243fb3592c4SHuang Ying try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
12449d23fab8SBaolin Wang old_page_state |= PAGE_WAS_MAPPED;
12452ebba6b7SHugh Dickins }
1246dc386d4dSKAMEZAWA Hiroyuki
124764c8902eSHuang Ying if (!folio_mapped(src)) {
12489d23fab8SBaolin Wang __migrate_folio_record(dst, old_page_state, anon_vma);
124964c8902eSHuang Ying return MIGRATEPAGE_UNMAP;
125064c8902eSHuang Ying }
125164c8902eSHuang Ying
125264c8902eSHuang Ying out:
125380562ba0SHuang Ying /*
125480562ba0SHuang Ying * A folio that has not been unmapped will be restored to
125580562ba0SHuang Ying * the right list unless we want to retry.
125680562ba0SHuang Ying */
1257fb3592c4SHuang Ying if (rc == -EAGAIN)
1258ebe75e47SHuang Ying ret = NULL;
125980562ba0SHuang Ying
12609d23fab8SBaolin Wang migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
12619d23fab8SBaolin Wang anon_vma, locked, ret);
12624e096ae1SMatthew Wilcox (Oracle) migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
126380562ba0SHuang Ying
126480562ba0SHuang Ying return rc;
126580562ba0SHuang Ying }
126680562ba0SHuang Ying
1267ebe75e47SHuang Ying /* Migrate the folio to the newly allocated folio in dst. */
migrate_folio_move(free_folio_t put_new_folio,unsigned long private,struct folio * src,struct folio * dst,enum migrate_mode mode,enum migrate_reason reason,struct list_head * ret)12684e096ae1SMatthew Wilcox (Oracle) static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1269ebe75e47SHuang Ying struct folio *src, struct folio *dst,
1270ebe75e47SHuang Ying enum migrate_mode mode, enum migrate_reason reason,
1271ebe75e47SHuang Ying struct list_head *ret)
127264c8902eSHuang Ying {
127364c8902eSHuang Ying int rc;
12749d23fab8SBaolin Wang int old_page_state = 0;
127564c8902eSHuang Ying struct anon_vma *anon_vma = NULL;
127664c8902eSHuang Ying bool is_lru = !__PageMovable(&src->page);
12775dfab109SHuang Ying struct list_head *prev;
127864c8902eSHuang Ying
12799d23fab8SBaolin Wang __migrate_folio_extract(dst, &old_page_state, &anon_vma);
12805dfab109SHuang Ying prev = dst->lru.prev;
12815dfab109SHuang Ying list_del(&dst->lru);
128264c8902eSHuang Ying
1283682a71a1SMatthew Wilcox (Oracle) rc = move_to_new_folio(dst, src, mode);
1284ebe75e47SHuang Ying if (rc)
1285ebe75e47SHuang Ying goto out;
12865dfab109SHuang Ying
128764c8902eSHuang Ying if (unlikely(!is_lru))
128864c8902eSHuang Ying goto out_unlock_both;
1289e24f0b8fSChristoph Lameter
1290c3096e67SHugh Dickins /*
1291682a71a1SMatthew Wilcox (Oracle) * When successful, push dst to LRU immediately: so that if it
1292c3096e67SHugh Dickins * turns out to be an mlocked page, remove_migration_ptes() will
1293682a71a1SMatthew Wilcox (Oracle) * automatically build up the correct dst->mlock_count for it.
1294c3096e67SHugh Dickins *
1295c3096e67SHugh Dickins * We would like to do something similar for the old page, when
1296c3096e67SHugh Dickins * unsuccessful, and other cases when a page has been temporarily
1297c3096e67SHugh Dickins * isolated from the unevictable LRU: but this case is the easiest.
1298c3096e67SHugh Dickins */
1299682a71a1SMatthew Wilcox (Oracle) folio_add_lru(dst);
13009d23fab8SBaolin Wang if (old_page_state & PAGE_WAS_MLOCKED)
1301c3096e67SHugh Dickins lru_add_drain();
1302c3096e67SHugh Dickins
13039d23fab8SBaolin Wang if (old_page_state & PAGE_WAS_MAPPED)
1304ebe75e47SHuang Ying remove_migration_ptes(src, dst, false);
13053f6c8272SMel Gorman
13067db7671fSHugh Dickins out_unlock_both:
1307682a71a1SMatthew Wilcox (Oracle) folio_unlock(dst);
1308ebe75e47SHuang Ying set_page_owner_migrate_reason(&dst->page, reason);
1309c6c919ebSMinchan Kim /*
1310682a71a1SMatthew Wilcox (Oracle) * If migration is successful, decrease the refcount of dst,
1311c6c919ebSMinchan Kim * which will not free the page because the new page owner
1312c3096e67SHugh Dickins * holds an extra reference.
1313c6c919ebSMinchan Kim */
1314682a71a1SMatthew Wilcox (Oracle) folio_put(dst);
1315c6c919ebSMinchan Kim
1316ebe75e47SHuang Ying /*
1317ebe75e47SHuang Ying * A folio that has been migrated has all references removed
1318ebe75e47SHuang Ying * and will be freed.
1319ebe75e47SHuang Ying */
1320ebe75e47SHuang Ying list_del(&src->lru);
1321ebe75e47SHuang Ying /* Drop an anon_vma reference if we took one */
1322ebe75e47SHuang Ying if (anon_vma)
1323ebe75e47SHuang Ying put_anon_vma(anon_vma);
1324ebe75e47SHuang Ying folio_unlock(src);
1325ebe75e47SHuang Ying migrate_folio_done(src, reason);
1326ebe75e47SHuang Ying
1327ebe75e47SHuang Ying return rc;
1328ebe75e47SHuang Ying out:
1329ebe75e47SHuang Ying /*
1330ebe75e47SHuang Ying * A folio that has not been migrated will be restored to
1331ebe75e47SHuang Ying * the right list unless we want to retry.
1332ebe75e47SHuang Ying */
1333ebe75e47SHuang Ying if (rc == -EAGAIN) {
1334ebe75e47SHuang Ying list_add(&dst->lru, prev);
13359d23fab8SBaolin Wang __migrate_folio_record(dst, old_page_state, anon_vma);
13360dabec93SMinchan Kim return rc;
13370dabec93SMinchan Kim }
133895a402c3SChristoph Lameter
13399d23fab8SBaolin Wang migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
13409d23fab8SBaolin Wang anon_vma, true, ret);
13414e096ae1SMatthew Wilcox (Oracle) migrate_folio_undo_dst(dst, true, put_new_folio, private);
134268711a74SDavid Rientjes
1343e24f0b8fSChristoph Lameter return rc;
1344e24f0b8fSChristoph Lameter }
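
/*
 * Between migrate_folio_unmap() and migrate_folio_move(), the folio's old
 * state (PAGE_WAS_MAPPED/PAGE_WAS_MLOCKED) and its anon_vma are parked in
 * dst->private via __migrate_folio_record() and retrieved again with
 * __migrate_folio_extract(). This is what lets migrate_pages_batch() unmap
 * a whole batch of folios first and move them afterwards.
 */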
1345b20a3503SChristoph Lameter
1346e24f0b8fSChristoph Lameter /*
1347290408d4SNaoya Horiguchi * Counterpart of migrate_folio_unmap()/migrate_folio_move() for hugepage migration.
1348290408d4SNaoya Horiguchi *
1349290408d4SNaoya Horiguchi * This function doesn't wait for the completion of hugepage I/O
1350290408d4SNaoya Horiguchi * because there is no race between I/O and migration for hugepage.
1351290408d4SNaoya Horiguchi * Note that currently hugepage I/O occurs only in direct I/O
1352290408d4SNaoya Horiguchi * where no lock is held and PG_writeback is irrelevant,
1353290408d4SNaoya Horiguchi * and the writeback status of all subpages is counted in the reference
1354290408d4SNaoya Horiguchi * count of the head page (i.e. if all subpages of a 2MB hugepage are
1355290408d4SNaoya Horiguchi * under direct I/O, the reference of the head page is 512 and a bit more.)
1356290408d4SNaoya Horiguchi * This means that when we try to migrate a hugepage whose subpages are
1357290408d4SNaoya Horiguchi * doing direct I/O, some references remain after try_to_unmap() and
1358290408d4SNaoya Horiguchi * hugepage migration fails without data corruption.
1359290408d4SNaoya Horiguchi *
1360290408d4SNaoya Horiguchi * There is also no race when direct I/O is issued on the page under migration,
1361290408d4SNaoya Horiguchi * because then pte is replaced with migration swap entry and direct I/O code
1362290408d4SNaoya Horiguchi * will wait in the page fault for migration to complete.
1363290408d4SNaoya Horiguchi */
unmap_and_move_huge_page(new_folio_t get_new_folio,free_folio_t put_new_folio,unsigned long private,struct folio * src,int force,enum migrate_mode mode,int reason,struct list_head * ret)13644e096ae1SMatthew Wilcox (Oracle) static int unmap_and_move_huge_page(new_folio_t get_new_folio,
13654e096ae1SMatthew Wilcox (Oracle) free_folio_t put_new_folio, unsigned long private,
13664e096ae1SMatthew Wilcox (Oracle) struct folio *src, int force, enum migrate_mode mode,
13674e096ae1SMatthew Wilcox (Oracle) int reason, struct list_head *ret)
1368290408d4SNaoya Horiguchi {
13694e096ae1SMatthew Wilcox (Oracle) struct folio *dst;
13702def7424SHugh Dickins int rc = -EAGAIN;
13712ebba6b7SHugh Dickins int page_was_mapped = 0;
1372290408d4SNaoya Horiguchi struct anon_vma *anon_vma = NULL;
1373c0d0381aSMike Kravetz struct address_space *mapping = NULL;
1374290408d4SNaoya Horiguchi
1375c33db292SMatthew Wilcox (Oracle) if (folio_ref_count(src) == 1) {
137671a64f61SMuchun Song /* page was freed from under us. So we are done. */
1377ea8e72f4SSidhartha Kumar folio_putback_active_hugetlb(src);
137871a64f61SMuchun Song return MIGRATEPAGE_SUCCESS;
137971a64f61SMuchun Song }
138071a64f61SMuchun Song
13814e096ae1SMatthew Wilcox (Oracle) dst = get_new_folio(src, private);
13824e096ae1SMatthew Wilcox (Oracle) if (!dst)
1383290408d4SNaoya Horiguchi return -ENOMEM;
1384290408d4SNaoya Horiguchi
1385c33db292SMatthew Wilcox (Oracle) if (!folio_trylock(src)) {
13862916ecc0SJérôme Glisse if (!force)
1387290408d4SNaoya Horiguchi goto out;
13882916ecc0SJérôme Glisse switch (mode) {
13892916ecc0SJérôme Glisse case MIGRATE_SYNC:
13902916ecc0SJérôme Glisse case MIGRATE_SYNC_NO_COPY:
13912916ecc0SJérôme Glisse break;
13922916ecc0SJérôme Glisse default:
13932916ecc0SJérôme Glisse goto out;
13942916ecc0SJérôme Glisse }
1395c33db292SMatthew Wilcox (Oracle) folio_lock(src);
1396290408d4SNaoya Horiguchi }
1397290408d4SNaoya Horiguchi
1398cb6acd01SMike Kravetz /*
1399cb6acd01SMike Kravetz * Check for pages which are in the process of being freed. Without
1400c33db292SMatthew Wilcox (Oracle) * folio_mapping() set, hugetlbfs specific move page routine will not
1401cb6acd01SMike Kravetz * be called and we could leak usage counts for subpools.
1402cb6acd01SMike Kravetz */
1403345c62d1SSidhartha Kumar if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1404cb6acd01SMike Kravetz rc = -EBUSY;
1405cb6acd01SMike Kravetz goto out_unlock;
1406cb6acd01SMike Kravetz }
1407cb6acd01SMike Kravetz
1408c33db292SMatthew Wilcox (Oracle) if (folio_test_anon(src))
140929eea9b5SMatthew Wilcox (Oracle) anon_vma = folio_get_anon_vma(src);
1410290408d4SNaoya Horiguchi
1411c33db292SMatthew Wilcox (Oracle) if (unlikely(!folio_trylock(dst)))
14127db7671fSHugh Dickins goto put_anon;
14137db7671fSHugh Dickins
1414c33db292SMatthew Wilcox (Oracle) if (folio_mapped(src)) {
1415a98a2f0cSAlistair Popple enum ttu_flags ttu = 0;
1416336bf30eSMike Kravetz
1417c33db292SMatthew Wilcox (Oracle) if (!folio_test_anon(src)) {
1418c0d0381aSMike Kravetz /*
1419336bf30eSMike Kravetz * In shared mappings, try_to_unmap could potentially
1420336bf30eSMike Kravetz * call huge_pmd_unshare. Because of this, take
1421336bf30eSMike Kravetz * semaphore in write mode here and set TTU_RMAP_LOCKED
1422336bf30eSMike Kravetz * to let lower levels know we have taken the lock.
1423c0d0381aSMike Kravetz */
14244e096ae1SMatthew Wilcox (Oracle) mapping = hugetlb_page_mapping_lock_write(&src->page);
1425c0d0381aSMike Kravetz if (unlikely(!mapping))
1426c0d0381aSMike Kravetz goto unlock_put_anon;
1427c0d0381aSMike Kravetz
14285202978bSMiaohe Lin ttu = TTU_RMAP_LOCKED;
1429336bf30eSMike Kravetz }
1430336bf30eSMike Kravetz
14314b8554c5SMatthew Wilcox (Oracle) try_to_migrate(src, ttu);
14322ebba6b7SHugh Dickins page_was_mapped = 1;
1433336bf30eSMike Kravetz
14345202978bSMiaohe Lin if (ttu & TTU_RMAP_LOCKED)
1435336bf30eSMike Kravetz i_mmap_unlock_write(mapping);
14362ebba6b7SHugh Dickins }
1437290408d4SNaoya Horiguchi
1438c33db292SMatthew Wilcox (Oracle) if (!folio_mapped(src))
1439e7e3ffebSMatthew Wilcox (Oracle) rc = move_to_new_folio(dst, src, mode);
1440290408d4SNaoya Horiguchi
1441336bf30eSMike Kravetz if (page_was_mapped)
14424eecb8b9SMatthew Wilcox (Oracle) remove_migration_ptes(src,
14434eecb8b9SMatthew Wilcox (Oracle) rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1444290408d4SNaoya Horiguchi
1445c0d0381aSMike Kravetz unlock_put_anon:
1446c33db292SMatthew Wilcox (Oracle) folio_unlock(dst);
14477db7671fSHugh Dickins
14487db7671fSHugh Dickins put_anon:
1449fd4a4663SHugh Dickins if (anon_vma)
14509e60109fSPeter Zijlstra put_anon_vma(anon_vma);
14518e6ac7faSAneesh Kumar K.V
14522def7424SHugh Dickins if (rc == MIGRATEPAGE_SUCCESS) {
1453345c62d1SSidhartha Kumar move_hugetlb_state(src, dst, reason);
14544e096ae1SMatthew Wilcox (Oracle) put_new_folio = NULL;
14552def7424SHugh Dickins }
14568e6ac7faSAneesh Kumar K.V
1457cb6acd01SMike Kravetz out_unlock:
1458c33db292SMatthew Wilcox (Oracle) folio_unlock(src);
145909761333SHillf Danton out:
1460dd4ae78aSYang Shi if (rc == MIGRATEPAGE_SUCCESS)
1461ea8e72f4SSidhartha Kumar folio_putback_active_hugetlb(src);
1462a04840c6SMiaohe Lin else if (rc != -EAGAIN)
1463c33db292SMatthew Wilcox (Oracle) list_move_tail(&src->lru, ret);
146468711a74SDavid Rientjes
146568711a74SDavid Rientjes /*
146668711a74SDavid Rientjes * If migration was not successful and there's a freeing callback, use
146768711a74SDavid Rientjes * it. Otherwise, put_page() will drop the reference grabbed during
146868711a74SDavid Rientjes * isolation.
146968711a74SDavid Rientjes */
14704e096ae1SMatthew Wilcox (Oracle) if (put_new_folio)
14714e096ae1SMatthew Wilcox (Oracle) put_new_folio(dst, private);
147268711a74SDavid Rientjes else
1473ea8e72f4SSidhartha Kumar folio_putback_active_hugetlb(dst);
147468711a74SDavid Rientjes
1475290408d4SNaoya Horiguchi return rc;
1476290408d4SNaoya Horiguchi }
1477290408d4SNaoya Horiguchi
try_split_folio(struct folio * folio,struct list_head * split_folios)1478eaec4e63SHuang Ying static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1479d532e2e5SYang Shi {
14809c62ff00SHuang Ying int rc;
1481d532e2e5SYang Shi
1482eaec4e63SHuang Ying folio_lock(folio);
1483eaec4e63SHuang Ying rc = split_folio_to_list(folio, split_folios);
1484eaec4e63SHuang Ying folio_unlock(folio);
1485e6fa8a79SHuang Ying if (!rc)
1486eaec4e63SHuang Ying list_move_tail(&folio->lru, split_folios);
1487d532e2e5SYang Shi
1488d532e2e5SYang Shi return rc;
1489d532e2e5SYang Shi }
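
/*
 * try_split_folio() splits a large folio while holding its lock; on success
 * the folio and its newly created split folios all end up on the
 * split_folios list, to be migrated later with minimal effort.
 */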
1490d532e2e5SYang Shi
149142012e04SHuang Ying #ifdef CONFIG_TRANSPARENT_HUGEPAGE
149242012e04SHuang Ying #define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
149342012e04SHuang Ying #else
149442012e04SHuang Ying #define NR_MAX_BATCHED_MIGRATION 512
149542012e04SHuang Ying #endif
1496e5bfff8bSHuang Ying #define NR_MAX_MIGRATE_PAGES_RETRY 10
14972ef7dbb2SHuang Ying #define NR_MAX_MIGRATE_ASYNC_RETRY 3
14982ef7dbb2SHuang Ying #define NR_MAX_MIGRATE_SYNC_RETRY \
14992ef7dbb2SHuang Ying (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
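
/*
 * With the values above, migrate_pages_sync() gets a retry budget of
 * NR_MAX_MIGRATE_ASYNC_RETRY (3) passes in its batched MIGRATE_ASYNC
 * attempt plus NR_MAX_MIGRATE_SYNC_RETRY (10 - 3 = 7) passes in the
 * synchronous one-by-one fallback, matching the overall
 * NR_MAX_MIGRATE_PAGES_RETRY limit.
 */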
1500e5bfff8bSHuang Ying
15015b855937SHuang Ying struct migrate_pages_stats {
15025b855937SHuang Ying int nr_succeeded; /* Normal and large folios migrated successfully, in
15035b855937SHuang Ying units of base pages */
15045b855937SHuang Ying int nr_failed_pages; /* Normal and large folios failed to be migrated, in
15055b855937SHuang Ying units of base pages. Untried folios aren't counted */
15065b855937SHuang Ying int nr_thp_succeeded; /* THP migrated successfully */
15075b855937SHuang Ying int nr_thp_failed; /* THP failed to be migrated */
15085b855937SHuang Ying int nr_thp_split; /* THP split before migrating */
15095b855937SHuang Ying };
15105b855937SHuang Ying
1511290408d4SNaoya Horiguchi /*
1512e5bfff8bSHuang Ying * Returns the number of hugetlb folios that were not migrated, or an error code
1513e5bfff8bSHuang Ying * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
1514e5bfff8bSHuang Ying * any more because the list has become empty or no retryable hugetlb folios
1515e5bfff8bSHuang Ying * exist any more. It is the caller's responsibility to call putback_movable_pages()
1516e5bfff8bSHuang Ying * only if ret != 0.
1517e5bfff8bSHuang Ying */
migrate_hugetlbs(struct list_head * from,new_folio_t get_new_folio,free_folio_t put_new_folio,unsigned long private,enum migrate_mode mode,int reason,struct migrate_pages_stats * stats,struct list_head * ret_folios)15184e096ae1SMatthew Wilcox (Oracle) static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
15194e096ae1SMatthew Wilcox (Oracle) free_folio_t put_new_folio, unsigned long private,
1520e5bfff8bSHuang Ying enum migrate_mode mode, int reason,
1521e5bfff8bSHuang Ying struct migrate_pages_stats *stats,
1522e5bfff8bSHuang Ying struct list_head *ret_folios)
1523e5bfff8bSHuang Ying {
1524e5bfff8bSHuang Ying int retry = 1;
1525e5bfff8bSHuang Ying int nr_failed = 0;
1526e5bfff8bSHuang Ying int nr_retry_pages = 0;
1527e5bfff8bSHuang Ying int pass = 0;
1528e5bfff8bSHuang Ying struct folio *folio, *folio2;
1529e5bfff8bSHuang Ying int rc, nr_pages;
1530e5bfff8bSHuang Ying
1531e5bfff8bSHuang Ying for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1532e5bfff8bSHuang Ying retry = 0;
1533e5bfff8bSHuang Ying nr_retry_pages = 0;
1534e5bfff8bSHuang Ying
1535e5bfff8bSHuang Ying list_for_each_entry_safe(folio, folio2, from, lru) {
1536e5bfff8bSHuang Ying if (!folio_test_hugetlb(folio))
1537e5bfff8bSHuang Ying continue;
1538e5bfff8bSHuang Ying
1539e5bfff8bSHuang Ying nr_pages = folio_nr_pages(folio);
1540e5bfff8bSHuang Ying
1541e5bfff8bSHuang Ying cond_resched();
1542e5bfff8bSHuang Ying
15436f7d760eSHuang Ying /*
15446f7d760eSHuang Ying * Migratability of hugepages depends on the architecture and
15456f7d760eSHuang Ying * the hugepage size. This check is necessary because some callers
15466f7d760eSHuang Ying * of hugepage migration like soft offline and memory
15476f7d760eSHuang Ying * hotremove don't walk through page tables or check whether
15486f7d760eSHuang Ying * the hugepage is pmd-based or not before kicking migration.
15496f7d760eSHuang Ying */
15506f7d760eSHuang Ying if (!hugepage_migration_supported(folio_hstate(folio))) {
15516f7d760eSHuang Ying nr_failed++;
15526f7d760eSHuang Ying stats->nr_failed_pages += nr_pages;
15536f7d760eSHuang Ying list_move_tail(&folio->lru, ret_folios);
15546f7d760eSHuang Ying continue;
15556f7d760eSHuang Ying }
15566f7d760eSHuang Ying
15574e096ae1SMatthew Wilcox (Oracle) rc = unmap_and_move_huge_page(get_new_folio,
15584e096ae1SMatthew Wilcox (Oracle) put_new_folio, private,
15594e096ae1SMatthew Wilcox (Oracle) folio, pass > 2, mode,
1560e5bfff8bSHuang Ying reason, ret_folios);
1561e5bfff8bSHuang Ying /*
1562e5bfff8bSHuang Ying * The rules are:
1563e5bfff8bSHuang Ying * Success: hugetlb folio will be put back
1564e5bfff8bSHuang Ying * -EAGAIN: stay on the from list
1565e5bfff8bSHuang Ying * -ENOMEM: stay on the from list
1566e5bfff8bSHuang Ying * Other errno: put on ret_folios list
1567e5bfff8bSHuang Ying */
1568e5bfff8bSHuang Ying switch(rc) {
1569e5bfff8bSHuang Ying case -ENOMEM:
1570e5bfff8bSHuang Ying /*
1571e5bfff8bSHuang Ying * When memory is low, don't bother to try to migrate
1572e5bfff8bSHuang Ying * other folios, just exit.
1573e5bfff8bSHuang Ying */
1574e5bfff8bSHuang Ying stats->nr_failed_pages += nr_pages + nr_retry_pages;
1575e5bfff8bSHuang Ying return -ENOMEM;
1576e5bfff8bSHuang Ying case -EAGAIN:
1577e5bfff8bSHuang Ying retry++;
1578e5bfff8bSHuang Ying nr_retry_pages += nr_pages;
1579e5bfff8bSHuang Ying break;
1580e5bfff8bSHuang Ying case MIGRATEPAGE_SUCCESS:
1581e5bfff8bSHuang Ying stats->nr_succeeded += nr_pages;
1582e5bfff8bSHuang Ying break;
1583e5bfff8bSHuang Ying default:
1584e5bfff8bSHuang Ying /*
1585e5bfff8bSHuang Ying * Permanent failure (-EBUSY, etc.):
1586e5bfff8bSHuang Ying * unlike -EAGAIN case, the failed folio is
1587e5bfff8bSHuang Ying * removed from migration folio list and not
1588e5bfff8bSHuang Ying * retried in the next outer loop.
1589e5bfff8bSHuang Ying */
1590e5bfff8bSHuang Ying nr_failed++;
1591e5bfff8bSHuang Ying stats->nr_failed_pages += nr_pages;
1592e5bfff8bSHuang Ying break;
1593e5bfff8bSHuang Ying }
1594e5bfff8bSHuang Ying }
1595e5bfff8bSHuang Ying }
1596e5bfff8bSHuang Ying /*
1597e5bfff8bSHuang Ying * nr_failed is the number of hugetlb folios that failed to be migrated. After
1598e5bfff8bSHuang Ying * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1599e5bfff8bSHuang Ying * folios as failed.
1600e5bfff8bSHuang Ying */
1601e5bfff8bSHuang Ying nr_failed += retry;
1602e5bfff8bSHuang Ying stats->nr_failed_pages += nr_retry_pages;
1603e5bfff8bSHuang Ying
1604e5bfff8bSHuang Ying return nr_failed;
1605e5bfff8bSHuang Ying }
1606e5bfff8bSHuang Ying
16075dfab109SHuang Ying /*
16085dfab109SHuang Ying * migrate_pages_batch() first unmaps as many folios in the from list as
16095dfab109SHuang Ying * possible, then moves the unmapped folios.
1610fb3592c4SHuang Ying *
1611fb3592c4SHuang Ying * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
1612fb3592c4SHuang Ying * lock or bit while we have locked more than one folio, which may cause
1613fb3592c4SHuang Ying * deadlock (e.g., for the loop device). So, if mode != MIGRATE_ASYNC, the
1614fb3592c4SHuang Ying * length of the from list must be <= 1.
16155dfab109SHuang Ying */
migrate_pages_batch(struct list_head * from,new_folio_t get_new_folio,free_folio_t put_new_folio,unsigned long private,enum migrate_mode mode,int reason,struct list_head * ret_folios,struct list_head * split_folios,struct migrate_pages_stats * stats,int nr_pass)16164e096ae1SMatthew Wilcox (Oracle) static int migrate_pages_batch(struct list_head *from,
16174e096ae1SMatthew Wilcox (Oracle) new_folio_t get_new_folio, free_folio_t put_new_folio,
16184e096ae1SMatthew Wilcox (Oracle) unsigned long private, enum migrate_mode mode, int reason,
16194e096ae1SMatthew Wilcox (Oracle) struct list_head *ret_folios, struct list_head *split_folios,
16204e096ae1SMatthew Wilcox (Oracle) struct migrate_pages_stats *stats, int nr_pass)
162142012e04SHuang Ying {
1622a21d2133SHuang Ying int retry = 1;
162342012e04SHuang Ying int thp_retry = 1;
162442012e04SHuang Ying int nr_failed = 0;
162542012e04SHuang Ying int nr_retry_pages = 0;
162642012e04SHuang Ying int pass = 0;
162742012e04SHuang Ying bool is_thp = false;
16285dfab109SHuang Ying struct folio *folio, *folio2, *dst = NULL, *dst2;
1629a21d2133SHuang Ying int rc, rc_saved = 0, nr_pages;
16305dfab109SHuang Ying LIST_HEAD(unmap_folios);
16315dfab109SHuang Ying LIST_HEAD(dst_folios);
163242012e04SHuang Ying bool nosplit = (reason == MR_NUMA_MISPLACED);
163342012e04SHuang Ying
1634fb3592c4SHuang Ying VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1635fb3592c4SHuang Ying !list_empty(from) && !list_is_singular(from));
1636a21d2133SHuang Ying
1637124abcedSHuang Ying for (pass = 0; pass < nr_pass && retry; pass++) {
163842012e04SHuang Ying retry = 0;
163942012e04SHuang Ying thp_retry = 0;
164042012e04SHuang Ying nr_retry_pages = 0;
164142012e04SHuang Ying
164242012e04SHuang Ying list_for_each_entry_safe(folio, folio2, from, lru) {
1643124abcedSHuang Ying is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
164442012e04SHuang Ying nr_pages = folio_nr_pages(folio);
164542012e04SHuang Ying
164642012e04SHuang Ying cond_resched();
164742012e04SHuang Ying
16486f7d760eSHuang Ying /*
16496f7d760eSHuang Ying * Large folio migration might be unsupported or
16506f7d760eSHuang Ying * the allocation might fail, so we should retry
16516f7d760eSHuang Ying * on the same folio with the large folio split
16526f7d760eSHuang Ying * into normal folios.
16536f7d760eSHuang Ying *
16546f7d760eSHuang Ying * Split folios are put in split_folios, and
16556f7d760eSHuang Ying * we will migrate them after the rest of the
16566f7d760eSHuang Ying * list is processed.
16576f7d760eSHuang Ying */
16586f7d760eSHuang Ying if (!thp_migration_supported() && is_thp) {
1659124abcedSHuang Ying nr_failed++;
16606f7d760eSHuang Ying stats->nr_thp_failed++;
1661a21d2133SHuang Ying if (!try_split_folio(folio, split_folios)) {
16626f7d760eSHuang Ying stats->nr_thp_split++;
16636f7d760eSHuang Ying continue;
16646f7d760eSHuang Ying }
16656f7d760eSHuang Ying stats->nr_failed_pages += nr_pages;
16666f7d760eSHuang Ying list_move_tail(&folio->lru, ret_folios);
16676f7d760eSHuang Ying continue;
16686f7d760eSHuang Ying }
16696f7d760eSHuang Ying
16704e096ae1SMatthew Wilcox (Oracle) rc = migrate_folio_unmap(get_new_folio, put_new_folio,
16714e096ae1SMatthew Wilcox (Oracle) private, folio, &dst, mode, reason,
16724e096ae1SMatthew Wilcox (Oracle) ret_folios);
167342012e04SHuang Ying /*
167442012e04SHuang Ying * The rules are:
167542012e04SHuang Ying * Success: folio will be freed
16765dfab109SHuang Ying * Unmap: folio will be put on unmap_folios list,
16775dfab109SHuang Ying * dst folio put on dst_folios list
167842012e04SHuang Ying * -EAGAIN: stay on the from list
167942012e04SHuang Ying * -ENOMEM: stay on the from list
168042012e04SHuang Ying * Other errno: put on ret_folios list
168142012e04SHuang Ying */
168242012e04SHuang Ying switch(rc) {
168342012e04SHuang Ying case -ENOMEM:
168442012e04SHuang Ying /*
168542012e04SHuang Ying * When memory is low, don't bother to try to migrate
16865dfab109SHuang Ying * other folios, move unmapped folios, then exit.
168742012e04SHuang Ying */
1688124abcedSHuang Ying nr_failed++;
168942012e04SHuang Ying stats->nr_thp_failed += is_thp;
169042012e04SHuang Ying /* Large folio NUMA faulting doesn't split to retry. */
1691124abcedSHuang Ying if (folio_test_large(folio) && !nosplit) {
1692a21d2133SHuang Ying int ret = try_split_folio(folio, split_folios);
169342012e04SHuang Ying
169442012e04SHuang Ying if (!ret) {
169542012e04SHuang Ying stats->nr_thp_split += is_thp;
169642012e04SHuang Ying break;
169742012e04SHuang Ying } else if (reason == MR_LONGTERM_PIN &&
169842012e04SHuang Ying ret == -EAGAIN) {
169942012e04SHuang Ying /*
170042012e04SHuang Ying * Try again to split large folio to
170142012e04SHuang Ying * mitigate the failure of longterm pinning.
170242012e04SHuang Ying */
1703124abcedSHuang Ying retry++;
170442012e04SHuang Ying thp_retry += is_thp;
170542012e04SHuang Ying nr_retry_pages += nr_pages;
1706851ae642SHuang Ying /* Undo duplicated failure counting. */
1707124abcedSHuang Ying nr_failed--;
1708851ae642SHuang Ying stats->nr_thp_failed -= is_thp;
170942012e04SHuang Ying break;
171042012e04SHuang Ying }
171142012e04SHuang Ying }
171242012e04SHuang Ying
171342012e04SHuang Ying stats->nr_failed_pages += nr_pages + nr_retry_pages;
171442012e04SHuang Ying /* nr_failed isn't updated for not used */
171542012e04SHuang Ying stats->nr_thp_failed += thp_retry;
17165dfab109SHuang Ying rc_saved = rc;
17175dfab109SHuang Ying if (list_empty(&unmap_folios))
171842012e04SHuang Ying goto out;
17195dfab109SHuang Ying else
17205dfab109SHuang Ying goto move;
172142012e04SHuang Ying case -EAGAIN:
172242012e04SHuang Ying retry++;
1723124abcedSHuang Ying thp_retry += is_thp;
172442012e04SHuang Ying nr_retry_pages += nr_pages;
172542012e04SHuang Ying break;
172642012e04SHuang Ying case MIGRATEPAGE_SUCCESS:
172742012e04SHuang Ying stats->nr_succeeded += nr_pages;
172842012e04SHuang Ying stats->nr_thp_succeeded += is_thp;
172942012e04SHuang Ying break;
17305dfab109SHuang Ying case MIGRATEPAGE_UNMAP:
17315dfab109SHuang Ying list_move_tail(&folio->lru, &unmap_folios);
17325dfab109SHuang Ying list_add_tail(&dst->lru, &dst_folios);
17335dfab109SHuang Ying break;
173442012e04SHuang Ying default:
173542012e04SHuang Ying /*
173642012e04SHuang Ying * Permanent failure (-EBUSY, etc.):
173742012e04SHuang Ying * unlike -EAGAIN case, the failed folio is
173842012e04SHuang Ying * removed from migration folio list and not
173942012e04SHuang Ying * retried in the next outer loop.
174042012e04SHuang Ying */
174142012e04SHuang Ying nr_failed++;
1742124abcedSHuang Ying stats->nr_thp_failed += is_thp;
174342012e04SHuang Ying stats->nr_failed_pages += nr_pages;
174442012e04SHuang Ying break;
174542012e04SHuang Ying }
174642012e04SHuang Ying }
174742012e04SHuang Ying }
174842012e04SHuang Ying nr_failed += retry;
174942012e04SHuang Ying stats->nr_thp_failed += thp_retry;
175042012e04SHuang Ying stats->nr_failed_pages += nr_retry_pages;
17515dfab109SHuang Ying move:
17527e12beb8SHuang Ying /* Flush TLBs for all unmapped folios */
17537e12beb8SHuang Ying try_to_unmap_flush();
17547e12beb8SHuang Ying
17555dfab109SHuang Ying retry = 1;
1756124abcedSHuang Ying for (pass = 0; pass < nr_pass && retry; pass++) {
17575dfab109SHuang Ying retry = 0;
17585dfab109SHuang Ying thp_retry = 0;
17595dfab109SHuang Ying nr_retry_pages = 0;
17605dfab109SHuang Ying
17615dfab109SHuang Ying dst = list_first_entry(&dst_folios, struct folio, lru);
17625dfab109SHuang Ying dst2 = list_next_entry(dst, lru);
17635dfab109SHuang Ying list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1764124abcedSHuang Ying is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
17655dfab109SHuang Ying nr_pages = folio_nr_pages(folio);
17665dfab109SHuang Ying
17675dfab109SHuang Ying cond_resched();
17685dfab109SHuang Ying
17694e096ae1SMatthew Wilcox (Oracle) rc = migrate_folio_move(put_new_folio, private,
17705dfab109SHuang Ying folio, dst, mode,
17715dfab109SHuang Ying reason, ret_folios);
17725dfab109SHuang Ying /*
17735dfab109SHuang Ying * The rules are:
17745dfab109SHuang Ying * Success: folio will be freed
17755dfab109SHuang Ying * -EAGAIN: stay on the unmap_folios list
17765dfab109SHuang Ying * Other errno: put on ret_folios list
17775dfab109SHuang Ying */
17785dfab109SHuang Ying switch(rc) {
17795dfab109SHuang Ying case -EAGAIN:
17805dfab109SHuang Ying retry++;
1781124abcedSHuang Ying thp_retry += is_thp;
17825dfab109SHuang Ying nr_retry_pages += nr_pages;
17835dfab109SHuang Ying break;
17845dfab109SHuang Ying case MIGRATEPAGE_SUCCESS:
17855dfab109SHuang Ying stats->nr_succeeded += nr_pages;
17865dfab109SHuang Ying stats->nr_thp_succeeded += is_thp;
17875dfab109SHuang Ying break;
17885dfab109SHuang Ying default:
17895dfab109SHuang Ying nr_failed++;
1790124abcedSHuang Ying stats->nr_thp_failed += is_thp;
17915dfab109SHuang Ying stats->nr_failed_pages += nr_pages;
17925dfab109SHuang Ying break;
17935dfab109SHuang Ying }
17945dfab109SHuang Ying dst = dst2;
17955dfab109SHuang Ying dst2 = list_next_entry(dst, lru);
17965dfab109SHuang Ying }
17975dfab109SHuang Ying }
17985dfab109SHuang Ying nr_failed += retry;
17995dfab109SHuang Ying stats->nr_thp_failed += thp_retry;
18005dfab109SHuang Ying stats->nr_failed_pages += nr_retry_pages;
18015dfab109SHuang Ying
1802124abcedSHuang Ying rc = rc_saved ? : nr_failed;
18035dfab109SHuang Ying out:
18045dfab109SHuang Ying /* Cleanup remaining folios */
18055dfab109SHuang Ying dst = list_first_entry(&dst_folios, struct folio, lru);
18065dfab109SHuang Ying dst2 = list_next_entry(dst, lru);
18075dfab109SHuang Ying list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
18089d23fab8SBaolin Wang int old_page_state = 0;
18095dfab109SHuang Ying struct anon_vma *anon_vma = NULL;
18105dfab109SHuang Ying
18119d23fab8SBaolin Wang __migrate_folio_extract(dst, &old_page_state, &anon_vma);
18129d23fab8SBaolin Wang migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
18139d23fab8SBaolin Wang anon_vma, true, ret_folios);
18145dfab109SHuang Ying list_del(&dst->lru);
18154e096ae1SMatthew Wilcox (Oracle) migrate_folio_undo_dst(dst, true, put_new_folio, private);
18165dfab109SHuang Ying dst = dst2;
18175dfab109SHuang Ying dst2 = list_next_entry(dst, lru);
18185dfab109SHuang Ying }
18195dfab109SHuang Ying
182042012e04SHuang Ying return rc;
182142012e04SHuang Ying }
182242012e04SHuang Ying
migrate_pages_sync(struct list_head * from,new_folio_t get_new_folio,free_folio_t put_new_folio,unsigned long private,enum migrate_mode mode,int reason,struct list_head * ret_folios,struct list_head * split_folios,struct migrate_pages_stats * stats)18234e096ae1SMatthew Wilcox (Oracle) static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
18244e096ae1SMatthew Wilcox (Oracle) free_folio_t put_new_folio, unsigned long private,
18254e096ae1SMatthew Wilcox (Oracle) enum migrate_mode mode, int reason,
18264e096ae1SMatthew Wilcox (Oracle) struct list_head *ret_folios, struct list_head *split_folios,
18274e096ae1SMatthew Wilcox (Oracle) struct migrate_pages_stats *stats)
18282ef7dbb2SHuang Ying {
18292ef7dbb2SHuang Ying int rc, nr_failed = 0;
18302ef7dbb2SHuang Ying LIST_HEAD(folios);
18312ef7dbb2SHuang Ying struct migrate_pages_stats astats;
18322ef7dbb2SHuang Ying
18332ef7dbb2SHuang Ying memset(&astats, 0, sizeof(astats));
18342ef7dbb2SHuang Ying /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
18354e096ae1SMatthew Wilcox (Oracle) rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
18362ef7dbb2SHuang Ying reason, &folios, split_folios, &astats,
18372ef7dbb2SHuang Ying NR_MAX_MIGRATE_ASYNC_RETRY);
18382ef7dbb2SHuang Ying stats->nr_succeeded += astats.nr_succeeded;
18392ef7dbb2SHuang Ying stats->nr_thp_succeeded += astats.nr_thp_succeeded;
18402ef7dbb2SHuang Ying stats->nr_thp_split += astats.nr_thp_split;
18412ef7dbb2SHuang Ying if (rc < 0) {
18422ef7dbb2SHuang Ying stats->nr_failed_pages += astats.nr_failed_pages;
18432ef7dbb2SHuang Ying stats->nr_thp_failed += astats.nr_thp_failed;
18442ef7dbb2SHuang Ying list_splice_tail(&folios, ret_folios);
18452ef7dbb2SHuang Ying return rc;
18462ef7dbb2SHuang Ying }
18472ef7dbb2SHuang Ying stats->nr_thp_failed += astats.nr_thp_split;
18482ef7dbb2SHuang Ying nr_failed += astats.nr_thp_split;
18492ef7dbb2SHuang Ying /*
18502ef7dbb2SHuang Ying * Fall back to migrating all failed folios one by one synchronously. All
18512ef7dbb2SHuang Ying * failed folios except split THPs will be retried, so their failure
18522ef7dbb2SHuang Ying * isn't counted.
18532ef7dbb2SHuang Ying */
18542ef7dbb2SHuang Ying list_splice_tail_init(&folios, from);
18552ef7dbb2SHuang Ying while (!list_empty(from)) {
18562ef7dbb2SHuang Ying list_move(from->next, &folios);
18574e096ae1SMatthew Wilcox (Oracle) rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
18582ef7dbb2SHuang Ying private, mode, reason, ret_folios,
18592ef7dbb2SHuang Ying split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
18602ef7dbb2SHuang Ying list_splice_tail_init(&folios, ret_folios);
18612ef7dbb2SHuang Ying if (rc < 0)
18622ef7dbb2SHuang Ying return rc;
18632ef7dbb2SHuang Ying nr_failed += rc;
18642ef7dbb2SHuang Ying }
18652ef7dbb2SHuang Ying
18662ef7dbb2SHuang Ying return nr_failed;
18672ef7dbb2SHuang Ying }
18682ef7dbb2SHuang Ying
1869e24f0b8fSChristoph Lameter /*
1870eaec4e63SHuang Ying * migrate_pages - migrate the folios specified in a list to the free folios
1871c73e5c9cSSrivatsa S. Bhat * supplied as the target for the page migration
1872e24f0b8fSChristoph Lameter *
1873eaec4e63SHuang Ying * @from: The list of folios to be migrated.
18744e096ae1SMatthew Wilcox (Oracle) * @get_new_folio: The function used to allocate free folios to be used
1875eaec4e63SHuang Ying * as the target of the folio migration.
18764e096ae1SMatthew Wilcox (Oracle) * @put_new_folio: The function used to free target folios if migration
187768711a74SDavid Rientjes * fails, or NULL if no special handling is necessary.
18784e096ae1SMatthew Wilcox (Oracle) * @private: Private data to be passed on to get_new_folio()
1879c73e5c9cSSrivatsa S. Bhat * @mode: The migration mode that specifies the constraints for
1880eaec4e63SHuang Ying * folio migration, if any.
1881eaec4e63SHuang Ying * @reason: The reason for folio migration.
1882eaec4e63SHuang Ying * @ret_succeeded: Set to the number of folios migrated successfully if
18835ac95884SYang Shi * the caller passes a non-NULL pointer.
1884e24f0b8fSChristoph Lameter *
1885e5bfff8bSHuang Ying * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1886e5bfff8bSHuang Ying * are movable any more because the list has become empty or no retryable folios
1887e5bfff8bSHuang Ying * exist any more. It is the caller's responsibility to call putback_movable_pages()
1888e5bfff8bSHuang Ying * only if ret != 0.
1889e24f0b8fSChristoph Lameter *
1890eaec4e63SHuang Ying * Returns the number of {normal folio, large folio, hugetlb} that were not
1891eaec4e63SHuang Ying * migrated, or an error code. The number of large folio splits will be
1892eaec4e63SHuang Ying * considered as the number of non-migrated large folios, no matter how many
1893eaec4e63SHuang Ying * split folios of the large folio are migrated successfully.
1894e24f0b8fSChristoph Lameter */
migrate_pages(struct list_head * from,new_folio_t get_new_folio,free_folio_t put_new_folio,unsigned long private,enum migrate_mode mode,int reason,unsigned int * ret_succeeded)18954e096ae1SMatthew Wilcox (Oracle) int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
18964e096ae1SMatthew Wilcox (Oracle) free_folio_t put_new_folio, unsigned long private,
18975ac95884SYang Shi enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1898e24f0b8fSChristoph Lameter {
189942012e04SHuang Ying int rc, rc_gather;
19002ef7dbb2SHuang Ying int nr_pages;
1901eaec4e63SHuang Ying struct folio *folio, *folio2;
190242012e04SHuang Ying LIST_HEAD(folios);
1903eaec4e63SHuang Ying LIST_HEAD(ret_folios);
1904a21d2133SHuang Ying LIST_HEAD(split_folios);
19055b855937SHuang Ying struct migrate_pages_stats stats;
19062d1db3b1SChristoph Lameter
19077bc1aec5SLiam Mark trace_mm_migrate_pages_start(mode, reason);
19087bc1aec5SLiam Mark
19095b855937SHuang Ying memset(&stats, 0, sizeof(stats));
1910e24f0b8fSChristoph Lameter
19114e096ae1SMatthew Wilcox (Oracle) rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
191242012e04SHuang Ying mode, reason, &stats, &ret_folios);
191342012e04SHuang Ying if (rc_gather < 0)
191495a402c3SChristoph Lameter goto out;
1915fb3592c4SHuang Ying
191642012e04SHuang Ying again:
191742012e04SHuang Ying nr_pages = 0;
1918b20a3503SChristoph Lameter list_for_each_entry_safe(folio, folio2, from, lru) {
1919e5bfff8bSHuang Ying /* Retried hugetlb folios will be kept in list */
1920e5bfff8bSHuang Ying if (folio_test_hugetlb(folio)) {
1921e5bfff8bSHuang Ying list_move_tail(&folio->lru, &ret_folios);
1922e5bfff8bSHuang Ying continue;
1923eaec4e63SHuang Ying }
1924f430893bSMiaohe Lin
192542012e04SHuang Ying nr_pages += folio_nr_pages(folio);
19262ef7dbb2SHuang Ying if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1927e24f0b8fSChristoph Lameter break;
1928b20a3503SChristoph Lameter }
19292ef7dbb2SHuang Ying if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1930fb3592c4SHuang Ying list_cut_before(&folios, from, &folio2->lru);
193142012e04SHuang Ying else
193242012e04SHuang Ying list_splice_init(from, &folios);
19332ef7dbb2SHuang Ying if (mode == MIGRATE_ASYNC)
19344e096ae1SMatthew Wilcox (Oracle) rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
19354e096ae1SMatthew Wilcox (Oracle) private, mode, reason, &ret_folios,
19364e096ae1SMatthew Wilcox (Oracle) &split_folios, &stats,
1937a21d2133SHuang Ying NR_MAX_MIGRATE_PAGES_RETRY);
19382ef7dbb2SHuang Ying else
19394e096ae1SMatthew Wilcox (Oracle) rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
19404e096ae1SMatthew Wilcox (Oracle) private, mode, reason, &ret_folios,
19414e096ae1SMatthew Wilcox (Oracle) &split_folios, &stats);
194242012e04SHuang Ying list_splice_tail_init(&folios, &ret_folios);
194342012e04SHuang Ying if (rc < 0) {
194442012e04SHuang Ying rc_gather = rc;
1945a21d2133SHuang Ying list_splice_tail(&split_folios, &ret_folios);
1946b20a3503SChristoph Lameter goto out;
1947b20a3503SChristoph Lameter }
1948a21d2133SHuang Ying if (!list_empty(&split_folios)) {
1949a21d2133SHuang Ying /*
1950a21d2133SHuang Ying * Failure isn't counted since all split folios of a large folio
1951a21d2133SHuang Ying * are counted as 1 failure already. And we only try to migrate
1952a21d2133SHuang Ying * with minimal effort, forcing MIGRATE_ASYNC mode and retrying once.
1953a21d2133SHuang Ying */
19544e096ae1SMatthew Wilcox (Oracle) migrate_pages_batch(&split_folios, get_new_folio,
19554e096ae1SMatthew Wilcox (Oracle) put_new_folio, private, MIGRATE_ASYNC, reason,
19564e096ae1SMatthew Wilcox (Oracle) &ret_folios, NULL, &stats, 1);
1957a21d2133SHuang Ying list_splice_tail_init(&split_folios, &ret_folios);
1958a21d2133SHuang Ying }
195942012e04SHuang Ying rc_gather += rc;
196042012e04SHuang Ying if (!list_empty(from))
196142012e04SHuang Ying goto again;
196295a402c3SChristoph Lameter out:
1963dd4ae78aSYang Shi /*
1964eaec4e63SHuang Ying * Put the permanently failed folios back on the migration list; they
1965dd4ae78aSYang Shi * will be put back to the right list by the caller.
1966dd4ae78aSYang Shi */
1967eaec4e63SHuang Ying list_splice(&ret_folios, from);
1968dd4ae78aSYang Shi
196903e5f82eSBaolin Wang /*
1970eaec4e63SHuang Ying * Return 0 in case all split folios of fail-to-migrate large folios
1971eaec4e63SHuang Ying * are migrated successfully.
197203e5f82eSBaolin Wang */
197303e5f82eSBaolin Wang if (list_empty(from))
197442012e04SHuang Ying rc_gather = 0;
197503e5f82eSBaolin Wang
19765b855937SHuang Ying count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
19775b855937SHuang Ying count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
19785b855937SHuang Ying count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
19795b855937SHuang Ying count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
19805b855937SHuang Ying count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
19815b855937SHuang Ying trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
19825b855937SHuang Ying stats.nr_thp_succeeded, stats.nr_thp_failed,
19835b855937SHuang Ying stats.nr_thp_split, mode, reason);
19847b2a2d4aSMel Gorman
19855ac95884SYang Shi if (ret_succeeded)
19865b855937SHuang Ying *ret_succeeded = stats.nr_succeeded;
19875ac95884SYang Shi
198842012e04SHuang Ying return rc_gather;
1989b20a3503SChristoph Lameter }
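
/*
 * A typical call pattern (an illustrative sketch, not a new interface):
 * callers build a list of isolated folios, pass alloc_migration_target()
 * together with a struct migration_target_control describing the allocation
 * policy, and put back anything that could not be migrated; "target_nid"
 * below is just a placeholder for the caller's chosen node:
 *
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE,
 *	};
 *
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 *
 * do_move_pages_to_node() below follows this pattern (adding __GFP_THISNODE).
 */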
1990b20a3503SChristoph Lameter
alloc_migration_target(struct folio * src,unsigned long private)19914e096ae1SMatthew Wilcox (Oracle) struct folio *alloc_migration_target(struct folio *src, unsigned long private)
1992b4b38223SJoonsoo Kim {
199319fc7bedSJoonsoo Kim struct migration_target_control *mtc;
199419fc7bedSJoonsoo Kim gfp_t gfp_mask;
1995b4b38223SJoonsoo Kim unsigned int order = 0;
199619fc7bedSJoonsoo Kim int nid;
199719fc7bedSJoonsoo Kim int zidx;
199819fc7bedSJoonsoo Kim
199919fc7bedSJoonsoo Kim mtc = (struct migration_target_control *)private;
200019fc7bedSJoonsoo Kim gfp_mask = mtc->gfp_mask;
200119fc7bedSJoonsoo Kim nid = mtc->nid;
200219fc7bedSJoonsoo Kim if (nid == NUMA_NO_NODE)
20034e096ae1SMatthew Wilcox (Oracle) nid = folio_nid(src);
2004b4b38223SJoonsoo Kim
20054e096ae1SMatthew Wilcox (Oracle) if (folio_test_hugetlb(src)) {
20064e096ae1SMatthew Wilcox (Oracle) struct hstate *h = folio_hstate(src);
2007d92bbc27SJoonsoo Kim
200819fc7bedSJoonsoo Kim gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
20094e096ae1SMatthew Wilcox (Oracle) return alloc_hugetlb_folio_nodemask(h, nid,
2010e37d3e83SSidhartha Kumar mtc->nmask, gfp_mask);
2011d92bbc27SJoonsoo Kim }
2012b4b38223SJoonsoo Kim
20134e096ae1SMatthew Wilcox (Oracle) if (folio_test_large(src)) {
20149933a0c8SJoonsoo Kim /*
20159933a0c8SJoonsoo Kim * clear __GFP_RECLAIM to make the migration callback
20169933a0c8SJoonsoo Kim * consistent with regular THP allocations.
20179933a0c8SJoonsoo Kim */
20189933a0c8SJoonsoo Kim gfp_mask &= ~__GFP_RECLAIM;
2019b4b38223SJoonsoo Kim gfp_mask |= GFP_TRANSHUGE;
20204e096ae1SMatthew Wilcox (Oracle) order = folio_order(src);
2021b4b38223SJoonsoo Kim }
20224e096ae1SMatthew Wilcox (Oracle) zidx = zone_idx(folio_zone(src));
202319fc7bedSJoonsoo Kim if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2024b4b38223SJoonsoo Kim gfp_mask |= __GFP_HIGHMEM;
2025b4b38223SJoonsoo Kim
20264e096ae1SMatthew Wilcox (Oracle) return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2027b4b38223SJoonsoo Kim }
2028b4b38223SJoonsoo Kim
2029742755a1SChristoph Lameter #ifdef CONFIG_NUMA
2030742755a1SChristoph Lameter
store_status(int __user * status,int start,int value,int nr)2031a49bd4d7SMichal Hocko static int store_status(int __user *status, int start, int value, int nr)
2032742755a1SChristoph Lameter {
2033a49bd4d7SMichal Hocko while (nr-- > 0) {
2034a49bd4d7SMichal Hocko if (put_user(value, status + start))
2035a49bd4d7SMichal Hocko return -EFAULT;
2036a49bd4d7SMichal Hocko start++;
2037a49bd4d7SMichal Hocko }
2038742755a1SChristoph Lameter
2039a49bd4d7SMichal Hocko return 0;
2040a49bd4d7SMichal Hocko }
2041742755a1SChristoph Lameter
2042a49bd4d7SMichal Hocko static int do_move_pages_to_node(struct mm_struct *mm,
2043a49bd4d7SMichal Hocko struct list_head *pagelist, int node)
2044a49bd4d7SMichal Hocko {
2045a49bd4d7SMichal Hocko int err;
2046a0976311SJoonsoo Kim struct migration_target_control mtc = {
2047a0976311SJoonsoo Kim .nid = node,
2048a0976311SJoonsoo Kim .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2049a0976311SJoonsoo Kim };
2050742755a1SChristoph Lameter
2051a0976311SJoonsoo Kim err = migrate_pages(pagelist, alloc_migration_target, NULL,
20525ac95884SYang Shi (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2053a49bd4d7SMichal Hocko if (err)
2054a49bd4d7SMichal Hocko putback_movable_pages(pagelist);
2055a49bd4d7SMichal Hocko return err;
2056742755a1SChristoph Lameter }
2057742755a1SChristoph Lameter
2058742755a1SChristoph Lameter /*
2059a49bd4d7SMichal Hocko * Resolves the given address to a struct page, isolates it from the LRU and
2060a49bd4d7SMichal Hocko * puts it on the given pagelist.
2061e0153fc2SYang Shi * Returns:
2062e0153fc2SYang Shi * errno - if the page cannot be found/isolated
2063e0153fc2SYang Shi * 0 - when it doesn't have to be migrated because it is already on the
2064e0153fc2SYang Shi * target node
2065e0153fc2SYang Shi * 1 - when it has been queued
2066742755a1SChristoph Lameter */
2067428e106aSKirill A. Shutemov static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
2068a49bd4d7SMichal Hocko int node, struct list_head *pagelist, bool migrate_all)
2069742755a1SChristoph Lameter {
2070742755a1SChristoph Lameter struct vm_area_struct *vma;
2071428e106aSKirill A. Shutemov unsigned long addr;
2072742755a1SChristoph Lameter struct page *page;
2073a49bd4d7SMichal Hocko int err;
20749747b9e9SBaolin Wang bool isolated;
2075742755a1SChristoph Lameter
2076d8ed45c5SMichel Lespinasse mmap_read_lock(mm);
2077428e106aSKirill A. Shutemov addr = (unsigned long)untagged_addr_remote(mm, p);
2078428e106aSKirill A. Shutemov
2079742755a1SChristoph Lameter err = -EFAULT;
2080cb1c37b1SMiaohe Lin vma = vma_lookup(mm, addr);
2081cb1c37b1SMiaohe Lin if (!vma || !vma_migratable(vma))
2082a49bd4d7SMichal Hocko goto out;
2083742755a1SChristoph Lameter
2084d899844eSKirill A. Shutemov /* FOLL_DUMP to ignore special (like zero) pages */
208587d2762eSMiaohe Lin page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
208689f5b7daSLinus Torvalds
208789f5b7daSLinus Torvalds err = PTR_ERR(page);
208889f5b7daSLinus Torvalds if (IS_ERR(page))
2089a49bd4d7SMichal Hocko goto out;
209089f5b7daSLinus Torvalds
2091742755a1SChristoph Lameter err = -ENOENT;
2092f7091ed6SHaiyue Wang if (!page)
2093a49bd4d7SMichal Hocko goto out;
2094742755a1SChristoph Lameter
2095f7091ed6SHaiyue Wang if (is_zone_device_page(page))
2096f7091ed6SHaiyue Wang goto out_putpage;
2097f7091ed6SHaiyue Wang
2098a49bd4d7SMichal Hocko err = 0;
2099a49bd4d7SMichal Hocko if (page_to_nid(page) == node)
2100a49bd4d7SMichal Hocko goto out_putpage;
2101742755a1SChristoph Lameter
2102742755a1SChristoph Lameter err = -EACCES;
2103a49bd4d7SMichal Hocko if (page_mapcount(page) > 1 && !migrate_all)
2104a49bd4d7SMichal Hocko goto out_putpage;
2105742755a1SChristoph Lameter
2106e632a938SNaoya Horiguchi if (PageHuge(page)) {
2107e8db67ebSNaoya Horiguchi if (PageHead(page)) {
21089747b9e9SBaolin Wang isolated = isolate_hugetlb(page_folio(page), pagelist);
21099747b9e9SBaolin Wang err = isolated ? 1 : -EBUSY;
2110e8db67ebSNaoya Horiguchi }
2111a49bd4d7SMichal Hocko } else {
2112a49bd4d7SMichal Hocko struct page *head;
2113e632a938SNaoya Horiguchi
2114e8db67ebSNaoya Horiguchi head = compound_head(page);
2115f7f9c00dSBaolin Wang isolated = isolate_lru_page(head);
2116f7f9c00dSBaolin Wang if (!isolated) {
2117f7f9c00dSBaolin Wang err = -EBUSY;
2118a49bd4d7SMichal Hocko goto out_putpage;
2119f7f9c00dSBaolin Wang }
2120a49bd4d7SMichal Hocko
2121e0153fc2SYang Shi err = 1;
2122a49bd4d7SMichal Hocko list_add_tail(&head->lru, pagelist);
2123e8db67ebSNaoya Horiguchi mod_node_page_state(page_pgdat(head),
21249de4f22aSHuang Ying NR_ISOLATED_ANON + page_is_file_lru(head),
21256c357848SMatthew Wilcox (Oracle) thp_nr_pages(head));
21266d9c285aSKOSAKI Motohiro }
2127a49bd4d7SMichal Hocko out_putpage:
2128742755a1SChristoph Lameter /*
2129742755a1SChristoph Lameter * Either remove the duplicate refcount from
2130742755a1SChristoph Lameter * isolate_lru_page() or drop the page ref if it was
2131742755a1SChristoph Lameter * not isolated.
2132742755a1SChristoph Lameter */
2133742755a1SChristoph Lameter put_page(page);
2134a49bd4d7SMichal Hocko out:
2135d8ed45c5SMichel Lespinasse mmap_read_unlock(mm);
2136742755a1SChristoph Lameter return err;
2137742755a1SChristoph Lameter }
2138742755a1SChristoph Lameter
21397ca8783aSWei Yang static int move_pages_and_store_status(struct mm_struct *mm, int node,
21407ca8783aSWei Yang struct list_head *pagelist, int __user *status,
21417ca8783aSWei Yang int start, int i, unsigned long nr_pages)
21427ca8783aSWei Yang {
21437ca8783aSWei Yang int err;
21447ca8783aSWei Yang
21455d7ae891SWei Yang if (list_empty(pagelist))
21465d7ae891SWei Yang return 0;
21475d7ae891SWei Yang
21487ca8783aSWei Yang err = do_move_pages_to_node(mm, pagelist, node);
21497ca8783aSWei Yang if (err) {
21507ca8783aSWei Yang /*
21517ca8783aSWei Yang * A positive err means the number of pages
21527ca8783aSWei Yang * that failed to migrate. Since we are going
21537ca8783aSWei Yang * to abort and return the number of
2154ab9dd4f8SLong Li * non-migrated pages, we need to include the
21557ca8783aSWei Yang * rest of the nr_pages that have not been
21567ca8783aSWei Yang * attempted as well.
21577ca8783aSWei Yang */
21587ca8783aSWei Yang if (err > 0)
2159a7504ed1SHuang Ying err += nr_pages - i;
21607ca8783aSWei Yang return err;
21617ca8783aSWei Yang }
21627ca8783aSWei Yang return store_status(status, start, node, i - start);
21637ca8783aSWei Yang }
21647ca8783aSWei Yang
2165742755a1SChristoph Lameter /*
21665e9a0f02SBrice Goglin * Migrate an array of page addresses onto an array of nodes and fill
21675e9a0f02SBrice Goglin * in the corresponding array of status.
21685e9a0f02SBrice Goglin */
21693268c63eSChristoph Lameter static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
21705e9a0f02SBrice Goglin unsigned long nr_pages,
21715e9a0f02SBrice Goglin const void __user * __user *pages,
21725e9a0f02SBrice Goglin const int __user *nodes,
21735e9a0f02SBrice Goglin int __user *status, int flags)
21745e9a0f02SBrice Goglin {
2175229e2253SGregory Price compat_uptr_t __user *compat_pages = (void __user *)pages;
2176a49bd4d7SMichal Hocko int current_node = NUMA_NO_NODE;
2177a49bd4d7SMichal Hocko LIST_HEAD(pagelist);
2178a49bd4d7SMichal Hocko int start, i;
2179a49bd4d7SMichal Hocko int err = 0, err1;
218035282a2dSBrice Goglin
2181361a2a22SMinchan Kim lru_cache_disable();
218235282a2dSBrice Goglin
2183a49bd4d7SMichal Hocko for (i = start = 0; i < nr_pages; i++) {
21845e9a0f02SBrice Goglin const void __user *p;
21855e9a0f02SBrice Goglin int node;
21865e9a0f02SBrice Goglin
21873140a227SBrice Goglin err = -EFAULT;
2188229e2253SGregory Price if (in_compat_syscall()) {
2189229e2253SGregory Price compat_uptr_t cp;
2190229e2253SGregory Price
2191229e2253SGregory Price if (get_user(cp, compat_pages + i))
2192229e2253SGregory Price goto out_flush;
2193229e2253SGregory Price
2194229e2253SGregory Price p = compat_ptr(cp);
2195229e2253SGregory Price } else {
2196a49bd4d7SMichal Hocko if (get_user(p, pages + i))
2197a49bd4d7SMichal Hocko goto out_flush;
2198229e2253SGregory Price }
2199a49bd4d7SMichal Hocko if (get_user(node, nodes + i))
2200a49bd4d7SMichal Hocko goto out_flush;
22015e9a0f02SBrice Goglin
22025e9a0f02SBrice Goglin err = -ENODEV;
22036f5a55f1SLinus Torvalds if (node < 0 || node >= MAX_NUMNODES)
2204a49bd4d7SMichal Hocko goto out_flush;
2205389162c2SLai Jiangshan if (!node_state(node, N_MEMORY))
2206a49bd4d7SMichal Hocko goto out_flush;
22075e9a0f02SBrice Goglin
22085e9a0f02SBrice Goglin err = -EACCES;
22095e9a0f02SBrice Goglin if (!node_isset(node, task_nodes))
2210a49bd4d7SMichal Hocko goto out_flush;
22115e9a0f02SBrice Goglin
2212a49bd4d7SMichal Hocko if (current_node == NUMA_NO_NODE) {
2213a49bd4d7SMichal Hocko current_node = node;
2214a49bd4d7SMichal Hocko start = i;
2215a49bd4d7SMichal Hocko } else if (node != current_node) {
22167ca8783aSWei Yang err = move_pages_and_store_status(mm, current_node,
22177ca8783aSWei Yang &pagelist, status, start, i, nr_pages);
2218a49bd4d7SMichal Hocko if (err)
2219a49bd4d7SMichal Hocko goto out;
2220a49bd4d7SMichal Hocko start = i;
2221a49bd4d7SMichal Hocko current_node = node;
22225e9a0f02SBrice Goglin }
22235e9a0f02SBrice Goglin
2224a49bd4d7SMichal Hocko /*
2225a49bd4d7SMichal Hocko * Errors in the page lookup or isolation are not fatal and we simply
2226a49bd4d7SMichal Hocko * report them via status
2227a49bd4d7SMichal Hocko */
2228428e106aSKirill A. Shutemov err = add_page_for_migration(mm, p, current_node, &pagelist,
2229428e106aSKirill A. Shutemov flags & MPOL_MF_MOVE_ALL);
2230e0153fc2SYang Shi
2231d08221a0SWei Yang if (err > 0) {
2232e0153fc2SYang Shi /* The page is successfully queued for migration */
2233e0153fc2SYang Shi continue;
2234e0153fc2SYang Shi }
22353140a227SBrice Goglin
2236d08221a0SWei Yang /*
223765462462SJohn Hubbard * The move_pages() man page does not have an -EEXIST choice, so
223865462462SJohn Hubbard * use -EFAULT instead.
223965462462SJohn Hubbard */
224065462462SJohn Hubbard if (err == -EEXIST)
224165462462SJohn Hubbard err = -EFAULT;
224265462462SJohn Hubbard
224365462462SJohn Hubbard /*
2244d08221a0SWei Yang * If the page is already on the target node (!err), store the
2245d08221a0SWei Yang * node, otherwise, store the err.
2246d08221a0SWei Yang */
2247d08221a0SWei Yang err = store_status(status, i, err ? : current_node, 1);
2248a49bd4d7SMichal Hocko if (err)
2249a49bd4d7SMichal Hocko goto out_flush;
22503140a227SBrice Goglin
22517ca8783aSWei Yang err = move_pages_and_store_status(mm, current_node, &pagelist,
22527ca8783aSWei Yang status, start, i, nr_pages);
2253a7504ed1SHuang Ying if (err) {
2254a7504ed1SHuang Ying /* We have accounted for page i */
2255a7504ed1SHuang Ying if (err > 0)
2256a7504ed1SHuang Ying err--;
2257a49bd4d7SMichal Hocko goto out;
2258a7504ed1SHuang Ying }
2259a49bd4d7SMichal Hocko current_node = NUMA_NO_NODE;
22603140a227SBrice Goglin }
2261a49bd4d7SMichal Hocko out_flush:
2262a49bd4d7SMichal Hocko /* Make sure we do not overwrite the existing error */
22637ca8783aSWei Yang err1 = move_pages_and_store_status(mm, current_node, &pagelist,
22647ca8783aSWei Yang status, start, i, nr_pages);
2265dfe9aa23SWei Yang if (err >= 0)
2266a49bd4d7SMichal Hocko err = err1;
22675e9a0f02SBrice Goglin out:
2268361a2a22SMinchan Kim lru_cache_enable();
22695e9a0f02SBrice Goglin return err;
22705e9a0f02SBrice Goglin }
22715e9a0f02SBrice Goglin
22725e9a0f02SBrice Goglin /*
22732f007e74SBrice Goglin * Determine the nodes of an array of pages and store them in an array of status.
2274742755a1SChristoph Lameter */
227580bba129SBrice Goglin static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
227680bba129SBrice Goglin const void __user **pages, int *status)
2277742755a1SChristoph Lameter {
22782f007e74SBrice Goglin unsigned long i;
2279742755a1SChristoph Lameter
2280d8ed45c5SMichel Lespinasse mmap_read_lock(mm);
22812f007e74SBrice Goglin
22822f007e74SBrice Goglin for (i = 0; i < nr_pages; i++) {
228380bba129SBrice Goglin unsigned long addr = (unsigned long)(*pages);
22842f007e74SBrice Goglin struct vm_area_struct *vma;
22852f007e74SBrice Goglin struct page *page;
2286c095adbcSKOSAKI Motohiro int err = -EFAULT;
22872f007e74SBrice Goglin
2288059b8b48SLiam Howlett vma = vma_lookup(mm, addr);
2289059b8b48SLiam Howlett if (!vma)
2290742755a1SChristoph Lameter goto set_status;
2291742755a1SChristoph Lameter
2292d899844eSKirill A. Shutemov /* FOLL_DUMP to ignore special (like zero) pages */
229316fd6b31SBaolin Wang page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
229489f5b7daSLinus Torvalds
229589f5b7daSLinus Torvalds err = PTR_ERR(page);
229689f5b7daSLinus Torvalds if (IS_ERR(page))
229789f5b7daSLinus Torvalds goto set_status;
229889f5b7daSLinus Torvalds
2299f7091ed6SHaiyue Wang err = -ENOENT;
2300f7091ed6SHaiyue Wang if (!page)
2301f7091ed6SHaiyue Wang goto set_status;
2302f7091ed6SHaiyue Wang
2303f7091ed6SHaiyue Wang if (!is_zone_device_page(page))
23044cd61484SMiaohe Lin err = page_to_nid(page);
2305f7091ed6SHaiyue Wang
23064cd61484SMiaohe Lin put_page(page);
2307742755a1SChristoph Lameter set_status:
230880bba129SBrice Goglin *status = err;
230980bba129SBrice Goglin
231080bba129SBrice Goglin pages++;
231180bba129SBrice Goglin status++;
231280bba129SBrice Goglin }
231380bba129SBrice Goglin
2314d8ed45c5SMichel Lespinasse mmap_read_unlock(mm);
231580bba129SBrice Goglin }
231680bba129SBrice Goglin
23175b1b561bSArnd Bergmann static int get_compat_pages_array(const void __user *chunk_pages[],
23185b1b561bSArnd Bergmann const void __user * __user *pages,
23195b1b561bSArnd Bergmann unsigned long chunk_nr)
23205b1b561bSArnd Bergmann {
23215b1b561bSArnd Bergmann compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
23225b1b561bSArnd Bergmann compat_uptr_t p;
23235b1b561bSArnd Bergmann int i;
23245b1b561bSArnd Bergmann
23255b1b561bSArnd Bergmann for (i = 0; i < chunk_nr; i++) {
23265b1b561bSArnd Bergmann if (get_user(p, pages32 + i))
23275b1b561bSArnd Bergmann return -EFAULT;
23285b1b561bSArnd Bergmann chunk_pages[i] = compat_ptr(p);
23295b1b561bSArnd Bergmann }
23305b1b561bSArnd Bergmann
23315b1b561bSArnd Bergmann return 0;
23325b1b561bSArnd Bergmann }
23335b1b561bSArnd Bergmann
233480bba129SBrice Goglin /*
233580bba129SBrice Goglin * Determine the nodes of a user array of pages and store them in
233680bba129SBrice Goglin * a user array of status.
233780bba129SBrice Goglin */
233880bba129SBrice Goglin static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
233980bba129SBrice Goglin const void __user * __user *pages,
234080bba129SBrice Goglin int __user *status)
234180bba129SBrice Goglin {
23423eefb826SMiaohe Lin #define DO_PAGES_STAT_CHUNK_NR 16UL
234380bba129SBrice Goglin const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
234480bba129SBrice Goglin int chunk_status[DO_PAGES_STAT_CHUNK_NR];
234580bba129SBrice Goglin
234687b8d1adSH. Peter Anvin while (nr_pages) {
23473eefb826SMiaohe Lin unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
234887b8d1adSH. Peter Anvin
23495b1b561bSArnd Bergmann if (in_compat_syscall()) {
23505b1b561bSArnd Bergmann if (get_compat_pages_array(chunk_pages, pages,
23515b1b561bSArnd Bergmann chunk_nr))
235287b8d1adSH. Peter Anvin break;
23535b1b561bSArnd Bergmann } else {
23545b1b561bSArnd Bergmann if (copy_from_user(chunk_pages, pages,
23555b1b561bSArnd Bergmann chunk_nr * sizeof(*chunk_pages)))
23565b1b561bSArnd Bergmann break;
23575b1b561bSArnd Bergmann }
235880bba129SBrice Goglin
235980bba129SBrice Goglin do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
236080bba129SBrice Goglin
236187b8d1adSH. Peter Anvin if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
236287b8d1adSH. Peter Anvin break;
2363742755a1SChristoph Lameter
236487b8d1adSH. Peter Anvin pages += chunk_nr;
236587b8d1adSH. Peter Anvin status += chunk_nr;
236687b8d1adSH. Peter Anvin nr_pages -= chunk_nr;
236787b8d1adSH. Peter Anvin }
236887b8d1adSH. Peter Anvin return nr_pages ? -EFAULT : 0;
2369742755a1SChristoph Lameter }
2370742755a1SChristoph Lameter
23714dc200ceSMiaohe Lin static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
23724dc200ceSMiaohe Lin {
23734dc200ceSMiaohe Lin struct task_struct *task;
23744dc200ceSMiaohe Lin struct mm_struct *mm;
23754dc200ceSMiaohe Lin
23764dc200ceSMiaohe Lin /*
23774dc200ceSMiaohe Lin * There is no need to check if the current process has the right to modify
23784dc200ceSMiaohe Lin * the specified process when they are the same.
23794dc200ceSMiaohe Lin */
23804dc200ceSMiaohe Lin if (!pid) {
23814dc200ceSMiaohe Lin mmget(current->mm);
23824dc200ceSMiaohe Lin *mem_nodes = cpuset_mems_allowed(current);
23834dc200ceSMiaohe Lin return current->mm;
23844dc200ceSMiaohe Lin }
23854dc200ceSMiaohe Lin
23864dc200ceSMiaohe Lin /* Find the mm_struct */
23874dc200ceSMiaohe Lin rcu_read_lock();
23884dc200ceSMiaohe Lin task = find_task_by_vpid(pid);
23894dc200ceSMiaohe Lin if (!task) {
23904dc200ceSMiaohe Lin rcu_read_unlock();
23914dc200ceSMiaohe Lin return ERR_PTR(-ESRCH);
23924dc200ceSMiaohe Lin }
23934dc200ceSMiaohe Lin get_task_struct(task);
23944dc200ceSMiaohe Lin
23954dc200ceSMiaohe Lin /*
23964dc200ceSMiaohe Lin * Check if this process has the right to modify the specified
23974dc200ceSMiaohe Lin * process. Use the regular "ptrace_may_access()" checks.
23984dc200ceSMiaohe Lin */
23994dc200ceSMiaohe Lin if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
24004dc200ceSMiaohe Lin rcu_read_unlock();
24014dc200ceSMiaohe Lin mm = ERR_PTR(-EPERM);
24024dc200ceSMiaohe Lin goto out;
24034dc200ceSMiaohe Lin }
24044dc200ceSMiaohe Lin rcu_read_unlock();
24054dc200ceSMiaohe Lin
24064dc200ceSMiaohe Lin mm = ERR_PTR(security_task_movememory(task));
24074dc200ceSMiaohe Lin if (IS_ERR(mm))
24084dc200ceSMiaohe Lin goto out;
24094dc200ceSMiaohe Lin *mem_nodes = cpuset_mems_allowed(task);
24104dc200ceSMiaohe Lin mm = get_task_mm(task);
24114dc200ceSMiaohe Lin out:
24124dc200ceSMiaohe Lin put_task_struct(task);
24134dc200ceSMiaohe Lin if (!mm)
24144dc200ceSMiaohe Lin mm = ERR_PTR(-EINVAL);
24154dc200ceSMiaohe Lin return mm;
24164dc200ceSMiaohe Lin }
24174dc200ceSMiaohe Lin
2418742755a1SChristoph Lameter /*
2419742755a1SChristoph Lameter * Move a list of pages in the address space of the currently executing
2420742755a1SChristoph Lameter * process.
2421742755a1SChristoph Lameter */
24227addf443SDominik Brodowski static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
24237addf443SDominik Brodowski const void __user * __user *pages,
24247addf443SDominik Brodowski const int __user *nodes,
24257addf443SDominik Brodowski int __user *status, int flags)
2426742755a1SChristoph Lameter {
2427742755a1SChristoph Lameter struct mm_struct *mm;
24285e9a0f02SBrice Goglin int err;
24293268c63eSChristoph Lameter nodemask_t task_nodes;
2430742755a1SChristoph Lameter
2431742755a1SChristoph Lameter /* Check flags */
2432742755a1SChristoph Lameter if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2433742755a1SChristoph Lameter return -EINVAL;
2434742755a1SChristoph Lameter
2435742755a1SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2436742755a1SChristoph Lameter return -EPERM;
2437742755a1SChristoph Lameter
24384dc200ceSMiaohe Lin mm = find_mm_struct(pid, &task_nodes);
24394dc200ceSMiaohe Lin if (IS_ERR(mm))
24404dc200ceSMiaohe Lin return PTR_ERR(mm);
24416e8b09eaSSasha Levin
24423268c63eSChristoph Lameter if (nodes)
24433268c63eSChristoph Lameter err = do_pages_move(mm, task_nodes, nr_pages, pages,
24443268c63eSChristoph Lameter nodes, status, flags);
24453268c63eSChristoph Lameter else
24465e9a0f02SBrice Goglin err = do_pages_stat(mm, nr_pages, pages, status);
24473268c63eSChristoph Lameter
24483268c63eSChristoph Lameter mmput(mm);
24493268c63eSChristoph Lameter return err;
2450742755a1SChristoph Lameter }
2451742755a1SChristoph Lameter
24527addf443SDominik Brodowski SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
24537addf443SDominik Brodowski const void __user * __user *, pages,
24547addf443SDominik Brodowski const int __user *, nodes,
24557addf443SDominik Brodowski int __user *, status, int, flags)
24567addf443SDominik Brodowski {
24577addf443SDominik Brodowski return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
24587addf443SDominik Brodowski }
24597addf443SDominik Brodowski
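/*
 * Illustrative userspace sketch, not part of the original file: the syscall
 * above is normally reached through libnuma's move_pages() wrapper.  With a
 * nodes array it moves pages (do_pages_move()); with nodes == NULL it only
 * reports each page's node (do_pages_stat()).  Variable names are examples.
 *
 *	#include <numaif.h>
 *
 *	void *pages[1] = { buf };	page-aligned address in this process
 *	int nodes[1] = { 1 };		desired target node
 *	int status[1];
 *
 *	Move the page to node 1; status[0] becomes the node it ended up on,
 *	or a negative errno for that page:
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 *	Query only; status[0] reports where buf currently resides:
 *	rc = move_pages(0, 1, pages, NULL, status, 0);
 */
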
24607039e1dbSPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
24617039e1dbSPeter Zijlstra /*
24627039e1dbSPeter Zijlstra * Returns true if this is a safe migration target node for misplaced NUMA
2463bc53008eSWei Yang * pages. Currently it only checks the watermarks, which is crude.
24647039e1dbSPeter Zijlstra */
24657039e1dbSPeter Zijlstra static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
24663abef4e6SMel Gorman unsigned long nr_migrate_pages)
24677039e1dbSPeter Zijlstra {
24687039e1dbSPeter Zijlstra int z;
2469599d0c95SMel Gorman
24707039e1dbSPeter Zijlstra for (z = pgdat->nr_zones - 1; z >= 0; z--) {
24717039e1dbSPeter Zijlstra struct zone *zone = pgdat->node_zones + z;
24727039e1dbSPeter Zijlstra
2473bc53008eSWei Yang if (!managed_zone(zone))
24747039e1dbSPeter Zijlstra continue;
24757039e1dbSPeter Zijlstra
24767039e1dbSPeter Zijlstra /* Avoid waking kswapd: require room for nr_migrate_pages above the high watermark. */
24777039e1dbSPeter Zijlstra if (!zone_watermark_ok(zone, 0,
24787039e1dbSPeter Zijlstra high_wmark_pages(zone) +
24797039e1dbSPeter Zijlstra nr_migrate_pages,
2480bfe9d006SHuang Ying ZONE_MOVABLE, 0))
24817039e1dbSPeter Zijlstra continue;
24827039e1dbSPeter Zijlstra return true;
24837039e1dbSPeter Zijlstra }
24847039e1dbSPeter Zijlstra return false;
24857039e1dbSPeter Zijlstra }
24867039e1dbSPeter Zijlstra
24874e096ae1SMatthew Wilcox (Oracle) static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2488666feb21SMichal Hocko unsigned long data)
24897039e1dbSPeter Zijlstra {
24907039e1dbSPeter Zijlstra int nid = (int) data;
24914e096ae1SMatthew Wilcox (Oracle) int order = folio_order(src);
2492c185e494SMatthew Wilcox (Oracle) gfp_t gfp = __GFP_THISNODE;
24937039e1dbSPeter Zijlstra
2494c185e494SMatthew Wilcox (Oracle) if (order > 0)
2495c185e494SMatthew Wilcox (Oracle) gfp |= GFP_TRANSHUGE_LIGHT;
2496c185e494SMatthew Wilcox (Oracle) else {
2497c185e494SMatthew Wilcox (Oracle) gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2498c185e494SMatthew Wilcox (Oracle) __GFP_NOWARN;
2499c185e494SMatthew Wilcox (Oracle) gfp &= ~__GFP_RECLAIM;
25007039e1dbSPeter Zijlstra }
25014e096ae1SMatthew Wilcox (Oracle) return __folio_alloc_node(gfp, order, nid);
2502c5b5a3ddSYang Shi }
2503c5b5a3ddSYang Shi
25041c30e017SMel Gorman static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2505b32967ffSMel Gorman {
25062b9b624fSBaolin Wang int nr_pages = thp_nr_pages(page);
2507c574bbe9SHuang Ying int order = compound_order(page);
2508b32967ffSMel Gorman
2509c574bbe9SHuang Ying VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
25103abef4e6SMel Gorman
2511662aeea7SYang Shi /* Do not migrate THP mapped by multiple processes */
2512662aeea7SYang Shi if (PageTransHuge(page) && total_mapcount(page) > 1)
2513662aeea7SYang Shi return 0;
2514662aeea7SYang Shi
2515b32967ffSMel Gorman /* Avoid migrating to a node that is nearly full */
2516c574bbe9SHuang Ying if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2517c574bbe9SHuang Ying int z;
2518c574bbe9SHuang Ying
2519c574bbe9SHuang Ying if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2520340ef390SHugh Dickins return 0;
2521c574bbe9SHuang Ying for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2522bc53008eSWei Yang if (managed_zone(pgdat->node_zones + z))
2523c574bbe9SHuang Ying break;
2524c574bbe9SHuang Ying }
2525d6159bd4SByungchul Park
2526d6159bd4SByungchul Park /*
2527d6159bd4SByungchul Park * If there are no managed zones, it should not proceed
2528d6159bd4SByungchul Park * further.
2529d6159bd4SByungchul Park */
2530d6159bd4SByungchul Park if (z < 0)
2531d6159bd4SByungchul Park return 0;
2532d6159bd4SByungchul Park
2533c574bbe9SHuang Ying wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
2534c574bbe9SHuang Ying return 0;
2535c574bbe9SHuang Ying }
2536b32967ffSMel Gorman
2537f7f9c00dSBaolin Wang if (!isolate_lru_page(page))
2538340ef390SHugh Dickins return 0;
2539340ef390SHugh Dickins
2540b75454e1SMiaohe Lin mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
25412b9b624fSBaolin Wang nr_pages);
2542b32967ffSMel Gorman
2543b32967ffSMel Gorman /*
2544340ef390SHugh Dickins * Isolating the page has taken another reference, so the
2545340ef390SHugh Dickins * caller's reference can be safely dropped without the page
2546340ef390SHugh Dickins * disappearing underneath us during migration.
2547b32967ffSMel Gorman */
2548b32967ffSMel Gorman put_page(page);
2549340ef390SHugh Dickins return 1;
2550b32967ffSMel Gorman }
2551b32967ffSMel Gorman
2552a8f60772SMel Gorman /*
25537039e1dbSPeter Zijlstra * Attempt to migrate a misplaced page to the specified destination
25547039e1dbSPeter Zijlstra * node. Caller is expected to have an elevated reference count on
25557039e1dbSPeter Zijlstra * the page that will be dropped by this function before returning.
25567039e1dbSPeter Zijlstra */
25571bc115d8SMel Gorman int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
25581bc115d8SMel Gorman int node)
25597039e1dbSPeter Zijlstra {
2560a8f60772SMel Gorman pg_data_t *pgdat = NODE_DATA(node);
2561340ef390SHugh Dickins int isolated;
2562b32967ffSMel Gorman int nr_remaining;
2563e39bb6beSHuang Ying unsigned int nr_succeeded;
25647039e1dbSPeter Zijlstra LIST_HEAD(migratepages);
2565b5916c02SAneesh Kumar K.V int nr_pages = thp_nr_pages(page);
2566c5b5a3ddSYang Shi
2567c5b5a3ddSYang Shi /*
25681bc115d8SMel Gorman * Don't migrate file pages that are mapped in multiple processes
25691bc115d8SMel Gorman * with execute permissions as they are probably shared libraries.
25707039e1dbSPeter Zijlstra */
25717ee820eeSMiaohe Lin if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
25727ee820eeSMiaohe Lin (vma->vm_flags & VM_EXEC))
25737039e1dbSPeter Zijlstra goto out;
25747039e1dbSPeter Zijlstra
2575a8f60772SMel Gorman /*
257609a913a7SMel Gorman * Also do not migrate dirty pages, as not all filesystems can move
257709a913a7SMel Gorman * dirty pages in MIGRATE_ASYNC mode, so attempting it is a waste of cycles.
257809a913a7SMel Gorman */
25799de4f22aSHuang Ying if (page_is_file_lru(page) && PageDirty(page))
258009a913a7SMel Gorman goto out;
258109a913a7SMel Gorman
2582b32967ffSMel Gorman isolated = numamigrate_isolate_page(pgdat, page);
2583b32967ffSMel Gorman if (!isolated)
25847039e1dbSPeter Zijlstra goto out;
25857039e1dbSPeter Zijlstra
25867039e1dbSPeter Zijlstra list_add(&page->lru, &migratepages);
25874e096ae1SMatthew Wilcox (Oracle) nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2588c185e494SMatthew Wilcox (Oracle) NULL, node, MIGRATE_ASYNC,
2589c185e494SMatthew Wilcox (Oracle) MR_NUMA_MISPLACED, &nr_succeeded);
25907039e1dbSPeter Zijlstra if (nr_remaining) {
259159c82b70SJoonsoo Kim if (!list_empty(&migratepages)) {
259259c82b70SJoonsoo Kim list_del(&page->lru);
2593c5fc5c3aSYang Shi mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2594c5fc5c3aSYang Shi page_is_file_lru(page), -nr_pages);
259559c82b70SJoonsoo Kim putback_lru_page(page);
259659c82b70SJoonsoo Kim }
25977039e1dbSPeter Zijlstra isolated = 0;
2598e39bb6beSHuang Ying }
2599e39bb6beSHuang Ying if (nr_succeeded) {
2600e39bb6beSHuang Ying count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2601e39bb6beSHuang Ying if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2602e39bb6beSHuang Ying mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2603e39bb6beSHuang Ying nr_succeeded);
2604e39bb6beSHuang Ying }
26057039e1dbSPeter Zijlstra BUG_ON(!list_empty(&migratepages));
26067039e1dbSPeter Zijlstra return isolated;
2607340ef390SHugh Dickins
2608340ef390SHugh Dickins out:
2609340ef390SHugh Dickins put_page(page);
2610340ef390SHugh Dickins return 0;
26117039e1dbSPeter Zijlstra }
2612220018d3SMel Gorman #endif /* CONFIG_NUMA_BALANCING */
26137d6e2d96SOscar Salvador #endif /* CONFIG_NUMA */