// SPDX-License-Identifier: GPL-2.0
/*
 * Device Memory Migration functionality.
 *
 * Originally written by Jérôme Glisse.
 */
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>
#include "internal.h"

static int migrate_vma_collect_skip(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = 0;
	}

	return 0;
}

static int migrate_vma_collect_hole(unsigned long start,
				    unsigned long end,
				    __always_unused int depth,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	/* Only allow populating anonymous memory. */
	if (!vma_is_anonymous(walk->vma))
		return migrate_vma_collect_skip(start, end, walk);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
		migrate->dst[migrate->npages] = 0;
		migrate->npages++;
		migrate->cpages++;
	}

	return 0;
}

static int migrate_vma_collect_pmd(pmd_t *pmdp,
				   unsigned long start,
				   unsigned long end,
				   struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start, unmapped = 0;
	spinlock_t *ptl;
	pte_t *ptep;

again:
	if (pmd_none(*pmdp))
		return migrate_vma_collect_hole(start, end, -1, walk);

	if (pmd_trans_huge(*pmdp)) {
		struct page *page;

		ptl = pmd_lock(mm, pmdp);
		if (unlikely(!pmd_trans_huge(*pmdp))) {
			spin_unlock(ptl);
			goto again;
		}

		page = pmd_page(*pmdp);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmdp, addr);
		} else {
			int ret;

			get_page(page);
			spin_unlock(ptl);
			if (unlikely(!trylock_page(page)))
				return migrate_vma_collect_skip(start, end,
								walk);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (ret)
				return migrate_vma_collect_skip(start, end,
								walk);
		}
	}

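	/*
	 * pte_offset_map_lock() returns NULL when the PMD changed under us
	 * (e.g. it was racily cleared or turned into a huge entry), so retry
	 * the walk from the top in that case.
	 */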
	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	if (!ptep)
		goto again;
	arch_enter_lazy_mmu_mode();

	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		unsigned long mpfn = 0, pfn;
		struct page *page;
		swp_entry_t entry;
		pte_t pte;

		pte = ptep_get(ptep);

		if (pte_none(pte)) {
			if (vma_is_anonymous(vma)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
			}
			goto next;
		}

		if (!pte_present(pte)) {
			/*
			 * Only care about unaddressable device page special
			 * page table entries. Other special swap entries are
			 * not migratable, and we ignore regular swapped pages.
			 */
			entry = pte_to_swp_entry(pte);
			if (!is_device_private_entry(entry))
				goto next;

			page = pfn_swap_entry_to_page(entry);
			if (!(migrate->flags &
			      MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
			    page->pgmap->owner != migrate->pgmap_owner)
				goto next;

			mpfn = migrate_pfn(page_to_pfn(page)) |
					MIGRATE_PFN_MIGRATE;
			if (is_writable_device_private_entry(entry))
				mpfn |= MIGRATE_PFN_WRITE;
		} else {
			pfn = pte_pfn(pte);
			if (is_zero_pfn(pfn) &&
			    (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
				goto next;
			}
			page = vm_normal_page(migrate->vma, addr, pte);
			if (page && !is_zone_device_page(page) &&
			    !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
				goto next;
			else if (page && is_device_coherent_page(page) &&
				 (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
				  page->pgmap->owner != migrate->pgmap_owner))
				goto next;
			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
		}

		/* FIXME support THP */
		if (!page || !page->mapping || PageTransCompound(page)) {
			mpfn = 0;
			goto next;
		}

		/*
		 * By getting a reference on the page we pin it and that blocks
		 * any kind of migration. Side effect is that it "freezes" the
		 * pte.
		 *
		 * We drop this reference after isolating the page from the lru
		 * for non-device pages (device pages are not on the lru and
		 * thus can't be dropped from it).
		 */
		get_page(page);

		/*
		 * We rely on trylock_page() to avoid deadlock between
		 * concurrent migrations where each is waiting on the other's
		 * page lock. If we can't immediately lock the page we fail
		 * this migration as it is only best effort anyway.
		 *
		 * If we can lock the page it's safe to set up a migration
		 * entry now. In the common case where the page is mapped once
		 * in a single process setting up the migration entry now is an
		 * optimisation to avoid walking the rmap later with
		 * try_to_migrate().
		 */
		if (trylock_page(page)) {
			bool anon_exclusive;
			pte_t swp_pte;

			flush_cache_page(vma, addr, pte_pfn(pte));
			anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
			if (anon_exclusive) {
				pte = ptep_clear_flush(vma, addr, ptep);

				if (page_try_share_anon_rmap(page)) {
					set_pte_at(mm, addr, ptep, pte);
					unlock_page(page);
					put_page(page);
					mpfn = 0;
					goto next;
				}
			} else {
				pte = ptep_get_and_clear(mm, addr, ptep);
			}

			migrate->cpages++;

			/* Set the dirty flag on the folio now the pte is gone. */
			if (pte_dirty(pte))
				folio_mark_dirty(page_folio(page));

			/* Setup special migration page table entry */
			if (mpfn & MIGRATE_PFN_WRITE)
				entry = make_writable_migration_entry(
							page_to_pfn(page));
			else if (anon_exclusive)
				entry = make_readable_exclusive_migration_entry(
							page_to_pfn(page));
			else
				entry = make_readable_migration_entry(
							page_to_pfn(page));
			if (pte_present(pte)) {
				if (pte_young(pte))
					entry = make_migration_entry_young(entry);
				if (pte_dirty(pte))
					entry = make_migration_entry_dirty(entry);
			}
			swp_pte = swp_entry_to_pte(entry);
			if (pte_present(pte)) {
				if (pte_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				if (pte_swp_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			set_pte_at(mm, addr, ptep, swp_pte);

			/*
			 * This is like regular unmap: we remove the rmap and
			 * drop the page refcount. The page won't be freed, as
			 * we took a reference just above.
			 */
			page_remove_rmap(page, vma, false);
			put_page(page);

			if (pte_present(pte))
				unmapped++;
		} else {
			put_page(page);
			mpfn = 0;
		}

next:
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = mpfn;
	}

	/* Only flush the TLB if we actually modified any entries */
	if (unmapped)
		flush_tlb_range(walk->vma, start, end);

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(ptep - 1, ptl);

	return 0;
}

static const struct mm_walk_ops migrate_vma_walk_ops = {
	.pmd_entry = migrate_vma_collect_pmd,
	.pte_hole = migrate_vma_collect_hole,
	.walk_lock = PGWALK_RDLOCK,
};

/*
 * migrate_vma_collect() - collect pages over a range of virtual addresses
 * @migrate: migrate struct containing all migration information
 *
 * This will walk the CPU page table. For each virtual address backed by a
 * valid page, it updates the src array and takes a reference on the page, in
 * order to pin the page until we lock it and unmap it.
 */
static void migrate_vma_collect(struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;

	/*
	 * Note that the pgmap_owner is passed to the mmu notifier callback so
	 * that the registered device driver can skip invalidating device
	 * private page mappings that won't be migrated.
	 */
	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
		migrate->vma->vm_mm, migrate->start, migrate->end,
		migrate->pgmap_owner);
	mmu_notifier_invalidate_range_start(&range);

	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
			&migrate_vma_walk_ops, migrate);

	mmu_notifier_invalidate_range_end(&range);
	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}

/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 * @fault_page: the page a CPU fault is being handled for, if any (the fault
 *	path holds an extra reference on it)
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * folio_migrate_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
 */
static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
{
	/*
	 * One extra ref because the caller holds an extra reference, either
	 * from isolate_lru_page() for a regular page, or migrate_vma_collect()
	 * for a device page.
	 */
	int extra = 1 + (page == fault_page);

	/*
	 * FIXME support THP (transparent huge page), it is a bit more complex
	 * to check them than regular pages, because they can be mapped with a
	 * pmd or with a pte (split pte mapping).
	 */
	if (PageCompound(page))
		return false;

	/* Pages from ZONE_DEVICE have one extra reference */
	if (is_zone_device_page(page))
		extra++;

	/* For file-backed pages */
	if (page_mapping(page))
		extra += 1 + page_has_private(page);

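	/*
	 * Any references beyond those accounted for in "extra" plus one per
	 * remaining mapping indicate the page is pinned (e.g. by GUP) and
	 * cannot be migrated.
	 */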
	if ((page_count(page) - extra) > page_mapcount(page))
		return false;

	return true;
}

/*
 * Unmaps pages for migration. Returns number of source pfns marked as
 * migrating.
 */
static unsigned long migrate_device_unmap(unsigned long *src_pfns,
					  unsigned long npages,
					  struct page *fault_page)
{
	unsigned long i, restore = 0;
	bool allow_drain = true;
	unsigned long unmapped = 0;

	lru_add_drain();

	for (i = 0; i < npages; i++) {
		struct page *page = migrate_pfn_to_page(src_pfns[i]);
		struct folio *folio;

		if (!page) {
			if (src_pfns[i] & MIGRATE_PFN_MIGRATE)
				unmapped++;
			continue;
		}

		/* ZONE_DEVICE pages are not on LRU */
		if (!is_zone_device_page(page)) {
			if (!PageLRU(page) && allow_drain) {
				/* Drain CPU's lru cache */
				lru_add_drain_all();
				allow_drain = false;
			}

			if (!isolate_lru_page(page)) {
				src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
				restore++;
				continue;
			}

			/* Drop the reference we took in collect */
			put_page(page);
		}

		folio = page_folio(page);
		if (folio_mapped(folio))
			try_to_migrate(folio, 0);

		if (page_mapped(page) ||
		    !migrate_vma_check_page(page, fault_page)) {
			if (!is_zone_device_page(page)) {
				get_page(page);
				putback_lru_page(page);
			}

			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
			restore++;
			continue;
		}

		unmapped++;
	}

	for (i = 0; i < npages && restore; i++) {
		struct page *page = migrate_pfn_to_page(src_pfns[i]);
		struct folio *folio;

		if (!page || (src_pfns[i] & MIGRATE_PFN_MIGRATE))
			continue;

		folio = page_folio(page);
		remove_migration_ptes(folio, folio, false);

		src_pfns[i] = 0;
		folio_unlock(folio);
		folio_put(folio);
		restore--;
	}

	return unmapped;
}

/*
 * migrate_vma_unmap() - replace page mapping with special migration pte entry
 * @migrate: migrate struct containing all migration information
 *
 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
 * special migration pte entry and check if it has been pinned. Pinned pages are
 * restored because we cannot migrate them.
 *
 * This is the last step before we call the device driver callback to allocate
 * destination memory and copy contents of original page over to new page.
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
	migrate->cpages = migrate_device_unmap(migrate->src, migrate->npages,
					migrate->fault_page);
}

/**
 * migrate_vma_setup() - prepare to migrate a range of memory
 * @args: contains the vma, start, and pfns arrays for the migration
 *
 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
 * without an error.
 *
 * Prepare to migrate a virtual address range by collecting all the pages
 * backing each virtual address in the range, saving them inside the src
 * array. Then lock those pages and unmap them. Once the pages are locked and
 * unmapped, check whether each page is pinned or not. Pages that aren't
 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
 * corresponding src array entry. Pages that are pinned are then restored, by
 * remapping and unlocking them.
 *
 * The caller should then allocate destination memory and copy source memory to
 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flag set). Once these are allocated and copied, the caller must update each
 * corresponding entry in the dst array with the pfn value of the destination
 * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
 * lock_page().
 *
 * Note that the caller does not have to migrate all the pages that are marked
 * with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a
 * migration from device memory to system memory. If the caller cannot migrate
 * a device page back to system memory, then it must return VM_FAULT_SIGBUS,
 * which has severe consequences for the userspace process, so it must be
 * avoided if at all possible.
 *
 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
 * true) we set the MIGRATE_PFN_MIGRATE flag in the corresponding src array
 * entry, thus allowing the caller to allocate device memory for those unbacked
 * virtual addresses. For this the caller simply has to allocate device memory
 * and properly set the destination entry like for regular migration. Note that
 * this can still fail, and thus inside the device driver you must check if the
 * migration was successful for those entries after calling migrate_vma_pages(),
 * just like for regular migration.
 *
 * After that, the caller must call migrate_vma_pages() to go over each entry
 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
 * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
 * flag set, migrate_vma_pages() migrates the struct page information from the
 * source struct page to the destination struct page. If it fails to migrate
 * the struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in
 * the src array.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
 * array entry with MIGRATE_PFN_VALID flag set.
 *
 * Once migrate_vma_pages() returns the caller may inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table after migrate_vma_pages() because
 * both destination and source page are still locked, and the mmap_lock is held
 * in read mode (hence no one can unmap the range being migrated).
 *
 * Once the caller is done cleaning up things and updating its page table (if it
 * chose to do so, this is not an obligation) it finally calls
 * migrate_vma_finalize() to update the CPU page table to point to new pages
 * for successfully migrated pages or otherwise restore the CPU page table to
 * point to the original source pages. A condensed example of this flow is
 * sketched after this function.
 */
int migrate_vma_setup(struct migrate_vma *args)
{
	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;

	args->start &= PAGE_MASK;
	args->end &= PAGE_MASK;
	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
		return -EINVAL;
	if (nr_pages <= 0)
		return -EINVAL;
	if (args->start < args->vma->vm_start ||
	    args->start >= args->vma->vm_end)
		return -EINVAL;
	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
		return -EINVAL;
	if (!args->src || !args->dst)
		return -EINVAL;
	if (args->fault_page && !is_device_private_page(args->fault_page))
		return -EINVAL;

	memset(args->src, 0, sizeof(*args->src) * nr_pages);
	args->cpages = 0;
	args->npages = 0;

	migrate_vma_collect(args);

	if (args->cpages)
		migrate_vma_unmap(args);

	/*
	 * At this point pages are locked and unmapped, and thus they have
	 * stable content and can safely be copied to destination memory that
	 * is allocated by the drivers.
	 */
	return 0;
}
EXPORT_SYMBOL(migrate_vma_setup);
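
/*
 * Example: a minimal driver-side sketch of the migrate_vma_*() flow described
 * above. It is illustrative only: the device-side helpers
 * my_alloc_device_page() and my_copy_to_device(), and the owner cookie
 * my_pgmap_owner, are hypothetical.
 *
 *	static int my_migrate_range_to_device(struct vm_area_struct *vma,
 *					      unsigned long start,
 *					      unsigned long end,
 *					      unsigned long *src,
 *					      unsigned long *dst)
 *	{
 *		struct migrate_vma args = {
 *			.vma		= vma,
 *			.start		= start,
 *			.end		= end,
 *			.src		= src,
 *			.dst		= dst,
 *			.pgmap_owner	= my_pgmap_owner,
 *			.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *		};
 *		unsigned long i;
 *		int ret;
 *
 *		ret = migrate_vma_setup(&args);
 *		if (ret)
 *			return ret;
 *
 *		for (i = 0; i < args.npages; i++) {
 *			struct page *dpage;
 *
 *			if (!(src[i] & MIGRATE_PFN_MIGRATE))
 *				continue;
 *			dpage = my_alloc_device_page();
 *			if (!dpage)
 *				continue;
 *			lock_page(dpage);
 *			my_copy_to_device(dpage, migrate_pfn_to_page(src[i]));
 *			dst[i] = migrate_pfn(page_to_pfn(dpage));
 *		}
 *
 *		migrate_vma_pages(&args);
 *		migrate_vma_finalize(&args);
 *		return 0;
 *	}
 *
 * A failed allocation simply leaves dst[i] == 0, so that entry is not
 * migrated. Note that migrate_pfn_to_page(src[i]) may be NULL for an
 * originally unbacked (pte_none) address, in which case the destination
 * should be cleared rather than copied to.
 */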

/*
 * This code closely matches the code in:
 *   __handle_mm_fault()
 *     handle_pte_fault()
 *       do_anonymous_page()
 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
 * private or coherent page.
 */
static void migrate_vma_insert_page(struct migrate_vma *migrate,
				    unsigned long addr,
				    struct page *page,
				    unsigned long *src)
{
	struct vm_area_struct *vma = migrate->vma;
	struct mm_struct *mm = vma->vm_mm;
	bool flush = false;
	spinlock_t *ptl;
	pte_t entry;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	pte_t orig_pte;

	/* Only allow populating anonymous memory */
	if (!vma_is_anonymous(vma))
		goto abort;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (!p4dp)
		goto abort;
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		goto abort;
	pmdp = pmd_alloc(mm, pudp, addr);
	if (!pmdp)
		goto abort;
	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
		goto abort;
	if (pte_alloc(mm, pmdp))
		goto abort;
	if (unlikely(anon_vma_prepare(vma)))
		goto abort;
	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
		goto abort;

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	if (is_device_private_page(page)) {
		swp_entry_t swp_entry;

		if (vma->vm_flags & VM_WRITE)
			swp_entry = make_writable_device_private_entry(
						page_to_pfn(page));
		else
			swp_entry = make_readable_device_private_entry(
						page_to_pfn(page));
		entry = swp_entry_to_pte(swp_entry);
	} else {
		if (is_zone_device_page(page) &&
		    !is_device_coherent_page(page)) {
			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
			goto abort;
		}
		entry = mk_pte(page, vma->vm_page_prot);
		if (vma->vm_flags & VM_WRITE)
			entry = pte_mkwrite(pte_mkdirty(entry), vma);
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	if (!ptep)
		goto abort;
	orig_pte = ptep_get(ptep);

	if (check_stable_address_space(mm))
		goto unlock_abort;

	if (pte_present(orig_pte)) {
		unsigned long pfn = pte_pfn(orig_pte);

		if (!is_zero_pfn(pfn))
			goto unlock_abort;
		flush = true;
	} else if (!pte_none(orig_pte))
		goto unlock_abort;

	/*
	 * Check for userfaultfd but do not deliver the fault. Instead,
	 * just back off.
	 */
	if (userfaultfd_missing(vma))
		goto unlock_abort;

	inc_mm_counter(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, addr);
	if (!is_zone_device_page(page))
		lru_cache_add_inactive_or_unevictable(page, vma);
	get_page(page);

	if (flush) {
		flush_cache_page(vma, addr, pte_pfn(orig_pte));
		ptep_clear_flush(vma, addr, ptep);
		set_pte_at_notify(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	} else {
		/* No need to invalidate - it was non-present before */
		set_pte_at(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	}

	pte_unmap_unlock(ptep, ptl);
	*src = MIGRATE_PFN_MIGRATE;
	return;

unlock_abort:
	pte_unmap_unlock(ptep, ptl);
abort:
	*src &= ~MIGRATE_PFN_MIGRATE;
}

static void __migrate_device_pages(unsigned long *src_pfns,
				unsigned long *dst_pfns, unsigned long npages,
				struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;
	unsigned long i;
	bool notified = false;

	for (i = 0; i < npages; i++) {
		struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
		struct page *page = migrate_pfn_to_page(src_pfns[i]);
		struct address_space *mapping;
		int r;

		if (!newpage) {
			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (!page) {
			unsigned long addr;

			if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
				continue;

			/*
			 * The only time there is no vma is when called from
			 * migrate_device_coherent_page(). However this isn't
			 * called if the page could not be unmapped.
			 */
			VM_BUG_ON(!migrate);
			addr = migrate->start + i*PAGE_SIZE;
			if (!notified) {
				notified = true;

				mmu_notifier_range_init_owner(&range,
					MMU_NOTIFY_MIGRATE, 0,
					migrate->vma->vm_mm, addr, migrate->end,
					migrate->pgmap_owner);
				mmu_notifier_invalidate_range_start(&range);
			}
			migrate_vma_insert_page(migrate, addr, newpage,
						&src_pfns[i]);
			continue;
		}

		mapping = page_mapping(page);

		if (is_device_private_page(newpage) ||
		    is_device_coherent_page(newpage)) {
			if (mapping) {
				struct folio *folio;

				folio = page_folio(page);

				/*
				 * For now only support anonymous memory
				 * migrating to device private or coherent
				 * memory.
				 *
				 * Try to get rid of swap cache if possible.
				 */
				if (!folio_test_anon(folio) ||
				    !folio_free_swap(folio)) {
					src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
					continue;
				}
			}
		} else if (is_zone_device_page(newpage)) {
			/*
			 * Other types of ZONE_DEVICE page are not supported.
			 */
			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (migrate && migrate->fault_page == page)
			r = migrate_folio_extra(mapping, page_folio(newpage),
						page_folio(page),
						MIGRATE_SYNC_NO_COPY, 1);
		else
			r = migrate_folio(mapping, page_folio(newpage),
					page_folio(page), MIGRATE_SYNC_NO_COPY);
		if (r != MIGRATEPAGE_SUCCESS)
			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
	}

	if (notified)
		mmu_notifier_invalidate_range_end(&range);
}

/**
 * migrate_device_pages() - migrate meta-data from src page to dst page
 * @src_pfns: src_pfns returned from migrate_device_range()
 * @dst_pfns: array of pfns allocated by the driver to migrate memory to
 * @npages: number of pages in the range
 *
 * Equivalent to migrate_vma_pages(). This is called to migrate struct page
 * meta-data from source struct page to destination.
 */
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
			unsigned long npages)
{
	__migrate_device_pages(src_pfns, dst_pfns, npages, NULL);
}
EXPORT_SYMBOL(migrate_device_pages);

/**
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from source struct page to destination
 * struct page. This effectively finishes the migration from source page to the
 * destination page.
 */
void migrate_vma_pages(struct migrate_vma *migrate)
{
	__migrate_device_pages(migrate->src, migrate->dst, migrate->npages, migrate);
}
EXPORT_SYMBOL(migrate_vma_pages);

/*
 * migrate_device_finalize() - complete page migration
 * @src_pfns: src_pfns returned from migrate_device_range()
 * @dst_pfns: array of pfns allocated by the driver to migrate memory to
 * @npages: number of pages in the range
 *
 * Completes migration of the page by removing special migration entries.
 * Drivers must ensure copying of page data is complete and visible to the CPU
 * before calling this.
 */
void migrate_device_finalize(unsigned long *src_pfns,
			unsigned long *dst_pfns, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct folio *dst, *src;
		struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
		struct page *page = migrate_pfn_to_page(src_pfns[i]);

		if (!page) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			continue;
		}

		if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			newpage = page;
		}

		src = page_folio(page);
		dst = page_folio(newpage);
		remove_migration_ptes(src, dst, false);
		folio_unlock(src);

		if (is_zone_device_page(page))
			put_page(page);
		else
			putback_lru_page(page);

		if (newpage != page) {
			unlock_page(newpage);
			if (is_zone_device_page(newpage))
				put_page(newpage);
			else
				putback_lru_page(newpage);
		}
	}
}
EXPORT_SYMBOL(migrate_device_finalize);

/**
 * migrate_vma_finalize() - restore CPU page table entry
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and puts them back on the lru, or drops the extra
 * refcount, for device pages.
 */
void migrate_vma_finalize(struct migrate_vma *migrate)
{
	migrate_device_finalize(migrate->src, migrate->dst, migrate->npages);
}
EXPORT_SYMBOL(migrate_vma_finalize);

/**
 * migrate_device_range() - migrate device private pfns to normal memory.
 * @src_pfns: array large enough to hold migrating source device private pfns.
 * @start: starting pfn in the range to migrate.
 * @npages: number of pages to migrate.
 *
 * migrate_device_range() is similar in concept to migrate_vma_setup() except
 * that instead of looking up pages based on virtual address mappings a range
 * of device pfns that should be migrated to system memory is used instead.
 *
 * This is useful when a driver needs to free device memory but doesn't know the
 * virtual mappings of every page that may be in device memory. For example this
 * is often the case when a driver is being unloaded or unbound from a device.
 *
 * Like migrate_vma_setup() this function will take a reference and lock any
 * migrating pages that aren't free before unmapping them. Drivers may then
 * allocate destination pages and start copying data from the device to CPU
 * memory before calling migrate_device_pages(). A condensed example of this
 * flow is sketched after this function.
 */
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
			unsigned long npages)
{
	unsigned long i, pfn;

	for (pfn = start, i = 0; i < npages; pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		if (!get_page_unless_zero(page)) {
			src_pfns[i] = 0;
			continue;
		}

		if (!trylock_page(page)) {
			src_pfns[i] = 0;
			put_page(page);
			continue;
		}

		src_pfns[i] = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
	}

	migrate_device_unmap(src_pfns, npages, NULL);

	return 0;
}
EXPORT_SYMBOL(migrate_device_range);
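
/*
 * Example: a minimal sketch of how a driver being unbound might evict all
 * remaining device private pages, following the description above. The
 * pfn-range bookkeeping (my_first_pfn, my_npages) and the helper
 * my_copy_from_device() are hypothetical.
 *
 *	src_pfns = kvcalloc(my_npages, sizeof(*src_pfns), GFP_KERNEL);
 *	dst_pfns = kvcalloc(my_npages, sizeof(*dst_pfns), GFP_KERNEL);
 *	migrate_device_range(src_pfns, my_first_pfn, my_npages);
 *
 *	for (i = 0; i < my_npages; i++) {
 *		struct page *dpage;
 *
 *		if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
 *			continue;
 *		dpage = alloc_page(GFP_HIGHUSER);
 *		if (!dpage)
 *			continue;
 *		lock_page(dpage);
 *		my_copy_from_device(dpage, migrate_pfn_to_page(src_pfns[i]));
 *		dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
 *	}
 *
 *	migrate_device_pages(src_pfns, dst_pfns, my_npages);
 *	migrate_device_finalize(src_pfns, dst_pfns, my_npages);
 *	kvfree(src_pfns);
 *	kvfree(dst_pfns);
 */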

/*
 * Migrate a device coherent page back to normal memory. The caller should have
 * a reference on the page, which will be copied to the new page if migration
 * is successful or dropped on failure.
 */
int migrate_device_coherent_page(struct page *page)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct page *dpage;

	WARN_ON_ONCE(PageCompound(page));

	lock_page(page);
	src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;

	/*
	 * We don't have a VMA and don't need to walk the page tables to find
	 * the source page. So call migrate_device_unmap() directly to unmap
	 * the page as migrate_vma_setup() will fail if args.vma == NULL.
	 */
	migrate_device_unmap(&src_pfn, 1, NULL);
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		return -EBUSY;

	dpage = alloc_page(GFP_USER | __GFP_NOWARN);
	if (dpage) {
		lock_page(dpage);
		dst_pfn = migrate_pfn(page_to_pfn(dpage));
	}

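	/*
	 * If the allocation above failed, dst_pfn stays 0 and
	 * migrate_device_pages() clears MIGRATE_PFN_MIGRATE for the source
	 * page, so migrate_device_finalize() restores it instead of copying.
	 */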
	migrate_device_pages(&src_pfn, &dst_pfn, 1);
	if (src_pfn & MIGRATE_PFN_MIGRATE)
		copy_highpage(dpage, page);
	migrate_device_finalize(&src_pfn, &dst_pfn, 1);

	if (src_pfn & MIGRATE_PFN_MIGRATE)
		return 0;
	return -EBUSY;
}