Lines matching refs: vma

75 bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,  in hugepage_vma_check()  argument
78 if (!vma->vm_mm) /* vdso */ in hugepage_vma_check()
87 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) in hugepage_vma_check()
96 if (vma_is_dax(vma)) in hugepage_vma_check()
114 !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE))) in hugepage_vma_check()
122 if (!in_pf && shmem_file(vma->vm_file)) in hugepage_vma_check()
123 return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff, in hugepage_vma_check()
124 !enforce_sysfs, vma->vm_mm, vm_flags); in hugepage_vma_check()
133 if (!in_pf && file_thp_enabled(vma)) in hugepage_vma_check()
136 if (!vma_is_anonymous(vma)) in hugepage_vma_check()
139 if (vma_is_temporary_stack(vma)) in hugepage_vma_check()
149 if (!vma->anon_vma) in hugepage_vma_check()
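
These matches appear to come from the kernel's mm/huge_memory.c (circa v6.6). For orientation, a condensed sketch of the vma-related policy above, in the matched order; the return values and the elided non-vma conditions are assumptions, not part of the match output:

bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs)
{
	if (!vma->vm_mm)					/* vdso */
		return false;
	if ((vm_flags & VM_NOHUGEPAGE) ||			/* assumed */
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	/* khugepaged does not collapse DAX vmas, but page faults are fine */
	if (vma_is_dax(vma))
		return in_pf;
	/* alignment/size check is left to the fault handlers for page faults */
	if (!in_pf &&
	    !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
		return false;
	/* shmem carries its own mount/sysfs policy */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
				     !enforce_sysfs, vma->vm_mm, vm_flags);
	/* ... global sysfs enabled/always checks elided ... */
	if (!in_pf && file_thp_enabled(vma))
		return true;
	if (!vma_is_anonymous(vma))
		return false;
	if (vma_is_temporary_stack(vma))
		return false;
	/* anon_vma may not exist until the first fault; allow smaps and faults */
	if (!vma->anon_vma)
		return smaps || in_pf;
	return true;
}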
552 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
554 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pmd_mkwrite()
555 pmd = pmd_mkwrite(pmd, vma); in maybe_pmd_mkwrite()
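
The matches at 552-555 cover nearly the whole helper; a full reconstruction (only the closing return and braces are inferred):

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	/* only mark the pmd writable if the mapping allows writes */
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd, vma);
	return pmd;						/* inferred */
}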
653 struct vm_area_struct *vma = vmf->vma; in __do_huge_pmd_anonymous_page() local
661 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { in __do_huge_pmd_anonymous_page()
669 pgtable = pte_alloc_one(vma->vm_mm); in __do_huge_pmd_anonymous_page()
683 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
689 ret = check_stable_address_space(vma->vm_mm); in __do_huge_pmd_anonymous_page()
694 if (userfaultfd_missing(vma)) { in __do_huge_pmd_anonymous_page()
697 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
703 entry = mk_huge_pmd(page, vma->vm_page_prot); in __do_huge_pmd_anonymous_page()
704 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in __do_huge_pmd_anonymous_page()
705 folio_add_new_anon_rmap(folio, vma, haddr); in __do_huge_pmd_anonymous_page()
706 folio_add_lru_vma(folio, vma); in __do_huge_pmd_anonymous_page()
707 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
708 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
709 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in __do_huge_pmd_anonymous_page()
710 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
711 mm_inc_nr_ptes(vma->vm_mm); in __do_huge_pmd_anonymous_page()
714 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC); in __do_huge_pmd_anonymous_page()
722 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
737 gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma) in vma_thp_gfp_mask() argument
739 const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE); in vma_thp_gfp_mask()
765 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
771 entry = mk_pmd(zero_page, vma->vm_page_prot); in set_huge_zero_page()
780 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_anonymous_page() local
785 if (!transhuge_vma_suitable(vma, haddr)) in do_huge_pmd_anonymous_page()
787 if (unlikely(anon_vma_prepare(vma))) in do_huge_pmd_anonymous_page()
789 khugepaged_enter_vma(vma, vma->vm_flags); in do_huge_pmd_anonymous_page()
792 !mm_forbids_zeropage(vma->vm_mm) && in do_huge_pmd_anonymous_page()
797 pgtable = pte_alloc_one(vma->vm_mm); in do_huge_pmd_anonymous_page()
800 zero_page = mm_get_huge_zero_page(vma->vm_mm); in do_huge_pmd_anonymous_page()
802 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
806 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
809 ret = check_stable_address_space(vma->vm_mm); in do_huge_pmd_anonymous_page()
812 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
813 } else if (userfaultfd_missing(vma)) { in do_huge_pmd_anonymous_page()
815 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
819 set_huge_zero_page(pgtable, vma->vm_mm, vma, in do_huge_pmd_anonymous_page()
821 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_anonymous_page()
826 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
830 gfp = vma_thp_gfp_mask(vma); in do_huge_pmd_anonymous_page()
831 folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true); in do_huge_pmd_anonymous_page()
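
When the huge-zero-page path at 789-826 is not taken, the function falls through to the allocation at 830-831. A minimal sketch of that tail; the fallback accounting and the call into __do_huge_pmd_anonymous_page() are assumptions:

	gfp = vma_thp_gfp_mask(vma);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);		/* assumed */
		return VM_FAULT_FALLBACK;			/* assumed */
	}
	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);	/* assumed */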
839 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, in insert_pfn_pmd() argument
843 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pmd()
855 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in insert_pfn_pmd()
856 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) in insert_pfn_pmd()
857 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
868 entry = maybe_pmd_mkwrite(entry, vma); in insert_pfn_pmd()
878 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
899 struct vm_area_struct *vma = vmf->vma; in vmf_insert_pfn_pmd() local
900 pgprot_t pgprot = vma->vm_page_prot; in vmf_insert_pfn_pmd()
908 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in vmf_insert_pfn_pmd()
910 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pmd()
912 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pmd()
914 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_pmd()
918 pgtable = pte_alloc_one(vma->vm_mm); in vmf_insert_pfn_pmd()
923 track_pfn_insert(vma, &pgprot, pfn); in vmf_insert_pfn_pmd()
925 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); in vmf_insert_pfn_pmd()
931 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) in maybe_pud_mkwrite() argument
933 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pud_mkwrite()
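
The PUD analogue at 931-933 mirrors maybe_pmd_mkwrite(); a sketch, where the pud_mkwrite() call (assumed to take only the pud) and the return are inferred:

static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);				/* assumed signature */
	return pud;
}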
938 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, in insert_pfn_pud() argument
941 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pud()
942 pgprot_t prot = vma->vm_page_prot; in insert_pfn_pud()
954 entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma); in insert_pfn_pud()
955 if (pudp_set_access_flags(vma, addr, pud, entry, 1)) in insert_pfn_pud()
956 update_mmu_cache_pud(vma, addr, pud); in insert_pfn_pud()
966 entry = maybe_pud_mkwrite(entry, vma); in insert_pfn_pud()
969 update_mmu_cache_pud(vma, addr, pud); in insert_pfn_pud()
988 struct vm_area_struct *vma = vmf->vma; in vmf_insert_pfn_pud() local
989 pgprot_t pgprot = vma->vm_page_prot; in vmf_insert_pfn_pud()
996 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in vmf_insert_pfn_pud()
998 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pud()
1000 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pud()
1002 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_pud()
1005 track_pfn_insert(vma, &pgprot, pfn); in vmf_insert_pfn_pud()
1007 insert_pfn_pud(vma, addr, vmf->pud, pfn, write); in vmf_insert_pfn_pud()
1013 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, in touch_pmd() argument
1021 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, in touch_pmd()
1023 update_mmu_cache_pmd(vma, addr, pmd); in touch_pmd()
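
A sketch of touch_pmd() as suggested by the matches at 1013-1023; the young/dirty bookkeeping lines are assumptions, since only the vma-bearing lines appear above. touch_pud() at 1168-1178 mirrors it with the PUD helpers.

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		      pmd_t *pmd, bool write)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);				/* assumed */
	if (write)
		_pmd = pmd_mkdirty(_pmd);			/* assumed */
	/* flush and update the MMU cache only if the entry actually changed */
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, write))
		update_mmu_cache_pmd(vma, addr, pmd);
}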
1026 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, in follow_devmap_pmd() argument
1030 struct mm_struct *mm = vma->vm_mm; in follow_devmap_pmd()
1045 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); in follow_devmap_pmd()
1168 static void touch_pud(struct vm_area_struct *vma, unsigned long addr, in touch_pud() argument
1176 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, in touch_pud()
1178 update_mmu_cache_pud(vma, addr, pud); in touch_pud()
1181 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, in follow_devmap_pud() argument
1185 struct mm_struct *mm = vma->vm_mm; in follow_devmap_pud()
1200 touch_pud(vma, addr, pud, flags & FOLL_WRITE); in follow_devmap_pud()
1226 struct vm_area_struct *vma) in copy_huge_pud() argument
1269 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); in huge_pud_set_accessed()
1273 touch_pud(vmf->vma, vmf->address, vmf->pud, write); in huge_pud_set_accessed()
1283 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in huge_pmd_set_accessed()
1287 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); in huge_pmd_set_accessed()
1296 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_wp_page() local
1302 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
1303 VM_BUG_ON_VMA(!vma->anon_vma, vma); in do_huge_pmd_wp_page()
1356 page_move_anon_rmap(page, vma); in do_huge_pmd_wp_page()
1364 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_huge_pmd_wp_page()
1365 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) in do_huge_pmd_wp_page()
1366 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1375 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); in do_huge_pmd_wp_page()
1379 static inline bool can_change_pmd_writable(struct vm_area_struct *vma, in can_change_pmd_writable() argument
1384 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE))) in can_change_pmd_writable()
1392 if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd)) in can_change_pmd_writable()
1396 if (userfaultfd_huge_pmd_wp(vma, pmd)) in can_change_pmd_writable()
1399 if (!(vma->vm_flags & VM_SHARED)) { in can_change_pmd_writable()
1401 page = vm_normal_page_pmd(vma, addr, pmd); in can_change_pmd_writable()
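
can_change_pmd_writable() (matches at 1379-1401) decides whether a protection change may make the pmd writable right away; a condensed sketch, with the non-vma checks and both return expressions filled in as assumptions:

static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
					   unsigned long addr, pmd_t pmd)
{
	struct page *page;

	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
		return false;
	if (pmd_protnone(pmd))			/* assumed: NUMA-hinting entry */
		return false;
	/* write faults still needed for soft-dirty tracking? */
	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
		return false;
	/* write faults still needed for uffd-wp tracking? */
	if (userfaultfd_huge_pmd_wp(vma, pmd))
		return false;
	if (!(vma->vm_flags & VM_SHARED)) {
		/* assumed: only exclusive anonymous pages qualify */
		page = vm_normal_page_pmd(vma, addr, pmd);
		return page && PageAnon(page) && PageAnonExclusive(page);
	}
	return pmd_dirty(pmd);			/* assumed */
}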
1411 struct vm_area_struct *vma, in can_follow_write_pmd() argument
1423 if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) in can_follow_write_pmd()
1427 if (!(vma->vm_flags & VM_MAYWRITE)) in can_follow_write_pmd()
1431 if (vma->vm_flags & VM_WRITE) in can_follow_write_pmd()
1442 if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd)) in can_follow_write_pmd()
1444 return !userfaultfd_huge_pmd_wp(vma, pmd); in can_follow_write_pmd()
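
The matches at 1411-1444 outline the FOLL_FORCE write-override policy of can_follow_write_pmd(); a condensed sketch, with the non-vma checks filled in as assumptions:

static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	if (pmd_write(pmd))			/* assumed fast path */
		return true;
	if (!(flags & FOLL_FORCE))		/* assumed */
		return false;
	/* FOLL_FORCE has no effect on shared mappings ... */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;
	/* ... nor on mappings that may never become writable ... */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;
	/* ... nor on writable ones, which just take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;
	/* assumed: only an exclusive anonymous page can be overridden */
	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
		return false;
	/* no pending write fault for soft-dirty or uffd-wp tracking */
	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
		return false;
	return !userfaultfd_huge_pmd_wp(vma, pmd);
}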
1447 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, in follow_trans_huge_pmd() argument
1452 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd()
1462 !can_follow_write_pmd(*pmd, page, vma, flags)) in follow_trans_huge_pmd()
1469 if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags)) in follow_trans_huge_pmd()
1472 if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page)) in follow_trans_huge_pmd()
1483 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); in follow_trans_huge_pmd()
1494 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_numa_page() local
1504 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1510 pmd = pmd_modify(oldpmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1517 if (!writable && vma_wants_manual_pte_write_upgrade(vma) && in do_huge_pmd_numa_page()
1518 can_change_pmd_writable(vma, vmf->address, pmd)) in do_huge_pmd_numa_page()
1521 page = vm_normal_page_pmd(vma, haddr, pmd); in do_huge_pmd_numa_page()
1536 target_nid = numa_migrate_prep(page, vma, haddr, page_nid, in do_huge_pmd_numa_page()
1547 migrated = migrate_misplaced_page(page, vma, target_nid); in do_huge_pmd_numa_page()
1556 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1563 pmd = pmd_modify(oldpmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1566 pmd = pmd_mkwrite(pmd, vma); in do_huge_pmd_numa_page()
1567 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
1568 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_numa_page()
1580 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in madvise_free_huge_pmd() argument
1591 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_free_huge_pmd()
1634 pmdp_invalidate(vma, addr, pmd); in madvise_free_huge_pmd()
1659 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in zap_huge_pmd() argument
1667 ptl = __pmd_trans_huge_lock(pmd, vma); in zap_huge_pmd()
1676 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, in zap_huge_pmd()
1678 arch_check_zapped_pmd(vma, orig_pmd); in zap_huge_pmd()
1680 if (vma_is_special_huge(vma)) { in zap_huge_pmd()
1693 page_remove_rmap(page, vma, true); in zap_huge_pmd()
1725 struct vm_area_struct *vma) in pmd_move_must_withdraw() argument
1733 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); in pmd_move_must_withdraw()
1748 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, in move_huge_pmd() argument
1753 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd()
1770 old_ptl = __pmd_trans_huge_lock(old_pmd, vma); in move_huge_pmd()
1780 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { in move_huge_pmd()
1788 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE); in move_huge_pmd()
1804 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in change_huge_pmd() argument
1808 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd()
1821 ptl = __pmd_trans_huge_lock(pmd, vma); in change_huge_pmd()
1907 oldpmd = pmdp_invalidate_ad(vma, addr, pmd); in change_huge_pmd()
1922 can_change_pmd_writable(vma, addr, entry)) in change_huge_pmd()
1923 entry = pmd_mkwrite(entry, vma); in change_huge_pmd()
1941 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) in __pmd_trans_huge_lock() argument
1944 ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
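
__pmd_trans_huge_lock() is short enough to reconstruct; the pmd-state test and the unlock-on-failure path are assumptions, only the locking line is matched. __pud_trans_huge_lock() at 1958-1962 follows the same pattern.

spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	ptl = pmd_lock(vma->vm_mm, pmd);
	/* keep the lock only if the entry really is a huge/swap/devmap pmd */
	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
		   pmd_devmap(*pmd)))				/* assumed */
		return ptl;
	spin_unlock(ptl);
	return NULL;
}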
1958 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) in __pud_trans_huge_lock() argument
1962 ptl = pud_lock(vma->vm_mm, pud); in __pud_trans_huge_lock()
1970 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, in zap_huge_pud() argument
1975 ptl = __pud_trans_huge_lock(pud, vma); in zap_huge_pud()
1979 pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm); in zap_huge_pud()
1981 if (vma_is_special_huge(vma)) { in zap_huge_pud()
1991 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, in __split_huge_pud_locked() argument
1995 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); in __split_huge_pud_locked()
1996 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); in __split_huge_pud_locked()
2001 pudp_huge_clear_flush(vma, haddr, pud); in __split_huge_pud_locked()
2004 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, in __split_huge_pud() argument
2010 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in __split_huge_pud()
2014 ptl = pud_lock(vma->vm_mm, pud); in __split_huge_pud()
2017 __split_huge_pud_locked(vma, pud, range.start); in __split_huge_pud()
2025 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, in __split_huge_zero_page_pmd() argument
2028 struct mm_struct *mm = vma->vm_mm; in __split_huge_zero_page_pmd()
2043 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); in __split_huge_zero_page_pmd()
2053 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot); in __split_huge_zero_page_pmd()
2066 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd_locked() argument
2069 struct mm_struct *mm = vma->vm_mm; in __split_huge_pmd_locked()
2080 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); in __split_huge_pmd_locked()
2081 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); in __split_huge_pmd_locked()
2087 if (!vma_is_anonymous(vma)) { in __split_huge_pmd_locked()
2088 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); in __split_huge_pmd_locked()
2095 if (vma_is_special_huge(vma)) in __split_huge_pmd_locked()
2108 page_remove_rmap(page, vma, true); in __split_huge_pmd_locked()
2125 return __split_huge_zero_page_pmd(vma, haddr, pmd); in __split_huge_pmd_locked()
2166 old_pmd = pmdp_invalidate(vma, haddr, pmd); in __split_huge_pmd_locked()
2238 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); in __split_huge_pmd_locked()
2240 entry = pte_mkwrite(entry, vma); in __split_huge_pmd_locked()
2252 page_add_anon_rmap(page + i, vma, addr, RMAP_NONE); in __split_huge_pmd_locked()
2261 page_remove_rmap(page, vma, true); in __split_huge_pmd_locked()
2269 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
2275 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in __split_huge_pmd()
2279 ptl = pmd_lock(vma->vm_mm, pmd); in __split_huge_pmd()
2296 __split_huge_pmd_locked(vma, pmd, range.start, freeze); in __split_huge_pmd()
2304 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, in split_huge_pmd_address() argument
2307 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); in split_huge_pmd_address()
2312 __split_huge_pmd(vma, pmd, address, freeze, folio); in split_huge_pmd_address()
2315 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) in split_huge_pmd_if_needed() argument
2322 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE), in split_huge_pmd_if_needed()
2324 split_huge_pmd_address(vma, address, false, NULL); in split_huge_pmd_if_needed()
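
split_huge_pmd_if_needed() can likewise be sketched from the matches at 2315-2324; the IS_ALIGNED() guard is an assumption, the rest follows the matched lines:

static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma,
					    unsigned long address)
{
	/*
	 * If the new boundary is not huge-page aligned and the aligned
	 * range lies within the vma, a huge pmd there must be split.
	 */
	if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&		/* assumed */
	    range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
			 ALIGN(address, HPAGE_PMD_SIZE)))
		split_huge_pmd_address(vma, address, false, NULL);
}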
2327 void vma_adjust_trans_huge(struct vm_area_struct *vma, in vma_adjust_trans_huge() argument
2333 split_huge_pmd_if_needed(vma, start); in vma_adjust_trans_huge()
2336 split_huge_pmd_if_needed(vma, end); in vma_adjust_trans_huge()
2343 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); in vma_adjust_trans_huge()
2969 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) in vma_not_suitable_for_thp_split() argument
2971 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) || in vma_not_suitable_for_thp_split()
2972 is_vm_hugetlb_page(vma); in vma_not_suitable_for_thp_split()
3016 struct vm_area_struct *vma = vma_lookup(mm, addr); in split_huge_pages_pid() local
3020 if (!vma) in split_huge_pages_pid()
3024 if (vma_not_suitable_for_thp_split(vma)) { in split_huge_pages_pid()
3025 addr = vma->vm_end; in split_huge_pages_pid()
3030 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); in split_huge_pages_pid()
3210 struct vm_area_struct *vma = pvmw->vma; in set_pmd_migration_entry() local
3211 struct mm_struct *mm = vma->vm_mm; in set_pmd_migration_entry()
3221 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); in set_pmd_migration_entry()
3222 pmdval = pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
3249 page_remove_rmap(page, vma, true); in set_pmd_migration_entry()
3258 struct vm_area_struct *vma = pvmw->vma; in remove_migration_pmd() local
3259 struct mm_struct *mm = vma->vm_mm; in remove_migration_pmd()
3270 pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)); in remove_migration_pmd()
3274 pmde = pmd_mkwrite(pmde, vma); in remove_migration_pmd()
3289 page_add_anon_rmap(new, vma, haddr, rmap_flags); in remove_migration_pmd()
3291 page_add_file_rmap(new, vma, true); in remove_migration_pmd()
3297 update_mmu_cache_pmd(vma, address, pvmw->pmd); in remove_migration_pmd()