Lines matching refs: vma — mm/khugepaged.c (identifier cross-reference)

Each entry gives the source line number, the matched line, the enclosing function, and whether vma is bound there as a function argument or a local variable.

348 int hugepage_madvise(struct vm_area_struct *vma,  in hugepage_madvise()  argument
359 if (mm_has_pgste(vma->vm_mm)) in hugepage_madvise()
369 khugepaged_enter_vma(vma, *vm_flags); in hugepage_madvise()
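
hugepage_madvise() is the MADV_HUGEPAGE/MADV_NOHUGEPAGE entry point: it bails out on s390 mms that carry page-table extensions (pgste, i.e. KVM guests), updates *vm_flags, and on MADV_HUGEPAGE registers the mm with the khugepaged daemon through khugepaged_enter_vma().
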
444 void khugepaged_enter_vma(struct vm_area_struct *vma, in khugepaged_enter_vma() argument
447 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) && in khugepaged_enter_vma()
449 if (hugepage_vma_check(vma, vm_flags, false, false, true)) in khugepaged_enter_vma()
450 __khugepaged_enter(vma->vm_mm); in khugepaged_enter_vma()
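
khugepaged_enter_vma() is the registration gate. Pieced together from the matched lines, its shape is roughly the sketch below; the condition elided at line 448 does not mention vma and is assumed here to be the global hugepage_flags_enabled() check:

    void khugepaged_enter_vma(struct vm_area_struct *vma,
                              unsigned long vm_flags)
    {
            if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
                hugepage_flags_enabled()) {        /* assumed, line 448 */
                    if (hugepage_vma_check(vma, vm_flags, false, false, true))
                            __khugepaged_enter(vma->vm_mm);
            }
    }

MMF_VM_HUGEPAGE is set once per mm, so a process is queued for scanning only on its first THP-eligible vma.
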
538 static int __collapse_huge_page_isolate(struct vm_area_struct *vma, in __collapse_huge_page_isolate() argument
555 if (!userfaultfd_armed(vma) && in __collapse_huge_page_isolate()
573 page = vm_normal_page(vma, address, pteval); in __collapse_huge_page_isolate()
657 PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, in __collapse_huge_page_isolate()
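
__collapse_huge_page_isolate() walks the ptes under the candidate pmd with the page-table lock held: empty and zero-page entries are tolerated up to a limit unless userfaultfd is armed on the vma (collapse would otherwise hide the missing-page events uffd was registered for), each remaining pte is resolved with vm_normal_page(), and young/referenced state (including mmu_notifier_test_young() for secondary MMUs) feeds the decision of whether the range is hot enough to collapse.
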
683 struct vm_area_struct *vma, in __collapse_huge_page_copy_succeeded() argument
697 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); in __collapse_huge_page_copy_succeeded()
703 ptep_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy_succeeded()
705 ksm_might_unmap_zero_page(vma->vm_mm, pteval); in __collapse_huge_page_copy_succeeded()
717 ptep_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy_succeeded()
718 page_remove_rmap(src_page, vma, false); in __collapse_huge_page_copy_succeeded()
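
After a successful copy, __collapse_huge_page_copy_succeeded() retires the old ptes: for none/zero-page entries it bumps the MM_ANONPAGES counter (that slot is now backed by the huge page) and clears the pte, with ksm_might_unmap_zero_page() keeping KSM's zero-page accounting straight; for real pages it clears the pte, drops the reverse mapping with page_remove_rmap(), and releases the small page.
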
738 struct vm_area_struct *vma, in __collapse_huge_page_copy_failed() argument
749 pmd_ptl = pmd_lock(vma->vm_mm, pmd); in __collapse_huge_page_copy_failed()
750 pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd)); in __collapse_huge_page_copy_failed()
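
__collapse_huge_page_copy_failed() is the rollback path: under pmd_lock() it re-populates the pmd with the original page table (pmd_pgtable(orig_pmd)), so the small-page mapping reappears intact if the copy hit, for example, a poisoned page.
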
778 struct vm_area_struct *vma, in __collapse_huge_page_copy() argument
800 if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) { in __collapse_huge_page_copy()
807 __collapse_huge_page_copy_succeeded(pte, vma, address, ptl, in __collapse_huge_page_copy()
810 __collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma, in __collapse_huge_page_copy()
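
__collapse_huge_page_copy() copies each small page into its slot of the huge page with copy_mc_user_highpage(), the machine-check-safe copy variant; a positive return means a poisoned source page was hit, and the function dispatches to the _succeeded or _failed path above accordingly.
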
915 struct vm_area_struct *vma; in hugepage_vma_revalidate() local
920 *vmap = vma = find_vma(mm, address); in hugepage_vma_revalidate()
921 if (!vma) in hugepage_vma_revalidate()
924 if (!transhuge_vma_suitable(vma, address)) in hugepage_vma_revalidate()
926 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, in hugepage_vma_revalidate()
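
hugepage_vma_revalidate() exists because collapse drops and re-takes mmap_lock: once the lock was released, the saved vma pointer proves nothing, so the vma is looked up again and re-qualified. A minimal sketch of the pattern, with the SCAN_* result codes and the truncated last argument at line 926 assumed from the file's conventions:

    *vmap = vma = find_vma(mm, address);
    if (!vma)
            return SCAN_VMA_NULL;
    if (!transhuge_vma_suitable(vma, address))
            return SCAN_ADDRESS_RANGE;
    if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
                            cc->is_khugepaged))     /* assumed */
            return SCAN_VMA_CHECK;
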
987 struct vm_area_struct *vma, in __collapse_huge_page_swapin() argument
1000 .vma = vma, in __collapse_huge_page_swapin()
1002 .pgoff = linear_page_index(vma, address), in __collapse_huge_page_swapin()
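
__collapse_huge_page_swapin() pre-faults swapped-out ptes in the range before collapse: it builds a struct vm_fault (.vma, .pgoff from linear_page_index(), and so on) for each swap entry and reuses the regular fault path to bring the pages back in, since collapse can only copy present pages.
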
1091 struct vm_area_struct *vma; in collapse_huge_page() local
1109 result = hugepage_vma_revalidate(mm, address, true, &vma, cc); in collapse_huge_page()
1127 result = __collapse_huge_page_swapin(mm, vma, address, pmd, in collapse_huge_page()
1140 result = hugepage_vma_revalidate(mm, address, true, &vma, cc); in collapse_huge_page()
1148 vma_start_write(vma); in collapse_huge_page()
1149 anon_vma_lock_write(vma->anon_vma); in collapse_huge_page()
1164 _pmd = pmdp_collapse_flush(vma, address, pmd); in collapse_huge_page()
1171 result = __collapse_huge_page_isolate(vma, address, pte, cc, in collapse_huge_page()
1190 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
1198 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
1201 vma, address, pte_ptl, in collapse_huge_page()
1216 _pmd = mk_huge_pmd(hpage, vma->vm_page_prot); in collapse_huge_page()
1217 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); in collapse_huge_page()
1221 page_add_new_anon_rmap(hpage, vma, address); in collapse_huge_page()
1222 lru_cache_add_inactive_or_unevictable(hpage, vma); in collapse_huge_page()
1225 update_mmu_cache_pmd(vma, address, pmd); in collapse_huge_page()
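
collapse_huge_page() is the anonymous-memory core, and the matched lines trace its whole shape: revalidate the vma, swap in missing ptes, revalidate again, then vma_start_write() plus anon_vma_lock_write() to fence off per-vma-lock page faults and rmap walkers, pmdp_collapse_flush() to detach the page table so no CPU can race new faults against it, isolate and copy the pages, and finally install the huge mapping. The install sequence around lines 1216-1225 reads roughly as below; the deposit and set_pmd_at() steps do not reference vma and are filled in here as an assumption:

    _pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
    _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

    spin_lock(pmd_ptl);
    page_add_new_anon_rmap(hpage, vma, address);
    lru_cache_add_inactive_or_unevictable(hpage, vma);
    pgtable_trans_huge_deposit(mm, pmd, pgtable);   /* assumed */
    set_pmd_at(mm, address, pmd, _pmd);             /* assumed */
    update_mmu_cache_pmd(vma, address, pmd);
    spin_unlock(pmd_ptl);
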
1241 struct vm_area_struct *vma, in hpage_collapse_scan_pmd() argument
1294 if (!userfaultfd_armed(vma) && in hpage_collapse_scan_pmd()
1320 page = vm_normal_page(vma, _address, pteval); in hpage_collapse_scan_pmd()
1384 PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, in hpage_collapse_scan_pmd()
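
hpage_collapse_scan_pmd() is the read-only twin of __collapse_huge_page_isolate(): it applies the same userfaultfd, vm_normal_page() and referenced/young tests without taking the heavy locks, so the caller can cheaply decide whether a pmd range is worth handing to collapse_huge_page().
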
1437 static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr, in set_huge_pmd() argument
1441 .vma = vma, in set_huge_pmd()
1448 mmap_assert_locked(vma->vm_mm); in set_huge_pmd()
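
set_huge_pmd() maps an existing file-backed huge page at pmd level by packaging vma, address and pmd into a struct vm_fault and reusing the fault path's pmd installer (do_set_pmd() in contemporary kernels, an assumption here); mmap_assert_locked() documents that the caller still holds mmap_lock.
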
1475 struct vm_area_struct *vma = vma_lookup(mm, haddr); in collapse_pte_mapped_thp() local
1486 if (!vma || !vma->vm_file || in collapse_pte_mapped_thp()
1487 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE)) in collapse_pte_mapped_thp()
1502 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) in collapse_pte_mapped_thp()
1506 if (userfaultfd_wp(vma)) in collapse_pte_mapped_thp()
1509 hpage = find_lock_page(vma->vm_file->f_mapping, in collapse_pte_mapped_thp()
1510 linear_page_index(vma, haddr)); in collapse_pte_mapped_thp()
1559 page = vm_normal_page(vma, addr, ptent); in collapse_pte_mapped_thp()
1584 if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED)) in collapse_pte_mapped_thp()
1613 page = vm_normal_page(vma, addr, ptent); in collapse_pte_mapped_thp()
1623 page_remove_rmap(page, vma, false); in collapse_pte_mapped_thp()
1643 pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd); in collapse_pte_mapped_thp()
1658 ? set_huge_pmd(vma, haddr, pmd, hpage) in collapse_pte_mapped_thp()
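
collapse_pte_mapped_thp() handles a huge page that already sits in the page cache but is still mapped with ptes: it requires a file vma covering the full aligned range, refuses uffd-wp vmas and private vmas with userfaultfd armed (line 1584), locks the huge page via find_lock_page(), verifies with vm_normal_page() that each pte maps the expected subpage, drops the per-pte rmaps, collapses the page table with pmdp_collapse_flush(), and then either installs a huge pmd with set_huge_pmd() or leaves the range to refault.
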
1681 struct vm_area_struct *vma; in retract_page_tables() local
1684 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { in retract_page_tables()
1698 if (READ_ONCE(vma->anon_vma)) in retract_page_tables()
1701 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in retract_page_tables()
1703 vma->vm_end < addr + HPAGE_PMD_SIZE) in retract_page_tables()
1706 mm = vma->vm_mm; in retract_page_tables()
1719 if (userfaultfd_wp(vma)) in retract_page_tables()
1741 if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) { in retract_page_tables()
1744 pgt_pmd = pmdp_collapse_flush(vma, addr, pmd); in retract_page_tables()
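
retract_page_tables() walks every vma of the mapping through the i_mmap interval tree after a file range went huge: vmas with an anon_vma (possible private COW pages) or uffd-wp are skipped, the file offset is translated to a user address, and for a fully covered, aligned range the now-redundant page table is torn out with pmdp_collapse_flush(). The anon_vma/uffd-wp condition is re-checked under the page-table lock at line 1741 because it can change while the locks are being taken.
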
2065 struct vm_area_struct *vma; in collapse_file() local
2100 vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) { in collapse_file()
2101 if (userfaultfd_missing(vma)) { in collapse_file()
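
In collapse_file(), which builds the huge folio for shmem/file mappings, the i_mmap walk at lines 2100-2101 checks userfaultfd_missing() on every mapping vma: filling a hole with collapsed memory would swallow the missing-page event userfaultfd was armed for, so such ranges make the collapse back out.
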
2331 struct vm_area_struct *vma; in khugepaged_scan_mm_slot() local
2355 vma = NULL; in khugepaged_scan_mm_slot()
2364 for_each_vma(vmi, vma) { in khugepaged_scan_mm_slot()
2372 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) { in khugepaged_scan_mm_slot()
2377 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE); in khugepaged_scan_mm_slot()
2378 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE); in khugepaged_scan_mm_slot()
2395 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { in khugepaged_scan_mm_slot()
2396 struct file *file = get_file(vma->vm_file); in khugepaged_scan_mm_slot()
2397 pgoff_t pgoff = linear_page_index(vma, in khugepaged_scan_mm_slot()
2416 *result = hpage_collapse_scan_pmd(mm, vma, in khugepaged_scan_mm_slot()
2449 if (hpage_collapse_test_exit(mm) || !vma) { in khugepaged_scan_mm_slot()
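
khugepaged_scan_mm_slot() is the daemon's main loop over one mm: it iterates the vmas with for_each_vma(), filters them through hugepage_vma_check(), and clamps each vma to the pmd-aligned window that can actually hold a huge mapping before dispatching to the file scanner (via get_file() plus the pgoff from linear_page_index()) or the anonymous scanner hpage_collapse_scan_pmd(). The clamp at lines 2377-2378 is worth spelling out:

    /* Only [hstart, hend) can be remapped by a pmd-sized entry;
     * the unaligned head and tail of the vma are never candidates. */
    hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
    hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);

The !vma check at line 2449 means the iterator ran off the end, i.e. this mm has been fully scanned and the slot can be advanced.
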
2695 int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, in madvise_collapse() argument
2699 struct mm_struct *mm = vma->vm_mm; in madvise_collapse()
2704 BUG_ON(vma->vm_start > start); in madvise_collapse()
2705 BUG_ON(vma->vm_end < end); in madvise_collapse()
2707 *prev = vma; in madvise_collapse()
2709 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) in madvise_collapse()
2730 result = hugepage_vma_revalidate(mm, addr, false, &vma, in madvise_collapse()
2737 hend = min(hend, vma->vm_end & HPAGE_PMD_MASK); in madvise_collapse()
2742 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { in madvise_collapse()
2743 struct file *file = get_file(vma->vm_file); in madvise_collapse()
2744 pgoff_t pgoff = linear_page_index(vma, addr); in madvise_collapse()
2752 result = hpage_collapse_scan_pmd(mm, vma, addr, in madvise_collapse()
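
madvise_collapse() backs MADV_COLLAPSE (Linux 6.1+): a synchronous, best-effort collapse of a caller-chosen range that runs in process context and, per the relaxed last argument to hugepage_vma_check() at line 2709, is not gated on the sysfs THP policy the way khugepaged is. Per pmd it revalidates the vma (the loop may drop mmap_lock), clamps to the vma end, and dispatches to the file or anonymous scan path just like the daemon. A minimal, self-contained userspace demonstration, assuming a 2 MiB pmd size (x86-64) and a kernel that knows MADV_COLLAPSE:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #ifndef MADV_COLLAPSE
    #define MADV_COLLAPSE 25        /* uapi value; older libc headers lack it */
    #endif

    int main(void)
    {
            size_t pmd = 2UL << 20;
            /* Over-allocate so a pmd-aligned 2 MiB window certainly exists:
             * madvise_collapse() only acts on pmd-aligned portions. */
            char *raw = mmap(NULL, 2 * pmd, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            char *buf;

            if (raw == MAP_FAILED)
                    return 1;
            buf = (char *)(((uintptr_t)raw + pmd - 1) & ~(pmd - 1));
            memset(buf, 0x5a, pmd);                 /* populate small pages */
            if (madvise(buf, pmd, MADV_COLLAPSE))   /* synchronous collapse */
                    perror("madvise(MADV_COLLAPSE)");
            return 0;
    }

Success can be confirmed by watching AnonHugePages for the range in /proc/self/smaps.
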