Cross-reference matches for the identifier dst_vma:

/openbmc/linux/mm/
userfaultfd.c

  in find_dst_vma():
     31  struct vm_area_struct *dst_vma;    (local)
     33  dst_vma = find_vma(dst_mm, dst_start);
     34  if (!range_in_vma(dst_vma, dst_start, dst_start + len))
     42  if (!dst_vma->vm_userfaultfd_ctx.ctx)
     45  return dst_vma;

  in mfill_file_over_size():
     49  static bool mfill_file_over_size(struct vm_area_struct *dst_vma,    (argument)
     55  if (!dst_vma->vm_file)
     58  inode = dst_vma->vm_file->f_inode;
     59  offset = linear_page_index(dst_vma, dst_addr);

  in mfill_atomic_install_pte():
     71  struct vm_area_struct *dst_vma,    (argument)

  [all …]
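Pieced together, the find_dst_vma() fragments show how the userfaultfd fill path locates and validates its destination VMA before touching any page tables. A minimal sketch reconstructed from those fragments (the control flow between the visible lines is an assumption):

    static struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
                                               unsigned long dst_start,
                                               unsigned long len)
    {
            struct vm_area_struct *dst_vma;

            /* The whole destination range must fall inside a single VMA. */
            dst_vma = find_vma(dst_mm, dst_start);
            if (!range_in_vma(dst_vma, dst_start, dst_start + len))
                    return NULL;

            /* Only VMAs registered with a userfaultfd context qualify. */
            if (!dst_vma->vm_userfaultfd_ctx.ctx)
                    return NULL;

            return dst_vma;
    }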
memory.c

  in copy_nonpresent_pte():
    767  pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,    (argument)
    770  unsigned long vm_flags = dst_vma->vm_flags;
    860  pte_marker marker = copy_pte_marker(entry, dst_vma);
    867  if (!userfaultfd_wp(dst_vma))

  in copy_present_page():
    886  copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,    (argument)
    904  folio_add_new_anon_rmap(new_folio, dst_vma, addr);
    905  folio_add_lru_vma(new_folio, dst_vma);
    909  pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
    910  pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
    911  if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))

  [all …]
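The copy_present_page() lines trace the fork-time pattern for a freshly copied anonymous page: add it to the child's rmap and LRU, build its PTE, and decide whether to keep uffd-wp write protection. A sketch of that tail (the pte_mkuffd_wp() and set_pte_at() lines past the visible fragment are assumptions):

            /* Tail of copy_present_page(), per the fragments above. */
            folio_add_new_anon_rmap(new_folio, dst_vma, addr);
            folio_add_lru_vma(new_folio, dst_vma);
            pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
            pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
            if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
                    /* Child keeps the write-protect bit and faults on write. */
                    pte = pte_mkuffd_wp(pte);
            set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);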
hugetlb.c

  in copy_hugetlb_page_range():
    5031  struct vm_area_struct *dst_vma,    (argument)
    5070  dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
    5100  if (!userfaultfd_wp(dst_vma))
    5119  if (!userfaultfd_wp(dst_vma))
    5124  pte_to_swp_entry(entry), dst_vma);
    5154  new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
    5162  addr, dst_vma);
    5175  restore_reserve_on_error(h, dst_vma, addr,
    5181  hugetlb_install_folio(dst_vma, dst_pte, addr,
    5200  if (!userfaultfd_wp(dst_vma))

  [all …]
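Three of the hugetlb lines repeat one test, !userfaultfd_wp(dst_vma): when the child VMA is not uffd-wp registered, copy_hugetlb_page_range() strips the write-protect marker rather than copying it. A hedged sketch of one such site (the clearing helper and the set_huge_pte_at() signature in this tree are assumptions):

            entry = huge_ptep_get(src_pte);
            if (!userfaultfd_wp(dst_vma))
                    /* Drop uffd-wp rather than hand the child a stale marker. */
                    entry = huge_pte_clear_uffd_wp(entry);
            set_huge_pte_at(dst, addr, dst_pte, entry, sz);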
huge_memory.c

  in copy_huge_pmd():
    1057  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)    (argument)
    1066  if (!vma_is_anonymous(dst_vma))
    1098  if (!userfaultfd_wp(dst_vma))
    1143  if (!userfaultfd_wp(dst_vma))
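copy_huge_pmd() applies the same rules at PMD granularity: file-backed huge mappings are skipped at fork because the child can refill them on fault, and the uffd-wp bit survives only when the destination VMA wants it. A sketch around the visible tests (the pmd_clear_uffd_wp() call is an assumption):

            /* Skip non-anonymous THPs; the child re-faults them from the file. */
            if (!vma_is_anonymous(dst_vma))
                    return 0;
            ...
            if (!userfaultfd_wp(dst_vma))
                    pmd = pmd_clear_uffd_wp(pmd);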
shmem.c

  in shmem_mfill_atomic_pte():
    2571  struct vm_area_struct *dst_vma,    (argument)
    2577  struct inode *inode = file_inode(dst_vma->vm_file);
    2581  pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
    2660  gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm);
    2664  ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
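shmem_mfill_atomic_pte() first resolves which page of the backing file the destination address maps to, then defers to the shared installer seen in userfaultfd.c. A sketch of the visible setup and hand-off (the elided allocation steps and the trailing arguments are assumptions):

            struct inode *inode = file_inode(dst_vma->vm_file);
            pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
            ...
            /* Install the freshly allocated folio into the destination VMA. */
            ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
                                           &folio->page, true, flags);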
/openbmc/linux/include/linux/
shmem_fs.h

    189  struct vm_area_struct *dst_vma,
    195  #define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \    (argument)
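Line 189 belongs to the function prototype, while line 195 is the stub used when the function is compiled out, which is why the index tags dst_vma there as a macro argument. A sketch of the pairing (assumption: the stub sits in the !CONFIG_SHMEM branch and BUG()s, this header's usual pattern):

    #ifdef CONFIG_SHMEM
    int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
                               struct vm_area_struct *dst_vma,
                               unsigned long dst_addr, unsigned long src_addr,
                               uffd_flags_t flags, struct folio **foliop);
    #else
    #define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
                                   src_addr, flags, foliop) ({ BUG(); 0; })
    #endif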
mm_inline.h

  in copy_pte_marker():
    538  swp_entry_t entry, struct vm_area_struct *dst_vma)    (argument)
    545  if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
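copy_pte_marker() decides which PTE-marker bits a fork propagates; the visible line shows uffd-wp is copied only when the destination VMA is uffd-wp registered. A fuller sketch around it (the poison-marker handling is an assumption):

    static inline pte_marker copy_pte_marker(
                    swp_entry_t entry, struct vm_area_struct *dst_vma)
    {
            pte_marker srcm = pte_marker_get(entry);
            /* Always carry poison markers; they record lost data. */
            pte_marker dstm = srcm & PTE_MARKER_POISONED;

            /* Carry uffd-wp only into a VMA that tracks it. */
            if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
                    dstm |= PTE_MARKER_UFFD_WP;

            return dstm;
    }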
hugetlb.h

    154  struct vm_area_struct *dst_vma,
    340  struct vm_area_struct *dst_vma,    (argument in copy_hugetlb_page_range())
    426  struct vm_area_struct *dst_vma,    (argument in hugetlb_mfill_atomic_pte())
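The spread of line numbers suggests line 154 is the real declaration, while 340 and 426 sit in the !CONFIG_HUGETLB_PAGE branch, where the header supplies inline stubs so callers compile unconditionally. A sketch of the copy_hugetlb_page_range() stub (the BUG() body is an assumption, matching the header's pattern for must-be-unreachable paths):

    static inline int copy_hugetlb_page_range(struct mm_struct *dst,
                                              struct mm_struct *src,
                                              struct vm_area_struct *dst_vma,
                                              struct vm_area_struct *src_vma)
    {
            BUG();
            return 0;
    }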
userfaultfd_k.h

     73  struct vm_area_struct *dst_vma,
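This fragment is part of the prototype for mfill_atomic_install_pte(), the shared installer called from both shmem.c and userfaultfd.c above. Reconstructed declaration (parameters other than dst_vma are assumptions inferred from the shmem.c call site):

    int mfill_atomic_install_pte(pmd_t *dst_pmd,
                                 struct vm_area_struct *dst_vma,
                                 unsigned long dst_addr, struct page *page,
                                 bool newly_allocated, uffd_flags_t flags);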
huge_mm.h

     13  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
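Line 13 is the tail of copy_huge_pmd()'s declaration; the leading parameters below are assumptions matching the definition in mm/huge_memory.c:

    int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                      pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                      struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);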
mm.h

   2357  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
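copy_page_range() tops this whole chain: at fork, dup_mmap() calls it once per copied VMA, and it fans out into copy_huge_pmd(), copy_hugetlb_page_range(), and the copy_nonpresent_pte()/copy_present_page() pair above. A hedged sketch of the call site (the tmp/mpnt names for the child/parent VMAs are assumptions from kernel/fork.c):

            /* In dup_mmap(): tmp is the child's copy of parent VMA mpnt. */
            if (!(tmp->vm_flags & VM_WIPEONFORK))
                    retval = copy_page_range(tmp, mpnt);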