Lines matching refs:vma in mm/mprotect.c (Linux kernel)

42 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,  in can_change_pte_writable()  argument
47 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE))) in can_change_pte_writable()
55 if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte)) in can_change_pte_writable()
59 if (userfaultfd_pte_wp(vma, pte)) in can_change_pte_writable()
62 if (!(vma->vm_flags & VM_SHARED)) { in can_change_pte_writable()
69 page = vm_normal_page(vma, addr, pte); in can_change_pte_writable()
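
The references above are all inside can_change_pte_writable(), which decides whether a protection change may map a PTE writable immediately (for example, an exclusive anonymous page in a private mapping that is not soft-dirty- or uffd-wp-tracked) instead of leaving it read-only and taking a write fault later. A minimal userspace sketch of the visible effect, assuming a kernel that takes this path; minor_faults() is just a local helper here, and the fault count is only a rough observation, not a guaranteed interface:

	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/resource.h>

	static long minor_faults(void)
	{
		struct rusage ru;

		getrusage(RUSAGE_SELF, &ru);
		return ru.ru_minflt;
	}

	int main(void)
	{
		size_t len = 4096;
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		p[0] = 1;	/* populate: the anon page is now exclusive to this mm */

		mprotect(p, len, PROT_READ);		  /* drop write permission */
		mprotect(p, len, PROT_READ | PROT_WRITE); /* restore it */

		long before = minor_faults();
		p[0] = 2;	/* if the PTE was made writable above, often no new fault */
		long after = minor_faults();

		printf("minor faults taken by the second write: %ld\n", after - before);
		munmap(p, len);
		return 0;
	}
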
84 struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, in change_pte_range() argument
96 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in change_pte_range()
101 if (prot_numa && !(vma->vm_flags & VM_SHARED) && in change_pte_range()
102 atomic_read(&vma->vm_mm->mm_users) == 1) in change_pte_range()
105 flush_tlb_batched_pending(vma->vm_mm); in change_pte_range()
125 page = vm_normal_page(vma, addr, oldpte); in change_pte_range()
130 if (is_cow_mapping(vma->vm_flags) && in change_pte_range()
164 oldpte = ptep_modify_prot_start(vma, addr, pte); in change_pte_range()
187 can_change_pte_writable(vma, addr, ptent)) in change_pte_range()
188 ptent = pte_mkwrite(ptent, vma); in change_pte_range()
190 ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent); in change_pte_range()
244 pte_clear(vma->vm_mm, addr, pte); in change_pte_range()
258 set_pte_at(vma->vm_mm, addr, pte, newpte); in change_pte_range()
272 if (userfaultfd_wp_use_markers(vma)) { in change_pte_range()
279 set_pte_at(vma->vm_mm, addr, pte, in change_pte_range()
296 pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags) in pgtable_split_needed() argument
303 return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma); in pgtable_split_needed()
311 pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags) in pgtable_populate_needed() argument
318 return userfaultfd_wp_use_markers(vma); in pgtable_populate_needed()
327 #define change_pmd_prepare(vma, pmd, cp_flags) \ argument
330 if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \
331 if (pte_alloc(vma->vm_mm, pmd)) \
342 #define change_prepare(vma, high, low, addr, cp_flags) \ argument
345 if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \
346 low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
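
change_pmd_prepare()/change_prepare() above allocate a missing page-table level before the walk whenever pgtable_populate_needed() reports that uffd-wp markers must be installed, so a marker has somewhere to live even over ranges that were never touched. As a loose userspace analogy only (struct table, leaf_prepare() and mark_range() are made-up names, not kernel code), the same populate-before-marking pattern over a two-level table looks like this:

	#include <stdlib.h>

	#define TOP_ENTRIES	16
	#define LEAF_ENTRIES	64

	struct table {
		unsigned char *leaf[TOP_ENTRIES];	/* NULL until populated */
	};

	/* Allocate the lower level on demand, the way change_prepare() calls the
	 * per-level *_alloc() helpers; returns -1 on failure (like -ENOMEM). */
	static int leaf_prepare(struct table *t, unsigned int top)
	{
		if (!t->leaf[top]) {
			t->leaf[top] = calloc(LEAF_ENTRIES, 1);
			if (!t->leaf[top])
				return -1;
		}
		return 0;
	}

	/* Install a marker over [start, end): each leaf must exist first, even if
	 * nothing was ever written there. */
	static int mark_range(struct table *t, unsigned int start, unsigned int end)
	{
		for (unsigned int i = start; i < end; i++) {
			unsigned int top = i / LEAF_ENTRIES;

			if (top >= TOP_ENTRIES || leaf_prepare(t, top))
				return -1;
			t->leaf[top][i % LEAF_ENTRIES] = 1;
		}
		return 0;
	}

	int main(void)
	{
		struct table t = { { 0 } };

		return mark_range(&t, 60, 70) ? 1 : 0;	/* spans two leaves */
	}
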
354 struct vm_area_struct *vma, pud_t *pud, unsigned long addr, in change_pmd_range() argument
372 ret = change_pmd_prepare(vma, pmd, cp_flags); in change_pmd_range()
385 vma->vm_mm, addr, end); in change_pmd_range()
392 pgtable_split_needed(vma, cp_flags)) { in change_pmd_range()
393 __split_huge_pmd(vma, pmd, addr, false, NULL); in change_pmd_range()
399 ret = change_pmd_prepare(vma, pmd, cp_flags); in change_pmd_range()
405 ret = change_huge_pmd(tlb, vma, pmd, in change_pmd_range()
420 ret = change_pte_range(tlb, vma, pmd, addr, next, newprot, in change_pmd_range()
438 struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, in change_pud_range() argument
448 ret = change_prepare(vma, pud, pmd, addr, cp_flags); in change_pud_range()
453 pages += change_pmd_range(tlb, vma, pud, addr, next, newprot, in change_pud_range()
461 struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, in change_p4d_range() argument
471 ret = change_prepare(vma, p4d, pud, addr, cp_flags); in change_p4d_range()
476 pages += change_pud_range(tlb, vma, p4d, addr, next, newprot, in change_p4d_range()
484 struct vm_area_struct *vma, unsigned long addr, in change_protection_range() argument
487 struct mm_struct *mm = vma->vm_mm; in change_protection_range()
494 tlb_start_vma(tlb, vma); in change_protection_range()
497 ret = change_prepare(vma, pgd, p4d, addr, cp_flags); in change_protection_range()
504 pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot, in change_protection_range()
508 tlb_end_vma(tlb, vma); in change_protection_range()
514 struct vm_area_struct *vma, unsigned long start, in change_protection() argument
517 pgprot_t newprot = vma->vm_page_prot; in change_protection()
534 if (is_vm_hugetlb_page(vma)) in change_protection()
535 pages = hugetlb_change_protection(vma, start, end, newprot, in change_protection()
538 pages = change_protection_range(tlb, vma, start, end, newprot, in change_protection()
576 struct vm_area_struct *vma, struct vm_area_struct **pprev, in mprotect_fixup() argument
579 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
580 unsigned long oldflags = vma->vm_flags; in mprotect_fixup()
588 *pprev = vma; in mprotect_fixup()
598 (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in mprotect_fixup()
631 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in mprotect_fixup()
633 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), in mprotect_fixup()
634 vma->vm_userfaultfd_ctx, anon_vma_name(vma)); in mprotect_fixup()
636 vma = *pprev; in mprotect_fixup()
637 VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY); in mprotect_fixup()
641 *pprev = vma; in mprotect_fixup()
643 if (start != vma->vm_start) { in mprotect_fixup()
644 error = split_vma(vmi, vma, start, 1); in mprotect_fixup()
649 if (end != vma->vm_end) { in mprotect_fixup()
650 error = split_vma(vmi, vma, end, 0); in mprotect_fixup()
660 vma_start_write(vma); in mprotect_fixup()
661 vm_flags_reset(vma, newflags); in mprotect_fixup()
662 if (vma_wants_manual_pte_write_upgrade(vma)) in mprotect_fixup()
664 vma_set_page_prot(vma); in mprotect_fixup()
666 change_protection(tlb, vma, start, end, mm_cp_flags); in mprotect_fixup()
674 populate_vma_page_range(vma, start, end, NULL); in mprotect_fixup()
679 perf_event_mmap(vma); in mprotect_fixup()
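
mprotect_fixup() (references above) first tries to merge the changed range with adjacent VMAs and otherwise calls split_vma() at start and end, so the new flags apply to exactly the requested range. The split is visible from userspace; a small demo, assuming Linux and a page-aligned anonymous mapping:

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		char cmd[64];

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* Change only the middle page: the single VMA must be split into three. */
		if (mprotect(p + page, page, PROT_READ)) {
			perror("mprotect");
			return 1;
		}

		printf("look for %p..%p below (rw-p / r--p / rw-p)\n",
		       (void *)p, (void *)(p + 3 * page));
		snprintf(cmd, sizeof(cmd), "cat /proc/%d/maps", (int)getpid());
		return system(cmd);
	}
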
694 struct vm_area_struct *vma, *prev; in do_mprotect_pkey() local
733 vma = vma_find(&vmi, end); in do_mprotect_pkey()
735 if (!vma) in do_mprotect_pkey()
739 if (vma->vm_start >= end) in do_mprotect_pkey()
741 start = vma->vm_start; in do_mprotect_pkey()
743 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_mprotect_pkey()
746 if (vma->vm_start > start) in do_mprotect_pkey()
749 end = vma->vm_end; in do_mprotect_pkey()
751 if (!(vma->vm_flags & VM_GROWSUP)) in do_mprotect_pkey()
757 if (start > vma->vm_start) in do_mprotect_pkey()
758 prev = vma; in do_mprotect_pkey()
762 tmp = vma->vm_start; in do_mprotect_pkey()
763 for_each_vma_range(vmi, vma, end) { in do_mprotect_pkey()
768 if (vma->vm_start != tmp) { in do_mprotect_pkey()
774 if (rier && (vma->vm_flags & VM_MAYEXEC)) in do_mprotect_pkey()
784 new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey); in do_mprotect_pkey()
786 newflags |= (vma->vm_flags & ~mask_off_old_flags); in do_mprotect_pkey()
794 if (map_deny_write_exec(vma, newflags)) { in do_mprotect_pkey()
805 error = security_file_mprotect(vma, reqprot, prot); in do_mprotect_pkey()
809 tmp = vma->vm_end; in do_mprotect_pkey()
813 if (vma->vm_ops && vma->vm_ops->mprotect) { in do_mprotect_pkey()
814 error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags); in do_mprotect_pkey()
819 error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags); in do_mprotect_pkey()
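
do_mprotect_pkey() is the common implementation behind both mprotect(2) and pkey_mprotect(2) (plain mprotect() passes pkey == -1): it walks every VMA overlapping the range, validates the new flags (map_deny_write_exec(), security_file_mprotect(), an optional vm_ops->mprotect hook), and applies them per VMA via mprotect_fixup(). A minimal caller of both entry points, assuming glibc 2.27+ for the pkey wrappers and falling back when the hardware or kernel has no protection-key support:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		int pkey = pkey_alloc(0, 0);
		if (pkey < 0) {
			/* No pkey support: plain mprotect() still ends up in
			 * do_mprotect_pkey(), just without a key. */
			perror("pkey_alloc");
			return mprotect(p, 4096, PROT_READ) ? 1 : 0;
		}

		/* pkey_mprotect() is the other entry into do_mprotect_pkey(). */
		if (pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey)) {
			perror("pkey_mprotect");
			return 1;
		}

		pkey_free(pkey);
		return 0;
	}
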