Lines Matching full:range
32 struct hmm_range *range; member
43 struct hmm_range *range, unsigned long cpu_flags) in hmm_pfns_fill() argument
45 unsigned long i = (addr - range->start) >> PAGE_SHIFT; in hmm_pfns_fill()
48 range->hmm_pfns[i] = cpu_flags; in hmm_pfns_fill()
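The two matched lines above are the heart of hmm_pfns_fill(): the index of addr's slot is derived from its page offset within the range, and one flags value is stamped into every slot. A minimal sketch of the whole helper, reconstructed around the matched fragments (the loop header contains no match for "range" and so is not shown by the search):

	static int hmm_pfns_fill(unsigned long addr, unsigned long end,
				 struct hmm_range *range, unsigned long cpu_flags)
	{
		/* Slot for addr in the output array, relative to range->start. */
		unsigned long i = (addr - range->start) >> PAGE_SHIFT;

		for (; addr < end; addr += PAGE_SIZE, i++)
			range->hmm_pfns[i] = cpu_flags;
		return 0;
	}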
53 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
54 * @addr: range virtual start address (inclusive)
55 * @end: range virtual end address (exclusive)
61 * or whenever there is no page directory covering the virtual address range.
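The doc lines matched here belong to hmm_vma_fault(), which calls handle_mm_fault() for each page and deliberately returns -EBUSY on success so that hmm_range_fault() restarts the walk and observes the now-populated page tables. A sketch approximating the mainline implementation (exact signatures vary by kernel version):

	static int hmm_vma_fault(unsigned long addr, unsigned long end,
				 unsigned int required_fault, struct mm_walk *walk)
	{
		struct hmm_vma_walk *hmm_vma_walk = walk->private;
		struct vm_area_struct *vma = walk->vma;
		unsigned int fault_flags = FAULT_FLAG_REMOTE;

		hmm_vma_walk->last = addr;
		if (required_fault & HMM_NEED_WRITE_FAULT) {
			if (!(vma->vm_flags & VM_WRITE))
				return -EPERM;
			fault_flags |= FAULT_FLAG_WRITE;
		}

		for (; addr < end; addr += PAGE_SIZE)
			if (handle_mm_fault(vma, addr, fault_flags, NULL) &
			    VM_FAULT_ERROR)
				return -EFAULT;
		/* -EBUSY tells hmm_range_fault() to restart the walk. */
		return -EBUSY;
	}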
90 struct hmm_range *range = hmm_vma_walk->range; in hmm_pte_need_fault() local
94 * consider the default flags requested for the range. The API can be used two ways. in hmm_pte_need_fault()
98 * fault a range with specific flags. For the latter one it is a waste to have the user pre-fill the pfn array with default flags. in hmm_pte_need_fault()
102 pfn_req_flags &= range->pfn_flags_mask; in hmm_pte_need_fault()
103 pfn_req_flags |= range->default_flags; in hmm_pte_need_fault()
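These two assignments implement the policy sketched in the surrounding comment: the per-pfn request bits supplied by the caller are first clipped by pfn_flags_mask, then the range-wide default_flags are OR'd in, so faulting can be driven either page by page or wholesale. The decision that follows reduces to roughly this (flag names per include/linux/hmm.h; the HMM_NEED_* values are internal to mm/hmm.c):

	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* Nothing was requested for this page. */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Write requested but the CPU pte is read-only: need a write fault. */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) && !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* Page not present at all: need a plain fault. */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;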
125 struct hmm_range *range = hmm_vma_walk->range; in hmm_range_need_fault() local
134 if (!((range->default_flags | range->pfn_flags_mask) & in hmm_range_need_fault()
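The matched condition is hmm_range_need_fault()'s fast path: if neither default_flags nor pfn_flags_mask can ever contribute HMM_PFN_REQ_FAULT, then hmm_pte_need_fault() would return 0 for every pfn and the per-page loop can be skipped entirely. Sketch:

	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i)
		required_fault |=
			hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i], cpu_flags);
	return required_fault;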
151 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hole() local
156 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hole()
158 hmm_pfns = &range->hmm_pfns[i]; in hmm_vma_walk_hole()
164 return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR); in hmm_vma_walk_hole()
168 return hmm_pfns_fill(addr, end, range, 0); in hmm_vma_walk_hole()
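The two hmm_pfns_fill() calls matched in hmm_vma_walk_hole() are its two non-faulting outcomes: with no vma backing the hole the pages are reported as HMM_PFN_ERROR, while during a plain snapshot the hole is simply recorded as 0 (not present). A sketch of the control flow around the matched lines:

	required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;	/* asked to fault where no vma exists */
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);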
176 static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range, in pmd_to_hmm_pfn_flags() argument
192 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_handle_pmd() local
198 cpu_flags = pmd_to_hmm_pfn_flags(range, pmd); in hmm_vma_handle_pmd()
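pmd_to_hmm_pfn_flags() translates the hardware pmd permission bits into HMM output flags, and hmm_vma_handle_pmd() then fans the single huge mapping out into one array slot per page. A sketch (newer kernels also OR in a page-order annotation, omitted here):

	static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
							 pmd_t pmd)
	{
		if (pmd_protnone(pmd))
			return 0;
		return pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
					HMM_PFN_VALID;
	}

	/* In hmm_vma_handle_pmd(), every page inherits the same flags: */
	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;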
215 static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range, in pte_to_hmm_pfn_flags() argument
228 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_handle_pte() local
252 range->dev_private_owner) { in hmm_vma_handle_pte()
288 cpu_flags = pte_to_hmm_pfn_flags(range, pte); in hmm_vma_handle_pte()
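The dev_private_owner comparison matched at line 252 is the ownership filter for device-private memory: a swap entry whose pgmap owner equals range->dev_private_owner is translated in place for the driver that owns it, instead of forcing a migration back to system memory. Roughly (a sketch; the helper names have changed across kernel versions):

	swp_entry_t entry = pte_to_swp_entry(pte);

	if (is_device_private_entry(entry) &&
	    pfn_swap_entry_to_page(entry)->pgmap->owner ==
	    range->dev_private_owner) {
		cpu_flags = HMM_PFN_VALID;
		if (is_writable_device_private_entry(entry))
			cpu_flags |= HMM_PFN_WRITE;
		*hmm_pfn = swp_offset(entry) | cpu_flags;
		return 0;
	}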
326 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_pmd() local
328 &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT]; in hmm_vma_walk_pmd()
345 return hmm_pfns_fill(start, end, range, 0); in hmm_vma_walk_pmd()
351 return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); in hmm_vma_walk_pmd()
380 return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); in hmm_vma_walk_pmd()
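The three hmm_pfns_fill() calls matched inside hmm_vma_walk_pmd() mark its non-huge, non-present outcomes: a pmd migration entry is reported as empty (or waited on if a fault was requested), while a non-present or bad pmd becomes HMM_PFN_ERROR. A compressed sketch of the dispatch (the real function rereads the pmd and handles THP splitting races):

	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;	/* restart once migration settles */
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd) || pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_trans_huge(pmd))
		return hmm_vma_handle_pmd(walk, start, end, hmm_pfns, pmd);
	/* else: map the pte level and call hmm_vma_handle_pte() per page */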
401 static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range, in pud_to_hmm_pfn_flags() argument
415 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_pud() local
443 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_pud()
445 hmm_pfns = &range->hmm_pfns[i]; in hmm_vma_walk_pud()
447 cpu_flags = pud_to_hmm_pfn_flags(range, pud); in hmm_vma_walk_pud()
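hmm_vma_walk_pud() is the huge-pud analogue of the pmd path, built only when the architecture supports transparent huge pud mappings; pud_to_hmm_pfn_flags() plausibly mirrors its pmd counterpart (sketch):

	static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
							 pud_t pud)
	{
		if (!pud_present(pud))
			return 0;
		return pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
					HMM_PFN_VALID;
	}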
479 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hugetlb_entry() local
490 i = (start - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hugetlb_entry()
491 pfn_req_flags = range->hmm_pfns[i]; in hmm_vma_walk_hugetlb_entry()
492 cpu_flags = pte_to_hmm_pfn_flags(range, entry) | in hmm_vma_walk_hugetlb_entry()
516 range->hmm_pfns[i] = pfn | cpu_flags; in hmm_vma_walk_hugetlb_entry()
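For hugetlb, the assignment matched at line 516 is the tail of a loop that expands one huge pte into per-page array entries: cpu_flags was computed once from the pte (the truncated line 492 also ORs in an order annotation for the huge page size), and each page's pfn is offset from the pte's base pfn. Approximately:

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;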
529 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_test() local
544 * If a fault is requested for an unsupported range then it is a hard failure. in hmm_vma_walk_test()
548 range->hmm_pfns + in hmm_vma_walk_test()
549 ((start - range->start) >> PAGE_SHIFT), in hmm_vma_walk_test()
553 hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); in hmm_vma_walk_test()
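hmm_vma_walk_test() is the per-vma gate these matches come from: mappings without struct pages behind them (VM_IO, VM_PFNMAP) or without read permission cannot be handled, so a requested fault fails hard and a plain snapshot marks the whole vma HMM_PFN_ERROR and skips it. Sketch:

	/* Readable, struct-page-backed vma: walk it normally. */
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
	    (vma->vm_flags & VM_READ))
		return 0;

	/* Unsupported vma: hard failure if any fault was requested ... */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
				 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	/* ... otherwise flag every page as an error and skip the vma. */
	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	return 1;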
569 * hmm_range_fault - try to fault some address in a virtual address range
570 * @range: argument structure
577 -EPERM: Invalid permission (e.g., asking for write and range is read only). in hmm_range_fault()
579 -EBUSY: The range has been invalidated and the caller needs to wait for the invalidation to finish. in hmm_range_fault()
587 int hmm_range_fault(struct hmm_range *range) in hmm_range_fault() argument
590 .range = range, in hmm_range_fault()
591 .last = range->start, in hmm_range_fault()
593 struct mm_struct *mm = range->notifier->mm; in hmm_range_fault()
599 /* If range is no longer valid force retry. */ in hmm_range_fault()
600 if (mmu_interval_check_retry(range->notifier, in hmm_range_fault()
601 range->notifier_seq)) in hmm_range_fault()
603 ret = walk_page_range(mm, hmm_vma_walk.last, range->end, in hmm_range_fault()
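On the caller's side, the retry visible in the matched body (recheck the notifier sequence, restart on -EBUSY) pairs with the sequence-count pattern described in Documentation/mm/hmm.rst. A hedged sketch of hypothetical driver code; driver_populate() and driver_lock are illustrative names, not kernel APIs:

	static DEFINE_MUTEX(driver_lock);	/* serializes device page tables */

	int driver_populate(struct mmu_interval_notifier *ni, struct mm_struct *mm,
			    unsigned long start, unsigned long end,
			    unsigned long *pfns, void *owner)
	{
		struct hmm_range range = {
			.notifier		= ni,
			.start			= start,
			.end			= end,
			.hmm_pfns		= pfns,
			.default_flags		= HMM_PFN_REQ_FAULT,
			.dev_private_owner	= owner,
		};
		int ret;

	again:
		range.notifier_seq = mmu_interval_read_begin(ni);
		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				goto again;	/* invalidated mid-walk */
			return ret;
		}

		mutex_lock(&driver_lock);
		if (mmu_interval_read_retry(ni, range.notifier_seq)) {
			mutex_unlock(&driver_lock);
			goto again;	/* invalidated after the walk */
		}
		/* pfns[] is stable here: program the device page tables. */
		mutex_unlock(&driver_lock);
		return 0;
	}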