Lines matching refs:walk in mm/pagewalk.c (all references to the identifier walk)

24 				unsigned long end, struct mm_walk *walk)  in walk_pte_range_inner()  argument
26 const struct mm_walk_ops *ops = walk->ops; in walk_pte_range_inner()
30 err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk); in walk_pte_range_inner()
42 struct mm_walk *walk) in walk_pte_range() argument
48 if (walk->no_vma) { in walk_pte_range()
55 if (walk->mm == &init_mm || addr >= TASK_SIZE) in walk_pte_range()
60 err = walk_pte_range_inner(pte, addr, end, walk); in walk_pte_range()
61 if (walk->mm != &init_mm && addr < TASK_SIZE) in walk_pte_range()
65 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in walk_pte_range()
67 err = walk_pte_range_inner(pte, addr, end, walk); in walk_pte_range()
72 walk->action = ACTION_AGAIN; in walk_pte_range()
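
The walk_pte_range() fragments above show the leaf level: the PTE table is mapped (and its spinlock taken via pte_offset_map_lock(), except for no-VMA walks of init_mm/kernel addresses) and every slot is passed to ops->pte_entry; a failed map sets walk->action = ACTION_AGAIN so the level is retried. A minimal, hedged sketch of such a callback, where count_present_pte and the counter in walk->private are hypothetical names, not kernel API:

#include <linux/mm.h>
#include <linux/pagewalk.h>

/* Hypothetical pte_entry callback: count present PTEs. It runs with
 * the PTE spinlock held by walk_pte_range(), so it must not sleep. */
static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr_present = walk->private;

	if (pte_present(ptep_get(pte)))
		(*nr_present)++;
	return 0;	/* any non-zero return aborts the walk */
}

Later sketches in this section reuse this callback and these includes.
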
78 unsigned long end, struct mm_walk *walk, int pdshift) in walk_hugepd_range() argument
81 const struct mm_walk_ops *ops = walk->ops; in walk_hugepd_range()
94 spin_lock(&walk->mm->page_table_lock); in walk_hugepd_range()
96 err = ops->pte_entry(pte, addr, addr + page_size, walk); in walk_hugepd_range()
97 spin_unlock(&walk->mm->page_table_lock); in walk_hugepd_range()
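
walk_hugepd_range() (the CONFIG_ARCH_HAS_HUGEPD variant; the second declaration below is the empty stub) feeds hugepd entries to the same ops->pte_entry under mm->page_table_lock, stepping by the huge page_size. A callback should therefore size its work from next - addr rather than assuming PAGE_SIZE. A hedged sketch with a hypothetical byte accumulator in walk->private:

/* Hypothetical pte_entry callback that tolerates hugepd spans: the
 * [addr, next) window may cover one huge page, not one base page. */
static int accumulate_mapped_bytes(pte_t *pte, unsigned long addr,
				   unsigned long next, struct mm_walk *walk)
{
	unsigned long *bytes = walk->private;

	if (pte_present(ptep_get(pte)))
		*bytes += next - addr;	/* page_size under hugepd */
	return 0;
}
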
109 unsigned long end, struct mm_walk *walk, int pdshift) in walk_hugepd_range() argument
116 struct mm_walk *walk) in walk_pmd_range() argument
120 const struct mm_walk_ops *ops = walk->ops; in walk_pmd_range()
130 err = ops->pte_hole(addr, next, depth, walk); in walk_pmd_range()
136 walk->action = ACTION_SUBTREE; in walk_pmd_range()
143 err = ops->pmd_entry(pmd, addr, next, walk); in walk_pmd_range()
147 if (walk->action == ACTION_AGAIN) in walk_pmd_range()
154 if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) || in walk_pmd_range()
155 walk->action == ACTION_CONTINUE || in walk_pmd_range()
159 if (walk->vma) in walk_pmd_range()
160 split_huge_pmd(walk->vma, pmd, addr); in walk_pmd_range()
163 err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, PMD_SHIFT); in walk_pmd_range()
165 err = walk_pte_range(pmd, addr, next, walk); in walk_pmd_range()
169 if (walk->action == ACTION_AGAIN) in walk_pmd_range()
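
walk_pmd_range() resets walk->action to ACTION_SUBTREE before each ops->pmd_entry call. The callback may set ACTION_CONTINUE to claim the entry and skip the PTE level (otherwise a huge PMD inside a VMA is split via split_huge_pmd() before descending) or ACTION_AGAIN to re-read a changed entry. A hedged sketch of a THP-aware callback; count_thp and its counter are hypothetical:

/* Hypothetical pmd_entry callback: account THP leaves and skip the
 * PTE level so the core walker does not split them. */
static int count_thp(pmd_t *pmd, unsigned long addr,
		     unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr_thp = walk->private;
	spinlock_t *ptl = pmd_trans_huge_lock(pmd, walk->vma);

	if (ptl) {		/* non-NULL: *pmd is a stable huge entry */
		(*nr_thp)++;
		spin_unlock(ptl);
		walk->action = ACTION_CONTINUE;	/* don't descend or split */
	}
	return 0;
}
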
178 struct mm_walk *walk) in walk_pud_range() argument
182 const struct mm_walk_ops *ops = walk->ops; in walk_pud_range()
192 err = ops->pte_hole(addr, next, depth, walk); in walk_pud_range()
198 walk->action = ACTION_SUBTREE; in walk_pud_range()
201 err = ops->pud_entry(pud, addr, next, walk); in walk_pud_range()
205 if (walk->action == ACTION_AGAIN) in walk_pud_range()
208 if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) || in walk_pud_range()
209 walk->action == ACTION_CONTINUE || in walk_pud_range()
213 if (walk->vma) in walk_pud_range()
214 split_huge_pud(walk->vma, pud, addr); in walk_pud_range()
219 err = walk_hugepd_range((hugepd_t *)pud, addr, next, walk, PUD_SHIFT); in walk_pud_range()
221 err = walk_pmd_range(pud, addr, next, walk); in walk_pud_range()
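
walk_pud_range() mirrors the PMD logic one level up (split_huge_pud() instead of split_huge_pmd()); walk_p4d_range() and walk_pgd_range() below it have no action handling and simply recurse or report holes. A hedged sketch of a pud_entry callback that claims huge leaves; a real user would take the appropriate lock rather than read *pud racily:

/* Hypothetical pud_entry callback: claim huge PUDs so the walker
 * neither splits them nor visits the levels below. The unlocked read
 * of *pud is a simplification for this sketch. */
static int skip_huge_pud(pud_t *pud, unsigned long addr,
			 unsigned long next, struct mm_walk *walk)
{
	if (pud_leaf(*pud))
		walk->action = ACTION_CONTINUE;
	return 0;	/* default ACTION_SUBTREE descends otherwise */
}
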
230 struct mm_walk *walk) in walk_p4d_range() argument
234 const struct mm_walk_ops *ops = walk->ops; in walk_p4d_range()
243 err = ops->pte_hole(addr, next, depth, walk); in walk_p4d_range()
249 err = ops->p4d_entry(p4d, addr, next, walk); in walk_p4d_range()
254 err = walk_hugepd_range((hugepd_t *)p4d, addr, next, walk, P4D_SHIFT); in walk_p4d_range()
256 err = walk_pud_range(p4d, addr, next, walk); in walk_p4d_range()
265 struct mm_walk *walk) in walk_pgd_range() argument
269 const struct mm_walk_ops *ops = walk->ops; in walk_pgd_range()
272 if (walk->pgd) in walk_pgd_range()
273 pgd = walk->pgd + pgd_index(addr); in walk_pgd_range()
275 pgd = pgd_offset(walk->mm, addr); in walk_pgd_range()
280 err = ops->pte_hole(addr, next, 0, walk); in walk_pgd_range()
286 err = ops->pgd_entry(pgd, addr, next, walk); in walk_pgd_range()
291 err = walk_hugepd_range((hugepd_t *)pgd, addr, next, walk, PGDIR_SHIFT); in walk_pgd_range()
293 err = walk_p4d_range(pgd, addr, next, walk); in walk_pgd_range()
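
Every level reports non-present entries through ops->pte_hole with a depth argument: 0 for a hole seen at the PGD level in walk_pgd_range(), growing toward 3 at the PMD level (smaller where levels are folded), and -1 for the VMA-gap reports made by walk_page_range() itself. A hedged sketch that buckets unmapped bytes by depth; hole_stats is a hypothetical struct passed via walk->private:

/* Hypothetical pte_hole callback: tally unmapped bytes per depth,
 * where depth is -1 for VMA gaps and 0..3 for pgd..pmd level holes. */
struct hole_stats {
	unsigned long bytes[5];
};

static int note_hole(unsigned long addr, unsigned long next,
		     int depth, struct mm_walk *walk)
{
	struct hole_stats *stats = walk->private;

	stats->bytes[depth + 1] += next - addr;	/* slot 0 is depth -1 */
	return 0;
}
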
310 struct mm_walk *walk) in walk_hugetlb_range() argument
312 struct vm_area_struct *vma = walk->vma; in walk_hugetlb_range()
318 const struct mm_walk_ops *ops = walk->ops; in walk_hugetlb_range()
326 err = ops->hugetlb_entry(pte, hmask, addr, next, walk); in walk_hugetlb_range()
328 err = ops->pte_hole(addr, next, -1, walk); in walk_hugetlb_range()
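
For hugetlb VMAs, __walk_page_range() diverts to walk_hugetlb_range() (the second walk_hugetlb_range() declaration below is the !CONFIG_HUGETLB_PAGE stub), which steps in huge-page strides: present entries go to ops->hugetlb_entry with hmask identifying the huge-page boundary, missing ones to pte_hole with depth -1. A hedged sketch, assuming the older single-argument huge_ptep_get() that matches this version of the file:

/* Hypothetical hugetlb_entry callback: count present huge pages.
 * huge_ptep_get() is used in its older one-argument form here. */
static int count_huge(pte_t *pte, unsigned long hmask, unsigned long addr,
		      unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr_huge = walk->private;

	if (pte_present(huge_ptep_get(pte)))
		(*nr_huge)++;
	return 0;
}
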
339 struct mm_walk *walk) in walk_hugetlb_range() argument
353 struct mm_walk *walk) in walk_page_test() argument
355 struct vm_area_struct *vma = walk->vma; in walk_page_test()
356 const struct mm_walk_ops *ops = walk->ops; in walk_page_test()
359 return ops->test_walk(start, end, walk); in walk_page_test()
372 err = ops->pte_hole(start, end, -1, walk); in walk_page_test()
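
walk_page_test() gives ops->test_walk the per-VMA veto: a positive return skips the VMA silently, 0 walks it, and a negative errno aborts the walk; with no test_walk callback, VM_PFNMAP VMAs are skipped and reported as a depth -1 hole (the pte_hole call above). A hedged sketch of such a filter:

/* Hypothetical test_walk callback: mimic the default VM_PFNMAP policy
 * and additionally skip VM_IO mappings. */
static int test_skip_io(unsigned long start, unsigned long end,
			struct mm_walk *walk)
{
	if (walk->vma->vm_flags & (VM_PFNMAP | VM_IO))
		return 1;	/* skip this VMA */
	return 0;		/* walk it */
}
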
379 struct mm_walk *walk) in __walk_page_range() argument
382 struct vm_area_struct *vma = walk->vma; in __walk_page_range()
383 const struct mm_walk_ops *ops = walk->ops; in __walk_page_range()
386 err = ops->pre_vma(start, end, walk); in __walk_page_range()
393 err = walk_hugetlb_range(start, end, walk); in __walk_page_range()
395 err = walk_pgd_range(start, end, walk); in __walk_page_range()
398 ops->post_vma(walk); in __walk_page_range()
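
__walk_page_range() brackets each VMA with the optional ops->pre_vma and ops->post_vma hooks: a pre_vma error aborts the walk before any table traversal, and post_vma runs only after a successful pre_vma; in between, hugetlb VMAs go through walk_hugetlb_range() and everything else through walk_pgd_range(). A hedged sketch of a matched pair:

/* Hypothetical pre_vma/post_vma pair: log each VMA on entry and exit.
 * A non-zero return from the pre hook would abort the walk. */
static int log_pre_vma(unsigned long start, unsigned long end,
		       struct mm_walk *walk)
{
	pr_debug("walk enter %lx-%lx\n", walk->vma->vm_start,
		 walk->vma->vm_end);
	return 0;
}

static void log_post_vma(struct mm_walk *walk)
{
	pr_debug("walk leave %lx-%lx\n", walk->vma->vm_start,
		 walk->vma->vm_end);
}
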
477 struct mm_walk walk = { in walk_page_range() local
486 if (!walk.mm) in walk_page_range()
489 process_mm_walk_lock(walk.mm, ops->walk_lock); in walk_page_range()
491 vma = find_vma(walk.mm, start); in walk_page_range()
494 walk.vma = NULL; in walk_page_range()
497 err = ops->pte_hole(start, next, -1, &walk); in walk_page_range()
499 walk.vma = NULL; in walk_page_range()
502 err = ops->pte_hole(start, next, -1, &walk); in walk_page_range()
505 walk.vma = vma; in walk_page_range()
509 err = walk_page_test(start, next, &walk); in walk_page_range()
521 err = __walk_page_range(start, next, &walk); in walk_page_range()
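
walk_page_range() is the usual entry point: it asserts the mmap_lock mode requested by ops->walk_lock (process_mm_walk_lock() above), iterates VMAs with find_vma(), reports uncovered ranges to pte_hole with depth -1, and runs walk_page_test() then __walk_page_range() on each VMA. A hedged end-to-end sketch reusing the hypothetical count_present_pte() from the walk_pte_range() notes:

static const struct mm_walk_ops count_ops = {
	.pte_entry	= count_present_pte,
	.walk_lock	= PGWALK_RDLOCK,	/* we hold mmap_lock for read */
};

/* Hypothetical helper: count present base pages across a user mm. */
static unsigned long count_present(struct mm_struct *mm)
{
	unsigned long nr_present = 0;

	mmap_read_lock(mm);
	walk_page_range(mm, 0, TASK_SIZE, &count_ops, &nr_present);
	mmap_read_unlock(mm);
	return nr_present;
}
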
548 struct mm_walk walk = { in walk_page_range_novma() local
556 if (start >= end || !walk.mm) in walk_page_range_novma()
559 mmap_assert_write_locked(walk.mm); in walk_page_range_novma()
561 return walk_pgd_range(start, end, &walk); in walk_page_range_novma()
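
walk_page_range_novma() skips VMA handling entirely (walk.no_vma is set) for kernel and debug walks such as page-table dumping; note the mmap_assert_write_locked() above, so the lock must be held for writing even for a read-only traversal. A hedged sketch over init_mm, with walk_kernel_range a hypothetical wrapper:

/* Hypothetical wrapper: walk a kernel virtual range with no VMAs;
 * pgd == NULL means the walk uses init_mm's own page tables. */
static int walk_kernel_range(unsigned long start, unsigned long end,
			     const struct mm_walk_ops *ops, void *private)
{
	int err;

	mmap_write_lock(&init_mm);
	err = walk_page_range_novma(&init_mm, start, end, ops, NULL,
				    private);
	mmap_write_unlock(&init_mm);
	return err;
}
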
568 struct mm_walk walk = { in walk_page_range_vma() local
575 if (start >= end || !walk.mm) in walk_page_range_vma()
580 process_mm_walk_lock(walk.mm, ops->walk_lock); in walk_page_range_vma()
582 return __walk_page_range(start, end, &walk); in walk_page_range_vma()
588 struct mm_walk walk = { in walk_page_vma() local
595 if (!walk.mm) in walk_page_vma()
598 process_mm_walk_lock(walk.mm, ops->walk_lock); in walk_page_vma()
600 return __walk_page_range(vma->vm_start, vma->vm_end, &walk); in walk_page_vma()
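
walk_page_range_vma() and walk_page_vma() confine the walk to a single known VMA, the former over a caller-chosen sub-range and the latter over the whole VMA; both validate walk.mm, apply the ops->walk_lock handling, and then call __walk_page_range(). A hedged sketch reusing the hypothetical count_ops from above:

/* Hypothetical helper: count present PTEs in one VMA. */
static unsigned long count_present_vma(struct vm_area_struct *vma)
{
	unsigned long nr_present = 0;

	mmap_read_lock(vma->vm_mm);
	walk_page_vma(vma, &count_ops, &nr_present);
	mmap_read_unlock(vma->vm_mm);
	return nr_present;
}
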
637 struct mm_walk walk = { in walk_page_mapping() local
662 walk.vma = vma; in walk_page_mapping()
663 walk.mm = vma->vm_mm; in walk_page_mapping()
665 err = walk_page_test(vma->vm_start, vma->vm_end, &walk); in walk_page_mapping()
672 err = __walk_page_range(start_addr, end_addr, &walk); in walk_page_mapping()
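
walk_page_mapping() walks every VMA mapping a pgoff range of an address_space rather than a single mm: it iterates the file's interval tree, points walk.vma and walk.mm at each mapping, and applies walk_page_test() before __walk_page_range(). The caller holds mapping->i_mmap_rwsem instead of any mmap_lock. A hedged sketch, again reusing the hypothetical count_ops:

/* Hypothetical helper: visit every mapping of the first nr_pages of a
 * file, under i_mmap_rwsem as walk_page_mapping() requires. */
static unsigned long count_file_present(struct address_space *mapping,
					pgoff_t nr_pages)
{
	unsigned long nr_present = 0;

	i_mmap_lock_read(mapping);
	walk_page_mapping(mapping, 0, nr_pages, &count_ops, &nr_present);
	i_mmap_unlock_read(mapping);
	return nr_present;
}
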