/* xref: /openbmc/linux/mm/pagewalk.c (revision a5c43003) */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

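/*
 * Walk the PTEs covering [addr, end) within a single PMD, invoking
 * walk->pte_entry once per page. A non-zero return from the callback
 * stops the walk and is propagated to the caller.
 */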
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

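/*
 * Walk the PMD entries covering [addr, end) within a single PUD. Empty
 * or bad PMDs are reported through walk->pte_hole; present ones go to
 * walk->pmd_entry and then, if a PTE callback is set, down to
 * walk_pte_range().
 */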
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (!err && walk->pte_entry)
			err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

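/*
 * Walk the PUD entries covering [addr, end) within a single PGD,
 * reporting holes via walk->pte_hole and descending into
 * walk_pmd_range() when a PMD- or PTE-level callback is set.
 */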
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pud_entry)
			err = walk->pud_entry(pud, addr, next, walk);
		if (!err && (walk->pmd_entry || walk->pte_entry))
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
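/* Return the end of the current huge page, clamped to the walk's end. */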
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

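/*
 * Walk the huge-page-sized entries of a hugetlb VMA, invoking
 * walk->hugetlb_entry for each huge PTE present in [addr, end).
 */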
static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask);
		if (pte && walk->hugetlb_entry)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		if (err)
			return err;
	} while (addr = next, addr != end);

	return 0;
}
#endif

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree;
 *        walk->mm names the memory map to be walked
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling the supplied callbacks. Callbacks are called in order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer, the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * No locks are taken, but the bottom-level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * that value is propagated back to the caller. Otherwise 0 is returned.
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	struct vm_area_struct *vma;

	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);

		/*
		 * Handle a hugetlb vma individually, because the page table
		 * walk for a hugetlb page is architecture-dependent and we
		 * can't handle it in the same manner as non-huge pages.
		 */
		vma = find_vma(walk->mm, addr);
#ifdef CONFIG_HUGETLB_PAGE
		if (vma && is_vm_hugetlb_page(vma)) {
			if (vma->vm_end < next)
				next = vma->vm_end;
			/*
			 * A hugepage is very tightly coupled with its vma,
			 * so walk through the hugetlb entries within this vma.
			 */
			err = walk_hugetlb_range(vma, addr, next, walk);
			if (err)
				break;
			pgd = pgd_offset(walk->mm, next);
			continue;
		}
#endif
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			pgd++;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, walk);
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
		pgd++;
	} while (addr = next, addr != end);

	return err;
}
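
/*
 * Example usage (illustrative only, not part of the original file): a
 * minimal sketch of a caller that counts the present pages in a range.
 * The count_pte() callback and count_present_pages() helper are
 * hypothetical names; only walk_page_range() and the mm_walk callback
 * signatures used above come from this file. Guarded with #if 0 so the
 * sketch does not affect the build.
 */
#if 0
static int count_pte(pte_t *pte, unsigned long addr, unsigned long next,
		     struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* returning non-zero would abort the walk */
}

static unsigned long count_present_pages(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= count_pte,
		.mm		= mm,
		.private	= &count,
	};

	/* find_vma() in walk_page_range() expects mmap_sem to be held */
	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);

	return count;
}
#endif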