/* xref: /openbmc/linux/mm/pagewalk.c (revision 93d90ad7) */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

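/*
 * Walk the PTEs mapped by @pmd in [@addr, @end), calling ->pte_entry
 * once per page.
 */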
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

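/*
 * Walk the PMD entries under @pud in [@addr, @end), reporting holes via
 * ->pte_hole, calling ->pmd_entry on each present entry, and descending
 * into walk_pte_range() when a ->pte_entry callback is supplied.
 */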
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if (!walk->pte_entry)
			continue;

		split_huge_page_pmd_mm(walk->mm, addr, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			goto again;
		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

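/*
 * Walk the PUD entries under @pgd in [@addr, @end), descending into
 * walk_pmd_range() when lower-level callbacks are supplied.
 */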
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pud_entry)
			err = walk->pud_entry(pud, addr, next, walk);
		if (!err && (walk->pmd_entry || walk->pte_entry))
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
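/*
 * Return the end of the current hugepage, clamped to @end, so the walk
 * advances in huge-page-sized steps.
 */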
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

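/*
 * Walk the hugetlb entries backing @vma in [@addr, @end), calling
 * ->hugetlb_entry for each huge pte found.
 */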
static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask);
		if (pte && walk->hugetlb_entry)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		if (err)
			return err;
	} while (addr = next, addr != end);

	return 0;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer, the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * No locks are taken by default, but splitting a transparent huge page
 * may take the page table lock, and the bottom-level iterator will map
 * PTE directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is
 * returned.
 *
 * walk->mm->mmap_sem must be held for at least read if
 * walk->hugetlb_entry is !NULL.
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

	pgd = pgd_offset(walk->mm, addr);
	do {
		struct vm_area_struct *vma = NULL;

		next = pgd_addr_end(addr, end);

		/*
		 * This function was not intended to be vma-based, but
		 * there are vma special cases that must be handled:
		 * - hugetlb vmas
		 * - VM_PFNMAP vmas
		 */
		vma = find_vma(walk->mm, addr);
		if (vma) {
			/*
			 * There are no page structures backing a VM_PFNMAP
			 * range, so do not allow split_huge_page_pmd().
			 */
			if ((vma->vm_start <= addr) &&
			    (vma->vm_flags & VM_PFNMAP)) {
				next = vma->vm_end;
				pgd = pgd_offset(walk->mm, next);
				continue;
			}
			/*
			 * Handle hugetlb vmas individually because the page
			 * table walk for hugetlb pages depends on the
			 * architecture and cannot be handled in the same
			 * manner as non-huge pages.
			 */
			if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
			    is_vm_hugetlb_page(vma)) {
				if (vma->vm_end < next)
					next = vma->vm_end;
				/*
				 * A hugepage is very tightly coupled with its
				 * vma, so walk through the hugetlb entries
				 * within this vma.
				 */
				err = walk_hugetlb_range(vma, addr, next, walk);
				if (err)
					break;
				pgd = pgd_offset(walk->mm, next);
				continue;
			}
		}

		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			pgd++;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, walk);
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
		pgd++;
	} while (addr = next, addr < end);

	return err;
}
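
/*
 * Example usage (a minimal sketch, not part of the original file): a
 * caller fills in the mm_walk callbacks it needs and invokes
 * walk_page_range() with mmap_sem held for read. Returning non-zero
 * from any callback aborts the walk. The names count_present_ptes,
 * nr_present, mm, start and end below are hypothetical.
 *
 *	static int count_present_ptes(pte_t *pte, unsigned long addr,
 *				      unsigned long end,
 *				      struct mm_walk *walk)
 *	{
 *		unsigned long *nr_present = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*nr_present)++;
 *		return 0;
 *	}
 *
 *	unsigned long nr_present = 0;
 *	struct mm_walk count_walk = {
 *		.pte_entry	= count_present_ptes,
 *		.mm		= mm,
 *		.private	= &nr_present,
 *	};
 *	int err;
 *
 *	down_read(&mm->mmap_sem);
 *	err = walk_page_range(start, end, &count_walk);
 *	up_read(&mm->mmap_sem);
 */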
249