xref: /openbmc/linux/mm/page_vma_mapped.c (revision d699090510c3223641a23834b4710e2d4309a6ad)
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
{
	pte_t ptent;

	if (pvmw->flags & PVMW_SYNC) {
		/* Use the stricter lookup */
		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
						pvmw->address, &pvmw->ptl);
		*ptlp = pvmw->ptl;
		return !!pvmw->pte;
	}

	/*
	 * It is important to return the ptl corresponding to pte,
	 * in case *pvmw->pmd changes underneath us; so we need to
	 * return it even when choosing not to lock, in case the caller
	 * proceeds to loop over the next ptes and finds a match later.
	 * Though, in most cases, the page lock already protects this.
	 */
	pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
					  pvmw->address, ptlp);
	if (!pvmw->pte)
		return false;

	ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		if (!is_swap_pte(ptent))
			return false;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;
		/*
		 * Handle un-addressable ZONE_DEVICE memory.
		 *
		 * We get here when we are trying to unmap a private
		 * device page from the process address space. Such a
		 * page is not CPU accessible and thus is mapped as
		 * a special swap entry; nonetheless it still counts
		 * as a valid regular mapping of the page (and is
		 * accounted as such in the page map counts).
		 *
		 * So handle this special case as if it were a normal
		 * page mapping, i.e. lock the CPU page table and
		 * return true.
		 *
		 * For more details on device private memory see HMM
		 * (include/linux/hmm.h or mm/hmm.c).
		 */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;
	} else if (!pte_present(ptent)) {
		return false;
	}
	pvmw->ptl = *ptlp;
	spin_lock(pvmw->ptl);
	return true;
}
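
/*
 * A minimal sketch (not part of the original file) of how the ptl returned
 * through @ptlp above is meant to be used: when map_pte() declines to lock
 * but leaves pvmw->pte mapped, the caller can keep scanning ptes unlocked
 * and only take the reported lock once a candidate entry turns up, e.g.:
 *
 *	spinlock_t *ptl;
 *
 *	if (!map_pte(pvmw, &ptl) && pvmw->pte) {
 *		... advance pvmw->pte until a non-none entry is found ...
 *		if (!pvmw->ptl) {
 *			pvmw->ptl = ptl;
 *			spin_lock(pvmw->ptl);
 *		}
 *		... re-check the entry under the lock ...
 *	}
 */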

/**
 * check_pte - check if the pfn range [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages)
 * is mapped at @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, provides the pte and the pfn range
 * to check against
 * @pte_nr: the number of small pages described by @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where the pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to
 * an arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points into [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages).
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * into [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages).
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr)
{
	unsigned long pfn;
	pte_t ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(ptent))
			return false;
		entry = pte_to_swp_entry(ptent);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else {
		if (!pte_present(ptent))
			return false;

		pfn = pte_pfn(ptent);
	}

	if ((pfn + pte_nr - 1) < pvmw->pfn)
		return false;
	if (pfn > (pvmw->pfn + pvmw->nr_pages - 1))
		return false;
	return true;
}
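
/*
 * A worked example of the final overlap test above (illustrative numbers
 * only): if the pte describes pfns [pfn, pfn + pte_nr) = [100, 102) and
 * the walk targets [pvmw->pfn, pvmw->pfn + nr_pages) = [101, 105), then
 * 100 + 2 - 1 = 101 is not below 101 and 100 is not above 101 + 4 - 1 = 104,
 * so the ranges overlap and check_pte() returns true. Comparing against the
 * last valid pfn ("+ N - 1") rather than the one-past-the-end pfn also
 * avoids overflowing unsigned long for a range that ends at the very top
 * of the pfn space.
 */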

/* Returns true if the two ranges overlap.  Careful to not overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}

static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}
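
/*
 * A worked example of the rounding in step_forward() (illustrative values,
 * assuming 4KiB base pages where PMD_SIZE is 0x200000): with address =
 * 0x12345000 and size = PMD_SIZE, (address + size) & ~(size - 1) yields
 * 0x12400000, the start of the next PMD-sized region. If the addition
 * wraps to zero at the very top of the address space, the address is
 * pinned to ULONG_MAX so the caller's "address < end" loop still
 * terminates.
 */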

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);
		/*
		 * All callers that get here will already hold the
		 * i_mmap_rwsem.  Therefore, no additional locks need to be
		 * taken before calling hugetlb_walk().
		 */
		pvmw->pte = hugetlb_walk(vma, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
		if (!check_pte(pvmw, pages_per_huge_page(hstate)))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = pmdp_get_lockless(pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
		    (pmd_present(pmde) && pmd_devmap(pmde))) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset_pfn(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    transhuge_vma_suitable(vma, pvmw->address) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw, &ptl)) {
			if (!pvmw->pte)
				goto restart;
			goto next_pte;
		}
this_pte:
		if (check_pte(pvmw, 1))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
		} while (pte_none(ptep_get(pvmw->pte)));

		if (!pvmw->ptl) {
			pvmw->ptl = ptl;
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
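
/*
 * A minimal sketch of the intended calling pattern, modelled on
 * page_mapped_in_vma() below (illustrative only; "page", "vma" and
 * "address" are whatever the rmap walker already holds):
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.pfn = page_to_pfn(page),
 *		.nr_pages = 1,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		... pvmw.ptl is held here; pvmw.pte (or pvmw.pmd for a
 *		... PMD-mapped THP) points at the entry mapping the page ...
 *	}
 */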

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of @vma, and 0 if
 * it is not. Only valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
347