// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

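/* Finish the walk and report to the caller that nothing (more) was found */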
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

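/*
 * map_pte - map the page table for pvmw->address and decide whether the
 * PTE there is worth examining under the lock.
 *
 * Returns true with pvmw->ptl held when the entry may be relevant (or,
 * with PVMW_SYNC, unconditionally); returns false, without taking the
 * lock, when the entry cannot possibly match, so the caller can move on
 * to the next PTE.
 */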
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	if (pvmw->flags & PVMW_SYNC) {
		/* Use the stricter lookup */
		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
						pvmw->address, &pvmw->ptl);
		return true;
	}

	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (pvmw->flags & PVMW_MIGRATION) {
		if (!is_swap_pte(*pvmw->pte))
			return false;
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;
		/*
		 * Handle un-addressable ZONE_DEVICE memory.
		 *
		 * We get here when we are trying to unmap a private
		 * device page from the process address space. Such a
		 * page is not CPU accessible and thus is mapped as a
		 * special swap entry; nonetheless it still counts as
		 * a valid regular mapping for the page (and is
		 * accounted as such in the page's map count).
		 *
		 * So handle this special case as if it were a normal
		 * page mapping, i.e. lock the CPU page table and
		 * return true.
		 *
		 * For more details on device private memory see HMM
		 * (include/linux/hmm.h or mm/hmm.c).
		 */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;
	} else if (!pte_present(*pvmw->pte)) {
		return false;
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

/**
 * check_pte - check if [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is
 * mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes the pte and the pfn range
 * to check
 *
 * page_vma_mapped_walk() found a place where the pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points into the pfn range.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * into the pfn range.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

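	/*
	 * The unsigned subtraction wraps around when pfn is below
	 * pvmw->pfn, so this one comparison checks both bounds:
	 * pvmw->pfn <= pfn < pvmw->pfn + pvmw->nr_pages.
	 */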
	return (pfn - pvmw->pfn) < pvmw->nr_pages;
}

/* Returns true if the two ranges overlap.  Careful to not overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
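	/*
	 * The huge pmd entry covers pfns [pfn, pfn + HPAGE_PMD_NR);
	 * the walk targets [pvmw->pfn, pvmw->pfn + pvmw->nr_pages).
	 */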
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}

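/* Advance to the start of the next @size-aligned region, or ULONG_MAX on wrap */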
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address
 * and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping was handled on the last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);
		/* The only possible mapping was handled on the last iteration */
		if (pvmw->pte)
			return not_found(pvmw);
		/*
		 * All callers that get here will already hold the
		 * i_mmap_rwsem.  Therefore, no additional locks need to be
		 * taken before calling hugetlb_walk().
		 */
		pvmw->pte = hugetlb_walk(vma, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
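	/*
	 * Walk the page tables top-down: a non-present entry at any upper
	 * level lets step_forward() skip the whole range that entry covers.
	 */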
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = pmdp_get_lockless(pvmw->pmd);

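		/*
		 * If the pmd maps a huge page, or holds a migration entry,
		 * the whole range is decided at this level under the pmd
		 * lock; if the THP was split under us instead, fall through
		 * to the pte level below.
		 */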
		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
		    (pmd_present(pmde) && pmd_devmap(pmde))) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset_pfn(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    transhuge_vma_suitable(vma, pvmw->address) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
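		/*
		 * Advance to the next non-none pte; restart the full page
		 * table walk whenever the address crosses into another
		 * page table.
		 */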
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
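
/*
 * Typical usage, a sketch mirroring callers such as those in mm/rmap.c:
 * initialize the walk state, then iterate until the walk reports that no
 * further mapping exists. Each successful iteration returns with the
 * relevant page table lock held.
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.pfn = pfn,
 *		.nr_pages = nr_pages,
 *		.vma = vma,
 *		.address = address,
 *	};
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		... inspect *pvmw.pte, or pvmw.pmd when pvmw.pte is NULL ...
 *	}
 */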

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}