xref: /openbmc/linux/mm/page_vma_mapped.c (revision 79e790ff)
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

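/*
 * Convenience helper: finish the walk (page_vma_mapped_walk_done() unmaps
 * the pte and drops the ptl if they are held) and report failure.
 */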
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

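/*
 * map_pte() maps the pte for pvmw->address and decides whether taking the
 * pte lock is worthwhile.  With PVMW_SYNC every pte is checked under the
 * lock; otherwise entries that cannot possibly map the page (non-swap ptes
 * on a migration walk; swap ptes other than device-private entries, and
 * non-present ptes, on a regular walk) are rejected without locking.
 */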
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space.  Such a
			 * page is not CPU accessible and is therefore mapped
			 * as a special swap entry; nonetheless it still counts
			 * as a valid regular mapping of the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

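/*
 * Does @pfn match @page?  Normal and hugetlbfs pages must match exactly;
 * a THP matches if @pfn falls anywhere within its compound range.
 */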
static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
	unsigned long page_pfn = page_to_pfn(page);

	/* normal page and hugetlbfs page */
	if (!PageTransCompound(page) || PageHuge(page))
		return page_pfn == pfn;

	/* THP can be referenced by any subpage */
	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
}

/**
 * check_pte - check whether @pvmw->page is mapped at @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, holding the pte and page to be checked
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped; check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE, or a PTE mapping an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page or, for a THP, to any of its
 * subpages.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * @pvmw->page or, for a THP, to any of its subpages.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_is_match(pvmw->page, pfn);
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually a THP). For a PTE-mapped THP, you should run page_vma_mapped_walk()
 * in a loop to find all PTEs that map the THP, as sketched below.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
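 *
 * A minimal caller sketch (illustrative only; "page", "vma" and "address"
 * here stand for whatever the caller already has in hand):
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (pvmw.pte)
 *			... examine or modify *pvmw.pte (pvmw.ptl is held) ...
 *		else
 *			... PMD-mapped THP: examine *pvmw.pmd instead ...
 *	}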
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on the last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
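	/*
	 * (Re)start the walk from the top of the page table: we come back
	 * here whenever the pte scan below crosses into a new pmd, since the
	 * pmd, the pte mapping and the pte lock must all be re-established.
	 */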
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			}
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else if (!pmd_present(pmde)) {
		return false;
	}
	if (!map_pte(pvmw))
		goto next_pte;
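	/*
	 * Check each candidate pte in turn.  For a PTE-mapped THP this loops
	 * over every pte that could map the compound page, skipping pte_none
	 * entries and restarting from the pgd whenever a pmd boundary is
	 * crossed; for other pages the first check decides the result.
	 */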
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seeking to the next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					thp_size(pvmw->page))
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0 if it
 * is not.  Only valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

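	/* Clamp the walk to the part of the page's range that overlaps @vma. */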
	start = __vma_address(page, vma);
	end = start + thp_size(page) - PAGE_SIZE;

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}