// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the semantic
 * expectations that are being validated here. All future changes here or in
 * the documentation need to be kept in sync.
 */
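/* Non-zero byte pattern used to fill the dummy p4d/pgd entries in the basic tests */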
#define RANDOM_NZVALUE	GENMASK(7, 0)

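/*
 * Shared state for all tests, set up once in init_args():
 *
 * @mm, @vma:		the mock address space that the helpers operate on
 * @pgdp .. @ptep:	page table entries covering @vaddr
 * @start_*:		page table pages saved so destroy_args() can free them
 * @pud_pfn, @pmd_pfn, @pte_pfn:
 *			pfns of pages allocated for tests that touch memory
 * @fixed_*_pfn:	pfns derived from a fixed, valid physical address, for
 *			tests that never dereference the mapping
 */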
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_alignment;
	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte), args->vma)));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte, args->vma))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

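/*
 * Unlike the basic tests above, the advanced tests store entries through
 * args->ptep and therefore expect the caller to have mapped and locked
 * the PTE via pte_offset_map_lock() beforehand.
 */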
static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding TLB flushes.
	 * This requires that set_pte_at() is not used to update an
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 *
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PTE advanced\n");
	if (WARN_ON(!args->ptep))
		return;

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	pte = pte_mkwrite(pte, args->vma);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd), args->vma)));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd, args->vma))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

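/*
 * Mirrors the THP mapping lifecycle: a PTE page table is deposited under
 * the PMD (as done when a huge page is mapped) and withdrawn again once
 * the huge entry has been cleared.
 */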
static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pmd_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PMD advanced\n");
	/* Align the address to HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmd = pmd_mkwrite(pmd, args->vma);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the PMD entry and withdraw the deposited page table */
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}

static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	unsigned long vaddr = args->vaddr;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pud_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PUD advanced\n");
	/* Align the address to HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	/*
	 * Some architectures have debug checks to make sure that huge
	 * PUD mappings are only found with devmap entries. For now,
	 * test with devmap entries only.
	 */
	pud = pud_mkdevmap(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkdevmap(pud);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(args->vma, vaddr, args->pudp, 1);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkdevmap(pud);
	pud = pud_mkyoung(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

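/*
 * The huge vmap tests exercise the helpers that install and tear down huge
 * kernel mappings (e.g. for vmalloc and ioremap). They only run when the
 * architecture supports huge mappings at the given level and a suitably
 * aligned physical range was found by init_fixed_pfns().
 */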
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot) ||
	    args->fixed_alignment < PMD_SIZE)
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * The x86 implementation of pmd_set_huge() verifies that the
	 * given PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(args->page_prot) ||
	    args->fixed_alignment < PUD_SIZE)
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * The x86 implementation of pud_set_huge() verifies that the
	 * given PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pudp, __pud(0));
	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
	WARN_ON(!pud_clear_huge(args->pudp));
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

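/*
 * The clear/populate tests below are compiled out when a level is folded
 * at build time (__PAGETABLE_*_FOLDED) and additionally skipped at run
 * time via the mm_*_folded() checks on architectures that fold page
 * table levels dynamically.
 */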
#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
	pud_t pud = READ_ONCE(*args->pudp);

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD clear\n");
	WARN_ON(pud_none(pud));
	pud_clear(args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(args->mm, args->pudp, args->start_pmdp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d = READ_ONCE(*args->p4dp);

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D clear\n");
	WARN_ON(p4d_none(p4d));
	p4d_clear(args->p4dp);
	p4d = READ_ONCE(*args->p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(args->pudp);
	p4d_clear(args->p4dp);
	p4d_populate(args->mm, args->p4dp, args->start_pudp);
	p4d = READ_ONCE(*args->p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd = READ_ONCE(*(args->pgdp));

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD clear\n");
	WARN_ON(pgd_none(pgd));
	pgd_clear(args->pgdp);
	pgd = READ_ONCE(*args->pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(args->p4dp);
	pgd_clear(args->pgdp);
	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
	pgd = READ_ONCE(*args->pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PTE clear\n");
	if (WARN_ON(!args->ptep))
		return;

	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	WARN_ON(pte_none(pte));
	flush_dcache_page(page);
	barrier();
	ptep_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = READ_ONCE(*args->pmdp);

	pr_debug("Validating PMD clear\n");
	WARN_ON(pmd_none(pmd));
	pmd_clear(args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else  /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
{
	unsigned long max_swap_offset;
	swp_entry_t entry, entry2;
	pte_t pte;

	pr_debug("Validating PTE swap exclusive\n");

	/* See generic_max_swapfile_size(): probe the maximum offset */
	max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));
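	/*
	 * Round-tripping an all-ones offset through the arch swap PTE
	 * encoding drops the bits that don't fit, so what survives is
	 * the largest offset the format can represent.
	 */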

	/* Create a swp entry with all possible bits set */
	entry = swp_entry((1 << MAX_SWAPFILES_SHIFT) - 1, max_swap_offset);

	pte = swp_entry_to_pte(entry);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_mkexclusive(pte);
	WARN_ON(!pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	WARN_ON(pte_swp_soft_dirty(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_clear_exclusive(pte);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
}

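/*
 * __pte_to_swp_entry()/__swp_entry_to_pte() below are the raw, arch level
 * bit conversions; round-tripping an entry through them must preserve the
 * pfn bits.
 */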
static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
}
#else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's use the allocated page explicitly for this
	 * purpose.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");

	/*
	 * make_[readable|writable]_migration_entry() expects the given page
	 * to be locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(args->fixed_pmd_pfn);
	pte = mk_huge_pte(page, args->page_prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);

	WARN_ON(!pte_huge(arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else  /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP, to avoid taking the
	 * pmd_lock when others walk over non-transhuge pmds (i.e. when
	 * there are no THPs allocated). Especially when splitting a THP
	 * and removing the present bit from the pmd, pmd_trans_huge()
	 * still needs to return true. pmd_present() should be true
	 * whenever pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Enable these
	 * tests again when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

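/*
 * Pick a random, page aligned virtual address in the user range
 * [FIRST_USER_ADDRESS, TASK_SIZE), so the tests don't always exercise
 * the same page table slot.
 */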
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

	/* Free (huge) page */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_page(page);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp)
		p4d_free(args->mm, args->start_p4dp);

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmdrop(args->mm);
}

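/*
 * Orders above MAX_ORDER cannot come from the buddy allocator, so they
 * are attempted with alloc_contig_pages() where available. The flag
 * args->is_contiguous_page records which path succeeded so that
 * destroy_args() can free the page accordingly.
 */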
static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

#ifdef CONFIG_CONTIG_ALLOC
	if (order > MAX_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order <= MAX_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}

/*
 * Check if a physical memory range described by <pstart, pend> contains
 * an area of size psize that is also aligned to psize.
 *
 * Don't use address 0, an all-zeroes physical address might mask bugs,
 * and it's not used on x86.
 */
static void __init phys_align_check(phys_addr_t pstart,
				    phys_addr_t pend, unsigned long psize,
				    phys_addr_t *physp, unsigned long *alignp)
{
	phys_addr_t aligned_start, aligned_end;

	if (pstart == 0)
		pstart = PAGE_SIZE;

	aligned_start = ALIGN(pstart, psize);
	aligned_end = aligned_start + psize;

	if (aligned_end > aligned_start && aligned_end <= pend) {
		*alignp = psize;
		*physp = aligned_start;
	}
}

static void __init init_fixed_pfns(struct pgtable_debug_args *args)
{
	u64 idx;
	phys_addr_t phys, pstart, pend;

	/*
	 * Initialize the fixed pfns. To do this, try to find a
	 * valid physical range, preferably aligned to PUD_SIZE,
	 * but settling for one aligned to PMD_SIZE as a fallback. If
	 * neither of those is found, use the physical address of
	 * the start_kernel symbol.
	 *
	 * The memory doesn't need to be allocated, it just needs to exist
	 * as usable memory. It won't be touched.
	 *
	 * The alignment is recorded, and can be checked to see if we
	 * can run the tests that require an actual valid physical
	 * address range on some architectures ({pmd,pud}_huge_tests()
	 * on x86).
	 */

	phys = __pa_symbol(&start_kernel);
	args->fixed_alignment = PAGE_SIZE;

	for_each_mem_range(idx, &pstart, &pend) {
		/* First check for a PUD-aligned area */
		phys_align_check(pstart, pend, PUD_SIZE, &phys,
				 &args->fixed_alignment);

		/* If a PUD-aligned area is found, we're done */
		if (args->fixed_alignment == PUD_SIZE)
			break;

		/*
		 * If no PMD-aligned area has been found yet, check for one,
		 * but continue the loop to look for a PUD-aligned area.
		 */
		if (args->fixed_alignment < PMD_SIZE)
			phys_align_check(pstart, pend, PMD_SIZE, &phys,
					 &args->fixed_alignment);
	}

	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));
}

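/*
 * Build the mock environment: an mm_struct to allocate page tables in,
 * and a vma, which some helpers (e.g. pte_mkwrite()) take as an argument.
 */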
static int __init init_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
	 * will help create page table entries with PROT_NONE permission as
	 * required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr              = get_random_vaddr();
	args->page_prot          = vm_get_page_prot(VM_ACCESS_FLAGS);
	args->page_prot_none     = vm_get_page_prot(VM_NONE);
	args->is_contiguous_page = false;
	args->pud_pfn            = ULONG_MAX;
	args->pmd_pfn            = ULONG_MAX;
	args->pte_pfn            = ULONG_MAX;
	args->fixed_pgd_pfn      = ULONG_MAX;
	args->fixed_p4d_pfn      = ULONG_MAX;
	args->fixed_pud_pfn      = ULONG_MAX;
	args->fixed_pmd_pfn      = ULONG_MAX;
	args->fixed_pte_pfn      = ULONG_MAX;

	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the tests.
	 * Let's save the page table entries so that they can be released
	 * when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
	WARN_ON(!args->start_ptep);

	init_fixed_pfns(args);

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_page(GFP_KERNEL);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}

static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	/*
	 * Iterate over each possible vm_flags combination to make sure
	 * that all the basic page table transformation validations hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 *
	 * Protection based vm_flags combinations are always linear
	 * and increasing, i.e. starting from VM_NONE and going up to
	 * (VM_SHARED | VM_READ | VM_WRITE | VM_EXEC).
	 */
#define VM_FLAGS_START	(VM_NONE)
#define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)

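	/*
	 * VM_READ, VM_WRITE, VM_EXEC and VM_SHARED occupy the four lowest
	 * vm_flags bits, so this walks all 16 protection combinations.
	 */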
	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of
	 * the above iteration for now to save some test execution
	 * time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_devmap_tests(&args);
	pmd_devmap_tests(&args);
	pud_devmap_tests(&args);

	pte_soft_dirty_tests(&args);
	pmd_soft_dirty_tests(&args);
	pte_swap_soft_dirty_tests(&args);
	pmd_swap_soft_dirty_tests(&args);

	pte_swap_exclusive_tests(&args);

	pte_swap_tests(&args);
	pmd_swap_tests(&args);

	swap_migration_tests(&args);

	pmd_thp_tests(&args);
	pud_thp_tests(&args);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold the
	 * proper page table locks.
	 */

	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
	if (args.ptep)
		pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(args.mm, args.pmdp);
	pmd_clear_tests(&args);
	pmd_advanced_tests(&args);
	pmd_huge_tests(&args);
	pmd_populate_tests(&args);
	spin_unlock(ptl);

	ptl = pud_lock(args.mm, args.pudp);
	pud_clear_tests(&args);
	pud_advanced_tests(&args);
	pud_huge_tests(&args);
	pud_populate_tests(&args);
	spin_unlock(ptl);

	spin_lock(&(args.mm->page_table_lock));
	p4d_clear_tests(&args);
	pgd_clear_tests(&args);
	p4d_populate_tests(&args);
	pgd_populate_tests(&args);
	spin_unlock(&(args.mm->page_table_lock));

	destroy_args(&args);
	return 0;
}
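/*
 * Runs once during boot, after most of the kernel is up, when the kernel
 * is built with CONFIG_DEBUG_VM_PGTABLE enabled.
 */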
late_initcall(debug_vm_pgtable);