xref: /openbmc/linux/mm/debug_vm_pgtable.c (revision 7fde9d6e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the
 * semantics expectations that are being validated here. All future
 * changes here or in the documentation need to stay in sync.
 */

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given
 * page table entry type. But these bits might affect the ability to
 * clear entries with pxx_clear() because of how dynamic page table
 * folding works on s390. So while loading up the entries do not change
 * the lower 4 bits. This does not affect any other platform. Also
 * avoid the 62nd bit on ppc64, which is used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE	GENMASK(7, 0)
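
/*
 * Illustration (not used by the code): on a 64-bit platform with both
 * skip masks in effect, RANDOM_ORVALUE works out to all ones except
 * bits 0-3 (the s390 entry type bits) and bit 62 (the ppc64 pte
 * marker), i.e. 0xbffffffffffffff0. ORing that into an entry before
 * pxx_clear() exercises clearing against garbage values rather than
 * conveniently zeroed ones.
 */
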
static void __init pte_basic_tests(unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	pte_t pte = pfn_pte(pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}
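
/*
 * Each check above is a round trip: a setter like pte_mkdirty() must
 * be observable through the matching query (pte_dirty()), and the
 * inverse setter (pte_mkclean()) must undo it, regardless of which
 * protection value the entry started with.
 */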

static void __init pte_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pte_t *ptep,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding a TLB flush.
	 * This requires that set_pte_at() is not used to update an
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 */

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_set_wrprotect(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = ptep_get(ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_mkyoung(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_test_and_clear_young(vma, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_young(pte));
}
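
/*
 * Note that the advanced tests read entries back through ptep_get()
 * rather than dereferencing ptep directly; ptep_get() is the generic
 * accessor that architectures with non-trivial pte reads can override.
 */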

static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(mm, pmdp, pgtable);

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_set_wrprotect(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_test_and_clear_young(vma, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pmd entry and withdraw the deposited page table */
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
}
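
/*
 * The deposit/withdraw pair above mirrors what THP itself does: a page
 * table page is stashed with pgtable_trans_huge_deposit() when a huge
 * pmd is installed and taken back with pgtable_trans_huge_withdraw()
 * when it is torn down, so a later split never has to allocate memory.
 */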

static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(pfn, prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD saved write\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(mm))
		return;

	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(mm, vaddr, pudp);
}

static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(pfn, prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, int idx) { }
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(prot))
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defines pmd_set_huge() to verify that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pmd_clear_huge(pmdp));
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(prot))
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defines pud_set_huge() to verify that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
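
/*
 * pmd_set_huge()/pud_set_huge() are the helpers behind huge page
 * vmalloc and ioremap mappings; arch_vmap_pmd_supported() and
 * arch_vmap_pud_supported() gate these tests the same way the vmap
 * code decides whether a huge mapping is possible for a given
 * protection.
 */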

static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}
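
/*
 * The P4D and PGD checks stay deliberately minimal: there are no
 * generic pfn_p4d()/pfn_pgd() constructors, so only pxx_same() can be
 * validated, on an entry filled with a random non-zero byte pattern.
 */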

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long pfn, unsigned long vaddr,
				   pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));
}
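
/*
 * RISC-V is excluded from the RANDOM_ORVALUE step above since a pte
 * with arbitrary high bits set is not a valid entry there; the clear
 * path is still exercised, just starting from a well formed pte.
 */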

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else  /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}
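
/*
 * Soft dirty bits let userspace (e.g. CRIU) track which pages have
 * been written since tracking was last reset through
 * /proc/<pid>/clear_refs; the swap variants below preserve that state
 * while a page is swapped out.
 */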

static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
		!IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(pfn, prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(pfn != pte_pfn(pte));
}
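
/*
 * The round trip above only checks that the pfn survives the
 * pte -> swp_entry_t -> pte conversion, i.e. that the architecture's
 * swap entry encoding does not clobber the relevant bits.
 */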

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(pfn, prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(pfn != pmd_pfn(pmd));
}
#else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(void)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	pr_debug("Validating swap migration\n");
	/*
	 * swap_migration_tests() requires a dedicated page as it needs
	 * to be locked before creating a migration entry from it. Locking
	 * the page that actually maps kernel text ('start_kernel') can be
	 * really problematic. Let's allocate a dedicated page explicitly
	 * for this purpose that will be freed subsequently.
	 */
	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("page allocation failed\n");
		return;
	}

	/*
	 * make_writable_migration_entry() expects the given page to be
	 * locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
	__free_page(page);
}
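
/*
 * Migration entries are swap entries that stand in for a mapping while
 * its page is being moved; the writable/readable distinction checked
 * above is what lets migration restore the original write permission
 * once the entry is removed.
 */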

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(pfn);
	pte = mk_huge_pte(page, prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else  /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non transhuge pmds (i.e. there
	 * are no THPs allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Re-enable these
	 * tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}
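
/*
 * The address picked here is page aligned and lands somewhere in
 * [FIRST_USER_ADDRESS, TASK_SIZE); it only has to be a plausible user
 * address for the page table walk below, nothing is mapped at it.
 */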

static int __init debug_vm_pgtable(void)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot, protnone;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
	spinlock_t *ptl = NULL;
	int idx;

	pr_info("Validating architecture page table helpers\n");
	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * __P000 (or even __S000) will help create page table entries with
	 * PROT_NONE permission as required for pxx_protnone_tests().
	 */
	protnone = __P000;

	vma = vm_area_alloc(mm);
	if (!vma) {
		pr_err("vma allocation failed\n");
		return 1;
	}

	/*
	 * The PFN for mapping at PTE level is determined from a standard
	 * kernel text symbol. But pfns for higher page table levels are
	 * derived by masking the lower bits of this real pfn. These derived
	 * pfns might not exist on the platform but that does not really
	 * matter as pfn_pxx() helpers will still create appropriate entries
	 * for the test. This avoids having to allocate large memory blocks
	 * just to back mappings at the higher page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
	pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));

	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	/*
	 * Allocate pgtable_t
	 */
	if (pte_alloc(mm, pmdp)) {
		pr_err("pgtable allocation failed\n");
		return 1;
	}

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	/*
	 * Iterate over protection_map[] to make sure that all
	 * the basic page table transformation validations hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 */
	for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) {
		pte_basic_tests(pte_aligned, idx);
		pmd_basic_tests(pmd_aligned, idx);
		pud_basic_tests(mm, pud_aligned, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of
	 * the above iteration for now to save some test execution
	 * time.
	 */
	p4d_basic_tests(p4d_aligned, prot);
	pgd_basic_tests(pgd_aligned, prot);

	pmd_leaf_tests(pmd_aligned, prot);
	pud_leaf_tests(pud_aligned, prot);

	pte_savedwrite_tests(pte_aligned, protnone);
	pmd_savedwrite_tests(pmd_aligned, protnone);

	pte_special_tests(pte_aligned, prot);
	pte_protnone_tests(pte_aligned, protnone);
	pmd_protnone_tests(pmd_aligned, protnone);

	pte_devmap_tests(pte_aligned, prot);
	pmd_devmap_tests(pmd_aligned, prot);
	pud_devmap_tests(pud_aligned, prot);

	pte_soft_dirty_tests(pte_aligned, prot);
	pmd_soft_dirty_tests(pmd_aligned, prot);
	pte_swap_soft_dirty_tests(pte_aligned, prot);
	pmd_swap_soft_dirty_tests(pmd_aligned, prot);

	pte_swap_tests(pte_aligned, prot);
	pmd_swap_tests(pmd_aligned, prot);

	swap_migration_tests();

	pmd_thp_tests(pmd_aligned, prot);
	pud_thp_tests(pud_aligned, prot);

	hugetlb_basic_tests(pte_aligned, prot);

	/*
	 * Page table modifying tests. They need to hold the
	 * proper page table lock.
	 */

	ptep = pte_offset_map_lock(mm, pmdp, vaddr, &ptl);
	pte_clear_tests(mm, ptep, pte_aligned, vaddr, prot);
	pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
	pte_unmap_unlock(ptep, ptl);

	ptl = pmd_lock(mm, pmdp);
	pmd_clear_tests(mm, pmdp);
	pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot, saved_ptep);
	pmd_huge_tests(pmdp, pmd_aligned, prot);
	pmd_populate_tests(mm, pmdp, saved_ptep);
	spin_unlock(ptl);

	ptl = pud_lock(mm, pudp);
	pud_clear_tests(mm, pudp);
	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
	pud_huge_tests(pudp, pud_aligned, prot);
	pud_populate_tests(mm, pudp, saved_pmdp);
	spin_unlock(ptl);

	spin_lock(&mm->page_table_lock);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);
	spin_unlock(&mm->page_table_lock);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	vm_area_free(vma);
	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);
	return 0;
}
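
/*
 * Registered as a late_initcall so the test runs once during boot,
 * after core MM is fully up; the file is only built when
 * CONFIG_DEBUG_VM_PGTABLE is enabled.
 */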
late_initcall(debug_vm_pgtable);