// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: %s: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given
 * page table entry's type. These bits can also affect the ability to
 * clear entries with pxx_clear() because of how dynamic page table
 * folding works on s390. So while loading up the entries, do not
 * change the lower 4 bits. This does not affect any other platform.
 */
#define S390_MASK_BITS	4
#define RANDOM_ORVALUE	GENMASK(BITS_PER_LONG - 1, S390_MASK_BITS)
#define RANDOM_NZVALUE	GENMASK(7, 0)

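/*
 * Each basic test builds an entry from a known pfn/prot pair and then
 * verifies that the modifier helpers invert each other: an entry that
 * has been marked old, clean or write-protected must read back as
 * young, dirty and writable again after pte_mkyoung(), pte_mkdirty()
 * and pte_mkwrite() respectively, and vice versa.
 */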
static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
}

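/*
 * The PMD and PUD variants below mirror the PTE level checks and
 * additionally verify that a huge (leaf) entry qualifies as pxx_bad(),
 * since it does not point to a lower level page table. They compile
 * away on kernels that cannot create huge mappings at the given level.
 * pud_basic_tests() also takes the mm so that mm_pmd_folded() can be
 * consulted before exercising pud_mkhuge().
 */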
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn,
				   pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));

	if (mm_pmd_folded(mm))
		return;

	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn,
				   pgprot_t prot)
{
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn,
				   pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

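/*
 * The P4D and PGD levels are often folded and have no generic modifier
 * helpers comparable to pxx_mkyoung() and friends, so only the
 * pxx_same() identity check is exercised here, against an entry filled
 * with a non-zero garbage pattern.
 */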
static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

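/*
 * The clear/populate tests below all follow the same pattern: seed the
 * entry with RANDOM_ORVALUE (keeping the low bits intact for s390),
 * clear it and expect pxx_none() to hold; then populate it with a
 * saved lower level page table and expect the result not to be
 * pxx_bad(). Levels that are folded into the level above are skipped
 * at runtime via the mm_pxx_folded() checks.
 */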
#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pmd_clear(pmdp);
	pud_clear(pudp);
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* __PAGETABLE_P4D_FOLDED */

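/*
 * Unlike the upper levels, which are updated with a plain WRITE_ONCE(),
 * the PTE is installed through set_pte_at(), the interface the core MM
 * actually uses, since architectures may maintain additional state when
 * a PTE is set. The barrier() keeps the compiler from reordering the
 * store against the subsequent pte_clear().
 */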
static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long vaddr)
{
	pte_t pte = READ_ONCE(*ptep);

	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = READ_ONCE(*ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_clear(pmdp);
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

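/*
 * Pick a random, page aligned user virtual address in the range
 * [FIRST_USER_ADDRESS, TASK_SIZE) so that successive runs exercise
 * different page table indices at every level.
 */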
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

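/*
 * The test driver: allocate a fresh mm_struct, build one page table
 * page at each level for a random user address, run the basic, clear
 * and populate tests against those entries, and then tear everything
 * down again. Failures are reported through WARN_ON() splats rather
 * than through the return value.
 */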
static int __init debug_vm_pgtable(void)
{
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
	spinlock_t *uninitialized_var(ptl);

	pr_info("Validating architecture page table helpers\n");
	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * The PFN for the mapping at PTE level is determined from a
	 * standard kernel text symbol. The pfns for the higher page table
	 * levels are derived by masking the lower bits of this real pfn.
	 * These derived pfns might not exist on the platform, but that
	 * does not really matter as the pfn_pxx() helpers will still
	 * create appropriate entries for the test. This avoids having to
	 * allocate large memory blocks to back mappings at the higher
	 * page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
	pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));

	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	ptep = pte_alloc_map_lock(mm, pmdp, vaddr, &ptl);

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	pte_basic_tests(pte_aligned, prot);
	pmd_basic_tests(pmd_aligned, prot);
	pud_basic_tests(mm, pud_aligned, prot);
	p4d_basic_tests(p4d_aligned, prot);
	pgd_basic_tests(pgd_aligned, prot);

	pte_clear_tests(mm, ptep, vaddr);
	pmd_clear_tests(mm, pmdp);
	pud_clear_tests(mm, pudp);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);

	pte_unmap_unlock(ptep, ptl);

	pmd_populate_tests(mm, pmdp, saved_ptep);
	pud_populate_tests(mm, pudp, saved_pmdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);
	return 0;
}
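/*
 * Run once, late in boot, after the architecture has finished setting
 * up its page table handling.
 */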
late_initcall(debug_vm_pgtable);