xref: /openbmc/linux/mm/debug_vm_pgtable.c (revision 161e393c0f63592a3b95bdd8b55752653763fc6d)
1399145f9SAnshuman Khandual // SPDX-License-Identifier: GPL-2.0-only
2399145f9SAnshuman Khandual /*
3399145f9SAnshuman Khandual  * This kernel test validates architecture page table helpers and
4399145f9SAnshuman Khandual  * accessors and helps in verifying their continued compliance with
5399145f9SAnshuman Khandual  * expected generic MM semantics.
6399145f9SAnshuman Khandual  *
7399145f9SAnshuman Khandual  * Copyright (C) 2019 ARM Ltd.
8399145f9SAnshuman Khandual  *
9399145f9SAnshuman Khandual  * Author: Anshuman Khandual <anshuman.khandual@arm.com>
10399145f9SAnshuman Khandual  */
116315df41SAnshuman Khandual #define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__
12399145f9SAnshuman Khandual 
13399145f9SAnshuman Khandual #include <linux/gfp.h>
14399145f9SAnshuman Khandual #include <linux/highmem.h>
15399145f9SAnshuman Khandual #include <linux/hugetlb.h>
16399145f9SAnshuman Khandual #include <linux/kernel.h>
17399145f9SAnshuman Khandual #include <linux/kconfig.h>
18c4876ff6SFrank van der Linden #include <linux/memblock.h>
19399145f9SAnshuman Khandual #include <linux/mm.h>
20399145f9SAnshuman Khandual #include <linux/mman.h>
21399145f9SAnshuman Khandual #include <linux/mm_types.h>
22399145f9SAnshuman Khandual #include <linux/module.h>
23399145f9SAnshuman Khandual #include <linux/pfn_t.h>
24399145f9SAnshuman Khandual #include <linux/printk.h>
25a5c3b9ffSAnshuman Khandual #include <linux/pgtable.h>
26399145f9SAnshuman Khandual #include <linux/random.h>
27399145f9SAnshuman Khandual #include <linux/spinlock.h>
28399145f9SAnshuman Khandual #include <linux/swap.h>
29399145f9SAnshuman Khandual #include <linux/swapops.h>
30399145f9SAnshuman Khandual #include <linux/start_kernel.h>
31399145f9SAnshuman Khandual #include <linux/sched/mm.h>
3285a14463SAneesh Kumar K.V #include <linux/io.h>
338c5b3a8aSGavin Shan 
348c5b3a8aSGavin Shan #include <asm/cacheflush.h>
35399145f9SAnshuman Khandual #include <asm/pgalloc.h>
36a5c3b9ffSAnshuman Khandual #include <asm/tlbflush.h>
37399145f9SAnshuman Khandual 
38b1d00007SAnshuman Khandual /*
39ee65728eSMike Rapoport  * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the
40b1d00007SAnshuman Khandual  * semantics expectations that are being validated here. All future changes
41b1d00007SAnshuman Khandual  * here or in the documentation need to stay in sync.
42d7e679b6SKefeng Wang  *
43399145f9SAnshuman Khandual  * On the s390 platform, the lower 4 bits are used to identify a given
44399145f9SAnshuman Khandual  * page table entry type. But these bits might affect the ability to clear
45399145f9SAnshuman Khandual  * entries with pxx_clear() because of how dynamic page table folding works
46399145f9SAnshuman Khandual  * on s390. So while loading up the entries, do not change the lower 4 bits.
47cfc5bbc4SAneesh Kumar K.V  * It does not affect any other platform. Also avoid the 62nd bit on ppc64,
48cfc5bbc4SAneesh Kumar K.V  * which is used to mark a pte entry.
49399145f9SAnshuman Khandual  */
50cfc5bbc4SAneesh Kumar K.V #define S390_SKIP_MASK		GENMASK(3, 0)
51cfc5bbc4SAneesh Kumar K.V #if __BITS_PER_LONG == 64
52cfc5bbc4SAneesh Kumar K.V #define PPC64_SKIP_MASK		GENMASK(62, 62)
53cfc5bbc4SAneesh Kumar K.V #else
54cfc5bbc4SAneesh Kumar K.V #define PPC64_SKIP_MASK		0x0
55cfc5bbc4SAneesh Kumar K.V #endif
56cfc5bbc4SAneesh Kumar K.V #define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
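/*
 * RANDOM_ORVALUE is ORed into entries before the pxx_clear() tests so that a
 * non-zero pattern is actually being cleared, while skipping the architecture
 * specific bits noted above. RANDOM_NZVALUE is memset() into stack local
 * entries, e.g. for the pxx_same() checks.
 */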
57cfc5bbc4SAneesh Kumar K.V #define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
58399145f9SAnshuman Khandual #define RANDOM_NZVALUE	GENMASK(7, 0)
59399145f9SAnshuman Khandual 
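/*
 * The pud/pmd/pte_pfn fields describe pages allocated when the arguments are
 * initialized and back the entries installed by the advanced tests, while the
 * fixed_*_pfn values are derived from a kernel text symbol and are mainly used
 * by tests that only construct and inspect entries. fixed_alignment records
 * the alignment of the backing area so that the huge vmap tests can bail out
 * when it is smaller than PMD_SIZE/PUD_SIZE.
 */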
603c9b84f0SGavin Shan struct pgtable_debug_args {
613c9b84f0SGavin Shan 	struct mm_struct	*mm;
623c9b84f0SGavin Shan 	struct vm_area_struct	*vma;
633c9b84f0SGavin Shan 
643c9b84f0SGavin Shan 	pgd_t			*pgdp;
653c9b84f0SGavin Shan 	p4d_t			*p4dp;
663c9b84f0SGavin Shan 	pud_t			*pudp;
673c9b84f0SGavin Shan 	pmd_t			*pmdp;
683c9b84f0SGavin Shan 	pte_t			*ptep;
693c9b84f0SGavin Shan 
703c9b84f0SGavin Shan 	p4d_t			*start_p4dp;
713c9b84f0SGavin Shan 	pud_t			*start_pudp;
723c9b84f0SGavin Shan 	pmd_t			*start_pmdp;
733c9b84f0SGavin Shan 	pgtable_t		start_ptep;
743c9b84f0SGavin Shan 
753c9b84f0SGavin Shan 	unsigned long		vaddr;
763c9b84f0SGavin Shan 	pgprot_t		page_prot;
773c9b84f0SGavin Shan 	pgprot_t		page_prot_none;
783c9b84f0SGavin Shan 
793c9b84f0SGavin Shan 	bool			is_contiguous_page;
803c9b84f0SGavin Shan 	unsigned long		pud_pfn;
813c9b84f0SGavin Shan 	unsigned long		pmd_pfn;
823c9b84f0SGavin Shan 	unsigned long		pte_pfn;
833c9b84f0SGavin Shan 
84c4876ff6SFrank van der Linden 	unsigned long		fixed_alignment;
853c9b84f0SGavin Shan 	unsigned long		fixed_pgd_pfn;
863c9b84f0SGavin Shan 	unsigned long		fixed_p4d_pfn;
873c9b84f0SGavin Shan 	unsigned long		fixed_pud_pfn;
883c9b84f0SGavin Shan 	unsigned long		fixed_pmd_pfn;
893c9b84f0SGavin Shan 	unsigned long		fixed_pte_pfn;
903c9b84f0SGavin Shan };
913c9b84f0SGavin Shan 
9236b77d1eSGavin Shan static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
93399145f9SAnshuman Khandual {
9431d17076SAnshuman Khandual 	pgprot_t prot = vm_get_page_prot(idx);
9536b77d1eSGavin Shan 	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
962e326c07SAnshuman Khandual 	unsigned long val = idx, *ptr = &val;
97399145f9SAnshuman Khandual 
982e326c07SAnshuman Khandual 	pr_debug("Validating PTE basic (%pGv)\n", ptr);
99bb5c47ceSAnshuman Khandual 
100bb5c47ceSAnshuman Khandual 	/*
101bb5c47ceSAnshuman Khandual 	 * This test needs to be executed after the given page table entry
10231d17076SAnshuman Khandual 	 * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
103bb5c47ceSAnshuman Khandual 	 * does not have the dirty bit enabled from the beginning. This is
104bb5c47ceSAnshuman Khandual 	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
105bb5c47ceSAnshuman Khandual 	 * that the dirty bit is set.
106bb5c47ceSAnshuman Khandual 	 */
107bb5c47ceSAnshuman Khandual 	WARN_ON(pte_dirty(pte_wrprotect(pte)));
108bb5c47ceSAnshuman Khandual 
109399145f9SAnshuman Khandual 	WARN_ON(!pte_same(pte, pte));
110399145f9SAnshuman Khandual 	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
111399145f9SAnshuman Khandual 	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
112*161e393cSRick Edgecombe 	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte), args->vma)));
113399145f9SAnshuman Khandual 	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
114399145f9SAnshuman Khandual 	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
115*161e393cSRick Edgecombe 	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte, args->vma))));
116bb5c47ceSAnshuman Khandual 	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
117bb5c47ceSAnshuman Khandual 	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
118399145f9SAnshuman Khandual }
119399145f9SAnshuman Khandual 
12044966c44SGavin Shan static void __init pte_advanced_tests(struct pgtable_debug_args *args)
121a5c3b9ffSAnshuman Khandual {
1228c5b3a8aSGavin Shan 	struct page *page;
123b593b90dSShixin Liu 	pte_t pte;
124a5c3b9ffSAnshuman Khandual 
125c3824e18SAneesh Kumar K.V 	/*
126c3824e18SAneesh Kumar K.V 	 * Architectures optimize set_pte_at() by avoiding TLB flushes.
127c3824e18SAneesh Kumar K.V 	 * This requires that set_pte_at() is not used to update an already
128c3824e18SAneesh Kumar K.V 	 * existing pte entry. Clear the pte before calling set_pte_at().
1298c5b3a8aSGavin Shan 	 *
1308c5b3a8aSGavin Shan 	 * flush_dcache_page() is called after set_pte_at() to clear
1318c5b3a8aSGavin Shan 	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
1328c5b3a8aSGavin Shan 	 * when it's released and the page allocation check will fail when
1338c5b3a8aSGavin Shan 	 * the page is allocated again. For architectures other than ARM64,
1348c5b3a8aSGavin Shan 	 * the unexpected overhead of cache flushing is acceptable.
135c3824e18SAneesh Kumar K.V 	 */
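	/* args->pte_pfn is ULONG_MAX when no backing page was allocated, so skip. */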
1368c5b3a8aSGavin Shan 	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
1378c5b3a8aSGavin Shan 	if (!page)
13844966c44SGavin Shan 		return;
139c3824e18SAneesh Kumar K.V 
1406315df41SAnshuman Khandual 	pr_debug("Validating PTE advanced\n");
1419f2bad09SHugh Dickins 	if (WARN_ON(!args->ptep))
1429f2bad09SHugh Dickins 		return;
1439f2bad09SHugh Dickins 
14444966c44SGavin Shan 	pte = pfn_pte(args->pte_pfn, args->page_prot);
14544966c44SGavin Shan 	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
1468c5b3a8aSGavin Shan 	flush_dcache_page(page);
14744966c44SGavin Shan 	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
14844966c44SGavin Shan 	pte = ptep_get(args->ptep);
149a5c3b9ffSAnshuman Khandual 	WARN_ON(pte_write(pte));
15044966c44SGavin Shan 	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
15144966c44SGavin Shan 	pte = ptep_get(args->ptep);
152a5c3b9ffSAnshuman Khandual 	WARN_ON(!pte_none(pte));
153a5c3b9ffSAnshuman Khandual 
15444966c44SGavin Shan 	pte = pfn_pte(args->pte_pfn, args->page_prot);
155a5c3b9ffSAnshuman Khandual 	pte = pte_wrprotect(pte);
156a5c3b9ffSAnshuman Khandual 	pte = pte_mkclean(pte);
15744966c44SGavin Shan 	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
1588c5b3a8aSGavin Shan 	flush_dcache_page(page);
159*161e393cSRick Edgecombe 	pte = pte_mkwrite(pte, args->vma);
160a5c3b9ffSAnshuman Khandual 	pte = pte_mkdirty(pte);
16144966c44SGavin Shan 	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
16244966c44SGavin Shan 	pte = ptep_get(args->ptep);
163a5c3b9ffSAnshuman Khandual 	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
16444966c44SGavin Shan 	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
16544966c44SGavin Shan 	pte = ptep_get(args->ptep);
166a5c3b9ffSAnshuman Khandual 	WARN_ON(!pte_none(pte));
167a5c3b9ffSAnshuman Khandual 
16844966c44SGavin Shan 	pte = pfn_pte(args->pte_pfn, args->page_prot);
169a5c3b9ffSAnshuman Khandual 	pte = pte_mkyoung(pte);
17044966c44SGavin Shan 	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
1718c5b3a8aSGavin Shan 	flush_dcache_page(page);
17244966c44SGavin Shan 	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
17344966c44SGavin Shan 	pte = ptep_get(args->ptep);
174a5c3b9ffSAnshuman Khandual 	WARN_ON(pte_young(pte));
175fb5222aaSPasha Tatashin 
176fb5222aaSPasha Tatashin 	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
177a5c3b9ffSAnshuman Khandual }
178a5c3b9ffSAnshuman Khandual 
179399145f9SAnshuman Khandual #ifdef CONFIG_TRANSPARENT_HUGEPAGE
18036b77d1eSGavin Shan static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
181399145f9SAnshuman Khandual {
18231d17076SAnshuman Khandual 	pgprot_t prot = vm_get_page_prot(idx);
1832e326c07SAnshuman Khandual 	unsigned long val = idx, *ptr = &val;
18465ac1a60SAnshuman Khandual 	pmd_t pmd;
185399145f9SAnshuman Khandual 
186787d563bSAneesh Kumar K.V 	if (!has_transparent_hugepage())
187787d563bSAneesh Kumar K.V 		return;
188787d563bSAneesh Kumar K.V 
1892e326c07SAnshuman Khandual 	pr_debug("Validating PMD basic (%pGv)\n", ptr);
19036b77d1eSGavin Shan 	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);
191bb5c47ceSAnshuman Khandual 
192bb5c47ceSAnshuman Khandual 	/*
193bb5c47ceSAnshuman Khandual 	 * This test needs to be executed after the given page table entry
19431d17076SAnshuman Khandual 	 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
195bb5c47ceSAnshuman Khandual 	 * does not have the dirty bit enabled from the beginning. This is
196bb5c47ceSAnshuman Khandual 	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
197bb5c47ceSAnshuman Khandual 	 * that the dirty bit is set.
198bb5c47ceSAnshuman Khandual 	 */
199bb5c47ceSAnshuman Khandual 	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));
200bb5c47ceSAnshuman Khandual 
202399145f9SAnshuman Khandual 	WARN_ON(!pmd_same(pmd, pmd));
203399145f9SAnshuman Khandual 	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
204399145f9SAnshuman Khandual 	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
205*161e393cSRick Edgecombe 	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd), args->vma)));
206399145f9SAnshuman Khandual 	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
207399145f9SAnshuman Khandual 	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
208*161e393cSRick Edgecombe 	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd, args->vma))));
209bb5c47ceSAnshuman Khandual 	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
210bb5c47ceSAnshuman Khandual 	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
211399145f9SAnshuman Khandual 	/*
212399145f9SAnshuman Khandual 	 * A huge page does not point to a next level page table
213399145f9SAnshuman Khandual 	 * entry. Hence this must qualify as pmd_bad().
214399145f9SAnshuman Khandual 	 */
215399145f9SAnshuman Khandual 	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
216399145f9SAnshuman Khandual }
217399145f9SAnshuman Khandual 
218c0fe07b0SGavin Shan static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
219a5c3b9ffSAnshuman Khandual {
2208c5b3a8aSGavin Shan 	struct page *page;
22165ac1a60SAnshuman Khandual 	pmd_t pmd;
222c0fe07b0SGavin Shan 	unsigned long vaddr = args->vaddr;
223a5c3b9ffSAnshuman Khandual 
224a5c3b9ffSAnshuman Khandual 	if (!has_transparent_hugepage())
225a5c3b9ffSAnshuman Khandual 		return;
226a5c3b9ffSAnshuman Khandual 
2278c5b3a8aSGavin Shan 	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
2288c5b3a8aSGavin Shan 	if (!page)
229c0fe07b0SGavin Shan 		return;
230c0fe07b0SGavin Shan 
2318c5b3a8aSGavin Shan 	/*
2328c5b3a8aSGavin Shan 	 * flush_dcache_page() is called after set_pmd_at() to clear
2338c5b3a8aSGavin Shan 	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
2348c5b3a8aSGavin Shan 	 * when it's released and the page allocation check will fail when
2358c5b3a8aSGavin Shan 	 * the page is allocated again. For architectures other than ARM64,
2368c5b3a8aSGavin Shan 	 * the unexpected overhead of cache flushing is acceptable.
2378c5b3a8aSGavin Shan 	 */
2386315df41SAnshuman Khandual 	pr_debug("Validating PMD advanced\n");
239a5c3b9ffSAnshuman Khandual 	/* Align the address wrt HPAGE_PMD_SIZE */
24004f7ce3fSGerald Schaefer 	vaddr &= HPAGE_PMD_MASK;
241a5c3b9ffSAnshuman Khandual 
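	/*
	 * Deposit a preallocated page table so that it can be withdrawn
	 * again once the huge entries below have been torn down.
	 */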
242c0fe07b0SGavin Shan 	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);
24387f34986SAneesh Kumar K.V 
244c0fe07b0SGavin Shan 	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
245c0fe07b0SGavin Shan 	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
2468c5b3a8aSGavin Shan 	flush_dcache_page(page);
247c0fe07b0SGavin Shan 	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
248c0fe07b0SGavin Shan 	pmd = READ_ONCE(*args->pmdp);
249a5c3b9ffSAnshuman Khandual 	WARN_ON(pmd_write(pmd));
250c0fe07b0SGavin Shan 	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
251c0fe07b0SGavin Shan 	pmd = READ_ONCE(*args->pmdp);
252a5c3b9ffSAnshuman Khandual 	WARN_ON(!pmd_none(pmd));
253a5c3b9ffSAnshuman Khandual 
254c0fe07b0SGavin Shan 	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
255a5c3b9ffSAnshuman Khandual 	pmd = pmd_wrprotect(pmd);
256a5c3b9ffSAnshuman Khandual 	pmd = pmd_mkclean(pmd);
257c0fe07b0SGavin Shan 	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
2588c5b3a8aSGavin Shan 	flush_dcache_page(page);
259*161e393cSRick Edgecombe 	pmd = pmd_mkwrite(pmd, args->vma);
260a5c3b9ffSAnshuman Khandual 	pmd = pmd_mkdirty(pmd);
261c0fe07b0SGavin Shan 	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
262c0fe07b0SGavin Shan 	pmd = READ_ONCE(*args->pmdp);
263a5c3b9ffSAnshuman Khandual 	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
264c0fe07b0SGavin Shan 	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
265c0fe07b0SGavin Shan 	pmd = READ_ONCE(*args->pmdp);
266a5c3b9ffSAnshuman Khandual 	WARN_ON(!pmd_none(pmd));
267a5c3b9ffSAnshuman Khandual 
268c0fe07b0SGavin Shan 	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
269a5c3b9ffSAnshuman Khandual 	pmd = pmd_mkyoung(pmd);
270c0fe07b0SGavin Shan 	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
2718c5b3a8aSGavin Shan 	flush_dcache_page(page);
272c0fe07b0SGavin Shan 	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
273c0fe07b0SGavin Shan 	pmd = READ_ONCE(*args->pmdp);
274a5c3b9ffSAnshuman Khandual 	WARN_ON(pmd_young(pmd));
27587f34986SAneesh Kumar K.V 
27613af0506SAneesh Kumar K.V 	/* Clear the pmd entry and withdraw the deposited page table. */
277c0fe07b0SGavin Shan 	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
278c0fe07b0SGavin Shan 	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
279a5c3b9ffSAnshuman Khandual }
280a5c3b9ffSAnshuman Khandual 
2818983d231SGavin Shan static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
282a5c3b9ffSAnshuman Khandual {
28365ac1a60SAnshuman Khandual 	pmd_t pmd;
28465ac1a60SAnshuman Khandual 
28565ac1a60SAnshuman Khandual 	if (!has_transparent_hugepage())
28665ac1a60SAnshuman Khandual 		return;
287a5c3b9ffSAnshuman Khandual 
2886315df41SAnshuman Khandual 	pr_debug("Validating PMD leaf\n");
2898983d231SGavin Shan 	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
29065ac1a60SAnshuman Khandual 
291a5c3b9ffSAnshuman Khandual 	/*
292a5c3b9ffSAnshuman Khandual 	 * PMD based THP is a leaf entry.
293a5c3b9ffSAnshuman Khandual 	 */
294a5c3b9ffSAnshuman Khandual 	pmd = pmd_mkhuge(pmd);
295a5c3b9ffSAnshuman Khandual 	WARN_ON(!pmd_leaf(pmd));
296a5c3b9ffSAnshuman Khandual }
297a5c3b9ffSAnshuman Khandual 
298399145f9SAnshuman Khandual #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
29936b77d1eSGavin Shan static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
300399145f9SAnshuman Khandual {
30131d17076SAnshuman Khandual 	pgprot_t prot = vm_get_page_prot(idx);
3022e326c07SAnshuman Khandual 	unsigned long val = idx, *ptr = &val;
30365ac1a60SAnshuman Khandual 	pud_t pud;
304399145f9SAnshuman Khandual 
305787d563bSAneesh Kumar K.V 	if (!has_transparent_hugepage())
306787d563bSAneesh Kumar K.V 		return;
307787d563bSAneesh Kumar K.V 
3082e326c07SAnshuman Khandual 	pr_debug("Validating PUD basic (%pGv)\n", ptr);
30936b77d1eSGavin Shan 	pud = pfn_pud(args->fixed_pud_pfn, prot);
310bb5c47ceSAnshuman Khandual 
311bb5c47ceSAnshuman Khandual 	/*
312bb5c47ceSAnshuman Khandual 	 * This test needs to be executed after the given page table entry
31331d17076SAnshuman Khandual 	 * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
314bb5c47ceSAnshuman Khandual 	 * does not have the dirty bit enabled from the beginning. This is
315bb5c47ceSAnshuman Khandual 	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
316bb5c47ceSAnshuman Khandual 	 * that the dirty bit is set.
317bb5c47ceSAnshuman Khandual 	 */
318bb5c47ceSAnshuman Khandual 	WARN_ON(pud_dirty(pud_wrprotect(pud)));
319bb5c47ceSAnshuman Khandual 
320399145f9SAnshuman Khandual 	WARN_ON(!pud_same(pud, pud));
321399145f9SAnshuman Khandual 	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
322bb5c47ceSAnshuman Khandual 	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
323bb5c47ceSAnshuman Khandual 	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
324399145f9SAnshuman Khandual 	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
325399145f9SAnshuman Khandual 	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
326399145f9SAnshuman Khandual 	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
327bb5c47ceSAnshuman Khandual 	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
328bb5c47ceSAnshuman Khandual 	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));
329399145f9SAnshuman Khandual 
33036b77d1eSGavin Shan 	if (mm_pmd_folded(args->mm))
331399145f9SAnshuman Khandual 		return;
332399145f9SAnshuman Khandual 
333399145f9SAnshuman Khandual 	/*
334399145f9SAnshuman Khandual 	 * A huge page does not point to a next level page table
335399145f9SAnshuman Khandual 	 * entry. Hence this must qualify as pud_bad().
336399145f9SAnshuman Khandual 	 */
337399145f9SAnshuman Khandual 	WARN_ON(!pud_bad(pud_mkhuge(pud)));
338399145f9SAnshuman Khandual }
339a5c3b9ffSAnshuman Khandual 
3404cbde03bSGavin Shan static void __init pud_advanced_tests(struct pgtable_debug_args *args)
341a5c3b9ffSAnshuman Khandual {
3428c5b3a8aSGavin Shan 	struct page *page;
3434cbde03bSGavin Shan 	unsigned long vaddr = args->vaddr;
34465ac1a60SAnshuman Khandual 	pud_t pud;
345a5c3b9ffSAnshuman Khandual 
346a5c3b9ffSAnshuman Khandual 	if (!has_transparent_hugepage())
347a5c3b9ffSAnshuman Khandual 		return;
348a5c3b9ffSAnshuman Khandual 
3498c5b3a8aSGavin Shan 	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
3508c5b3a8aSGavin Shan 	if (!page)
3514cbde03bSGavin Shan 		return;
3524cbde03bSGavin Shan 
3538c5b3a8aSGavin Shan 	/*
3548c5b3a8aSGavin Shan 	 * flush_dcache_page() is called after set_pud_at() to clear
3558c5b3a8aSGavin Shan 	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
3568c5b3a8aSGavin Shan 	 * when it's released and the page allocation check will fail when
3578c5b3a8aSGavin Shan 	 * the page is allocated again. For architectures other than ARM64,
3588c5b3a8aSGavin Shan 	 * the unexpected overhead of cache flushing is acceptable.
3598c5b3a8aSGavin Shan 	 */
3606315df41SAnshuman Khandual 	pr_debug("Validating PUD advanced\n");
361a5c3b9ffSAnshuman Khandual 	/* Align the address wrt HPAGE_PUD_SIZE */
36204f7ce3fSGerald Schaefer 	vaddr &= HPAGE_PUD_MASK;
363a5c3b9ffSAnshuman Khandual 
3644cbde03bSGavin Shan 	pud = pfn_pud(args->pud_pfn, args->page_prot);
3654cbde03bSGavin Shan 	set_pud_at(args->mm, vaddr, args->pudp, pud);
3668c5b3a8aSGavin Shan 	flush_dcache_page(page);
3674cbde03bSGavin Shan 	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
3684cbde03bSGavin Shan 	pud = READ_ONCE(*args->pudp);
369a5c3b9ffSAnshuman Khandual 	WARN_ON(pud_write(pud));
370a5c3b9ffSAnshuman Khandual 
371a5c3b9ffSAnshuman Khandual #ifndef __PAGETABLE_PMD_FOLDED
3724cbde03bSGavin Shan 	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
3734cbde03bSGavin Shan 	pud = READ_ONCE(*args->pudp);
374a5c3b9ffSAnshuman Khandual 	WARN_ON(!pud_none(pud));
375a5c3b9ffSAnshuman Khandual #endif /* __PAGETABLE_PMD_FOLDED */
3764cbde03bSGavin Shan 	pud = pfn_pud(args->pud_pfn, args->page_prot);
377a5c3b9ffSAnshuman Khandual 	pud = pud_wrprotect(pud);
378a5c3b9ffSAnshuman Khandual 	pud = pud_mkclean(pud);
3794cbde03bSGavin Shan 	set_pud_at(args->mm, vaddr, args->pudp, pud);
3808c5b3a8aSGavin Shan 	flush_dcache_page(page);
381a5c3b9ffSAnshuman Khandual 	pud = pud_mkwrite(pud);
382a5c3b9ffSAnshuman Khandual 	pud = pud_mkdirty(pud);
3834cbde03bSGavin Shan 	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
3844cbde03bSGavin Shan 	pud = READ_ONCE(*args->pudp);
385a5c3b9ffSAnshuman Khandual 	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));
386a5c3b9ffSAnshuman Khandual 
387c3824e18SAneesh Kumar K.V #ifndef __PAGETABLE_PMD_FOLDED
3884cbde03bSGavin Shan 	pudp_huge_get_and_clear_full(args->mm, vaddr, args->pudp, 1);
3894cbde03bSGavin Shan 	pud = READ_ONCE(*args->pudp);
390c3824e18SAneesh Kumar K.V 	WARN_ON(!pud_none(pud));
391c3824e18SAneesh Kumar K.V #endif /* __PAGETABLE_PMD_FOLDED */
392c3824e18SAneesh Kumar K.V 
3934cbde03bSGavin Shan 	pud = pfn_pud(args->pud_pfn, args->page_prot);
394a5c3b9ffSAnshuman Khandual 	pud = pud_mkyoung(pud);
3954cbde03bSGavin Shan 	set_pud_at(args->mm, vaddr, args->pudp, pud);
3968c5b3a8aSGavin Shan 	flush_dcache_page(page);
3974cbde03bSGavin Shan 	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
3984cbde03bSGavin Shan 	pud = READ_ONCE(*args->pudp);
399a5c3b9ffSAnshuman Khandual 	WARN_ON(pud_young(pud));
40013af0506SAneesh Kumar K.V 
4014cbde03bSGavin Shan 	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
402a5c3b9ffSAnshuman Khandual }
403a5c3b9ffSAnshuman Khandual 
4048983d231SGavin Shan static void __init pud_leaf_tests(struct pgtable_debug_args *args)
405a5c3b9ffSAnshuman Khandual {
40665ac1a60SAnshuman Khandual 	pud_t pud;
40765ac1a60SAnshuman Khandual 
40865ac1a60SAnshuman Khandual 	if (!has_transparent_hugepage())
40965ac1a60SAnshuman Khandual 		return;
410a5c3b9ffSAnshuman Khandual 
4116315df41SAnshuman Khandual 	pr_debug("Validating PUD leaf\n");
4128983d231SGavin Shan 	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
413a5c3b9ffSAnshuman Khandual 	/*
414a5c3b9ffSAnshuman Khandual 	 * PUD based THP is a leaf entry.
415a5c3b9ffSAnshuman Khandual 	 */
416a5c3b9ffSAnshuman Khandual 	pud = pud_mkhuge(pud);
417a5c3b9ffSAnshuman Khandual 	WARN_ON(!pud_leaf(pud));
418a5c3b9ffSAnshuman Khandual }
4195fe77be6SShixin Liu #else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
42036b77d1eSGavin Shan static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
4214cbde03bSGavin Shan static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
4228983d231SGavin Shan static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
4235fe77be6SShixin Liu #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
4245fe77be6SShixin Liu #else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
42536b77d1eSGavin Shan static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
42636b77d1eSGavin Shan static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
427c0fe07b0SGavin Shan static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
4284cbde03bSGavin Shan static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
4298983d231SGavin Shan static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
4308983d231SGavin Shan static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
4315fe77be6SShixin Liu #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
432a5c3b9ffSAnshuman Khandual 
43385a14463SAneesh Kumar K.V #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
434c0fe07b0SGavin Shan static void __init pmd_huge_tests(struct pgtable_debug_args *args)
4355fe77be6SShixin Liu {
4365fe77be6SShixin Liu 	pmd_t pmd;
4375fe77be6SShixin Liu 
438c4876ff6SFrank van der Linden 	if (!arch_vmap_pmd_supported(args->page_prot) ||
439c4876ff6SFrank van der Linden 	    args->fixed_alignment < PMD_SIZE)
4405fe77be6SShixin Liu 		return;
4415fe77be6SShixin Liu 
4425fe77be6SShixin Liu 	pr_debug("Validating PMD huge\n");
4435fe77be6SShixin Liu 	/*
4445fe77be6SShixin Liu 	 * The x86 implementation of pmd_set_huge() verifies that the given
4455fe77be6SShixin Liu 	 * PMD is not a populated non-leaf entry.
4465fe77be6SShixin Liu 	 */
447c0fe07b0SGavin Shan 	WRITE_ONCE(*args->pmdp, __pmd(0));
448c0fe07b0SGavin Shan 	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
449c0fe07b0SGavin Shan 	WARN_ON(!pmd_clear_huge(args->pmdp));
450c0fe07b0SGavin Shan 	pmd = READ_ONCE(*args->pmdp);
4515fe77be6SShixin Liu 	WARN_ON(!pmd_none(pmd));
4525fe77be6SShixin Liu }
4535fe77be6SShixin Liu 
4544cbde03bSGavin Shan static void __init pud_huge_tests(struct pgtable_debug_args *args)
455a5c3b9ffSAnshuman Khandual {
456a5c3b9ffSAnshuman Khandual 	pud_t pud;
457a5c3b9ffSAnshuman Khandual 
458c4876ff6SFrank van der Linden 	if (!arch_vmap_pud_supported(args->page_prot) ||
459c4876ff6SFrank van der Linden 	    args->fixed_alignment < PUD_SIZE)
460a5c3b9ffSAnshuman Khandual 		return;
4616315df41SAnshuman Khandual 
4626315df41SAnshuman Khandual 	pr_debug("Validating PUD huge\n");
463a5c3b9ffSAnshuman Khandual 	/*
464a5c3b9ffSAnshuman Khandual 	 * The x86 implementation of pud_set_huge() verifies that the given
465a5c3b9ffSAnshuman Khandual 	 * PUD is not a populated non-leaf entry.
466a5c3b9ffSAnshuman Khandual 	 */
4674cbde03bSGavin Shan 	WRITE_ONCE(*args->pudp, __pud(0));
4684cbde03bSGavin Shan 	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
4694cbde03bSGavin Shan 	WARN_ON(!pud_clear_huge(args->pudp));
4704cbde03bSGavin Shan 	pud = READ_ONCE(*args->pudp);
471a5c3b9ffSAnshuman Khandual 	WARN_ON(!pud_none(pud));
472a5c3b9ffSAnshuman Khandual }
47385a14463SAneesh Kumar K.V #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
474c0fe07b0SGavin Shan static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
4754cbde03bSGavin Shan static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
4765fe77be6SShixin Liu #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
477399145f9SAnshuman Khandual 
47836b77d1eSGavin Shan static void __init p4d_basic_tests(struct pgtable_debug_args *args)
479399145f9SAnshuman Khandual {
480399145f9SAnshuman Khandual 	p4d_t p4d;
481399145f9SAnshuman Khandual 
4826315df41SAnshuman Khandual 	pr_debug("Validating P4D basic\n");
483399145f9SAnshuman Khandual 	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
484399145f9SAnshuman Khandual 	WARN_ON(!p4d_same(p4d, p4d));
485399145f9SAnshuman Khandual }
486399145f9SAnshuman Khandual 
48736b77d1eSGavin Shan static void __init pgd_basic_tests(struct pgtable_debug_args *args)
488399145f9SAnshuman Khandual {
489399145f9SAnshuman Khandual 	pgd_t pgd;
490399145f9SAnshuman Khandual 
4916315df41SAnshuman Khandual 	pr_debug("Validating PGD basic\n");
492399145f9SAnshuman Khandual 	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
493399145f9SAnshuman Khandual 	WARN_ON(!pgd_same(pgd, pgd));
494399145f9SAnshuman Khandual }
495399145f9SAnshuman Khandual 
496399145f9SAnshuman Khandual #ifndef __PAGETABLE_PUD_FOLDED
4974cbde03bSGavin Shan static void __init pud_clear_tests(struct pgtable_debug_args *args)
498399145f9SAnshuman Khandual {
4994cbde03bSGavin Shan 	pud_t pud = READ_ONCE(*args->pudp);
500399145f9SAnshuman Khandual 
5014cbde03bSGavin Shan 	if (mm_pmd_folded(args->mm))
502399145f9SAnshuman Khandual 		return;
503399145f9SAnshuman Khandual 
5046315df41SAnshuman Khandual 	pr_debug("Validating PUD clear\n");
505399145f9SAnshuman Khandual 	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
5064cbde03bSGavin Shan 	WRITE_ONCE(*args->pudp, pud);
5074cbde03bSGavin Shan 	pud_clear(args->pudp);
5084cbde03bSGavin Shan 	pud = READ_ONCE(*args->pudp);
509399145f9SAnshuman Khandual 	WARN_ON(!pud_none(pud));
510399145f9SAnshuman Khandual }
511399145f9SAnshuman Khandual 
5124cbde03bSGavin Shan static void __init pud_populate_tests(struct pgtable_debug_args *args)
513399145f9SAnshuman Khandual {
514399145f9SAnshuman Khandual 	pud_t pud;
515399145f9SAnshuman Khandual 
5164cbde03bSGavin Shan 	if (mm_pmd_folded(args->mm))
517399145f9SAnshuman Khandual 		return;
5186315df41SAnshuman Khandual 
5196315df41SAnshuman Khandual 	pr_debug("Validating PUD populate\n");
520399145f9SAnshuman Khandual 	/*
521399145f9SAnshuman Khandual 	 * This entry points to a next level page table page.
522399145f9SAnshuman Khandual 	 * Hence this must not qualify as pud_bad().
523399145f9SAnshuman Khandual 	 */
5244cbde03bSGavin Shan 	pud_populate(args->mm, args->pudp, args->start_pmdp);
5254cbde03bSGavin Shan 	pud = READ_ONCE(*args->pudp);
526399145f9SAnshuman Khandual 	WARN_ON(pud_bad(pud));
527399145f9SAnshuman Khandual }
528399145f9SAnshuman Khandual #else  /* !__PAGETABLE_PUD_FOLDED */
5294cbde03bSGavin Shan static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
5304cbde03bSGavin Shan static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
531399145f9SAnshuman Khandual #endif /* PAGETABLE_PUD_FOLDED */
532399145f9SAnshuman Khandual 
533399145f9SAnshuman Khandual #ifndef __PAGETABLE_P4D_FOLDED
5342f87f8c3SGavin Shan static void __init p4d_clear_tests(struct pgtable_debug_args *args)
535399145f9SAnshuman Khandual {
5362f87f8c3SGavin Shan 	p4d_t p4d = READ_ONCE(*args->p4dp);
537399145f9SAnshuman Khandual 
5382f87f8c3SGavin Shan 	if (mm_pud_folded(args->mm))
539399145f9SAnshuman Khandual 		return;
540399145f9SAnshuman Khandual 
5416315df41SAnshuman Khandual 	pr_debug("Validating P4D clear\n");
542399145f9SAnshuman Khandual 	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
5432f87f8c3SGavin Shan 	WRITE_ONCE(*args->p4dp, p4d);
5442f87f8c3SGavin Shan 	p4d_clear(args->p4dp);
5452f87f8c3SGavin Shan 	p4d = READ_ONCE(*args->p4dp);
546399145f9SAnshuman Khandual 	WARN_ON(!p4d_none(p4d));
547399145f9SAnshuman Khandual }
548399145f9SAnshuman Khandual 
5492f87f8c3SGavin Shan static void __init p4d_populate_tests(struct pgtable_debug_args *args)
550399145f9SAnshuman Khandual {
551399145f9SAnshuman Khandual 	p4d_t p4d;
552399145f9SAnshuman Khandual 
5532f87f8c3SGavin Shan 	if (mm_pud_folded(args->mm))
554399145f9SAnshuman Khandual 		return;
555399145f9SAnshuman Khandual 
5566315df41SAnshuman Khandual 	pr_debug("Validating P4D populate\n");
557399145f9SAnshuman Khandual 	/*
558399145f9SAnshuman Khandual 	 * This entry points to a next level page table page.
559399145f9SAnshuman Khandual 	 * Hence this must not qualify as p4d_bad().
560399145f9SAnshuman Khandual 	 */
5612f87f8c3SGavin Shan 	pud_clear(args->pudp);
5622f87f8c3SGavin Shan 	p4d_clear(args->p4dp);
5632f87f8c3SGavin Shan 	p4d_populate(args->mm, args->p4dp, args->start_pudp);
5642f87f8c3SGavin Shan 	p4d = READ_ONCE(*args->p4dp);
565399145f9SAnshuman Khandual 	WARN_ON(p4d_bad(p4d));
566399145f9SAnshuman Khandual }
567399145f9SAnshuman Khandual 
5682f87f8c3SGavin Shan static void __init pgd_clear_tests(struct pgtable_debug_args *args)
569399145f9SAnshuman Khandual {
5702f87f8c3SGavin Shan 	pgd_t pgd = READ_ONCE(*(args->pgdp));
571399145f9SAnshuman Khandual 
5722f87f8c3SGavin Shan 	if (mm_p4d_folded(args->mm))
573399145f9SAnshuman Khandual 		return;
574399145f9SAnshuman Khandual 
5756315df41SAnshuman Khandual 	pr_debug("Validating PGD clear\n");
576399145f9SAnshuman Khandual 	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
5772f87f8c3SGavin Shan 	WRITE_ONCE(*args->pgdp, pgd);
5782f87f8c3SGavin Shan 	pgd_clear(args->pgdp);
5792f87f8c3SGavin Shan 	pgd = READ_ONCE(*args->pgdp);
580399145f9SAnshuman Khandual 	WARN_ON(!pgd_none(pgd));
581399145f9SAnshuman Khandual }
582399145f9SAnshuman Khandual 
5832f87f8c3SGavin Shan static void __init pgd_populate_tests(struct pgtable_debug_args *args)
584399145f9SAnshuman Khandual {
585399145f9SAnshuman Khandual 	pgd_t pgd;
586399145f9SAnshuman Khandual 
5872f87f8c3SGavin Shan 	if (mm_p4d_folded(args->mm))
588399145f9SAnshuman Khandual 		return;
589399145f9SAnshuman Khandual 
5906315df41SAnshuman Khandual 	pr_debug("Validating PGD populate\n");
591399145f9SAnshuman Khandual 	/*
592399145f9SAnshuman Khandual 	 * This entry points to a next level page table page.
593399145f9SAnshuman Khandual 	 * Hence this must not qualify as pgd_bad().
594399145f9SAnshuman Khandual 	 */
5952f87f8c3SGavin Shan 	p4d_clear(args->p4dp);
5962f87f8c3SGavin Shan 	pgd_clear(args->pgdp);
5972f87f8c3SGavin Shan 	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
5982f87f8c3SGavin Shan 	pgd = READ_ONCE(*args->pgdp);
599399145f9SAnshuman Khandual 	WARN_ON(pgd_bad(pgd));
600399145f9SAnshuman Khandual }
601399145f9SAnshuman Khandual #else  /* !__PAGETABLE_P4D_FOLDED */
6022f87f8c3SGavin Shan static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
6032f87f8c3SGavin Shan static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
6042f87f8c3SGavin Shan static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
6052f87f8c3SGavin Shan static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
606399145f9SAnshuman Khandual #endif /* PAGETABLE_P4D_FOLDED */
607399145f9SAnshuman Khandual 
60844966c44SGavin Shan static void __init pte_clear_tests(struct pgtable_debug_args *args)
609399145f9SAnshuman Khandual {
6108c5b3a8aSGavin Shan 	struct page *page;
61144966c44SGavin Shan 	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);
61244966c44SGavin Shan 
6138c5b3a8aSGavin Shan 	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
6148c5b3a8aSGavin Shan 	if (!page)
61544966c44SGavin Shan 		return;
616399145f9SAnshuman Khandual 
6178c5b3a8aSGavin Shan 	/*
6188c5b3a8aSGavin Shan 	 * flush_dcache_page() is called after set_pte_at() to clear
6198c5b3a8aSGavin Shan 	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
6208c5b3a8aSGavin Shan 	 * when it's released and the page allocation check will fail when
6218c5b3a8aSGavin Shan 	 * the page is allocated again. For architectures other than ARM64,
6228c5b3a8aSGavin Shan 	 * the unexpected overhead of cache flushing is acceptable.
6238c5b3a8aSGavin Shan 	 */
6246315df41SAnshuman Khandual 	pr_debug("Validating PTE clear\n");
6259f2bad09SHugh Dickins 	if (WARN_ON(!args->ptep))
6269f2bad09SHugh Dickins 		return;
6279f2bad09SHugh Dickins 
628401035d5SAneesh Kumar K.V #ifndef CONFIG_RISCV
629399145f9SAnshuman Khandual 	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
630401035d5SAneesh Kumar K.V #endif
63144966c44SGavin Shan 	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
6328c5b3a8aSGavin Shan 	flush_dcache_page(page);
633399145f9SAnshuman Khandual 	barrier();
63408d5b29eSPasha Tatashin 	ptep_clear(args->mm, args->vaddr, args->ptep);
63544966c44SGavin Shan 	pte = ptep_get(args->ptep);
636399145f9SAnshuman Khandual 	WARN_ON(!pte_none(pte));
637399145f9SAnshuman Khandual }
638399145f9SAnshuman Khandual 
639c0fe07b0SGavin Shan static void __init pmd_clear_tests(struct pgtable_debug_args *args)
640399145f9SAnshuman Khandual {
641c0fe07b0SGavin Shan 	pmd_t pmd = READ_ONCE(*args->pmdp);
642399145f9SAnshuman Khandual 
6436315df41SAnshuman Khandual 	pr_debug("Validating PMD clear\n");
644399145f9SAnshuman Khandual 	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
645c0fe07b0SGavin Shan 	WRITE_ONCE(*args->pmdp, pmd);
646c0fe07b0SGavin Shan 	pmd_clear(args->pmdp);
647c0fe07b0SGavin Shan 	pmd = READ_ONCE(*args->pmdp);
648399145f9SAnshuman Khandual 	WARN_ON(!pmd_none(pmd));
649399145f9SAnshuman Khandual }
650399145f9SAnshuman Khandual 
651c0fe07b0SGavin Shan static void __init pmd_populate_tests(struct pgtable_debug_args *args)
652399145f9SAnshuman Khandual {
653399145f9SAnshuman Khandual 	pmd_t pmd;
654399145f9SAnshuman Khandual 
6556315df41SAnshuman Khandual 	pr_debug("Validating PMD populate\n");
656399145f9SAnshuman Khandual 	/*
657399145f9SAnshuman Khandual 	 * This entry points to a next level page table page.
658399145f9SAnshuman Khandual 	 * Hence this must not qualify as pmd_bad().
659399145f9SAnshuman Khandual 	 */
660c0fe07b0SGavin Shan 	pmd_populate(args->mm, args->pmdp, args->start_ptep);
661c0fe07b0SGavin Shan 	pmd = READ_ONCE(*args->pmdp);
662399145f9SAnshuman Khandual 	WARN_ON(pmd_bad(pmd));
663399145f9SAnshuman Khandual }
664399145f9SAnshuman Khandual 
6658cb183f2SGavin Shan static void __init pte_special_tests(struct pgtable_debug_args *args)
66605289402SAnshuman Khandual {
6678cb183f2SGavin Shan 	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
66805289402SAnshuman Khandual 
66905289402SAnshuman Khandual 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
67005289402SAnshuman Khandual 		return;
67105289402SAnshuman Khandual 
6726315df41SAnshuman Khandual 	pr_debug("Validating PTE special\n");
67305289402SAnshuman Khandual 	WARN_ON(!pte_special(pte_mkspecial(pte)));
67405289402SAnshuman Khandual }
67505289402SAnshuman Khandual 
6768cb183f2SGavin Shan static void __init pte_protnone_tests(struct pgtable_debug_args *args)
67705289402SAnshuman Khandual {
6788cb183f2SGavin Shan 	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);
67905289402SAnshuman Khandual 
68005289402SAnshuman Khandual 	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
68105289402SAnshuman Khandual 		return;
68205289402SAnshuman Khandual 
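	/*
	 * A NUMA hinting (PROT_NONE) entry keeps the page resident, so it
	 * must report both protnone and present.
	 */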
6836315df41SAnshuman Khandual 	pr_debug("Validating PTE protnone\n");
68405289402SAnshuman Khandual 	WARN_ON(!pte_protnone(pte));
68505289402SAnshuman Khandual 	WARN_ON(!pte_present(pte));
68605289402SAnshuman Khandual }
68705289402SAnshuman Khandual 
68805289402SAnshuman Khandual #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6898cb183f2SGavin Shan static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
69005289402SAnshuman Khandual {
69165ac1a60SAnshuman Khandual 	pmd_t pmd;
69205289402SAnshuman Khandual 
69305289402SAnshuman Khandual 	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
69405289402SAnshuman Khandual 		return;
69505289402SAnshuman Khandual 
69665ac1a60SAnshuman Khandual 	if (!has_transparent_hugepage())
69765ac1a60SAnshuman Khandual 		return;
69865ac1a60SAnshuman Khandual 
6996315df41SAnshuman Khandual 	pr_debug("Validating PMD protnone\n");
7008cb183f2SGavin Shan 	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
70105289402SAnshuman Khandual 	WARN_ON(!pmd_protnone(pmd));
70205289402SAnshuman Khandual 	WARN_ON(!pmd_present(pmd));
70305289402SAnshuman Khandual }
70405289402SAnshuman Khandual #else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
7058cb183f2SGavin Shan static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
70605289402SAnshuman Khandual #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
70705289402SAnshuman Khandual 
70805289402SAnshuman Khandual #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
7098cb183f2SGavin Shan static void __init pte_devmap_tests(struct pgtable_debug_args *args)
71005289402SAnshuman Khandual {
7118cb183f2SGavin Shan 	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
71205289402SAnshuman Khandual 
7136315df41SAnshuman Khandual 	pr_debug("Validating PTE devmap\n");
71405289402SAnshuman Khandual 	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
71505289402SAnshuman Khandual }
71605289402SAnshuman Khandual 
71705289402SAnshuman Khandual #ifdef CONFIG_TRANSPARENT_HUGEPAGE
7188cb183f2SGavin Shan static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
71905289402SAnshuman Khandual {
72065ac1a60SAnshuman Khandual 	pmd_t pmd;
72165ac1a60SAnshuman Khandual 
72265ac1a60SAnshuman Khandual 	if (!has_transparent_hugepage())
72365ac1a60SAnshuman Khandual 		return;
72405289402SAnshuman Khandual 
7256315df41SAnshuman Khandual 	pr_debug("Validating PMD devmap\n");
7268cb183f2SGavin Shan 	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
72705289402SAnshuman Khandual 	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
72805289402SAnshuman Khandual }
72905289402SAnshuman Khandual 
73005289402SAnshuman Khandual #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
7318cb183f2SGavin Shan static void __init pud_devmap_tests(struct pgtable_debug_args *args)
73205289402SAnshuman Khandual {
73365ac1a60SAnshuman Khandual 	pud_t pud;
73465ac1a60SAnshuman Khandual 
73565ac1a60SAnshuman Khandual 	if (!has_transparent_hugepage())
73665ac1a60SAnshuman Khandual 		return;
73705289402SAnshuman Khandual 
7386315df41SAnshuman Khandual 	pr_debug("Validating PUD devmap\n");
7398cb183f2SGavin Shan 	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
74005289402SAnshuman Khandual 	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
74105289402SAnshuman Khandual }
74205289402SAnshuman Khandual #else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
7438cb183f2SGavin Shan static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
74405289402SAnshuman Khandual #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
74505289402SAnshuman Khandual #else  /* CONFIG_TRANSPARENT_HUGEPAGE */
7468cb183f2SGavin Shan static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
7478cb183f2SGavin Shan static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
74805289402SAnshuman Khandual #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
74905289402SAnshuman Khandual #else
7508cb183f2SGavin Shan static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
7518cb183f2SGavin Shan static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
7528cb183f2SGavin Shan static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
75305289402SAnshuman Khandual #endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
75405289402SAnshuman Khandual 
7555f447e80SGavin Shan static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
75605289402SAnshuman Khandual {
7575f447e80SGavin Shan 	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
75805289402SAnshuman Khandual 
75905289402SAnshuman Khandual 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
76005289402SAnshuman Khandual 		return;
76105289402SAnshuman Khandual 
7626315df41SAnshuman Khandual 	pr_debug("Validating PTE soft dirty\n");
76305289402SAnshuman Khandual 	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
76405289402SAnshuman Khandual 	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
76505289402SAnshuman Khandual }
76605289402SAnshuman Khandual 
7675f447e80SGavin Shan static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
76805289402SAnshuman Khandual {
7695f447e80SGavin Shan 	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
77005289402SAnshuman Khandual 
77105289402SAnshuman Khandual 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
77205289402SAnshuman Khandual 		return;
77305289402SAnshuman Khandual 
7746315df41SAnshuman Khandual 	pr_debug("Validating PTE swap soft dirty\n");
77505289402SAnshuman Khandual 	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
77605289402SAnshuman Khandual 	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
77705289402SAnshuman Khandual }
77805289402SAnshuman Khandual 
77905289402SAnshuman Khandual #ifdef CONFIG_TRANSPARENT_HUGEPAGE
7805f447e80SGavin Shan static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
78105289402SAnshuman Khandual {
78265ac1a60SAnshuman Khandual 	pmd_t pmd;
78305289402SAnshuman Khandual 
78405289402SAnshuman Khandual 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
78505289402SAnshuman Khandual 		return;
78605289402SAnshuman Khandual 
78765ac1a60SAnshuman Khandual 	if (!has_transparent_hugepage())
78865ac1a60SAnshuman Khandual 		return;
78965ac1a60SAnshuman Khandual 
7906315df41SAnshuman Khandual 	pr_debug("Validating PMD soft dirty\n");
7915f447e80SGavin Shan 	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
79205289402SAnshuman Khandual 	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
79305289402SAnshuman Khandual 	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
79405289402SAnshuman Khandual }
79505289402SAnshuman Khandual 
7965f447e80SGavin Shan static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
79705289402SAnshuman Khandual {
79865ac1a60SAnshuman Khandual 	pmd_t pmd;
79905289402SAnshuman Khandual 
80005289402SAnshuman Khandual 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
80105289402SAnshuman Khandual 		!IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
80205289402SAnshuman Khandual 		return;
80305289402SAnshuman Khandual 
80465ac1a60SAnshuman Khandual 	if (!has_transparent_hugepage())
80565ac1a60SAnshuman Khandual 		return;
80665ac1a60SAnshuman Khandual 
8076315df41SAnshuman Khandual 	pr_debug("Validating PMD swap soft dirty\n");
8085f447e80SGavin Shan 	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
80905289402SAnshuman Khandual 	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
81005289402SAnshuman Khandual 	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
81105289402SAnshuman Khandual }
812b593b90dSShixin Liu #else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
8135f447e80SGavin Shan static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
8145f447e80SGavin Shan static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
815b593b90dSShixin Liu #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
81605289402SAnshuman Khandual 
817210d1e8aSDavid Hildenbrand static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
818210d1e8aSDavid Hildenbrand {
8192321ba3eSDavid Hildenbrand 	unsigned long max_swap_offset;
8202321ba3eSDavid Hildenbrand 	swp_entry_t entry, entry2;
8212321ba3eSDavid Hildenbrand 	pte_t pte;
822210d1e8aSDavid Hildenbrand 
823210d1e8aSDavid Hildenbrand 	pr_debug("Validating PTE swap exclusive\n");
8242321ba3eSDavid Hildenbrand 
8252321ba3eSDavid Hildenbrand 	/* See generic_max_swapfile_size(): probe the maximum offset */
8262321ba3eSDavid Hildenbrand 	max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));
8272321ba3eSDavid Hildenbrand 
8282321ba3eSDavid Hildenbrand 	/* Create a swp entry with all possible bits set */
8292321ba3eSDavid Hildenbrand 	entry = swp_entry((1 << MAX_SWAPFILES_SHIFT) - 1, max_swap_offset);
8302321ba3eSDavid Hildenbrand 
8312321ba3eSDavid Hildenbrand 	pte = swp_entry_to_pte(entry);
8322321ba3eSDavid Hildenbrand 	WARN_ON(pte_swp_exclusive(pte));
8332321ba3eSDavid Hildenbrand 	WARN_ON(!is_swap_pte(pte));
8342321ba3eSDavid Hildenbrand 	entry2 = pte_to_swp_entry(pte);
8352321ba3eSDavid Hildenbrand 	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
8362321ba3eSDavid Hildenbrand 
837210d1e8aSDavid Hildenbrand 	pte = pte_swp_mkexclusive(pte);
838210d1e8aSDavid Hildenbrand 	WARN_ON(!pte_swp_exclusive(pte));
8392321ba3eSDavid Hildenbrand 	WARN_ON(!is_swap_pte(pte));
8402321ba3eSDavid Hildenbrand 	WARN_ON(pte_swp_soft_dirty(pte));
8412321ba3eSDavid Hildenbrand 	entry2 = pte_to_swp_entry(pte);
8422321ba3eSDavid Hildenbrand 	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
8432321ba3eSDavid Hildenbrand 
844210d1e8aSDavid Hildenbrand 	pte = pte_swp_clear_exclusive(pte);
845210d1e8aSDavid Hildenbrand 	WARN_ON(pte_swp_exclusive(pte));
8462321ba3eSDavid Hildenbrand 	WARN_ON(!is_swap_pte(pte));
8472321ba3eSDavid Hildenbrand 	entry2 = pte_to_swp_entry(pte);
8482321ba3eSDavid Hildenbrand 	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
849210d1e8aSDavid Hildenbrand }
850210d1e8aSDavid Hildenbrand 
8515f447e80SGavin Shan static void __init pte_swap_tests(struct pgtable_debug_args *args)
85205289402SAnshuman Khandual {
85305289402SAnshuman Khandual 	swp_entry_t swp;
85405289402SAnshuman Khandual 	pte_t pte;
85505289402SAnshuman Khandual 
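	/*
	 * Round trip a pte through the arch swap entry encoding and make
	 * sure the pfn survives the conversion.
	 */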
8566315df41SAnshuman Khandual 	pr_debug("Validating PTE swap\n");
8575f447e80SGavin Shan 	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
85805289402SAnshuman Khandual 	swp = __pte_to_swp_entry(pte);
85905289402SAnshuman Khandual 	pte = __swp_entry_to_pte(swp);
8605f447e80SGavin Shan 	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
86105289402SAnshuman Khandual }
86205289402SAnshuman Khandual 
86305289402SAnshuman Khandual #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
8645f447e80SGavin Shan static void __init pmd_swap_tests(struct pgtable_debug_args *args)
86505289402SAnshuman Khandual {
86605289402SAnshuman Khandual 	swp_entry_t swp;
86705289402SAnshuman Khandual 	pmd_t pmd;
86805289402SAnshuman Khandual 
86965ac1a60SAnshuman Khandual 	if (!has_transparent_hugepage())
87065ac1a60SAnshuman Khandual 		return;
87165ac1a60SAnshuman Khandual 
8726315df41SAnshuman Khandual 	pr_debug("Validating PMD swap\n");
8735f447e80SGavin Shan 	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
87405289402SAnshuman Khandual 	swp = __pmd_to_swp_entry(pmd);
87505289402SAnshuman Khandual 	pmd = __swp_entry_to_pmd(swp);
8765f447e80SGavin Shan 	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
87705289402SAnshuman Khandual }
87805289402SAnshuman Khandual #else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
8795f447e80SGavin Shan static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
88005289402SAnshuman Khandual #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
88105289402SAnshuman Khandual 
8824878a888SGavin Shan static void __init swap_migration_tests(struct pgtable_debug_args *args)
88305289402SAnshuman Khandual {
88405289402SAnshuman Khandual 	struct page *page;
88505289402SAnshuman Khandual 	swp_entry_t swp;
88605289402SAnshuman Khandual 
88705289402SAnshuman Khandual 	if (!IS_ENABLED(CONFIG_MIGRATION))
88805289402SAnshuman Khandual 		return;
8896315df41SAnshuman Khandual 
89005289402SAnshuman Khandual 	/*
89105289402SAnshuman Khandual 	 * swap_migration_tests() requires a dedicated page as it needs to
89205289402SAnshuman Khandual 	 * be locked before creating a migration entry from it. Locking the
89305289402SAnshuman Khandual 	 * page that actually maps kernel text ('start_kernel') can be really
8944878a888SGavin Shan 	 * problematic. Let's use the allocated page explicitly for this
8954878a888SGavin Shan 	 * purpose.
89605289402SAnshuman Khandual 	 */
8974878a888SGavin Shan 	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
8984878a888SGavin Shan 	if (!page)
89905289402SAnshuman Khandual 		return;
9004878a888SGavin Shan 
9014878a888SGavin Shan 	pr_debug("Validating swap migration\n");
90205289402SAnshuman Khandual 
90305289402SAnshuman Khandual 	/*
90423647618SAnshuman Khandual 	 * make_[readable|writable]_migration_entry() expects the given page to
90523647618SAnshuman Khandual 	 * be locked, otherwise it stumbles upon a BUG_ON().
90605289402SAnshuman Khandual 	 */
90705289402SAnshuman Khandual 	__SetPageLocked(page);
9084dd845b5SAlistair Popple 	swp = make_writable_migration_entry(page_to_pfn(page));
90905289402SAnshuman Khandual 	WARN_ON(!is_migration_entry(swp));
9104dd845b5SAlistair Popple 	WARN_ON(!is_writable_migration_entry(swp));
91105289402SAnshuman Khandual 
9124dd845b5SAlistair Popple 	swp = make_readable_migration_entry(swp_offset(swp));
91305289402SAnshuman Khandual 	WARN_ON(!is_migration_entry(swp));
9144dd845b5SAlistair Popple 	WARN_ON(is_writable_migration_entry(swp));
91505289402SAnshuman Khandual 
9164dd845b5SAlistair Popple 	swp = make_readable_migration_entry(page_to_pfn(page));
91705289402SAnshuman Khandual 	WARN_ON(!is_migration_entry(swp));
9184dd845b5SAlistair Popple 	WARN_ON(is_writable_migration_entry(swp));
91905289402SAnshuman Khandual 	__ClearPageLocked(page);
92005289402SAnshuman Khandual }
92105289402SAnshuman Khandual 
92205289402SAnshuman Khandual #ifdef CONFIG_HUGETLB_PAGE
92336b77d1eSGavin Shan static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
92405289402SAnshuman Khandual {
92505289402SAnshuman Khandual 	struct page *page;
92605289402SAnshuman Khandual 	pte_t pte;
92705289402SAnshuman Khandual 
9286315df41SAnshuman Khandual 	pr_debug("Validating HugeTLB basic\n");
92905289402SAnshuman Khandual 	/*
93005289402SAnshuman Khandual 	 * Accessing the page associated with the pfn is safe here,
93105289402SAnshuman Khandual 	 * as it was previously derived from a real kernel symbol.
93205289402SAnshuman Khandual 	 */
93336b77d1eSGavin Shan 	page = pfn_to_page(args->fixed_pmd_pfn);
93436b77d1eSGavin Shan 	pte = mk_huge_pte(page, args->page_prot);
93505289402SAnshuman Khandual 
93605289402SAnshuman Khandual 	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
93705289402SAnshuman Khandual 	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
93805289402SAnshuman Khandual 	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));
93905289402SAnshuman Khandual 
94005289402SAnshuman Khandual #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
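	/*
	 * With CONFIG_ARCH_WANT_GENERAL_HUGETLB, hugetlb entries are handled
	 * as (huge) ptes, so arch_make_huge_pte() on a PMD sized mapping must
	 * produce an entry that pte_huge() recognizes.
	 */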
94136b77d1eSGavin Shan 	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);
94205289402SAnshuman Khandual 
9439dabf6e1SAnshuman Khandual 	WARN_ON(!pte_huge(arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS)));
94405289402SAnshuman Khandual #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
94505289402SAnshuman Khandual }
94605289402SAnshuman Khandual #else  /* !CONFIG_HUGETLB_PAGE */
94736b77d1eSGavin Shan static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
94805289402SAnshuman Khandual #endif /* CONFIG_HUGETLB_PAGE */
94905289402SAnshuman Khandual 
95005289402SAnshuman Khandual #ifdef CONFIG_TRANSPARENT_HUGEPAGE
9514878a888SGavin Shan static void __init pmd_thp_tests(struct pgtable_debug_args *args)
95205289402SAnshuman Khandual {
95305289402SAnshuman Khandual 	pmd_t pmd;
95405289402SAnshuman Khandual 
95505289402SAnshuman Khandual 	if (!has_transparent_hugepage())
95605289402SAnshuman Khandual 		return;
95705289402SAnshuman Khandual 
9586315df41SAnshuman Khandual 	pr_debug("Validating PMD based THP\n");
95905289402SAnshuman Khandual 	/*
96005289402SAnshuman Khandual 	 * pmd_trans_huge() and pmd_present() must return positive after
96105289402SAnshuman Khandual 	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
96205289402SAnshuman Khandual 	 * optimization for transparent huge pages. pmd_trans_huge() must
96305289402SAnshuman Khandual 	 * be true if pmd_page() returns a valid THP to avoid taking the
96405289402SAnshuman Khandual 	 * pmd_lock when others walk over non-transhuge pmds (i.e. there
96505289402SAnshuman Khandual 	 * are no THPs allocated). Especially when splitting a THP and
96605289402SAnshuman Khandual 	 * removing the present bit from the pmd, pmd_trans_huge() still
96705289402SAnshuman Khandual 	 * needs to return true. pmd_present() should be true whenever
96805289402SAnshuman Khandual 	 * pmd_trans_huge() returns true.
96905289402SAnshuman Khandual 	 */
9704878a888SGavin Shan 	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
97105289402SAnshuman Khandual 	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));
97205289402SAnshuman Khandual 
97305289402SAnshuman Khandual #ifndef __HAVE_ARCH_PMDP_INVALIDATE
97405289402SAnshuman Khandual 	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
97505289402SAnshuman Khandual 	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
97605289402SAnshuman Khandual #endif /* __HAVE_ARCH_PMDP_INVALIDATE */
97705289402SAnshuman Khandual }
97805289402SAnshuman Khandual 
97905289402SAnshuman Khandual #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
9804878a888SGavin Shan static void __init pud_thp_tests(struct pgtable_debug_args *args)
98105289402SAnshuman Khandual {
98205289402SAnshuman Khandual 	pud_t pud;
98305289402SAnshuman Khandual 
98405289402SAnshuman Khandual 	if (!has_transparent_hugepage())
98505289402SAnshuman Khandual 		return;
98605289402SAnshuman Khandual 
9876315df41SAnshuman Khandual 	pr_debug("Validating PUD based THP\n");
9884878a888SGavin Shan 	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
98905289402SAnshuman Khandual 	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));
99005289402SAnshuman Khandual 
99105289402SAnshuman Khandual 	/*
99205289402SAnshuman Khandual 	 * pud_mkinvalid() has been dropped for now. Re-enable
99305289402SAnshuman Khandual 	 * these tests when it comes back with a modified pud_present().
99405289402SAnshuman Khandual 	 *
99505289402SAnshuman Khandual 	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
99605289402SAnshuman Khandual 	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
99705289402SAnshuman Khandual 	 */
99805289402SAnshuman Khandual }
99905289402SAnshuman Khandual #else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
10004878a888SGavin Shan static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
100105289402SAnshuman Khandual #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
100205289402SAnshuman Khandual #else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
10034878a888SGavin Shan static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
10044878a888SGavin Shan static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
100505289402SAnshuman Khandual #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
100605289402SAnshuman Khandual 
1007399145f9SAnshuman Khandual static unsigned long __init get_random_vaddr(void)
1008399145f9SAnshuman Khandual {
1009399145f9SAnshuman Khandual 	unsigned long random_vaddr, random_pages, total_user_pages;
1010399145f9SAnshuman Khandual 
1011399145f9SAnshuman Khandual 	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;
1012399145f9SAnshuman Khandual 
1013399145f9SAnshuman Khandual 	random_pages = get_random_long() % total_user_pages;
1014399145f9SAnshuman Khandual 	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;
1015399145f9SAnshuman Khandual 
1016399145f9SAnshuman Khandual 	return random_vaddr;
1017399145f9SAnshuman Khandual }
1018399145f9SAnshuman Khandual 
10193c9b84f0SGavin Shan static void __init destroy_args(struct pgtable_debug_args *args)
10203c9b84f0SGavin Shan {
10213c9b84f0SGavin Shan 	struct page *page = NULL;
10223c9b84f0SGavin Shan 
10233c9b84f0SGavin Shan 	/* Free (huge) page */
10243c9b84f0SGavin Shan 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
10253c9b84f0SGavin Shan 	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
10263c9b84f0SGavin Shan 	    has_transparent_hugepage() &&
10273c9b84f0SGavin Shan 	    args->pud_pfn != ULONG_MAX) {
10283c9b84f0SGavin Shan 		if (args->is_contiguous_page) {
10293c9b84f0SGavin Shan 			free_contig_range(args->pud_pfn,
10303c9b84f0SGavin Shan 					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
10313c9b84f0SGavin Shan 		} else {
10323c9b84f0SGavin Shan 			page = pfn_to_page(args->pud_pfn);
10333c9b84f0SGavin Shan 			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
10343c9b84f0SGavin Shan 		}
10353c9b84f0SGavin Shan 
10363c9b84f0SGavin Shan 		args->pud_pfn = ULONG_MAX;
10373c9b84f0SGavin Shan 		args->pmd_pfn = ULONG_MAX;
10383c9b84f0SGavin Shan 		args->pte_pfn = ULONG_MAX;
10393c9b84f0SGavin Shan 	}
10403c9b84f0SGavin Shan 
10413c9b84f0SGavin Shan 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
10423c9b84f0SGavin Shan 	    has_transparent_hugepage() &&
10433c9b84f0SGavin Shan 	    args->pmd_pfn != ULONG_MAX) {
10443c9b84f0SGavin Shan 		if (args->is_contiguous_page) {
10453c9b84f0SGavin Shan 			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
10463c9b84f0SGavin Shan 		} else {
10473c9b84f0SGavin Shan 			page = pfn_to_page(args->pmd_pfn);
10483c9b84f0SGavin Shan 			__free_pages(page, HPAGE_PMD_ORDER);
10493c9b84f0SGavin Shan 		}
10503c9b84f0SGavin Shan 
10513c9b84f0SGavin Shan 		args->pmd_pfn = ULONG_MAX;
10523c9b84f0SGavin Shan 		args->pte_pfn = ULONG_MAX;
10533c9b84f0SGavin Shan 	}
10543c9b84f0SGavin Shan 
10553c9b84f0SGavin Shan 	if (args->pte_pfn != ULONG_MAX) {
10563c9b84f0SGavin Shan 		page = pfn_to_page(args->pte_pfn);
1057dcc1be11SLorenzo Stoakes 		__free_page(page);
10583c9b84f0SGavin Shan 
10593c9b84f0SGavin Shan 		args->pte_pfn = ULONG_MAX;
10603c9b84f0SGavin Shan 	}
10613c9b84f0SGavin Shan 
10623c9b84f0SGavin Shan 	/* Free page table entries */
10633c9b84f0SGavin Shan 	if (args->start_ptep) {
10643c9b84f0SGavin Shan 		pte_free(args->mm, args->start_ptep);
10653c9b84f0SGavin Shan 		mm_dec_nr_ptes(args->mm);
10663c9b84f0SGavin Shan 	}
10673c9b84f0SGavin Shan 
10683c9b84f0SGavin Shan 	if (args->start_pmdp) {
10693c9b84f0SGavin Shan 		pmd_free(args->mm, args->start_pmdp);
10703c9b84f0SGavin Shan 		mm_dec_nr_pmds(args->mm);
10713c9b84f0SGavin Shan 	}
10723c9b84f0SGavin Shan 
10733c9b84f0SGavin Shan 	if (args->start_pudp) {
10743c9b84f0SGavin Shan 		pud_free(args->mm, args->start_pudp);
10753c9b84f0SGavin Shan 		mm_dec_nr_puds(args->mm);
10763c9b84f0SGavin Shan 	}
10773c9b84f0SGavin Shan 
10783c9b84f0SGavin Shan 	if (args->start_p4dp)
10793c9b84f0SGavin Shan 		p4d_free(args->mm, args->start_p4dp);
10803c9b84f0SGavin Shan 
10813c9b84f0SGavin Shan 	/* Free vma and mm struct */
10823c9b84f0SGavin Shan 	if (args->vma)
10833c9b84f0SGavin Shan 		vm_area_free(args->vma);
10843c9b84f0SGavin Shan 
10853c9b84f0SGavin Shan 	if (args->mm)
10863c9b84f0SGavin Shan 		mmdrop(args->mm);
10873c9b84f0SGavin Shan }
10883c9b84f0SGavin Shan 
10893c9b84f0SGavin Shan static struct page * __init
10903c9b84f0SGavin Shan debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
10913c9b84f0SGavin Shan {
10923c9b84f0SGavin Shan 	struct page *page = NULL;
10933c9b84f0SGavin Shan 
10943c9b84f0SGavin Shan #ifdef CONFIG_CONTIG_ALLOC
109523baf831SKirill A. Shutemov 	if (order > MAX_ORDER) {
10963c9b84f0SGavin Shan 		page = alloc_contig_pages((1 << order), GFP_KERNEL,
10973c9b84f0SGavin Shan 					  first_online_node, NULL);
10983c9b84f0SGavin Shan 		if (page) {
10993c9b84f0SGavin Shan 			args->is_contiguous_page = true;
11003c9b84f0SGavin Shan 			return page;
11013c9b84f0SGavin Shan 		}
11023c9b84f0SGavin Shan 	}
11033c9b84f0SGavin Shan #endif
11043c9b84f0SGavin Shan 
110523baf831SKirill A. Shutemov 	if (order <= MAX_ORDER)
11063c9b84f0SGavin Shan 		page = alloc_pages(GFP_KERNEL, order);
11073c9b84f0SGavin Shan 
11083c9b84f0SGavin Shan 	return page;
11093c9b84f0SGavin Shan }
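
/*
 * Sizing note (editor's sketch, assuming x86_64 with 4K pages and
 * MAX_ORDER == 10): a PMD-sized page fits the buddy allocator while a
 * PUD-sized page needs alloc_contig_pages():
 *
 *	PMD order = PMD_SHIFT - PAGE_SHIFT = 21 - 12 =  9 <= MAX_ORDER
 *	PUD order = PUD_SHIFT - PAGE_SHIFT = 30 - 12 = 18 >  MAX_ORDER
 *
 * On configurations without CONFIG_CONTIG_ALLOC the PUD-sized request
 * simply fails and the tests that need such a page are skipped.
 */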
11103c9b84f0SGavin Shan 
1111c4876ff6SFrank van der Linden /*
1112c4876ff6SFrank van der Linden  * Check if a physical memory range described by <pstart, pend> contains
1113c4876ff6SFrank van der Linden  * an area of size psize that is aligned to psize.
1114c4876ff6SFrank van der Linden  *
1115c4876ff6SFrank van der Linden  * Don't use address 0: an all-zeroes physical address might mask bugs, and
1116c4876ff6SFrank van der Linden  * it's not used on x86.
1117c4876ff6SFrank van der Linden  */
1118c4876ff6SFrank van der Linden static void  __init phys_align_check(phys_addr_t pstart,
1119c4876ff6SFrank van der Linden 				     phys_addr_t pend, unsigned long psize,
1120c4876ff6SFrank van der Linden 				     phys_addr_t *physp, unsigned long *alignp)
1121c4876ff6SFrank van der Linden {
1122c4876ff6SFrank van der Linden 	phys_addr_t aligned_start, aligned_end;
1123c4876ff6SFrank van der Linden 
1124c4876ff6SFrank van der Linden 	if (pstart == 0)
1125c4876ff6SFrank van der Linden 		pstart = PAGE_SIZE;
1126c4876ff6SFrank van der Linden 
1127c4876ff6SFrank van der Linden 	aligned_start = ALIGN(pstart, psize);
1128c4876ff6SFrank van der Linden 	aligned_end = aligned_start + psize;
1129c4876ff6SFrank van der Linden 
1130c4876ff6SFrank van der Linden 	if (aligned_end > aligned_start && aligned_end <= pend) {
1131c4876ff6SFrank van der Linden 		*alignp = psize;
1132c4876ff6SFrank van der Linden 		*physp = aligned_start;
1133c4876ff6SFrank van der Linden 	}
1134c4876ff6SFrank van der Linden }
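
/*
 * Worked example (editor's note, assuming PMD_SIZE == 0x200000 as on
 * x86_64 with 4K pages): with pstart = 0x1234000 and pend = 0x5000000,
 *
 *	aligned_start = ALIGN(0x1234000, 0x200000) = 0x1400000
 *	aligned_end   = 0x1400000 + 0x200000       = 0x1600000
 *
 * aligned_end <= pend, so *physp becomes 0x1400000 and *alignp becomes
 * PMD_SIZE. Had the range ended before 0x1600000, neither output would
 * be written and the caller would keep its previous fallback values.
 */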
1135c4876ff6SFrank van der Linden 
1136c4876ff6SFrank van der Linden static void __init init_fixed_pfns(struct pgtable_debug_args *args)
1137c4876ff6SFrank van der Linden {
1138c4876ff6SFrank van der Linden 	u64 idx;
1139c4876ff6SFrank van der Linden 	phys_addr_t phys, pstart, pend;
1140c4876ff6SFrank van der Linden 
1141c4876ff6SFrank van der Linden 	/*
1142c4876ff6SFrank van der Linden 	 * Initialize the fixed pfns. To do this, try to find a
1143c4876ff6SFrank van der Linden 	 * valid physical range, preferably aligned to PUD_SIZE,
1144c4876ff6SFrank van der Linden 	 * but settling for PMD_SIZE alignment as a fallback. If
1145c4876ff6SFrank van der Linden 	 * neither of those is found, use the physical address of
1146c4876ff6SFrank van der Linden 	 * the start_kernel symbol.
1147c4876ff6SFrank van der Linden 	 *
1148c4876ff6SFrank van der Linden 	 * The memory doesn't need to be allocated; it just needs to exist
1149c4876ff6SFrank van der Linden 	 * as usable memory. It won't be touched.
1150c4876ff6SFrank van der Linden 	 *
1151c4876ff6SFrank van der Linden 	 * The alignment is recorded, and can be checked to see if we
1152c4876ff6SFrank van der Linden 	 * can run the tests that require an actual valid physical
1153c4876ff6SFrank van der Linden 	 * address range on some architectures ({pmd,pud}_huge_test
1154c4876ff6SFrank van der Linden 	 * on x86).
1155c4876ff6SFrank van der Linden 	 */
1156c4876ff6SFrank van der Linden 
1157c4876ff6SFrank van der Linden 	phys = __pa_symbol(&start_kernel);
1158c4876ff6SFrank van der Linden 	args->fixed_alignment = PAGE_SIZE;
1159c4876ff6SFrank van der Linden 
1160c4876ff6SFrank van der Linden 	for_each_mem_range(idx, &pstart, &pend) {
1161c4876ff6SFrank van der Linden 		/* First check for a PUD-aligned area */
1162c4876ff6SFrank van der Linden 		phys_align_check(pstart, pend, PUD_SIZE, &phys,
1163c4876ff6SFrank van der Linden 				 &args->fixed_alignment);
1164c4876ff6SFrank van der Linden 
1165c4876ff6SFrank van der Linden 		/* If a PUD-aligned area is found, we're done */
1166c4876ff6SFrank van der Linden 		if (args->fixed_alignment == PUD_SIZE)
1167c4876ff6SFrank van der Linden 			break;
1168c4876ff6SFrank van der Linden 
1169c4876ff6SFrank van der Linden 		/*
1170c4876ff6SFrank van der Linden 		 * If no PMD-aligned area found yet, check for one,
1171c4876ff6SFrank van der Linden 		 * but continue the loop to look for a PUD-aligned area.
1172c4876ff6SFrank van der Linden 		 */
1173c4876ff6SFrank van der Linden 		if (args->fixed_alignment < PMD_SIZE)
1174c4876ff6SFrank van der Linden 			phys_align_check(pstart, pend, PMD_SIZE, &phys,
1175c4876ff6SFrank van der Linden 					 &args->fixed_alignment);
1176c4876ff6SFrank van der Linden 	}
1177c4876ff6SFrank van der Linden 
1178c4876ff6SFrank van der Linden 	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
1179c4876ff6SFrank van der Linden 	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
1180c4876ff6SFrank van der Linden 	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
1181c4876ff6SFrank van der Linden 	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
1182c4876ff6SFrank van der Linden 	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
1183c4876ff6SFrank van der Linden 	WARN_ON(!pfn_valid(args->fixed_pte_pfn));
1184c4876ff6SFrank van der Linden }
1185c4876ff6SFrank van der Linden 
1186c4876ff6SFrank van der Linden 
11873c9b84f0SGavin Shan static int __init init_args(struct pgtable_debug_args *args)
11883c9b84f0SGavin Shan {
11893c9b84f0SGavin Shan 	struct page *page = NULL;
11903c9b84f0SGavin Shan 	int ret = 0;
11913c9b84f0SGavin Shan 
11923c9b84f0SGavin Shan 	/*
11933c9b84f0SGavin Shan 	 * Initialize the debugging data.
11943c9b84f0SGavin Shan 	 *
119531d17076SAnshuman Khandual 	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
119631d17076SAnshuman Khandual 	 * will help create page table entries with PROT_NONE permission as
119731d17076SAnshuman Khandual 	 * required for pxx_protnone_tests().
11983c9b84f0SGavin Shan 	 */
11993c9b84f0SGavin Shan 	memset(args, 0, sizeof(*args));
12003c9b84f0SGavin Shan 	args->vaddr              = get_random_vaddr();
1201d7e679b6SKefeng Wang 	args->page_prot          = vm_get_page_prot(VM_ACCESS_FLAGS);
120231d17076SAnshuman Khandual 	args->page_prot_none     = vm_get_page_prot(VM_NONE);
12033c9b84f0SGavin Shan 	args->is_contiguous_page = false;
12043c9b84f0SGavin Shan 	args->pud_pfn            = ULONG_MAX;
12053c9b84f0SGavin Shan 	args->pmd_pfn            = ULONG_MAX;
12063c9b84f0SGavin Shan 	args->pte_pfn            = ULONG_MAX;
12073c9b84f0SGavin Shan 	args->fixed_pgd_pfn      = ULONG_MAX;
12083c9b84f0SGavin Shan 	args->fixed_p4d_pfn      = ULONG_MAX;
12093c9b84f0SGavin Shan 	args->fixed_pud_pfn      = ULONG_MAX;
12103c9b84f0SGavin Shan 	args->fixed_pmd_pfn      = ULONG_MAX;
12113c9b84f0SGavin Shan 	args->fixed_pte_pfn      = ULONG_MAX;
12123c9b84f0SGavin Shan 
12133c9b84f0SGavin Shan 	/* Allocate mm and vma */
12143c9b84f0SGavin Shan 	args->mm = mm_alloc();
12153c9b84f0SGavin Shan 	if (!args->mm) {
12163c9b84f0SGavin Shan 		pr_err("Failed to allocate mm struct\n");
12173c9b84f0SGavin Shan 		ret = -ENOMEM;
12183c9b84f0SGavin Shan 		goto error;
12193c9b84f0SGavin Shan 	}
12203c9b84f0SGavin Shan 
12213c9b84f0SGavin Shan 	args->vma = vm_area_alloc(args->mm);
12223c9b84f0SGavin Shan 	if (!args->vma) {
12233c9b84f0SGavin Shan 		pr_err("Failed to allocate vma\n");
12243c9b84f0SGavin Shan 		ret = -ENOMEM;
12253c9b84f0SGavin Shan 		goto error;
12263c9b84f0SGavin Shan 	}
12273c9b84f0SGavin Shan 
12283c9b84f0SGavin Shan 	/*
12293c9b84f0SGavin Shan 	 * Allocate page table entries. They will be modified in the tests.
12303c9b84f0SGavin Shan 	 * Let's save the page table entries so that they can be released
12313c9b84f0SGavin Shan 	 * when the tests are completed.
12323c9b84f0SGavin Shan 	 */
12333c9b84f0SGavin Shan 	args->pgdp = pgd_offset(args->mm, args->vaddr);
12343c9b84f0SGavin Shan 	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
12353c9b84f0SGavin Shan 	if (!args->p4dp) {
12363c9b84f0SGavin Shan 		pr_err("Failed to allocate p4d entries\n");
12373c9b84f0SGavin Shan 		ret = -ENOMEM;
12383c9b84f0SGavin Shan 		goto error;
12393c9b84f0SGavin Shan 	}
12403c9b84f0SGavin Shan 	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
12413c9b84f0SGavin Shan 	WARN_ON(!args->start_p4dp);
12423c9b84f0SGavin Shan 
12433c9b84f0SGavin Shan 	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
12443c9b84f0SGavin Shan 	if (!args->pudp) {
12453c9b84f0SGavin Shan 		pr_err("Failed to allocate pud entries\n");
12463c9b84f0SGavin Shan 		ret = -ENOMEM;
12473c9b84f0SGavin Shan 		goto error;
12483c9b84f0SGavin Shan 	}
12493c9b84f0SGavin Shan 	args->start_pudp = pud_offset(args->p4dp, 0UL);
12503c9b84f0SGavin Shan 	WARN_ON(!args->start_pudp);
12513c9b84f0SGavin Shan 
12523c9b84f0SGavin Shan 	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
12533c9b84f0SGavin Shan 	if (!args->pmdp) {
12543c9b84f0SGavin Shan 		pr_err("Failed to allocate pmd entries\n");
12553c9b84f0SGavin Shan 		ret = -ENOMEM;
12563c9b84f0SGavin Shan 		goto error;
12573c9b84f0SGavin Shan 	}
12583c9b84f0SGavin Shan 	args->start_pmdp = pmd_offset(args->pudp, 0UL);
12593c9b84f0SGavin Shan 	WARN_ON(!args->start_pmdp);
12603c9b84f0SGavin Shan 
12613c9b84f0SGavin Shan 	if (pte_alloc(args->mm, args->pmdp)) {
12623c9b84f0SGavin Shan 		pr_err("Failed to allocate pte entries\n");
12633c9b84f0SGavin Shan 		ret = -ENOMEM;
12643c9b84f0SGavin Shan 		goto error;
12653c9b84f0SGavin Shan 	}
12663c9b84f0SGavin Shan 	args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
12673c9b84f0SGavin Shan 	WARN_ON(!args->start_ptep);
12683c9b84f0SGavin Shan 
1269c4876ff6SFrank van der Linden 	init_fixed_pfns(args);
12703c9b84f0SGavin Shan 
12713c9b84f0SGavin Shan 	/*
12723c9b84f0SGavin Shan 	 * Allocate (huge) pages because some of the tests need to access
12733c9b84f0SGavin Shan 	 * the data in the pages. The corresponding tests will be skipped
12743c9b84f0SGavin Shan 	 * if we fail to allocate (huge) pages.
12753c9b84f0SGavin Shan 	 */
12763c9b84f0SGavin Shan 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
12773c9b84f0SGavin Shan 	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
12783c9b84f0SGavin Shan 	    has_transparent_hugepage()) {
12793c9b84f0SGavin Shan 		page = debug_vm_pgtable_alloc_huge_page(args,
12803c9b84f0SGavin Shan 				HPAGE_PUD_SHIFT - PAGE_SHIFT);
12813c9b84f0SGavin Shan 		if (page) {
12823c9b84f0SGavin Shan 			args->pud_pfn = page_to_pfn(page);
12833c9b84f0SGavin Shan 			args->pmd_pfn = args->pud_pfn;
12843c9b84f0SGavin Shan 			args->pte_pfn = args->pud_pfn;
12853c9b84f0SGavin Shan 			return 0;
12863c9b84f0SGavin Shan 		}
12873c9b84f0SGavin Shan 	}
12883c9b84f0SGavin Shan 
12893c9b84f0SGavin Shan 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
12903c9b84f0SGavin Shan 	    has_transparent_hugepage()) {
12913c9b84f0SGavin Shan 		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
12923c9b84f0SGavin Shan 		if (page) {
12933c9b84f0SGavin Shan 			args->pmd_pfn = page_to_pfn(page);
12943c9b84f0SGavin Shan 			args->pte_pfn = args->pmd_pfn;
12953c9b84f0SGavin Shan 			return 0;
12963c9b84f0SGavin Shan 		}
12973c9b84f0SGavin Shan 	}
12983c9b84f0SGavin Shan 
1299dcc1be11SLorenzo Stoakes 	page = alloc_page(GFP_KERNEL);
13003c9b84f0SGavin Shan 	if (page)
13013c9b84f0SGavin Shan 		args->pte_pfn = page_to_pfn(page);
13023c9b84f0SGavin Shan 
13033c9b84f0SGavin Shan 	return 0;
13043c9b84f0SGavin Shan 
13053c9b84f0SGavin Shan error:
13063c9b84f0SGavin Shan 	destroy_args(args);
13073c9b84f0SGavin Shan 	return ret;
13083c9b84f0SGavin Shan }
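
/*
 * Editor's note on the resulting pfn fields: after a successful
 * init_args(), pud_pfn/pmd_pfn/pte_pfn all point into the largest page
 * that could actually be allocated above:
 *
 *	PUD-sized page:  pud_pfn == pmd_pfn == pte_pfn
 *	PMD-sized page:  pud_pfn == ULONG_MAX, pmd_pfn == pte_pfn
 *	single page:     only pte_pfn is valid
 *
 * A value of ULONG_MAX therefore means "no backing page at this level";
 * destroy_args() above frees whichever page was allocated based on the
 * same convention.
 */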
13093c9b84f0SGavin Shan 
1310399145f9SAnshuman Khandual static int __init debug_vm_pgtable(void)
1311399145f9SAnshuman Khandual {
13123c9b84f0SGavin Shan 	struct pgtable_debug_args args;
1313fea1120cSKees Cook 	spinlock_t *ptl = NULL;
13143c9b84f0SGavin Shan 	int idx, ret;
1315399145f9SAnshuman Khandual 
1316399145f9SAnshuman Khandual 	pr_info("Validating architecture page table helpers\n");
13173c9b84f0SGavin Shan 	ret = init_args(&args);
13183c9b84f0SGavin Shan 	if (ret)
13193c9b84f0SGavin Shan 		return ret;
13203c9b84f0SGavin Shan 
13212e326c07SAnshuman Khandual 	/*
132231d17076SAnshuman Khandual 	 * Iterate over each possible vm_flags combination to make sure
13232e326c07SAnshuman Khandual 	 * that all the basic page table transformation validations hold
13242e326c07SAnshuman Khandual 	 * true irrespective of the starting protection value for a
13252e326c07SAnshuman Khandual 	 * given page table entry.
132631d17076SAnshuman Khandual 	 *
132731d17076SAnshuman Khandual 	 * Protection based vm_flags combinations are always linear
132831d17076SAnshuman Khandual 	 * and increasing, i.e. starting from VM_NONE and going up to
132931d17076SAnshuman Khandual 	 * (VM_SHARED | VM_EXEC | VM_WRITE | VM_READ).
13302e326c07SAnshuman Khandual 	 */
133131d17076SAnshuman Khandual #define VM_FLAGS_START	(VM_NONE)
133231d17076SAnshuman Khandual #define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)
133331d17076SAnshuman Khandual 
133431d17076SAnshuman Khandual 	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
133536b77d1eSGavin Shan 		pte_basic_tests(&args, idx);
133636b77d1eSGavin Shan 		pmd_basic_tests(&args, idx);
133736b77d1eSGavin Shan 		pud_basic_tests(&args, idx);
13382e326c07SAnshuman Khandual 	}
13392e326c07SAnshuman Khandual 
13402e326c07SAnshuman Khandual 	/*
13412e326c07SAnshuman Khandual 	 * Both P4D and PGD level tests are very basic and do not
13422e326c07SAnshuman Khandual 	 * involve creating page table entries from the protection
13432e326c07SAnshuman Khandual 	 * value and the given pfn. Hence keep them out of the
13442e326c07SAnshuman Khandual 	 * above iteration for now to save some test execution
13452e326c07SAnshuman Khandual 	 * time.
13462e326c07SAnshuman Khandual 	 */
134736b77d1eSGavin Shan 	p4d_basic_tests(&args);
134836b77d1eSGavin Shan 	pgd_basic_tests(&args);
1349399145f9SAnshuman Khandual 
13508983d231SGavin Shan 	pmd_leaf_tests(&args);
13518983d231SGavin Shan 	pud_leaf_tests(&args);
1352a5c3b9ffSAnshuman Khandual 
13538cb183f2SGavin Shan 	pte_special_tests(&args);
13548cb183f2SGavin Shan 	pte_protnone_tests(&args);
13558cb183f2SGavin Shan 	pmd_protnone_tests(&args);
135605289402SAnshuman Khandual 
13578cb183f2SGavin Shan 	pte_devmap_tests(&args);
13588cb183f2SGavin Shan 	pmd_devmap_tests(&args);
13598cb183f2SGavin Shan 	pud_devmap_tests(&args);
136005289402SAnshuman Khandual 
13615f447e80SGavin Shan 	pte_soft_dirty_tests(&args);
13625f447e80SGavin Shan 	pmd_soft_dirty_tests(&args);
13635f447e80SGavin Shan 	pte_swap_soft_dirty_tests(&args);
13645f447e80SGavin Shan 	pmd_swap_soft_dirty_tests(&args);
136505289402SAnshuman Khandual 
1366210d1e8aSDavid Hildenbrand 	pte_swap_exclusive_tests(&args);
1367210d1e8aSDavid Hildenbrand 
13685f447e80SGavin Shan 	pte_swap_tests(&args);
13695f447e80SGavin Shan 	pmd_swap_tests(&args);
137005289402SAnshuman Khandual 
13714878a888SGavin Shan 	swap_migration_tests(&args);
137205289402SAnshuman Khandual 
13734878a888SGavin Shan 	pmd_thp_tests(&args);
13744878a888SGavin Shan 	pud_thp_tests(&args);
137505289402SAnshuman Khandual 
137636b77d1eSGavin Shan 	hugetlb_basic_tests(&args);
1377e8edf0adSAneesh Kumar K.V 
13786f302e27SAneesh Kumar K.V 	/*
13796f302e27SAneesh Kumar K.V 	 * Page table modifying tests. They need to hold the
13806f302e27SAneesh Kumar K.V 	 * proper page table lock.
13816f302e27SAneesh Kumar K.V 	 */
1382e8edf0adSAneesh Kumar K.V 
138344966c44SGavin Shan 	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
138444966c44SGavin Shan 	pte_clear_tests(&args);
138544966c44SGavin Shan 	pte_advanced_tests(&args);
13869f2bad09SHugh Dickins 	if (args.ptep)
138744966c44SGavin Shan 		pte_unmap_unlock(args.ptep, ptl);
1388e8edf0adSAneesh Kumar K.V 
1389c0fe07b0SGavin Shan 	ptl = pmd_lock(args.mm, args.pmdp);
1390c0fe07b0SGavin Shan 	pmd_clear_tests(&args);
1391c0fe07b0SGavin Shan 	pmd_advanced_tests(&args);
1392c0fe07b0SGavin Shan 	pmd_huge_tests(&args);
1393c0fe07b0SGavin Shan 	pmd_populate_tests(&args);
13946f302e27SAneesh Kumar K.V 	spin_unlock(ptl);
13956f302e27SAneesh Kumar K.V 
13964cbde03bSGavin Shan 	ptl = pud_lock(args.mm, args.pudp);
13974cbde03bSGavin Shan 	pud_clear_tests(&args);
13984cbde03bSGavin Shan 	pud_advanced_tests(&args);
13994cbde03bSGavin Shan 	pud_huge_tests(&args);
14004cbde03bSGavin Shan 	pud_populate_tests(&args);
14016f302e27SAneesh Kumar K.V 	spin_unlock(ptl);
14026f302e27SAneesh Kumar K.V 
14032f87f8c3SGavin Shan 	spin_lock(&(args.mm->page_table_lock));
14042f87f8c3SGavin Shan 	p4d_clear_tests(&args);
14052f87f8c3SGavin Shan 	pgd_clear_tests(&args);
14062f87f8c3SGavin Shan 	p4d_populate_tests(&args);
14072f87f8c3SGavin Shan 	pgd_populate_tests(&args);
14082f87f8c3SGavin Shan 	spin_unlock(&(args.mm->page_table_lock));
1409e8edf0adSAneesh Kumar K.V 
14103c9b84f0SGavin Shan 	destroy_args(&args);
1411399145f9SAnshuman Khandual 	return 0;
1412399145f9SAnshuman Khandual }
1413399145f9SAnshuman Khandual late_initcall(debug_vm_pgtable);
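
/*
 * Usage note (editor's sketch): build with CONFIG_DEBUG_VM_PGTABLE=y and
 * the suite runs once during boot via this late_initcall(). Each group of
 * checks announces itself with a pr_debug("Validating ...") line and any
 * violated expectation is reported as a WARN_ON() backtrace in the kernel
 * log.
 */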
1414