// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the semantics
 * expectations that are being validated here. All future changes in here
 * or the documentation need to be in sync.
 *
 * On the s390 platform, the lower 4 bits are used to identify a given page
 * table entry type. But these bits might affect the ability to clear entries
 * with pxx_clear() because of how dynamic page table folding works on s390.
 * So while loading up the entries, do not change the lower 4 bits. This does
 * not affect any other platform. Also avoid the 62nd bit on ppc64 that is
 * used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE	GENMASK(7, 0)
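/*
 * For illustration, on a 64-bit build the masks above work out to:
 *
 *   ARCH_SKIP_MASK = GENMASK(3, 0) | GENMASK(62, 62)  = 0x400000000000000f
 *   RANDOM_ORVALUE = GENMASK(63, 0) & ~ARCH_SKIP_MASK = 0xbffffffffffffff0
 *
 * The *_clear_tests() below OR this value into an entry before clearing
 * it, so that pxx_clear() is shown to clear every bit and not just the
 * few bits a given architecture happens to care about.
 */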
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_alignment;
	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};
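/*
 * A rough map of how the fields above are used by the tests in this file:
 * the fixed_*_pfn fields back the "basic" tests, which only construct and
 * inspect entries; pud_pfn/pmd_pfn/pte_pfn refer to pages allocated for
 * the "advanced" tests (ULONG_MAX when no suitable page could be
 * allocated) and are freed again in destroy_args(); is_contiguous_page
 * records whether the huge page came from alloc_contig_pages() so that it
 * gets freed the same way; fixed_alignment gates the huge vmap tests
 * against PMD_SIZE/PUD_SIZE.
 */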
static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}
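/*
 * A short summary of the pattern above, which repeats for pmd/pud below:
 * each mkold/mkyoung, mkclean/mkdirty and wrprotect/mkwrite pair must be
 * mutually inverse on its respective bit, and wrprotect must leave the
 * dirty state untouched in either direction. The basic tests only build
 * entries in memory; nothing is installed into a page table here.
 */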
static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding a TLB flush.
	 * This requires that set_pte_at() is never used to update an
	 * existing pte entry, so clear the pte before calling set_pte_at().
	 *
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}
static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pmd_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pte entries */
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}
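/*
 * Background on the deposit/withdraw pair above: with THP, a pte level
 * page table is deposited when a huge PMD is installed, so that one is
 * guaranteed to be available if the huge mapping later has to be split.
 * The test mirrors that protocol: deposit before exercising the PMD
 * helpers, withdraw once the entry has been cleared again.
 */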
static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}
static void __init pud_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	unsigned long vaddr = args->vaddr;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pud_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(args->mm, vaddr, args->pudp, 1);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkyoung(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
}
static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot) ||
	    args->fixed_alignment < PMD_SIZE)
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}
static void __init pud_huge_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(args->page_prot) ||
	    args->fixed_alignment < PUD_SIZE)
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pudp, __pud(0));
	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
	WARN_ON(!pud_clear_huge(args->pudp));
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
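/*
 * The two huge tests above cover the CONFIG_HAVE_ARCH_HUGE_VMAP path used
 * by vmalloc()/ioremap() when mapping large areas with block entries: a
 * pxx_set_huge()/pxx_clear_huge() round trip on a zeroed slot must leave
 * the entry none again. They are skipped when the backing allocation is
 * not aligned to the respective block size (see fixed_alignment).
 */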
static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
	pud_t pud = READ_ONCE(*args->pudp);

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pudp, pud);
	pud_clear(args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(args->mm, args->pudp, args->start_pmdp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#endif /* PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d = READ_ONCE(*args->p4dp);

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->p4dp, p4d);
	p4d_clear(args->p4dp);
	p4d = READ_ONCE(*args->p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(args->pudp);
	p4d_clear(args->p4dp);
	p4d_populate(args->mm, args->p4dp, args->start_pudp);
	p4d = READ_ONCE(*args->p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd = READ_ONCE(*(args->pgdp));

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pgdp, pgd);
	pgd_clear(args->pgdp);
	pgd = READ_ONCE(*args->pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(args->p4dp);
	pgd_clear(args->pgdp);
	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
	pgd = READ_ONCE(*args->pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
#endif /* PAGETABLE_P4D_FOLDED */
static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	barrier();
	ptep_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = READ_ONCE(*args->pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pmdp, pmd);
	pmd_clear(args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_bad(pmd));
}
static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
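/*
 * Context for the protnone tests above: NUMA balancing temporarily maps
 * pages PROT_NONE to trip a hinting fault on the next access. Such an
 * entry must look inaccessible (pxx_protnone()) while still being
 * pxx_present(), since the page itself has not gone anywhere.
 */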
#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else
static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
{
	unsigned long max_swap_offset;
	swp_entry_t entry, entry2;
	pte_t pte;

	pr_debug("Validating PTE swap exclusive\n");

	/* See generic_max_swapfile_size(): probe the maximum offset */
	max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));

	/* Create a swp entry with all possible bits set */
	entry = swp_entry((1 << MAX_SWAPFILES_SHIFT) - 1, max_swap_offset);

	pte = swp_entry_to_pte(entry);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_mkexclusive(pte);
	WARN_ON(!pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	WARN_ON(pte_swp_soft_dirty(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_clear_exclusive(pte);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
}
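/*
 * How the probe above works: converting an offset of ~0UL to a pte drops
 * whatever offset bits the architecture cannot encode, so reading the
 * entry back through pte_to_swp_entry() yields the largest offset that
 * survives the round trip. The test then verifies that toggling the swap
 * exclusive marker is fully reversible and never leaks into the type,
 * offset or soft dirty bits of such a maximally populated entry.
 */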
static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
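/*
 * The two tests above rely on __pte_to_swp_entry()/__swp_entry_to_pte()
 * (and the pmd equivalents) being plain bit-level conversions between the
 * arch pte/pmd representation and swp_entry_t: round-tripping an ordinary
 * pfn entry through them must hand back the very same pfn.
 */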
static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's use the allocated page explicitly for this
	 * purpose.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");

	/*
	 * make_[readable|writable]_migration_entry() expects given page to
	 * be locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(args->fixed_pmd_pfn);
	pte = mk_huge_pte(page, args->page_prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);

	WARN_ON(!pte_huge(arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return positive after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge page. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non transhuge pmds (i.e. there
	 * are no THP allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Enable back
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}
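/*
 * get_random_vaddr() above picks a page aligned address uniformly from
 * the user range [FIRST_USER_ADDRESS, TASK_SIZE): it draws a random page
 * index below (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE and scales it
 * back to an address. The tests only need an address that is plausible
 * for the dummy vma; nothing is ever dereferenced through it.
 */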
10233c9b84f0SGavin Shan 			free_contig_range(args->pud_pfn,
10243c9b84f0SGavin Shan 					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
10253c9b84f0SGavin Shan 		} else {
10263c9b84f0SGavin Shan 			page = pfn_to_page(args->pud_pfn);
10273c9b84f0SGavin Shan 			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
10283c9b84f0SGavin Shan 		}
10293c9b84f0SGavin Shan 
10303c9b84f0SGavin Shan 		args->pud_pfn = ULONG_MAX;
10313c9b84f0SGavin Shan 		args->pmd_pfn = ULONG_MAX;
10323c9b84f0SGavin Shan 		args->pte_pfn = ULONG_MAX;
10333c9b84f0SGavin Shan 	}
10343c9b84f0SGavin Shan 
10353c9b84f0SGavin Shan 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
10363c9b84f0SGavin Shan 	    has_transparent_hugepage() &&
10373c9b84f0SGavin Shan 	    args->pmd_pfn != ULONG_MAX) {
10383c9b84f0SGavin Shan 		if (args->is_contiguous_page) {
10393c9b84f0SGavin Shan 			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
10403c9b84f0SGavin Shan 		} else {
10413c9b84f0SGavin Shan 			page = pfn_to_page(args->pmd_pfn);
10423c9b84f0SGavin Shan 			__free_pages(page, HPAGE_PMD_ORDER);
10433c9b84f0SGavin Shan 		}
10443c9b84f0SGavin Shan 
10453c9b84f0SGavin Shan 		args->pmd_pfn = ULONG_MAX;
10463c9b84f0SGavin Shan 		args->pte_pfn = ULONG_MAX;
10473c9b84f0SGavin Shan 	}
10483c9b84f0SGavin Shan 
10493c9b84f0SGavin Shan 	if (args->pte_pfn != ULONG_MAX) {
10503c9b84f0SGavin Shan 		page = pfn_to_page(args->pte_pfn);
1051*dcc1be11SLorenzo Stoakes 		__free_page(page);
10523c9b84f0SGavin Shan 
10533c9b84f0SGavin Shan 		args->pte_pfn = ULONG_MAX;
10543c9b84f0SGavin Shan 	}
10553c9b84f0SGavin Shan 
10563c9b84f0SGavin Shan 	/* Free page table entries */
10573c9b84f0SGavin Shan 	if (args->start_ptep) {
10583c9b84f0SGavin Shan 		pte_free(args->mm, args->start_ptep);
10593c9b84f0SGavin Shan 		mm_dec_nr_ptes(args->mm);
10603c9b84f0SGavin Shan 	}
10613c9b84f0SGavin Shan 
10623c9b84f0SGavin Shan 	if (args->start_pmdp) {
10633c9b84f0SGavin Shan 		pmd_free(args->mm, args->start_pmdp);
10643c9b84f0SGavin Shan 		mm_dec_nr_pmds(args->mm);
10653c9b84f0SGavin Shan 	}
10663c9b84f0SGavin Shan 
10673c9b84f0SGavin Shan 	if (args->start_pudp) {
10683c9b84f0SGavin Shan 		pud_free(args->mm, args->start_pudp);
10693c9b84f0SGavin Shan 		mm_dec_nr_puds(args->mm);
10703c9b84f0SGavin Shan 	}
10713c9b84f0SGavin Shan 
10723c9b84f0SGavin Shan 	if (args->start_p4dp)
10733c9b84f0SGavin Shan 		p4d_free(args->mm, args->start_p4dp);
10743c9b84f0SGavin Shan 
10753c9b84f0SGavin Shan 	/* Free vma and mm struct */
10763c9b84f0SGavin Shan 	if (args->vma)
10773c9b84f0SGavin Shan 		vm_area_free(args->vma);
10783c9b84f0SGavin Shan 
10793c9b84f0SGavin Shan 	if (args->mm)
10803c9b84f0SGavin Shan 		mmdrop(args->mm);
10813c9b84f0SGavin Shan }
10823c9b84f0SGavin Shan 
10833c9b84f0SGavin Shan static struct page * __init
10843c9b84f0SGavin Shan debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
10853c9b84f0SGavin Shan {
10863c9b84f0SGavin Shan 	struct page *page = NULL;
10873c9b84f0SGavin Shan 
10883c9b84f0SGavin Shan #ifdef CONFIG_CONTIG_ALLOC
10893c9b84f0SGavin Shan 	if (order >= MAX_ORDER) {
10903c9b84f0SGavin Shan 		page = alloc_contig_pages((1 << order), GFP_KERNEL,
10913c9b84f0SGavin Shan 					  first_online_node, NULL);
10923c9b84f0SGavin Shan 		if (page) {
10933c9b84f0SGavin Shan 			args->is_contiguous_page = true;
10943c9b84f0SGavin Shan 			return page;
10953c9b84f0SGavin Shan 		}
10963c9b84f0SGavin Shan 	}
10973c9b84f0SGavin Shan #endif
10983c9b84f0SGavin Shan 
10993c9b84f0SGavin Shan 	if (order < MAX_ORDER)
11003c9b84f0SGavin Shan 		page = alloc_pages(GFP_KERNEL, order);
11013c9b84f0SGavin Shan 
11023c9b84f0SGavin Shan 	return page;
11033c9b84f0SGavin Shan }
11043c9b84f0SGavin Shan 
1105c4876ff6SFrank van der Linden /*
1106c4876ff6SFrank van der Linden  * Check if a physical memory range described by <pstart, pend> contains
1107c4876ff6SFrank van der Linden  * an area that is of size psize, and aligned to psize.
1108c4876ff6SFrank van der Linden  *
1109c4876ff6SFrank van der Linden  * Don't use address 0; an all-zeroes physical address might mask bugs, and
1110c4876ff6SFrank van der Linden  * it's not used on x86.
1111c4876ff6SFrank van der Linden  */
1112c4876ff6SFrank van der Linden static void __init phys_align_check(phys_addr_t pstart,
1113c4876ff6SFrank van der Linden 				    phys_addr_t pend, unsigned long psize,
1114c4876ff6SFrank van der Linden 				    phys_addr_t *physp, unsigned long *alignp)
1115c4876ff6SFrank van der Linden {
1116c4876ff6SFrank van der Linden 	phys_addr_t aligned_start, aligned_end;
1117c4876ff6SFrank van der Linden 
1118c4876ff6SFrank van der Linden 	if (pstart == 0)
1119c4876ff6SFrank van der Linden 		pstart = PAGE_SIZE;
1120c4876ff6SFrank van der Linden 
1121c4876ff6SFrank van der Linden 	aligned_start = ALIGN(pstart, psize);
1122c4876ff6SFrank van der Linden 	aligned_end = aligned_start + psize;
1123c4876ff6SFrank van der Linden 
1124c4876ff6SFrank van der Linden 	if (aligned_end > aligned_start && aligned_end <= pend) {
1125c4876ff6SFrank van der Linden 		*alignp = psize;
1126c4876ff6SFrank van der Linden 		*physp = aligned_start;
1127c4876ff6SFrank van der Linden 	}
1128c4876ff6SFrank van der Linden }
1129c4876ff6SFrank van der Linden 
1130c4876ff6SFrank van der Linden static void __init init_fixed_pfns(struct pgtable_debug_args *args)
1131c4876ff6SFrank van der Linden {
1132c4876ff6SFrank van der Linden 	u64 idx;
1133c4876ff6SFrank van der Linden 	phys_addr_t phys, pstart, pend;
1134c4876ff6SFrank van der Linden 
1135c4876ff6SFrank van der Linden 	/*
1136c4876ff6SFrank van der Linden 	 * Initialize the fixed pfns. To do this, try to find a
1137c4876ff6SFrank van der Linden 	 * valid physical range, preferably aligned to PUD_SIZE,
1138c4876ff6SFrank van der Linden 	 * but settling for aligned to PMD_SIZE as a fallback. If
1139c4876ff6SFrank van der Linden 	 * neither of those is found, use the physical address of
1140c4876ff6SFrank van der Linden 	 * the start_kernel symbol.
1141c4876ff6SFrank van der Linden 	 *
1142c4876ff6SFrank van der Linden 	 * The memory doesn't need to be allocated; it just needs to exist
1143c4876ff6SFrank van der Linden 	 * as usable memory. It won't be touched.
1144c4876ff6SFrank van der Linden 	 *
1145c4876ff6SFrank van der Linden 	 * The alignment is recorded, and can be checked to see if we
1146c4876ff6SFrank van der Linden 	 * can run the tests that require an actual valid physical
1147c4876ff6SFrank van der Linden 	 * address range on some architectures ({pmd,pud}_huge_tests()
1148c4876ff6SFrank van der Linden 	 * on x86).
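	 *
	 * Illustrative example (editorial addition, assuming x86_64
	 * defaults of 4K pages, 2M PMD_SIZE and 1G PUD_SIZE): a memblock
	 * range covering [0x40000000, 0x80000000) contains a 1G area
	 * aligned to 1G, so phys_align_check() sets phys to 0x40000000
	 * and raises fixed_alignment to PUD_SIZE.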
1149c4876ff6SFrank van der Linden 	 */
1150c4876ff6SFrank van der Linden 
1151c4876ff6SFrank van der Linden 	phys = __pa_symbol(&start_kernel);
1152c4876ff6SFrank van der Linden 	args->fixed_alignment = PAGE_SIZE;
1153c4876ff6SFrank van der Linden 
1154c4876ff6SFrank van der Linden 	for_each_mem_range(idx, &pstart, &pend) {
1155c4876ff6SFrank van der Linden 		/* First check for a PUD-aligned area */
1156c4876ff6SFrank van der Linden 		phys_align_check(pstart, pend, PUD_SIZE, &phys,
1157c4876ff6SFrank van der Linden 				&args->fixed_alignment);
1158c4876ff6SFrank van der Linden 
1159c4876ff6SFrank van der Linden 		/* If a PUD-aligned area is found, we're done */
1160c4876ff6SFrank van der Linden 		if (args->fixed_alignment == PUD_SIZE)
1161c4876ff6SFrank van der Linden 			break;
1162c4876ff6SFrank van der Linden 
1163c4876ff6SFrank van der Linden 		/*
1164c4876ff6SFrank van der Linden 		 * If no PMD-aligned area has been found yet, check for one,
1165c4876ff6SFrank van der Linden 		 * but continue the loop to look for a PUD-aligned area.
1166c4876ff6SFrank van der Linden 		 */
1167c4876ff6SFrank van der Linden 		if (args->fixed_alignment < PMD_SIZE)
1168c4876ff6SFrank van der Linden 			phys_align_check(pstart, pend, PMD_SIZE, &phys,
1169c4876ff6SFrank van der Linden 					&args->fixed_alignment);
1170c4876ff6SFrank van der Linden 	}
1171c4876ff6SFrank van der Linden 
1172c4876ff6SFrank van der Linden 	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
1173c4876ff6SFrank van der Linden 	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
1174c4876ff6SFrank van der Linden 	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
1175c4876ff6SFrank van der Linden 	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
1176c4876ff6SFrank van der Linden 	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
1177c4876ff6SFrank van der Linden 	WARN_ON(!pfn_valid(args->fixed_pte_pfn));
1178c4876ff6SFrank van der Linden }
1179c4876ff6SFrank van der Linden 
1180c4876ff6SFrank van der Linden 
11813c9b84f0SGavin Shan static int __init init_args(struct pgtable_debug_args *args)
11823c9b84f0SGavin Shan {
11833c9b84f0SGavin Shan 	struct page *page = NULL;
11843c9b84f0SGavin Shan 	int ret = 0;
11853c9b84f0SGavin Shan 
11863c9b84f0SGavin Shan 	/*
11873c9b84f0SGavin Shan 	 * Initialize the debugging data.
11883c9b84f0SGavin Shan 	 *
118931d17076SAnshuman Khandual 	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
119031d17076SAnshuman Khandual 	 * will help create page table entries with PROT_NONE permission as
119131d17076SAnshuman Khandual 	 * required for pxx_protnone_tests().
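	 *
	 * (Editorial note: VM_ACCESS_FLAGS expands to
	 * VM_READ | VM_WRITE | VM_EXEC, so the page_prot set below
	 * yields fully accessible entries for the regular tests, while
	 * page_prot_none yields PROT_NONE style entries.)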
11923c9b84f0SGavin Shan 	 */
11933c9b84f0SGavin Shan 	memset(args, 0, sizeof(*args));
11943c9b84f0SGavin Shan 	args->vaddr = get_random_vaddr();
1195d7e679b6SKefeng Wang 	args->page_prot = vm_get_page_prot(VM_ACCESS_FLAGS);
119631d17076SAnshuman Khandual 	args->page_prot_none = vm_get_page_prot(VM_NONE);
11973c9b84f0SGavin Shan 	args->is_contiguous_page = false;
11983c9b84f0SGavin Shan 	args->pud_pfn = ULONG_MAX;
11993c9b84f0SGavin Shan 	args->pmd_pfn = ULONG_MAX;
12003c9b84f0SGavin Shan 	args->pte_pfn = ULONG_MAX;
12013c9b84f0SGavin Shan 	args->fixed_pgd_pfn = ULONG_MAX;
12023c9b84f0SGavin Shan 	args->fixed_p4d_pfn = ULONG_MAX;
12033c9b84f0SGavin Shan 	args->fixed_pud_pfn = ULONG_MAX;
12043c9b84f0SGavin Shan 	args->fixed_pmd_pfn = ULONG_MAX;
12053c9b84f0SGavin Shan 	args->fixed_pte_pfn = ULONG_MAX;
12063c9b84f0SGavin Shan 
12073c9b84f0SGavin Shan 	/* Allocate mm and vma */
12083c9b84f0SGavin Shan 	args->mm = mm_alloc();
12093c9b84f0SGavin Shan 	if (!args->mm) {
12103c9b84f0SGavin Shan 		pr_err("Failed to allocate mm struct\n");
12113c9b84f0SGavin Shan 		ret = -ENOMEM;
12123c9b84f0SGavin Shan 		goto error;
12133c9b84f0SGavin Shan 	}
12143c9b84f0SGavin Shan 
12153c9b84f0SGavin Shan 	args->vma = vm_area_alloc(args->mm);
12163c9b84f0SGavin Shan 	if (!args->vma) {
12173c9b84f0SGavin Shan 		pr_err("Failed to allocate vma\n");
12183c9b84f0SGavin Shan 		ret = -ENOMEM;
12193c9b84f0SGavin Shan 		goto error;
12203c9b84f0SGavin Shan 	}
12213c9b84f0SGavin Shan 
12223c9b84f0SGavin Shan 	/*
12233c9b84f0SGavin Shan 	 * Allocate page table entries. They will be modified in the tests.
12243c9b84f0SGavin Shan 	 * Let's save the page table entries so that they can be released
12253c9b84f0SGavin Shan 	 * when the tests are completed.
12263c9b84f0SGavin Shan 	 */
12273c9b84f0SGavin Shan 	args->pgdp = pgd_offset(args->mm, args->vaddr);
12283c9b84f0SGavin Shan 	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
12293c9b84f0SGavin Shan 	if (!args->p4dp) {
12303c9b84f0SGavin Shan 		pr_err("Failed to allocate p4d entries\n");
12313c9b84f0SGavin Shan 		ret = -ENOMEM;
12323c9b84f0SGavin Shan 		goto error;
12333c9b84f0SGavin Shan 	}
12343c9b84f0SGavin Shan 	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
12353c9b84f0SGavin Shan 	WARN_ON(!args->start_p4dp);
12363c9b84f0SGavin Shan 
12373c9b84f0SGavin Shan 	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
12383c9b84f0SGavin Shan 	if (!args->pudp) {
12393c9b84f0SGavin Shan 		pr_err("Failed to allocate pud entries\n");
12403c9b84f0SGavin Shan 		ret = -ENOMEM;
12413c9b84f0SGavin Shan 		goto error;
12423c9b84f0SGavin Shan 	}
12433c9b84f0SGavin Shan 	args->start_pudp = pud_offset(args->p4dp, 0UL);
12443c9b84f0SGavin Shan 	WARN_ON(!args->start_pudp);
12453c9b84f0SGavin Shan 
12463c9b84f0SGavin Shan 	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
12473c9b84f0SGavin Shan 	if (!args->pmdp) {
12483c9b84f0SGavin Shan 		pr_err("Failed to allocate pmd entries\n");
12493c9b84f0SGavin Shan 		ret = -ENOMEM;
12503c9b84f0SGavin Shan 		goto error;
12513c9b84f0SGavin Shan 	}
12523c9b84f0SGavin Shan 	args->start_pmdp = pmd_offset(args->pudp, 0UL);
12533c9b84f0SGavin Shan 	WARN_ON(!args->start_pmdp);
12543c9b84f0SGavin Shan 
12553c9b84f0SGavin Shan 	if (pte_alloc(args->mm, args->pmdp)) {
12563c9b84f0SGavin Shan 		pr_err("Failed to allocate pte entries\n");
12573c9b84f0SGavin Shan 		ret = -ENOMEM;
12583c9b84f0SGavin Shan 		goto error;
12593c9b84f0SGavin Shan 	}
12603c9b84f0SGavin Shan 	args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
12613c9b84f0SGavin Shan 	WARN_ON(!args->start_ptep);
12623c9b84f0SGavin Shan 
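	/*
	 * Editorial note: the start_* pointers saved above reference each
	 * newly allocated table at offset zero, allowing destroy_args() to
	 * free the whole tables, while pgdp/p4dp/pudp/pmdp point at the
	 * entries covering the randomly chosen vaddr.
	 */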
1263c4876ff6SFrank van der Linden 	init_fixed_pfns(args);
12643c9b84f0SGavin Shan 
12653c9b84f0SGavin Shan 	/*
12663c9b84f0SGavin Shan 	 * Allocate (huge) pages because some of the tests need to access
12673c9b84f0SGavin Shan 	 * the data in the pages. The corresponding tests will be skipped
12683c9b84f0SGavin Shan 	 * if we fail to allocate (huge) pages.
12693c9b84f0SGavin Shan 	 */
12703c9b84f0SGavin Shan 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
12713c9b84f0SGavin Shan 	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
12723c9b84f0SGavin Shan 	    has_transparent_hugepage()) {
12733c9b84f0SGavin Shan 		page = debug_vm_pgtable_alloc_huge_page(args,
12743c9b84f0SGavin Shan 				HPAGE_PUD_SHIFT - PAGE_SHIFT);
12753c9b84f0SGavin Shan 		if (page) {
12763c9b84f0SGavin Shan 			args->pud_pfn = page_to_pfn(page);
12773c9b84f0SGavin Shan 			args->pmd_pfn = args->pud_pfn;
12783c9b84f0SGavin Shan 			args->pte_pfn = args->pud_pfn;
12793c9b84f0SGavin Shan 			return 0;
12803c9b84f0SGavin Shan 		}
12813c9b84f0SGavin Shan 	}
12823c9b84f0SGavin Shan 
12833c9b84f0SGavin Shan 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
12843c9b84f0SGavin Shan 	    has_transparent_hugepage()) {
12853c9b84f0SGavin Shan 		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
12863c9b84f0SGavin Shan 		if (page) {
12873c9b84f0SGavin Shan 			args->pmd_pfn = page_to_pfn(page);
12883c9b84f0SGavin Shan 			args->pte_pfn = args->pmd_pfn;
12893c9b84f0SGavin Shan 			return 0;
12903c9b84f0SGavin Shan 		}
12913c9b84f0SGavin Shan 	}
12923c9b84f0SGavin Shan 
1293*dcc1be11SLorenzo Stoakes 	page = alloc_page(GFP_KERNEL);
12943c9b84f0SGavin Shan 	if (page)
12953c9b84f0SGavin Shan 		args->pte_pfn = page_to_pfn(page);
12963c9b84f0SGavin Shan 
12973c9b84f0SGavin Shan 	return 0;
12983c9b84f0SGavin Shan 
12993c9b84f0SGavin Shan error:
13003c9b84f0SGavin Shan 	destroy_args(args);
13013c9b84f0SGavin Shan 	return ret;
13023c9b84f0SGavin Shan }
13033c9b84f0SGavin Shan 
1304399145f9SAnshuman Khandual static int __init debug_vm_pgtable(void)
1305399145f9SAnshuman Khandual {
13063c9b84f0SGavin Shan 	struct pgtable_debug_args args;
1307fea1120cSKees Cook 	spinlock_t *ptl = NULL;
13083c9b84f0SGavin Shan 	int idx, ret;
1309399145f9SAnshuman Khandual 
1310399145f9SAnshuman Khandual 	pr_info("Validating architecture page table helpers\n");
13113c9b84f0SGavin Shan 	ret = init_args(&args);
13123c9b84f0SGavin Shan 	if (ret)
13133c9b84f0SGavin Shan 		return ret;
13143c9b84f0SGavin Shan 
13152e326c07SAnshuman Khandual 	/*
131631d17076SAnshuman Khandual 	 * Iterate over each possible vm_flags to make sure that all
13172e326c07SAnshuman Khandual 	 * the basic page table transformation validations just hold
13182e326c07SAnshuman Khandual 	 * true irrespective of the starting protection value for a
13192e326c07SAnshuman Khandual 	 * given page table entry.
132031d17076SAnshuman Khandual 	 *
132131d17076SAnshuman Khandual 	 * Protection-based vm_flags combinations are always linear
132231d17076SAnshuman Khandual 	 * and increasing, i.e. starting from VM_NONE and going up to
132331d17076SAnshuman Khandual 	 * (VM_SHARED | READ | WRITE | EXEC).
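	 *
	 * For example (editorial illustration): idx == VM_NONE is the
	 * empty protection, idx == (VM_READ | VM_WRITE) is a private
	 * read-write mapping, and the final index combines all four
	 * flags above.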
13242e326c07SAnshuman Khandual 	 */
132531d17076SAnshuman Khandual #define VM_FLAGS_START	(VM_NONE)
132631d17076SAnshuman Khandual #define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)
132731d17076SAnshuman Khandual 
132831d17076SAnshuman Khandual 	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
132936b77d1eSGavin Shan 		pte_basic_tests(&args, idx);
133036b77d1eSGavin Shan 		pmd_basic_tests(&args, idx);
133136b77d1eSGavin Shan 		pud_basic_tests(&args, idx);
13322e326c07SAnshuman Khandual 	}
13332e326c07SAnshuman Khandual 
13342e326c07SAnshuman Khandual 	/*
13352e326c07SAnshuman Khandual 	 * Both P4D and PGD level tests are very basic and do not
13362e326c07SAnshuman Khandual 	 * involve creating page table entries from the protection
13372e326c07SAnshuman Khandual 	 * value and the given pfn. Hence just keep them out of
13382e326c07SAnshuman Khandual 	 * the above iteration for now to save some test execution
13392e326c07SAnshuman Khandual 	 * time.
13402e326c07SAnshuman Khandual 	 */
134136b77d1eSGavin Shan 	p4d_basic_tests(&args);
134236b77d1eSGavin Shan 	pgd_basic_tests(&args);
1343399145f9SAnshuman Khandual 
13448983d231SGavin Shan 	pmd_leaf_tests(&args);
13458983d231SGavin Shan 	pud_leaf_tests(&args);
1346a5c3b9ffSAnshuman Khandual 
13478cb183f2SGavin Shan 	pte_special_tests(&args);
13488cb183f2SGavin Shan 	pte_protnone_tests(&args);
13498cb183f2SGavin Shan 	pmd_protnone_tests(&args);
135005289402SAnshuman Khandual 
13518cb183f2SGavin Shan 	pte_devmap_tests(&args);
13528cb183f2SGavin Shan 	pmd_devmap_tests(&args);
13538cb183f2SGavin Shan 	pud_devmap_tests(&args);
135405289402SAnshuman Khandual 
13555f447e80SGavin Shan 	pte_soft_dirty_tests(&args);
13565f447e80SGavin Shan 	pmd_soft_dirty_tests(&args);
13575f447e80SGavin Shan 	pte_swap_soft_dirty_tests(&args);
13585f447e80SGavin Shan 	pmd_swap_soft_dirty_tests(&args);
135905289402SAnshuman Khandual 
1360210d1e8aSDavid Hildenbrand 	pte_swap_exclusive_tests(&args);
1361210d1e8aSDavid Hildenbrand 
13625f447e80SGavin Shan 	pte_swap_tests(&args);
13635f447e80SGavin Shan 	pmd_swap_tests(&args);
136405289402SAnshuman Khandual 
13654878a888SGavin Shan 	swap_migration_tests(&args);
136605289402SAnshuman Khandual 
13674878a888SGavin Shan 	pmd_thp_tests(&args);
13684878a888SGavin Shan 	pud_thp_tests(&args);
136905289402SAnshuman Khandual 
137036b77d1eSGavin Shan 	hugetlb_basic_tests(&args);
1371e8edf0adSAneesh Kumar K.V 
13726f302e27SAneesh Kumar K.V 	/*
13736f302e27SAneesh Kumar K.V 	 * Page table modifying tests. They need to hold
13746f302e27SAneesh Kumar K.V 	 * the proper page table lock.
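	 *
	 * (Editorial summary: the locking below mirrors what core MM code
	 * takes at each level: pte_offset_map_lock() for PTEs, pmd_lock()
	 * and pud_lock() for huge entries, and mm->page_table_lock for the
	 * P4D/PGD level.)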
13756f302e27SAneesh Kumar K.V 	 */
1376e8edf0adSAneesh Kumar K.V 
137744966c44SGavin Shan 	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
137844966c44SGavin Shan 	pte_clear_tests(&args);
137944966c44SGavin Shan 	pte_advanced_tests(&args);
138044966c44SGavin Shan 	pte_unmap_unlock(args.ptep, ptl);
1381e8edf0adSAneesh Kumar K.V 
1382c0fe07b0SGavin Shan 	ptl = pmd_lock(args.mm, args.pmdp);
1383c0fe07b0SGavin Shan 	pmd_clear_tests(&args);
1384c0fe07b0SGavin Shan 	pmd_advanced_tests(&args);
1385c0fe07b0SGavin Shan 	pmd_huge_tests(&args);
1386c0fe07b0SGavin Shan 	pmd_populate_tests(&args);
13876f302e27SAneesh Kumar K.V 	spin_unlock(ptl);
13886f302e27SAneesh Kumar K.V 
13894cbde03bSGavin Shan 	ptl = pud_lock(args.mm, args.pudp);
13904cbde03bSGavin Shan 	pud_clear_tests(&args);
13914cbde03bSGavin Shan 	pud_advanced_tests(&args);
13924cbde03bSGavin Shan 	pud_huge_tests(&args);
13934cbde03bSGavin Shan 	pud_populate_tests(&args);
13946f302e27SAneesh Kumar K.V 	spin_unlock(ptl);
13956f302e27SAneesh Kumar K.V 
13962f87f8c3SGavin Shan 	spin_lock(&(args.mm->page_table_lock));
13972f87f8c3SGavin Shan 	p4d_clear_tests(&args);
13982f87f8c3SGavin Shan 	pgd_clear_tests(&args);
13992f87f8c3SGavin Shan 	p4d_populate_tests(&args);
14002f87f8c3SGavin Shan 	pgd_populate_tests(&args);
14012f87f8c3SGavin Shan 	spin_unlock(&(args.mm->page_table_lock));
1402e8edf0adSAneesh Kumar K.V 
14033c9b84f0SGavin Shan 	destroy_args(&args);
1404399145f9SAnshuman Khandual 	return 0;
1405399145f9SAnshuman Khandual }
1406399145f9SAnshuman Khandual late_initcall(debug_vm_pgtable);