1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * This kernel test validates architecture page table helpers and 4 * accessors and helps in verifying their continued compliance with 5 * expected generic MM semantics. 6 * 7 * Copyright (C) 2019 ARM Ltd. 8 * 9 * Author: Anshuman Khandual <anshuman.khandual@arm.com> 10 */ 11 #define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__ 12 13 #include <linux/gfp.h> 14 #include <linux/highmem.h> 15 #include <linux/hugetlb.h> 16 #include <linux/kernel.h> 17 #include <linux/kconfig.h> 18 #include <linux/mm.h> 19 #include <linux/mman.h> 20 #include <linux/mm_types.h> 21 #include <linux/module.h> 22 #include <linux/pfn_t.h> 23 #include <linux/printk.h> 24 #include <linux/pgtable.h> 25 #include <linux/random.h> 26 #include <linux/spinlock.h> 27 #include <linux/swap.h> 28 #include <linux/swapops.h> 29 #include <linux/start_kernel.h> 30 #include <linux/sched/mm.h> 31 #include <linux/io.h> 32 33 #include <asm/cacheflush.h> 34 #include <asm/pgalloc.h> 35 #include <asm/tlbflush.h> 36 37 /* 38 * Please refer Documentation/mm/arch_pgtable_helpers.rst for the semantics 39 * expectations that are being validated here. All future changes in here 40 * or the documentation need to be in sync. 41 * 42 * On s390 platform, the lower 4 bits are used to identify given page table 43 * entry type. But these bits might affect the ability to clear entries with 44 * pxx_clear() because of how dynamic page table folding works on s390. So 45 * while loading up the entries do not change the lower 4 bits. It does not 46 * have affect any other platform. Also avoid the 62nd bit on ppc64 that is 47 * used to mark a pte entry. 48 */ 49 #define S390_SKIP_MASK GENMASK(3, 0) 50 #if __BITS_PER_LONG == 64 51 #define PPC64_SKIP_MASK GENMASK(62, 62) 52 #else 53 #define PPC64_SKIP_MASK 0x0 54 #endif 55 #define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK) 56 #define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK) 57 #define RANDOM_NZVALUE GENMASK(7, 0) 58 59 struct pgtable_debug_args { 60 struct mm_struct *mm; 61 struct vm_area_struct *vma; 62 63 pgd_t *pgdp; 64 p4d_t *p4dp; 65 pud_t *pudp; 66 pmd_t *pmdp; 67 pte_t *ptep; 68 69 p4d_t *start_p4dp; 70 pud_t *start_pudp; 71 pmd_t *start_pmdp; 72 pgtable_t start_ptep; 73 74 unsigned long vaddr; 75 pgprot_t page_prot; 76 pgprot_t page_prot_none; 77 78 bool is_contiguous_page; 79 unsigned long pud_pfn; 80 unsigned long pmd_pfn; 81 unsigned long pte_pfn; 82 83 unsigned long fixed_pgd_pfn; 84 unsigned long fixed_p4d_pfn; 85 unsigned long fixed_pud_pfn; 86 unsigned long fixed_pmd_pfn; 87 unsigned long fixed_pte_pfn; 88 }; 89 90 static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx) 91 { 92 pgprot_t prot = vm_get_page_prot(idx); 93 pte_t pte = pfn_pte(args->fixed_pte_pfn, prot); 94 unsigned long val = idx, *ptr = &val; 95 96 pr_debug("Validating PTE basic (%pGv)\n", ptr); 97 98 /* 99 * This test needs to be executed after the given page table entry 100 * is created with pfn_pte() to make sure that vm_get_page_prot(idx) 101 * does not have the dirty bit enabled from the beginning. This is 102 * important for platforms like arm64 where (!PTE_RDONLY) indicate 103 * dirty bit being set. 104 */ 105 WARN_ON(pte_dirty(pte_wrprotect(pte))); 106 107 WARN_ON(!pte_same(pte, pte)); 108 WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte)))); 109 WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte)))); 110 WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte)))); 111 WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte)))); 112 WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte)))); 113 WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte)))); 114 WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte)))); 115 WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte)))); 116 } 117 118 static void __init pte_advanced_tests(struct pgtable_debug_args *args) 119 { 120 struct page *page; 121 pte_t pte; 122 123 /* 124 * Architectures optimize set_pte_at by avoiding TLB flush. 125 * This requires set_pte_at to be not used to update an 126 * existing pte entry. Clear pte before we do set_pte_at 127 * 128 * flush_dcache_page() is called after set_pte_at() to clear 129 * PG_arch_1 for the page on ARM64. The page flag isn't cleared 130 * when it's released and page allocation check will fail when 131 * the page is allocated again. For architectures other than ARM64, 132 * the unexpected overhead of cache flushing is acceptable. 133 */ 134 page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL; 135 if (!page) 136 return; 137 138 pr_debug("Validating PTE advanced\n"); 139 pte = pfn_pte(args->pte_pfn, args->page_prot); 140 set_pte_at(args->mm, args->vaddr, args->ptep, pte); 141 flush_dcache_page(page); 142 ptep_set_wrprotect(args->mm, args->vaddr, args->ptep); 143 pte = ptep_get(args->ptep); 144 WARN_ON(pte_write(pte)); 145 ptep_get_and_clear(args->mm, args->vaddr, args->ptep); 146 pte = ptep_get(args->ptep); 147 WARN_ON(!pte_none(pte)); 148 149 pte = pfn_pte(args->pte_pfn, args->page_prot); 150 pte = pte_wrprotect(pte); 151 pte = pte_mkclean(pte); 152 set_pte_at(args->mm, args->vaddr, args->ptep, pte); 153 flush_dcache_page(page); 154 pte = pte_mkwrite(pte); 155 pte = pte_mkdirty(pte); 156 ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1); 157 pte = ptep_get(args->ptep); 158 WARN_ON(!(pte_write(pte) && pte_dirty(pte))); 159 ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1); 160 pte = ptep_get(args->ptep); 161 WARN_ON(!pte_none(pte)); 162 163 pte = pfn_pte(args->pte_pfn, args->page_prot); 164 pte = pte_mkyoung(pte); 165 set_pte_at(args->mm, args->vaddr, args->ptep, pte); 166 flush_dcache_page(page); 167 ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep); 168 pte = ptep_get(args->ptep); 169 WARN_ON(pte_young(pte)); 170 171 ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1); 172 } 173 174 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 175 static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) 176 { 177 pgprot_t prot = vm_get_page_prot(idx); 178 unsigned long val = idx, *ptr = &val; 179 pmd_t pmd; 180 181 if (!has_transparent_hugepage()) 182 return; 183 184 pr_debug("Validating PMD basic (%pGv)\n", ptr); 185 pmd = pfn_pmd(args->fixed_pmd_pfn, prot); 186 187 /* 188 * This test needs to be executed after the given page table entry 189 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx) 190 * does not have the dirty bit enabled from the beginning. This is 191 * important for platforms like arm64 where (!PTE_RDONLY) indicate 192 * dirty bit being set. 193 */ 194 WARN_ON(pmd_dirty(pmd_wrprotect(pmd))); 195 196 197 WARN_ON(!pmd_same(pmd, pmd)); 198 WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd)))); 199 WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd)))); 200 WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd)))); 201 WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd)))); 202 WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd)))); 203 WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd)))); 204 WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd)))); 205 WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd)))); 206 /* 207 * A huge page does not point to next level page table 208 * entry. Hence this must qualify as pmd_bad(). 209 */ 210 WARN_ON(!pmd_bad(pmd_mkhuge(pmd))); 211 } 212 213 static void __init pmd_advanced_tests(struct pgtable_debug_args *args) 214 { 215 struct page *page; 216 pmd_t pmd; 217 unsigned long vaddr = args->vaddr; 218 219 if (!has_transparent_hugepage()) 220 return; 221 222 page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL; 223 if (!page) 224 return; 225 226 /* 227 * flush_dcache_page() is called after set_pmd_at() to clear 228 * PG_arch_1 for the page on ARM64. The page flag isn't cleared 229 * when it's released and page allocation check will fail when 230 * the page is allocated again. For architectures other than ARM64, 231 * the unexpected overhead of cache flushing is acceptable. 232 */ 233 pr_debug("Validating PMD advanced\n"); 234 /* Align the address wrt HPAGE_PMD_SIZE */ 235 vaddr &= HPAGE_PMD_MASK; 236 237 pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep); 238 239 pmd = pfn_pmd(args->pmd_pfn, args->page_prot); 240 set_pmd_at(args->mm, vaddr, args->pmdp, pmd); 241 flush_dcache_page(page); 242 pmdp_set_wrprotect(args->mm, vaddr, args->pmdp); 243 pmd = READ_ONCE(*args->pmdp); 244 WARN_ON(pmd_write(pmd)); 245 pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp); 246 pmd = READ_ONCE(*args->pmdp); 247 WARN_ON(!pmd_none(pmd)); 248 249 pmd = pfn_pmd(args->pmd_pfn, args->page_prot); 250 pmd = pmd_wrprotect(pmd); 251 pmd = pmd_mkclean(pmd); 252 set_pmd_at(args->mm, vaddr, args->pmdp, pmd); 253 flush_dcache_page(page); 254 pmd = pmd_mkwrite(pmd); 255 pmd = pmd_mkdirty(pmd); 256 pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1); 257 pmd = READ_ONCE(*args->pmdp); 258 WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd))); 259 pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1); 260 pmd = READ_ONCE(*args->pmdp); 261 WARN_ON(!pmd_none(pmd)); 262 263 pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot)); 264 pmd = pmd_mkyoung(pmd); 265 set_pmd_at(args->mm, vaddr, args->pmdp, pmd); 266 flush_dcache_page(page); 267 pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp); 268 pmd = READ_ONCE(*args->pmdp); 269 WARN_ON(pmd_young(pmd)); 270 271 /* Clear the pte entries */ 272 pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp); 273 pgtable_trans_huge_withdraw(args->mm, args->pmdp); 274 } 275 276 static void __init pmd_leaf_tests(struct pgtable_debug_args *args) 277 { 278 pmd_t pmd; 279 280 if (!has_transparent_hugepage()) 281 return; 282 283 pr_debug("Validating PMD leaf\n"); 284 pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot); 285 286 /* 287 * PMD based THP is a leaf entry. 288 */ 289 pmd = pmd_mkhuge(pmd); 290 WARN_ON(!pmd_leaf(pmd)); 291 } 292 293 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 294 static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) 295 { 296 pgprot_t prot = vm_get_page_prot(idx); 297 unsigned long val = idx, *ptr = &val; 298 pud_t pud; 299 300 if (!has_transparent_hugepage()) 301 return; 302 303 pr_debug("Validating PUD basic (%pGv)\n", ptr); 304 pud = pfn_pud(args->fixed_pud_pfn, prot); 305 306 /* 307 * This test needs to be executed after the given page table entry 308 * is created with pfn_pud() to make sure that vm_get_page_prot(idx) 309 * does not have the dirty bit enabled from the beginning. This is 310 * important for platforms like arm64 where (!PTE_RDONLY) indicate 311 * dirty bit being set. 312 */ 313 WARN_ON(pud_dirty(pud_wrprotect(pud))); 314 315 WARN_ON(!pud_same(pud, pud)); 316 WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud)))); 317 WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud)))); 318 WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud)))); 319 WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud)))); 320 WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud)))); 321 WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud)))); 322 WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud)))); 323 WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud)))); 324 325 if (mm_pmd_folded(args->mm)) 326 return; 327 328 /* 329 * A huge page does not point to next level page table 330 * entry. Hence this must qualify as pud_bad(). 331 */ 332 WARN_ON(!pud_bad(pud_mkhuge(pud))); 333 } 334 335 static void __init pud_advanced_tests(struct pgtable_debug_args *args) 336 { 337 struct page *page; 338 unsigned long vaddr = args->vaddr; 339 pud_t pud; 340 341 if (!has_transparent_hugepage()) 342 return; 343 344 page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL; 345 if (!page) 346 return; 347 348 /* 349 * flush_dcache_page() is called after set_pud_at() to clear 350 * PG_arch_1 for the page on ARM64. The page flag isn't cleared 351 * when it's released and page allocation check will fail when 352 * the page is allocated again. For architectures other than ARM64, 353 * the unexpected overhead of cache flushing is acceptable. 354 */ 355 pr_debug("Validating PUD advanced\n"); 356 /* Align the address wrt HPAGE_PUD_SIZE */ 357 vaddr &= HPAGE_PUD_MASK; 358 359 pud = pfn_pud(args->pud_pfn, args->page_prot); 360 set_pud_at(args->mm, vaddr, args->pudp, pud); 361 flush_dcache_page(page); 362 pudp_set_wrprotect(args->mm, vaddr, args->pudp); 363 pud = READ_ONCE(*args->pudp); 364 WARN_ON(pud_write(pud)); 365 366 #ifndef __PAGETABLE_PMD_FOLDED 367 pudp_huge_get_and_clear(args->mm, vaddr, args->pudp); 368 pud = READ_ONCE(*args->pudp); 369 WARN_ON(!pud_none(pud)); 370 #endif /* __PAGETABLE_PMD_FOLDED */ 371 pud = pfn_pud(args->pud_pfn, args->page_prot); 372 pud = pud_wrprotect(pud); 373 pud = pud_mkclean(pud); 374 set_pud_at(args->mm, vaddr, args->pudp, pud); 375 flush_dcache_page(page); 376 pud = pud_mkwrite(pud); 377 pud = pud_mkdirty(pud); 378 pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1); 379 pud = READ_ONCE(*args->pudp); 380 WARN_ON(!(pud_write(pud) && pud_dirty(pud))); 381 382 #ifndef __PAGETABLE_PMD_FOLDED 383 pudp_huge_get_and_clear_full(args->mm, vaddr, args->pudp, 1); 384 pud = READ_ONCE(*args->pudp); 385 WARN_ON(!pud_none(pud)); 386 #endif /* __PAGETABLE_PMD_FOLDED */ 387 388 pud = pfn_pud(args->pud_pfn, args->page_prot); 389 pud = pud_mkyoung(pud); 390 set_pud_at(args->mm, vaddr, args->pudp, pud); 391 flush_dcache_page(page); 392 pudp_test_and_clear_young(args->vma, vaddr, args->pudp); 393 pud = READ_ONCE(*args->pudp); 394 WARN_ON(pud_young(pud)); 395 396 pudp_huge_get_and_clear(args->mm, vaddr, args->pudp); 397 } 398 399 static void __init pud_leaf_tests(struct pgtable_debug_args *args) 400 { 401 pud_t pud; 402 403 if (!has_transparent_hugepage()) 404 return; 405 406 pr_debug("Validating PUD leaf\n"); 407 pud = pfn_pud(args->fixed_pud_pfn, args->page_prot); 408 /* 409 * PUD based THP is a leaf entry. 410 */ 411 pud = pud_mkhuge(pud); 412 WARN_ON(!pud_leaf(pud)); 413 } 414 #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 415 static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { } 416 static void __init pud_advanced_tests(struct pgtable_debug_args *args) { } 417 static void __init pud_leaf_tests(struct pgtable_debug_args *args) { } 418 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 419 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ 420 static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { } 421 static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { } 422 static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { } 423 static void __init pud_advanced_tests(struct pgtable_debug_args *args) { } 424 static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { } 425 static void __init pud_leaf_tests(struct pgtable_debug_args *args) { } 426 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 427 428 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP 429 static void __init pmd_huge_tests(struct pgtable_debug_args *args) 430 { 431 pmd_t pmd; 432 433 if (!arch_vmap_pmd_supported(args->page_prot)) 434 return; 435 436 pr_debug("Validating PMD huge\n"); 437 /* 438 * X86 defined pmd_set_huge() verifies that the given 439 * PMD is not a populated non-leaf entry. 440 */ 441 WRITE_ONCE(*args->pmdp, __pmd(0)); 442 WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot)); 443 WARN_ON(!pmd_clear_huge(args->pmdp)); 444 pmd = READ_ONCE(*args->pmdp); 445 WARN_ON(!pmd_none(pmd)); 446 } 447 448 static void __init pud_huge_tests(struct pgtable_debug_args *args) 449 { 450 pud_t pud; 451 452 if (!arch_vmap_pud_supported(args->page_prot)) 453 return; 454 455 pr_debug("Validating PUD huge\n"); 456 /* 457 * X86 defined pud_set_huge() verifies that the given 458 * PUD is not a populated non-leaf entry. 459 */ 460 WRITE_ONCE(*args->pudp, __pud(0)); 461 WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot)); 462 WARN_ON(!pud_clear_huge(args->pudp)); 463 pud = READ_ONCE(*args->pudp); 464 WARN_ON(!pud_none(pud)); 465 } 466 #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ 467 static void __init pmd_huge_tests(struct pgtable_debug_args *args) { } 468 static void __init pud_huge_tests(struct pgtable_debug_args *args) { } 469 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ 470 471 static void __init p4d_basic_tests(struct pgtable_debug_args *args) 472 { 473 p4d_t p4d; 474 475 pr_debug("Validating P4D basic\n"); 476 memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t)); 477 WARN_ON(!p4d_same(p4d, p4d)); 478 } 479 480 static void __init pgd_basic_tests(struct pgtable_debug_args *args) 481 { 482 pgd_t pgd; 483 484 pr_debug("Validating PGD basic\n"); 485 memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t)); 486 WARN_ON(!pgd_same(pgd, pgd)); 487 } 488 489 #ifndef __PAGETABLE_PUD_FOLDED 490 static void __init pud_clear_tests(struct pgtable_debug_args *args) 491 { 492 pud_t pud = READ_ONCE(*args->pudp); 493 494 if (mm_pmd_folded(args->mm)) 495 return; 496 497 pr_debug("Validating PUD clear\n"); 498 pud = __pud(pud_val(pud) | RANDOM_ORVALUE); 499 WRITE_ONCE(*args->pudp, pud); 500 pud_clear(args->pudp); 501 pud = READ_ONCE(*args->pudp); 502 WARN_ON(!pud_none(pud)); 503 } 504 505 static void __init pud_populate_tests(struct pgtable_debug_args *args) 506 { 507 pud_t pud; 508 509 if (mm_pmd_folded(args->mm)) 510 return; 511 512 pr_debug("Validating PUD populate\n"); 513 /* 514 * This entry points to next level page table page. 515 * Hence this must not qualify as pud_bad(). 516 */ 517 pud_populate(args->mm, args->pudp, args->start_pmdp); 518 pud = READ_ONCE(*args->pudp); 519 WARN_ON(pud_bad(pud)); 520 } 521 #else /* !__PAGETABLE_PUD_FOLDED */ 522 static void __init pud_clear_tests(struct pgtable_debug_args *args) { } 523 static void __init pud_populate_tests(struct pgtable_debug_args *args) { } 524 #endif /* PAGETABLE_PUD_FOLDED */ 525 526 #ifndef __PAGETABLE_P4D_FOLDED 527 static void __init p4d_clear_tests(struct pgtable_debug_args *args) 528 { 529 p4d_t p4d = READ_ONCE(*args->p4dp); 530 531 if (mm_pud_folded(args->mm)) 532 return; 533 534 pr_debug("Validating P4D clear\n"); 535 p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE); 536 WRITE_ONCE(*args->p4dp, p4d); 537 p4d_clear(args->p4dp); 538 p4d = READ_ONCE(*args->p4dp); 539 WARN_ON(!p4d_none(p4d)); 540 } 541 542 static void __init p4d_populate_tests(struct pgtable_debug_args *args) 543 { 544 p4d_t p4d; 545 546 if (mm_pud_folded(args->mm)) 547 return; 548 549 pr_debug("Validating P4D populate\n"); 550 /* 551 * This entry points to next level page table page. 552 * Hence this must not qualify as p4d_bad(). 553 */ 554 pud_clear(args->pudp); 555 p4d_clear(args->p4dp); 556 p4d_populate(args->mm, args->p4dp, args->start_pudp); 557 p4d = READ_ONCE(*args->p4dp); 558 WARN_ON(p4d_bad(p4d)); 559 } 560 561 static void __init pgd_clear_tests(struct pgtable_debug_args *args) 562 { 563 pgd_t pgd = READ_ONCE(*(args->pgdp)); 564 565 if (mm_p4d_folded(args->mm)) 566 return; 567 568 pr_debug("Validating PGD clear\n"); 569 pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE); 570 WRITE_ONCE(*args->pgdp, pgd); 571 pgd_clear(args->pgdp); 572 pgd = READ_ONCE(*args->pgdp); 573 WARN_ON(!pgd_none(pgd)); 574 } 575 576 static void __init pgd_populate_tests(struct pgtable_debug_args *args) 577 { 578 pgd_t pgd; 579 580 if (mm_p4d_folded(args->mm)) 581 return; 582 583 pr_debug("Validating PGD populate\n"); 584 /* 585 * This entry points to next level page table page. 586 * Hence this must not qualify as pgd_bad(). 587 */ 588 p4d_clear(args->p4dp); 589 pgd_clear(args->pgdp); 590 pgd_populate(args->mm, args->pgdp, args->start_p4dp); 591 pgd = READ_ONCE(*args->pgdp); 592 WARN_ON(pgd_bad(pgd)); 593 } 594 #else /* !__PAGETABLE_P4D_FOLDED */ 595 static void __init p4d_clear_tests(struct pgtable_debug_args *args) { } 596 static void __init pgd_clear_tests(struct pgtable_debug_args *args) { } 597 static void __init p4d_populate_tests(struct pgtable_debug_args *args) { } 598 static void __init pgd_populate_tests(struct pgtable_debug_args *args) { } 599 #endif /* PAGETABLE_P4D_FOLDED */ 600 601 static void __init pte_clear_tests(struct pgtable_debug_args *args) 602 { 603 struct page *page; 604 pte_t pte = pfn_pte(args->pte_pfn, args->page_prot); 605 606 page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL; 607 if (!page) 608 return; 609 610 /* 611 * flush_dcache_page() is called after set_pte_at() to clear 612 * PG_arch_1 for the page on ARM64. The page flag isn't cleared 613 * when it's released and page allocation check will fail when 614 * the page is allocated again. For architectures other than ARM64, 615 * the unexpected overhead of cache flushing is acceptable. 616 */ 617 pr_debug("Validating PTE clear\n"); 618 #ifndef CONFIG_RISCV 619 pte = __pte(pte_val(pte) | RANDOM_ORVALUE); 620 #endif 621 set_pte_at(args->mm, args->vaddr, args->ptep, pte); 622 flush_dcache_page(page); 623 barrier(); 624 ptep_clear(args->mm, args->vaddr, args->ptep); 625 pte = ptep_get(args->ptep); 626 WARN_ON(!pte_none(pte)); 627 } 628 629 static void __init pmd_clear_tests(struct pgtable_debug_args *args) 630 { 631 pmd_t pmd = READ_ONCE(*args->pmdp); 632 633 pr_debug("Validating PMD clear\n"); 634 pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE); 635 WRITE_ONCE(*args->pmdp, pmd); 636 pmd_clear(args->pmdp); 637 pmd = READ_ONCE(*args->pmdp); 638 WARN_ON(!pmd_none(pmd)); 639 } 640 641 static void __init pmd_populate_tests(struct pgtable_debug_args *args) 642 { 643 pmd_t pmd; 644 645 pr_debug("Validating PMD populate\n"); 646 /* 647 * This entry points to next level page table page. 648 * Hence this must not qualify as pmd_bad(). 649 */ 650 pmd_populate(args->mm, args->pmdp, args->start_ptep); 651 pmd = READ_ONCE(*args->pmdp); 652 WARN_ON(pmd_bad(pmd)); 653 } 654 655 static void __init pte_special_tests(struct pgtable_debug_args *args) 656 { 657 pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot); 658 659 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) 660 return; 661 662 pr_debug("Validating PTE special\n"); 663 WARN_ON(!pte_special(pte_mkspecial(pte))); 664 } 665 666 static void __init pte_protnone_tests(struct pgtable_debug_args *args) 667 { 668 pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none); 669 670 if (!IS_ENABLED(CONFIG_NUMA_BALANCING)) 671 return; 672 673 pr_debug("Validating PTE protnone\n"); 674 WARN_ON(!pte_protnone(pte)); 675 WARN_ON(!pte_present(pte)); 676 } 677 678 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 679 static void __init pmd_protnone_tests(struct pgtable_debug_args *args) 680 { 681 pmd_t pmd; 682 683 if (!IS_ENABLED(CONFIG_NUMA_BALANCING)) 684 return; 685 686 if (!has_transparent_hugepage()) 687 return; 688 689 pr_debug("Validating PMD protnone\n"); 690 pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none)); 691 WARN_ON(!pmd_protnone(pmd)); 692 WARN_ON(!pmd_present(pmd)); 693 } 694 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ 695 static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { } 696 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 697 698 #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP 699 static void __init pte_devmap_tests(struct pgtable_debug_args *args) 700 { 701 pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot); 702 703 pr_debug("Validating PTE devmap\n"); 704 WARN_ON(!pte_devmap(pte_mkdevmap(pte))); 705 } 706 707 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 708 static void __init pmd_devmap_tests(struct pgtable_debug_args *args) 709 { 710 pmd_t pmd; 711 712 if (!has_transparent_hugepage()) 713 return; 714 715 pr_debug("Validating PMD devmap\n"); 716 pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot); 717 WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd))); 718 } 719 720 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 721 static void __init pud_devmap_tests(struct pgtable_debug_args *args) 722 { 723 pud_t pud; 724 725 if (!has_transparent_hugepage()) 726 return; 727 728 pr_debug("Validating PUD devmap\n"); 729 pud = pfn_pud(args->fixed_pud_pfn, args->page_prot); 730 WARN_ON(!pud_devmap(pud_mkdevmap(pud))); 731 } 732 #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 733 static void __init pud_devmap_tests(struct pgtable_debug_args *args) { } 734 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 735 #else /* CONFIG_TRANSPARENT_HUGEPAGE */ 736 static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { } 737 static void __init pud_devmap_tests(struct pgtable_debug_args *args) { } 738 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 739 #else 740 static void __init pte_devmap_tests(struct pgtable_debug_args *args) { } 741 static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { } 742 static void __init pud_devmap_tests(struct pgtable_debug_args *args) { } 743 #endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */ 744 745 static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args) 746 { 747 pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot); 748 749 if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)) 750 return; 751 752 pr_debug("Validating PTE soft dirty\n"); 753 WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte))); 754 WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte))); 755 } 756 757 static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args) 758 { 759 pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot); 760 761 if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)) 762 return; 763 764 pr_debug("Validating PTE swap soft dirty\n"); 765 WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte))); 766 WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte))); 767 } 768 769 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 770 static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) 771 { 772 pmd_t pmd; 773 774 if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)) 775 return; 776 777 if (!has_transparent_hugepage()) 778 return; 779 780 pr_debug("Validating PMD soft dirty\n"); 781 pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot); 782 WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd))); 783 WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd))); 784 } 785 786 static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) 787 { 788 pmd_t pmd; 789 790 if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) || 791 !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION)) 792 return; 793 794 if (!has_transparent_hugepage()) 795 return; 796 797 pr_debug("Validating PMD swap soft dirty\n"); 798 pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot); 799 WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd))); 800 WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd))); 801 } 802 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ 803 static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { } 804 static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { } 805 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 806 807 static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args) 808 { 809 #ifdef __HAVE_ARCH_PTE_SWP_EXCLUSIVE 810 pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot); 811 812 pr_debug("Validating PTE swap exclusive\n"); 813 pte = pte_swp_mkexclusive(pte); 814 WARN_ON(!pte_swp_exclusive(pte)); 815 pte = pte_swp_clear_exclusive(pte); 816 WARN_ON(pte_swp_exclusive(pte)); 817 #endif /* __HAVE_ARCH_PTE_SWP_EXCLUSIVE */ 818 } 819 820 static void __init pte_swap_tests(struct pgtable_debug_args *args) 821 { 822 swp_entry_t swp; 823 pte_t pte; 824 825 pr_debug("Validating PTE swap\n"); 826 pte = pfn_pte(args->fixed_pte_pfn, args->page_prot); 827 swp = __pte_to_swp_entry(pte); 828 pte = __swp_entry_to_pte(swp); 829 WARN_ON(args->fixed_pte_pfn != pte_pfn(pte)); 830 } 831 832 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 833 static void __init pmd_swap_tests(struct pgtable_debug_args *args) 834 { 835 swp_entry_t swp; 836 pmd_t pmd; 837 838 if (!has_transparent_hugepage()) 839 return; 840 841 pr_debug("Validating PMD swap\n"); 842 pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot); 843 swp = __pmd_to_swp_entry(pmd); 844 pmd = __swp_entry_to_pmd(swp); 845 WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd)); 846 } 847 #else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */ 848 static void __init pmd_swap_tests(struct pgtable_debug_args *args) { } 849 #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */ 850 851 static void __init swap_migration_tests(struct pgtable_debug_args *args) 852 { 853 struct page *page; 854 swp_entry_t swp; 855 856 if (!IS_ENABLED(CONFIG_MIGRATION)) 857 return; 858 859 /* 860 * swap_migration_tests() requires a dedicated page as it needs to 861 * be locked before creating a migration entry from it. Locking the 862 * page that actually maps kernel text ('start_kernel') can be real 863 * problematic. Lets use the allocated page explicitly for this 864 * purpose. 865 */ 866 page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL; 867 if (!page) 868 return; 869 870 pr_debug("Validating swap migration\n"); 871 872 /* 873 * make_[readable|writable]_migration_entry() expects given page to 874 * be locked, otherwise it stumbles upon a BUG_ON(). 875 */ 876 __SetPageLocked(page); 877 swp = make_writable_migration_entry(page_to_pfn(page)); 878 WARN_ON(!is_migration_entry(swp)); 879 WARN_ON(!is_writable_migration_entry(swp)); 880 881 swp = make_readable_migration_entry(swp_offset(swp)); 882 WARN_ON(!is_migration_entry(swp)); 883 WARN_ON(is_writable_migration_entry(swp)); 884 885 swp = make_readable_migration_entry(page_to_pfn(page)); 886 WARN_ON(!is_migration_entry(swp)); 887 WARN_ON(is_writable_migration_entry(swp)); 888 __ClearPageLocked(page); 889 } 890 891 #ifdef CONFIG_HUGETLB_PAGE 892 static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) 893 { 894 struct page *page; 895 pte_t pte; 896 897 pr_debug("Validating HugeTLB basic\n"); 898 /* 899 * Accessing the page associated with the pfn is safe here, 900 * as it was previously derived from a real kernel symbol. 901 */ 902 page = pfn_to_page(args->fixed_pmd_pfn); 903 pte = mk_huge_pte(page, args->page_prot); 904 905 WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte))); 906 WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte)))); 907 WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte)))); 908 909 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 910 pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot); 911 912 WARN_ON(!pte_huge(pte_mkhuge(pte))); 913 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 914 } 915 #else /* !CONFIG_HUGETLB_PAGE */ 916 static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { } 917 #endif /* CONFIG_HUGETLB_PAGE */ 918 919 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 920 static void __init pmd_thp_tests(struct pgtable_debug_args *args) 921 { 922 pmd_t pmd; 923 924 if (!has_transparent_hugepage()) 925 return; 926 927 pr_debug("Validating PMD based THP\n"); 928 /* 929 * pmd_trans_huge() and pmd_present() must return positive after 930 * MMU invalidation with pmd_mkinvalid(). This behavior is an 931 * optimization for transparent huge page. pmd_trans_huge() must 932 * be true if pmd_page() returns a valid THP to avoid taking the 933 * pmd_lock when others walk over non transhuge pmds (i.e. there 934 * are no THP allocated). Especially when splitting a THP and 935 * removing the present bit from the pmd, pmd_trans_huge() still 936 * needs to return true. pmd_present() should be true whenever 937 * pmd_trans_huge() returns true. 938 */ 939 pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot); 940 WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd))); 941 942 #ifndef __HAVE_ARCH_PMDP_INVALIDATE 943 WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd)))); 944 WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd)))); 945 #endif /* __HAVE_ARCH_PMDP_INVALIDATE */ 946 } 947 948 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 949 static void __init pud_thp_tests(struct pgtable_debug_args *args) 950 { 951 pud_t pud; 952 953 if (!has_transparent_hugepage()) 954 return; 955 956 pr_debug("Validating PUD based THP\n"); 957 pud = pfn_pud(args->fixed_pud_pfn, args->page_prot); 958 WARN_ON(!pud_trans_huge(pud_mkhuge(pud))); 959 960 /* 961 * pud_mkinvalid() has been dropped for now. Enable back 962 * these tests when it comes back with a modified pud_present(). 963 * 964 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud)))); 965 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud)))); 966 */ 967 } 968 #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 969 static void __init pud_thp_tests(struct pgtable_debug_args *args) { } 970 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 971 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ 972 static void __init pmd_thp_tests(struct pgtable_debug_args *args) { } 973 static void __init pud_thp_tests(struct pgtable_debug_args *args) { } 974 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 975 976 static unsigned long __init get_random_vaddr(void) 977 { 978 unsigned long random_vaddr, random_pages, total_user_pages; 979 980 total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE; 981 982 random_pages = get_random_long() % total_user_pages; 983 random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE; 984 985 return random_vaddr; 986 } 987 988 static void __init destroy_args(struct pgtable_debug_args *args) 989 { 990 struct page *page = NULL; 991 992 /* Free (huge) page */ 993 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 994 IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) && 995 has_transparent_hugepage() && 996 args->pud_pfn != ULONG_MAX) { 997 if (args->is_contiguous_page) { 998 free_contig_range(args->pud_pfn, 999 (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT))); 1000 } else { 1001 page = pfn_to_page(args->pud_pfn); 1002 __free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT); 1003 } 1004 1005 args->pud_pfn = ULONG_MAX; 1006 args->pmd_pfn = ULONG_MAX; 1007 args->pte_pfn = ULONG_MAX; 1008 } 1009 1010 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 1011 has_transparent_hugepage() && 1012 args->pmd_pfn != ULONG_MAX) { 1013 if (args->is_contiguous_page) { 1014 free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER)); 1015 } else { 1016 page = pfn_to_page(args->pmd_pfn); 1017 __free_pages(page, HPAGE_PMD_ORDER); 1018 } 1019 1020 args->pmd_pfn = ULONG_MAX; 1021 args->pte_pfn = ULONG_MAX; 1022 } 1023 1024 if (args->pte_pfn != ULONG_MAX) { 1025 page = pfn_to_page(args->pte_pfn); 1026 __free_pages(page, 0); 1027 1028 args->pte_pfn = ULONG_MAX; 1029 } 1030 1031 /* Free page table entries */ 1032 if (args->start_ptep) { 1033 pte_free(args->mm, args->start_ptep); 1034 mm_dec_nr_ptes(args->mm); 1035 } 1036 1037 if (args->start_pmdp) { 1038 pmd_free(args->mm, args->start_pmdp); 1039 mm_dec_nr_pmds(args->mm); 1040 } 1041 1042 if (args->start_pudp) { 1043 pud_free(args->mm, args->start_pudp); 1044 mm_dec_nr_puds(args->mm); 1045 } 1046 1047 if (args->start_p4dp) 1048 p4d_free(args->mm, args->start_p4dp); 1049 1050 /* Free vma and mm struct */ 1051 if (args->vma) 1052 vm_area_free(args->vma); 1053 1054 if (args->mm) 1055 mmdrop(args->mm); 1056 } 1057 1058 static struct page * __init 1059 debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order) 1060 { 1061 struct page *page = NULL; 1062 1063 #ifdef CONFIG_CONTIG_ALLOC 1064 if (order >= MAX_ORDER) { 1065 page = alloc_contig_pages((1 << order), GFP_KERNEL, 1066 first_online_node, NULL); 1067 if (page) { 1068 args->is_contiguous_page = true; 1069 return page; 1070 } 1071 } 1072 #endif 1073 1074 if (order < MAX_ORDER) 1075 page = alloc_pages(GFP_KERNEL, order); 1076 1077 return page; 1078 } 1079 1080 static int __init init_args(struct pgtable_debug_args *args) 1081 { 1082 struct page *page = NULL; 1083 phys_addr_t phys; 1084 int ret = 0; 1085 1086 /* 1087 * Initialize the debugging data. 1088 * 1089 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE) 1090 * will help create page table entries with PROT_NONE permission as 1091 * required for pxx_protnone_tests(). 1092 */ 1093 memset(args, 0, sizeof(*args)); 1094 args->vaddr = get_random_vaddr(); 1095 args->page_prot = vm_get_page_prot(VM_ACCESS_FLAGS); 1096 args->page_prot_none = vm_get_page_prot(VM_NONE); 1097 args->is_contiguous_page = false; 1098 args->pud_pfn = ULONG_MAX; 1099 args->pmd_pfn = ULONG_MAX; 1100 args->pte_pfn = ULONG_MAX; 1101 args->fixed_pgd_pfn = ULONG_MAX; 1102 args->fixed_p4d_pfn = ULONG_MAX; 1103 args->fixed_pud_pfn = ULONG_MAX; 1104 args->fixed_pmd_pfn = ULONG_MAX; 1105 args->fixed_pte_pfn = ULONG_MAX; 1106 1107 /* Allocate mm and vma */ 1108 args->mm = mm_alloc(); 1109 if (!args->mm) { 1110 pr_err("Failed to allocate mm struct\n"); 1111 ret = -ENOMEM; 1112 goto error; 1113 } 1114 1115 args->vma = vm_area_alloc(args->mm); 1116 if (!args->vma) { 1117 pr_err("Failed to allocate vma\n"); 1118 ret = -ENOMEM; 1119 goto error; 1120 } 1121 1122 /* 1123 * Allocate page table entries. They will be modified in the tests. 1124 * Lets save the page table entries so that they can be released 1125 * when the tests are completed. 1126 */ 1127 args->pgdp = pgd_offset(args->mm, args->vaddr); 1128 args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr); 1129 if (!args->p4dp) { 1130 pr_err("Failed to allocate p4d entries\n"); 1131 ret = -ENOMEM; 1132 goto error; 1133 } 1134 args->start_p4dp = p4d_offset(args->pgdp, 0UL); 1135 WARN_ON(!args->start_p4dp); 1136 1137 args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr); 1138 if (!args->pudp) { 1139 pr_err("Failed to allocate pud entries\n"); 1140 ret = -ENOMEM; 1141 goto error; 1142 } 1143 args->start_pudp = pud_offset(args->p4dp, 0UL); 1144 WARN_ON(!args->start_pudp); 1145 1146 args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr); 1147 if (!args->pmdp) { 1148 pr_err("Failed to allocate pmd entries\n"); 1149 ret = -ENOMEM; 1150 goto error; 1151 } 1152 args->start_pmdp = pmd_offset(args->pudp, 0UL); 1153 WARN_ON(!args->start_pmdp); 1154 1155 if (pte_alloc(args->mm, args->pmdp)) { 1156 pr_err("Failed to allocate pte entries\n"); 1157 ret = -ENOMEM; 1158 goto error; 1159 } 1160 args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp)); 1161 WARN_ON(!args->start_ptep); 1162 1163 /* 1164 * PFN for mapping at PTE level is determined from a standard kernel 1165 * text symbol. But pfns for higher page table levels are derived by 1166 * masking lower bits of this real pfn. These derived pfns might not 1167 * exist on the platform but that does not really matter as pfn_pxx() 1168 * helpers will still create appropriate entries for the test. This 1169 * helps avoid large memory block allocations to be used for mapping 1170 * at higher page table levels in some of the tests. 1171 */ 1172 phys = __pa_symbol(&start_kernel); 1173 args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK); 1174 args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK); 1175 args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK); 1176 args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK); 1177 args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK); 1178 WARN_ON(!pfn_valid(args->fixed_pte_pfn)); 1179 1180 /* 1181 * Allocate (huge) pages because some of the tests need to access 1182 * the data in the pages. The corresponding tests will be skipped 1183 * if we fail to allocate (huge) pages. 1184 */ 1185 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 1186 IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) && 1187 has_transparent_hugepage()) { 1188 page = debug_vm_pgtable_alloc_huge_page(args, 1189 HPAGE_PUD_SHIFT - PAGE_SHIFT); 1190 if (page) { 1191 args->pud_pfn = page_to_pfn(page); 1192 args->pmd_pfn = args->pud_pfn; 1193 args->pte_pfn = args->pud_pfn; 1194 return 0; 1195 } 1196 } 1197 1198 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 1199 has_transparent_hugepage()) { 1200 page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER); 1201 if (page) { 1202 args->pmd_pfn = page_to_pfn(page); 1203 args->pte_pfn = args->pmd_pfn; 1204 return 0; 1205 } 1206 } 1207 1208 page = alloc_pages(GFP_KERNEL, 0); 1209 if (page) 1210 args->pte_pfn = page_to_pfn(page); 1211 1212 return 0; 1213 1214 error: 1215 destroy_args(args); 1216 return ret; 1217 } 1218 1219 static int __init debug_vm_pgtable(void) 1220 { 1221 struct pgtable_debug_args args; 1222 spinlock_t *ptl = NULL; 1223 int idx, ret; 1224 1225 pr_info("Validating architecture page table helpers\n"); 1226 ret = init_args(&args); 1227 if (ret) 1228 return ret; 1229 1230 /* 1231 * Iterate over each possible vm_flags to make sure that all 1232 * the basic page table transformation validations just hold 1233 * true irrespective of the starting protection value for a 1234 * given page table entry. 1235 * 1236 * Protection based vm_flags combinatins are always linear 1237 * and increasing i.e starting from VM_NONE and going upto 1238 * (VM_SHARED | READ | WRITE | EXEC). 1239 */ 1240 #define VM_FLAGS_START (VM_NONE) 1241 #define VM_FLAGS_END (VM_SHARED | VM_EXEC | VM_WRITE | VM_READ) 1242 1243 for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) { 1244 pte_basic_tests(&args, idx); 1245 pmd_basic_tests(&args, idx); 1246 pud_basic_tests(&args, idx); 1247 } 1248 1249 /* 1250 * Both P4D and PGD level tests are very basic which do not 1251 * involve creating page table entries from the protection 1252 * value and the given pfn. Hence just keep them out from 1253 * the above iteration for now to save some test execution 1254 * time. 1255 */ 1256 p4d_basic_tests(&args); 1257 pgd_basic_tests(&args); 1258 1259 pmd_leaf_tests(&args); 1260 pud_leaf_tests(&args); 1261 1262 pte_special_tests(&args); 1263 pte_protnone_tests(&args); 1264 pmd_protnone_tests(&args); 1265 1266 pte_devmap_tests(&args); 1267 pmd_devmap_tests(&args); 1268 pud_devmap_tests(&args); 1269 1270 pte_soft_dirty_tests(&args); 1271 pmd_soft_dirty_tests(&args); 1272 pte_swap_soft_dirty_tests(&args); 1273 pmd_swap_soft_dirty_tests(&args); 1274 1275 pte_swap_exclusive_tests(&args); 1276 1277 pte_swap_tests(&args); 1278 pmd_swap_tests(&args); 1279 1280 swap_migration_tests(&args); 1281 1282 pmd_thp_tests(&args); 1283 pud_thp_tests(&args); 1284 1285 hugetlb_basic_tests(&args); 1286 1287 /* 1288 * Page table modifying tests. They need to hold 1289 * proper page table lock. 1290 */ 1291 1292 args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl); 1293 pte_clear_tests(&args); 1294 pte_advanced_tests(&args); 1295 pte_unmap_unlock(args.ptep, ptl); 1296 1297 ptl = pmd_lock(args.mm, args.pmdp); 1298 pmd_clear_tests(&args); 1299 pmd_advanced_tests(&args); 1300 pmd_huge_tests(&args); 1301 pmd_populate_tests(&args); 1302 spin_unlock(ptl); 1303 1304 ptl = pud_lock(args.mm, args.pudp); 1305 pud_clear_tests(&args); 1306 pud_advanced_tests(&args); 1307 pud_huge_tests(&args); 1308 pud_populate_tests(&args); 1309 spin_unlock(ptl); 1310 1311 spin_lock(&(args.mm->page_table_lock)); 1312 p4d_clear_tests(&args); 1313 pgd_clear_tests(&args); 1314 p4d_populate_tests(&args); 1315 pgd_populate_tests(&args); 1316 spin_unlock(&(args.mm->page_table_lock)); 1317 1318 destroy_args(&args); 1319 return 0; 1320 } 1321 late_initcall(debug_vm_pgtable); 1322