// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the
 * semantics expectations that are being validated here. All future
 * changes in here or the documentation need to be kept in sync.
 *
 * On the s390 platform, the lower 4 bits are used to identify a given
 * page table entry type. These bits might also affect the ability to
 * clear entries with pxx_clear() because of how dynamic page table
 * folding works on s390. So while loading up the entries, do not change
 * the lower 4 bits. This does not affect any other platform. Also avoid
 * the 62nd bit on ppc64, which is used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK		(S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE		(GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE		GENMASK(7, 0)
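/*
 * For illustration only: on a 64-bit build, S390_SKIP_MASK is 0xf and
 * PPC64_SKIP_MASK is bit 62, so RANDOM_ORVALUE expands to an all-ones
 * word with bits 3..0 and bit 62 cleared. ORing it into an entry gives
 * a "noisy" value that still leaves the architecture-reserved bits
 * untouched.
 */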
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_alignment;
	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding TLB flushes.
	 * This requires set_pte_at() to not be used to update an
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 *
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}
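/*
 * A minimal sketch of the invariant pte_basic_tests() relies on, with a
 * hypothetical pfn/prot pair: every helper is a pure transformation of
 * the entry value and never touches memory, so chains like
 *
 *	pte_dirty(pte_mkdirty(pte_mkclean(pfn_pte(pfn, prot))))
 *
 * must evaluate to true regardless of the starting protection bits.
 */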
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pmd_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pte entries */
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}
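/*
 * Note on the deposit/withdraw pairing above: when a THP is mapped, the
 * now-unused pte page table is deposited with the pmd so that a later
 * split can repopulate it without allocating memory. The test mirrors
 * that protocol: every pgtable_trans_huge_deposit() must be balanced by
 * a pgtable_trans_huge_withdraw() before the page table is freed.
 */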
static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}
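/*
 * The mm_pmd_folded() bail-out above is there because, with a folded
 * PMD level, a PUD entry effectively is the PMD entry, so there are no
 * meaningful pud_bad() semantics to validate for a huge entry.
 */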
static void __init pud_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	unsigned long vaddr = args->vaddr;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pud_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(args->mm, vaddr, args->pudp, 1);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkyoung(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);

	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
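/*
 * The two huge vmap tests below need a physical range that is naturally
 * aligned to the respective leaf size: init_fixed_pfns() records the
 * best alignment it could find in args->fixed_alignment, and the tests
 * skip themselves when that alignment (or the arch support) is missing.
 */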
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot) ||
	    args->fixed_alignment < PMD_SIZE)
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * The x86 implementation of pmd_set_huge() verifies that the
	 * given PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(args->page_prot) ||
	    args->fixed_alignment < PUD_SIZE)
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * The x86 implementation of pud_set_huge() verifies that the
	 * given PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pudp, __pud(0));
	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
	WARN_ON(!pud_clear_huge(args->pudp));
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
}
#else  /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
	pud_t pud = READ_ONCE(*args->pudp);

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pudp, pud);
	pud_clear(args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(args->mm, args->pudp, args->start_pmdp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_PUD_FOLDED */
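/*
 * The pattern used by all the pxx_clear_tests() above and below is the
 * same: OR RANDOM_ORVALUE into the live entry first, then assert that
 * pxx_clear() still yields pxx_none(). This proves clearing works no
 * matter which stray bits happen to be set, while ARCH_SKIP_MASK keeps
 * the s390/ppc64 reserved bits out of the noise.
 */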
#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d = READ_ONCE(*args->p4dp);

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->p4dp, p4d);
	p4d_clear(args->p4dp);
	p4d = READ_ONCE(*args->p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(args->pudp);
	p4d_clear(args->p4dp);
	p4d_populate(args->mm, args->p4dp, args->start_pudp);
	p4d = READ_ONCE(*args->p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd = READ_ONCE(*(args->pgdp));

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pgdp, pgd);
	pgd_clear(args->pgdp);
	pgd = READ_ONCE(*args->pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(args->p4dp);
	pgd_clear(args->pgdp);
	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
	pgd = READ_ONCE(*args->pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	barrier();
	ptep_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = READ_ONCE(*args->pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pmdp, pmd);
	pmd_clear(args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_bad(pmd));
}
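/*
 * Background for the test below: pte_special() marks mappings that have
 * no struct page backing (or must not be touched through one), e.g.
 * VM_PFNMAP style mappings, so that paths like GUP know to back off.
 * The helper pair is only meaningful where the architecture selects
 * CONFIG_ARCH_HAS_PTE_SPECIAL.
 */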
static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else  /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}
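/*
 * Soft dirty bits back the userspace dirty-page tracking interface
 * (Documentation/admin-guide/mm/soft-dirty.rst): bits are cleared via
 * /proc/<pid>/clear_refs and read back through pagemap. The soft dirty
 * tests here only check that the bit survives the helper round trips,
 * for both the present and the swap variants of an entry.
 */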
static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
{
#ifdef __HAVE_ARCH_PTE_SWP_EXCLUSIVE
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE swap exclusive\n");
	pte = pte_swp_mkexclusive(pte);
	WARN_ON(!pte_swp_exclusive(pte));
	pte = pte_swp_clear_exclusive(pte);
	WARN_ON(pte_swp_exclusive(pte));
#endif /* __HAVE_ARCH_PTE_SWP_EXCLUSIVE */
}

static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
}
#else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
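/*
 * The swap tests above only exercise the raw format conversion: the
 * arch must convert a pte/pmd to a swp_entry_t and back without losing
 * the pfn bits. They deliberately use the low-level __pte_to_swp_entry()
 * style helpers rather than the typed swp_entry(type, offset) API,
 * since no real swap device is involved here.
 */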
static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's use the allocated page explicitly for this
	 * purpose.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");

	/*
	 * make_[readable|writable]_migration_entry() expects the given page
	 * to be locked, otherwise it stumbles over a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(args->fixed_pmd_pfn);
	pte = mk_huge_pte(page, args->page_prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else  /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after MMU
	 * invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP, to avoid taking the
	 * pmd_lock when others walk over non-transhuge pmds (i.e. there
	 * are no THPs allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}
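/*
 * For context: the "invalid but still present" state tested above is
 * what the THP split path relies on. __split_huge_pmd() invalidates the
 * pmd with pmdp_invalidate() before rewriting it as a page table, and
 * concurrent walkers must keep treating the entry as a (trans huge)
 * mapping during that window.
 */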
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Re-enable
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

	/* Free (huge) page */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    has_transparent_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_pages(page, 0);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp)
		p4d_free(args->mm, args->start_p4dp);

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmdrop(args->mm);
}

static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

#ifdef CONFIG_CONTIG_ALLOC
	if (order >= MAX_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order < MAX_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}
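/*
 * A worked example of the order arithmetic, assuming x86_64 with 4K
 * pages: HPAGE_PUD_SHIFT - PAGE_SHIFT = 30 - 12 = 18, which is beyond
 * the buddy allocator limit (MAX_ORDER), so a PUD sized page can only
 * come from alloc_contig_pages() under CONFIG_CONTIG_ALLOC. A PMD sized
 * page (HPAGE_PMD_ORDER = 9) still fits a regular alloc_pages() call.
 */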
/*
 * Check if a physical memory range described by <pstart, pend> contains
 * an area that is of size psize, and aligned to psize.
 *
 * Don't use address 0, an all-zeroes physical address might mask bugs, and
 * it's not used on x86.
 */
static void __init phys_align_check(phys_addr_t pstart,
				    phys_addr_t pend, unsigned long psize,
				    phys_addr_t *physp, unsigned long *alignp)
{
	phys_addr_t aligned_start, aligned_end;

	if (pstart == 0)
		pstart = PAGE_SIZE;

	aligned_start = ALIGN(pstart, psize);
	aligned_end = aligned_start + psize;

	if (aligned_end > aligned_start && aligned_end <= pend) {
		*alignp = psize;
		*physp = aligned_start;
	}
}

static void __init init_fixed_pfns(struct pgtable_debug_args *args)
{
	u64 idx;
	phys_addr_t phys, pstart, pend;

	/*
	 * Initialize the fixed pfns. To do this, try to find a
	 * valid physical range, preferably aligned to PUD_SIZE,
	 * but settling for one aligned to PMD_SIZE as a fallback. If
	 * neither of those is found, use the physical address of
	 * the start_kernel symbol.
	 *
	 * The memory doesn't need to be allocated, it just needs to exist
	 * as usable memory. It won't be touched.
	 *
	 * The alignment is recorded, and can be checked to see if we
	 * can run the tests that require an actual valid physical
	 * address range on some architectures ({pmd,pud}_huge_tests
	 * on x86).
	 */

	phys = __pa_symbol(&start_kernel);
	args->fixed_alignment = PAGE_SIZE;

	for_each_mem_range(idx, &pstart, &pend) {
		/* First check for a PUD-aligned area */
		phys_align_check(pstart, pend, PUD_SIZE, &phys,
				 &args->fixed_alignment);

		/* If a PUD-aligned area is found, we're done */
		if (args->fixed_alignment == PUD_SIZE)
			break;

		/*
		 * If no PMD-aligned area has been found yet, check for one,
		 * but continue the loop to look for a PUD-aligned area.
		 */
		if (args->fixed_alignment < PMD_SIZE)
			phys_align_check(pstart, pend, PMD_SIZE, &phys,
					 &args->fixed_alignment);
	}

	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));
}
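/*
 * Consequence of the search above: if only a PMD-aligned range exists,
 * args->fixed_alignment ends up as PMD_SIZE and pud_huge_tests() will
 * skip itself; if nothing better than the start_kernel fallback is
 * found, both pmd_huge_tests() and pud_huge_tests() are skipped.
 */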
static int __init init_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
	 * will help create page table entries with PROT_NONE permission as
	 * required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr = get_random_vaddr();
	args->page_prot = vm_get_page_prot(VM_ACCESS_FLAGS);
	args->page_prot_none = vm_get_page_prot(VM_NONE);
	args->is_contiguous_page = false;
	args->pud_pfn = ULONG_MAX;
	args->pmd_pfn = ULONG_MAX;
	args->pte_pfn = ULONG_MAX;
	args->fixed_pgd_pfn = ULONG_MAX;
	args->fixed_p4d_pfn = ULONG_MAX;
	args->fixed_pud_pfn = ULONG_MAX;
	args->fixed_pmd_pfn = ULONG_MAX;
	args->fixed_pte_pfn = ULONG_MAX;

	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the tests.
	 * Let's save the page table entries so that they can be released
	 * when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
	WARN_ON(!args->start_ptep);

	init_fixed_pfns(args);

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_pages(GFP_KERNEL, 0);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}
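/*
 * Note that running out of memory above is not fatal: init_args() still
 * returns 0 with the pfn fields left at ULONG_MAX, and the advanced,
 * clear and migration tests that dereference the backing page simply
 * skip themselves when they see ULONG_MAX.
 */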
static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	/*
	 * Iterate over each possible vm_flags to make sure that all
	 * the basic page table transformation validations just hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 *
	 * Protection based vm_flags combinations are always linear
	 * and increasing i.e. starting from VM_NONE and going up to
	 * (VM_SHARED | READ | WRITE | EXEC).
	 */
#define VM_FLAGS_START	(VM_NONE)
#define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)

	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both the P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of the
	 * above iteration for now to save some test execution time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_devmap_tests(&args);
	pmd_devmap_tests(&args);
	pud_devmap_tests(&args);

	pte_soft_dirty_tests(&args);
	pmd_soft_dirty_tests(&args);
	pte_swap_soft_dirty_tests(&args);
	pmd_swap_soft_dirty_tests(&args);

	pte_swap_exclusive_tests(&args);

	pte_swap_tests(&args);
	pmd_swap_tests(&args);

	swap_migration_tests(&args);

	pmd_thp_tests(&args);
	pud_thp_tests(&args);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold the
	 * proper page table lock.
	 */

	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
	pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(args.mm, args.pmdp);
	pmd_clear_tests(&args);
	pmd_advanced_tests(&args);
	pmd_huge_tests(&args);
	pmd_populate_tests(&args);
	spin_unlock(ptl);

	ptl = pud_lock(args.mm, args.pudp);
	pud_clear_tests(&args);
	pud_advanced_tests(&args);
	pud_huge_tests(&args);
	pud_populate_tests(&args);
	spin_unlock(ptl);

	spin_lock(&(args.mm->page_table_lock));
	p4d_clear_tests(&args);
	pgd_clear_tests(&args);
	p4d_populate_tests(&args);
	pgd_populate_tests(&args);
	spin_unlock(&(args.mm->page_table_lock));

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);
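/*
 * Usage note: this test is built when CONFIG_DEBUG_VM_PGTABLE is
 * selected and runs once at late_initcall time during boot. A clean run
 * logs only the "Validating architecture page table helpers" line; any
 * semantics violation shows up as a WARN_ON() splat in dmesg.
 */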