/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

/*
 * Macros to add or remove the encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()	ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while (0)

#endif	/* CONFIG_PARAVIRT */
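
/*
 * Illustrative sketch (editorial, not part of the original file): with
 * CONFIG_PARAVIRT disabled, the wrappers above compile straight to
 * their native_*() counterparts, so installing an entry is a plain
 * store.  The pfn, mm, addr and ptep below are hypothetical:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	set_pte_at(mm, addr, ptep, pte);	expands to native_set_pte_at()
 */
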
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
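
/*
 * Editorial note, a worked example of the pmd_trans_huge() test above:
 * a transparent huge pmd has _PAGE_PSE set and _PAGE_DEVMAP clear, so
 * masking with (_PAGE_PSE|_PAGE_DEVMAP) yields exactly _PAGE_PSE.  A
 * devmap pmd carries both bits, fails that comparison, and is reported
 * by pmd_devmap() instead.
 */
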
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}
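
/*
 * Illustrative sketch (editorial): the set/clear-flags helpers above
 * are pure functions over the entry value and compose freely; e.g. a
 * writable, dirty huge pmd for a hypothetical pfn/prot pair could be
 * built up as:
 *
 *	pmd_t pmd = pmd_mkhuge(pmd_mkdirty(pmd_mkwrite(pfn_pmd(pfn, prot))));
 */
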
static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif	/* CONFIG_HAVE_ARCH_SOFT_DIRTY */
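
/*
 * Editorial note on the soft-dirty helpers above: _PAGE_SOFT_DIRTY is
 * a software-defined bit used by the clear_refs/pagemap interface to
 * track writes across write-protect cycles, which is why pte_mkdirty()
 * and friends set it alongside the hardware _PAGE_DIRTY bit.
 */
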
/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))
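
/*
 * Illustrative sketch (editorial): pte_modify() is the building block
 * for mprotect()-style protection changes; it keeps the pfn and the
 * bits in _PAGE_CHG_MASK (e.g. cache mode and dirty/accessed state)
 * while replacing the rest.  With a hypothetical pte:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);	same pfn, now read-only
 */
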
static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care.  See
 * the comment in include/asm-generic/pgtable.h.
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */
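
/*
 * Editorial note: a NUMA-balancing hinting entry is made PROT_NONE,
 * i.e. _PAGE_PROTNONE set while _PAGE_PRESENT is clear.  pte_present()
 * above deliberately reports such entries as present (it checks
 * _PAGE_PRESENT | _PAGE_PROTNONE), while pte_protnone() matches
 * exactly this transient state and nothing else.
 */
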
static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
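
/*
 * Illustrative sketch (editorial) of the index helpers above and
 * below, assuming the x86-64 defaults of 4 KiB pages and 512-entry
 * tables: pte_index() takes address bits 20:12, pmd_index() bits
 * 29:21, pud_index() bits 38:30.  For the hypothetical address
 * 0x7f0012345000:
 *
 *	pte_index(addr) == (addr >> 12) & 511 == 0x145
 *	pmd_index(addr) == (addr >> 21) & 511 == 0x91
 */
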
#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the third-level page table.. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
	return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}
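
/*
 * Illustrative sketch (editorial): a full software walk with the
 * helpers defined above and below (error checking and huge-page
 * handling omitted; assumes every level is present):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);
 *	pud_t *pud = pud_offset(p4d, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */
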
#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables,
	 * except on 32-bit non-PAE which is not supported on KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}
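
/*
 * Editorial note on the native_local_*_get_and_clear() helpers above:
 * "local" means the mm is known not to be in use on any other CPU
 * (e.g. during a full address-space teardown), so a plain read
 * followed by a clear is safe and the atomic xchg of the regular
 * variants is unnecessary.
 */
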
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}
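
/*
 * Editorial note: ptep_set_wrprotect() and pmdp_set_wrprotect() above
 * use an atomic clear_bit() on _PAGE_BIT_RW rather than a non-atomic
 * read-modify-write of the whole entry, so dirty/accessed bits being
 * set concurrently by the hardware page walker cannot be lost.
 */
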
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif	/* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#endif	/* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}
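
/*
 * Illustrative sketch (editorial) of the PKRU layout used above: each
 * pkey owns two adjacent bits, AD (access-disable) at bit 2*pkey and
 * WD (write-disable) at bit 2*pkey + 1.  For a hypothetical value:
 *
 *	pkru = PKRU_WD_BIT << (1 * PKRU_BITS_PER_PKEY);	0x8
 *	__pkru_allows_read(pkru, 1)	true, AD bit clear
 *	__pkru_allows_write(pkru, 1)	false, WD bit set
 */
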
/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return false;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_H */