/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#define KERN_VIRT_SIZE		(UL(-1))
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/*
 * Half of the kernel address space (1/4 of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE	((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)

#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	PAGE_OFFSET
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
#endif

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#ifdef CONFIG_64BIT
#define VA_BITS		(pgtable_l5_enabled ? \
				57 : (pgtable_l4_enabled ? 48 : 39))
#else
#define VA_BITS		32
#endif

#define VMEMMAP_SHIFT \
	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	VMALLOC_START
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
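/*
 * A worked example of the sizing above, derived from the definitions rather
 * than stated in the original source: half the virtual address space spans
 * 2^(VA_BITS - 1) bytes, i.e. 2^(VA_BITS - 1 - PAGE_SHIFT) pages, and each
 * page needs one struct page of at most 2^STRUCT_PAGE_MAX_SHIFT bytes, so
 * the map needs 2^(VA_BITS - 1 - PAGE_SHIFT + STRUCT_PAGE_MAX_SHIFT) bytes,
 * which is exactly BIT(VMEMMAP_SHIFT). For Sv39 (VA_BITS == 39,
 * PAGE_SHIFT == 12), assuming a 64-byte struct page (STRUCT_PAGE_MAX_SHIFT
 * == 6), that is 2^(39 - 1 - 12 + 6) bytes = 4 GiB of vmemmap space.
 */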
/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START)

#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define MAX_FDT_SIZE	PMD_SIZE
#define FIX_FDT_SIZE	(MAX_FDT_SIZE + SZ_2M)
#define FIXADDR_SIZE	(PMD_SIZE + FIX_FDT_SIZE)
#else
#define MAX_FDT_SIZE	PGDIR_SIZE
#define FIX_FDT_SIZE	MAX_FDT_SIZE
#define FIXADDR_SIZE	(PGDIR_SIZE + FIX_FDT_SIZE)
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

#endif

#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET		SZ_32M
#define XIP_OFFSET_MASK		(SZ_32M - 1)
#else
#define XIP_OFFSET		0
#endif

#ifndef __ASSEMBLY__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#define __page_val_to_pfn(_val)	(((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>

#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({						\
	uintptr_t __a = (uintptr_t)(addr);				\
	(__a >= CONFIG_XIP_PHYS_ADDR &&					\
	 __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ?			\
		__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET : \
		__a;							\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */

struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
	pud_t *(*get_pud_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pud)(uintptr_t va);
	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __initdata;

#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY \
				| _PAGE_GLOBAL)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

#define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO		__pgprot(_PAGE_IOREMAP)

extern pgd_t swapper_pg_dir[];
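/*
 * A note on the PROT_NONE encoding used throughout this file, inferred from
 * the definitions above and below rather than stated in the original:
 * PAGE_NONE leaves _PAGE_PRESENT (the V bit) clear, so the hardware faults
 * on any access, but keeps the _PAGE_PROT_NONE marker, which the hardware
 * ignores while the entry is invalid. pte_present()/pmd_present() therefore
 * still report such a mapping as present to the kernel, and pte_protnone()
 * below can distinguish a NUMA-hinting fault from a genuinely empty entry.
 */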
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * when splitting a THP, split_huge_page() temporarily clears
	 * the present bit; in that situation, pmd_present() and
	 * pmd_trans_huge() still need to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf	pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return __page_val_to_pfn(pgd_val(pgd));
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return __page_val_to_pfn(pte_val(pte));
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */
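/*
 * The accessors and modifiers above and below are pure bit operations on
 * the PTE value, so they compose freely before an entry is installed. A
 * hypothetical sketch, not taken from this file (mm, addr and ptep are
 * placeholders), of building a writable, dirty, young entry for a page:
 *
 *	pte_t pte = mk_pte(page, PAGE_WRITE);
 *	pte = pte_mkdirty(pte_mkwrite(pte_mkyoung(pte)));
 *	set_pte_at(mm, addr, ptep, pte);
 */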
static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long newprot_val = pgprot_val(newprot);

	ALT_THEAD_PMA(newprot_val);

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}
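/*
 * Background, summarized from the code below rather than stated in it:
 * RISC-V does not guarantee that the instruction cache observes data
 * stores without an explicit FENCE.I (Zifencei), so newly written code
 * must be synchronized before it is executed. flush_icache_pte(),
 * implemented in the arch's cacheflush code, performs that
 * synchronization, and __set_pte_at() invokes it before publishing any
 * present and executable PTE.
 */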
void flush_icache_pte(pte_t pte);

static inline void __set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	page_table_check_pte_set(mm, addr, ptep, pteval);
	__set_pte_at(mm, addr, ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	__set_pte_at(mm, addr, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}
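/*
 * A plausible rationale, inferred from the code rather than stated in it:
 * the clear/wrprotect helpers below update the PTE with atomic
 * read-modify-write operations instead of plain loads and stores because
 * a hardware page-table walker or another hart's fault handler may set
 * the A/D bits in the same word concurrently, and a non-atomic
 * read-modify-write could silently drop such an update.
 */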
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));

	page_table_check_pte_clear(mm, address, pte);

	return pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_IO;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_NOCACHE;

	return __pgprot(prot);
}

/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd)	(__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

#define __pud_to_phys(pud)	(__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

static inline unsigned long pud_pfn(pud_t pud)
{
	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
	return pte_user(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, addr, pmdp, pmd);
	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, addr, pudp, pud);
	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && pud_user(pud);
}
#endif
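/*
 * The pmdp_* helpers below simply forward to their ptep_* counterparts
 * after a pointer cast: on RISC-V a leaf PMD has the same bit layout as a
 * PTE, only its position in the page-table hierarchy differs, so the
 * word-sized atomics and bit tests carry over unchanged. (This summary is
 * inferred from the pte_pmd()/pmd_pte() conversions used throughout.)
 */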
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));

	page_table_check_pmd_clear(mm, address, pmd);

	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}

#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit       1 to 3:	_PAGE_LEAF (zero)
 *	bit            5:	_PAGE_PROT_NONE (zero)
 *	bit            6:	exclusive marker
 *	bits      7 to 11:	swap type
 *	bits 12 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	7
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
	  ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
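/*
 * A worked example of the encoding (values chosen purely for illustration):
 * __swp_entry(3, 0x1234) yields ((3 & 0x1f) << 7) | (0x1234 << 12)
 * = 0x1234180, from which __swp_type() recovers 3 and __swp_offset()
 * recovers 0x1234. Bit 0 (_PAGE_PRESENT) stays clear, so the resulting
 * PTE is !pte_none() && !pte_present(), as the comment above requires.
 */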
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is:
 * -	    0x9fc00000	(~2.5 GB) for RV32
 * -	  0x4000000000	( 256 GB) for RV64 using SV39 mmu
 * -	0x800000000000	( 128 TB) for RV64 using SV48 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since the "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64bits, must have bits
 * 63–48 all equal to bit 47, or else a page-fault exception will occur."
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MIN	(PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE	(test_thread_flag(TIF_32BIT) ? \
			 TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE	TASK_SIZE_64
#endif

#else
#define TASK_SIZE	FIXADDR_START
#define TASK_SIZE_MIN	TASK_SIZE
#endif

#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		0xffffffffUL
#define VMALLOC_START		0
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;
extern bool pgtable_l4_enabled;

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */