/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>

#include <asm/pgtable-bits.h>

#ifndef __ASSEMBLY__

/* Page Upper Directory not used in RISC-V */
#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				 | _PAGE_WRITE \
				 | _PAGE_PRESENT \
				 | _PAGE_ACCESSED \
				 | _PAGE_DIRTY)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

extern pgd_t swapper_pg_dir[];

/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXEC
#define __P101	PAGE_READ_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXEC
#define __S101	PAGE_READ_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	(PAGE_OFFSET - 1)
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)

#define FIXADDR_TOP	VMALLOC_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE	PMD_SIZE
#else
#define FIXADDR_SIZE	PGDIR_SIZE
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VMEMMAP_SHIFT \
	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	(VMALLOC_START - 1)
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)

#define vmemmap		((struct page *)VMEMMAP_START)
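
/*
 * For a rough sense of scale, a sketch assuming Sv39 (CONFIG_VA_BITS == 39),
 * PAGE_SHIFT == 12 and a 64-byte struct page (so STRUCT_PAGE_MAX_SHIFT == 6):
 *
 *	VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32
 *	VMEMMAP_SIZE  = 1UL << 32       = 4 GiB
 *
 * i.e. the vmemmap array occupies the 4 GiB immediately below VMALLOC_START,
 * enough struct pages to describe half of the 39-bit virtual address space.
 */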

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}

#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* Locate an entry in the page global directory */
static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
{
	return mm->pgd + pgd_index(addr);
}
/* Locate an entry in the kernel page global directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, (addr))

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline pte_t mk_pte(struct page *page, pgprot_t prot)
{
	return pfn_pte(page_to_pfn(page), prot);
}

#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
}

#define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			((void)(pte))

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte)
		&& (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */
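
/*
 * Illustrative sketch of how these helpers compose (the real call sites live
 * in generic mm code, not in this header): to build and install a writable,
 * dirty mapping of a page, a caller would typically construct the PTE from a
 * page and a protection value, then toggle bits with the pte_mk*() helpers
 * below, e.g.
 *
 *	pte_t pte = mk_pte(page, PAGE_READ);
 *	pte = pte_mkwrite(pte_mkdirty(pte));
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * where page, mm, addr and ptep are whatever the caller is operating on.
 */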

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))


/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

void flush_icache_pte(pte_t pte);

static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}
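
/*
 * The helpers below modify live PTEs with a single atomic read-modify-write
 * (xchg/and/test_and_clear) rather than a plain load and store; the intent,
 * presumably, is that Accessed/Dirty updates made concurrently by another
 * hart or by a hardware page-table walker cannot be lost between the read
 * and the write.
 */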

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit            1:	_PAGE_PROT_NONE (zero)
 *	bits      2 to 6:	swap type
 *	bits 7 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1) /* FIXME */
#endif

extern void *dtb_early_va;
extern void setup_bootmem(void);
extern void paging_init(void);

/*
 * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE FIXADDR_START
#endif

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */