/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>

#include <asm/pgtable-bits.h>

#ifndef __ASSEMBLY__

#ifdef CONFIG_MMU

/* Page Upper Directory not used in RISC-V */
#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(0)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)

extern pgd_t swapper_pg_dir[];

/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXEC
#define __P101	PAGE_READ_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXEC
#define __S101	PAGE_READ_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
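/*
 * Illustrative note (not used by the code above): generic mm code
 * indexes the __P/__S tables with the low read/write/exec/shared bits
 * of vm_flags, so a MAP_PRIVATE mapping requested with
 * PROT_READ | PROT_WRITE resolves to __P011 == PAGE_COPY.  Such a page
 * is mapped without _PAGE_WRITE, and the first write to it faults,
 * triggering copy-on-write.
 */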
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PRESENT);
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* Locate an entry in the page global directory */
static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
{
	return mm->pgd + pgd_index(addr);
}
/* Locate an entry in the kernel page global directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, (addr))

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline pte_t mk_pte(struct page *page, pgprot_t prot)
{
	return pfn_pte(page_to_pfn(page), prot);
}

#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
}

#define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			((void)(pte))
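/*
 * Usage sketch (illustrative only, assuming a fully populated
 * mapping): a software walk of a kernel virtual address using the
 * helpers above.  pud_offset()/pmd_offset() come from the folded
 * nopud/nopmd layers or from pgtable-64.h:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pud_offset(pgd, addr), addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *	unsigned long pfn = pte_pfn(*pte);
 *
 * A robust walk must also check pmd_none()/pmd_bad() at each level
 * before descending.
 */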
/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT);
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

/* static inline int pte_read(pte_t pte) */

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte)
		&& (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}

/* static inline int pte_exec(pte_t pte) */

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
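/*
 * Illustrative use of pte_modify() (not part of this header): making
 * a PTE read-only while preserving the page it points at:
 *
 *	pte = pte_modify(pte, PAGE_READ);
 *
 * Only the protection bits are replaced; the PFN and the other bits
 * kept by _PAGE_CHG_MASK (see pgtable-bits.h) survive the change.
 */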
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit            1:	reserved for future use (zero)
 *	bits      2 to 6:	swap type
 *	bits 7 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
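/*
 * Worked example (illustrative only): __swp_entry(3, 0x10) yields
 * (3 << 2) | (0x10 << 7) == 0x80c.  Bit 0 (_PAGE_PRESENT) stays
 * clear, so the MMU faults on access, and the fault handler can
 * recover type 3 and offset 0x10 via __swp_type()/__swp_offset().
 */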
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1) /* FIXME */
#endif

extern void paging_init(void);

static inline void pgtable_cache_init(void)
{
	/* No page table caches to initialize */
}

#endif /* CONFIG_MMU */

#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	(PAGE_OFFSET - 1)
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)

/*
 * Task size is 0x40000000000 for RV64 or 0xb800000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE VMALLOC_START
#endif

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */