xref: /openbmc/linux/arch/riscv/include/asm/pgtable.h (revision 65a0d3c1)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif
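
/*
 * For illustration, plugging the constants in: on 64-bit this is
 * 0xffffffffffffffff - 0x80000000 + 1 = 0xffffffff80000000, i.e. the
 * kernel image is linked into the top 2GB of the virtual address space.
 */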

#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
#define VMALLOC_END      (PAGE_OFFSET - 1)
#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif
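
/*
 * A note on the placement: on 64-bit the JIT region ends at MODULES_END,
 * directly below the kernel image, which (together with the 2GB budget
 * above) is meant to keep generated BPF code within PC-relative reach of
 * kernel symbols; on 32-bit it simply sits below the vmalloc area.
 */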

/*
 * Modules always live in the 2GB window directly below the kernel image,
 * so that module relocations against kernel symbols stay within
 * PC-relative addressing range.
 */
#ifdef CONFIG_64BIT
#define MODULES_VADDR	(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END	(PFN_ALIGN((unsigned long)&_start))
#endif

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VMEMMAP_SHIFT \
	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	(VMALLOC_START - 1)
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
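
/*
 * For example, assuming Sv39 defaults (CONFIG_VA_BITS = 39, PAGE_SHIFT =
 * 12) and a 64-byte struct page (STRUCT_PAGE_MAX_SHIFT = 6):
 * VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32, giving a 4GB VMEMMAP_SIZE, which
 * is exactly 2^26 struct pages, one per 4KB page of the 2^38-byte half
 * of the virtual address space.
 */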

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed when the
 * kernel is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START)

#define PCI_IO_SIZE      SZ_16M
#define PCI_IO_END       VMEMMAP_START
#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP      PCI_IO_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE     PMD_SIZE
#else
#define FIXADDR_SIZE     PGDIR_SIZE
#endif
#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
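
/*
 * Putting the definitions above together, the regions pack tightly below
 * the linear mapping, each ending where the next begins (lowest first):
 *
 *	FIXADDR_START .. FIXADDR_TOP   : fixmap
 *	PCI_IO_START  .. PCI_IO_END    : PCI I/O window (16MB)
 *	VMEMMAP_START .. VMEMMAP_END   : vmemmap
 *	VMALLOC_START .. VMALLOC_END   : vmalloc/ioremap
 *	PAGE_OFFSET   ..               : linear mapping of RAM
 */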

#endif

#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET		SZ_8M
#endif

#ifndef __ASSEMBLY__

/* Page Upper Directory not used in RISC-V */
#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({							\
	uintptr_t __a = (uintptr_t)(addr);					\
	(__a >= CONFIG_XIP_PHYS_ADDR && __a < CONFIG_XIP_PHYS_ADDR + SZ_16M) ?	\
		__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
		__a;								\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */
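
/*
 * A sketch of the fixup with made-up config values (the real ones are
 * board-specific Kconfig settings): with CONFIG_XIP_PHYS_ADDR =
 * 0x20000000 (flash) and CONFIG_PHYS_RAM_BASE = 0x80000000, an address
 * 0x20900000 inside the 16MB XIP window is redirected to its RAM copy at
 * 0x20900000 - 0x20000000 + 0x80000000 - SZ_8M = 0x80100000, while
 * addresses outside the window pass through unchanged.
 */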

#ifdef CONFIG_MMU
/* Number of entries in the page global directory */
#define PTRS_PER_PGD    (PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE    (PAGE_SIZE / sizeof(pte_t))

/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)
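
/*
 * For example, under Sv39 with 4KB pages and an 8-byte pgd_t,
 * PTRS_PER_PGD is 4096 / 8 = 512; with TASK_SIZE = 2^38 and PGDIR_SIZE =
 * 2^30, user space gets the lower 256 of those 512 PGD entries, i.e.
 * exactly half, matching the split described at TASK_SIZE below.
 */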

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

/*
 * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
 * change the properties of memory regions.
 */
#define _PAGE_IOREMAP _PAGE_KERNEL

extern pgd_t swapper_pg_dir[];

/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXEC
#define __P101	PAGE_READ_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXEC
#define __S101	PAGE_READ_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
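
/*
 * The asymmetry between the two tables is what implements copy-on-write:
 * a private writable mapping (__P011) starts out as PAGE_COPY, i.e.
 * read-only, so the first store faults and the handler can give the task
 * its own copy before making the page writable, whereas a shared mapping
 * (__S011) is PAGE_SHARED and writable from the start.
 */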

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd);
}
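
/*
 * In the RISC-V page-table format an entry with R, W and X all clear is a
 * pointer to a next-level table, while setting any of them makes the
 * entry a leaf mapping; that is the encoding this test relies on.
 */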
#define pmd_leaf	pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) &&
	       (pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}

#define pte_page(x)     pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

#define mk_pte(page, prot)       pfn_pte(page_to_pfn(page), prot)
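
/*
 * pfn_pte() and pte_pfn() are exact inverses as long as the protection
 * bits stay below _PAGE_PFN_SHIFT, e.g. (with a made-up PFN):
 *
 *	pte_t pte = pfn_pte(0x80200UL, PAGE_KERNEL);
 *	BUG_ON(pte_pfn(pte) != 0x80200UL);
 */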

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte)
		&& (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

void flush_icache_pte(pte_t pte);

static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}
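
/*
 * The atomic xchg below both returns the old entry and clears it in one
 * step, so an accessed/dirty update racing in between a separate read
 * and a store of zero cannot be lost.
 */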
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit            1:	_PAGE_PROT_NONE (zero)
 *	bits      2 to 6:	swap type
 *	bits 7 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
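
/*
 * A worked example with arbitrary values: __swp_entry(3, 0x1234) packs to
 * (3 << 2) | (0x1234 << 7) = 0x91a0c. Bits 0 and 1 stay zero, so the
 * entry can never be mistaken for a present or prot-none PTE, and
 * __swp_type() and __swp_offset() recover 3 and 0x1234 respectively.
 */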

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(CONFIG_VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE FIXADDR_START
#endif
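
/*
 * Checking the RV64 arithmetic against the comment above: under Sv39,
 * PGDIR_SIZE = 2^30 and PTRS_PER_PGD = 512, so TASK_SIZE =
 * 2^30 * 512 / 2 = 2^38 = 0x4000000000, half of the 2^39-byte virtual
 * address space, as promised.
 */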

#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		0xffffffffUL
#define VMALLOC_START		0
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */

#define kern_addr_valid(addr)   (1) /* FIXME */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */

void setup_bootmem(void);
void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */