/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_RADIX_H
#define _ASM_POWERPC_PGTABLE_RADIX_H

#include <asm/asm-const.h>

#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
#endif

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/radix-64k.h>
#else
#include <asm/book3s/64/radix-4k.h>
#endif

#ifndef __ASSEMBLY__
#include <asm/book3s/64/tlbflush-radix.h>
#include <asm/cpu_has_feature.h>
#endif

/* An empty PTE can still have the R or C bits set by a hardware writeback */
#define RADIX_PTE_NONE_MASK		(_PAGE_DIRTY | _PAGE_ACCESSED)
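/*
 * Illustrative consequence (see radix__pte_none() below): a cleared PTE
 * whose _PAGE_ACCESSED bit was later set by a hardware reference-bit
 * writeback, i.e. __pte(_PAGE_ACCESSED), is still treated as none.
 */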

/* Bits to set in an RPMD/RPUD/RPGD */
#define RADIX_PMD_VAL_BITS		(0x8000000000000000UL | RADIX_PTE_INDEX_SIZE)
#define RADIX_PUD_VAL_BITS		(0x8000000000000000UL | RADIX_PMD_INDEX_SIZE)
#define RADIX_PGD_VAL_BITS		(0x8000000000000000UL | RADIX_PUD_INDEX_SIZE)
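
/*
 * Worked example (illustrative, 64K pages): bit 63 is the valid bit of a
 * non-leaf (directory) entry and the low bits encode the number of
 * entries (as a power of two) in the next-level table, so
 * RADIX_PMD_VAL_BITS = 0x8000000000000000UL | 5 = 0x8000000000000005UL.
 */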

/* Nothing should be set in the reserved bits or the leaf bits */
#define RADIX_PMD_BAD_BITS		0x60000000000000e0UL
#define RADIX_PUD_BAD_BITS		0x60000000000000e0UL
#define RADIX_P4D_BAD_BITS		0x60000000000000e0UL

#define RADIX_PMD_SHIFT		(PAGE_SHIFT + RADIX_PTE_INDEX_SIZE)
#define RADIX_PUD_SHIFT		(RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE)
#define RADIX_PGD_SHIFT		(RADIX_PUD_SHIFT + RADIX_PUD_INDEX_SIZE)
/*
 * Size of EA range mapped by our pagetables.
 */
#define RADIX_PGTABLE_EADDR_SIZE (RADIX_PTE_INDEX_SIZE + RADIX_PMD_INDEX_SIZE +	\
			      RADIX_PUD_INDEX_SIZE + RADIX_PGD_INDEX_SIZE + PAGE_SHIFT)
#define RADIX_PGTABLE_RANGE (ASM_CONST(1) << RADIX_PGTABLE_EADDR_SIZE)
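
/*
 * Worked example: with 64K pages (PAGE_SHIFT = 16) the index sizes from
 * radix-64k.h are 5 + 9 + 9 + 13, so RADIX_PGTABLE_EADDR_SIZE = 52 and
 * RADIX_PGTABLE_RANGE = 1UL << 52 = 4PB; the 4K layout from radix-4k.h
 * (12 + 9 + 9 + 9 + 13) gives the same 52 bits.
 */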

/*
 * We support a 52-bit address space. The kernel virtual mapping lives in
 * the top quadrant (EA top bits 0b11), and the kernel must fit within
 * that quadrant.
 *
 *           +------------------+
 *           +------------------+  Kernel virtual map (0xc008000000000000)
 *           |                  |
 *           |                  |
 *           |                  |
 * 0b11......+------------------+  Kernel linear map (0xc....)
 *           |                  |
 *           |    quadrant 2    |
 *           |                  |
 * 0b10......+------------------+
 *           |                  |
 *           |    quadrant 1    |
 *           |                  |
 * 0b01......+------------------+
 *           |                  |
 *           |    quadrant 0    |
 *           |                  |
 * 0b00......+------------------+
 *
 *
 * 3rd quadrant expanded:
 * +------------------------------+
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel vmemmap end (0xc00e000000000000)
 * |                              |
 * |            512TB             |
 * |                              |
 * +------------------------------+  Kernel IO map end / vmemmap start
 * |                              |
 * |            512TB             |
 * |                              |
 * +------------------------------+  Kernel vmap end / IO map start
 * |                              |
 * |            512TB             |
 * |                              |
 * +------------------------------+  Kernel virt start (0xc008000000000000)
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel linear (0xc.....)
 */


/*
 * If we store section details in page->flags, we cannot increase
 * MAX_PHYSMEM_BITS: increasing SECTIONS_WIDTH leaves no room to store
 * the node details in page->flags, and page_to_nid() then has to do a
 * page->section->node lookup. Hence only increase it for VMEMMAP, and
 * further require SPARSEMEM_EXTREME to reduce the memory overhead of
 * the resulting large number of sections.
 * 51 bits is the max physical real address on POWER9.
 */

#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME)
#define R_MAX_PHYSMEM_BITS	51
#else
#define R_MAX_PHYSMEM_BITS	46
#endif

#define RADIX_KERN_VIRT_START	ASM_CONST(0xc008000000000000)
/*
 * 49 = MAX_EA_BITS_PER_CONTEXT (hash specific), chosen so that radix
 * uses the same region size as hash.
 */
#define RADIX_KERN_MAP_SIZE	(1UL << 49)

#define RADIX_VMALLOC_START	RADIX_KERN_VIRT_START
#define RADIX_VMALLOC_SIZE	RADIX_KERN_MAP_SIZE
#define RADIX_VMALLOC_END	(RADIX_VMALLOC_START + RADIX_VMALLOC_SIZE)

#define RADIX_KERN_IO_START	RADIX_VMALLOC_END
#define RADIX_KERN_IO_SIZE	RADIX_KERN_MAP_SIZE
#define RADIX_KERN_IO_END	(RADIX_KERN_IO_START + RADIX_KERN_IO_SIZE)

#define RADIX_VMEMMAP_START	RADIX_KERN_IO_END
#define RADIX_VMEMMAP_SIZE	RADIX_KERN_MAP_SIZE
#define RADIX_VMEMMAP_END	(RADIX_VMEMMAP_START + RADIX_VMEMMAP_SIZE)
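
/*
 * Worked example: RADIX_KERN_MAP_SIZE is 1UL << 49 = 512TB, so the
 * regions above resolve to vmalloc at 0xc008000000000000, the IO map at
 * 0xc00a000000000000 and vmemmap at 0xc00c000000000000, ending at
 * 0xc00e000000000000, matching the expanded quadrant diagram above.
 */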

#ifndef __ASSEMBLY__
#define RADIX_PTE_TABLE_SIZE	(sizeof(pte_t) << RADIX_PTE_INDEX_SIZE)
#define RADIX_PMD_TABLE_SIZE	(sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE)
#define RADIX_PUD_TABLE_SIZE	(sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
#define RADIX_PGD_TABLE_SIZE	(sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)
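
/*
 * Worked example (64K pages, 8-byte entries): the PTE table is
 * 8 << 5 = 256 bytes, the PMD and PUD tables are 8 << 9 = 4K each, and
 * the PGD is 8 << 13 = 64K.
 */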

#ifdef CONFIG_STRICT_KERNEL_RWX
extern void radix__mark_rodata_ro(void);
extern void radix__mark_initmem_nx(void);
#endif

extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
					 pte_t entry, unsigned long address,
					 int psize);

extern void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
					   unsigned long addr, pte_t *ptep,
					   pte_t old_pte, pte_t pte);

/*
 * Atomically clear the bits in @clr and set the bits in @set in the PTE,
 * using an ldarx/stdcx. loop on the big-endian PTE image. Returns the
 * old (pre-update) PTE value in CPU endianness.
 */
static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
					       unsigned long set)
{
	__be64 old_be, tmp_be;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n"
	"	andc	%1,%0,%5	\n"
	"	or	%1,%1,%4	\n"
	"	stdcx.	%1,0,%3		\n"
	"	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(set)), "r" (cpu_to_be64(clr))
	: "cc" );

	return be64_to_cpu(old_be);
}
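
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * test-and-clear the dirty bit with the primitive above.
 */
static inline int example_radix_test_and_clear_dirty(pte_t *ptep)
{
	/* Clear _PAGE_DIRTY, set nothing; the old PTE value is returned. */
	unsigned long old = __radix_pte_update(ptep, _PAGE_DIRTY, 0);

	return !!(old & _PAGE_DIRTY);
}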

static inline unsigned long radix__pte_update(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, unsigned long clr,
					unsigned long set,
					int huge)
{
	unsigned long old_pte;

	old_pte = __radix_pte_update(ptep, clr, set);
	if (!huge)
		assert_pte_locked(mm, addr);

	return old_pte;
}
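
/*
 * Illustrative sketch (hypothetical helper): write-protect a normal
 * (!huge) PTE under the page table lock, the way the generic wrprotect
 * paths drive the update primitive above.
 */
static inline void example_radix_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	/* Atomically clear the write permission bit; set nothing. */
	radix__pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}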

static inline pte_t radix__ptep_get_and_clear_full(struct mm_struct *mm,
						   unsigned long addr,
						   pte_t *ptep, int full)
{
	unsigned long old_pte;

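	/*
	 * On the full-mm teardown path nothing else cares about this
	 * mapping any more, so the PTE can be cleared with a plain
	 * store; otherwise go through the atomic update.
	 */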
	if (full) {
		old_pte = pte_val(*ptep);
		*ptep = __pte(0);
	} else {
		old_pte = radix__pte_update(mm, addr, ptep, ~0ul, 0, 0);
	}

	return __pte(old_pte);
}

static inline int radix__pte_same(pte_t pte_a, pte_t pte_b)
{
	return ((pte_raw(pte_a) ^ pte_raw(pte_b)) == 0);
}

static inline int radix__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~RADIX_PTE_NONE_MASK) == 0;
}

static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte, int percpu)
{
	*ptep = pte;

	/*
	 * The architecture suggests a ptesync after setting the pte, which
	 * orders the store that updates the pte with subsequent page table
	 * walk accesses which may load the pte. Without this it may be
	 * possible for a subsequent access to result in a spurious fault.
	 *
	 * This is not necessary for correctness, because a spurious fault
	 * is tolerated by the page fault handler, and this store will
	 * eventually be seen. In testing, there was no noticeable increase
	 * in user faults on POWER9. Avoiding ptesync here is a significant
	 * win for things like fork. If a future microarchitecture benefits
	 * from ptesync, it should probably go into update_mmu_cache, rather
	 * than set_pte_at (which is used to set ptes unrelated to faults).
	 *
	 * Spurious faults from kernel memory are not tolerated, so there
	 * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
	 * the pte update sequence from ISA Book III 6.10 Translation Table
	 * Update Synchronization Requirements.
	 */
}

static inline int radix__pmd_bad(pmd_t pmd)
{
	return !!(pmd_val(pmd) & RADIX_PMD_BAD_BITS);
}

static inline int radix__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return ((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) == 0);
}

static inline int radix__pud_bad(pud_t pud)
{
	return !!(pud_val(pud) & RADIX_PUD_BAD_BITS);
}

static inline int radix__p4d_bad(p4d_t p4d)
{
	return !!(p4d_val(p4d) & RADIX_P4D_BAD_BITS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

static inline int radix__pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
}

static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | _PAGE_PTE);
}

extern unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					  pmd_t *pmdp, unsigned long clr,
					  unsigned long set);
extern pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					pgtable_t pgtable);
extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp);
static inline int radix__has_transparent_hugepage(void)
{
	/* For radix, a 2M mapping at the PMD level means THP */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
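
/*
 * Worked example: PMD_SHIFT is PAGE_SHIFT plus the PTE index size, i.e.
 * 16 + 5 = 21 with 64K pages and 12 + 9 = 21 with 4K pages, so a PMD
 * leaf maps 1 << 21 = 2M in either configuration.
 */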
#endif

static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
}

extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys);
extern void radix__vmemmap_remove_mapping(unsigned long start,
				    unsigned long page_size);

extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
				 pgprot_t flags, unsigned int psz);

static inline unsigned long radix__get_tree_size(void)
{
	unsigned long rts_field;
	/*
	 * We support 52 bits, hence RTS = 52 - 31 = 21 = 0b10101.
	 * RTS encoding details:
	 * bits 0 - 2 of rts -> bits 5 - 7 of the unsigned long
	 * bits 3 - 4 of rts -> bits 61 - 62 of the unsigned long
	 */
	rts_field = (0x5UL << 5); /* 0b101 into bits 5 - 7 */
	rts_field |= (0x2UL << 61); /* 0b10 into bits 61 - 62 */

	return rts_field;
}
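
/*
 * Worked example: the two fields above combine to
 * (0x5UL << 5) | (0x2UL << 61) = 0x40000000000000a0UL, the value OR'ed
 * into the process table entry to encode a 52-bit radix tree.
 */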

#ifdef CONFIG_MEMORY_HOTPLUG
int radix__create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot);
int radix__remove_section_mapping(unsigned long start, unsigned long end);
#endif /* CONFIG_MEMORY_HOTPLUG */

void radix__kernel_map_pages(struct page *page, int numpages, int enable);

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_RADIX_H */