/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * Note: this file should only be included by other asm/*.h headers.
 */
#ifndef __ASM_MEMORY_H
#define __ASM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/page-def.h>

/*
 * Size of the PCI I/O space. This must remain a power of two so that
 * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
 */
#define PCI_IO_SIZE		SZ_16M

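/*
 * For illustration: with PCI_IO_SIZE == SZ_16M, the port-I/O accessors
 * can use IO_SPACE_LIMIT (PCI_IO_SIZE - 1 == 0x00ffffff) as a plain
 * bitmask on port numbers, which only works because the size is a
 * power of two.
 */
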
/*
 * VMEMMAP_SIZE - allows the whole linear region to be covered by
 *                a struct page array
 *
 * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
 * needs to cover the memory region from the beginning of the 52-bit
 * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
 * keep a constant PAGE_OFFSET and fall back to using the higher end
 * of the VMEMMAP where 52-bit support is not available in hardware.
 */
#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
			>> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))

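/*
 * A worked example (values assumed, not mandated): with
 * VA_BITS == VA_BITS_MIN == 48, 4 KB pages (PAGE_SHIFT == 12) and a
 * 64-byte struct page (STRUCT_PAGE_MAX_SHIFT == 6), the linear region
 * spans _PAGE_END(48) - PAGE_OFFSET == 1 << 47 == 128 TiB, so
 * VMEMMAP_SIZE == (1 << 47) >> (12 - 6) == 1 << 41 == 2 TiB.
 */
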
/*
 * PAGE_OFFSET - the virtual address of the start of the linear map, at the
 *               start of the TTBR1 address space.
 * PAGE_END - the end of the linear map, where all other kernel mappings begin.
 * KIMAGE_VADDR - the virtual address of the start of the kernel image.
 * VA_BITS - the maximum number of bits for virtual addresses.
 */
#define VA_BITS			(CONFIG_ARM64_VA_BITS)
#define _PAGE_OFFSET(va)	(-(UL(1) << (va)))
#define PAGE_OFFSET		(_PAGE_OFFSET(VA_BITS))
#define KIMAGE_VADDR		(MODULES_END)
#define BPF_JIT_REGION_START	(KASAN_SHADOW_END)
#define BPF_JIT_REGION_SIZE	(SZ_128M)
#define BPF_JIT_REGION_END	(BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
#define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR		(BPF_JIT_REGION_END)
#define MODULES_VSIZE		(SZ_128M)
#define VMEMMAP_START		(-VMEMMAP_SIZE - SZ_2M)
#define PCI_IO_END		(VMEMMAP_START - SZ_2M)
#define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP		(PCI_IO_START - SZ_2M)

#if VA_BITS > 48
#define VA_BITS_MIN		(48)
#else
#define VA_BITS_MIN		(VA_BITS)
#endif

#define _PAGE_END(va)		(-(UL(1) << ((va) - 1)))

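/*
 * Putting the macros above together for one concrete (assumed)
 * configuration -- VA_BITS == 48, 4 KB pages, CONFIG_KASAN disabled --
 * the TTBR1 address space looks roughly like this:
 *
 *   PAGE_OFFSET     0xffff000000000000   start of the linear map
 *   _PAGE_END(48)   0xffff800000000000   end of the linear map
 *   BPF JIT region  0xffff800000000000   + 128 MB
 *   modules         0xffff800008000000   + 128 MB
 *   KIMAGE_VADDR    0xffff800010000000   kernel image
 *   VMEMMAP_START   0xfffffdffffe00000   struct page array
 *
 * with the PCI I/O space and the fixmap tucked below VMEMMAP_START,
 * separated by 2 MB guard gaps.
 */
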
#define KERNEL_START		_text
#define KERNEL_END		_end

/*
 * Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
 * address space for the shadow region respectively. They can bloat the stack
 * significantly, so double the (minimum) stack size when they are in use.
 */
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_END	((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
					+ KASAN_SHADOW_OFFSET)
#define KASAN_THREAD_SHIFT	1
#else
#define KASAN_THREAD_SHIFT	0
#define KASAN_SHADOW_END	(_PAGE_END(VA_BITS_MIN))
#endif /* CONFIG_KASAN */

#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)

/*
 * VMAP'd stacks are allocated at page granularity, so we must ensure that
 * such stacks are a whole multiple of the page size.
 */
#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT		PAGE_SHIFT
#else
#define THREAD_SHIFT		MIN_THREAD_SHIFT
#endif

#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
#endif

#define THREAD_SIZE		(UL(1) << THREAD_SHIFT)

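/*
 * Worked examples of the resulting stack sizes (configurations assumed):
 * - 4 KB pages, no KASAN:  THREAD_SHIFT == 14 -> 16 KB stacks,
 *   THREAD_SIZE_ORDER == 2.
 * - 4 KB pages, KASAN:     THREAD_SHIFT == 15 -> 32 KB stacks.
 * - 64 KB pages, VMAP'd:   MIN_THREAD_SHIFT (14) < PAGE_SHIFT (16), so
 *   THREAD_SHIFT == 16 -> one whole 64 KB page per stack.
 */
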
/*
 * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
 * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
 * assembly.
 */
#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN		(2 * THREAD_SIZE)
#else
#define THREAD_ALIGN		THREAD_SIZE
#endif

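/*
 * Sketch of why the single-bit test works (assuming CONFIG_VMAP_STACK):
 * a stack of THREAD_SIZE bytes sits at a base aligned to
 * 2 * THREAD_SIZE, so bit THREAD_SHIFT of the base is clear, and it
 * stays clear for every valid sp in [base, base + THREAD_SIZE). Once sp
 * overflows into the guard region below the base, that bit reads as 1,
 * so the entry code can detect overflow by testing one bit rather than
 * performing a full range comparison.
 */
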
#define IRQ_STACK_SIZE		THREAD_SIZE

#define OVERFLOW_STACK_SIZE	SZ_4K

/*
 * Alignment of kernel segments (e.g. .text, .data).
 */
#if defined(CONFIG_DEBUG_ALIGN_RODATA)
/*
 *  4 KB granule:   1 level 2 entry
 * 16 KB granule: 128 level 3 entries, with contiguous bit
 * 64 KB granule:  32 level 3 entries, with contiguous bit
 */
#define SEGMENT_ALIGN		SZ_2M
#else
/*
 *  4 KB granule:  16 level 3 entries, with contiguous bit
 * 16 KB granule:   4 level 3 entries, without contiguous bit
 * 64 KB granule:   1 level 3 entry
 */
#define SEGMENT_ALIGN		SZ_64K
#endif

/*
 * Memory types available.
 */
#define MT_DEVICE_nGnRnE	0
#define MT_DEVICE_nGnRE		1
#define MT_DEVICE_GRE		2
#define MT_NORMAL_NC		3
#define MT_NORMAL		4
#define MT_NORMAL_WT		5

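/*
 * Note: these MT_* values are not architectural encodings themselves;
 * they are indices into the eight MAIR_EL1 attribute fields programmed
 * at boot, and page table entries select one of those slots.
 */
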
/*
 * Memory types for Stage-2 translation
 */
#define MT_S2_NORMAL		0xf
#define MT_S2_DEVICE_nGnRE	0x1

/*
 * Memory types for Stage-2 translation when ID_AA64MMFR2_EL1.FWB is 0b0001.
 * Stage-2 then enforces Normal-WB and Device-nGnRE.
 */
#define MT_S2_FWB_NORMAL	6
#define MT_S2_FWB_DEVICE_nGnRE	1

#ifdef CONFIG_ARM64_4K_PAGES
#define IOREMAP_MAX_ORDER	(PUD_SHIFT)
#else
#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
#endif

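/*
 * Example (granules assumed): with a 4 KB granule PUD_SHIFT is 30, so
 * ioremap() may use naturally aligned mappings of up to 1 GiB; with a
 * 64 KB granule PMD_SHIFT is 29, capping this at 512 MiB.
 */
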
#ifndef __ASSEMBLY__
extern u64			vabits_actual;
#define PAGE_END		(_PAGE_END(vabits_actual))

#include <linux/bitops.h>
#include <linux/mmdebug.h>

extern s64			physvirt_offset;
extern s64			memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })

/* the virtual base of the kernel image (minus TEXT_OFFSET) */
extern u64			kimage_vaddr;

/* the offset between the kernel virtual and physical mappings */
extern u64			kimage_voffset;

static inline unsigned long kaslr_offset(void)
{
	return kimage_vaddr - KIMAGE_VADDR;
}

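/*
 * Usage note: with KASLR active, kimage_vaddr holds the randomized
 * runtime base of the kernel image, so kaslr_offset() yields the random
 * displacement applied at boot; it is 0 when no randomization took
 * place.
 */
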
/*
 * Allow all memory at the discovery stage. We will clip it later.
 */
#define MIN_MEMBLOCK_ADDR	0
#define MAX_MEMBLOCK_ADDR	U64_MAX

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view.  We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)

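/*
 * For example (values assumed, not mandated): a platform whose DRAM
 * starts at physical address 0x40000000 with 4 KB pages ends up with
 * PHYS_OFFSET == 0x40000000 and PHYS_PFN_OFFSET == 0x40000.
 */
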
/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define __untagged_addr(addr)	\
	((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))

#define untagged_addr(addr)	({					\
	u64 __addr = (__force u64)(addr);				\
	__addr &= __untagged_addr(__addr);				\
	(__force __typeof__(addr))__addr;				\
})

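/*
 * A worked example of the sign-extension trick above: a tagged user
 * pointer such as 0x5a000000aabbccdd has bit 55 clear, so
 * __untagged_addr() replicates that 0 into bits 63:56 and the AND in
 * untagged_addr() strips the 0x5a tag, yielding 0x00000000aabbccdd.
 * Kernel pointers have bit 55 set, making the mask all-ones there, so
 * they pass through unchanged (including any KASAN tag they carry).
 */
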
#ifdef CONFIG_KASAN_SW_TAGS
#define __tag_shifted(tag)	((u64)(tag) << 56)
#define __tag_reset(addr)	__untagged_addr(addr)
#define __tag_get(addr)		(__u8)((u64)(addr) >> 56)
#else
#define __tag_shifted(tag)	0UL
#define __tag_reset(addr)	(addr)
#define __tag_get(addr)		0
#endif /* CONFIG_KASAN_SW_TAGS */

static inline const void *__tag_set(const void *addr, u8 tag)
{
	u64 __addr = (u64)addr & ~__tag_shifted(0xff);
	return (const void *)(__addr | __tag_shifted(tag));
}

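/*
 * Illustrative usage: under CONFIG_KASAN_SW_TAGS, __tag_set(ptr, 0xf2)
 * returns ptr with 0xf2 in bits 63:56; without it, __tag_shifted() is
 * 0UL and its complement is all-ones, so the pointer comes back
 * unmodified.
 */
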
/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */

/*
 * The linear kernel range starts at the bottom of the virtual address
 * space. Testing the top bit for the start of the region is a
 * sufficient check and avoids having to worry about the tag.
 */
#define __is_lm_address(addr)	(!(((u64)(addr)) & BIT(vabits_actual - 1)))

#define __lm_to_phys(addr)	((addr) + physvirt_offset)
#define __kimg_to_phys(addr)	((addr) - kimage_voffset)

#define __virt_to_phys_nodebug(x) ({					\
	phys_addr_t __x = (phys_addr_t)(__tag_reset(x));		\
	__is_lm_address(__x) ? __lm_to_phys(__x) : __kimg_to_phys(__x);	\
})

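/*
 * Illustration of the two translation paths (example numbers assumed):
 * physvirt_offset is set up at boot as PHYS_OFFSET - PAGE_OFFSET, so a
 * linear-map VA translates as VA + physvirt_offset, while a kernel
 * image VA translates as VA - kimage_voffset. E.g. with
 * PHYS_OFFSET == 0x40000000 and PAGE_OFFSET == 0xffff000000000000,
 * __lm_to_phys(0xffff000000001000) == 0x40001000.
 */
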
#define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */

#define __phys_to_virt(x)	((unsigned long)((x) - physvirt_offset))
#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

/*
 * Note: Drivers should NOT use these.  They give the wrong translation
 * for DMA addresses.  Use the driver DMA support instead - see
 * dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)(__phys_to_virt(x));
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_to_pfn(x)		__phys_to_pfn(__virt_to_phys((unsigned long)(x)))
#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))

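/*
 * Usage note: __pa() copes with both linear-map and kernel-image
 * addresses (via __virt_to_phys()), but __va() always produces a
 * linear-map address. Round-tripping a kimage symbol through
 * __va(__pa(sym)) therefore yields its linear alias, not the original
 * pointer.
 */
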
/*
 *  virt_to_page(x)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(x)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
#define virt_to_page(x)		pfn_to_page(virt_to_pfn(x))
#else
#define page_to_virt(x)	({						\
	__typeof__(x) __page = (x);					\
	u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
	u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE);			\
	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})

#define virt_to_page(x)	({						\
	u64 __idx = (__tag_reset((u64)(x)) - PAGE_OFFSET) / PAGE_SIZE;	\
	u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page));	\
	(struct page *)__addr;						\
})
#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */

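/*
 * Sketch of the vmemmap round trip above: virt_to_page() computes the
 * page's index relative to PAGE_OFFSET and scales it by
 * sizeof(struct page) into the array at VMEMMAP_START; page_to_virt()
 * inverts this, re-applying any KASAN tag recorded for the page. Note
 * that these macros index the array from PAGE_OFFSET, not from PFN 0.
 */
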
#define virt_addr_valid(addr)	({					\
	__typeof__(addr) __addr = (addr);				\
	__is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr));	\
})

#endif /* !__ASSEMBLY__ */

/*
 * Given that the GIC architecture permits ITS implementations that can only be
 * configured with an LPI table address once, GICv3 systems with many CPUs may
 * end up reserving a lot of different regions after a kexec for their LPI
 * tables (one per CPU), as we are forced to reuse the same memory after kexec
 * (and thus reserve it persistently with EFI beforehand).
 */
#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
# define INIT_MEMBLOCK_RESERVED_REGIONS	(INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
#endif

#include <asm-generic/memory_model.h>

#endif /* __ASM_MEMORY_H */