/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_MEMORY_H
#define __ASM_MEMORY_H

#include <linux/const.h>
#include <linux/sizes.h>
#include <asm/page-def.h>

/*
 * Size of the PCI I/O space. This must remain a power of two so that
 * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
 */
#define PCI_IO_SIZE		SZ_16M

/*
 * VMEMMAP_SIZE - allows the whole linear region to be covered by
 *                a struct page array
 *
 * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
 * needs to cover the memory region from the beginning of the 52-bit
 * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
 * keep a constant PAGE_OFFSET and "fallback" to using the higher end
 * of the VMEMMAP where 52-bit support is not available in hardware.
 */
#define VMEMMAP_SHIFT		(PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE		((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) >> VMEMMAP_SHIFT)

/*
 * PAGE_OFFSET - the virtual address of the start of the linear map, at the
 *               start of the TTBR1 address space.
 * PAGE_END - the end of the linear map, where all other kernel mappings begin.
 * KIMAGE_VADDR - the virtual address of the start of the kernel image.
 * VA_BITS - the maximum number of bits for virtual addresses.
 */
#define VA_BITS			(CONFIG_ARM64_VA_BITS)
#define _PAGE_OFFSET(va)	(-(UL(1) << (va)))
#define PAGE_OFFSET		(_PAGE_OFFSET(VA_BITS))
#define KIMAGE_VADDR		(MODULES_END)
#define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR		(_PAGE_END(VA_BITS_MIN))
#define MODULES_VSIZE		(SZ_2G)
#define VMEMMAP_START		(-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
#define VMEMMAP_END		(VMEMMAP_START + VMEMMAP_SIZE)
#define PCI_IO_END		(VMEMMAP_START - SZ_8M)
#define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP		(VMEMMAP_START - SZ_32M)

#if VA_BITS > 48
#define VA_BITS_MIN		(48)
#else
#define VA_BITS_MIN		(VA_BITS)
#endif

#define _PAGE_END(va)		(-(UL(1) << ((va) - 1)))

#define KERNEL_START		_text
#define KERNEL_END		_end

/*
 * Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
 * address space for the shadow region respectively. They can bloat the stack
 * significantly, so double the (minimum) stack size when they are in use.
 */
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_END	((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
					+ KASAN_SHADOW_OFFSET)
#define PAGE_END		(KASAN_SHADOW_END - (1UL << (vabits_actual - KASAN_SHADOW_SCALE_SHIFT)))
#define KASAN_THREAD_SHIFT	1
#else
#define KASAN_THREAD_SHIFT	0
#define PAGE_END		(_PAGE_END(VA_BITS_MIN))
#endif /* CONFIG_KASAN */

#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)

/*
 * VMAP'd stacks are allocated at page granularity, so we must ensure that such
 * stacks are a multiple of page size.
 */
#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT		PAGE_SHIFT
#else
#define THREAD_SHIFT		MIN_THREAD_SHIFT
#endif
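/*
 * Worked example (illustrative, not a definition): with 4 KB pages
 * (PAGE_SHIFT == 12) and KASAN disabled, MIN_THREAD_SHIFT is 14, which
 * is not smaller than PAGE_SHIFT, so THREAD_SHIFT stays 14 and kernel
 * stacks are 16 KB. With 64 KB pages (PAGE_SHIFT == 16) and
 * CONFIG_VMAP_STACK, MIN_THREAD_SHIFT (14) is smaller than PAGE_SHIFT,
 * so THREAD_SHIFT is bumped to 16 and each stack occupies exactly one
 * 64 KB page.
 */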
#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
#endif

#define THREAD_SIZE		(UL(1) << THREAD_SHIFT)

/*
 * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
 * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
 * assembly.
 */
#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN		(2 * THREAD_SIZE)
#else
#define THREAD_ALIGN		THREAD_SIZE
#endif

#define IRQ_STACK_SIZE		THREAD_SIZE

#define OVERFLOW_STACK_SIZE	SZ_4K

/*
 * With the minimum frame size of [x29, x30], exactly half the combined
 * sizes of the hyp and overflow stacks is the maximum size needed to
 * save the unwound stacktrace, plus an additional entry to delimit the
 * end.
 */
#define NVHE_STACKTRACE_SIZE	((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))

/*
 * Alignment of kernel segments (e.g. .text, .data).
 *
 *  4 KB granule: 16 level 3 entries, with contiguous bit
 * 16 KB granule:  4 level 3 entries, without contiguous bit
 * 64 KB granule:  1 level 3 entry
 */
#define SEGMENT_ALIGN		SZ_64K

/*
 * Memory types available.
 *
 * IMPORTANT: MT_NORMAL must be index 0 since vm_get_page_prot() may 'or' in
 *	      the MT_NORMAL_TAGGED memory type for PROT_MTE mappings. Note
 *	      that protection_map[] only contains MT_NORMAL attributes.
 */
#define MT_NORMAL		0
#define MT_NORMAL_TAGGED	1
#define MT_NORMAL_NC		2
#define MT_DEVICE_nGnRnE	3
#define MT_DEVICE_nGnRE		4

/*
 * Memory types for Stage-2 translation
 */
#define MT_S2_NORMAL		0xf
#define MT_S2_DEVICE_nGnRE	0x1

/*
 * Memory types for Stage-2 translation when ID_AA64MMFR2_EL1.FWB is 0001
 * Stage-2 enforces Normal-WB and Device-nGnRE
 */
#define MT_S2_FWB_NORMAL	6
#define MT_S2_FWB_DEVICE_nGnRE	1

#ifdef CONFIG_ARM64_4K_PAGES
#define IOREMAP_MAX_ORDER	(PUD_SHIFT)
#else
#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
#endif

/*
 * Open-coded (swapper_pg_dir - reserved_pg_dir) as this cannot be calculated
 * until link time.
 */
#define RESERVED_SWAPPER_OFFSET	(PAGE_SIZE)

/*
 * Open-coded (swapper_pg_dir - tramp_pg_dir) as this cannot be calculated
 * until link time.
 */
#define TRAMP_SWAPPER_OFFSET	(2 * PAGE_SIZE)

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/mmdebug.h>
#include <linux/types.h>
#include <asm/boot.h>
#include <asm/bug.h>

#if VA_BITS > 48
extern u64 vabits_actual;
#else
#define vabits_actual		((u64)VA_BITS)
#endif

extern s64 memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
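/*
 * The VM_BUG_ON() above is a cheap use-before-init check: memstart_addr
 * starts out as -1 (an odd value, see arch/arm64/mm/init.c), while any
 * real base of RAM is at least page-aligned, so bit 0 being set means
 * PHYS_OFFSET was consulted before arm64_memblock_init() determined the
 * actual start of memory.
 */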
/* the virtual base of the kernel image */
extern u64 kimage_vaddr;

/* the offset between the kernel virtual and physical mappings */
extern u64 kimage_voffset;

static inline unsigned long kaslr_offset(void)
{
	return kimage_vaddr - KIMAGE_VADDR;
}

#ifdef CONFIG_RANDOMIZE_BASE
void kaslr_init(void);
static inline bool kaslr_enabled(void)
{
	extern bool __kaslr_is_enabled;
	return __kaslr_is_enabled;
}
#else
static inline void kaslr_init(void) { }
static inline bool kaslr_enabled(void) { return false; }
#endif

/*
 * Allow all memory at the discovery stage. We will clip it later.
 */
#define MIN_MEMBLOCK_ADDR	0
#define MAX_MEMBLOCK_ADDR	U64_MAX

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view. We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define __untagged_addr(addr)	\
	((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))

#define untagged_addr(addr)	({					\
	u64 __addr = (__force u64)(addr);				\
	__addr &= __untagged_addr(__addr);				\
	(__force __typeof__(addr))__addr;				\
})

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
#define __tag_shifted(tag)	((u64)(tag) << 56)
#define __tag_reset(addr)	__untagged_addr(addr)
#define __tag_get(addr)		(__u8)((u64)(addr) >> 56)
#else
#define __tag_shifted(tag)	0UL
#define __tag_reset(addr)	(addr)
#define __tag_get(addr)		0
#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline const void *__tag_set(const void *addr, u8 tag)
{
	u64 __addr = (u64)addr & ~__tag_shifted(0xff);
	return (const void *)(__addr | __tag_shifted(tag));
}

#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tag_checks_sync()		mte_enable_kernel_sync()
#define arch_enable_tag_checks_async()		mte_enable_kernel_async()
#define arch_enable_tag_checks_asymm()		mte_enable_kernel_asymm()
#define arch_suppress_tag_checks_start()	mte_enable_tco()
#define arch_suppress_tag_checks_stop()		mte_disable_tco()
#define arch_force_async_tag_fault()		mte_check_tfsr_exit()
#define arch_get_random_tag()			mte_get_random_tag()
#define arch_get_mem_tag(addr)			mte_get_mem_tag(addr)
#define arch_set_mem_tag_range(addr, size, tag, init)	\
			mte_set_mem_tag_range((addr), (size), (tag), (init))
#endif /* CONFIG_KASAN_HW_TAGS */

/*
 * Physical vs virtual RAM address space conversion. These are
 * private definitions which should NOT be used outside memory.h
 * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */

/*
 * Check whether an arbitrary address is within the linear map, which
 * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
 * kernel's TTBR1 address range.
 */
#define __is_lm_address(addr)	(((u64)(addr) - PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
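/*
 * The single unsigned comparison above intentionally relies on wrap-around:
 * for an address below PAGE_OFFSET, (u64)(addr) - PAGE_OFFSET wraps to a
 * huge value that fails the bound check, so one compare covers both the
 * lower and the upper limit of the linear map.
 */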
#define __lm_to_phys(addr)	(((addr) - PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr)	((addr) - kimage_voffset)

#define __virt_to_phys_nodebug(x) ({					\
	phys_addr_t __x = (phys_addr_t)(__tag_reset(x));		\
	__is_lm_address(__x) ? __lm_to_phys(__x) : __kimg_to_phys(__x);	\
})

#define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */

#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

/*
 * Note: Drivers should NOT use these. They are the wrong
 * translation for translating DMA addresses. Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)(__phys_to_virt(x));
}

/* Needed already here for resolving __phys_to_pfn() in virt_to_pfn() */
#include <asm-generic/memory_model.h>

static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return __phys_to_pfn(virt_to_phys(kaddr));
}
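/*
 * Usage sketch (illustrative only; 'kbuf' is a hypothetical linear-map
 * pointer, e.g. from kmalloc()): a linear-map address round-trips through
 * its physical address, while kernel-image symbols should go through
 * __pa_symbol() below, which translates via kimage_voffset:
 *
 *	phys_addr_t pa  = virt_to_phys(kbuf);
 *	void *va        = phys_to_virt(pa);		// va == kbuf
 *	phys_addr_t sym = __pa_symbol(KERNEL_START);	// image, not linear map
 */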
/*
 * Note: Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))

/*
 * virt_to_page(x)	convert a _valid_ virtual address to struct page *
 * virt_addr_valid(x)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

#if defined(CONFIG_DEBUG_VIRTUAL)
#define page_to_virt(x)	({						\
	__typeof__(x) __page = x;					\
	void *__addr = __va(page_to_phys(__page));			\
	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})
#define virt_to_page(x)		pfn_to_page(virt_to_pfn(x))
#else
#define page_to_virt(x)	({						\
	__typeof__(x) __page = x;					\
	u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
	u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE);			\
	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})

#define virt_to_page(x)	({						\
	u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE;	\
	u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page));	\
	(struct page *)__addr;						\
})
#endif /* CONFIG_DEBUG_VIRTUAL */

#define virt_addr_valid(addr)	({					\
	__typeof__(addr) __addr = __tag_reset(addr);			\
	__is_lm_address(__addr) && pfn_is_map_memory(virt_to_pfn(__addr));	\
})

void dump_mem_limit(void);
#endif /* !__ASSEMBLY__ */

/*
 * Given that the GIC architecture permits ITS implementations that can only
 * be configured with an LPI table address once, GICv3 systems with many CPUs
 * may end up reserving a lot of different regions after a kexec for their LPI
 * tables (one per CPU), as we are forced to reuse the same memory after kexec
 * (and thus reserve it persistently with EFI beforehand).
 */
#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
# define INIT_MEMBLOCK_RESERVED_REGIONS	(INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
#endif

/*
 * Memory regions marked with the MEMBLOCK_NOMAP flag (for example, memory of
 * the EFI_UNUSABLE_MEMORY type) may divide a contiguous memory block into
 * multiple parts. As a result, the number of memory regions can be large.
 */
#ifdef CONFIG_EFI
#define INIT_MEMBLOCK_MEMORY_REGIONS	(INIT_MEMBLOCK_REGIONS * 8)
#endif

#include <asm-generic/memory_model.h>

#endif /* __ASM_MEMORY_H */