// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>
#include <linux/sizes.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
#include <asm/xen/swiotlb-xen.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

/*
 * If the corresponding config options are enabled, we create both ZONE_DMA
 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
 * unless it is restricted on specific platforms (e.g. to 30 bits on the
 * Raspberry Pi 4). In that case, ZONE_DMA32 covers the rest of the 32-bit
 * addressable memory; otherwise it is empty.
 */
phys_addr_t arm64_dma_phys_limit __ro_after_init;

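/*
 * Illustrative usage (not part of this file): reserve_crashkernel() below is
 * driven by the "crashkernel=" command line parameter, e.g.
 *
 *   crashkernel=512M              let the kernel choose a 2MB-aligned base
 *   crashkernel=512M@0x90000000   reserve 512MB at a fixed base address
 *
 * The fixed-base form must name a 2MB-aligned region of usable, unreserved
 * memory, or the reservation below is refused.
 */
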
#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area specified by the "crashkernel="
 * kernel command line parameter. The reserved memory is used by the dump
 * capture kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
						    crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_DUMP
/* Read the "linux,elfcorehdr" property from the /chosen node of the FDT */
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image and is used by a
 * dump capture kernel to access the system memory of the primary kernel.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */

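/*
 * Worked example for the zone sizing below (illustrative, not taken from
 * this file): on a Raspberry Pi 4 the DT reports a 30-bit DMA limit, so
 * zone_dma_bits becomes 30 and ZONE_DMA covers [0, 1GB). ZONE_DMA32 then
 * covers the rest of the 32-bit range, [1GB, 4GB), and ZONE_NORMAL
 * everything above. On platforms without such a restriction, ZONE_DMA
 * spans the whole 32-bit range and ZONE_DMA32 ends up empty.
 */
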
/*
 * Return the maximum physical address for a zone accessible by the given
 * bits limit. If DRAM starts above 32-bit, expand the zone to the maximum
 * available memory, otherwise cap it at 32-bit.
 */
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
	phys_addr_t phys_start = memblock_start_of_DRAM();

	if (phys_start > U32_MAX)
		zone_mask = PHYS_ADDR_MAX;
	else if (phys_start > zone_mask)
		zone_mask = U32_MAX;

	return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
	unsigned int __maybe_unused acpi_zone_dma_bits;
	unsigned int __maybe_unused dt_zone_dma_bits;
	phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);

#ifdef CONFIG_ZONE_DMA
	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = dma32_phys_limit;
#endif
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = PHYS_MASK + 1;
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init(max_zone_pfns);
}

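/*
 * Illustrative example of the aliasing check in pfn_valid() below: with
 * 4K pages (PAGE_SHIFT == 12) and a 64-bit unsigned long, the shift in
 * PFN_PHYS() silently discards the top 12 bits of the pfn. A bogus pfn
 * such as (1UL << 52) | 0x100 thus yields PFN_PHYS() == 0x100000, and
 * PHYS_PFN() of that is 0x100 != pfn, so the pfn is correctly rejected.
 */
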
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = PFN_PHYS(pfn);
	struct mem_section *ms;

	/*
	 * Ensure the upper PAGE_SHIFT bits are clear in the
	 * pfn. Else it might lead to false positives when
	 * some of the upper bits are set, but the lower bits
	 * match a valid pfn.
	 */
	if (PHYS_PFN(addr) != pfn)
		return 0;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;

	ms = __pfn_to_section(pfn);
	if (!valid_section(ms))
		return 0;

	/*
	 * ZONE_DEVICE memory does not have memblock entries, so a
	 * memblock_is_map_memory() check for ZONE_DEVICE based
	 * addresses will always fail. Even normal hotplugged memory
	 * will never have the MEMBLOCK_NOMAP flag set in its memblock
	 * entries. Skip the memblock search for all non-early memory
	 * sections, which covers all of hotplug memory, both normal
	 * and ZONE_DEVICE based.
	 */
	if (!early_section(ms))
		return pfn_section_valid(ms, pfn);

	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);

static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT, e.g. "mem=512M" caps
 * usable memory at 512MB (rounded down to a page boundary).
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

/* Read the "linux,usable-memory-range" property from the /chosen node */
static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}

void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
		pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel, which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	/*
	 * Apply the memory limit if it was set. Since the kernel may be
	 * loaded high up in memory, add back the kernel region that must
	 * be accessible via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if it results in the
		 * initrd becoming inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

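	/*
	 * Illustrative arithmetic for the KASLR adjustment below (not part
	 * of the original comments): memstart_offset_seed is a 16-bit
	 * value, so ((range * seed) >> 16) selects roughly seed/65536 of
	 * the available slack, in units of ARM64_MEMSTART_ALIGN. A seed of
	 * 0x8000, for example, offsets physical memory within the linear
	 * region by about half of the spare linear VA space.
	 */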
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		int parange = cpuid_feature_extract_unsigned_field(
					mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
		s64 range = linear_region_size -
			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the physical memory can
		 * span, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_stext), _end - _stext);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arch_numa_init();

	/*
	 * Must be done after arch_numa_init(), which calls numa_init() to
	 * initialize node_online_map; that map is used by
	 * hugetlb_cma_reserve() when allocating the required CMA size
	 * across online nodes.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
	arm64_hugetlb_cma_reserve();
#endif

	dma_pernuma_cma_reserve();

	kvm_hyp_reserve();

	/*
	 * sparse_init() tries to allocate memory from memblock, so it must
	 * be done after the fixed reservations.
	 */
	sparse_init();
	zone_sizes_init(min, max);

	/*
	 * Reserve the CMA area after arm64_dma_phys_limit was initialised.
	 */
	dma_contiguous_reserve(arm64_dma_phys_limit);

	/*
	 * request_standard_resources() depends on the crashkernel memory
	 * being reserved, so do it here.
	 */
	reserve_crashkernel();

	memblock_dump_all();
}

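/*
 * Illustrative example (not from the original comments): on a board with
 * 8GB of RAM but a 4GB arm64_dma_phys_limit, max_pfn exceeds
 * PFN_DOWN(arm64_dma_phys_limit), so mem_init() below sets up the swiotlb
 * bounce buffer for devices that cannot address all of memory.
 */
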
/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > PFN_DOWN(arm64_dma_phys_limit))
		swiotlb_init(1);
	else if (!xen_swiotlb_detect())
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);

	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	/*
	 * Check boundaries twice: some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   POISON_FREE_INITMEM, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	vunmap_range((u64)__init_begin, (u64)__init_end);
}

void dump_mem_limit(void)
{
	if (memory_limit != PHYS_ADDR_MAX)
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	else
		pr_emerg("Memory Limit: none\n");
}