xref: /openbmc/linux/arch/arm64/mm/init.c (revision 67d96729)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

/*
 * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
 * memory, as some devices, such as the Raspberry Pi 4, have peripherals with
 * this limited view of the memory. ZONE_DMA32 covers the rest of the 32-bit
 * addressable memory area.
 */
phys_addr_t arm64_dma_phys_limit __ro_after_init;
static phys_addr_t arm64_dma32_phys_limit __ro_after_init;

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command-line parameter. The reserved memory is used by the dump-capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* The current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* The user specified the base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

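/*
 * Illustrative usage (an example, not taken from this file): booting with
 * "crashkernel=512M" takes the crash_base == 0 path above and lets
 * memblock pick any 2MB-aligned base below arm64_dma32_phys_limit, while
 * "crashkernel=512M@0x60000000" requests that fixed base and fails with a
 * warning if the region is not memory, is already reserved, or is
 * misaligned.
 */
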
#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

/*
 * reserve_elfcorehdr() - reserves memory for the ELF core header
 *
 * This function reserves the memory occupied by an ELF core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image, and a dump-capture
 * kernel uses it to access the primary kernel's memory.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */

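/*
 * For illustration (hypothetical values): kexec-tools typically describes
 * the header to the capture kernel through a /chosen property such as
 *
 *	linux,elfcorehdr = <0x9 0xfffe0000 0x0 0x10000>;
 *
 * encoded with dt_root_addr_cells/dt_root_size_cells wide cells, which is
 * exactly the layout early_init_dt_scan_elfcorehdr() parses above.
 */
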
/*
 * Return the maximum physical address for a zone accessible by the given
 * address-bits limit. If DRAM starts above the 32-bit boundary, expand the
 * zone to cover all available memory; if DRAM merely starts above the zone's
 * own limit, cap the zone at the 32-bit boundary instead.
 */
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
	phys_addr_t phys_start = memblock_start_of_DRAM();

	if (phys_start > U32_MAX)
		zone_mask = PHYS_ADDR_MAX;
	else if (phys_start > zone_mask)
		zone_mask = U32_MAX;

	return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}

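/*
 * Worked example (hypothetical memory map): with zone_bits == 30 and DRAM
 * spanning 0x80000000 - 0x100000000, phys_start (0x80000000) exceeds
 * DMA_BIT_MASK(30) but not U32_MAX, so zone_mask is widened to U32_MAX and
 * the function returns 0x100000000, i.e. the zone grows to cover all
 * 32-bit addressable memory.
 */
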
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
	unsigned int __maybe_unused acpi_zone_dma_bits;
	unsigned int __maybe_unused dt_zone_dma_bits;

#ifdef CONFIG_ZONE_DMA
	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init(max_zone_pfns);
}

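/*
 * Example of the min3() above, assuming a Raspberry Pi 4 style devicetree
 * where of_dma_get_max_cpu_address() returns 0x3fffffff: fls64() yields
 * 30, zone_dma_bits becomes min3(32U, 30, acpi_zone_dma_bits) == 30, and
 * ZONE_DMA covers the first 1G as described at the top of this file.
 */
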
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = pfn << PAGE_SHIFT;

	if ((addr >> PAGE_SHIFT) != pfn)
		return 0;

#ifdef CONFIG_SPARSEMEM
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;

	if (!valid_section(__pfn_to_section(pfn)))
		return 0;
#endif
	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);

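/*
 * A note on the first check in pfn_valid() above: shifting the pfn up and
 * back down catches values whose physical address cannot be represented.
 * E.g. with PAGE_SHIFT == 12 and a 64-bit phys_addr_t, a (bogus) pfn of
 * 1UL << 52 shifts to 0, the round trip yields 0 != pfn, and the pfn is
 * rejected instead of aliasing physical address 0.
 */
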
static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the usable memory size; the limit is given by the "mem=" kernel
 * command-line parameter (the command line itself arrives via the FDT).
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

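/*
 * Illustrative usage: booting with "mem=2G" makes memparse() (which
 * accepts K/M/G suffixes) return 0x80000000, so memory_limit becomes 2G
 * and arm64_memblock_init() later clips the available RAM to that size.
 */
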
static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}

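/*
 * For illustration (hypothetical values): a kdump capture kernel is
 * typically booted with a /chosen property such as
 *
 *	linux,usable-memory-range = <0x0 0x60000000 0x0 0x20000000>;
 *
 * which the scan above turns into memblock_cap_memory_range(0x60000000,
 * 0x20000000), restricting the capture kernel to its reserved window.
 */
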
void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
		pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */
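	/*
	 * Worked arithmetic: _PAGE_OFFSET(va) is -(1 << va), so
	 * _PAGE_OFFSET(48) - _PAGE_OFFSET(52) == (1 << 52) - (1 << 48).
	 * Subtracting that amount below shifts the linear map contents
	 * into the 48-bit addressable upper part of the region.
	 */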
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if the removal made
		 * the initrd inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		int parange = cpuid_feature_extract_unsigned_field(
					mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
		s64 range = linear_region_size -
			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the physical memory can
		 * span, randomize the linear region as well.
		 */
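		/*
		 * Example with made-up numbers: if the spare range is 16G
		 * and ARM64_MEMSTART_ALIGN is 1G, range becomes 16 after
		 * the division below, and the 16-bit seed then selects one
		 * of 16 possible 1G-aligned offsets via
		 * (range * memstart_offset_seed) >> 16.
		 */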
		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_stext), _end - _stext);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		arm64_dma32_phys_limit = max_zone_phys(32);
	else
		arm64_dma32_phys_limit = PHYS_MASK + 1;

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

	dma_contiguous_reserve(arm64_dma32_phys_limit);
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arch_numa_init();

	/*
	 * This must be done after arch_numa_init(), which calls numa_init()
	 * to initialize node_online_map; hugetlb_cma_reserve() uses that map
	 * when spreading the requested CMA size across the online nodes.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
	arm64_hugetlb_cma_reserve();
#endif

	dma_pernuma_cma_reserve();

	/*
	 * sparse_init() tries to allocate memory from memblock, so it must
	 * be done after the fixed reservations.
	 */
	sparse_init();
	zone_sizes_init(min, max);

	/*
	 * request_standard_resources() depends on crashkernel's memory being
	 * reserved, so do it here.
	 */
	reserve_crashkernel();

	memblock_dump_all();
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free.  This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
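	/*
	 * A note on the check below: bounce buffering is only needed when
	 * some RAM lies above the narrowest DMA limit (ZONE_DMA if
	 * configured, otherwise ZONE_DMA32), or when the user forced
	 * swiotlb on via the command line.
	 */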
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);

	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: some fundamental inconsistencies can
	 * already be detected at build time.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   POISON_FREE_INITMEM, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

void dump_mem_limit(void)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
}