xref: /openbmc/linux/arch/arm64/mm/init.c (revision 78700c0a)
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

#include "mm.h"

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __read_mostly = -1;
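/* Highest usable physical address for ZONE_DMA; set in arm64_memblock_init(). */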
phys_addr_t arm64_dma_phys_limit __read_mostly;

#ifdef CONFIG_BLK_DEV_INITRD
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		initrd_start = start;
		initrd_end = start + size;
	}
	return 0;
}
early_param("initrd", early_initrd);
#endif

/*
 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t __init max_zone_dma_phys(void)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);

	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}
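
/*
 * Worked example (hypothetical layout): if DRAM starts at
 * 0x8000000000 (512 GB), offset keeps the bits above 4 GB and the
 * limit becomes min(0x8100000000, memblock_end_of_DRAM()), i.e. the
 * first 4 GB of DRAM as seen through a 32-bit DMA offset.
 */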

#ifdef CONFIG_NUMA

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init_nodes(max_zone_pfns);
}

#else

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA
	max_dma = PFN_DOWN(arm64_dma_phys_limit);
	zone_size[ZONE_DMA] = max_dma - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA
		if (start < max_dma) {
			unsigned long dma_end = min(end, max_dma);

			zhole_size[ZONE_DMA] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);

			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}
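
/*
 * Worked example (hypothetical layout): with min at the 1 GB pfn,
 * max_dma at 4 GB and max at 8 GB, and banks [1 GB, 2 GB) and
 * [6 GB, 8 GB), zone_size[ZONE_DMA] covers [1 GB, 4 GB) and
 * zone_size[ZONE_NORMAL] covers [4 GB, 8 GB); the loop subtracts each
 * bank from the matching zhole_size[], leaving [2 GB, 4 GB) and
 * [4 GB, 6 GB) accounted as holes.
 */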

#endif /* CONFIG_NUMA */

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
{
}
#else
static void __init arm64_memory_present(void)
{
	struct memblock_region *reg;
	int nid = 0;

	for_each_memblock(memory, reg) {
#ifdef CONFIG_NUMA
		nid = reg->nid;
#endif
		memory_present(nid, memblock_region_memory_base_pfn(reg),
				memblock_region_memory_end_pfn(reg));
	}
}
#endif

static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);
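
/*
 * Example: booting with "mem=512M" caps usable memory at 512 MB;
 * memparse() accepts the usual K/M/G suffixes.
 */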

void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = -(s64)PAGE_OFFSET;

	/*
	 * Ensure that the linear region takes up exactly half of the kernel
	 * virtual address space. This way, we can distinguish a linear address
	 * from a kernel/module/vmalloc address by testing a single bit.
	 */
	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));
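
	/*
	 * For example, with VA_BITS == 48, PAGE_OFFSET is
	 * 0xffff800000000000 and linear_region_size is 2^47 bytes:
	 * exactly the bottom half of the kernel VA space.
	 */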

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
			ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		memblock_enforce_memory_limit(memory_limit);
		memblock_add(__pa(_text), (u64)(_end - _text));
	}
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
		/*
		 * Add back the memory we just removed if removing it has
		 * made the initrd inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(initrd_end) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			initrd_start = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
			range = range / ARM64_MEMSTART_ALIGN + 1;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}
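	/*
	 * The arithmetic above is a fixed-point scaling: the seed is a
	 * 16-bit value, so (range * memstart_offset_seed) >> 16 picks a
	 * whole number of ARM64_MEMSTART_ALIGN steps in [0, range), i.e.
	 * a random offset within the unused part of the linear region.
	 */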

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		memblock_reserve(initrd_start, initrd_end - initrd_start);

		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(initrd_start);
		initrd_end = __phys_to_virt(initrd_end);
	}
#endif

	early_init_fdt_scan_reserved_mem();

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		arm64_dma_phys_limit = max_zone_dma_phys();
	else
		arm64_dma_phys_limit = PHYS_MASK + 1;
	dma_contiguous_reserve(arm64_dma_phys_limit);

	memblock_allow_resize();
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;

	arm64_numa_init();
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
	memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
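
/*
 * Worked example (hypothetical layout): with 4 KB pages and two banks
 * [0, 1 GB) and [2 GB, 3 GB), the loop reaches the second bank with
 * prev_end at pfn 0x40000 and start at pfn 0x80000, so the struct
 * pages covering the [1 GB, 2 GB) hole are handed back to the boot
 * allocator via free_memmap().
 */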
#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free.  This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	swiotlb_init(1);

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	free_all_bootmem();

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLG(b, t) b, t, ((t) - (b)) >> 30
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
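/*
 * Each helper expands to the "begin, top, size" triple consumed by the
 * "0x%lx - 0x%lx   (%ld xB)" format strings below, with the size
 * scaled to KB, MB or GB; MLK_ROUNDUP rounds a partial KB up to a
 * whole one.
 */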

	pr_notice("Virtual kernel memory layout:\n");
#ifdef CONFIG_KASAN
	pr_cont("    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n",
		MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
#endif
	pr_cont("    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n",
		MLM(MODULES_VADDR, MODULES_END));
	pr_cont("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
		MLG(VMALLOC_START, VMALLOC_END));
	pr_cont("      .text : 0x%p - 0x%p   (%6ld KB)\n",
		MLK_ROUNDUP(_text, __start_rodata));
	pr_cont("    .rodata : 0x%p - 0x%p   (%6ld KB)\n",
		MLK_ROUNDUP(__start_rodata, _etext));
	pr_cont("      .init : 0x%p - 0x%p   (%6ld KB)\n",
		MLK_ROUNDUP(__init_begin, __init_end));
	pr_cont("      .data : 0x%p - 0x%p   (%6ld KB)\n",
		MLK_ROUNDUP(_sdata, _edata));
	pr_cont("       .bss : 0x%p - 0x%p   (%6ld KB)\n",
		MLK_ROUNDUP(__bss_start, __bss_stop));
	pr_cont("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
		MLK(FIXADDR_START, FIXADDR_TOP));
	pr_cont("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
		MLM(PCI_IO_START, PCI_IO_END));
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	pr_cont("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n",
		MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
	pr_cont("              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
		MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
		    (unsigned long)virt_to_page(high_memory)));
#endif
	pr_cont("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
		MLM(__phys_to_virt(memblock_start_of_DRAM()),
		    (unsigned long)high_memory));

#undef MLK
#undef MLM
#undef MLG
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32			> TASK_SIZE_64);
#endif

	/*
	 * Make sure we chose the upper bound of sizeof(struct page)
	 * correctly.
	 */
	BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(__va(__pa(__init_begin)), __va(__pa(__init_end)),
			   0, "unused kernel");
	fixup_init();
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd __initdata;

void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		free_reserved_area((void *)start, (void *)end, 0, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
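
/*
 * Example: passing "keepinitrd" on the kernel command line keeps the
 * initrd memory intact after boot instead of freeing it.
 */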
#endif

/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}

	return 0;
}

static struct notifier_block mem_limit_notifier = {
	.notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &mem_limit_notifier);
	return 0;
}
__initcall(register_mem_limit_dumper);