/* xref: /openbmc/linux/arch/arm64/mm/init.c (revision 5e29a910) */
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>

#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

#include "mm.h"

phys_addr_t memstart_addr __read_mostly = 0;
phys_addr_t arm64_dma_phys_limit __read_mostly;

#ifdef CONFIG_BLK_DEV_INITRD
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		initrd_start = (unsigned long)__va(start);
		initrd_end = (unsigned long)__va(start + size);
	}
	return 0;
}
early_param("initrd", early_initrd);
#endif
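/*
 * Note: early_initrd() above parses the "initrd=<start>,<size>" kernel
 * command line option, with both values accepted in the usual memparse()
 * forms (hex, or with K/M/G suffixes). For example, a hypothetical
 * "initrd=0x84000000,16M" would describe a 16MB image at physical
 * 0x84000000; the addresses are only converted to virtual ones here and
 * the range is actually reserved later in arm64_memblock_init().
 */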

/*
 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t max_zone_dma_phys(void)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}
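/*
 * Worked example (illustrative values only): if DRAM starts at physical
 * 0x80_0000_0000, the offset above is 0x80_0000_0000 and the ZONE_DMA
 * limit becomes that base plus 4GB, clamped to the end of DRAM - i.e. the
 * first 4GB window of RAM that 32-bit-only devices are assumed to reach
 * through a DMA offset.
 */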

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		max_dma = PFN_DOWN(arm64_dma_phys_limit);
		zone_size[ZONE_DMA] = max_dma - min;
	}
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

		if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA] -= dma_end - start;
		}

		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}
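/*
 * zhole_size starts out equal to each zone's full span and every memblock
 * region then subtracts the pages it actually covers, so what remains is
 * exactly the holes (non-RAM gaps) inside each zone that the page
 * allocator must not treat as usable memory.
 */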

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif
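/*
 * pfn_valid() is backed directly by the memblock memory map rather than a
 * separate bitmap: the pfn is shifted back to a physical address and is
 * considered valid only if it falls inside a registered memory region.
 */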

#ifndef CONFIG_SPARSEMEM
static void arm64_memory_present(void)
{
}
#else
static void arm64_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);
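/*
 * Note: early_mem() implements the standard "mem=" option; a hypothetical
 * "mem=512M" on the command line would cap usable RAM at 512MB (rounded
 * down to a page boundary by the PAGE_MASK above) before memblock is
 * trimmed in arm64_memblock_init().
 */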

void __init arm64_memblock_init(void)
{
	memblock_enforce_memory_limit(memory_limit);

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
#endif

	early_init_fdt_scan_reserved_mem();

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		arm64_dma_phys_limit = max_zone_dma_phys();
	else
		arm64_dma_phys_limit = PHYS_MASK + 1;
	dma_contiguous_reserve(arm64_dma_phys_limit);

	memblock_allow_resize();
	memblock_dump_all();
}
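/*
 * The ordering above matters: the "mem=" limit is applied first, the
 * kernel image and initrd are reserved next, then reservations described
 * in the device tree are honoured, and only after all of that is the CMA
 * area carved out below arm64_dma_phys_limit, so none of these
 * allocations can land on top of each other.
 */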

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
	max_pfn = max_low_pfn = max;
}
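/*
 * Note: max_low_pfn is set to the same value as max_pfn because arm64 has
 * no highmem; all RAM is covered by the linear map, and high_memory simply
 * points one byte past the last page of DRAM.
 */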

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}
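/*
 * Rounding the start up and the end down means only pages of the memmap
 * that are entirely unused are handed back to bootmem; a page that still
 * holds struct pages for a neighbouring bank is left alone.
 */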

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(start + __phys_to_pfn(reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
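/*
 * With CONFIG_SPARSEMEM_VMEMMAP the struct page array is virtually mapped
 * and only populated for sections that actually exist, so there is nothing
 * to trim and both helpers above are compiled out.
 */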
#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free.  This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	swiotlb_init(1);

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	free_all_bootmem();

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLG(b, t) b, t, ((t) - (b)) >> 30
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	pr_notice("Virtual kernel memory layout:\n"
		  "    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		  "    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
		  "              0x%16lx - 0x%16lx   (%6ld MB actual)\n"
#endif
		  "    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n"
		  "    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n"
		  "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
		  "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n"
		  "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
		  "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
		  "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		  MLG(VMALLOC_START, VMALLOC_END),
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		  MLG((unsigned long)vmemmap,
		      (unsigned long)vmemmap + VMEMMAP_SIZE),
		  MLM((unsigned long)virt_to_page(PAGE_OFFSET),
		      (unsigned long)virt_to_page(high_memory)),
#endif
		  MLK(FIXADDR_START, FIXADDR_TOP),
		  MLM(PCI_IO_START, PCI_IO_END),
		  MLM(MODULES_VADDR, MODULES_END),
		  MLM(PAGE_OFFSET, (unsigned long)high_memory),
		  MLK_ROUNDUP(__init_begin, __init_end),
		  MLK_ROUNDUP(_text, _etext),
		  MLK_ROUNDUP(_sdata, _edata));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32			> TASK_SIZE_64);
#endif
	BUILD_BUG_ON(TASK_SIZE_64			> MODULES_VADDR);
	BUG_ON(TASK_SIZE_64				> MODULES_VADDR);

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
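/*
 * The overcommit heuristic above only triggers on very small systems: with
 * 16KB pages, 128 pages is just 2MB of RAM (8MB with 64KB pages), so such
 * a machine is forced into OVERCOMMIT_ALWAYS to remain usable.
 */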

void free_initmem(void)
{
	fixup_init();
	free_initmem_default(0);
	free_alternatives_memory();
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		free_reserved_area((void *)start, (void *)end, 0, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif