/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>

#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	phys_addr_t start;
	unsigned long size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);
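
/*
 * Usage sketch (addresses are hypothetical): booting with
 *
 *	initrd=0x60800000,8M
 *
 * on the kernel command line sets phys_initrd_start to 0x60800000 and
 * phys_initrd_size to 8 MiB; memparse() accepts the usual K/M/G
 * suffixes.
 */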

static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as by show_mem(), to skip holes
 * in the memory map.  It is populated by arm_add_memory().
 */
void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0;
	struct memblock_region *reg;

	printk("Mem-info:\n");
	show_free_areas(filter);

	for_each_memblock(memory, reg) {
		unsigned int pfn1, pfn2;
		struct page *page;

		pfn1 = memblock_region_memory_base_pfn(reg);
		pfn2 = memblock_region_memory_end_pfn(reg);

		page = pfn_to_page(pfn1);

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			pfn1++;
			page = pfn_to_page(pfn1);
		} while (pfn1 < pfn2);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}
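
/*
 * Illustrative example (hypothetical layout, 4 KiB pages): with DRAM at
 * 0x80000000-0xbfffffff and a lowmem limit of 0xa0000000, find_limits()
 * yields min = 0x80000, max_low = 0xa0000 and max_high = 0xc0000, since
 * PFN_UP()/PFN_DOWN() round addresses up/down to page frame numbers.
 */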

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
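
/*
 * Worked example for arm_adjust_dma_zone() (hypothetical numbers): if
 * lowmem spans 512 MiB (size[0] == 0x20000 pages with 4 KiB pages) and
 * dma_size is 0x10000 pages (256 MiB), the split leaves ZONE_DMA with
 * 0x10000 pages and moves the remaining 0x10000 pages, plus any holes,
 * to ZONE_NORMAL.
 */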
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else {
		arm_dma_limit = 0xffffffff;
	}
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
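
/*
 * Example (hypothetical values): with PHYS_OFFSET == 0x80000000 and a
 * machine descriptor setting dma_zone_size = SZ_256M, arm_dma_limit
 * becomes 0x8fffffff and arm_dma_pfn_limit 0x8ffff (4 KiB pages).
 */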

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}
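
/*
 * Sketch of the hole accounting in zone_sizes_init() (hypothetical
 * banks): with lowmem PFNs min = 0x80000 and max_low = 0xa0000 but only
 * one memory bank covering 0x80000-0x90000, zone_size[0] is 0x20000
 * pages while zhole_size[0] ends up as 0x20000 - 0x10000 = 0x10000
 * pages of hole.
 */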

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}
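
/*
 * Usage sketch (hypothetical caller): platform code may carve memory
 * completely out of the kernel's view from its machine_desc ->reserve()
 * hook, e.g.
 *
 *	phys_addr_t fb = arm_memblock_steal(SZ_1M, SZ_1M);
 *
 * The BUG_ON() above enforces that this only happens while
 * arm_memblock_init() is still running, before stealing is disabled.
 */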

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	/* FDT scan will populate initrd_start */
	if (initrd_start && !phys_initrd_size) {
		phys_initrd_start = __virt_to_phys(initrd_start);
		phys_initrd_size = initrd_end - initrd_start;
	}
	initrd_start = initrd_end = 0;
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();
	arm_dt_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_scan_reserved_mem();

	/*
	 * Reserve memory for DMA contiguous allocations; this must come
	 * from the DMA area inside low memory.
	 */
	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	memblock_allow_resize();
	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min, max_low, max_high);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 */
	min_low_pfn = min;
	max_low_pfn = max_low;
	max_pfn = max_high;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}
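
/*
 * Note: the loop in poison_init_mem() assumes count is a multiple of 4;
 * the callers in this file pass linker-section sizes, which are
 * word-aligned.  In ARM state, 0xe7fddef0 falls in the architecturally
 * undefined encoding space, so a stray jump into freed init memory
 * faults immediately rather than executing stale code.
 */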

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
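
/*
 * Worked example (hypothetical banks, MAX_ORDER_NR_PAGES == 0x400):
 * bank 0 ends at PFN 0x20000 and bank 1 starts at PFN 0x38000.
 * prev_end aligns up to 0x20000, bank 1's start rounds down to 0x38000,
 * so free_memmap(0x20000, 0x38000) releases the struct page entries
 * covering the 384 MiB hole between the banks.
 */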

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}
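
/*
 * Sketch of the exclusion loop in free_highpages() (hypothetical PFNs):
 * for a highmem bank 0xa0000-0xc0000 with one reserved region
 * 0xb0000-0xb1000, the loop frees 0xa0000-0xb0000, skips the
 * reservation, then frees 0xb1000-0xc0000 after the loop.
 */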

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

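	/*
	 * b = base, t = top; each macro expands to base, top and the size
	 * of the range in KiB (MLK), MiB (MLM) or KiB rounded up to the
	 * next whole KiB (MLK_ROUNDUP), for the layout dump below.
	 */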
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_TOP),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
			MLM(MODULES_VADDR, MODULES_END),
#endif

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE				> MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
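
/*
 * Booting with "keepinitrd" on the kernel command line sets keep_initrd,
 * so free_initrd_mem() leaves the initrd pages in place instead of
 * poisoning them and returning them to the page allocator.
 */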
#endif