xref: /openbmc/linux/arch/arm/mm/init.c (revision f78f10436806660f39440a729acbaf03e3a01023)
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>

#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/tlb.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#define TABLE_SIZE	(2 * PTRS_PER_PTE * sizeof(pte_t))

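/* Per-CPU TLB gather state used by the generic TLB shootdown code. */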
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void _stext, _text, _etext, __data_start, _end, __init_begin, __init_end;
extern unsigned long phys_initrd_start;
extern unsigned long phys_initrd_size;

/*
 * The sole use of this is to pass memory configuration
 * data from paging_init to mem_init.
 */
static struct meminfo meminfo __initdata = { 0, };

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;

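/*
 * Dump a summary of memory usage: totals of free, reserved, shared,
 * slab and swap-cached pages across all online nodes, plus the amount
 * of free swap.
 */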
void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, node;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_node(node) {
		struct page *page, *end;

		page = NODE_MEM_MAP(node);
		end  = page + NODE_DATA(node)->node_spanned_pages;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

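/*
 * Helpers to obtain the pmd entry covering a virtual address.  pmd_off()
 * walks from a given pgd entry; pmd_off_k() walks the kernel page tables.
 */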
static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
	return pmd_offset(pgd, virt);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_off(pgd_offset_k(virt), virt);
}

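/* Iterate over the memory banks in 'mi' which belong to node 'no'. */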
#define for_each_nodebank(iter,mi,no)			\
	for (iter = 0; iter < mi->nr_banks; iter++)	\
		if (mi->bank[iter].node == no)

/*
 * FIXME: We really want to avoid allocating the bootmap bitmap
 * over the top of the initrd.  Hopefully, this is located towards
 * the start of a bank, so if we allocate the bootmap bitmap at
 * the end, we won't clash.
 */
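/*
 * Find a run of 'bootmap_pages' free pages in this node, above the end
 * of the kernel image, to hold the bootmem bitmap.
 */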
static unsigned int __init
find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
{
	unsigned int start_pfn, bank, bootmap_pfn;

	start_pfn   = PAGE_ALIGN(__pa(&_end)) >> PAGE_SHIFT;
	bootmap_pfn = 0;

	for_each_nodebank(bank, mi, node) {
		unsigned int start, end;

		start = mi->bank[bank].start >> PAGE_SHIFT;
		end   = (mi->bank[bank].size +
			 mi->bank[bank].start) >> PAGE_SHIFT;

		if (end < start_pfn)
			continue;

		if (start < start_pfn)
			start = start_pfn;

		if (end <= start)
			continue;

		if (end - start >= bootmap_pages) {
			bootmap_pfn = start;
			break;
		}
	}

	if (bootmap_pfn == 0)
		BUG();

	return bootmap_pfn;
}

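/*
 * Check that the initrd, if any, lies wholly within one memory bank.
 * Returns the node containing the initrd, -1 if the initrd falls outside
 * the registered memory (in which case it is disabled), or -2 if there
 * is no initrd (or no initrd support).
 */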
static int __init check_initrd(struct meminfo *mi)
{
	int initrd_node = -2;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long end = phys_initrd_start + phys_initrd_size;

	/*
	 * Make sure that the initrd is within a valid area of
	 * memory.
	 */
	if (phys_initrd_size) {
		unsigned int i;

		initrd_node = -1;

		for (i = 0; i < mi->nr_banks; i++) {
			unsigned long bank_end;

			bank_end = mi->bank[i].start + mi->bank[i].size;

			if (mi->bank[i].start <= phys_initrd_start &&
			    end <= bank_end)
				initrd_node = mi->bank[i].node;
		}
	}

	if (initrd_node == -1) {
		printk(KERN_ERR "initrd (0x%08lx - 0x%08lx) extends beyond "
		       "physical memory - disabling initrd\n",
		       phys_initrd_start, end);
		phys_initrd_start = phys_initrd_size = 0;
	}
#endif

	return initrd_node;
}

/*
 * Reserve the various regions of node 0
 */
static __init void reserve_node_zero(pg_data_t *pgdat)
{
	unsigned long res_size = 0;

	/*
	 * Register the kernel text and data with bootmem.
	 * Note that this can only be in node 0.
	 */
#ifdef CONFIG_XIP_KERNEL
	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
#else
	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
#endif

	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
			     PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Hmm... This should go elsewhere, but we really really need to
	 * stop things allocating the low memory; ideally we need a better
	 * implementation of GFP_DMA which does not assume that DMA-able
	 * memory starts at zero.
	 */
	if (machine_is_integrator() || machine_is_cintegrator())
		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;

	/*
	 * These should likewise go elsewhere.  They pre-reserve the
	 * screen memory region at the start of main system memory.
	 */
	if (machine_is_edb7211())
		res_size = 0x00020000;
	if (machine_is_p720t())
		res_size = 0x00014000;

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
	if (res_size)
		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
}

void __init build_mem_type_table(void);
void __init create_mapping(struct map_desc *md);

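/*
 * Set up a single node: map its memory banks, initialise its bootmem
 * allocator, reserve the regions already in use and initialise its
 * zones.  Returns the highest page frame number in the node, or 0 if
 * the node contains no memory.
 */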
static unsigned long __init
bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long start_pfn, end_pfn, boot_pfn;
	unsigned int boot_pages;
	pg_data_t *pgdat;
	int i;

	start_pfn = -1UL;
	end_pfn = 0;

	/*
	 * Calculate the pfn range, and map the memory banks for this node.
	 */
	for_each_nodebank(i, mi, node) {
		unsigned long start, end;
		struct map_desc map;

		start = mi->bank[i].start >> PAGE_SHIFT;
		end = (mi->bank[i].start + mi->bank[i].size) >> PAGE_SHIFT;

		if (start_pfn > start)
			start_pfn = start;
		if (end_pfn < end)
			end_pfn = end;

		map.pfn = __phys_to_pfn(mi->bank[i].start);
		map.virtual = __phys_to_virt(mi->bank[i].start);
		map.length = mi->bank[i].size;
		map.type = MT_MEMORY;

		create_mapping(&map);
	}

	/*
	 * If there is no memory in this node, ignore it.
	 */
	if (end_pfn == 0)
		return end_pfn;

	/*
	 * Allocate the bootmem bitmap page.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	boot_pfn = find_bootmap_pfn(node, mi, boot_pages);

	/*
	 * Initialise the bootmem allocator for this node, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(node);
	pgdat = NODE_DATA(node);
	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);

	for_each_nodebank(i, mi, node)
		free_bootmem_node(pgdat, mi->bank[i].start, mi->bank[i].size);

	/*
	 * Reserve the bootmem bitmap for this node.
	 */
	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
			     boot_pages << PAGE_SHIFT);

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * If the initrd is in this node, reserve its memory.
	 */
	if (node == initrd_node) {
		reserve_bootmem_node(pgdat, phys_initrd_start,
				     phys_initrd_size);
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	/*
	 * Finally, reserve any node zero regions.
	 */
	if (node == 0)
		reserve_node_zero(pgdat);

	/*
	 * initialise the zones within this node.
	 */
	memset(zone_size, 0, sizeof(zone_size));
	memset(zhole_size, 0, sizeof(zhole_size));

	/*
	 * The size of this node has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory to the
	 * zones, now is the time to do it.
	 */
	zone_size[0] = end_pfn - start_pfn;

	/*
	 * For each bank in this node, calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes_in_node)
	 */
	zhole_size[0] = zone_size[0];
	for_each_nodebank(i, mi, node)
		zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;

	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	arch_adjust_zones(node, zone_size, zhole_size);

	free_area_init_node(node, pgdat, zone_size, start_pfn, zhole_size);

	return end_pfn;
}

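/*
 * Initialise bootmem for every node: sanitise the bank list, clear out
 * stale page table entries, locate the initrd and hand each node's
 * memory over to the bootmem allocator.
 */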
static void __init bootmem_init(struct meminfo *mi)
{
	unsigned long addr, memend_pfn = 0;
	int node, initrd_node, i;

	/*
	 * Invalidate the node number for empty or invalid memory banks
	 */
	for (i = 0; i < mi->nr_banks; i++)
		if (mi->bank[i].size == 0 || mi->bank[i].node >= MAX_NUMNODES)
			mi->bank[i].node = -1;

	memcpy(&meminfo, mi, sizeof(meminfo));

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Locate which node contains the ramdisk image, if any.
	 */
	initrd_node = check_initrd(mi);

	/*
	 * Run through each node initialising the bootmem allocator.
	 */
	for_each_node(node) {
		unsigned long end_pfn;

		end_pfn = bootmem_init_node(node, initrd_node, mi);

		/*
		 * Remember the highest memory PFN.
		 */
		if (end_pfn > memend_pfn)
			memend_pfn = end_pfn;
	}

	high_memory = __va(memend_pfn << PAGE_SHIFT);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function: you can't use any function or debugging method which
 * may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
	BUG_ON(!vectors);

	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & PGDIR_MASK);
	map.virtual = MODULE_START;
	map.length = ((unsigned long)&_etext - map.virtual + ~PGDIR_MASK) & PGDIR_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = PGDIR_SIZE;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + PGDIR_SIZE);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = PGDIR_SIZE;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	void *zero_page;

	build_mem_type_table();
	bootmem_init(mi);
	devicemaps_init(mdesc);

	top_pmd = pmd_off_k(0xffff0000);

	/*
	 * allocate the zero page.  Note that we count on this going ok.
	 */
	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
	memzero(zero_page, PAGE_SIZE);
	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}

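/*
 * Return the pages in [addr, end) to the page allocator, accounting for
 * them in totalram_pages and reporting how much was freed.
 */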
static inline void free_area(unsigned long addr, unsigned long end, char *s)
{
	unsigned int size = (end - addr) >> 10;

	for (; addr < end; addr += PAGE_SIZE) {
		struct page *page = virt_to_page(addr);
		ClearPageReserved(page);
		set_page_count(page, 1);
		free_page(addr);
		totalram_pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
}

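/*
 * Free the part of the mem_map array covering the page frame range
 * [start_pfn, end_pfn), returning it to the node's bootmem allocator.
 */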
static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn);
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap_node(int node, struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * [FIXME] This relies on each bank being in address order.  This
	 * may not be the case, especially if the user has provided the
	 * information on the command line.
	 */
	for_each_nodebank(i, mi, node) {
		bank_start = mi->bank[i].start >> PAGE_SHIFT;
		if (bank_start < prev_bank_end) {
			printk(KERN_ERR "MEM: unordered memory banks.  "
				"Not freeing memmap.\n");
			break;
		}

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end != bank_start)
			free_memmap(node, prev_bank_end, bank_start);

		prev_bank_end = (mi->bank[i].start +
				 mi->bank[i].size) >> PAGE_SHIFT;
	}
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned int codepages, datapages, initpages;
	int i, node;

	codepages = &_etext - &_text;
	datapages = &_end - &__data_start;
	initpages = &__init_end - &__init_begin;

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr   = virt_to_page(high_memory) - mem_map;
#endif

	/* this will put all unused low memory onto the freelists */
	for_each_online_node(node) {
		pg_data_t *pgdat = NODE_DATA(node);

		free_unused_memmap_node(node, &meminfo);

		if (pgdat->node_spanned_pages != 0)
			totalram_pages += free_all_bootmem_node(pgdat);
	}

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL);
#endif

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");

	num_physpages = 0;
	for (i = 0; i < meminfo.nr_banks; i++) {
		num_physpages += meminfo.bank[i].size >> PAGE_SHIFT;
		printk(" %ldMB", meminfo.bank[i].size >> 20);
	}

	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
		"%dK data, %dK init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		codepages >> 10, datapages >> 10, initpages >> 10);

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

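/*
 * Free the memory used by the kernel's __init sections, except on the
 * Integrator and CIntegrator platforms where it is left in place.
 */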
void free_initmem(void)
{
	if (!machine_is_integrator() && !machine_is_cintegrator()) {
		free_area((unsigned long)(&__init_begin),
			  (unsigned long)(&__init_end),
			  "init");
	}
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

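/*
 * Free the initrd image once it is no longer needed, unless "keepinitrd"
 * was given on the kernel command line.
 */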
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		free_area(start, end, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif