xref: /openbmc/linux/arch/arm/mm/init.c (revision 1da177e4)
1 /*
2  *  linux/arch/arm/mm/init.c
3  *
4  *  Copyright (C) 1995-2002 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/config.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/ptrace.h>
14 #include <linux/swap.h>
15 #include <linux/init.h>
16 #include <linux/bootmem.h>
17 #include <linux/mman.h>
18 #include <linux/nodemask.h>
19 #include <linux/initrd.h>
20 
21 #include <asm/mach-types.h>
22 #include <asm/hardware.h>
23 #include <asm/setup.h>
24 #include <asm/tlb.h>
25 
26 #include <asm/mach/arch.h>
27 #include <asm/mach/map.h>
28 
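/*
 * Note: the factor of two below presumably makes room for the
 * hardware copy of each page table that ARM keeps alongside the
 * Linux version (see the pte layout in asm/pgtable.h).
 */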
29 #define TABLE_SIZE	(2 * PTRS_PER_PTE * sizeof(pte_t))
30 
31 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
32 
33 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
34 extern void _stext, _text, _etext, __data_start, _end, __init_begin, __init_end;
35 extern unsigned long phys_initrd_start;
36 extern unsigned long phys_initrd_size;
37 
38 /*
39  * The sole use of this is to pass memory configuration
40  * data from paging_init to mem_init.
41  */
42 static struct meminfo meminfo __initdata = { 0, };
43 
44 /*
45  * empty_zero_page is a special page that is used for
46  * zero-initialized data and COW.
47  */
48 struct page *empty_zero_page;
49 
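/*
 * Dump a summary of memory usage: walk the mem_map of every online
 * node and classify each page as free, reserved, slab, swap-cached
 * or shared.
 */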
50 void show_mem(void)
51 {
52 	int free = 0, total = 0, reserved = 0;
53 	int shared = 0, cached = 0, slab = 0, node;
54 
55 	printk("Mem-info:\n");
56 	show_free_areas();
57 	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
58 
59 	for_each_online_node(node) {
60 		struct page *page, *end;
61 
62 		page = NODE_MEM_MAP(node);
63 		end  = page + NODE_DATA(node)->node_spanned_pages;
64 
65 		do {
66 			total++;
67 			if (PageReserved(page))
68 				reserved++;
69 			else if (PageSwapCache(page))
70 				cached++;
71 			else if (PageSlab(page))
72 				slab++;
73 			else if (!page_count(page))
74 				free++;
75 			else
76 				shared += page_count(page) - 1;
77 			page++;
78 		} while (page < end);
79 	}
80 
81 	printk("%d pages of RAM\n", total);
82 	printk("%d free pages\n", free);
83 	printk("%d reserved pages\n", reserved);
84 	printk("%d slab pages\n", slab);
85 	printk("%d pages shared\n", shared);
86 	printk("%d pages swap cached\n", cached);
87 }
88 
89 struct node_info {
90 	unsigned int start;
91 	unsigned int end;
92 	int bootmap_pages;
93 };
94 
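/*
 * Address to page-frame-number helpers: the O_* variants take
 * physical addresses, the V_* variants take kernel virtual addresses
 * (via __pa()).  _UP rounds up to the next page, _DOWN rounds down.
 */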
95 #define O_PFN_DOWN(x)	((x) >> PAGE_SHIFT)
96 #define V_PFN_DOWN(x)	O_PFN_DOWN(__pa(x))
97 
98 #define O_PFN_UP(x)	(PAGE_ALIGN(x) >> PAGE_SHIFT)
99 #define V_PFN_UP(x)	O_PFN_UP(__pa(x))
100 
101 #define PFN_SIZE(x)	((x) >> PAGE_SHIFT)
102 #define PFN_RANGE(s,e)	PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \
103 				(((unsigned long)(s)) & PAGE_MASK))
104 
105 /*
106  * FIXME: We really want to avoid allocating the bootmap bitmap
107  * over the top of the initrd.  Hopefully, this is located towards
108  * the start of a bank, so if we allocate the bootmap bitmap at
109  * the end, we won't clash.
110  */
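/*
 * Return the first page frame, above the kernel image, of a run of
 * free pages in this node that is large enough to hold
 * 'bootmap_pages' bootmem bitmap pages.
 */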
111 static unsigned int __init
112 find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
113 {
114 	unsigned int start_pfn, bank, bootmap_pfn;
115 
116 	start_pfn   = V_PFN_UP(&_end);
117 	bootmap_pfn = 0;
118 
119 	for (bank = 0; bank < mi->nr_banks; bank ++) {
120 		unsigned int start, end;
121 
122 		if (mi->bank[bank].node != node)
123 			continue;
124 
125 		start = O_PFN_UP(mi->bank[bank].start);
126 		end   = O_PFN_DOWN(mi->bank[bank].size +
127 				   mi->bank[bank].start);
128 
129 		if (end < start_pfn)
130 			continue;
131 
132 		if (start < start_pfn)
133 			start = start_pfn;
134 
135 		if (end <= start)
136 			continue;
137 
138 		if (end - start >= bootmap_pages) {
139 			bootmap_pfn = start;
140 			break;
141 		}
142 	}
143 
144 	if (bootmap_pfn == 0)
145 		BUG();
146 
147 	return bootmap_pfn;
148 }
149 
150 /*
151  * Scan the memory info structure and pull out:
152  *  - the end of memory
153  *  - the number of nodes
154  *  - the pfn range of each node
155  *  - the number of bootmem bitmap pages
156  */
157 static unsigned int __init
158 find_memend_and_nodes(struct meminfo *mi, struct node_info *np)
159 {
160 	unsigned int i, bootmem_pages = 0, memend_pfn = 0;
161 
162 	for (i = 0; i < MAX_NUMNODES; i++) {
163 		np[i].start = -1U;
164 		np[i].end = 0;
165 		np[i].bootmap_pages = 0;
166 	}
167 
168 	for (i = 0; i < mi->nr_banks; i++) {
169 		unsigned long start, end;
170 		int node;
171 
172 		if (mi->bank[i].size == 0) {
173 			/*
174 			 * Mark this bank with an invalid node number
175 			 */
176 			mi->bank[i].node = -1;
177 			continue;
178 		}
179 
180 		node = mi->bank[i].node;
181 
182 		/*
183 		 * Make sure we haven't exceeded the maximum number of nodes
184 		 * that we have in this configuration.  If we have, we're in
185 		 * trouble.  (maybe we ought to limit, instead of bugging?)
186 		 */
187 		if (node >= MAX_NUMNODES)
188 			BUG();
189 		node_set_online(node);
190 
191 		/*
192 		 * Get the start and end pfns for this bank
193 		 */
194 		start = O_PFN_UP(mi->bank[i].start);
195 		end   = O_PFN_DOWN(mi->bank[i].start + mi->bank[i].size);
196 
197 		if (np[node].start > start)
198 			np[node].start = start;
199 
200 		if (np[node].end < end)
201 			np[node].end = end;
202 
203 		if (memend_pfn < end)
204 			memend_pfn = end;
205 	}
206 
207 	/*
208 	 * Calculate the number of pages we require to
209 	 * store the bootmem bitmaps.
210 	 */
211 	for_each_online_node(i) {
212 		if (np[i].end == 0)
213 			continue;
214 
215 		np[i].bootmap_pages = bootmem_bootmap_pages(np[i].end -
216 							    np[i].start);
217 		bootmem_pages += np[i].bootmap_pages;
218 	}
219 
220 	high_memory = __va(memend_pfn << PAGE_SHIFT);
221 
222 	/*
223 	 * This doesn't seem to be used by the Linux memory
224 	 * manager any more.  If we can get rid of it, we
225 	 * also get rid of some of the stuff above as well.
226 	 */
227 	max_low_pfn = memend_pfn - O_PFN_DOWN(PHYS_OFFSET);
228 	max_pfn = memend_pfn - O_PFN_DOWN(PHYS_OFFSET);
229 
230 	return bootmem_pages;
231 }
232 
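/*
 * Returns the node containing the initrd, -1 if the initrd lies
 * outside every memory bank (it is then disabled), or -2 if no
 * initrd was supplied at all.
 */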
233 static int __init check_initrd(struct meminfo *mi)
234 {
235 	int initrd_node = -2;
236 #ifdef CONFIG_BLK_DEV_INITRD
237 	unsigned long end = phys_initrd_start + phys_initrd_size;
238 
239 	/*
240 	 * Make sure that the initrd is within a valid area of
241 	 * memory.
242 	 */
243 	if (phys_initrd_size) {
244 		unsigned int i;
245 
246 		initrd_node = -1;
247 
248 		for (i = 0; i < mi->nr_banks; i++) {
249 			unsigned long bank_end;
250 
251 			bank_end = mi->bank[i].start + mi->bank[i].size;
252 
253 			if (mi->bank[i].start <= phys_initrd_start &&
254 			    end <= bank_end)
255 				initrd_node = mi->bank[i].node;
256 		}
257 	}
258 
259 	if (initrd_node == -1) {
260 		printk(KERN_ERR "initrd (0x%08lx - 0x%08lx) extends beyond "
261 		       "physical memory - disabling initrd\n",
262 		       phys_initrd_start, end);
263 		phys_initrd_start = phys_initrd_size = 0;
264 	}
265 #endif
266 
267 	return initrd_node;
268 }
269 
270 /*
271  * Reserve the various regions of node 0
272  */
273 static __init void reserve_node_zero(unsigned int bootmap_pfn, unsigned int bootmap_pages)
274 {
275 	pg_data_t *pgdat = NODE_DATA(0);
276 	unsigned long res_size = 0;
277 
278 	/*
279 	 * Register the kernel text and data with bootmem.
280 	 * Note that this can only be in node 0.
281 	 */
282 #ifdef CONFIG_XIP_KERNEL
283 	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
284 #else
285 	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
286 #endif
287 
288 	/*
289 	 * Reserve the page tables.  These are already in use,
290 	 * and can only be in node 0.
291 	 */
292 	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
293 			     PTRS_PER_PGD * sizeof(pgd_t));
294 
295 	/*
296 	 * And don't forget to reserve the allocator bitmap,
297 	 * which will be freed later.
298 	 */
299 	reserve_bootmem_node(pgdat, bootmap_pfn << PAGE_SHIFT,
300 			     bootmap_pages << PAGE_SHIFT);
301 
302 	/*
303 	 * Hmm... This should go elsewhere, but we really really need to
304 	 * stop things allocating the low memory; ideally we need a better
305 	 * implementation of GFP_DMA which does not assume that DMA-able
306 	 * memory starts at zero.
307 	 */
308 	if (machine_is_integrator() || machine_is_cintegrator())
309 		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
310 
311 	/*
312 	 * These should likewise go elsewhere.  They pre-reserve the
313 	 * screen memory region at the start of main system memory.
314 	 */
315 	if (machine_is_edb7211())
316 		res_size = 0x00020000;
317 	if (machine_is_p720t())
318 		res_size = 0x00014000;
319 
320 #ifdef CONFIG_SA1111
321 	/*
322 	 * Because of the SA1111 DMA bug, we want to preserve our
323 	 * precious DMA-able memory...
324 	 */
325 	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
326 #endif
327 	if (res_size)
328 		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
329 }
330 
331 /*
332  * Register all available RAM in this node with the bootmem allocator.
333  */
334 static inline void free_bootmem_node_bank(int node, struct meminfo *mi)
335 {
336 	pg_data_t *pgdat = NODE_DATA(node);
337 	int bank;
338 
339 	for (bank = 0; bank < mi->nr_banks; bank++)
340 		if (mi->bank[bank].node == node)
341 			free_bootmem_node(pgdat, mi->bank[bank].start,
342 					  mi->bank[bank].size);
343 }
344 
345 /*
346  * Initialise the bootmem allocator for all nodes.  This is called
347  * early during the architecture specific initialisation.
348  */
349 static void __init bootmem_init(struct meminfo *mi)
350 {
351 	struct node_info node_info[MAX_NUMNODES], *np = node_info;
352 	unsigned int bootmap_pages, bootmap_pfn, map_pg;
353 	int node, initrd_node;
354 
355 	bootmap_pages = find_memend_and_nodes(mi, np);
356 	bootmap_pfn   = find_bootmap_pfn(0, mi, bootmap_pages);
357 	initrd_node   = check_initrd(mi);
358 
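	/*
	 * map_pg walks the contiguous run of bitmap pages found above,
	 * handing each node its slice; the BUG_ON at the end of this
	 * function checks that exactly all of them were used.
	 */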
359 	map_pg = bootmap_pfn;
360 
361 	/*
362 	 * Initialise the bootmem nodes.
363 	 *
364 	 * What we really want to do is:
365 	 *
366 	 *   unmap_all_regions_except_kernel();
367 	 *   for_each_node_in_reverse_order(node) {
368 	 *     map_node(node);
369 	 *     allocate_bootmem_map(node);
370 	 *     init_bootmem_node(node);
371 	 *     free_bootmem_node(node);
372 	 *   }
373 	 *
374 	 * but this is a 2.5-type change.  For now, we just set
375 	 * the nodes up in reverse order.
376 	 *
377 	 * (we could also do with rolling bootmem_init and paging_init
378 	 * into one generic "memory_init" type function).
379 	 */
380 	np += num_online_nodes() - 1;
381 	for (node = num_online_nodes() - 1; node >= 0; node--, np--) {
382 		/*
383 		 * If there are no pages in this node, ignore it.
384 		 * Note that node 0 must always have some pages.
385 		 */
386 		if (np->end == 0 || !node_online(node)) {
387 			if (node == 0)
388 				BUG();
389 			continue;
390 		}
391 
392 		/*
393 		 * Initialise the bootmem allocator.
394 		 */
395 		init_bootmem_node(NODE_DATA(node), map_pg, np->start, np->end);
396 		free_bootmem_node_bank(node, mi);
397 		map_pg += np->bootmap_pages;
398 
399 		/*
400 		 * If this is node 0, we need to reserve some areas ASAP -
401 		 * we may use bootmem on node 0 to setup the other nodes.
402 		 */
403 		if (node == 0)
404 			reserve_node_zero(bootmap_pfn, bootmap_pages);
405 	}
406 
407 
408 #ifdef CONFIG_BLK_DEV_INITRD
409 	if (phys_initrd_size && initrd_node >= 0) {
410 		reserve_bootmem_node(NODE_DATA(initrd_node), phys_initrd_start,
411 				     phys_initrd_size);
412 		initrd_start = __phys_to_virt(phys_initrd_start);
413 		initrd_end = initrd_start + phys_initrd_size;
414 	}
415 #endif
416 
417 	BUG_ON(map_pg != bootmap_pfn + bootmap_pages);
418 }
419 
420 /*
421  * paging_init() sets up the page tables, initialises the zone memory
422  * maps, and sets up the empty zero page.
423  */
424 void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
425 {
426 	void *zero_page;
427 	int node;
428 
429 	bootmem_init(mi);
430 
431 	memcpy(&meminfo, mi, sizeof(meminfo));
432 
433 	/*
434 	 * allocate the zero page.  Note that we count on this going ok.
435 	 */
436 	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
437 
438 	/*
439 	 * initialise the page tables.
440 	 */
441 	memtable_init(mi);
442 	if (mdesc->map_io)
443 		mdesc->map_io();
444 	flush_tlb_all();
445 
446 	/*
447 	 * initialise the zones within each node
448 	 */
449 	for_each_online_node(node) {
450 		unsigned long zone_size[MAX_NR_ZONES];
451 		unsigned long zhole_size[MAX_NR_ZONES];
452 		struct bootmem_data *bdata;
453 		pg_data_t *pgdat;
454 		int i;
455 
456 		/*
457 		 * Initialise the zone size information.
458 		 */
459 		for (i = 0; i < MAX_NR_ZONES; i++) {
460 			zone_size[i]  = 0;
461 			zhole_size[i] = 0;
462 		}
463 
464 		pgdat = NODE_DATA(node);
465 		bdata = pgdat->bdata;
466 
467 		/*
468 		 * The size of this node has already been determined.
469 		 * If we need to do anything fancy with the allocation
470 		 * of this memory to the zones, now is the time to do
471 		 * it.
472 		 */
473 		zone_size[0] = bdata->node_low_pfn -
474 				(bdata->node_boot_start >> PAGE_SHIFT);
475 
476 		/*
477 		 * If this zone has zero size, skip it.
478 		 */
479 		if (!zone_size[0])
480 			continue;
481 
482 		/*
483 		 * For each bank in this node, calculate the size of the
484 		 * holes.  holes = node_size - sum(bank_sizes_in_node)
485 		 */
486 		zhole_size[0] = zone_size[0];
487 		for (i = 0; i < mi->nr_banks; i++) {
488 			if (mi->bank[i].node != node)
489 				continue;
490 
491 			zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
492 		}
493 
494 		/*
495 		 * Adjust the sizes according to any special
496 		 * requirements for this machine type.
497 		 */
498 		arch_adjust_zones(node, zone_size, zhole_size);
499 
500 		free_area_init_node(node, pgdat, zone_size,
501 				bdata->node_boot_start >> PAGE_SHIFT, zhole_size);
502 	}
503 
504 	/*
505 	 * Now that the mem_map is initialised, finish setting up
506 	 * the empty zero page.
507 	 */
508 	memzero(zero_page, PAGE_SIZE);
509 	empty_zero_page = virt_to_page(zero_page);
510 	flush_dcache_page(empty_zero_page);
511 }
512 
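/*
 * Hand the pages in [addr, end) back to the page allocator and
 * report how much memory was released.
 */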
513 static inline void free_area(unsigned long addr, unsigned long end, char *s)
514 {
515 	unsigned int size = (end - addr) >> 10;
516 
517 	for (; addr < end; addr += PAGE_SIZE) {
518 		struct page *page = virt_to_page(addr);
519 		ClearPageReserved(page);
520 		set_page_count(page, 1);
521 		free_page(addr);
522 		totalram_pages++;
523 	}
524 
525 	if (size && s)
526 		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
527 }
528 
529 /*
530  * mem_init() marks the free areas in the mem_map and tells us how much
531  * memory is free.  This is done after various parts of the system have
532  * claimed their memory after the kernel image.
533  */
534 void __init mem_init(void)
535 {
536 	unsigned int codepages, datapages, initpages;
537 	int i, node;
538 
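	/*
	 * Despite the names, these are byte counts rather than page
	 * counts; they are only ever printed shifted down to kilobytes.
	 */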
539 	codepages = &_etext - &_text;
540 	datapages = &_end - &__data_start;
541 	initpages = &__init_end - &__init_begin;
542 
543 #ifndef CONFIG_DISCONTIGMEM
544 	max_mapnr   = virt_to_page(high_memory) - mem_map;
545 #endif
546 
547 	/*
548 	 * We may have non-contiguous memory.
549 	 */
550 	if (meminfo.nr_banks != 1)
551 		create_memmap_holes(&meminfo);
552 
553 	/* this will put all unused low memory onto the freelists */
554 	for_each_online_node(node) {
555 		pg_data_t *pgdat = NODE_DATA(node);
556 
557 		if (pgdat->node_spanned_pages != 0)
558 			totalram_pages += free_all_bootmem_node(pgdat);
559 	}
560 
561 #ifdef CONFIG_SA1111
562 	/* now that our DMA memory is actually so designated, we can free it */
563 	free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL);
564 #endif
565 
566 	/*
567 	 * Since our memory may not be contiguous, calculate the
568 	 * real number of pages we have in this system
569 	 */
570 	printk(KERN_INFO "Memory:");
571 
572 	num_physpages = 0;
573 	for (i = 0; i < meminfo.nr_banks; i++) {
574 		num_physpages += meminfo.bank[i].size >> PAGE_SHIFT;
575 		printk(" %ldMB", meminfo.bank[i].size >> 20);
576 	}
577 
578 	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
579 	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
580 		"%dK data, %dK init)\n",
581 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
582 		codepages >> 10, datapages >> 10, initpages >> 10);
583 
584 	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
585 		extern int sysctl_overcommit_memory;
586 		/*
587 		 * On a machine this small we won't get
588 		 * anywhere without overcommit, so turn
589 		 * it on by default.
590 		 */
591 		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
592 	}
593 }
594 
595 void free_initmem(void)
596 {
597 	if (!machine_is_integrator() && !machine_is_cintegrator()) {
598 		free_area((unsigned long)(&__init_begin),
599 			  (unsigned long)(&__init_end),
600 			  "init");
601 	}
602 }
603 
604 #ifdef CONFIG_BLK_DEV_INITRD
605 
606 static int keep_initrd;
607 
608 void free_initrd_mem(unsigned long start, unsigned long end)
609 {
610 	if (!keep_initrd)
611 		free_area(start, end, "initrd");
612 }
613 
614 static int __init keepinitrd_setup(char *__unused)
615 {
616 	keep_initrd = 1;
617 	return 1;
618 }
619 
620 __setup("keepinitrd", keepinitrd_setup);
621 #endif
622