/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>

#include <asm/mach-types.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

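/*
 * Parse the "initrd=<start>,<size>" kernel command line parameter,
 * e.g. "initrd=0x00800000,8M" (the address here is purely
 * illustrative); memparse() accepts the usual K/M/G suffixes.
 */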
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

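/*
 * ATAG_INITRD passes a virtual address, which is one reason it is
 * deprecated: the address must be converted back to physical here.
 */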
static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

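/* ATAG_INITRD2 already carries a physical address; no conversion needed. */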
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
{
	phys_initrd_start = start;
	phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */

/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

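/*
 * Walk every registered memory bank and classify each page, printing
 * a summary.  Called from diagnostic paths (e.g. SysRq show-mem) when
 * a dump of page usage is wanted.
 */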
void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo *mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas(filter);

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

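/*
 * Compute the PFN limits spanned by the registered memory banks:
 * *min/*max_high cover all of memory, *max_low only the banks that
 * are not highmem.
 */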
static void __init find_limits(unsigned long *min, unsigned long *max_low,
	unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}

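/*
 * Set up the bootmem allocator for lowmem: allocate its bitmap from
 * memblock, seed it with the free lowmem regions, and re-reserve in
 * bootmem everything memblock already has reserved.
 */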
static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}

	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		unsigned long end = memblock_region_reserved_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		reserve_bootmem(__pfn_to_phys(start),
				(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
}

#ifdef CONFIG_ZONE_DMA

unsigned long arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
u32 arm_dma_limit;

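/*
 * Carve the machine-specified DMA zone out of the bottom of zone 0,
 * leaving the remainder as ZONE_NORMAL.  Any hole stays accounted to
 * ZONE_NORMAL; the DMA zone is assumed to be hole-free.
 */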
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

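/*
 * Compute per-zone sizes and hole sizes (in pages) from the memblock
 * layout and hand them to free_area_init_node() to build the zone
 * structures for node 0.
 */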
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size) {
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

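/*
 * With HAVE_ARCH_PFN_VALID, validity is answered by memblock: a PFN
 * is valid only if it falls inside a registered memory region, which
 * copes with holes between banks.
 */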
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void arm_memory_present(void)
{
}
#else
static void arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

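/*
 * sort() comparator: order memory banks by ascending start PFN, so
 * later code can rely on the banks being in address order.
 */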
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

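/*
 * Sort the memory banks, register them with memblock, then reserve
 * everything that must survive early boot: the kernel image, the
 * initrd, the initial page tables and any platform-specific regions
 * requested via mdesc->reserve().
 */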
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	int i;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	memblock_init();
	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();
	arm_dt_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	memblock_analyze();
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	arm_bootmem_init(min, max_low);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	arm_bootmem_free(min, max_low, max_high);

	high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}

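/*
 * Return the pages in [pfn, end) to the page allocator, report the
 * amount released under the given label, and return the page count.
 */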
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}

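/*
 * Free the part of the mem_map array that describes the PFN hole
 * [start_pfn, end_pfn); only whole pages of mem_map are returned
 * to bootmem.
 */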
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in arm_memblock_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		bank_start = min(bank_start,
				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
		free_memmap(prev_bank_end,
			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}

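/*
 * Hand all highmem pages over to the page allocator, skipping any
 * ranges that memblock still has marked reserved.
 */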
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				totalhigh_pages += free_area(start, res_start,
							     NULL);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			totalhigh_pages += free_area(start, end, NULL);
	}
	totalram_pages += totalhigh_pages;
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	struct memblock_region *reg;
	int i;
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	totalram_pages += free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	totalram_pages += free_area(PHYS_PFN_OFFSET,
				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

	free_highpages();

	reserved_pages = free_pages = 0;

	for_each_bank(i, &meminfo) {
		struct membank *bank = &meminfo.bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for_each_memblock(memory, reg) {
		unsigned long pages = memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
		num_physpages += pages;
		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_MMU
			"    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_TOP),
#ifdef CONFIG_MMU
			MLM(CONSISTENT_BASE, CONSISTENT_END),
#endif
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
			MLM(MODULES_VADDR, MODULES_END),

			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(VMALLOC_END			> CONSISTENT_BASE);
	BUG_ON(VMALLOC_END				> CONSISTENT_BASE);

	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE				> MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

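/*
 * Called once userspace init is about to run: return the .init
 * sections (and, with TCM, the image copy left at the link addresses)
 * to the page allocator.
 */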
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
				    __phys_to_pfn(__pa(&__tcm_end)),
				    "TCM link");
#endif

	if (!machine_is_integrator() && !machine_is_cintegrator())
		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
					    __phys_to_pfn(__pa(__init_end)),
					    "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

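/*
 * Free the initrd pages once they have been unpacked, unless
 * "keepinitrd" was passed on the kernel command line.
 */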
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif