xref: /openbmc/linux/arch/arm/mm/init.c (revision 55a8173c)
1 /*
2  *  linux/arch/arm/mm/init.c
3  *
4  *  Copyright (C) 1995-2005 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/kernel.h>
11 #include <linux/errno.h>
12 #include <linux/swap.h>
13 #include <linux/init.h>
14 #include <linux/bootmem.h>
15 #include <linux/mman.h>
16 #include <linux/export.h>
17 #include <linux/nodemask.h>
18 #include <linux/initrd.h>
19 #include <linux/of_fdt.h>
20 #include <linux/highmem.h>
21 #include <linux/gfp.h>
22 #include <linux/memblock.h>
23 
24 #include <asm/mach-types.h>
25 #include <asm/prom.h>
26 #include <asm/sections.h>
27 #include <asm/setup.h>
28 #include <asm/sizes.h>
29 #include <asm/tlb.h>
30 #include <asm/fixmap.h>
31 
32 #include <asm/mach/arch.h>
33 #include <asm/mach/map.h>
34 
35 #include "mm.h"
36 
37 static unsigned long phys_initrd_start __initdata = 0;
38 static unsigned long phys_initrd_size __initdata = 0;
39 
40 static int __init early_initrd(char *p)
41 {
42 	unsigned long start, size;
43 	char *endp;
44 
45 	start = memparse(p, &endp);
46 	if (*endp == ',') {
47 		size = memparse(endp + 1, NULL);
48 
49 		phys_initrd_start = start;
50 		phys_initrd_size = size;
51 	}
52 	return 0;
53 }
54 early_param("initrd", early_initrd);
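/*
 * For example, booting with "initrd=0x60d00000,8M" on the kernel command
 * line (an illustrative address) records a physical start of 0x60d00000
 * and a size of 8 MB here; memparse() accepts the usual K/M/G suffixes
 * for both values.
 */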
55 
56 static int __init parse_tag_initrd(const struct tag *tag)
57 {
58 	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
59 		"please update your bootloader.\n");
60 	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
61 	phys_initrd_size = tag->u.initrd.size;
62 	return 0;
63 }
64 
65 __tagtable(ATAG_INITRD, parse_tag_initrd);
66 
67 static int __init parse_tag_initrd2(const struct tag *tag)
68 {
69 	phys_initrd_start = tag->u.initrd.start;
70 	phys_initrd_size = tag->u.initrd.size;
71 	return 0;
72 }
73 
74 __tagtable(ATAG_INITRD2, parse_tag_initrd2);
75 
76 #ifdef CONFIG_OF_FLATTREE
77 void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
78 {
79 	phys_initrd_start = start;
80 	phys_initrd_size = end - start;
81 }
82 #endif /* CONFIG_OF_FLATTREE */
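/*
 * With a flattened device tree, the generic FDT code derives these values
 * from the /chosen node and hands them to the hook above, for example:
 *
 *	chosen {
 *		linux,initrd-start = <0x60d00000>;
 *		linux,initrd-end   = <0x61500000>;
 *	};
 *
 * (illustrative addresses), which yields phys_initrd_start = 0x60d00000
 * and phys_initrd_size = 0x800000 (8 MB).
 */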
83 
84 /*
85  * This holds the memory configuration data used by a couple of memory
86  * initialization functions, as well as by show_mem() when skipping
87  * holes in the memory map.  It is populated by arm_add_memory().
88  */
89 struct meminfo meminfo;
90 
91 void show_mem(unsigned int filter)
92 {
93 	int free = 0, total = 0, reserved = 0;
94 	int shared = 0, cached = 0, slab = 0, i;
95 	struct meminfo *mi = &meminfo;
96 
97 	printk("Mem-info:\n");
98 	show_free_areas(filter);
99 
100 	for_each_bank (i, mi) {
101 		struct membank *bank = &mi->bank[i];
102 		unsigned int pfn1, pfn2;
103 		struct page *page, *end;
104 
105 		pfn1 = bank_pfn_start(bank);
106 		pfn2 = bank_pfn_end(bank);
107 
108 		page = pfn_to_page(pfn1);
109 		end  = pfn_to_page(pfn2 - 1) + 1;
110 
111 		do {
112 			total++;
113 			if (PageReserved(page))
114 				reserved++;
115 			else if (PageSwapCache(page))
116 				cached++;
117 			else if (PageSlab(page))
118 				slab++;
119 			else if (!page_count(page))
120 				free++;
121 			else
122 				shared += page_count(page) - 1;
123 			page++;
124 		} while (page < end);
125 	}
126 
127 	printk("%d pages of RAM\n", total);
128 	printk("%d free pages\n", free);
129 	printk("%d reserved pages\n", reserved);
130 	printk("%d slab pages\n", slab);
131 	printk("%d pages shared\n", shared);
132 	printk("%d pages swap cached\n", cached);
133 }
134 
135 static void __init find_limits(unsigned long *min, unsigned long *max_low,
136 			       unsigned long *max_high)
137 {
138 	struct meminfo *mi = &meminfo;
139 	int i;
140 
141 	/* This assumes the meminfo array is properly sorted */
142 	*min = bank_pfn_start(&mi->bank[0]);
143 	for_each_bank (i, mi)
144 		if (mi->bank[i].highmem)
145 			break;
146 	*max_low = bank_pfn_end(&mi->bank[i - 1]);
147 	*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
148 }
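/*
 * Worked example: with bank[0] covering PFNs 0x60000-0x70000 (lowmem) and
 * bank[1] covering PFNs 0x70000-0x80000 (highmem), this yields
 * *min = 0x60000, *max_low = 0x70000 and *max_high = 0x80000.  Without
 * any highmem banks the loop runs to completion and *max_low ends up
 * equal to *max_high.
 */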
149 
150 static void __init arm_bootmem_init(unsigned long start_pfn,
151 	unsigned long end_pfn)
152 {
153 	struct memblock_region *reg;
154 	unsigned int boot_pages;
155 	phys_addr_t bitmap;
156 	pg_data_t *pgdat;
157 
158 	/*
159 	 * Allocate the bootmem bitmap page.  This must be in a region
160 	 * of memory which has already been mapped.
161 	 */
162 	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
163 	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
164 				__pfn_to_phys(end_pfn));
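	/*
	 * Rough sizing example, assuming 4 KB pages: 512 MB of lowmem is
	 * 0x20000 page frames, so the bitmap needs 0x20000 bits = 16 KB,
	 * i.e. bootmem_bootmap_pages() returns 4 here.
	 */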
165 
166 	/*
167 	 * Initialise the bootmem allocator, handing the
168 	 * memory banks over to bootmem.
169 	 */
170 	node_set_online(0);
171 	pgdat = NODE_DATA(0);
172 	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
173 
174 	/* Free the lowmem regions from memblock into bootmem. */
175 	for_each_memblock(memory, reg) {
176 		unsigned long start = memblock_region_memory_base_pfn(reg);
177 		unsigned long end = memblock_region_memory_end_pfn(reg);
178 
179 		if (end >= end_pfn)
180 			end = end_pfn;
181 		if (start >= end)
182 			break;
183 
184 		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
185 	}
186 
187 	/* Reserve the lowmem memblock reserved regions in bootmem. */
188 	for_each_memblock(reserved, reg) {
189 		unsigned long start = memblock_region_reserved_base_pfn(reg);
190 		unsigned long end = memblock_region_reserved_end_pfn(reg);
191 
192 		if (end >= end_pfn)
193 			end = end_pfn;
194 		if (start >= end)
195 			break;
196 
197 		reserve_bootmem(__pfn_to_phys(start),
198 			        (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
199 	}
200 }
201 
202 #ifdef CONFIG_ZONE_DMA
203 
204 unsigned long arm_dma_zone_size __read_mostly;
205 EXPORT_SYMBOL(arm_dma_zone_size);
206 
207 /*
208  * The DMA mask corresponding to the maximum bus address allocatable
209  * using GFP_DMA.  The default here places no restriction on DMA
210  * allocations.  This must be the smallest DMA mask in the system,
211  * so a successful GFP_DMA allocation will always satisfy this.
212  */
213 u32 arm_dma_limit;
214 
215 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
216 	unsigned long dma_size)
217 {
218 	if (size[0] <= dma_size)
219 		return;
220 
221 	size[ZONE_NORMAL] = size[0] - dma_size;
222 	size[ZONE_DMA] = dma_size;
223 	hole[ZONE_NORMAL] = hole[0];
224 	hole[ZONE_DMA] = 0;
225 }
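/*
 * For example, with 0x20000 pages of lowmem and a 0x4000-page DMA zone
 * (64 MB with 4 KB pages), this leaves size[ZONE_DMA] = 0x4000 and
 * size[ZONE_NORMAL] = 0x1c000; any hole count stays with ZONE_NORMAL.
 */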
226 #endif
227 
228 static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
229 	unsigned long max_high)
230 {
231 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
232 	struct memblock_region *reg;
233 
234 	/*
235 	 * initialise the zones.
236 	 */
237 	memset(zone_size, 0, sizeof(zone_size));
238 
239 	/*
240 	 * The memory size has already been determined.  If we need
241 	 * to do anything fancy with the allocation of this memory
242 	 * to the zones, now is the time to do it.
243 	 */
244 	zone_size[0] = max_low - min;
245 #ifdef CONFIG_HIGHMEM
246 	zone_size[ZONE_HIGHMEM] = max_high - max_low;
247 #endif
248 
249 	/*
250 	 * Calculate the size of the holes.
251 	 *  holes = node_size - sum(bank_sizes)
252 	 */
253 	memcpy(zhole_size, zone_size, sizeof(zhole_size));
254 	for_each_memblock(memory, reg) {
255 		unsigned long start = memblock_region_memory_base_pfn(reg);
256 		unsigned long end = memblock_region_memory_end_pfn(reg);
257 
258 		if (start < max_low) {
259 			unsigned long low_end = min(end, max_low);
260 			zhole_size[0] -= low_end - start;
261 		}
262 #ifdef CONFIG_HIGHMEM
263 		if (end > max_low) {
264 			unsigned long high_start = max(start, max_low);
265 			zhole_size[ZONE_HIGHMEM] -= end - high_start;
266 		}
267 #endif
268 	}
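	/*
	 * Worked example: with min = 0x60000, max_low = 0x70000 and a single
	 * memory region covering PFNs 0x60000-0x68000, zhole_size[0] starts
	 * at 0x10000 and ends up as 0x10000 - 0x8000 = 0x8000 pages of hole
	 * in the lowmem zone.
	 */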
269 
270 #ifdef CONFIG_ZONE_DMA
271 	/*
272 	 * Adjust the sizes according to any special requirements for
273 	 * this machine type.
274 	 */
275 	if (arm_dma_zone_size) {
276 		arm_adjust_dma_zone(zone_size, zhole_size,
277 			arm_dma_zone_size >> PAGE_SHIFT);
278 		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
279 	} else
280 		arm_dma_limit = 0xffffffff;
281 #endif
282 
283 	free_area_init_node(0, zone_size, min, zhole_size);
284 }
285 
286 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
287 int pfn_valid(unsigned long pfn)
288 {
289 	return memblock_is_memory(__pfn_to_phys(pfn));
290 }
291 EXPORT_SYMBOL(pfn_valid);
292 #endif
293 
294 #ifndef CONFIG_SPARSEMEM
295 static void arm_memory_present(void)
296 {
297 }
298 #else
299 static void arm_memory_present(void)
300 {
301 	struct memblock_region *reg;
302 
303 	for_each_memblock(memory, reg)
304 		memory_present(0, memblock_region_memory_base_pfn(reg),
305 			       memblock_region_memory_end_pfn(reg));
306 }
307 #endif
308 
309 void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
310 {
311 	int i;
312 
313 	memblock_init();
314 	for (i = 0; i < mi->nr_banks; i++)
315 		memblock_add(mi->bank[i].start, mi->bank[i].size);
316 
317 	/* Register the kernel text, kernel data and initrd with memblock. */
318 #ifdef CONFIG_XIP_KERNEL
319 	memblock_reserve(__pa(_sdata), _end - _sdata);
320 #else
321 	memblock_reserve(__pa(_stext), _end - _stext);
322 #endif
323 #ifdef CONFIG_BLK_DEV_INITRD
324 	if (phys_initrd_size &&
325 	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
326 		pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
327 		       phys_initrd_start, phys_initrd_size);
328 		phys_initrd_start = phys_initrd_size = 0;
329 	}
330 	if (phys_initrd_size &&
331 	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
332 		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
333 		       phys_initrd_start, phys_initrd_size);
334 		phys_initrd_start = phys_initrd_size = 0;
335 	}
336 	if (phys_initrd_size) {
337 		memblock_reserve(phys_initrd_start, phys_initrd_size);
338 
339 		/* Now convert initrd to virtual addresses */
340 		initrd_start = __phys_to_virt(phys_initrd_start);
341 		initrd_end = initrd_start + phys_initrd_size;
342 	}
343 #endif
344 
345 	arm_mm_memblock_reserve();
346 	arm_dt_memblock_reserve();
347 
348 	/* reserve any platform specific memblock areas */
349 	if (mdesc->reserve)
350 		mdesc->reserve();
351 
352 	memblock_analyze();
353 	memblock_dump_all();
354 }
355 
356 void __init bootmem_init(void)
357 {
358 	unsigned long min, max_low, max_high;
359 
360 	max_low = max_high = 0;
361 
362 	find_limits(&min, &max_low, &max_high);
363 
364 	arm_bootmem_init(min, max_low);
365 
366 	/*
367 	 * Sparsemem tries to allocate bootmem in memory_present(),
368 	 * so must be done after the fixed reservations
369 	 */
370 	arm_memory_present();
371 
372 	/*
373 	 * sparse_init() needs the bootmem allocator up and running.
374 	 */
375 	sparse_init();
376 
377 	/*
378 	 * Now free the memory - free_area_init_node needs
379 	 * the sparse mem_map arrays initialized by sparse_init()
380 	 * for memmap_init_zone(), otherwise all PFNs are invalid.
381 	 */
382 	arm_bootmem_free(min, max_low, max_high);
383 
384 	/*
385 	 * This doesn't seem to be used by the Linux memory manager any
386 	 * more, but is used by ll_rw_block.  If we can get rid of it, we can
387 	 * also get rid of some of the stuff above.
388 	 *
389 	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
390 	 * the system, not the maximum PFN.
391 	 */
392 	max_low_pfn = max_low - PHYS_PFN_OFFSET;
393 	max_pfn = max_high - PHYS_PFN_OFFSET;
394 }
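/*
 * Example of the PFN arithmetic above, assuming PHYS_OFFSET = 0x60000000
 * (PHYS_PFN_OFFSET = 0x60000) and 512 MB of lowmem: find_limits() returns
 * max_low = 0x80000, so max_low_pfn becomes 0x20000, i.e. a page count,
 * not the highest PFN, as the comment warns.
 */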
395 
396 static inline int free_area(unsigned long pfn, unsigned long end, char *s)
397 {
398 	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
399 
400 	for (; pfn < end; pfn++) {
401 		struct page *page = pfn_to_page(pfn);
402 		ClearPageReserved(page);
403 		init_page_count(page);
404 		__free_page(page);
405 		pages++;
406 	}
407 
408 	if (size && s)
409 		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
410 
411 	return pages;
412 }
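/*
 * The size reported above is in KiB: (end - pfn) << (PAGE_SHIFT - 10).
 * Freeing 256 pages with 4 KB pages, for instance, prints
 * "Freeing ... memory: 1024K".
 */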
413 
414 /*
415  * Poison init memory with an undefined instruction (ARM) or a branch to an
416  * undefined instruction (Thumb).
417  */
418 static inline void poison_init_mem(void *s, size_t count)
419 {
420 	u32 *p = (u32 *)s;
421 	for (; count != 0; count -= 4)
422 		*p++ = 0xe7fddef0;
423 }
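/*
 * The poison word 0xe7fddef0 faults in either instruction set: as a
 * 32-bit ARM opcode it lies in the architecturally undefined space,
 * while as two little-endian Thumb halfwords it decodes as a permanently
 * undefined instruction (0xdef0) followed by an unconditional branch
 * back to it (0xe7fd).  The loop writes a whole word per iteration, so
 * callers are expected to pass a count that is a multiple of 4.
 */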
424 
425 static inline void
426 free_memmap(unsigned long start_pfn, unsigned long end_pfn)
427 {
428 	struct page *start_pg, *end_pg;
429 	unsigned long pg, pgend;
430 
431 	/*
432 	 * Convert start_pfn/end_pfn to a struct page pointer.
433 	 */
434 	start_pg = pfn_to_page(start_pfn - 1) + 1;
435 	end_pg = pfn_to_page(end_pfn - 1) + 1;
436 
437 	/*
438 	 * Convert to physical addresses, and
439 	 * round start upwards and end downwards.
440 	 */
441 	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
442 	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
443 
444 	/*
445 	 * If there are free pages between these,
446 	 * free the section of the memmap array.
447 	 */
448 	if (pg < pgend)
449 		free_bootmem(pg, pgend - pg);
450 }
451 
452 /*
453  * The mem_map array can get very big.  Free the unused area of the memory map.
454  */
455 static void __init free_unused_memmap(struct meminfo *mi)
456 {
457 	unsigned long bank_start, prev_bank_end = 0;
458 	unsigned int i;
459 
460 	/*
461 	 * This relies on each bank being in address order.
462 	 * The banks are sorted previously in bootmem_init().
463 	 */
464 	for_each_bank(i, mi) {
465 		struct membank *bank = &mi->bank[i];
466 
467 		bank_start = bank_pfn_start(bank);
468 
469 #ifdef CONFIG_SPARSEMEM
470 		/*
471 		 * Take care not to free memmap entries that don't exist
472 		 * due to SPARSEMEM sections which aren't present.
473 		 */
474 		bank_start = min(bank_start,
475 				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
476 #else
477 		/*
478 		 * Align down here since the VM subsystem insists that the
479 		 * memmap entries are valid from the bank start aligned to
480 		 * MAX_ORDER_NR_PAGES.
481 		 */
482 		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
483 #endif
484 		/*
485 		 * If we had a previous bank, and there is a space
486 		 * between the current bank and the previous, free it.
487 		 */
488 		if (prev_bank_end && prev_bank_end < bank_start)
489 			free_memmap(prev_bank_end, bank_start);
490 
491 		/*
492 		 * Align up here since the VM subsystem insists that the
493 		 * memmap entries are valid from the bank end aligned to
494 		 * MAX_ORDER_NR_PAGES.
495 		 */
496 		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
497 	}
498 
499 #ifdef CONFIG_SPARSEMEM
500 	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
501 		free_memmap(prev_bank_end,
502 			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
503 #endif
504 }
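/*
 * Worked example for the !SPARSEMEM case, with MAX_ORDER_NR_PAGES = 1024
 * (0x400): a bank ending at PFN 0x60500 gives prev_bank_end = 0x60800,
 * and if the next bank starts at PFN 0x61000, the mem_map entries for
 * PFNs 0x60800-0x61000 are handed back to bootmem by free_memmap().
 */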
505 
506 static void __init free_highpages(void)
507 {
508 #ifdef CONFIG_HIGHMEM
509 	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
510 	struct memblock_region *mem, *res;
511 
512 	/* set highmem pages free */
513 	for_each_memblock(memory, mem) {
514 		unsigned long start = memblock_region_memory_base_pfn(mem);
515 		unsigned long end = memblock_region_memory_end_pfn(mem);
516 
517 		/* Ignore complete lowmem entries */
518 		if (end <= max_low)
519 			continue;
520 
521 		/* Truncate partial highmem entries */
522 		if (start < max_low)
523 			start = max_low;
524 
525 		/* Find and exclude any reserved regions */
526 		for_each_memblock(reserved, res) {
527 			unsigned long res_start, res_end;
528 
529 			res_start = memblock_region_reserved_base_pfn(res);
530 			res_end = memblock_region_reserved_end_pfn(res);
531 
532 			if (res_end < start)
533 				continue;
534 			if (res_start < start)
535 				res_start = start;
536 			if (res_start > end)
537 				res_start = end;
538 			if (res_end > end)
539 				res_end = end;
540 			if (res_start != start)
541 				totalhigh_pages += free_area(start, res_start,
542 							     NULL);
543 			start = res_end;
544 			if (start == end)
545 				break;
546 		}
547 
548 		/* And now free anything which remains */
549 		if (start < end)
550 			totalhigh_pages += free_area(start, end, NULL);
551 	}
552 	totalram_pages += totalhigh_pages;
553 #endif
554 }
555 
556 /*
557  * mem_init() marks the free areas in the mem_map and tells us how much
558  * memory is free.  This is done after various parts of the system have
559  * claimed their memory after the kernel image.
560  */
561 void __init mem_init(void)
562 {
563 	unsigned long reserved_pages, free_pages;
564 	struct memblock_region *reg;
565 	int i;
566 #ifdef CONFIG_HAVE_TCM
567 	/* These pointers are filled in on TCM detection */
568 	extern u32 dtcm_end;
569 	extern u32 itcm_end;
570 #endif
571 
572 	max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
573 
574 	/* this will put all unused low memory onto the freelists */
575 	free_unused_memmap(&meminfo);
576 
577 	totalram_pages += free_all_bootmem();
578 
579 #ifdef CONFIG_SA1111
580 	/* now that our DMA memory is actually so designated, we can free it */
581 	totalram_pages += free_area(PHYS_PFN_OFFSET,
582 				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
583 #endif
584 
585 	free_highpages();
586 
587 	reserved_pages = free_pages = 0;
588 
589 	for_each_bank(i, &meminfo) {
590 		struct membank *bank = &meminfo.bank[i];
591 		unsigned int pfn1, pfn2;
592 		struct page *page, *end;
593 
594 		pfn1 = bank_pfn_start(bank);
595 		pfn2 = bank_pfn_end(bank);
596 
597 		page = pfn_to_page(pfn1);
598 		end  = pfn_to_page(pfn2 - 1) + 1;
599 
600 		do {
601 			if (PageReserved(page))
602 				reserved_pages++;
603 			else if (!page_count(page))
604 				free_pages++;
605 			page++;
606 		} while (page < end);
607 	}
608 
609 	/*
610 	 * Since our memory may not be contiguous, calculate the
611 	 * real number of pages we have in this system
612 	 */
613 	printk(KERN_INFO "Memory:");
614 	num_physpages = 0;
615 	for_each_memblock(memory, reg) {
616 		unsigned long pages = memblock_region_memory_end_pfn(reg) -
617 			memblock_region_memory_base_pfn(reg);
618 		num_physpages += pages;
619 		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
620 	}
621 	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
622 
623 	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
624 		nr_free_pages() << (PAGE_SHIFT-10),
625 		free_pages << (PAGE_SHIFT-10),
626 		reserved_pages << (PAGE_SHIFT-10),
627 		totalhigh_pages << (PAGE_SHIFT-10));
628 
629 #define MLK(b, t) b, t, ((t) - (b)) >> 10
630 #define MLM(b, t) b, t, ((t) - (b)) >> 20
631 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
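/*
 * Each helper supplies three printk arguments (base, top and size), with
 * the size scaled to kB (MLK, MLK_ROUNDUP) or MB (MLM).  For instance,
 * MLM(PAGE_OFFSET, (unsigned long)high_memory) expands to
 * PAGE_OFFSET, (unsigned long)high_memory,
 * ((unsigned long)high_memory - PAGE_OFFSET) >> 20.
 */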
632 
633 	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
634 			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
635 #ifdef CONFIG_HAVE_TCM
636 			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
637 			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
638 #endif
639 			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
640 			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
641 			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
642 #ifdef CONFIG_HIGHMEM
643 			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
644 #endif
645 			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
646 			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
647 			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
648 			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
649 			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",
650 
651 			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
652 				(PAGE_SIZE)),
653 #ifdef CONFIG_HAVE_TCM
654 			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
655 			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
656 #endif
657 			MLK(FIXADDR_START, FIXADDR_TOP),
658 			MLM(VMALLOC_START, VMALLOC_END),
659 			MLM(PAGE_OFFSET, (unsigned long)high_memory),
660 #ifdef CONFIG_HIGHMEM
661 			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
662 				(PAGE_SIZE)),
663 #endif
664 			MLM(MODULES_VADDR, MODULES_END),
665 
666 			MLK_ROUNDUP(_text, _etext),
667 			MLK_ROUNDUP(__init_begin, __init_end),
668 			MLK_ROUNDUP(_sdata, _edata),
669 			MLK_ROUNDUP(__bss_start, __bss_stop));
670 
671 #undef MLK
672 #undef MLM
673 #undef MLK_ROUNDUP
674 
675 	/*
676 	 * Check boundaries twice: Some fundamental inconsistencies can
677 	 * be detected at build time already.
678 	 */
679 #ifdef CONFIG_MMU
680 	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
681 	BUG_ON(TASK_SIZE 				> MODULES_VADDR);
682 #endif
683 
684 #ifdef CONFIG_HIGHMEM
685 	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
686 	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
687 #endif
688 
689 	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
690 		extern int sysctl_overcommit_memory;
691 		/*
692 		 * On a machine this small we won't get
693 		 * anywhere without overcommit, so turn
694 		 * it on by default.
695 		 */
696 		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
697 	}
698 }
699 
700 void free_initmem(void)
701 {
702 #ifdef CONFIG_HAVE_TCM
703 	extern char __tcm_start, __tcm_end;
704 
705 	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
706 	totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
707 				    __phys_to_pfn(__pa(&__tcm_end)),
708 				    "TCM link");
709 #endif
710 
711 	poison_init_mem(__init_begin, __init_end - __init_begin);
712 	if (!machine_is_integrator() && !machine_is_cintegrator())
713 		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
714 					    __phys_to_pfn(__pa(__init_end)),
715 					    "init");
716 }
717 
718 #ifdef CONFIG_BLK_DEV_INITRD
719 
720 static int keep_initrd;
721 
722 void free_initrd_mem(unsigned long start, unsigned long end)
723 {
724 	if (!keep_initrd) {
725 		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
726 		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
727 					    __phys_to_pfn(__pa(end)),
728 					    "initrd");
729 	}
730 }
731 
732 static int __init keepinitrd_setup(char *__unused)
733 {
734 	keep_initrd = 1;
735 	return 1;
736 }
737 
738 __setup("keepinitrd", keepinitrd_setup);
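/*
 * Passing "keepinitrd" on the kernel command line therefore leaves the
 * initrd pages reserved instead of poisoning and freeing them, e.g. when
 * the image is still wanted after init has run.
 */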
739 #endif
740