/*
 * linux/arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>

#include <asm/mach-types.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/memblock.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
{
	phys_initrd_start = start;
	phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */

/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

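/*
 * Dump a summary of memory usage.  This walks each registered bank in
 * meminfo so that holes between banks are never dereferenced.
 */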
void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo *mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas(filter);

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

static void __init find_limits(unsigned long *min, unsigned long *max_low,
	unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}

static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}

	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		unsigned long end = memblock_region_reserved_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		reserve_bootmem(__pfn_to_phys(start),
				(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
}

#ifdef CONFIG_ZONE_DMA

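/*
 * Size in bytes of the DMA zone, zero if no DMA zone split is needed.
 * Platforms with DMA addressing restrictions typically set this early
 * in boot (e.g. from their machine description) before bootmem_init()
 * runs; arm_adjust_dma_zone() below then carves ZONE_DMA out of lowmem.
 */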
unsigned long arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
u32 arm_dma_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size) {
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void arm_memory_present(void)
{
}
#else
static void arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	int i;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	memblock_init();
	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
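	/*
	 * Only reserve the initrd if it lies entirely within memory that
	 * memblock knows about and does not overlap an existing
	 * reservation; otherwise it is disabled with an error and boot
	 * continues without it.
	 */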
#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();
	arm_dt_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	memblock_analyze();
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	arm_bootmem_init(min, max_low);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	arm_bootmem_free(min, max_low, max_high);

	high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}

static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

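/*
 * Free the part of the mem_map array covering the PFN range
 * [start_pfn, end_pfn).  The struct page range is converted back to the
 * physical pages backing it and returned to bootmem; only whole pages
 * of mem_map are released.
 */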
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in arm_memblock_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		bank_start = min(bank_start,
				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
		free_memmap(prev_bank_end,
			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}

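/*
 * Hand all highmem pages over to the page allocator, skipping any
 * ranges that memblock has reserved, and account them in
 * totalhigh_pages.
 */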
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				totalhigh_pages += free_area(start, res_start,
							     NULL);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			totalhigh_pages += free_area(start, end, NULL);
	}
	totalram_pages += totalhigh_pages;
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	struct memblock_region *reg;
	int i;
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	totalram_pages += free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	totalram_pages += free_area(PHYS_PFN_OFFSET,
				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

	free_highpages();

	reserved_pages = free_pages = 0;

	for_each_bank(i, &meminfo) {
		struct membank *bank = &meminfo.bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for_each_memblock(memory, reg) {
		unsigned long pages = memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
		num_physpages += pages;
		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_TOP),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
			MLM(MODULES_VADDR, MODULES_END),

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
				    __phys_to_pfn(__pa(&__tcm_end)),
				    "TCM link");
#endif

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
					    __phys_to_pfn(__pa(__init_end)),
					    "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif