1 /* 2 * linux/arch/arm/mm/init.c 3 * 4 * Copyright (C) 1995-2005 Russell King 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 #include <linux/kernel.h> 11 #include <linux/errno.h> 12 #include <linux/swap.h> 13 #include <linux/init.h> 14 #include <linux/bootmem.h> 15 #include <linux/mman.h> 16 #include <linux/nodemask.h> 17 #include <linux/initrd.h> 18 #include <linux/of_fdt.h> 19 #include <linux/highmem.h> 20 #include <linux/gfp.h> 21 #include <linux/memblock.h> 22 #include <linux/sort.h> 23 24 #include <asm/mach-types.h> 25 #include <asm/prom.h> 26 #include <asm/sections.h> 27 #include <asm/setup.h> 28 #include <asm/sizes.h> 29 #include <asm/tlb.h> 30 #include <asm/fixmap.h> 31 32 #include <asm/mach/arch.h> 33 #include <asm/mach/map.h> 34 35 #include "mm.h" 36 37 static unsigned long phys_initrd_start __initdata = 0; 38 static unsigned long phys_initrd_size __initdata = 0; 39 40 static int __init early_initrd(char *p) 41 { 42 unsigned long start, size; 43 char *endp; 44 45 start = memparse(p, &endp); 46 if (*endp == ',') { 47 size = memparse(endp + 1, NULL); 48 49 phys_initrd_start = start; 50 phys_initrd_size = size; 51 } 52 return 0; 53 } 54 early_param("initrd", early_initrd); 55 56 static int __init parse_tag_initrd(const struct tag *tag) 57 { 58 printk(KERN_WARNING "ATAG_INITRD is deprecated; " 59 "please update your bootloader.\n"); 60 phys_initrd_start = __virt_to_phys(tag->u.initrd.start); 61 phys_initrd_size = tag->u.initrd.size; 62 return 0; 63 } 64 65 __tagtable(ATAG_INITRD, parse_tag_initrd); 66 67 static int __init parse_tag_initrd2(const struct tag *tag) 68 { 69 phys_initrd_start = tag->u.initrd.start; 70 phys_initrd_size = tag->u.initrd.size; 71 return 0; 72 } 73 74 __tagtable(ATAG_INITRD2, parse_tag_initrd2); 75 76 #ifdef CONFIG_OF_FLATTREE 77 void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end) 78 { 79 phys_initrd_start = start; 80 phys_initrd_size = end - start; 81 } 82 #endif /* CONFIG_OF_FLATTREE */ 83 84 /* 85 * This keeps memory configuration data used by a couple memory 86 * initialization functions, as well as show_mem() for the skipping 87 * of holes in the memory map. It is populated by arm_add_memory(). 88 */ 89 struct meminfo meminfo; 90 91 void show_mem(unsigned int filter) 92 { 93 int free = 0, total = 0, reserved = 0; 94 int shared = 0, cached = 0, slab = 0, i; 95 struct meminfo * mi = &meminfo; 96 97 printk("Mem-info:\n"); 98 show_free_areas(filter); 99 100 for_each_bank (i, mi) { 101 struct membank *bank = &mi->bank[i]; 102 unsigned int pfn1, pfn2; 103 struct page *page, *end; 104 105 pfn1 = bank_pfn_start(bank); 106 pfn2 = bank_pfn_end(bank); 107 108 page = pfn_to_page(pfn1); 109 end = pfn_to_page(pfn2 - 1) + 1; 110 111 do { 112 total++; 113 if (PageReserved(page)) 114 reserved++; 115 else if (PageSwapCache(page)) 116 cached++; 117 else if (PageSlab(page)) 118 slab++; 119 else if (!page_count(page)) 120 free++; 121 else 122 shared += page_count(page) - 1; 123 page++; 124 } while (page < end); 125 } 126 127 printk("%d pages of RAM\n", total); 128 printk("%d free pages\n", free); 129 printk("%d reserved pages\n", reserved); 130 printk("%d slab pages\n", slab); 131 printk("%d pages shared\n", shared); 132 printk("%d pages swap cached\n", cached); 133 } 134 135 static void __init find_limits(unsigned long *min, unsigned long *max_low, 136 unsigned long *max_high) 137 { 138 struct meminfo *mi = &meminfo; 139 int i; 140 141 *min = -1UL; 142 *max_low = *max_high = 0; 143 144 for_each_bank (i, mi) { 145 struct membank *bank = &mi->bank[i]; 146 unsigned long start, end; 147 148 start = bank_pfn_start(bank); 149 end = bank_pfn_end(bank); 150 151 if (*min > start) 152 *min = start; 153 if (*max_high < end) 154 *max_high = end; 155 if (bank->highmem) 156 continue; 157 if (*max_low < end) 158 *max_low = end; 159 } 160 } 161 162 static void __init arm_bootmem_init(unsigned long start_pfn, 163 unsigned long end_pfn) 164 { 165 struct memblock_region *reg; 166 unsigned int boot_pages; 167 phys_addr_t bitmap; 168 pg_data_t *pgdat; 169 170 /* 171 * Allocate the bootmem bitmap page. This must be in a region 172 * of memory which has already been mapped. 173 */ 174 boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn); 175 bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES, 176 __pfn_to_phys(end_pfn)); 177 178 /* 179 * Initialise the bootmem allocator, handing the 180 * memory banks over to bootmem. 181 */ 182 node_set_online(0); 183 pgdat = NODE_DATA(0); 184 init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn); 185 186 /* Free the lowmem regions from memblock into bootmem. */ 187 for_each_memblock(memory, reg) { 188 unsigned long start = memblock_region_memory_base_pfn(reg); 189 unsigned long end = memblock_region_memory_end_pfn(reg); 190 191 if (end >= end_pfn) 192 end = end_pfn; 193 if (start >= end) 194 break; 195 196 free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT); 197 } 198 199 /* Reserve the lowmem memblock reserved regions in bootmem. */ 200 for_each_memblock(reserved, reg) { 201 unsigned long start = memblock_region_reserved_base_pfn(reg); 202 unsigned long end = memblock_region_reserved_end_pfn(reg); 203 204 if (end >= end_pfn) 205 end = end_pfn; 206 if (start >= end) 207 break; 208 209 reserve_bootmem(__pfn_to_phys(start), 210 (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT); 211 } 212 } 213 214 #ifdef CONFIG_ZONE_DMA 215 /* 216 * The DMA mask corresponding to the maximum bus address allocatable 217 * using GFP_DMA. The default here places no restriction on DMA 218 * allocations. This must be the smallest DMA mask in the system, 219 * so a successful GFP_DMA allocation will always satisfy this. 220 */ 221 u32 arm_dma_limit; 222 223 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, 224 unsigned long dma_size) 225 { 226 if (size[0] <= dma_size) 227 return; 228 229 size[ZONE_NORMAL] = size[0] - dma_size; 230 size[ZONE_DMA] = dma_size; 231 hole[ZONE_NORMAL] = hole[0]; 232 hole[ZONE_DMA] = 0; 233 } 234 #endif 235 236 static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, 237 unsigned long max_high) 238 { 239 unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; 240 struct memblock_region *reg; 241 242 /* 243 * initialise the zones. 244 */ 245 memset(zone_size, 0, sizeof(zone_size)); 246 247 /* 248 * The memory size has already been determined. If we need 249 * to do anything fancy with the allocation of this memory 250 * to the zones, now is the time to do it. 251 */ 252 zone_size[0] = max_low - min; 253 #ifdef CONFIG_HIGHMEM 254 zone_size[ZONE_HIGHMEM] = max_high - max_low; 255 #endif 256 257 /* 258 * Calculate the size of the holes. 259 * holes = node_size - sum(bank_sizes) 260 */ 261 memcpy(zhole_size, zone_size, sizeof(zhole_size)); 262 for_each_memblock(memory, reg) { 263 unsigned long start = memblock_region_memory_base_pfn(reg); 264 unsigned long end = memblock_region_memory_end_pfn(reg); 265 266 if (start < max_low) { 267 unsigned long low_end = min(end, max_low); 268 zhole_size[0] -= low_end - start; 269 } 270 #ifdef CONFIG_HIGHMEM 271 if (end > max_low) { 272 unsigned long high_start = max(start, max_low); 273 zhole_size[ZONE_HIGHMEM] -= end - high_start; 274 } 275 #endif 276 } 277 278 #ifdef ARM_DMA_ZONE_SIZE 279 #ifndef CONFIG_ZONE_DMA 280 #error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations 281 #endif 282 283 /* 284 * Adjust the sizes according to any special requirements for 285 * this machine type. 286 */ 287 arm_adjust_dma_zone(zone_size, zhole_size, 288 ARM_DMA_ZONE_SIZE >> PAGE_SHIFT); 289 290 arm_dma_limit = PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1; 291 #endif 292 293 free_area_init_node(0, zone_size, min, zhole_size); 294 } 295 296 #ifdef CONFIG_HAVE_ARCH_PFN_VALID 297 int pfn_valid(unsigned long pfn) 298 { 299 return memblock_is_memory(pfn << PAGE_SHIFT); 300 } 301 EXPORT_SYMBOL(pfn_valid); 302 #endif 303 304 #ifndef CONFIG_SPARSEMEM 305 static void arm_memory_present(void) 306 { 307 } 308 #else 309 static void arm_memory_present(void) 310 { 311 struct memblock_region *reg; 312 313 for_each_memblock(memory, reg) 314 memory_present(0, memblock_region_memory_base_pfn(reg), 315 memblock_region_memory_end_pfn(reg)); 316 } 317 #endif 318 319 static int __init meminfo_cmp(const void *_a, const void *_b) 320 { 321 const struct membank *a = _a, *b = _b; 322 long cmp = bank_pfn_start(a) - bank_pfn_start(b); 323 return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; 324 } 325 326 void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) 327 { 328 int i; 329 330 sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); 331 332 memblock_init(); 333 for (i = 0; i < mi->nr_banks; i++) 334 memblock_add(mi->bank[i].start, mi->bank[i].size); 335 336 /* Register the kernel text, kernel data and initrd with memblock. */ 337 #ifdef CONFIG_XIP_KERNEL 338 memblock_reserve(__pa(_sdata), _end - _sdata); 339 #else 340 memblock_reserve(__pa(_stext), _end - _stext); 341 #endif 342 #ifdef CONFIG_BLK_DEV_INITRD 343 if (phys_initrd_size && 344 !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { 345 pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n", 346 phys_initrd_start, phys_initrd_size); 347 phys_initrd_start = phys_initrd_size = 0; 348 } 349 if (phys_initrd_size && 350 memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { 351 pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n", 352 phys_initrd_start, phys_initrd_size); 353 phys_initrd_start = phys_initrd_size = 0; 354 } 355 if (phys_initrd_size) { 356 memblock_reserve(phys_initrd_start, phys_initrd_size); 357 358 /* Now convert initrd to virtual addresses */ 359 initrd_start = __phys_to_virt(phys_initrd_start); 360 initrd_end = initrd_start + phys_initrd_size; 361 } 362 #endif 363 364 arm_mm_memblock_reserve(); 365 arm_dt_memblock_reserve(); 366 367 /* reserve any platform specific memblock areas */ 368 if (mdesc->reserve) 369 mdesc->reserve(); 370 371 memblock_analyze(); 372 memblock_dump_all(); 373 } 374 375 void __init bootmem_init(void) 376 { 377 unsigned long min, max_low, max_high; 378 379 max_low = max_high = 0; 380 381 find_limits(&min, &max_low, &max_high); 382 383 arm_bootmem_init(min, max_low); 384 385 /* 386 * Sparsemem tries to allocate bootmem in memory_present(), 387 * so must be done after the fixed reservations 388 */ 389 arm_memory_present(); 390 391 /* 392 * sparse_init() needs the bootmem allocator up and running. 393 */ 394 sparse_init(); 395 396 /* 397 * Now free the memory - free_area_init_node needs 398 * the sparse mem_map arrays initialized by sparse_init() 399 * for memmap_init_zone(), otherwise all PFNs are invalid. 400 */ 401 arm_bootmem_free(min, max_low, max_high); 402 403 high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1; 404 405 /* 406 * This doesn't seem to be used by the Linux memory manager any 407 * more, but is used by ll_rw_block. If we can get rid of it, we 408 * also get rid of some of the stuff above as well. 409 * 410 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in 411 * the system, not the maximum PFN. 412 */ 413 max_low_pfn = max_low - PHYS_PFN_OFFSET; 414 max_pfn = max_high - PHYS_PFN_OFFSET; 415 } 416 417 static inline int free_area(unsigned long pfn, unsigned long end, char *s) 418 { 419 unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10); 420 421 for (; pfn < end; pfn++) { 422 struct page *page = pfn_to_page(pfn); 423 ClearPageReserved(page); 424 init_page_count(page); 425 __free_page(page); 426 pages++; 427 } 428 429 if (size && s) 430 printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); 431 432 return pages; 433 } 434 435 static inline void 436 free_memmap(unsigned long start_pfn, unsigned long end_pfn) 437 { 438 struct page *start_pg, *end_pg; 439 unsigned long pg, pgend; 440 441 /* 442 * Convert start_pfn/end_pfn to a struct page pointer. 443 */ 444 start_pg = pfn_to_page(start_pfn - 1) + 1; 445 end_pg = pfn_to_page(end_pfn - 1) + 1; 446 447 /* 448 * Convert to physical addresses, and 449 * round start upwards and end downwards. 450 */ 451 pg = (unsigned long)PAGE_ALIGN(__pa(start_pg)); 452 pgend = (unsigned long)__pa(end_pg) & PAGE_MASK; 453 454 /* 455 * If there are free pages between these, 456 * free the section of the memmap array. 457 */ 458 if (pg < pgend) 459 free_bootmem(pg, pgend - pg); 460 } 461 462 /* 463 * The mem_map array can get very big. Free the unused area of the memory map. 464 */ 465 static void __init free_unused_memmap(struct meminfo *mi) 466 { 467 unsigned long bank_start, prev_bank_end = 0; 468 unsigned int i; 469 470 /* 471 * This relies on each bank being in address order. 472 * The banks are sorted previously in bootmem_init(). 473 */ 474 for_each_bank(i, mi) { 475 struct membank *bank = &mi->bank[i]; 476 477 bank_start = bank_pfn_start(bank); 478 479 #ifdef CONFIG_SPARSEMEM 480 /* 481 * Take care not to free memmap entries that don't exist 482 * due to SPARSEMEM sections which aren't present. 483 */ 484 bank_start = min(bank_start, 485 ALIGN(prev_bank_end, PAGES_PER_SECTION)); 486 #endif 487 /* 488 * If we had a previous bank, and there is a space 489 * between the current bank and the previous, free it. 490 */ 491 if (prev_bank_end && prev_bank_end < bank_start) 492 free_memmap(prev_bank_end, bank_start); 493 494 /* 495 * Align up here since the VM subsystem insists that the 496 * memmap entries are valid from the bank end aligned to 497 * MAX_ORDER_NR_PAGES. 498 */ 499 prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); 500 } 501 502 #ifdef CONFIG_SPARSEMEM 503 if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) 504 free_memmap(prev_bank_end, 505 ALIGN(prev_bank_end, PAGES_PER_SECTION)); 506 #endif 507 } 508 509 static void __init free_highpages(void) 510 { 511 #ifdef CONFIG_HIGHMEM 512 unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET; 513 struct memblock_region *mem, *res; 514 515 /* set highmem page free */ 516 for_each_memblock(memory, mem) { 517 unsigned long start = memblock_region_memory_base_pfn(mem); 518 unsigned long end = memblock_region_memory_end_pfn(mem); 519 520 /* Ignore complete lowmem entries */ 521 if (end <= max_low) 522 continue; 523 524 /* Truncate partial highmem entries */ 525 if (start < max_low) 526 start = max_low; 527 528 /* Find and exclude any reserved regions */ 529 for_each_memblock(reserved, res) { 530 unsigned long res_start, res_end; 531 532 res_start = memblock_region_reserved_base_pfn(res); 533 res_end = memblock_region_reserved_end_pfn(res); 534 535 if (res_end < start) 536 continue; 537 if (res_start < start) 538 res_start = start; 539 if (res_start > end) 540 res_start = end; 541 if (res_end > end) 542 res_end = end; 543 if (res_start != start) 544 totalhigh_pages += free_area(start, res_start, 545 NULL); 546 start = res_end; 547 if (start == end) 548 break; 549 } 550 551 /* And now free anything which remains */ 552 if (start < end) 553 totalhigh_pages += free_area(start, end, NULL); 554 } 555 totalram_pages += totalhigh_pages; 556 #endif 557 } 558 559 /* 560 * mem_init() marks the free areas in the mem_map and tells us how much 561 * memory is free. This is done after various parts of the system have 562 * claimed their memory after the kernel image. 563 */ 564 void __init mem_init(void) 565 { 566 unsigned long reserved_pages, free_pages; 567 struct memblock_region *reg; 568 int i; 569 #ifdef CONFIG_HAVE_TCM 570 /* These pointers are filled in on TCM detection */ 571 extern u32 dtcm_end; 572 extern u32 itcm_end; 573 #endif 574 575 max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; 576 577 /* this will put all unused low memory onto the freelists */ 578 free_unused_memmap(&meminfo); 579 580 totalram_pages += free_all_bootmem(); 581 582 #ifdef CONFIG_SA1111 583 /* now that our DMA memory is actually so designated, we can free it */ 584 totalram_pages += free_area(PHYS_PFN_OFFSET, 585 __phys_to_pfn(__pa(swapper_pg_dir)), NULL); 586 #endif 587 588 free_highpages(); 589 590 reserved_pages = free_pages = 0; 591 592 for_each_bank(i, &meminfo) { 593 struct membank *bank = &meminfo.bank[i]; 594 unsigned int pfn1, pfn2; 595 struct page *page, *end; 596 597 pfn1 = bank_pfn_start(bank); 598 pfn2 = bank_pfn_end(bank); 599 600 page = pfn_to_page(pfn1); 601 end = pfn_to_page(pfn2 - 1) + 1; 602 603 do { 604 if (PageReserved(page)) 605 reserved_pages++; 606 else if (!page_count(page)) 607 free_pages++; 608 page++; 609 } while (page < end); 610 } 611 612 /* 613 * Since our memory may not be contiguous, calculate the 614 * real number of pages we have in this system 615 */ 616 printk(KERN_INFO "Memory:"); 617 num_physpages = 0; 618 for_each_memblock(memory, reg) { 619 unsigned long pages = memblock_region_memory_end_pfn(reg) - 620 memblock_region_memory_base_pfn(reg); 621 num_physpages += pages; 622 printk(" %ldMB", pages >> (20 - PAGE_SHIFT)); 623 } 624 printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); 625 626 printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n", 627 nr_free_pages() << (PAGE_SHIFT-10), 628 free_pages << (PAGE_SHIFT-10), 629 reserved_pages << (PAGE_SHIFT-10), 630 totalhigh_pages << (PAGE_SHIFT-10)); 631 632 #define MLK(b, t) b, t, ((t) - (b)) >> 10 633 #define MLM(b, t) b, t, ((t) - (b)) >> 20 634 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) 635 636 printk(KERN_NOTICE "Virtual kernel memory layout:\n" 637 " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" 638 #ifdef CONFIG_HAVE_TCM 639 " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n" 640 " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n" 641 #endif 642 " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" 643 #ifdef CONFIG_MMU 644 " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n" 645 #endif 646 " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" 647 " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" 648 #ifdef CONFIG_HIGHMEM 649 " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" 650 #endif 651 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" 652 " .init : 0x%p" " - 0x%p" " (%4d kB)\n" 653 " .text : 0x%p" " - 0x%p" " (%4d kB)\n" 654 " .data : 0x%p" " - 0x%p" " (%4d kB)\n" 655 " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", 656 657 MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + 658 (PAGE_SIZE)), 659 #ifdef CONFIG_HAVE_TCM 660 MLK(DTCM_OFFSET, (unsigned long) dtcm_end), 661 MLK(ITCM_OFFSET, (unsigned long) itcm_end), 662 #endif 663 MLK(FIXADDR_START, FIXADDR_TOP), 664 #ifdef CONFIG_MMU 665 MLM(CONSISTENT_BASE, CONSISTENT_END), 666 #endif 667 MLM(VMALLOC_START, VMALLOC_END), 668 MLM(PAGE_OFFSET, (unsigned long)high_memory), 669 #ifdef CONFIG_HIGHMEM 670 MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * 671 (PAGE_SIZE)), 672 #endif 673 MLM(MODULES_VADDR, MODULES_END), 674 675 MLK_ROUNDUP(__init_begin, __init_end), 676 MLK_ROUNDUP(_text, _etext), 677 MLK_ROUNDUP(_sdata, _edata), 678 MLK_ROUNDUP(__bss_start, __bss_stop)); 679 680 #undef MLK 681 #undef MLM 682 #undef MLK_ROUNDUP 683 684 /* 685 * Check boundaries twice: Some fundamental inconsistencies can 686 * be detected at build time already. 687 */ 688 #ifdef CONFIG_MMU 689 BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE); 690 BUG_ON(VMALLOC_END > CONSISTENT_BASE); 691 692 BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); 693 BUG_ON(TASK_SIZE > MODULES_VADDR); 694 #endif 695 696 #ifdef CONFIG_HIGHMEM 697 BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); 698 BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); 699 #endif 700 701 if (PAGE_SIZE >= 16384 && num_physpages <= 128) { 702 extern int sysctl_overcommit_memory; 703 /* 704 * On a machine this small we won't get 705 * anywhere without overcommit, so turn 706 * it on by default. 707 */ 708 sysctl_overcommit_memory = OVERCOMMIT_ALWAYS; 709 } 710 } 711 712 void free_initmem(void) 713 { 714 #ifdef CONFIG_HAVE_TCM 715 extern char __tcm_start, __tcm_end; 716 717 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), 718 __phys_to_pfn(__pa(&__tcm_end)), 719 "TCM link"); 720 #endif 721 722 if (!machine_is_integrator() && !machine_is_cintegrator()) 723 totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), 724 __phys_to_pfn(__pa(__init_end)), 725 "init"); 726 } 727 728 #ifdef CONFIG_BLK_DEV_INITRD 729 730 static int keep_initrd; 731 732 void free_initrd_mem(unsigned long start, unsigned long end) 733 { 734 if (!keep_initrd) 735 totalram_pages += free_area(__phys_to_pfn(__pa(start)), 736 __phys_to_pfn(__pa(end)), 737 "initrd"); 738 } 739 740 static int __init keepinitrd_setup(char *__unused) 741 { 742 keep_initrd = 1; 743 return 1; 744 } 745 746 __setup("keepinitrd", keepinitrd_setup); 747 #endif 748