/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>

#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	phys_addr_t start;
	unsigned long size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
	phys_initrd_start = start;
	phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */
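/*
 * Illustrative usage (example values, not from this file): the "initrd="
 * early parameter handled by early_initrd() takes a physical start
 * address and a size, both parsed by memparse(), which accepts K/M/G
 * suffixes.  For example:
 *
 *	initrd=0x60d00000,8M
 *
 * records phys_initrd_start = 0x60d00000 and phys_initrd_size = 8M.
 */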
/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo * mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas(filter);

	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
		return;

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	/* This assumes the meminfo array is properly sorted */
	*min = bank_pfn_start(&mi->bank[0]);
	for_each_bank (i, mi)
		if (mi->bank[i].highmem)
			break;
	*max_low = bank_pfn_end(&mi->bank[i - 1]);
	*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}

static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}

	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		unsigned long end = memblock_region_reserved_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		reserve_bootmem(__pfn_to_phys(start),
				(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
}
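/*
 * Worked example (assumed values): with a single 512MB lowmem bank at
 * PHYS_OFFSET 0x60000000 and no highmem banks, find_limits() yields
 * min = 0x60000, max_low = max_high = 0x80000 (all PFNs), and
 * arm_bootmem_init() then allocates its bootmem bitmap from memblock
 * below __pfn_to_phys(max_low).
 */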
#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
#endif
}

static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

void __init arm_memblock_init(struct meminfo *mi,
	const struct machine_desc *mdesc)
{
	int i;

	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
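	/*
	 * For an XIP kernel the text executes in place from ROM/flash, so
	 * only the RAM-resident sections (from _sdata onwards) need to be
	 * reserved; a conventional kernel reserves everything from _stext
	 * to _end.
	 */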
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();
	arm_dt_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_dt_scan_reserved_mem();

	/*
	 * reserve memory for DMA contiguous allocations,
	 * must come from DMA area inside low memory
	 */
	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));

	arm_memblock_steal_permitted = false;
	memblock_allow_resize();
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	arm_bootmem_init(min, max_low);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	arm_bootmem_free(min, max_low, max_high);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}
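/*
 * Note: poison_init_mem() assumes count is a multiple of four bytes;
 * otherwise the loop above would never terminate.  All callers pass
 * linker-section or page-aligned boundaries, which satisfy this.
 */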
468 */ 469 if (pg < pgend) 470 free_bootmem(pg, pgend - pg); 471 } 472 473 /* 474 * The mem_map array can get very big. Free the unused area of the memory map. 475 */ 476 static void __init free_unused_memmap(struct meminfo *mi) 477 { 478 unsigned long bank_start, prev_bank_end = 0; 479 unsigned int i; 480 481 /* 482 * This relies on each bank being in address order. 483 * The banks are sorted previously in bootmem_init(). 484 */ 485 for_each_bank(i, mi) { 486 struct membank *bank = &mi->bank[i]; 487 488 bank_start = bank_pfn_start(bank); 489 490 #ifdef CONFIG_SPARSEMEM 491 /* 492 * Take care not to free memmap entries that don't exist 493 * due to SPARSEMEM sections which aren't present. 494 */ 495 bank_start = min(bank_start, 496 ALIGN(prev_bank_end, PAGES_PER_SECTION)); 497 #else 498 /* 499 * Align down here since the VM subsystem insists that the 500 * memmap entries are valid from the bank start aligned to 501 * MAX_ORDER_NR_PAGES. 502 */ 503 bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES); 504 #endif 505 /* 506 * If we had a previous bank, and there is a space 507 * between the current bank and the previous, free it. 508 */ 509 if (prev_bank_end && prev_bank_end < bank_start) 510 free_memmap(prev_bank_end, bank_start); 511 512 /* 513 * Align up here since the VM subsystem insists that the 514 * memmap entries are valid from the bank end aligned to 515 * MAX_ORDER_NR_PAGES. 516 */ 517 prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); 518 } 519 520 #ifdef CONFIG_SPARSEMEM 521 if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) 522 free_memmap(prev_bank_end, 523 ALIGN(prev_bank_end, PAGES_PER_SECTION)); 524 #endif 525 } 526 527 #ifdef CONFIG_HIGHMEM 528 static inline void free_area_high(unsigned long pfn, unsigned long end) 529 { 530 for (; pfn < end; pfn++) 531 free_highmem_page(pfn_to_page(pfn)); 532 } 533 #endif 534 535 static void __init free_highpages(void) 536 { 537 #ifdef CONFIG_HIGHMEM 538 unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET; 539 struct memblock_region *mem, *res; 540 541 /* set highmem page free */ 542 for_each_memblock(memory, mem) { 543 unsigned long start = memblock_region_memory_base_pfn(mem); 544 unsigned long end = memblock_region_memory_end_pfn(mem); 545 546 /* Ignore complete lowmem entries */ 547 if (end <= max_low) 548 continue; 549 550 /* Truncate partial highmem entries */ 551 if (start < max_low) 552 start = max_low; 553 554 /* Find and exclude any reserved regions */ 555 for_each_memblock(reserved, res) { 556 unsigned long res_start, res_end; 557 558 res_start = memblock_region_reserved_base_pfn(res); 559 res_end = memblock_region_reserved_end_pfn(res); 560 561 if (res_end < start) 562 continue; 563 if (res_start < start) 564 res_start = start; 565 if (res_start > end) 566 res_start = end; 567 if (res_end > end) 568 res_end = end; 569 if (res_start != start) 570 free_area_high(start, res_start); 571 start = res_end; 572 if (start == end) 573 break; 574 } 575 576 /* And now free anything which remains */ 577 if (start < end) 578 free_area_high(start, end); 579 } 580 #endif 581 } 582 583 /* 584 * mem_init() marks the free areas in the mem_map and tells us how much 585 * memory is free. This is done after various parts of the system have 586 * claimed their memory after the kernel image. 
587 */ 588 void __init mem_init(void) 589 { 590 #ifdef CONFIG_HAVE_TCM 591 /* These pointers are filled in on TCM detection */ 592 extern u32 dtcm_end; 593 extern u32 itcm_end; 594 #endif 595 596 max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; 597 598 /* this will put all unused low memory onto the freelists */ 599 free_unused_memmap(&meminfo); 600 free_all_bootmem(); 601 602 #ifdef CONFIG_SA1111 603 /* now that our DMA memory is actually so designated, we can free it */ 604 free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL); 605 #endif 606 607 free_highpages(); 608 609 mem_init_print_info(NULL); 610 611 #define MLK(b, t) b, t, ((t) - (b)) >> 10 612 #define MLM(b, t) b, t, ((t) - (b)) >> 20 613 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) 614 615 printk(KERN_NOTICE "Virtual kernel memory layout:\n" 616 " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" 617 #ifdef CONFIG_HAVE_TCM 618 " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n" 619 " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n" 620 #endif 621 " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" 622 " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" 623 " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" 624 #ifdef CONFIG_HIGHMEM 625 " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" 626 #endif 627 #ifdef CONFIG_MODULES 628 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" 629 #endif 630 " .text : 0x%p" " - 0x%p" " (%4d kB)\n" 631 " .init : 0x%p" " - 0x%p" " (%4d kB)\n" 632 " .data : 0x%p" " - 0x%p" " (%4d kB)\n" 633 " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", 634 635 MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + 636 (PAGE_SIZE)), 637 #ifdef CONFIG_HAVE_TCM 638 MLK(DTCM_OFFSET, (unsigned long) dtcm_end), 639 MLK(ITCM_OFFSET, (unsigned long) itcm_end), 640 #endif 641 MLK(FIXADDR_START, FIXADDR_TOP), 642 MLM(VMALLOC_START, VMALLOC_END), 643 MLM(PAGE_OFFSET, (unsigned long)high_memory), 644 #ifdef CONFIG_HIGHMEM 645 MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * 646 (PAGE_SIZE)), 647 #endif 648 #ifdef CONFIG_MODULES 649 MLM(MODULES_VADDR, MODULES_END), 650 #endif 651 652 MLK_ROUNDUP(_text, _etext), 653 MLK_ROUNDUP(__init_begin, __init_end), 654 MLK_ROUNDUP(_sdata, _edata), 655 MLK_ROUNDUP(__bss_start, __bss_stop)); 656 657 #undef MLK 658 #undef MLM 659 #undef MLK_ROUNDUP 660 661 /* 662 * Check boundaries twice: Some fundamental inconsistencies can 663 * be detected at build time already. 664 */ 665 #ifdef CONFIG_MMU 666 BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); 667 BUG_ON(TASK_SIZE > MODULES_VADDR); 668 #endif 669 670 #ifdef CONFIG_HIGHMEM 671 BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); 672 BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); 673 #endif 674 675 if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { 676 extern int sysctl_overcommit_memory; 677 /* 678 * On a machine this small we won't get 679 * anywhere without overcommit, so turn 680 * it on by default. 
681 */ 682 sysctl_overcommit_memory = OVERCOMMIT_ALWAYS; 683 } 684 } 685 686 void free_initmem(void) 687 { 688 #ifdef CONFIG_HAVE_TCM 689 extern char __tcm_start, __tcm_end; 690 691 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); 692 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); 693 #endif 694 695 poison_init_mem(__init_begin, __init_end - __init_begin); 696 if (!machine_is_integrator() && !machine_is_cintegrator()) 697 free_initmem_default(-1); 698 } 699 700 #ifdef CONFIG_BLK_DEV_INITRD 701 702 static int keep_initrd; 703 704 void free_initrd_mem(unsigned long start, unsigned long end) 705 { 706 if (!keep_initrd) { 707 poison_init_mem((void *)start, PAGE_ALIGN(end) - start); 708 free_reserved_area((void *)start, (void *)end, -1, "initrd"); 709 } 710 } 711 712 static int __init keepinitrd_setup(char *__unused) 713 { 714 keep_initrd = 1; 715 return 1; 716 } 717 718 __setup("keepinitrd", keepinitrd_setup); 719 #endif 720