/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/sort.h>
#include <linux/highmem.h>
#include <linux/gfp.h>

#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
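/*
 * Illustration (not part of the original file): early_initrd() above
 * handles the usual "initrd=<start>,<size>" form of the kernel command
 * line, where both values go through memparse() and may therefore carry
 * K/M/G suffixes, e.g.
 *
 *	initrd=0x00800000,8M
 *
 * would set phys_initrd_start to 0x00800000 and phys_initrd_size to 8MB.
 * The address is a made-up example; the real one depends on where the
 * bootloader loaded the image.
 */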
/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, node, i;
	struct meminfo *mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas();
	for_each_online_node(node) {
		for_each_nodebank(i, mi, node) {
			struct membank *bank = &mi->bank[i];
			unsigned int pfn1, pfn2;
			struct page *page, *end;

			pfn1 = bank_pfn_start(bank);
			pfn2 = bank_pfn_end(bank);

			page = pfn_to_page(pfn1);
			end = pfn_to_page(pfn2 - 1) + 1;

			do {
				total++;
				if (PageReserved(page))
					reserved++;
				else if (PageSwapCache(page))
					cached++;
				else if (PageSlab(page))
					slab++;
				else if (!page_count(page))
					free++;
				else
					shared += page_count(page) - 1;
				page++;
			} while (page < end);
		}
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

static void __init find_node_limits(int node, struct meminfo *mi,
	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
{
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}

/*
 * FIXME: We really want to avoid allocating the bootmap bitmap
 * over the top of the initrd.  Hopefully, this is located towards
 * the start of a bank, so if we allocate the bootmap bitmap at
 * the end, we won't clash.
 */
static unsigned int __init
find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
{
	unsigned int start_pfn, i, bootmap_pfn;

	start_pfn = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT;
	bootmap_pfn = 0;

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		unsigned int start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (end < start_pfn)
			continue;

		if (start < start_pfn)
			start = start_pfn;

		if (end <= start)
			continue;

		if (end - start >= bootmap_pages) {
			bootmap_pfn = start;
			break;
		}
	}

	if (bootmap_pfn == 0)
		BUG();

	return bootmap_pfn;
}
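/*
 * A worked example of the sizing above, assuming the usual 4K pages
 * (numbers illustrative): bootmem keeps one bit per page, so for a node
 * spanning 128MB (32768 pages) bootmem_bootmap_pages() needs 32768 bits
 * = 4KB, i.e. a single bitmap page, and find_bootmap_pfn() then looks
 * for the first bank range past _end with at least that many contiguous
 * free pages.
 */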
static int __init check_initrd(struct meminfo *mi)
{
	int initrd_node = -2;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long end = phys_initrd_start + phys_initrd_size;

	/*
	 * Make sure that the initrd is within a valid area of
	 * memory.
	 */
	if (phys_initrd_size) {
		unsigned int i;

		initrd_node = -1;

		for (i = 0; i < mi->nr_banks; i++) {
			struct membank *bank = &mi->bank[i];
			if (bank_phys_start(bank) <= phys_initrd_start &&
			    end <= bank_phys_end(bank))
				initrd_node = bank->node;
		}
	}

	if (initrd_node == -1) {
		printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
		       "physical memory - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
#endif

	return initrd_node;
}

static inline void map_memory_bank(struct membank *bank)
{
#ifdef CONFIG_MMU
	struct map_desc map;

	map.pfn = bank_pfn_start(bank);
	map.virtual = __phys_to_virt(bank_phys_start(bank));
	map.length = bank_phys_size(bank);
	map.type = MT_MEMORY;

	create_mapping(&map);
#endif
}

static void __init bootmem_init_node(int node, struct meminfo *mi,
	unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long boot_pfn;
	unsigned int boot_pages;
	pg_data_t *pgdat;
	int i;

	/*
	 * Map the memory banks for this node.
	 */
	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];

		if (!bank->highmem)
			map_memory_bank(bank);
	}

	/*
	 * Allocate the bootmem bitmap page.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	boot_pfn = find_bootmap_pfn(node, mi, boot_pages);

	/*
	 * Initialise the bootmem allocator for this node, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(node);
	pgdat = NODE_DATA(node);
	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		if (!bank->highmem)
			free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
	}

	/*
	 * Reserve the bootmem bitmap for this node.
	 */
	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
			     boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
}

static void __init bootmem_reserve_initrd(int node)
{
#ifdef CONFIG_BLK_DEV_INITRD
	pg_data_t *pgdat = NODE_DATA(node);
	int res;

	res = reserve_bootmem_node(pgdat, phys_initrd_start,
				   phys_initrd_size, BOOTMEM_EXCLUSIVE);

	if (res == 0) {
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	} else {
		printk(KERN_ERR
			"INITRD: 0x%08lx+0x%08lx overlaps in-use "
			"memory region - disabling initrd\n",
			phys_initrd_start, phys_initrd_size);
	}
#endif
}
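/*
 * A sketch of the hole arithmetic done by bootmem_free_node() below,
 * with made-up numbers: suppose a node spans PFNs 0x60000-0x70000
 * (min = 0x60000, max_low = 0x70000, no highmem), covered by two banks
 * of 0x6000 pages each. Then:
 *
 *	zone_size[0]  = 0x70000 - 0x60000           = 0x10000 pages
 *	zhole_size[0] = 0x10000 - (0x6000 + 0x6000) = 0x4000 pages
 *
 * i.e. a quarter of the node's span is a hole in the memory map.
 */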
static void __init bootmem_free_node(int node, struct meminfo *mi)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long min, max_low, max_high;
	int i;

	find_node_limits(node, mi, &min, &max_low, &max_high);

	/*
	 * initialise the zones within this node.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The size of this node has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory to the
	 * zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * For each bank in this node, calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes_in_node)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_nodebank(i, mi, node) {
		int idx = 0;
#ifdef CONFIG_HIGHMEM
		if (mi->bank[i].highmem)
			idx = ZONE_HIGHMEM;
#endif
		zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
	}

	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	arch_adjust_zones(node, zone_size, zhole_size);

	free_area_init_node(node, zone_size, min, zhole_size);
}

#ifndef CONFIG_SPARSEMEM
int pfn_valid(unsigned long pfn)
{
	struct meminfo *mi = &meminfo;
	unsigned int left = 0, right = mi->nr_banks;

	/* binary search the sorted bank array for the bank holding pfn */
	do {
		unsigned int mid = (right + left) / 2;
		struct membank *bank = &mi->bank[mid];

		if (pfn < bank_pfn_start(bank))
			right = mid;
		else if (pfn >= bank_pfn_end(bank))
			left = mid + 1;
		else
			return 1;
	} while (left < right);
	return 0;
}
EXPORT_SYMBOL(pfn_valid);

static void arm_memory_present(struct meminfo *mi, int node)
{
}
#else
static void arm_memory_present(struct meminfo *mi, int node)
{
	int i;
	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
	}
}
#endif

/* sort() comparator: order banks by ascending start pfn */
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init bootmem_init(void)
{
	struct meminfo *mi = &meminfo;
	unsigned long min, max_low, max_high;
	int node, initrd_node;

	sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL);

	/*
	 * Locate which node contains the ramdisk image, if any.
	 */
	initrd_node = check_initrd(mi);

	max_low = max_high = 0;

	/*
	 * Run through each node initialising the bootmem allocator.
	 */
	for_each_node(node) {
		unsigned long node_low, node_high;

		find_node_limits(node, mi, &min, &node_low, &node_high);

		if (node_low > max_low)
			max_low = node_low;
		if (node_high > max_high)
			max_high = node_high;

		/*
		 * If there is no memory in this node, ignore it.
		 * (We can't have nodes which have no lowmem)
		 */
		if (node_low == 0)
			continue;

		bootmem_init_node(node, mi, min, node_low);

		/*
		 * Reserve any special node zero regions.
		 */
		if (node == 0)
			reserve_node_zero(NODE_DATA(node));

		/*
		 * If the initrd is in this node, reserve its memory.
		 */
		if (node == initrd_node)
			bootmem_reserve_initrd(node);

		/*
		 * Sparsemem tries to allocate bootmem in memory_present(),
		 * so must be done after the fixed reservations.
		 */
		arm_memory_present(mi, node);
	}

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free memory in each node - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	for_each_node(node)
		bootmem_free_node(node, mi);

	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}
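/*
 * Note on the last two assignments above, with assumed numbers (not
 * taken from any particular board): if RAM starts at 0x60000000
 * (PHYS_PFN_OFFSET = 0x60000 with 4K pages) and lowmem ends at PFN
 * 0x68000, then max_low_pfn = 0x8000, i.e. 32768 pages = 128MB of
 * lowmem - a page count, not the highest valid PFN, exactly as the
 * comment warns.
 */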
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}

static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap_node(int node, struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * [FIXME] This relies on each bank being in address order.  This
	 * may not be the case, especially if the user has provided the
	 * information on the command line.
	 */
	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);
		if (bank_start < prev_bank_end) {
			printk(KERN_ERR "MEM: unordered memory banks.  "
				"Not freeing memmap.\n");
			break;
		}

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end != bank_start)
			free_memmap(node, prev_bank_end, bank_start);

		prev_bank_end = bank_pfn_end(bank);
	}
}
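/*
 * Example of what the two functions above recover, again with assumed
 * numbers: for banks covering PFNs 0x60000-0x64000 and 0x68000-0x6c000,
 * the 0x4000-page gap between them has mem_map entries that can never
 * be used. With a 32-byte struct page that is 0x4000 * 32 = 512KB of
 * memmap handed back to bootmem by free_memmap().
 */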
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	int i, node;

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
#endif

	/* this will put all unused low memory onto the freelists */
	for_each_online_node(node) {
		pg_data_t *pgdat = NODE_DATA(node);

		free_unused_memmap_node(node, &meminfo);

		if (pgdat->node_spanned_pages != 0)
			totalram_pages += free_all_bootmem_node(pgdat);
	}

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	totalram_pages += free_area(PHYS_PFN_OFFSET,
				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

#ifdef CONFIG_HIGHMEM
	/* set highmem page free */
	for_each_online_node(node) {
		for_each_nodebank(i, &meminfo, node) {
			unsigned long start = bank_pfn_start(&meminfo.bank[i]);
			unsigned long end = bank_pfn_end(&meminfo.bank[i]);
			if (start >= max_low_pfn + PHYS_PFN_OFFSET)
				totalhigh_pages += free_area(start, end, NULL);
		}
	}
	totalram_pages += totalhigh_pages;
#endif

	reserved_pages = free_pages = 0;

	for_each_online_node(node) {
		for_each_nodebank(i, &meminfo, node) {
			struct membank *bank = &meminfo.bank[i];
			unsigned int pfn1, pfn2;
			struct page *page, *end;

			pfn1 = bank_pfn_start(bank);
			pfn2 = bank_pfn_end(bank);

			page = pfn_to_page(pfn1);
			end = pfn_to_page(pfn2 - 1) + 1;

			do {
				if (PageReserved(page))
					reserved_pages++;
				else if (!page_count(page))
					free_pages++;
				page++;
			} while (page < end);
		}
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system.
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for (i = 0; i < meminfo.nr_banks; i++) {
		num_physpages += bank_pfn_size(&meminfo.bank[i]);
		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));

/* emit "base, top, size" triples for the layout printk below */
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_MMU
			"    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"      .init : 0x%p - 0x%p   (%4d kB)\n"
			"      .text : 0x%p - 0x%p   (%4d kB)\n"
			"      .data : 0x%p - 0x%p   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
			MLK(FIXADDR_START, FIXADDR_TOP),
#ifdef CONFIG_MMU
			MLM(CONSISTENT_BASE, CONSISTENT_END),
#endif
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
			MLM(MODULES_VADDR, MODULES_END),

			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(_data, _edata));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
	BUG_ON(VMALLOC_END > CONSISTENT_BASE);

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small (at most 128 pages of 16KB,
		 * i.e. 2MB of RAM) we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
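/*
 * free_initmem() and free_initrd_mem() below reuse free_area(), so on a
 * successful boot the log carries lines such as (sizes illustrative,
 * not from any particular machine):
 *
 *	Freeing init memory: 132K
 *	Freeing initrd memory: 8192K
 */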
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char *__tcm_start, *__tcm_end;

	totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
				    __phys_to_pfn(__pa(__tcm_end)),
				    "TCM link");
#endif

	if (!machine_is_integrator() && !machine_is_cintegrator())
		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
					    __phys_to_pfn(__pa(__init_end)),
					    "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif