/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>

#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);
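
/*
 * Illustrative example only (the load address below is invented, not
 * from any particular board): a command line containing
 *
 *	initrd=0x00800000,8M
 *
 * leaves phys_initrd_start == 0x00800000 and, since memparse()
 * understands the usual K/M/G suffixes, phys_initrd_size == 8 << 20.
 * If the ",size" part is missing, both values stay zero and the
 * initrd is ignored.
 */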

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

/*
 * This keeps memory configuration data used by a couple of memory
 * initialisation functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, node, i;
	struct meminfo *mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas();
	for_each_online_node(node) {
		pg_data_t *n = NODE_DATA(node);
		struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;

		for_each_nodebank(i, mi, node) {
			struct membank *bank = &mi->bank[i];
			unsigned int pfn1, pfn2;
			struct page *page, *end;

			pfn1 = bank_pfn_start(bank);
			pfn2 = bank_pfn_end(bank);

			page = map + pfn1;
			end = map + pfn2;

			do {
				total++;
				if (PageReserved(page))
					reserved++;
				else if (PageSwapCache(page))
					cached++;
				else if (PageSlab(page))
					slab++;
				else if (!page_count(page))
					free++;
				else
					shared += page_count(page) - 1;
				page++;
			} while (page < end);
		}
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

static void __init find_node_limits(int node, struct meminfo *mi,
	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
{
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}

/*
 * FIXME: We really want to avoid allocating the bootmap bitmap
 * over the top of the initrd.  Hopefully, this is located towards
 * the start of a bank, so if we allocate the bootmap bitmap at
 * the end, we won't clash.
 */
static unsigned int __init
find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
{
	unsigned int start_pfn, i, bootmap_pfn;

	start_pfn = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT;
	bootmap_pfn = 0;

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		unsigned int start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (end < start_pfn)
			continue;

		if (start < start_pfn)
			start = start_pfn;

		if (end <= start)
			continue;

		if (end - start >= bootmap_pages) {
			bootmap_pfn = start;
			break;
		}
	}

	if (bootmap_pfn == 0)
		BUG();

	return bootmap_pfn;
}
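
/*
 * A rough worked example of the sizes involved (numbers invented):
 * a node spanning 128MB with 4K pages covers 32768 page frames, so
 * its bootmem bitmap needs 32768 bits = 4KB, i.e. a single page.
 * bootmem_bootmap_pages() performs that calculation, and
 * find_bootmap_pfn() above then hunts for that many contiguous free
 * pages past the end of the kernel image.
 */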

static int __init check_initrd(struct meminfo *mi)
{
	int initrd_node = -2;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long end = phys_initrd_start + phys_initrd_size;

	/*
	 * Make sure that the initrd is within a valid area of
	 * memory.
	 */
	if (phys_initrd_size) {
		unsigned int i;

		initrd_node = -1;

		for (i = 0; i < mi->nr_banks; i++) {
			struct membank *bank = &mi->bank[i];
			if (bank_phys_start(bank) <= phys_initrd_start &&
			    end <= bank_phys_end(bank))
				initrd_node = bank->node;
		}
	}

	if (initrd_node == -1) {
		printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
		       "physical memory - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
#endif

	return initrd_node;
}

static void __init bootmem_init_node(int node, struct meminfo *mi,
	unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long boot_pfn;
	unsigned int boot_pages;
	pg_data_t *pgdat;
	int i;

	/*
	 * Allocate the bootmem bitmap page.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	boot_pfn = find_bootmap_pfn(node, mi, boot_pages);

	/*
	 * Initialise the bootmem allocator for this node, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(node);
	pgdat = NODE_DATA(node);
	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		if (!bank->highmem)
			free_bootmem_node(pgdat, bank_phys_start(bank),
					  bank_phys_size(bank));
	}

	/*
	 * Reserve the bootmem bitmap for this node.
	 */
	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
			     boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
}

static void __init bootmem_reserve_initrd(int node)
{
#ifdef CONFIG_BLK_DEV_INITRD
	pg_data_t *pgdat = NODE_DATA(node);
	int res;

	res = reserve_bootmem_node(pgdat, phys_initrd_start,
				   phys_initrd_size, BOOTMEM_EXCLUSIVE);

	if (res == 0) {
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	} else {
		printk(KERN_ERR
		       "INITRD: 0x%08lx+0x%08lx overlaps in-use "
		       "memory region - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
	}
#endif
}
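
/*
 * Worked example for bootmem_free_node() below, with invented
 * numbers: a node with two lowmem banks covering PFNs [0x0000, 0x8000)
 * and [0xc000, 0x10000) has min = 0 and max_low = 0x10000, so
 * zone_size[0] = 0x10000 pages, while the banks only add up to
 * 0xc000 pages; zhole_size[0] = 0x4000 therefore records the
 * 0x4000-page hole between the two banks.
 */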

static void __init bootmem_free_node(int node, struct meminfo *mi)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long min, max_low, max_high;
	int i;

	find_node_limits(node, mi, &min, &max_low, &max_high);

	/*
	 * initialise the zones within this node.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The size of this node has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory to the
	 * zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * For each bank in this node, calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes_in_node)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_nodebank(i, mi, node) {
		int idx = 0;
#ifdef CONFIG_HIGHMEM
		if (mi->bank[i].highmem)
			idx = ZONE_HIGHMEM;
#endif
		zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
	}

	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	arch_adjust_zones(node, zone_size, zhole_size);

	free_area_init_node(node, zone_size, min, zhole_size);
}

#ifndef CONFIG_SPARSEMEM
int pfn_valid(unsigned long pfn)
{
	struct meminfo *mi = &meminfo;
	unsigned int left = 0, right = mi->nr_banks;

	do {
		unsigned int mid = (left + right) / 2;
		struct membank *bank = &mi->bank[mid];

		if (pfn < bank_pfn_start(bank))
			right = mid;
		else if (pfn >= bank_pfn_end(bank))
			left = mid + 1;
		else
			return 1;
	} while (left < right);
	return 0;
}
EXPORT_SYMBOL(pfn_valid);

static void arm_memory_present(struct meminfo *mi, int node)
{
}
#else
static void arm_memory_present(struct meminfo *mi, int node)
{
	int i;

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		memory_present(node, bank_pfn_start(bank),
			       bank_pfn_end(bank));
	}
}
#endif

void __init bootmem_init(void)
{
	struct meminfo *mi = &meminfo;
	unsigned long min, max_low, max_high;
	int node, initrd_node;

	/*
	 * Locate which node contains the ramdisk image, if any.
	 */
	initrd_node = check_initrd(mi);

	max_low = max_high = 0;

	/*
	 * Run through each node initialising the bootmem allocator.
	 */
	for_each_node(node) {
		unsigned long node_low, node_high;

		find_node_limits(node, mi, &min, &node_low, &node_high);

		if (node_low > max_low)
			max_low = node_low;
		if (node_high > max_high)
			max_high = node_high;

		/*
		 * If there is no memory in this node, ignore it.
		 * (We can't have nodes which have no lowmem)
		 */
		if (node_low == 0)
			continue;

		bootmem_init_node(node, mi, min, node_low);

		/*
		 * Reserve any special node zero regions.
		 */
		if (node == 0)
			reserve_node_zero(NODE_DATA(node));

		/*
		 * If the initrd is in this node, reserve its memory.
		 */
		if (node == initrd_node)
			bootmem_reserve_initrd(node);

		/*
		 * Sparsemem tries to allocate bootmem in memory_present(),
		 * so must be done after the fixed reservations.
		 */
		arm_memory_present(mi, node);
	}

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free memory in each node - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	for_each_node(node)
		bootmem_free_node(node, mi);

	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}
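
/*
 * To summarise the ordering that bootmem_init() depends on: the
 * initrd is located first, each node then gets its bootmem bitmap
 * and fixed reservations (node 0 regions, the initrd itself),
 * sparse_init() allocates its mem_map sections from bootmem, and
 * only then can bootmem_free_node() describe the zone sizes and
 * holes to free_area_init_node().
 */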

static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}

static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the
 * memory map.
 */
static void __init free_unused_memmap_node(int node, struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * [FIXME] This relies on each bank being in address order.  This
	 * may not be the case, especially if the user has provided the
	 * information on the command line.
	 */
	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);
		if (bank_start < prev_bank_end) {
			printk(KERN_ERR "MEM: unordered memory banks.  "
			       "Not freeing memmap.\n");
			break;
		}

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end != bank_start)
			free_memmap(node, prev_bank_end, bank_start);

		prev_bank_end = bank_pfn_end(bank);
	}
}
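
/*
 * Invented example of what free_unused_memmap_node() recovers:
 * assuming a struct page of roughly 32 bytes, a 0x4000-pfn hole
 * between two banks is shadowed by 0x4000 * 32 bytes = 512KB of
 * mem_map entries describing nonexistent RAM; free_memmap() hands
 * those pages (minus any partial page at either edge) back to
 * bootmem.
 */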

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	int i, node;

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
#endif

	/* this will put all unused low memory onto the freelists */
	for_each_online_node(node) {
		pg_data_t *pgdat = NODE_DATA(node);

		free_unused_memmap_node(node, &meminfo);

		if (pgdat->node_spanned_pages != 0)
			totalram_pages += free_all_bootmem_node(pgdat);
	}

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	totalram_pages += free_area(PHYS_PFN_OFFSET,
				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

#ifdef CONFIG_HIGHMEM
	/* set highmem page free */
	for_each_online_node(node) {
		for_each_nodebank(i, &meminfo, node) {
			unsigned long start = bank_pfn_start(&meminfo.bank[i]);
			unsigned long end = bank_pfn_end(&meminfo.bank[i]);
			if (start >= max_low_pfn + PHYS_PFN_OFFSET)
				totalhigh_pages += free_area(start, end, NULL);
		}
	}
	totalram_pages += totalhigh_pages;
#endif

	reserved_pages = free_pages = 0;

	for_each_online_node(node) {
		pg_data_t *n = NODE_DATA(node);
		struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;

		for_each_nodebank(i, &meminfo, node) {
			struct membank *bank = &meminfo.bank[i];
			unsigned int pfn1, pfn2;
			struct page *page, *end;

			pfn1 = bank_pfn_start(bank);
			pfn2 = bank_pfn_end(bank);

			page = map + pfn1;
			end = map + pfn2;

			do {
				if (PageReserved(page))
					reserved_pages++;
				else if (!page_count(page))
					free_pages++;
				page++;
			} while (page < end);
		}
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system.
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for (i = 0; i < meminfo.nr_banks; i++) {
		num_physpages += bank_pfn_size(&meminfo.bank[i]);
		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));
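
	/*
	 * Each helper macro below expands to three printk arguments:
	 * the start, the end, and the size of the region scaled to KB
	 * or MB.  For example, MLM(0xc0000000, 0xe0000000) supplies the
	 * values that print as "0xc0000000 - 0xe0000000   ( 512 MB)"
	 * in the layout dump.
	 */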
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_MMU
			"    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"      .init : 0x%p - 0x%p   (%4d kB)\n"
			"      .text : 0x%p - 0x%p   (%4d kB)\n"
			"      .data : 0x%p - 0x%p   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
			MLK(FIXADDR_START, FIXADDR_TOP),
#ifdef CONFIG_MMU
			MLM(CONSISTENT_BASE, CONSISTENT_END),
#endif
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
			MLM(MODULES_VADDR, MODULES_END),

			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(_data, _edata));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
	BUG_ON(VMALLOC_END > CONSISTENT_BASE);

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char *__tcm_start, *__tcm_end;

	totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
				    __phys_to_pfn(__pa(__tcm_end)),
				    "TCM link");
#endif

	if (!machine_is_integrator() && !machine_is_cintegrator())
		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
					    __phys_to_pfn(__pa(__init_end)),
					    "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
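
/*
 * Usage note: booting with "keepinitrd" on the kernel command line
 * sets keep_initrd above, so free_initrd_mem() leaves the initrd
 * pages reserved instead of handing them back to the page allocator.
 */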