/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>

#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
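/*
 * The initrd location can reach this code two ways: via the
 * "initrd=<start>,<size>" kernel parameter above (both values go
 * through memparse(), so size suffixes work, e.g. "initrd=0x00800000,8M"
 * with a board-specific, purely illustrative address), or via the
 * ATAG_INITRD/ATAG_INITRD2 boot tags.  ATAG_INITRD carries a virtual
 * address (hence the __virt_to_phys() conversion), while ATAG_INITRD2
 * already carries a physical address.  The region is later reserved
 * and converted to virtual addresses in arm_memblock_init().
 */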
/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo * mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas();

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

static void __init find_limits(struct meminfo *mi,
	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
{
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}

static void __init arm_bootmem_init(struct meminfo *mi,
	unsigned long start_pfn, unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;
	int i;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		if (!bank->highmem)
			free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
	}

	/*
	 * Reserve the memblock reserved regions in bootmem.
	 */
	for_each_memblock(reserved, reg) {
		phys_addr_t start = memblock_region_reserved_base_pfn(reg);
		phys_addr_t end = memblock_region_reserved_end_pfn(reg);
		if (start >= start_pfn && end <= end_pfn)
			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
					     (end - start) << PAGE_SHIFT,
					     BOOTMEM_DEFAULT);
	}
}
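/*
 * arm_bootmem_free() below hands the zone layout to the core VM, in
 * pfn units as computed by find_limits():
 *
 *	min .......... max_low .......... max_high
 *	 |--- zone 0 ---|--- ZONE_HIGHMEM ---|
 *
 * Zone 0 covers the directly-mapped lowmem banks; ZONE_HIGHMEM only
 * exists with CONFIG_HIGHMEM.  zhole_size[] subtracts any pfns inside
 * this span that are not backed by a memblock memory region.
 */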
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	arch_adjust_zones(zone_size, zhole_size);

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifndef CONFIG_SPARSEMEM
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);

static void arm_memory_present(void)
{
}
#else
static void arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	int i;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	memblock_init();
	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	memblock_analyze();
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	struct meminfo *mi = &meminfo;
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(mi, &min, &max_low, &max_high);

	arm_bootmem_init(mi, min, max_low);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	arm_bootmem_free(min, max_low, max_high);

	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}
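/*
 * Hand the pages in [pfn, end) back to the page allocator: clear the
 * reserved bit, reset the reference count and release each page with
 * __free_page().  Returns the number of pages freed; if 's' is
 * non-NULL, the amount released is also logged.
 */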
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in arm_memblock_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}
}
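/*
 * Example for free_unused_memmap() above (addresses purely
 * illustrative): with one bank at 0x00000000-0x0fffffff and another
 * at 0x20000000-0x2fffffff, the struct page entries covering the
 * 0x10000000-0x1fffffff hole are returned to bootmem, after rounding
 * the first bank's end up to MAX_ORDER_NR_PAGES as the VM requires.
 */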
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	int i;
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	totalram_pages += free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	totalram_pages += free_area(PHYS_PFN_OFFSET,
				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

#ifdef CONFIG_HIGHMEM
	/* set highmem page free */
	for_each_bank (i, &meminfo) {
		unsigned long start = bank_pfn_start(&meminfo.bank[i]);
		unsigned long end = bank_pfn_end(&meminfo.bank[i]);
		if (start >= max_low_pfn + PHYS_PFN_OFFSET)
			totalhigh_pages += free_area(start, end, NULL);
	}
	totalram_pages += totalhigh_pages;
#endif

	reserved_pages = free_pages = 0;

	for_each_bank(i, &meminfo) {
		struct membank *bank = &meminfo.bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for (i = 0; i < meminfo.nr_banks; i++) {
		num_physpages += bank_pfn_size(&meminfo.bank[i]);
		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_MMU
			"    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_TOP),
#ifdef CONFIG_MMU
			MLM(CONSISTENT_BASE, CONSISTENT_END),
#endif
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
			MLM(MODULES_VADDR, MODULES_END),

			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(_sdata, _edata));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
	BUG_ON(VMALLOC_END > CONSISTENT_BASE);

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
				    __phys_to_pfn(__pa(&__tcm_end)),
				    "TCM link");
#endif

	if (!machine_is_integrator() && !machine_is_cintegrator())
		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
					    __phys_to_pfn(__pa(__init_end)),
					    "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif