/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>

#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
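/*
 * Illustrative note (hypothetical values, not from the original source):
 * early_initrd() above parses an "initrd=<start>,<size>" kernel command
 * line option via memparse(), so size suffixes are accepted.  For example,
 * booting with "initrd=0x60800000,8M" would set
 * phys_initrd_start = 0x60800000 and phys_initrd_size = 0x800000.
 */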
/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo * mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas();

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

static void __init find_limits(struct meminfo *mi,
	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
{
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}

static void __init arm_bootmem_init(struct meminfo *mi,
	unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;
	int i;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		if (!bank->highmem)
			free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
	}

	/*
	 * Reserve the memblock reserved regions in bootmem.
	 */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
		if (start >= start_pfn &&
		    memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
				memblock_size_bytes(&memblock.reserved, i),
				BOOTMEM_DEFAULT);
	}
}
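/*
 * Illustrative note (hypothetical numbers, not from the original source):
 * for 256 MB of lowmem with 4 KB pages there are 65536 page frames, so the
 * bootmem bitmap needs 65536 bits = 8 KB, and bootmem_bootmap_pages() in
 * arm_bootmem_init() above returns 2 pages for it.
 */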
static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
	unsigned long max_low, unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	int i;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_bank(i, mi) {
		int idx = 0;
#ifdef CONFIG_HIGHMEM
		if (mi->bank[i].highmem)
			idx = ZONE_HIGHMEM;
#endif
		zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
	}

	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	arch_adjust_zones(zone_size, zhole_size);

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifndef CONFIG_SPARSEMEM
int pfn_valid(unsigned long pfn)
{
	struct memblock_region *mem = &memblock.memory;
	unsigned int left = 0, right = mem->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (pfn < memblock_start_pfn(mem, mid))
			right = mid;
		else if (pfn >= memblock_end_pfn(mem, mid))
			left = mid + 1;
		else
			return 1;
	} while (left < right);
	return 0;
}
EXPORT_SYMBOL(pfn_valid);

static void arm_memory_present(void)
{
}
#else
static void arm_memory_present(void)
{
	int i;
	for (i = 0; i < memblock.memory.cnt; i++)
		memory_present(0, memblock_start_pfn(&memblock.memory, i),
				  memblock_end_pfn(&memblock.memory, i));
}
#endif

void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	int i;

	memblock_init();
	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	memblock_analyze();
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	struct meminfo *mi = &meminfo;
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(mi, &min, &max_low, &max_high);

	arm_bootmem_init(mi, min, max_low);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	arm_bootmem_free(mi, min, max_low, max_high);

	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}
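/*
 * Illustrative note (hypothetical platform, not from the original source):
 * for the "number of pages, not maximum PFN" remark at the end of
 * bootmem_init() above, consider RAM starting at physical 0x60000000 with
 * 512 MB of lowmem: PHYS_PFN_OFFSET is 0x60000 and max_low is 0x80000, so
 * max_low_pfn becomes 0x20000, i.e. the count of lowmem pages rather than
 * the highest valid PFN.
 */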
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}
}
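/*
 * Illustrative note (hypothetical bank layout, not from the original source):
 * if one bank ends at PFN 0x20000 and the next starts at PFN 0x40000, the
 * struct page entries covering PFNs 0x20000 to 0x3ffff are never used, so
 * free_unused_memmap() above hands that slice of mem_map back to bootmem via
 * free_memmap(), after rounding the previous bank end up to
 * MAX_ORDER_NR_PAGES.
 */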
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	int i;
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	totalram_pages += free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	totalram_pages += free_area(PHYS_PFN_OFFSET,
				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

#ifdef CONFIG_HIGHMEM
	/* set highmem page free */
	for_each_bank (i, &meminfo) {
		unsigned long start = bank_pfn_start(&meminfo.bank[i]);
		unsigned long end = bank_pfn_end(&meminfo.bank[i]);
		if (start >= max_low_pfn + PHYS_PFN_OFFSET)
			totalhigh_pages += free_area(start, end, NULL);
	}
	totalram_pages += totalhigh_pages;
#endif

	reserved_pages = free_pages = 0;

	for_each_bank(i, &meminfo) {
		struct membank *bank = &meminfo.bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for (i = 0; i < meminfo.nr_banks; i++) {
		num_physpages += bank_pfn_size(&meminfo.bank[i]);
		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));

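/*
 * Helper macros for the layout printk below: each expands to a base/top
 * pair plus the size of the range, in kB for MLK and MLK_ROUNDUP (the
 * latter rounding partial kilobytes up) and in MB for MLM.
 */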
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_MMU
			"    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_TOP),
#ifdef CONFIG_MMU
			MLM(CONSISTENT_BASE, CONSISTENT_END),
#endif
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
			MLM(MODULES_VADDR, MODULES_END),

			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(_sdata, _edata));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
	BUG_ON(VMALLOC_END > CONSISTENT_BASE);

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
				    __phys_to_pfn(__pa(&__tcm_end)),
				    "TCM link");
#endif

	if (!machine_is_integrator() && !machine_is_cintegrator())
		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
					    __phys_to_pfn(__pa(__init_end)),
					    "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
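/*
 * Illustrative note (not from the original source): booting with
 * "keepinitrd" on the kernel command line sets keep_initrd above, so
 * free_initrd_mem() leaves the initrd pages reserved instead of returning
 * them to the page allocator.
 */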