/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_no_alignment = cr_no_alignment & ~mask;
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	phys_addr_t start;
	unsigned long size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo * mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas(filter);

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	/* This assumes the meminfo array is properly sorted */
	*min = bank_pfn_start(&mi->bank[0]);
	for_each_bank (i, mi)
		if (mi->bank[i].highmem)
			break;
	*max_low = bank_pfn_end(&mi->bank[i - 1]);
	*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

void __init arm_memblock_init(struct meminfo *mi,
	const struct machine_desc *mdesc)
{
	int i;

	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	/* FDT scan will populate initrd_start */
	if (initrd_start && !phys_initrd_size) {
		phys_initrd_start = __virt_to_phys(initrd_start);
		phys_initrd_size = initrd_end - initrd_start;
	}
	initrd_start = initrd_end = 0;
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();
	arm_dt_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_scan_reserved_mem();

	/*
	 * reserve memory for DMA contiguous allocations,
	 * must come from DMA area inside low memory
	 */
	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	memblock_allow_resize();
	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min, max_low, max_high);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 */
	min_low_pfn = min;
	max_low_pfn = max_low;
	max_pfn = max_high;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		bank_start = min(bank_start,
				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
		free_memmap(prev_bank_end,
			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);
	free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_TOP),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
			MLM(MODULES_VADDR, MODULES_END),
#endif

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif