/*
 * linux/arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
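
/*
 * Editor's illustration (not part of the original source): the helper
 * below carves the machine-specific DMA window out of the single lowmem
 * zone computed in zone_sizes_init().  With, say, 512 MiB of lowmem and a
 * 64 MiB dma_zone_size, ZONE_DMA ends up with the first 64 MiB worth of
 * pages, ZONE_NORMAL with the remaining 448 MiB, and any hole already
 * accounted to zone 0 is attributed to ZONE_NORMAL.
 */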
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
				    arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem().
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
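	/*
	 * Worked example (editor's addition, hypothetical values): with 4 KiB
	 * pages, phys_initrd_start = 0x80200204 and phys_initrd_size = 0x1000
	 * give start = 0x80200000 and size = 0x2000, so both pages touched by
	 * the initrd end up reserved.
	 */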
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid)
{
	u32 size, ctr;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

	size = 1 << ((ctr & 0xf) + 2);
	if (cpuid != 0 && icache_size != size)
		pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
			cpuid);
	if (icache_size > size)
		icache_size = size;
}
#endif

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	memblocks_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
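	/*
	 * Editor's note: because pg is rounded up and pgend is rounded down,
	 * a memmap page that also backs struct pages of a neighbouring
	 * present region is never freed; only wholly unused pages are
	 * returned to memblock.
	 */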
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}
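
/*
 * Editor's illustration of the reserved-region clipping in free_highpages()
 * above (hypothetical PFNs): for a highmem bank covering [0x60000, 0x80000)
 * with a single reserved region at [0x70000, 0x70100), the inner loop frees
 * [0x60000, 0x70000), skips the reservation, and the final free_area_high()
 * call releases [0x70100, 0x80000).
 */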

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. It is only
 * safe to call with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}
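
/*
 * Editor's note on section_update() above: with the classic (non-LPAE)
 * page tables, Linux groups two 1 MiB hardware section entries under each
 * pmd slot, so the odd/even entry is picked with (addr & SECTION_SIZE);
 * with LPAE a pmd maps a single 2 MiB section and only pmd[0] is updated.
 */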
/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

void set_section_perms(struct section_perm *perms, int n, bool set,
			struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				       set ? perms[i].prot : perms[i].clear, mm);
	}
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework, and is executed by only one CPU while all
 * other CPUs spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif