/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	phys_addr_t start;
	unsigned long size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);
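
/*
 * For example, booting with "initrd=0x60800000,8M" (a hypothetical
 * load address) leaves phys_initrd_start == 0x60800000 and
 * phys_initrd_size == SZ_8M; memparse() accepts the usual K/M/G
 * suffixes.
 */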

static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
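
/*
 * Sketch of how a platform requests a DMA zone (hypothetical board,
 * assuming its DMA masters can only reach the first 64MB of RAM):
 *
 *	MACHINE_START(EXAMPLE, "Example board")
 *		.dma_zone_size	= SZ_64M,
 *		...
 *	MACHINE_END
 *
 * arm_adjust_dma_zone() then splits that amount off the bottom of
 * lowmem as ZONE_DMA and leaves the rest in ZONE_NORMAL.
 */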

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
				    arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}
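
/*
 * arm_memblock_steal() is meant for early callers such as a
 * machine_desc ->reserve() hook (see arm_memblock_init() below),
 * which run before arm_memblock_steal_permitted is cleared.  The
 * stolen range is removed from memblock entirely, so the kernel
 * neither maps it nor creates struct pages for it.
 */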

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	/* FDT scan will populate initrd_start */
	if (initrd_start && !phys_initrd_size) {
		phys_initrd_start = __virt_to_phys(initrd_start);
		phys_initrd_size = initrd_end - initrd_start;
	}

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}
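
/*
 * Call ordering: arm_memblock_init() above runs early from
 * setup_arch(), while bootmem_init() below is invoked later via
 * paging_init(), once the lowmem mappings exist and the memblock
 * limits are final.
 */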

void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	memblock_allow_resize();
	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	early_memtest((phys_addr_t)min << PAGE_SHIFT,
		      (phys_addr_t)max_low << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min, max_low, max_high);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 */
	min_low_pfn = min;
	max_low_pfn = max_low;
	max_pfn = max_high;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	pr_notice("Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4td kB)\n",

			MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_END),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
			MLM(MODULES_VADDR, MODULES_END),
#endif

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~L_PMD_SECT_RDONLY,
		.prot	= L_PMD_SECT_RDONLY,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm).  During startup, this is the init_mm.  It is
 * only safe to be called with preemption disabled, as under
 * stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}
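
/*
 * Apply (set == true) or revert (set == false) the permissions in a
 * section_perm table for one mm.  Ranges that are not SECTION_SIZE
 * aligned cannot be expressed as section mappings, so they are
 * reported and skipped rather than updated.
 */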
void set_section_perms(struct section_perm *perms, int n, bool set,
			struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}

}

static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	read_lock(&tasklist_lock);
	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			set_section_perms(perms, n, true, s->mm);
	}
	read_unlock(&tasklist_lock);
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

void mark_rodata_ro(void)
{
	stop_machine(__mark_rodata_ro, NULL, NULL);
}

void set_kernel_text_rw(void)
{
	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_tcmmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
}

void free_initmem(void)
{
	fix_kernmem_perms();
	free_tcmmem();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		if (start == initrd_start)
			start = round_down(start, PAGE_SIZE);
		if (end == initrd_end)
			end = round_up(end, PAGE_SIZE);

		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif