/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init early_initrd(char *p)
{
	phys_addr_t start;
	unsigned long size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
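/*
 * Worked example (illustrative values, not from any particular board):
 * with PHYS_OFFSET = 0xc0000000 and a machine descriptor that sets
 * dma_zone_size = SZ_64M, setup_dma_zone() below yields
 * arm_dma_limit = 0xc3ffffff and arm_dma_pfn_limit = 0xc3fff.
 */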
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
				       unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
				   unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
				    arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}
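/*
 * Hole accounting example for zone_sizes_init() above (made-up layout):
 * lowmem spans PFNs 0x60000-0x80000 but the only memory bank covers
 * 0x60000-0x70000.  zone_size[0] starts at 0x20000 pages; the loop
 * subtracts the bank's 0x10000 pages, leaving zhole_size[0] = 0x10000,
 * so free_area_init_node() knows half of the zone span has no memory.
 */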
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	/* FDT scan will populate initrd_start */
	if (initrd_start && !phys_initrd_size) {
		phys_initrd_start = __virt_to_phys(initrd_start);
		phys_initrd_size = initrd_end - initrd_start;
	}

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}
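/*
 * Rounding example for arm_initrd_init() above (illustrative addresses):
 * phys_initrd_start = 0x60800200 and phys_initrd_size = 0x300000 give
 * start = 0x60800000 and size = 0x301000, so the partially covered
 * first and last pages are reserved along with the initrd itself.
 */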
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	memblock_allow_resize();
	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	early_memtest((phys_addr_t)min << PAGE_SHIFT,
		      (phys_addr_t)max_low << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min, max_low, max_high);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 */
	min_low_pfn = min;
	max_low_pfn = max_low;
	max_pfn = max_high;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}
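/*
 * Note on free_memmap() above: rounding pg up and pgend down is
 * deliberate.  The first and last struct pages of the hole can share a
 * physical page with memmap entries that are still in use, so only
 * whole pages strictly inside the range are handed back to memblock;
 * if the hole's memmap fits within one page, nothing is freed.
 */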
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}
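/*
 * free_highpages() above leans on memblock keeping both the memory and
 * reserved arrays sorted by base address: each highmem bank makes one
 * forward pass over the reserved regions, clipping them against
 * [start, end) and releasing only the uncovered gaps via free_area_high().
 */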
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	pr_notice("Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p - 0x%p   (%4td kB)\n"
			"      .init : 0x%p - 0x%p   (%4td kB)\n"
			"      .data : 0x%p - 0x%p   (%4td kB)\n"
			"       .bss : 0x%p - 0x%p   (%4td kB)\n",

			MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_END),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
			MLM(MODULES_VADDR, MODULES_END),
#endif

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc before _stext RW (set NX). */
	{
		.name = "pre-text NX",
		.start = PAGE_OFFSET,
		.end = (unsigned long)_stext,
		.mask = ~PMD_SECT_XN,
		.prot = PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name = "init NX",
		.start = (unsigned long)__init_begin,
		.end = (unsigned long)_sdata,
		.mask = ~PMD_SECT_XN,
		.prot = PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name = "rodata NX",
		.start = (unsigned long)__start_rodata_section_aligned,
		.end = (unsigned long)__init_begin,
		.mask = ~PMD_SECT_XN,
		.prot = PMD_SECT_XN,
	},
};
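/*
 * Illustrative layout for the nx_perms table above (typical 3G/1G split,
 * assuming PAGE_OFFSET = 0xc0000000 and TEXT_OFFSET = 0x8000): the
 * "pre-text NX" entry covers [0xc0000000, _stext), which holds the
 * initial page tables (swapper_pg_dir at 0xc0004000), data that must
 * stay writable but should never be executable.
 */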
static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name = "text/rodata RO",
		.start = (unsigned long)_stext,
		.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear = PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

static void set_section_perms(struct section_perm *perms, int n, bool set,
			      struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
			       perms[i].name, perms[i].start, perms[i].end,
			       SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				       set ? perms[i].prot : perms[i].clear, mm);
	}
}
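/*
 * Implementation note for section_update() above: without LPAE a pmd
 * entry covers 2MB backed by a pair of 1MB section descriptors, so the
 * code selects pmd[0] or pmd[1] from bit 20 of the address (SECTION_SIZE
 * is 1MB there); with LPAE each pmd maps a single 2MB section, so only
 * pmd[0] is touched.
 */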
/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework and executed by only one CPU while all other
 * CPUs will spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
			  current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
			  current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		if (start == initrd_start)
			start = round_down(start, PAGE_SIZE);
		if (end == initrd_end)
			end = round_up(end, PAGE_SIZE);

		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
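/*
 * Usage note: booting with "keepinitrd" on the kernel command line makes
 * keepinitrd_setup() above set keep_initrd, so free_initrd_mem() leaves
 * the initrd image intact instead of poisoning and releasing its pages.
 */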