/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
        cr_alignment = cr_alignment & ~mask;
        return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
        pr_warn("ATAG_INITRD is deprecated; "
                "please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

static void __init find_limits(unsigned long *min, unsigned long *max_low,
                               unsigned long *max_high)
{
        *max_low = PFN_DOWN(memblock_get_current_limit());
        *min = PFN_UP(memblock_start_of_DRAM());
        *max_high = PFN_DOWN(memblock_end_of_DRAM());
}
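/*
 * Illustration (hypothetical numbers, not from any real platform):
 * with DRAM spanning 0x80000000-0xc0000000 and the memblock limit
 * (the top of lowmem) at 0xb0000000, the results with 4K pages are
 * min = 0x80000, max_low = 0xb0000 and max_high = 0xc0000, all in
 * page frame numbers.
 */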
#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
{
        if (size[0] <= dma_size)
                return;

        size[ZONE_NORMAL] = size[0] - dma_size;
        size[ZONE_DMA] = dma_size;
        hole[ZONE_NORMAL] = hole[0];
        hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
        if (mdesc->dma_zone_size) {
                arm_dma_zone_size = mdesc->dma_zone_size;
                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
        } else
                arm_dma_limit = 0xffffffff;
        arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
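/*
 * Worked example (hypothetical machine_desc): with PHYS_OFFSET at
 * 0x80000000 and mdesc->dma_zone_size = SZ_64M, the above yields
 * arm_dma_limit = 0x83ffffff and arm_dma_pfn_limit = 0x83fff, i.e.
 * only the first 64MB of RAM may back GFP_DMA allocations.
 */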
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
        unsigned long max_high)
{
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        struct memblock_region *reg;

        /*
         * initialise the zones.
         */
        memset(zone_size, 0, sizeof(zone_size));

        /*
         * The memory size has already been determined.  If we need
         * to do anything fancy with the allocation of this memory
         * to the zones, now is the time to do it.
         */
        zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
        zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

        /*
         * Calculate the size of the holes.
         *  holes = node_size - sum(bank_sizes)
         */
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start < max_low) {
                        unsigned long low_end = min(end, max_low);
                        zhole_size[0] -= low_end - start;
                }
#ifdef CONFIG_HIGHMEM
                if (end > max_low) {
                        unsigned long high_start = max(start, max_low);
                        zhole_size[ZONE_HIGHMEM] -= end - high_start;
                }
#endif
        }

#ifdef CONFIG_ZONE_DMA
        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
        if (arm_dma_zone_size)
                arm_adjust_dma_zone(zone_size, zhole_size,
                        arm_dma_zone_size >> PAGE_SHIFT);
#endif

        free_area_init_node(0, zone_size, min, zhole_size);
}
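/*
 * Hole accounting, illustrated with made-up banks: with lowmem PFNs
 * 0x80000-0xb0000 and two banks covering 0x80000-0x90000 and
 * 0xa0000-0xb0000, zone_size[0] starts at 0x30000 pages and each
 * bank subtracts its span, leaving zhole_size[0] = 0x10000 pages
 * for the gap between the banks.
 */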
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
        return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                memory_present(0, memblock_region_memory_base_pfn(reg),
                               memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
        phys_addr_t phys;

        BUG_ON(!arm_memblock_steal_permitted);

        phys = memblock_phys_alloc(size, align);
        if (!phys)
                panic("Failed to steal %pa bytes at %pS\n",
                      &size, (void *)_RET_IP_);

        memblock_free(phys, size);
        memblock_remove(phys, size);

        return phys;
}

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        phys_addr_t start;
        unsigned long size;

        initrd_start = initrd_end = 0;

        if (!phys_initrd_size)
                return;

        /*
         * Round the memory region to page boundaries as per free_initrd_mem()
         * This allows us to detect whether the pages overlapping the initrd
         * are in use, but more importantly, reserves the entire set of pages
         * as we don't want these pages allocated for other purposes.
         */
        start = round_down(phys_initrd_start, PAGE_SIZE);
        size = phys_initrd_size + (phys_initrd_start - start);
        size = round_up(size, PAGE_SIZE);

        if (!memblock_is_region_memory(start, size)) {
                pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
                       (u64)start, size);
                return;
        }

        if (memblock_is_region_reserved(start, size)) {
                pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
                       (u64)start, size);
                return;
        }

        memblock_reserve(start, size);

        /* Now convert initrd to virtual addresses */
        initrd_start = __phys_to_virt(phys_initrd_start);
        initrd_end = initrd_start + phys_initrd_size;
#endif
}
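/*
 * Rounding, illustrated (hypothetical initrd): phys_initrd_start =
 * 0x80800020 with phys_initrd_size = 0x300000 reserves the page-
 * aligned window 0x80800000 + 0x301000, covering the partial pages
 * at both ends, while initrd_start/initrd_end still describe the
 * exact (unrounded) virtual extent of the image itself.
 */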
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
        /* Register the kernel text, kernel data and initrd with memblock. */
        memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

        arm_initrd_init();

        arm_mm_memblock_reserve();

        /* reserve any platform specific memblock areas */
        if (mdesc->reserve)
                mdesc->reserve();

        early_init_fdt_reserve_self();
        early_init_fdt_scan_reserved_mem();

        /* reserve memory for DMA contiguous allocations */
        dma_contiguous_reserve(arm_dma_limit);

        arm_memblock_steal_permitted = false;
        memblock_dump_all();
}

void __init bootmem_init(void)
{
        unsigned long min, max_low, max_high;

        memblock_allow_resize();
        max_low = max_high = 0;

        find_limits(&min, &max_low, &max_high);

        early_memtest((phys_addr_t)min << PAGE_SHIFT,
                      (phys_addr_t)max_low << PAGE_SHIFT);

        /*
         * Sparsemem tries to allocate bootmem in memory_present(),
         * so must be done after the fixed reservations
         */
        arm_memory_present();

        /*
         * sparse_init() needs the bootmem allocator up and running.
         */
        sparse_init();

        /*
         * Now free the memory - free_area_init_node needs
         * the sparse mem_map arrays initialized by sparse_init()
         * for memmap_init_zone(), otherwise all PFNs are invalid.
         */
        zone_sizes_init(min, max_low, max_high);

        /*
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
         */
        min_low_pfn = min;
        max_low_pfn = max_low;
        max_pfn = max_high;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
        u32 *p = (u32 *)s;
        for (; count != 0; count -= 4)
                *p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        phys_addr_t pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = PAGE_ALIGN(__pa(start_pg));
        pgend = __pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                memblock_free_early(pg, pgend - pg);
}
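/*
 * A sketch of the rounding above, with invented numbers: if the
 * memmap pages for a hole span 0x81000234-0x81003f00 physically,
 * only the fully-unused pages 0x81001000-0x81003000 are returned to
 * memblock; the partial pages at either end stay allocated because
 * they also back struct pages for adjacent, present memory.
 */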
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
        unsigned long start, prev_end = 0;
        struct memblock_region *reg;

        /*
         * This relies on each bank being in address order.
         * The banks are sorted previously in bootmem_init().
         */
        for_each_memblock(memory, reg) {
                start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist
                 * due to SPARSEMEM sections which aren't present.
                 */
                start = min(start,
                            ALIGN(prev_end, PAGES_PER_SECTION));
#else
                /*
                 * Align down here since the VM subsystem insists that the
                 * memmap entries are valid from the bank start aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_end && prev_end < start)
                        free_memmap(prev_end, start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
                                 MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
                free_memmap(prev_end,
                            ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
        for (; pfn < end; pfn++)
                free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn;
        struct memblock_region *mem, *res;

        /* set highmem page free */
        for_each_memblock(memory, mem) {
                unsigned long start = memblock_region_memory_base_pfn(mem);
                unsigned long end = memblock_region_memory_end_pfn(mem);

                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;

                if (memblock_is_nomap(mem))
                        continue;

                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;

                /* Find and exclude any reserved regions */
                for_each_memblock(reserved, res) {
                        unsigned long res_start, res_end;

                        res_start = memblock_region_reserved_base_pfn(res);
                        res_end = memblock_region_reserved_end_pfn(res);

                        if (res_end < start)
                                continue;
                        if (res_start < start)
                                res_start = start;
                        if (res_start > end)
                                res_start = end;
                        if (res_end > end)
                                res_end = end;
                        if (res_start != start)
                                free_area_high(start, res_start);
                        start = res_end;
                        if (start == end)
                                break;
                }

                /* And now free anything which remains */
                if (start < end)
                        free_area_high(start, end);
        }
#endif
}
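/*
 * The reserved-region walk above relies on both memblock lists being
 * sorted by address: each highmem bank is released piecewise, skipping
 * every reserved range that intersects it.  For example (invented
 * PFNs), a bank 0xb0000-0xc0000 with a reservation at 0xb8000-0xb9000
 * is freed as 0xb0000-0xb8000 and then 0xb9000-0xc0000.
 */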
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
        /* These pointers are filled in on TCM detection */
        extern u32 dtcm_end;
        extern u32 itcm_end;
#endif

        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

        /* this will put all unused low memory onto the freelists */
        free_unused_memmap();
        memblock_free_all();

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

        free_highpages();

        mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

        pr_notice("Virtual kernel memory layout:\n"
                  "    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
                  "    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                  "    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                  "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                  "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                  "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
                  "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
                  "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
                  "      .text : 0x%p" " - 0x%p" "   (%4td kB)\n"
                  "      .init : 0x%p" " - 0x%p" "   (%4td kB)\n"
                  "      .data : 0x%p" " - 0x%p" "   (%4td kB)\n"
                  "       .bss : 0x%p" " - 0x%p" "   (%4td kB)\n",

                  MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
#ifdef CONFIG_HAVE_TCM
                  MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
                  MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
                  MLK(FIXADDR_START, FIXADDR_END),
                  MLM(VMALLOC_START, VMALLOC_END),
                  MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
                  MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
                      (PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
                  MLM(MODULES_VADDR, MODULES_END),
#endif

                  MLK_ROUNDUP(_text, _etext),
                  MLK_ROUNDUP(__init_begin, __init_end),
                  MLK_ROUNDUP(_sdata, _edata),
                  MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#ifdef CONFIG_MMU
        BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
        BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
        BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
        const char *name;
        unsigned long start;
        unsigned long end;
        pmdval_t mask;
        pmdval_t prot;
        pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
        /* Make page tables, etc before _stext RW (set NX). */
        {
                .name   = "pre-text NX",
                .start  = PAGE_OFFSET,
                .end    = (unsigned long)_stext,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
        /* Make init RW (set NX). */
        {
                .name   = "init NX",
                .start  = (unsigned long)__init_begin,
                .end    = (unsigned long)_sdata,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
        /* Make rodata NX (set RO in ro_perms below). */
        {
                .name   = "rodata NX",
                .start  = (unsigned long)__start_rodata_section_aligned,
                .end    = (unsigned long)__init_begin,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
};

static struct section_perm ro_perms[] = {
        /* Make kernel code and rodata RX (set RO). */
        {
                .name   = "text/rodata RO",
                .start  = (unsigned long)_stext,
                .end    = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
                .mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
                .prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
                .mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
                .prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
                .clear  = PMD_SECT_AP_WRITE,
#endif
        },
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm).  During startup, this is the init_mm.  It is
 * only safe to be called with preemption disabled, as under
 * stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
                                  pmdval_t prot, struct mm_struct *mm)
{
        pmd_t *pmd;

        pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
        pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
        if (addr & SECTION_SIZE)
                pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
        else
                pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
        flush_pmd_entry(pmd);
        local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}
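/*
 * Why the pmd[0]/pmd[1] selection in section_update(): on classic
 * (non-LPAE) page tables, Linux folds two 1MB hardware section
 * entries into each 2MB pmd, so the SECTION_SIZE bit of the address
 * selects which of the pair to rewrite.  LPAE maps one 2MB block
 * entry per pmd, so only pmd[0] exists.
 */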
/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
        if (cpu_architecture() < CPU_ARCH_ARMv6)
                return false;

        return !!(get_cr() & CR_XP);
}

static void set_section_perms(struct section_perm *perms, int n, bool set,
                              struct mm_struct *mm)
{
        size_t i;
        unsigned long addr;

        if (!arch_has_strict_perms())
                return;

        for (i = 0; i < n; i++) {
                if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
                    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
                        pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
                               perms[i].name, perms[i].start, perms[i].end,
                               SECTION_SIZE);
                        continue;
                }

                for (addr = perms[i].start;
                     addr < perms[i].end;
                     addr += SECTION_SIZE)
                        section_update(addr, perms[i].mask,
                                       set ? perms[i].prot : perms[i].clear, mm);
        }
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework and executed by only one CPU while all other
 * CPUs will spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
        struct task_struct *t, *s;

        for_each_process(t) {
                if (t->flags & PF_KTHREAD)
                        continue;
                for_each_thread(t, s)
                        set_section_perms(perms, n, true, s->mm);
        }
        set_section_perms(perms, n, true, current->active_mm);
        set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
        update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
        return 0;
}

static void fix_kernmem_perms(void)
{
        stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
        update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
        return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
        kernel_set_to_readonly = 1;
        stop_machine(__mark_rodata_ro, NULL, NULL);
        debug_checkwx();
}

void set_kernel_text_rw(void)
{
        if (!kernel_set_to_readonly)
                return;

        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
                          current->active_mm);
}

void set_kernel_text_ro(void)
{
        if (!kernel_set_to_readonly)
                return;

        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
                          current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
        fix_kernmem_perms();

        poison_init_mem(__init_begin, __init_end - __init_begin);
        if (!machine_is_integrator() && !machine_is_cintegrator())
                free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd) {
                if (start == initrd_start)
                        start = round_down(start, PAGE_SIZE);
                if (end == initrd_end)
                        end = round_up(end, PAGE_SIZE);

                poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
                free_reserved_area((void *)start, (void *)end, -1, "initrd");
        }
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
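/*
 * Usage notes (informational): set_kernel_text_rw()/set_kernel_text_ro()
 * are the pairing used by code patching (e.g. ftrace) to briefly lift
 * the text protections established by mark_rodata_ro().  Booting with
 * "keepinitrd" on the command line skips freeing and poisoning the
 * initrd pages, so they remain usable after boot.
 */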