1 /* 2 * linux/arch/arm/mm/init.c 3 * 4 * Copyright (C) 1995-2005 Russell King 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 #include <linux/kernel.h> 11 #include <linux/errno.h> 12 #include <linux/swap.h> 13 #include <linux/init.h> 14 #include <linux/bootmem.h> 15 #include <linux/mman.h> 16 #include <linux/export.h> 17 #include <linux/nodemask.h> 18 #include <linux/initrd.h> 19 #include <linux/of_fdt.h> 20 #include <linux/highmem.h> 21 #include <linux/gfp.h> 22 #include <linux/memblock.h> 23 #include <linux/dma-contiguous.h> 24 #include <linux/sizes.h> 25 #include <linux/stop_machine.h> 26 27 #include <asm/cp15.h> 28 #include <asm/mach-types.h> 29 #include <asm/memblock.h> 30 #include <asm/prom.h> 31 #include <asm/sections.h> 32 #include <asm/setup.h> 33 #include <asm/system_info.h> 34 #include <asm/tlb.h> 35 #include <asm/fixmap.h> 36 37 #include <asm/mach/arch.h> 38 #include <asm/mach/map.h> 39 40 #include "mm.h" 41 42 #ifdef CONFIG_CPU_CP15_MMU 43 unsigned long __init __clear_cr(unsigned long mask) 44 { 45 cr_alignment = cr_alignment & ~mask; 46 return cr_alignment; 47 } 48 #endif 49 50 static phys_addr_t phys_initrd_start __initdata = 0; 51 static unsigned long phys_initrd_size __initdata = 0; 52 53 static int __init early_initrd(char *p) 54 { 55 phys_addr_t start; 56 unsigned long size; 57 char *endp; 58 59 start = memparse(p, &endp); 60 if (*endp == ',') { 61 size = memparse(endp + 1, NULL); 62 63 phys_initrd_start = start; 64 phys_initrd_size = size; 65 } 66 return 0; 67 } 68 early_param("initrd", early_initrd); 69 70 static int __init parse_tag_initrd(const struct tag *tag) 71 { 72 pr_warn("ATAG_INITRD is deprecated; " 73 "please update your bootloader.\n"); 74 phys_initrd_start = __virt_to_phys(tag->u.initrd.start); 75 phys_initrd_size = tag->u.initrd.size; 76 return 0; 77 } 78 79 __tagtable(ATAG_INITRD, parse_tag_initrd); 80 81 static int __init parse_tag_initrd2(const struct tag *tag) 82 { 83 phys_initrd_start = tag->u.initrd.start; 84 phys_initrd_size = tag->u.initrd.size; 85 return 0; 86 } 87 88 __tagtable(ATAG_INITRD2, parse_tag_initrd2); 89 90 static void __init find_limits(unsigned long *min, unsigned long *max_low, 91 unsigned long *max_high) 92 { 93 *max_low = PFN_DOWN(memblock_get_current_limit()); 94 *min = PFN_UP(memblock_start_of_DRAM()); 95 *max_high = PFN_DOWN(memblock_end_of_DRAM()); 96 } 97 98 #ifdef CONFIG_ZONE_DMA 99 100 phys_addr_t arm_dma_zone_size __read_mostly; 101 EXPORT_SYMBOL(arm_dma_zone_size); 102 103 /* 104 * The DMA mask corresponding to the maximum bus address allocatable 105 * using GFP_DMA. The default here places no restriction on DMA 106 * allocations. This must be the smallest DMA mask in the system, 107 * so a successful GFP_DMA allocation will always satisfy this. 108 */ 109 phys_addr_t arm_dma_limit; 110 unsigned long arm_dma_pfn_limit; 111 112 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, 113 unsigned long dma_size) 114 { 115 if (size[0] <= dma_size) 116 return; 117 118 size[ZONE_NORMAL] = size[0] - dma_size; 119 size[ZONE_DMA] = dma_size; 120 hole[ZONE_NORMAL] = hole[0]; 121 hole[ZONE_DMA] = 0; 122 } 123 #endif 124 125 void __init setup_dma_zone(const struct machine_desc *mdesc) 126 { 127 #ifdef CONFIG_ZONE_DMA 128 if (mdesc->dma_zone_size) { 129 arm_dma_zone_size = mdesc->dma_zone_size; 130 arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; 131 } else 132 arm_dma_limit = 0xffffffff; 133 arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT; 134 #endif 135 } 136 137 static void __init zone_sizes_init(unsigned long min, unsigned long max_low, 138 unsigned long max_high) 139 { 140 unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; 141 struct memblock_region *reg; 142 143 /* 144 * initialise the zones. 145 */ 146 memset(zone_size, 0, sizeof(zone_size)); 147 148 /* 149 * The memory size has already been determined. If we need 150 * to do anything fancy with the allocation of this memory 151 * to the zones, now is the time to do it. 152 */ 153 zone_size[0] = max_low - min; 154 #ifdef CONFIG_HIGHMEM 155 zone_size[ZONE_HIGHMEM] = max_high - max_low; 156 #endif 157 158 /* 159 * Calculate the size of the holes. 160 * holes = node_size - sum(bank_sizes) 161 */ 162 memcpy(zhole_size, zone_size, sizeof(zhole_size)); 163 for_each_memblock(memory, reg) { 164 unsigned long start = memblock_region_memory_base_pfn(reg); 165 unsigned long end = memblock_region_memory_end_pfn(reg); 166 167 if (start < max_low) { 168 unsigned long low_end = min(end, max_low); 169 zhole_size[0] -= low_end - start; 170 } 171 #ifdef CONFIG_HIGHMEM 172 if (end > max_low) { 173 unsigned long high_start = max(start, max_low); 174 zhole_size[ZONE_HIGHMEM] -= end - high_start; 175 } 176 #endif 177 } 178 179 #ifdef CONFIG_ZONE_DMA 180 /* 181 * Adjust the sizes according to any special requirements for 182 * this machine type. 183 */ 184 if (arm_dma_zone_size) 185 arm_adjust_dma_zone(zone_size, zhole_size, 186 arm_dma_zone_size >> PAGE_SHIFT); 187 #endif 188 189 free_area_init_node(0, zone_size, min, zhole_size); 190 } 191 192 #ifdef CONFIG_HAVE_ARCH_PFN_VALID 193 int pfn_valid(unsigned long pfn) 194 { 195 return memblock_is_map_memory(__pfn_to_phys(pfn)); 196 } 197 EXPORT_SYMBOL(pfn_valid); 198 #endif 199 200 #ifndef CONFIG_SPARSEMEM 201 static void __init arm_memory_present(void) 202 { 203 } 204 #else 205 static void __init arm_memory_present(void) 206 { 207 struct memblock_region *reg; 208 209 for_each_memblock(memory, reg) 210 memory_present(0, memblock_region_memory_base_pfn(reg), 211 memblock_region_memory_end_pfn(reg)); 212 } 213 #endif 214 215 static bool arm_memblock_steal_permitted = true; 216 217 phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) 218 { 219 phys_addr_t phys; 220 221 BUG_ON(!arm_memblock_steal_permitted); 222 223 phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); 224 memblock_free(phys, size); 225 memblock_remove(phys, size); 226 227 return phys; 228 } 229 230 void __init arm_memblock_init(const struct machine_desc *mdesc) 231 { 232 /* Register the kernel text, kernel data and initrd with memblock. */ 233 memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START); 234 235 #ifdef CONFIG_BLK_DEV_INITRD 236 /* FDT scan will populate initrd_start */ 237 if (initrd_start && !phys_initrd_size) { 238 phys_initrd_start = __virt_to_phys(initrd_start); 239 phys_initrd_size = initrd_end - initrd_start; 240 } 241 initrd_start = initrd_end = 0; 242 if (phys_initrd_size && 243 !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { 244 pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", 245 (u64)phys_initrd_start, phys_initrd_size); 246 phys_initrd_start = phys_initrd_size = 0; 247 } 248 if (phys_initrd_size && 249 memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { 250 pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n", 251 (u64)phys_initrd_start, phys_initrd_size); 252 phys_initrd_start = phys_initrd_size = 0; 253 } 254 if (phys_initrd_size) { 255 memblock_reserve(phys_initrd_start, phys_initrd_size); 256 257 /* Now convert initrd to virtual addresses */ 258 initrd_start = __phys_to_virt(phys_initrd_start); 259 initrd_end = initrd_start + phys_initrd_size; 260 } 261 #endif 262 263 arm_mm_memblock_reserve(); 264 265 /* reserve any platform specific memblock areas */ 266 if (mdesc->reserve) 267 mdesc->reserve(); 268 269 early_init_fdt_reserve_self(); 270 early_init_fdt_scan_reserved_mem(); 271 272 /* reserve memory for DMA contiguous allocations */ 273 dma_contiguous_reserve(arm_dma_limit); 274 275 arm_memblock_steal_permitted = false; 276 memblock_dump_all(); 277 } 278 279 void __init bootmem_init(void) 280 { 281 unsigned long min, max_low, max_high; 282 283 memblock_allow_resize(); 284 max_low = max_high = 0; 285 286 find_limits(&min, &max_low, &max_high); 287 288 early_memtest((phys_addr_t)min << PAGE_SHIFT, 289 (phys_addr_t)max_low << PAGE_SHIFT); 290 291 /* 292 * Sparsemem tries to allocate bootmem in memory_present(), 293 * so must be done after the fixed reservations 294 */ 295 arm_memory_present(); 296 297 /* 298 * sparse_init() needs the bootmem allocator up and running. 299 */ 300 sparse_init(); 301 302 /* 303 * Now free the memory - free_area_init_node needs 304 * the sparse mem_map arrays initialized by sparse_init() 305 * for memmap_init_zone(), otherwise all PFNs are invalid. 306 */ 307 zone_sizes_init(min, max_low, max_high); 308 309 /* 310 * This doesn't seem to be used by the Linux memory manager any 311 * more, but is used by ll_rw_block. If we can get rid of it, we 312 * also get rid of some of the stuff above as well. 313 */ 314 min_low_pfn = min; 315 max_low_pfn = max_low; 316 max_pfn = max_high; 317 } 318 319 /* 320 * Poison init memory with an undefined instruction (ARM) or a branch to an 321 * undefined instruction (Thumb). 322 */ 323 static inline void poison_init_mem(void *s, size_t count) 324 { 325 u32 *p = (u32 *)s; 326 for (; count != 0; count -= 4) 327 *p++ = 0xe7fddef0; 328 } 329 330 static inline void 331 free_memmap(unsigned long start_pfn, unsigned long end_pfn) 332 { 333 struct page *start_pg, *end_pg; 334 phys_addr_t pg, pgend; 335 336 /* 337 * Convert start_pfn/end_pfn to a struct page pointer. 338 */ 339 start_pg = pfn_to_page(start_pfn - 1) + 1; 340 end_pg = pfn_to_page(end_pfn - 1) + 1; 341 342 /* 343 * Convert to physical addresses, and 344 * round start upwards and end downwards. 345 */ 346 pg = PAGE_ALIGN(__pa(start_pg)); 347 pgend = __pa(end_pg) & PAGE_MASK; 348 349 /* 350 * If there are free pages between these, 351 * free the section of the memmap array. 352 */ 353 if (pg < pgend) 354 memblock_free_early(pg, pgend - pg); 355 } 356 357 /* 358 * The mem_map array can get very big. Free the unused area of the memory map. 359 */ 360 static void __init free_unused_memmap(void) 361 { 362 unsigned long start, prev_end = 0; 363 struct memblock_region *reg; 364 365 /* 366 * This relies on each bank being in address order. 367 * The banks are sorted previously in bootmem_init(). 368 */ 369 for_each_memblock(memory, reg) { 370 start = memblock_region_memory_base_pfn(reg); 371 372 #ifdef CONFIG_SPARSEMEM 373 /* 374 * Take care not to free memmap entries that don't exist 375 * due to SPARSEMEM sections which aren't present. 376 */ 377 start = min(start, 378 ALIGN(prev_end, PAGES_PER_SECTION)); 379 #else 380 /* 381 * Align down here since the VM subsystem insists that the 382 * memmap entries are valid from the bank start aligned to 383 * MAX_ORDER_NR_PAGES. 384 */ 385 start = round_down(start, MAX_ORDER_NR_PAGES); 386 #endif 387 /* 388 * If we had a previous bank, and there is a space 389 * between the current bank and the previous, free it. 390 */ 391 if (prev_end && prev_end < start) 392 free_memmap(prev_end, start); 393 394 /* 395 * Align up here since the VM subsystem insists that the 396 * memmap entries are valid from the bank end aligned to 397 * MAX_ORDER_NR_PAGES. 398 */ 399 prev_end = ALIGN(memblock_region_memory_end_pfn(reg), 400 MAX_ORDER_NR_PAGES); 401 } 402 403 #ifdef CONFIG_SPARSEMEM 404 if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) 405 free_memmap(prev_end, 406 ALIGN(prev_end, PAGES_PER_SECTION)); 407 #endif 408 } 409 410 #ifdef CONFIG_HIGHMEM 411 static inline void free_area_high(unsigned long pfn, unsigned long end) 412 { 413 for (; pfn < end; pfn++) 414 free_highmem_page(pfn_to_page(pfn)); 415 } 416 #endif 417 418 static void __init free_highpages(void) 419 { 420 #ifdef CONFIG_HIGHMEM 421 unsigned long max_low = max_low_pfn; 422 struct memblock_region *mem, *res; 423 424 /* set highmem page free */ 425 for_each_memblock(memory, mem) { 426 unsigned long start = memblock_region_memory_base_pfn(mem); 427 unsigned long end = memblock_region_memory_end_pfn(mem); 428 429 /* Ignore complete lowmem entries */ 430 if (end <= max_low) 431 continue; 432 433 if (memblock_is_nomap(mem)) 434 continue; 435 436 /* Truncate partial highmem entries */ 437 if (start < max_low) 438 start = max_low; 439 440 /* Find and exclude any reserved regions */ 441 for_each_memblock(reserved, res) { 442 unsigned long res_start, res_end; 443 444 res_start = memblock_region_reserved_base_pfn(res); 445 res_end = memblock_region_reserved_end_pfn(res); 446 447 if (res_end < start) 448 continue; 449 if (res_start < start) 450 res_start = start; 451 if (res_start > end) 452 res_start = end; 453 if (res_end > end) 454 res_end = end; 455 if (res_start != start) 456 free_area_high(start, res_start); 457 start = res_end; 458 if (start == end) 459 break; 460 } 461 462 /* And now free anything which remains */ 463 if (start < end) 464 free_area_high(start, end); 465 } 466 #endif 467 } 468 469 /* 470 * mem_init() marks the free areas in the mem_map and tells us how much 471 * memory is free. This is done after various parts of the system have 472 * claimed their memory after the kernel image. 473 */ 474 void __init mem_init(void) 475 { 476 #ifdef CONFIG_HAVE_TCM 477 /* These pointers are filled in on TCM detection */ 478 extern u32 dtcm_end; 479 extern u32 itcm_end; 480 #endif 481 482 set_max_mapnr(pfn_to_page(max_pfn) - mem_map); 483 484 /* this will put all unused low memory onto the freelists */ 485 free_unused_memmap(); 486 free_all_bootmem(); 487 488 #ifdef CONFIG_SA1111 489 /* now that our DMA memory is actually so designated, we can free it */ 490 free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL); 491 #endif 492 493 free_highpages(); 494 495 mem_init_print_info(NULL); 496 497 #define MLK(b, t) b, t, ((t) - (b)) >> 10 498 #define MLM(b, t) b, t, ((t) - (b)) >> 20 499 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) 500 501 pr_notice("Virtual kernel memory layout:\n" 502 " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" 503 #ifdef CONFIG_HAVE_TCM 504 " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n" 505 " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n" 506 #endif 507 " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" 508 " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" 509 " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" 510 #ifdef CONFIG_HIGHMEM 511 " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" 512 #endif 513 #ifdef CONFIG_MODULES 514 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" 515 #endif 516 " .text : 0x%p" " - 0x%p" " (%4td kB)\n" 517 " .init : 0x%p" " - 0x%p" " (%4td kB)\n" 518 " .data : 0x%p" " - 0x%p" " (%4td kB)\n" 519 " .bss : 0x%p" " - 0x%p" " (%4td kB)\n", 520 521 MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + 522 (PAGE_SIZE)), 523 #ifdef CONFIG_HAVE_TCM 524 MLK(DTCM_OFFSET, (unsigned long) dtcm_end), 525 MLK(ITCM_OFFSET, (unsigned long) itcm_end), 526 #endif 527 MLK(FIXADDR_START, FIXADDR_END), 528 MLM(VMALLOC_START, VMALLOC_END), 529 MLM(PAGE_OFFSET, (unsigned long)high_memory), 530 #ifdef CONFIG_HIGHMEM 531 MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * 532 (PAGE_SIZE)), 533 #endif 534 #ifdef CONFIG_MODULES 535 MLM(MODULES_VADDR, MODULES_END), 536 #endif 537 538 MLK_ROUNDUP(_text, _etext), 539 MLK_ROUNDUP(__init_begin, __init_end), 540 MLK_ROUNDUP(_sdata, _edata), 541 MLK_ROUNDUP(__bss_start, __bss_stop)); 542 543 #undef MLK 544 #undef MLM 545 #undef MLK_ROUNDUP 546 547 /* 548 * Check boundaries twice: Some fundamental inconsistencies can 549 * be detected at build time already. 550 */ 551 #ifdef CONFIG_MMU 552 BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); 553 BUG_ON(TASK_SIZE > MODULES_VADDR); 554 #endif 555 556 #ifdef CONFIG_HIGHMEM 557 BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); 558 BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); 559 #endif 560 561 if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { 562 extern int sysctl_overcommit_memory; 563 /* 564 * On a machine this small we won't get 565 * anywhere without overcommit, so turn 566 * it on by default. 567 */ 568 sysctl_overcommit_memory = OVERCOMMIT_ALWAYS; 569 } 570 } 571 572 #ifdef CONFIG_DEBUG_RODATA 573 struct section_perm { 574 const char *name; 575 unsigned long start; 576 unsigned long end; 577 pmdval_t mask; 578 pmdval_t prot; 579 pmdval_t clear; 580 }; 581 582 /* First section-aligned location at or after __start_rodata. */ 583 extern char __start_rodata_section_aligned[]; 584 585 static struct section_perm nx_perms[] = { 586 /* Make pages tables, etc before _stext RW (set NX). */ 587 { 588 .name = "pre-text NX", 589 .start = PAGE_OFFSET, 590 .end = (unsigned long)_stext, 591 .mask = ~PMD_SECT_XN, 592 .prot = PMD_SECT_XN, 593 }, 594 /* Make init RW (set NX). */ 595 { 596 .name = "init NX", 597 .start = (unsigned long)__init_begin, 598 .end = (unsigned long)_sdata, 599 .mask = ~PMD_SECT_XN, 600 .prot = PMD_SECT_XN, 601 }, 602 /* Make rodata NX (set RO in ro_perms below). */ 603 { 604 .name = "rodata NX", 605 .start = (unsigned long)__start_rodata_section_aligned, 606 .end = (unsigned long)__init_begin, 607 .mask = ~PMD_SECT_XN, 608 .prot = PMD_SECT_XN, 609 }, 610 }; 611 612 static struct section_perm ro_perms[] = { 613 /* Make kernel code and rodata RX (set RO). */ 614 { 615 .name = "text/rodata RO", 616 .start = (unsigned long)_stext, 617 .end = (unsigned long)__init_begin, 618 #ifdef CONFIG_ARM_LPAE 619 .mask = ~L_PMD_SECT_RDONLY, 620 .prot = L_PMD_SECT_RDONLY, 621 #else 622 .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE), 623 .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE, 624 .clear = PMD_SECT_AP_WRITE, 625 #endif 626 }, 627 }; 628 629 /* 630 * Updates section permissions only for the current mm (sections are 631 * copied into each mm). During startup, this is the init_mm. Is only 632 * safe to be called with preemption disabled, as under stop_machine(). 633 */ 634 static inline void section_update(unsigned long addr, pmdval_t mask, 635 pmdval_t prot, struct mm_struct *mm) 636 { 637 pmd_t *pmd; 638 639 pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr); 640 641 #ifdef CONFIG_ARM_LPAE 642 pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot); 643 #else 644 if (addr & SECTION_SIZE) 645 pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot); 646 else 647 pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot); 648 #endif 649 flush_pmd_entry(pmd); 650 local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE); 651 } 652 653 /* Make sure extended page tables are in use. */ 654 static inline bool arch_has_strict_perms(void) 655 { 656 if (cpu_architecture() < CPU_ARCH_ARMv6) 657 return false; 658 659 return !!(get_cr() & CR_XP); 660 } 661 662 void set_section_perms(struct section_perm *perms, int n, bool set, 663 struct mm_struct *mm) 664 { 665 size_t i; 666 unsigned long addr; 667 668 if (!arch_has_strict_perms()) 669 return; 670 671 for (i = 0; i < n; i++) { 672 if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || 673 !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { 674 pr_err("BUG: %s section %lx-%lx not aligned to %lx\n", 675 perms[i].name, perms[i].start, perms[i].end, 676 SECTION_SIZE); 677 continue; 678 } 679 680 for (addr = perms[i].start; 681 addr < perms[i].end; 682 addr += SECTION_SIZE) 683 section_update(addr, perms[i].mask, 684 set ? perms[i].prot : perms[i].clear, mm); 685 } 686 687 } 688 689 static void update_sections_early(struct section_perm perms[], int n) 690 { 691 struct task_struct *t, *s; 692 693 read_lock(&tasklist_lock); 694 for_each_process(t) { 695 if (t->flags & PF_KTHREAD) 696 continue; 697 for_each_thread(t, s) 698 set_section_perms(perms, n, true, s->mm); 699 } 700 read_unlock(&tasklist_lock); 701 set_section_perms(perms, n, true, current->active_mm); 702 set_section_perms(perms, n, true, &init_mm); 703 } 704 705 int __fix_kernmem_perms(void *unused) 706 { 707 update_sections_early(nx_perms, ARRAY_SIZE(nx_perms)); 708 return 0; 709 } 710 711 void fix_kernmem_perms(void) 712 { 713 stop_machine(__fix_kernmem_perms, NULL, NULL); 714 } 715 716 int __mark_rodata_ro(void *unused) 717 { 718 update_sections_early(ro_perms, ARRAY_SIZE(ro_perms)); 719 return 0; 720 } 721 722 void mark_rodata_ro(void) 723 { 724 stop_machine(__mark_rodata_ro, NULL, NULL); 725 } 726 727 void set_kernel_text_rw(void) 728 { 729 set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false, 730 current->active_mm); 731 } 732 733 void set_kernel_text_ro(void) 734 { 735 set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true, 736 current->active_mm); 737 } 738 739 #else 740 static inline void fix_kernmem_perms(void) { } 741 #endif /* CONFIG_DEBUG_RODATA */ 742 743 void free_tcmmem(void) 744 { 745 #ifdef CONFIG_HAVE_TCM 746 extern char __tcm_start, __tcm_end; 747 748 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); 749 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); 750 #endif 751 } 752 753 void free_initmem(void) 754 { 755 fix_kernmem_perms(); 756 free_tcmmem(); 757 758 poison_init_mem(__init_begin, __init_end - __init_begin); 759 if (!machine_is_integrator() && !machine_is_cintegrator()) 760 free_initmem_default(-1); 761 } 762 763 #ifdef CONFIG_BLK_DEV_INITRD 764 765 static int keep_initrd; 766 767 void free_initrd_mem(unsigned long start, unsigned long end) 768 { 769 if (!keep_initrd) { 770 if (start == initrd_start) 771 start = round_down(start, PAGE_SIZE); 772 if (end == initrd_end) 773 end = round_up(end, PAGE_SIZE); 774 775 poison_init_mem((void *)start, PAGE_ALIGN(end) - start); 776 free_reserved_area((void *)start, (void *)end, -1, "initrd"); 777 } 778 } 779 780 static int __init keepinitrd_setup(char *__unused) 781 { 782 keep_initrd = 1; 783 return 1; 784 } 785 786 __setup("keepinitrd", keepinitrd_setup); 787 #endif 788