/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
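/*
 * Illustrative example for setup_dma_zone() above (hypothetical values,
 * not taken from any particular platform): with PHYS_OFFSET at
 * 0x80000000 and a machine descriptor requesting dma_zone_size = SZ_64M,
 * arm_dma_limit becomes 0x80000000 + 0x04000000 - 1 = 0x83ffffff and
 * arm_dma_pfn_limit becomes 0x83fff (with 4K pages).
 */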
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}
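/*
 * Background for check_cpu_icache_size() below (an illustrative sketch of
 * the decode, based on the ARMv7 Cache Type Register layout): CTR[3:0]
 * (IminLine) holds log2 of the number of words in the smallest I-cache
 * line, so a field value of 3 decodes to 1 << (3 + 2) = 32 bytes.
 */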
#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid)
{
	u32 size, ctr;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

	size = 1 << ((ctr & 0xf) + 2);
	if (cpuid != 0 && icache_size != size)
		pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
			cpuid);
	if (icache_size > size)
		icache_size = size;
}
#endif
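/*
 * Ordering note for arm_memblock_init() below (a summary, not from the
 * original comments): the kernel image, initrd, low-level page tables,
 * platform reservations and DT-described reservations are all placed in
 * memblock before the CMA region is carved out, and arm_memblock_steal()
 * is no longer permitted once this function has finished.
 */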
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	memblocks_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}
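/*
 * Illustrative example for free_unused_memmap() below (hypothetical
 * layout): with memory banks at 0-256MB and 512-768MB, the struct page
 * entries covering the 256MB hole between the banks are returned to the
 * allocator, with the edges kept aligned to MAX_ORDER_NR_PAGES (or to
 * SPARSEMEM section boundaries when SPARSEMEM is enabled).
 */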
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. It is only
 * safe to call this with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}
/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

void set_section_perms(struct section_perm *perms, int n, bool set,
		       struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				       set ? perms[i].prot : perms[i].clear, mm);
	}
}
/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework and is executed by a single CPU while all
 * other CPUs spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif