// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
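/*
 * Clear the requested bits in the cached copy of the CP15 control
 * register (cr_alignment) and return the updated value.
 */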
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

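/*
 * Work out the PFN limits from memblock: the first page of DRAM, the
 * top of lowmem (the current memblock limit) and the top of all DRAM.
 */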
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

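/*
 * Split the flat zone layout so that the first dma_size pages form
 * ZONE_DMA and whatever is left of lowmem becomes ZONE_NORMAL.
 */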
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

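/*
 * Record the platform's DMA zone size (if any) and derive the highest
 * physical address and pfn that GFP_DMA allocations may return.
 */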
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
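/*
 * A pfn is valid only if it lies within memory that memblock knows
 * about and that is actually mapped (NOMAP regions are excluded).
 */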
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

static bool arm_memblock_steal_permitted = true;

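/*
 * Permanently remove a chunk of memory from the kernel's view of RAM.
 * This is only permitted before arm_memblock_init() has finished, i.e.
 * while arm_memblock_steal_permitted is still true.
 */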
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	memblocks_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
				 ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
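/* Return a range of highmem pages to the page allocator one page at a time. */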
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE				> MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start  = (unsigned long)__start_rodata_section_aligned,
		.end    = (unsigned long)__init_begin,
		.mask   = ~PMD_SECT_XN,
		.prot   = PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start  = (unsigned long)_stext,
		.end    = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear  = PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. It is only
 * safe to call this with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

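/*
 * Walk the given permission table and apply (set == true) or revert
 * (set == false) each entry's section attributes in the given mm.
 */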
void set_section_perms(struct section_perm *perms, int n, bool set,
			struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}
}

/**
 * update_sections_early() is intended to be called only through the
 * stop_machine framework and executed by only one CPU while all other CPUs
 * spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

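/*
 * Mark the non-text regions (pre-text, init, rodata) non-executable in
 * every mm.  Run under stop_machine() so no other CPU is executing.
 */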
static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

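/*
 * Make kernel text and rodata read-only in every mm once init has
 * finished, then check for any remaining writable+executable mappings.
 */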
void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

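/*
 * Temporarily make kernel text/rodata writable again in the current mm
 * (and restore it below); no-ops until mark_rodata_ro() has run.
 */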
void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

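/*
 * Lock down kernel mappings, then poison the init sections and release
 * them to the page allocator (left in place on Integrator/CIntegrator).
 */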
void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
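/*
 * Free the initrd image.  When releasing the whole initrd, widen the
 * range to page boundaries to match the reservation in arm_initrd_init().
 */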
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif