xref: /openbmc/linux/arch/riscv/mm/init.c (revision 05f263c1)
150acfb2bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
276d2a049SPalmer Dabbelt /*
376d2a049SPalmer Dabbelt  * Copyright (C) 2012 Regents of the University of California
4671f9a3eSAnup Patel  * Copyright (C) 2019 Western Digital Corporation or its affiliates.
5e53d2818SNick Kossifidis  * Copyright (C) 2020 FORTH-ICS/CARV
6e53d2818SNick Kossifidis  *  Nick Kossifidis <mick@ics.forth.gr>
776d2a049SPalmer Dabbelt  */
876d2a049SPalmer Dabbelt 
976d2a049SPalmer Dabbelt #include <linux/init.h>
1076d2a049SPalmer Dabbelt #include <linux/mm.h>
1176d2a049SPalmer Dabbelt #include <linux/memblock.h>
1257c8a661SMike Rapoport #include <linux/initrd.h>
1376d2a049SPalmer Dabbelt #include <linux/swap.h>
14ce3aca04SKefeng Wang #include <linux/swiotlb.h>
155ec9c4ffSChristoph Hellwig #include <linux/sizes.h>
160651c263SAnup Patel #include <linux/of_fdt.h>
1756409750SNick Kossifidis #include <linux/of_reserved_mem.h>
18922b0375SAlbert Ou #include <linux/libfdt.h>
19d27c3c90SZong Li #include <linux/set_memory.h>
20da815582SKefeng Wang #include <linux/dma-map-ops.h>
21e53d2818SNick Kossifidis #include <linux/crash_dump.h>
228ba1a8b7SKefeng Wang #include <linux/hugetlb.h>
2339b33072SAlexandre Ghiti #ifdef CONFIG_RELOCATABLE
2439b33072SAlexandre Ghiti #include <linux/elf.h>
2539b33072SAlexandre Ghiti #endif
2625abe0dbSAlexandre Ghiti #include <linux/kfence.h>
2776d2a049SPalmer Dabbelt 
28f2c17aabSAnup Patel #include <asm/fixmap.h>
29d2402048SNick Desaulniers #include <asm/io.h>
30d2402048SNick Desaulniers #include <asm/numa.h>
31d2402048SNick Desaulniers #include <asm/pgtable.h>
32d2402048SNick Desaulniers #include <asm/ptdump.h>
3376d2a049SPalmer Dabbelt #include <asm/sections.h>
342d268251SPalmer Dabbelt #include <asm/soc.h>
35d2402048SNick Desaulniers #include <asm/tlbflush.h>
3676d2a049SPalmer Dabbelt 
37ffaee272SPaul Walmsley #include "../kernel/head.h"
38ffaee272SPaul Walmsley 
39658e2c51SAlexandre Ghiti struct kernel_mapping kernel_map __ro_after_init;
40658e2c51SAlexandre Ghiti EXPORT_SYMBOL(kernel_map);
4144c92257SVitaly Wool #ifdef CONFIG_XIP_KERNEL
42658e2c51SAlexandre Ghiti #define kernel_map	(*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
43658e2c51SAlexandre Ghiti #endif
44658e2c51SAlexandre Ghiti 
45e8a62cc2SAlexandre Ghiti #ifdef CONFIG_64BIT
469195c294SPalmer Dabbelt u64 satp_mode __ro_after_init = !IS_ENABLED(CONFIG_XIP_KERNEL) ? SATP_MODE_57 : SATP_MODE_39;
47e8a62cc2SAlexandre Ghiti #else
4867ff2f26SJisheng Zhang u64 satp_mode __ro_after_init = SATP_MODE_32;
49e8a62cc2SAlexandre Ghiti #endif
50e8a62cc2SAlexandre Ghiti EXPORT_SYMBOL(satp_mode);
51e8a62cc2SAlexandre Ghiti 
5220aa4954Skernel test robot bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
53011f09d1SQinglin Pan bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
54e8a62cc2SAlexandre Ghiti EXPORT_SYMBOL(pgtable_l4_enabled);
55d10efa21SQinglin Pan EXPORT_SYMBOL(pgtable_l5_enabled);
56e8a62cc2SAlexandre Ghiti 
576d7f91d9SAlexandre Ghiti phys_addr_t phys_ram_base __ro_after_init;
586d7f91d9SAlexandre Ghiti EXPORT_SYMBOL(phys_ram_base);
596d7f91d9SAlexandre Ghiti 
60387181dcSAnup Patel unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
61387181dcSAnup Patel 							__page_aligned_bss;
62387181dcSAnup Patel EXPORT_SYMBOL(empty_zero_page);
63387181dcSAnup Patel 
64d90d45d7SAnup Patel extern char _start[];
6544c92257SVitaly Wool void *_dtb_early_va __initdata;
6644c92257SVitaly Wool uintptr_t _dtb_early_pa __initdata;
67d90d45d7SAnup Patel 
6801062356SJisheng Zhang static phys_addr_t dma32_phys_limit __initdata;
69da815582SKefeng Wang 
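/*
 * Pass the highest PFN of each zone to free_area_init(): ZONE_DMA32 (when
 * configured) is capped at dma32_phys_limit, ZONE_NORMAL at max_low_pfn.
 */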
7076d2a049SPalmer Dabbelt static void __init zone_sizes_init(void)
7176d2a049SPalmer Dabbelt {
725ec9c4ffSChristoph Hellwig 	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
7376d2a049SPalmer Dabbelt 
74d5fad48cSZong Li #ifdef CONFIG_ZONE_DMA32
75da815582SKefeng Wang 	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
76d5fad48cSZong Li #endif
775ec9c4ffSChristoph Hellwig 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
785ec9c4ffSChristoph Hellwig 
799691a071SMike Rapoport 	free_area_init(max_zone_pfns);
8076d2a049SPalmer Dabbelt }
8176d2a049SPalmer Dabbelt 
828fa3cdffSKefeng Wang #if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
8326b8f69eSAlexandre Ghiti 
8426b8f69eSAlexandre Ghiti #define LOG2_SZ_1K  ilog2(SZ_1K)
8526b8f69eSAlexandre Ghiti #define LOG2_SZ_1M  ilog2(SZ_1M)
8626b8f69eSAlexandre Ghiti #define LOG2_SZ_1G  ilog2(SZ_1G)
8726b8f69eSAlexandre Ghiti #define LOG2_SZ_1T  ilog2(SZ_1T)
8826b8f69eSAlexandre Ghiti 
892cc6c4a0SYash Shah static inline void print_mlk(char *name, unsigned long b, unsigned long t)
902cc6c4a0SYash Shah {
912cc6c4a0SYash Shah 	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
9226b8f69eSAlexandre Ghiti 		  (((t) - (b)) >> LOG2_SZ_1K));
932cc6c4a0SYash Shah }
942cc6c4a0SYash Shah 
952cc6c4a0SYash Shah static inline void print_mlm(char *name, unsigned long b, unsigned long t)
962cc6c4a0SYash Shah {
972cc6c4a0SYash Shah 	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
9826b8f69eSAlexandre Ghiti 		  (((t) - (b)) >> LOG2_SZ_1M));
9926b8f69eSAlexandre Ghiti }
10026b8f69eSAlexandre Ghiti 
10126b8f69eSAlexandre Ghiti static inline void print_mlg(char *name, unsigned long b, unsigned long t)
10226b8f69eSAlexandre Ghiti {
10326b8f69eSAlexandre Ghiti 	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld GB)\n", name, b, t,
10426b8f69eSAlexandre Ghiti 		   (((t) - (b)) >> LOG2_SZ_1G));
10526b8f69eSAlexandre Ghiti }
10626b8f69eSAlexandre Ghiti 
10726b8f69eSAlexandre Ghiti #ifdef CONFIG_64BIT
10826b8f69eSAlexandre Ghiti static inline void print_mlt(char *name, unsigned long b, unsigned long t)
10926b8f69eSAlexandre Ghiti {
11026b8f69eSAlexandre Ghiti 	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld TB)\n", name, b, t,
11126b8f69eSAlexandre Ghiti 		   (((t) - (b)) >> LOG2_SZ_1T));
11226b8f69eSAlexandre Ghiti }
11326b8f69eSAlexandre Ghiti #else
11426b8f69eSAlexandre Ghiti #define print_mlt(n, b, t) do {} while (0)
11526b8f69eSAlexandre Ghiti #endif
11626b8f69eSAlexandre Ghiti 
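/*
 * Print a memory region using the largest unit (kB/MB/GB/TB) for which the
 * region size is at least 10 units, falling back to kB.
 */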
11726b8f69eSAlexandre Ghiti static inline void print_ml(char *name, unsigned long b, unsigned long t)
11826b8f69eSAlexandre Ghiti {
11926b8f69eSAlexandre Ghiti 	unsigned long diff = t - b;
12026b8f69eSAlexandre Ghiti 
12126b8f69eSAlexandre Ghiti 	if (IS_ENABLED(CONFIG_64BIT) && (diff >> LOG2_SZ_1T) >= 10)
12226b8f69eSAlexandre Ghiti 		print_mlt(name, b, t);
12326b8f69eSAlexandre Ghiti 	else if ((diff >> LOG2_SZ_1G) >= 10)
12426b8f69eSAlexandre Ghiti 		print_mlg(name, b, t);
12526b8f69eSAlexandre Ghiti 	else if ((diff >> LOG2_SZ_1M) >= 10)
12626b8f69eSAlexandre Ghiti 		print_mlm(name, b, t);
12726b8f69eSAlexandre Ghiti 	else
12826b8f69eSAlexandre Ghiti 		print_mlk(name, b, t);
1292cc6c4a0SYash Shah }
1302cc6c4a0SYash Shah 
1311987501bSJisheng Zhang static void __init print_vm_layout(void)
1322cc6c4a0SYash Shah {
1332cc6c4a0SYash Shah 	pr_notice("Virtual kernel memory layout:\n");
13426b8f69eSAlexandre Ghiti 	print_ml("fixmap", (unsigned long)FIXADDR_START,
1352cc6c4a0SYash Shah 		(unsigned long)FIXADDR_TOP);
13626b8f69eSAlexandre Ghiti 	print_ml("pci io", (unsigned long)PCI_IO_START,
1372cc6c4a0SYash Shah 		(unsigned long)PCI_IO_END);
13826b8f69eSAlexandre Ghiti 	print_ml("vmemmap", (unsigned long)VMEMMAP_START,
1392cc6c4a0SYash Shah 		(unsigned long)VMEMMAP_END);
14026b8f69eSAlexandre Ghiti 	print_ml("vmalloc", (unsigned long)VMALLOC_START,
1412cc6c4a0SYash Shah 		(unsigned long)VMALLOC_END);
142f9293ad4SXianting Tian #ifdef CONFIG_64BIT
143f9293ad4SXianting Tian 	print_ml("modules", (unsigned long)MODULES_VADDR,
144f9293ad4SXianting Tian 		(unsigned long)MODULES_END);
145f9293ad4SXianting Tian #endif
14626b8f69eSAlexandre Ghiti 	print_ml("lowmem", (unsigned long)PAGE_OFFSET,
1472cc6c4a0SYash Shah 		(unsigned long)high_memory);
1480c34e79eSPalmer Dabbelt 	if (IS_ENABLED(CONFIG_64BIT)) {
149f7ae0233SAlexandre Ghiti #ifdef CONFIG_KASAN
15026b8f69eSAlexandre Ghiti 		print_ml("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
151f7ae0233SAlexandre Ghiti #endif
1520c34e79eSPalmer Dabbelt 
15339b33072SAlexandre Ghiti 		print_ml("kernel", (unsigned long)kernel_map.virt_addr,
1542bfc6cd8SAlexandre Ghiti 			 (unsigned long)ADDRESS_SPACE_END);
1552cc6c4a0SYash Shah 	}
1562cc6c4a0SYash Shah }
1572cc6c4a0SYash Shah #else
1582cc6c4a0SYash Shah static void print_vm_layout(void) { }
1592cc6c4a0SYash Shah #endif /* CONFIG_DEBUG_VM */
1602cc6c4a0SYash Shah 
16176d2a049SPalmer Dabbelt void __init mem_init(void)
16276d2a049SPalmer Dabbelt {
16376d2a049SPalmer Dabbelt #ifdef CONFIG_FLATMEM
16476d2a049SPalmer Dabbelt 	BUG_ON(!mem_map);
16576d2a049SPalmer Dabbelt #endif /* CONFIG_FLATMEM */
16676d2a049SPalmer Dabbelt 
167c6af2aa9SChristoph Hellwig 	swiotlb_init(max_pfn > PFN_DOWN(dma32_phys_limit), SWIOTLB_VERBOSE);
168c6ffc5caSMike Rapoport 	memblock_free_all();
16976d2a049SPalmer Dabbelt 
1702cc6c4a0SYash Shah 	print_vm_layout();
17176d2a049SPalmer Dabbelt }
17276d2a049SPalmer Dabbelt 
173f7ae0233SAlexandre Ghiti /* Limit the memory size via the "mem=" command-line parameter. */
174f7ae0233SAlexandre Ghiti static phys_addr_t memory_limit;
175e792a03dSFrederik Haxel #ifdef CONFIG_XIP_KERNEL
176e792a03dSFrederik Haxel #define memory_limit	(*(phys_addr_t *)XIP_FIXUP(&memory_limit))
177e792a03dSFrederik Haxel #endif /* CONFIG_XIP_KERNEL */
178c9811e37SKefeng Wang 
179c9811e37SKefeng Wang static int __init early_mem(char *p)
180c9811e37SKefeng Wang {
181c9811e37SKefeng Wang 	u64 size;
182c9811e37SKefeng Wang 
183c9811e37SKefeng Wang 	if (!p)
184c9811e37SKefeng Wang 		return 1;
185c9811e37SKefeng Wang 
186c9811e37SKefeng Wang 	size = memparse(p, &p) & PAGE_MASK;
187c9811e37SKefeng Wang 	memory_limit = min_t(u64, size, memory_limit);
188c9811e37SKefeng Wang 
189c9811e37SKefeng Wang 	pr_notice("Memory limited to %lldMB\n", (u64)memory_limit >> 20);
190c9811e37SKefeng Wang 
191c9811e37SKefeng Wang 	return 0;
192c9811e37SKefeng Wang }
193c9811e37SKefeng Wang early_param("mem", early_mem);
194c9811e37SKefeng Wang 
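/*
 * Finalize the memblock layout used by early allocations: honour any "mem="
 * limit, reserve the kernel image, the initrd, the DTB and the reserved-memory
 * regions from the device tree, and derive min_low_pfn/max_low_pfn and the
 * 32-bit DMA limit from the resulting map.
 */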
195f842f5ffSKefeng Wang static void __init setup_bootmem(void)
1960651c263SAnup Patel {
197ac51e005SZong Li 	phys_addr_t vmlinux_end = __pa_symbol(&_end);
19807aabe8fSJisheng Zhang 	phys_addr_t max_mapped_addr;
199fe036db7SJisheng Zhang 	phys_addr_t phys_ram_end, vmlinux_start;
2000651c263SAnup Patel 
201fe036db7SJisheng Zhang 	if (IS_ENABLED(CONFIG_XIP_KERNEL))
20244c92257SVitaly Wool 		vmlinux_start = __pa_symbol(&_sdata);
203fe036db7SJisheng Zhang 	else
204fe036db7SJisheng Zhang 		vmlinux_start = __pa_symbol(&_start);
20544c92257SVitaly Wool 
206c9811e37SKefeng Wang 	memblock_enforce_memory_limit(memory_limit);
2070651c263SAnup Patel 
2082bfc6cd8SAlexandre Ghiti 	/*
2098db6f937SGeert Uytterhoeven 	 * Make sure we align the reservation on PMD_SIZE since we will
2102bfc6cd8SAlexandre Ghiti 	 * map the kernel in the linear mapping as read-only: we do not want
2112bfc6cd8SAlexandre Ghiti 	 * any allocation to happen between _end and the next pmd aligned page.
2122bfc6cd8SAlexandre Ghiti 	 */
21307aabe8fSJisheng Zhang 	if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
2148db6f937SGeert Uytterhoeven 		vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
21507aabe8fSJisheng Zhang 	/*
21607aabe8fSJisheng Zhang 	 * Reserve from the start of the kernel to the end of the kernel
21707aabe8fSJisheng Zhang 	 */
2188db6f937SGeert Uytterhoeven 	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
219d90d45d7SAnup Patel 
2206d7f91d9SAlexandre Ghiti 	phys_ram_end = memblock_end_of_DRAM();
221c3bcc65dSAlexandre Ghiti 
222c3bcc65dSAlexandre Ghiti 	/*
223c3bcc65dSAlexandre Ghiti 	 * Make sure we align the start of the memory on a PMD boundary so that
224c3bcc65dSAlexandre Ghiti 	 * at worst, we map the linear mapping with PMD mappings.
225c3bcc65dSAlexandre Ghiti 	 */
226fe036db7SJisheng Zhang 	if (!IS_ENABLED(CONFIG_XIP_KERNEL))
227c3bcc65dSAlexandre Ghiti 		phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;
2283335068fSAlexandre Ghiti 
2293335068fSAlexandre Ghiti 	/*
2303335068fSAlexandre Ghiti 	 * In 64-bit, any use of __va/__pa before this point is wrong because
2313335068fSAlexandre Ghiti 	 * the start of DRAM was not known until now.
2323335068fSAlexandre Ghiti 	 */
233b008e327SSamuel Holland 	if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU))
2343335068fSAlexandre Ghiti 		kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
2353335068fSAlexandre Ghiti 
236abb8e86bSAtish Patra 	/*
237d63e501aSNam Cao 	 * Reserve physical address space that would be mapped to virtual
238d63e501aSNam Cao 	 * addresses greater than (void *)(-PAGE_SIZE) because:
239d63e501aSNam Cao 	 *  - This memory would overlap with ERR_PTR
240d63e501aSNam Cao 	 *  - This memory belongs to high memory, which is not supported
241d63e501aSNam Cao 	 *
242d63e501aSNam Cao 	 * This is not applicable to 64-bit kernel, because virtual addresses
243d63e501aSNam Cao 	 * after (void *)(-PAGE_SIZE) are not linearly mapped: they are
244d63e501aSNam Cao 	 * occupied by kernel mapping. Also it is unrealistic for high memory
245d63e501aSNam Cao 	 * to exist on 64-bit platforms.
246abb8e86bSAtish Patra 	 */
24707aabe8fSJisheng Zhang 	if (!IS_ENABLED(CONFIG_64BIT)) {
248d63e501aSNam Cao 		max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
249d63e501aSNam Cao 		memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
25007aabe8fSJisheng Zhang 	}
251abb8e86bSAtish Patra 
2526d7f91d9SAlexandre Ghiti 	min_low_pfn = PFN_UP(phys_ram_base);
2536d7f91d9SAlexandre Ghiti 	max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
254625e24a5SAlexandre Ghiti 	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
255f6e5aedfSKefeng Wang 
256da815582SKefeng Wang 	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
257336e8eb2SGuo Ren 	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
2580651c263SAnup Patel 
259aec33b54SKefeng Wang 	reserve_initrd_mem();
260ef69d255SAlexandre Ghiti 
261ef69d255SAlexandre Ghiti 	/*
262ef69d255SAlexandre Ghiti 	 * No allocation should be done before reserving the memory as defined
263ef69d255SAlexandre Ghiti 	 * in the device tree, otherwise the allocation could end up in a
264ef69d255SAlexandre Ghiti 	 * reserved region.
265ef69d255SAlexandre Ghiti 	 */
266ef69d255SAlexandre Ghiti 	early_init_fdt_scan_reserved_mem();
267ef69d255SAlexandre Ghiti 
268922b0375SAlbert Ou 	/*
269f105aa94SVitaly Wool 	 * If DTB is built in, no need to reserve its memblock.
270f105aa94SVitaly Wool 	 * Otherwise, do reserve it but avoid using
271f105aa94SVitaly Wool 	 * early_init_fdt_reserve_self() since __pa() does
272922b0375SAlbert Ou 	 * not work for DTB pointers that are fixmap addresses.
273922b0375SAlbert Ou 	 */
2741b50f956SAlexandre Ghiti 	if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
275922b0375SAlbert Ou 		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
276922b0375SAlbert Ou 
277da815582SKefeng Wang 	dma_contiguous_reserve(dma32_phys_limit);
2788ba1a8b7SKefeng Wang 	if (IS_ENABLED(CONFIG_64BIT))
2798ba1a8b7SKefeng Wang 		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
2800651c263SAnup Patel }
2816f1e9e94SAnup Patel 
2826bd33e1eSChristoph Hellwig #ifdef CONFIG_MMU
2830c34e79eSPalmer Dabbelt struct pt_alloc_ops pt_ops __initdata;
28444c92257SVitaly Wool 
2856f1e9e94SAnup Patel pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
286671f9a3eSAnup Patel pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
28701062356SJisheng Zhang static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
288671f9a3eSAnup Patel 
289671f9a3eSAnup Patel pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
290f2c17aabSAnup Patel 
29144c92257SVitaly Wool #ifdef CONFIG_XIP_KERNEL
292805a3ebeSJisheng Zhang #define pt_ops			(*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
29344c92257SVitaly Wool #define trampoline_pg_dir      ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
29444c92257SVitaly Wool #define fixmap_pte             ((pte_t *)XIP_FIXUP(fixmap_pte))
29544c92257SVitaly Wool #define early_pg_dir           ((pgd_t *)XIP_FIXUP(early_pg_dir))
29644c92257SVitaly Wool #endif /* CONFIG_XIP_KERNEL */
29744c92257SVitaly Wool 
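/*
 * Page protections for each combination of VM_READ, VM_WRITE, VM_EXEC and
 * VM_SHARED: private writable mappings get copy-on-write protections
 * (PAGE_COPY*), shared writable mappings get PAGE_SHARED*.
 */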
2984147b5e2SAnshuman Khandual static const pgprot_t protection_map[16] = {
2994147b5e2SAnshuman Khandual 	[VM_NONE]					= PAGE_NONE,
3004147b5e2SAnshuman Khandual 	[VM_READ]					= PAGE_READ,
3014147b5e2SAnshuman Khandual 	[VM_WRITE]					= PAGE_COPY,
3024147b5e2SAnshuman Khandual 	[VM_WRITE | VM_READ]				= PAGE_COPY,
3034147b5e2SAnshuman Khandual 	[VM_EXEC]					= PAGE_EXEC,
3044147b5e2SAnshuman Khandual 	[VM_EXEC | VM_READ]				= PAGE_READ_EXEC,
3054147b5e2SAnshuman Khandual 	[VM_EXEC | VM_WRITE]				= PAGE_COPY_EXEC,
3066569fc12SHsieh-Tseng Shen 	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_EXEC,
3074147b5e2SAnshuman Khandual 	[VM_SHARED]					= PAGE_NONE,
3084147b5e2SAnshuman Khandual 	[VM_SHARED | VM_READ]				= PAGE_READ,
3094147b5e2SAnshuman Khandual 	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
3104147b5e2SAnshuman Khandual 	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
3114147b5e2SAnshuman Khandual 	[VM_SHARED | VM_EXEC]				= PAGE_EXEC,
3124147b5e2SAnshuman Khandual 	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READ_EXEC,
3134147b5e2SAnshuman Khandual 	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_EXEC,
3144147b5e2SAnshuman Khandual 	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC
3154147b5e2SAnshuman Khandual };
3164147b5e2SAnshuman Khandual DECLARE_VM_GET_PAGE_PROT
3174147b5e2SAnshuman Khandual 
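/*
 * Install a fixmap entry by writing fixmap_pte directly (a zero prot value
 * clears the entry instead) and flush the local TLB for that address.
 */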
318f2c17aabSAnup Patel void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
319f2c17aabSAnup Patel {
320f2c17aabSAnup Patel 	unsigned long addr = __fix_to_virt(idx);
321f2c17aabSAnup Patel 	pte_t *ptep;
322f2c17aabSAnup Patel 
323f2c17aabSAnup Patel 	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
324f2c17aabSAnup Patel 
325f2c17aabSAnup Patel 	ptep = &fixmap_pte[pte_index(addr)];
326f2c17aabSAnup Patel 
32721190b74SGreentime Hu 	if (pgprot_val(prot))
328f2c17aabSAnup Patel 		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
32921190b74SGreentime Hu 	else
330f2c17aabSAnup Patel 		pte_clear(&init_mm, addr, ptep);
331f2c17aabSAnup Patel 	local_flush_tlb_page(addr);
332f2c17aabSAnup Patel }
333f2c17aabSAnup Patel 
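/*
 * The page table allocation helpers below come in three flavours, selected
 * through pt_ops: *_early (MMU off, physical addresses used directly),
 * *_fixmap (MMU on but no linear mapping yet, pages accessed via the fixmap)
 * and *_late (normal allocators once paging is fully set up).
 */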
334e8dcb61fSAtish Patra static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
335671f9a3eSAnup Patel {
336671f9a3eSAnup Patel 	return (pte_t *)((uintptr_t)pa);
337671f9a3eSAnup Patel }
338e8dcb61fSAtish Patra 
339e8dcb61fSAtish Patra static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
340e8dcb61fSAtish Patra {
341e8dcb61fSAtish Patra 	clear_fixmap(FIX_PTE);
342e8dcb61fSAtish Patra 	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
343671f9a3eSAnup Patel }
344671f9a3eSAnup Patel 
34501062356SJisheng Zhang static inline pte_t *__init get_pte_virt_late(phys_addr_t pa)
346e8dcb61fSAtish Patra {
347e8dcb61fSAtish Patra 	return (pte_t *) __va(pa);
348e8dcb61fSAtish Patra }
349e8dcb61fSAtish Patra 
350e8dcb61fSAtish Patra static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
351671f9a3eSAnup Patel {
352671f9a3eSAnup Patel 	/*
353671f9a3eSAnup Patel 	 * We only create PMD or PGD early mappings so we
354671f9a3eSAnup Patel 	 * should never reach here with MMU disabled.
355671f9a3eSAnup Patel 	 */
356e8dcb61fSAtish Patra 	BUG();
357e8dcb61fSAtish Patra }
358671f9a3eSAnup Patel 
359e8dcb61fSAtish Patra static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
360e8dcb61fSAtish Patra {
361671f9a3eSAnup Patel 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
362671f9a3eSAnup Patel }
363671f9a3eSAnup Patel 
36401062356SJisheng Zhang static phys_addr_t __init alloc_pte_late(uintptr_t va)
365e8dcb61fSAtish Patra {
366380f2c1aSVishal Moola (Oracle) 	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
367e8dcb61fSAtish Patra 
368380f2c1aSVishal Moola (Oracle) 	BUG_ON(!ptdesc || !pagetable_pte_ctor(ptdesc));
369380f2c1aSVishal Moola (Oracle) 	return __pa((pte_t *)ptdesc_address(ptdesc));
370e8dcb61fSAtish Patra }
371e8dcb61fSAtish Patra 
372671f9a3eSAnup Patel static void __init create_pte_mapping(pte_t *ptep,
373671f9a3eSAnup Patel 				      uintptr_t va, phys_addr_t pa,
374671f9a3eSAnup Patel 				      phys_addr_t sz, pgprot_t prot)
375671f9a3eSAnup Patel {
376974b9b2cSMike Rapoport 	uintptr_t pte_idx = pte_index(va);
377671f9a3eSAnup Patel 
378671f9a3eSAnup Patel 	BUG_ON(sz != PAGE_SIZE);
379671f9a3eSAnup Patel 
380974b9b2cSMike Rapoport 	if (pte_none(ptep[pte_idx]))
381974b9b2cSMike Rapoport 		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
382671f9a3eSAnup Patel }
383671f9a3eSAnup Patel 
384671f9a3eSAnup Patel #ifndef __PAGETABLE_PMD_FOLDED
385671f9a3eSAnup Patel 
38601062356SJisheng Zhang static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
38701062356SJisheng Zhang static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
38801062356SJisheng Zhang static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
389671f9a3eSAnup Patel 
39044c92257SVitaly Wool #ifdef CONFIG_XIP_KERNEL
39144c92257SVitaly Wool #define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
39244c92257SVitaly Wool #define fixmap_pmd     ((pmd_t *)XIP_FIXUP(fixmap_pmd))
39344c92257SVitaly Wool #define early_pmd      ((pmd_t *)XIP_FIXUP(early_pmd))
39444c92257SVitaly Wool #endif /* CONFIG_XIP_KERNEL */
39544c92257SVitaly Wool 
396677b9eb8SQinglin Pan static p4d_t trampoline_p4d[PTRS_PER_P4D] __page_aligned_bss;
397677b9eb8SQinglin Pan static p4d_t fixmap_p4d[PTRS_PER_P4D] __page_aligned_bss;
398677b9eb8SQinglin Pan static p4d_t early_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
399677b9eb8SQinglin Pan 
400677b9eb8SQinglin Pan #ifdef CONFIG_XIP_KERNEL
401677b9eb8SQinglin Pan #define trampoline_p4d ((p4d_t *)XIP_FIXUP(trampoline_p4d))
402677b9eb8SQinglin Pan #define fixmap_p4d     ((p4d_t *)XIP_FIXUP(fixmap_p4d))
403677b9eb8SQinglin Pan #define early_p4d      ((p4d_t *)XIP_FIXUP(early_p4d))
404677b9eb8SQinglin Pan #endif /* CONFIG_XIP_KERNEL */
405677b9eb8SQinglin Pan 
406e8a62cc2SAlexandre Ghiti static pud_t trampoline_pud[PTRS_PER_PUD] __page_aligned_bss;
407e8a62cc2SAlexandre Ghiti static pud_t fixmap_pud[PTRS_PER_PUD] __page_aligned_bss;
408e8a62cc2SAlexandre Ghiti static pud_t early_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
409e8a62cc2SAlexandre Ghiti 
410e8a62cc2SAlexandre Ghiti #ifdef CONFIG_XIP_KERNEL
411e8a62cc2SAlexandre Ghiti #define trampoline_pud ((pud_t *)XIP_FIXUP(trampoline_pud))
412e8a62cc2SAlexandre Ghiti #define fixmap_pud     ((pud_t *)XIP_FIXUP(fixmap_pud))
413e8a62cc2SAlexandre Ghiti #define early_pud      ((pud_t *)XIP_FIXUP(early_pud))
414e8a62cc2SAlexandre Ghiti #endif /* CONFIG_XIP_KERNEL */
415e8a62cc2SAlexandre Ghiti 
416e8dcb61fSAtish Patra static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
417671f9a3eSAnup Patel {
418e8dcb61fSAtish Patra 	/* Before MMU is enabled */
419671f9a3eSAnup Patel 	return (pmd_t *)((uintptr_t)pa);
420671f9a3eSAnup Patel }
421e8dcb61fSAtish Patra 
422e8dcb61fSAtish Patra static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
423e8dcb61fSAtish Patra {
424e8dcb61fSAtish Patra 	clear_fixmap(FIX_PMD);
425e8dcb61fSAtish Patra 	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
426671f9a3eSAnup Patel }
427671f9a3eSAnup Patel 
42801062356SJisheng Zhang static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
429e8dcb61fSAtish Patra {
430e8dcb61fSAtish Patra 	return (pmd_t *) __va(pa);
431e8dcb61fSAtish Patra }
432e8dcb61fSAtish Patra 
433e8dcb61fSAtish Patra static phys_addr_t __init alloc_pmd_early(uintptr_t va)
434671f9a3eSAnup Patel {
435e8a62cc2SAlexandre Ghiti 	BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT);
436671f9a3eSAnup Patel 
4370f02de44SAlexandre Ghiti 	return (uintptr_t)early_pmd;
438671f9a3eSAnup Patel }
439671f9a3eSAnup Patel 
440e8dcb61fSAtish Patra static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
441e8dcb61fSAtish Patra {
442e8dcb61fSAtish Patra 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
443e8dcb61fSAtish Patra }
444e8dcb61fSAtish Patra 
44501062356SJisheng Zhang static phys_addr_t __init alloc_pmd_late(uintptr_t va)
446e8dcb61fSAtish Patra {
447380f2c1aSVishal Moola (Oracle) 	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
448e8dcb61fSAtish Patra 
449380f2c1aSVishal Moola (Oracle) 	BUG_ON(!ptdesc || !pagetable_pmd_ctor(ptdesc));
450380f2c1aSVishal Moola (Oracle) 	return __pa((pmd_t *)ptdesc_address(ptdesc));
451e8dcb61fSAtish Patra }
452e8dcb61fSAtish Patra 
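/*
 * Install a mapping at the PMD level: a leaf PMD when sz == PMD_SIZE,
 * otherwise allocate (or reuse) the next-level PTE table and recurse into
 * create_pte_mapping().
 */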
453671f9a3eSAnup Patel static void __init create_pmd_mapping(pmd_t *pmdp,
454671f9a3eSAnup Patel 				      uintptr_t va, phys_addr_t pa,
455671f9a3eSAnup Patel 				      phys_addr_t sz, pgprot_t prot)
456671f9a3eSAnup Patel {
457671f9a3eSAnup Patel 	pte_t *ptep;
458671f9a3eSAnup Patel 	phys_addr_t pte_phys;
459974b9b2cSMike Rapoport 	uintptr_t pmd_idx = pmd_index(va);
460671f9a3eSAnup Patel 
461671f9a3eSAnup Patel 	if (sz == PMD_SIZE) {
462974b9b2cSMike Rapoport 		if (pmd_none(pmdp[pmd_idx]))
463974b9b2cSMike Rapoport 			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
464671f9a3eSAnup Patel 		return;
465671f9a3eSAnup Patel 	}
466671f9a3eSAnup Patel 
467974b9b2cSMike Rapoport 	if (pmd_none(pmdp[pmd_idx])) {
468e8dcb61fSAtish Patra 		pte_phys = pt_ops.alloc_pte(va);
469974b9b2cSMike Rapoport 		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
470e8dcb61fSAtish Patra 		ptep = pt_ops.get_pte_virt(pte_phys);
471671f9a3eSAnup Patel 		memset(ptep, 0, PAGE_SIZE);
472671f9a3eSAnup Patel 	} else {
473974b9b2cSMike Rapoport 		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
474e8dcb61fSAtish Patra 		ptep = pt_ops.get_pte_virt(pte_phys);
475671f9a3eSAnup Patel 	}
476671f9a3eSAnup Patel 
477671f9a3eSAnup Patel 	create_pte_mapping(ptep, va, pa, sz, prot);
478671f9a3eSAnup Patel }
479671f9a3eSAnup Patel 
480e8a62cc2SAlexandre Ghiti static pud_t *__init get_pud_virt_early(phys_addr_t pa)
481e8a62cc2SAlexandre Ghiti {
482e8a62cc2SAlexandre Ghiti 	return (pud_t *)((uintptr_t)pa);
483e8a62cc2SAlexandre Ghiti }
484e8a62cc2SAlexandre Ghiti 
485e8a62cc2SAlexandre Ghiti static pud_t *__init get_pud_virt_fixmap(phys_addr_t pa)
486e8a62cc2SAlexandre Ghiti {
487e8a62cc2SAlexandre Ghiti 	clear_fixmap(FIX_PUD);
488e8a62cc2SAlexandre Ghiti 	return (pud_t *)set_fixmap_offset(FIX_PUD, pa);
489e8a62cc2SAlexandre Ghiti }
490e8a62cc2SAlexandre Ghiti 
491e8a62cc2SAlexandre Ghiti static pud_t *__init get_pud_virt_late(phys_addr_t pa)
492e8a62cc2SAlexandre Ghiti {
493e8a62cc2SAlexandre Ghiti 	return (pud_t *)__va(pa);
494e8a62cc2SAlexandre Ghiti }
495e8a62cc2SAlexandre Ghiti 
496e8a62cc2SAlexandre Ghiti static phys_addr_t __init alloc_pud_early(uintptr_t va)
497e8a62cc2SAlexandre Ghiti {
498e8a62cc2SAlexandre Ghiti 	/* Only one PUD is available for early mapping */
499e8a62cc2SAlexandre Ghiti 	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);
500e8a62cc2SAlexandre Ghiti 
501e8a62cc2SAlexandre Ghiti 	return (uintptr_t)early_pud;
502e8a62cc2SAlexandre Ghiti }
503e8a62cc2SAlexandre Ghiti 
504e8a62cc2SAlexandre Ghiti static phys_addr_t __init alloc_pud_fixmap(uintptr_t va)
505e8a62cc2SAlexandre Ghiti {
506e8a62cc2SAlexandre Ghiti 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
507e8a62cc2SAlexandre Ghiti }
508e8a62cc2SAlexandre Ghiti 
509e8a62cc2SAlexandre Ghiti static phys_addr_t alloc_pud_late(uintptr_t va)
510e8a62cc2SAlexandre Ghiti {
511e8a62cc2SAlexandre Ghiti 	unsigned long vaddr;
512e8a62cc2SAlexandre Ghiti 
513e8a62cc2SAlexandre Ghiti 	vaddr = __get_free_page(GFP_KERNEL);
514e8a62cc2SAlexandre Ghiti 	BUG_ON(!vaddr);
515e8a62cc2SAlexandre Ghiti 	return __pa(vaddr);
516e8a62cc2SAlexandre Ghiti }
517e8a62cc2SAlexandre Ghiti 
518677b9eb8SQinglin Pan static p4d_t *__init get_p4d_virt_early(phys_addr_t pa)
519677b9eb8SQinglin Pan {
520677b9eb8SQinglin Pan 	return (p4d_t *)((uintptr_t)pa);
521677b9eb8SQinglin Pan }
522677b9eb8SQinglin Pan 
523677b9eb8SQinglin Pan static p4d_t *__init get_p4d_virt_fixmap(phys_addr_t pa)
524677b9eb8SQinglin Pan {
525677b9eb8SQinglin Pan 	clear_fixmap(FIX_P4D);
526677b9eb8SQinglin Pan 	return (p4d_t *)set_fixmap_offset(FIX_P4D, pa);
527677b9eb8SQinglin Pan }
528677b9eb8SQinglin Pan 
529677b9eb8SQinglin Pan static p4d_t *__init get_p4d_virt_late(phys_addr_t pa)
530677b9eb8SQinglin Pan {
531677b9eb8SQinglin Pan 	return (p4d_t *)__va(pa);
532677b9eb8SQinglin Pan }
533677b9eb8SQinglin Pan 
534677b9eb8SQinglin Pan static phys_addr_t __init alloc_p4d_early(uintptr_t va)
535677b9eb8SQinglin Pan {
536677b9eb8SQinglin Pan 	/* Only one P4D is available for early mapping */
537677b9eb8SQinglin Pan 	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);
538677b9eb8SQinglin Pan 
539677b9eb8SQinglin Pan 	return (uintptr_t)early_p4d;
540677b9eb8SQinglin Pan }
541677b9eb8SQinglin Pan 
542677b9eb8SQinglin Pan static phys_addr_t __init alloc_p4d_fixmap(uintptr_t va)
543677b9eb8SQinglin Pan {
544677b9eb8SQinglin Pan 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
545677b9eb8SQinglin Pan }
546677b9eb8SQinglin Pan 
547677b9eb8SQinglin Pan static phys_addr_t alloc_p4d_late(uintptr_t va)
548677b9eb8SQinglin Pan {
549677b9eb8SQinglin Pan 	unsigned long vaddr;
550677b9eb8SQinglin Pan 
551677b9eb8SQinglin Pan 	vaddr = __get_free_page(GFP_KERNEL);
552677b9eb8SQinglin Pan 	BUG_ON(!vaddr);
553677b9eb8SQinglin Pan 	return __pa(vaddr);
554677b9eb8SQinglin Pan }
555677b9eb8SQinglin Pan 
556e8a62cc2SAlexandre Ghiti static void __init create_pud_mapping(pud_t *pudp,
557e8a62cc2SAlexandre Ghiti 				      uintptr_t va, phys_addr_t pa,
558e8a62cc2SAlexandre Ghiti 				      phys_addr_t sz, pgprot_t prot)
559e8a62cc2SAlexandre Ghiti {
560e8a62cc2SAlexandre Ghiti 	pmd_t *nextp;
561e8a62cc2SAlexandre Ghiti 	phys_addr_t next_phys;
562e8a62cc2SAlexandre Ghiti 	uintptr_t pud_index = pud_index(va);
563e8a62cc2SAlexandre Ghiti 
564e8a62cc2SAlexandre Ghiti 	if (sz == PUD_SIZE) {
565e8a62cc2SAlexandre Ghiti 		if (pud_val(pudp[pud_index]) == 0)
566e8a62cc2SAlexandre Ghiti 			pudp[pud_index] = pfn_pud(PFN_DOWN(pa), prot);
567e8a62cc2SAlexandre Ghiti 		return;
568e8a62cc2SAlexandre Ghiti 	}
569e8a62cc2SAlexandre Ghiti 
570e8a62cc2SAlexandre Ghiti 	if (pud_val(pudp[pud_index]) == 0) {
571e8a62cc2SAlexandre Ghiti 		next_phys = pt_ops.alloc_pmd(va);
572e8a62cc2SAlexandre Ghiti 		pudp[pud_index] = pfn_pud(PFN_DOWN(next_phys), PAGE_TABLE);
573e8a62cc2SAlexandre Ghiti 		nextp = pt_ops.get_pmd_virt(next_phys);
574e8a62cc2SAlexandre Ghiti 		memset(nextp, 0, PAGE_SIZE);
575e8a62cc2SAlexandre Ghiti 	} else {
576e8a62cc2SAlexandre Ghiti 		next_phys = PFN_PHYS(_pud_pfn(pudp[pud_index]));
577e8a62cc2SAlexandre Ghiti 		nextp = pt_ops.get_pmd_virt(next_phys);
578e8a62cc2SAlexandre Ghiti 	}
579e8a62cc2SAlexandre Ghiti 
580e8a62cc2SAlexandre Ghiti 	create_pmd_mapping(nextp, va, pa, sz, prot);
581e8a62cc2SAlexandre Ghiti }
582e8a62cc2SAlexandre Ghiti 
583677b9eb8SQinglin Pan static void __init create_p4d_mapping(p4d_t *p4dp,
584677b9eb8SQinglin Pan 				      uintptr_t va, phys_addr_t pa,
585677b9eb8SQinglin Pan 				      phys_addr_t sz, pgprot_t prot)
586677b9eb8SQinglin Pan {
587677b9eb8SQinglin Pan 	pud_t *nextp;
588677b9eb8SQinglin Pan 	phys_addr_t next_phys;
589677b9eb8SQinglin Pan 	uintptr_t p4d_index = p4d_index(va);
590677b9eb8SQinglin Pan 
591677b9eb8SQinglin Pan 	if (sz == P4D_SIZE) {
592677b9eb8SQinglin Pan 		if (p4d_val(p4dp[p4d_index]) == 0)
593677b9eb8SQinglin Pan 			p4dp[p4d_index] = pfn_p4d(PFN_DOWN(pa), prot);
594677b9eb8SQinglin Pan 		return;
595677b9eb8SQinglin Pan 	}
596677b9eb8SQinglin Pan 
597677b9eb8SQinglin Pan 	if (p4d_val(p4dp[p4d_index]) == 0) {
598677b9eb8SQinglin Pan 		next_phys = pt_ops.alloc_pud(va);
599677b9eb8SQinglin Pan 		p4dp[p4d_index] = pfn_p4d(PFN_DOWN(next_phys), PAGE_TABLE);
600677b9eb8SQinglin Pan 		nextp = pt_ops.get_pud_virt(next_phys);
601677b9eb8SQinglin Pan 		memset(nextp, 0, PAGE_SIZE);
602677b9eb8SQinglin Pan 	} else {
603677b9eb8SQinglin Pan 		next_phys = PFN_PHYS(_p4d_pfn(p4dp[p4d_index]));
604677b9eb8SQinglin Pan 		nextp = pt_ops.get_pud_virt(next_phys);
605677b9eb8SQinglin Pan 	}
606677b9eb8SQinglin Pan 
607677b9eb8SQinglin Pan 	create_pud_mapping(nextp, va, pa, sz, prot);
608677b9eb8SQinglin Pan }
609677b9eb8SQinglin Pan 
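/*
 * These helpers hide how many page table levels are in use: with
 * pgtable_l5_enabled the PGD points to a P4D, with pgtable_l4_enabled to a
 * PUD, otherwise directly to a PMD. When the PMD level is folded (see the
 * #else branch below), the PGD points straight at a PTE table.
 */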
610677b9eb8SQinglin Pan #define pgd_next_t		p4d_t
611677b9eb8SQinglin Pan #define alloc_pgd_next(__va)	(pgtable_l5_enabled ?			\
612677b9eb8SQinglin Pan 		pt_ops.alloc_p4d(__va) : (pgtable_l4_enabled ?		\
613677b9eb8SQinglin Pan 		pt_ops.alloc_pud(__va) : pt_ops.alloc_pmd(__va)))
614677b9eb8SQinglin Pan #define get_pgd_next_virt(__pa)	(pgtable_l5_enabled ?			\
615677b9eb8SQinglin Pan 		pt_ops.get_p4d_virt(__pa) : (pgd_next_t *)(pgtable_l4_enabled ?	\
616677b9eb8SQinglin Pan 		pt_ops.get_pud_virt(__pa) : (pud_t *)pt_ops.get_pmd_virt(__pa)))
617671f9a3eSAnup Patel #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
618677b9eb8SQinglin Pan 				(pgtable_l5_enabled ?			\
619677b9eb8SQinglin Pan 		create_p4d_mapping(__nextp, __va, __pa, __sz, __prot) : \
620e8a62cc2SAlexandre Ghiti 				(pgtable_l4_enabled ?			\
621677b9eb8SQinglin Pan 		create_pud_mapping((pud_t *)__nextp, __va, __pa, __sz, __prot) :	\
622677b9eb8SQinglin Pan 		create_pmd_mapping((pmd_t *)__nextp, __va, __pa, __sz, __prot)))
623677b9eb8SQinglin Pan #define fixmap_pgd_next		(pgtable_l5_enabled ?			\
624677b9eb8SQinglin Pan 		(uintptr_t)fixmap_p4d : (pgtable_l4_enabled ?		\
625677b9eb8SQinglin Pan 		(uintptr_t)fixmap_pud : (uintptr_t)fixmap_pmd))
626677b9eb8SQinglin Pan #define trampoline_pgd_next	(pgtable_l5_enabled ?			\
627677b9eb8SQinglin Pan 		(uintptr_t)trampoline_p4d : (pgtable_l4_enabled ?	\
628677b9eb8SQinglin Pan 		(uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd))
629671f9a3eSAnup Patel #else
630671f9a3eSAnup Patel #define pgd_next_t		pte_t
631e8dcb61fSAtish Patra #define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
632e8dcb61fSAtish Patra #define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
633671f9a3eSAnup Patel #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
634671f9a3eSAnup Patel 	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
635e8a62cc2SAlexandre Ghiti #define fixmap_pgd_next		((uintptr_t)fixmap_pte)
636f83050a8SPalmer Dabbelt #define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
637f83050a8SPalmer Dabbelt #define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
638f83050a8SPalmer Dabbelt #define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
639e8a62cc2SAlexandre Ghiti #endif /* __PAGETABLE_PMD_FOLDED */
640671f9a3eSAnup Patel 
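/*
 * Top-level mapping helper: install a leaf PGD entry for PGDIR_SIZE mappings,
 * otherwise walk down one level through the alloc_pgd_next()/
 * create_pgd_next_mapping() indirection defined above.
 */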
641b91540d5SAtish Patra void __init create_pgd_mapping(pgd_t *pgdp,
642671f9a3eSAnup Patel 				      uintptr_t va, phys_addr_t pa,
643671f9a3eSAnup Patel 				      phys_addr_t sz, pgprot_t prot)
644671f9a3eSAnup Patel {
645671f9a3eSAnup Patel 	pgd_next_t *nextp;
646671f9a3eSAnup Patel 	phys_addr_t next_phys;
647974b9b2cSMike Rapoport 	uintptr_t pgd_idx = pgd_index(va);
648671f9a3eSAnup Patel 
649671f9a3eSAnup Patel 	if (sz == PGDIR_SIZE) {
650974b9b2cSMike Rapoport 		if (pgd_val(pgdp[pgd_idx]) == 0)
651974b9b2cSMike Rapoport 			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
652671f9a3eSAnup Patel 		return;
653671f9a3eSAnup Patel 	}
654671f9a3eSAnup Patel 
655974b9b2cSMike Rapoport 	if (pgd_val(pgdp[pgd_idx]) == 0) {
656671f9a3eSAnup Patel 		next_phys = alloc_pgd_next(va);
657974b9b2cSMike Rapoport 		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
658671f9a3eSAnup Patel 		nextp = get_pgd_next_virt(next_phys);
659671f9a3eSAnup Patel 		memset(nextp, 0, PAGE_SIZE);
660671f9a3eSAnup Patel 	} else {
661974b9b2cSMike Rapoport 		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
662671f9a3eSAnup Patel 		nextp = get_pgd_next_virt(next_phys);
663671f9a3eSAnup Patel 	}
664671f9a3eSAnup Patel 
665671f9a3eSAnup Patel 	create_pgd_next_mapping(nextp, va, pa, sz, prot);
666671f9a3eSAnup Patel }
667671f9a3eSAnup Patel 
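/*
 * Pick the largest mapping size usable for a region: both the physical and
 * virtual addresses must be aligned to the candidate size and the region must
 * be at least that big, e.g. a 1GB-aligned, 1GB-sized chunk gets a PUD leaf
 * when 4-level (or higher) paging is enabled. debug_pagealloc forces 4K pages.
 */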
66849a0a373SAlexandre Ghiti static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
66949a0a373SAlexandre Ghiti 				      phys_addr_t size)
670671f9a3eSAnup Patel {
671*05f263c1SNam Cao 	if (debug_pagealloc_enabled())
672*05f263c1SNam Cao 		return PAGE_SIZE;
673*05f263c1SNam Cao 
6745f03d4f2SAlexandre Ghiti 	if (pgtable_l5_enabled &&
6755f03d4f2SAlexandre Ghiti 	    !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
6763335068fSAlexandre Ghiti 		return P4D_SIZE;
6773335068fSAlexandre Ghiti 
6785f03d4f2SAlexandre Ghiti 	if (pgtable_l4_enabled &&
6795f03d4f2SAlexandre Ghiti 	    !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
6803335068fSAlexandre Ghiti 		return PUD_SIZE;
6813335068fSAlexandre Ghiti 
6825f03d4f2SAlexandre Ghiti 	if (IS_ENABLED(CONFIG_64BIT) &&
6835f03d4f2SAlexandre Ghiti 	    !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
6840fdc636cSZong Li 		return PMD_SIZE;
6856ff8ca3fSQinglin Pan 
6866ff8ca3fSQinglin Pan 	return PAGE_SIZE;
687671f9a3eSAnup Patel }
688671f9a3eSAnup Patel 
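/*
 * For XIP kernels, the writable data section is copied from flash (starting at
 * __data_loc) into RAM at CONFIG_PHYS_RAM_BASE before the MMU is enabled, and
 * phys_ram_base is accessed through XIP_FIXUP.
 */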
68944c92257SVitaly Wool #ifdef CONFIG_XIP_KERNEL
6904b1c70aaSPalmer Dabbelt #define phys_ram_base  (*(phys_addr_t *)XIP_FIXUP(&phys_ram_base))
691805a3ebeSJisheng Zhang extern char _xiprom[], _exiprom[], __data_loc;
692805a3ebeSJisheng Zhang 
69344c92257SVitaly Wool /* called from head.S with MMU off */
69444c92257SVitaly Wool asmlinkage void __init __copy_data(void)
69544c92257SVitaly Wool {
696f9ace4edSVitaly Wool 	void *from = (void *)(&__data_loc);
69744c92257SVitaly Wool 	void *to = (void *)CONFIG_PHYS_RAM_BASE;
698f9ace4edSVitaly Wool 	size_t sz = (size_t)((uintptr_t)(&_end) - (uintptr_t)(&_sdata));
69944c92257SVitaly Wool 
70044c92257SVitaly Wool 	memcpy(to, from, sz);
70144c92257SVitaly Wool }
70244c92257SVitaly Wool #endif
70344c92257SVitaly Wool 
704e5c35fa0SAlexandre Ghiti #ifdef CONFIG_STRICT_KERNEL_RWX
705e5c35fa0SAlexandre Ghiti static __init pgprot_t pgprot_from_va(uintptr_t va)
706e5c35fa0SAlexandre Ghiti {
707e5c35fa0SAlexandre Ghiti 	if (is_va_kernel_text(va))
708e5c35fa0SAlexandre Ghiti 		return PAGE_KERNEL_READ_EXEC;
709e5c35fa0SAlexandre Ghiti 
710e5c35fa0SAlexandre Ghiti 	/*
711e5c35fa0SAlexandre Ghiti 	 * In 64-bit kernel, the kernel mapping is outside the linear mapping so
712e5c35fa0SAlexandre Ghiti 	 * we must protect its linear mapping alias from being executed and
713e5c35fa0SAlexandre Ghiti 	 * written.
714e5c35fa0SAlexandre Ghiti 	 * The rodata section is marked read-only in mark_rodata_ro().
715e5c35fa0SAlexandre Ghiti 	 */
716e5c35fa0SAlexandre Ghiti 	if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
717e5c35fa0SAlexandre Ghiti 		return PAGE_KERNEL_READ;
718e5c35fa0SAlexandre Ghiti 
719e5c35fa0SAlexandre Ghiti 	return PAGE_KERNEL;
720e5c35fa0SAlexandre Ghiti }
721e5c35fa0SAlexandre Ghiti 
722e5c35fa0SAlexandre Ghiti void mark_rodata_ro(void)
723e5c35fa0SAlexandre Ghiti {
724e5c35fa0SAlexandre Ghiti 	set_kernel_memory(__start_rodata, _data, set_memory_ro);
725e5c35fa0SAlexandre Ghiti 	if (IS_ENABLED(CONFIG_64BIT))
726e5c35fa0SAlexandre Ghiti 		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
727e5c35fa0SAlexandre Ghiti 				  set_memory_ro);
728e5c35fa0SAlexandre Ghiti 
729e5c35fa0SAlexandre Ghiti 	debug_checkwx();
730e5c35fa0SAlexandre Ghiti }
731e5c35fa0SAlexandre Ghiti #else
732e5c35fa0SAlexandre Ghiti static __init pgprot_t pgprot_from_va(uintptr_t va)
733e5c35fa0SAlexandre Ghiti {
734e5c35fa0SAlexandre Ghiti 	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
735e5c35fa0SAlexandre Ghiti 		return PAGE_KERNEL;
736e5c35fa0SAlexandre Ghiti 
737e5c35fa0SAlexandre Ghiti 	return PAGE_KERNEL_EXEC;
738e5c35fa0SAlexandre Ghiti }
739e5c35fa0SAlexandre Ghiti #endif /* CONFIG_STRICT_KERNEL_RWX */
740e5c35fa0SAlexandre Ghiti 
741d9e418d0SPalmer Dabbelt #if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
74226e7aacbSAlexandre Ghiti u64 __pi_set_satp_mode_from_cmdline(uintptr_t dtb_pa);
74326e7aacbSAlexandre Ghiti 
744011f09d1SQinglin Pan static void __init disable_pgtable_l5(void)
745011f09d1SQinglin Pan {
746011f09d1SQinglin Pan 	pgtable_l5_enabled = false;
747011f09d1SQinglin Pan 	kernel_map.page_offset = PAGE_OFFSET_L4;
748011f09d1SQinglin Pan 	satp_mode = SATP_MODE_48;
749011f09d1SQinglin Pan }
750011f09d1SQinglin Pan 
751e8a62cc2SAlexandre Ghiti static void __init disable_pgtable_l4(void)
752e8a62cc2SAlexandre Ghiti {
753e8a62cc2SAlexandre Ghiti 	pgtable_l4_enabled = false;
754e8a62cc2SAlexandre Ghiti 	kernel_map.page_offset = PAGE_OFFSET_L3;
755e8a62cc2SAlexandre Ghiti 	satp_mode = SATP_MODE_39;
756e8a62cc2SAlexandre Ghiti }
757e8a62cc2SAlexandre Ghiti 
75826e7aacbSAlexandre Ghiti static int __init print_no4lvl(char *p)
75926e7aacbSAlexandre Ghiti {
76026e7aacbSAlexandre Ghiti 	pr_info("Disabled 4-level and 5-level paging");
76126e7aacbSAlexandre Ghiti 	return 0;
76226e7aacbSAlexandre Ghiti }
76326e7aacbSAlexandre Ghiti early_param("no4lvl", print_no4lvl);
76426e7aacbSAlexandre Ghiti 
76526e7aacbSAlexandre Ghiti static int __init print_no5lvl(char *p)
76626e7aacbSAlexandre Ghiti {
76726e7aacbSAlexandre Ghiti 	pr_info("Disabled 5-level paging");
76826e7aacbSAlexandre Ghiti 	return 0;
76926e7aacbSAlexandre Ghiti }
77026e7aacbSAlexandre Ghiti early_param("no5lvl", print_no5lvl);
77126e7aacbSAlexandre Ghiti 
772e8a62cc2SAlexandre Ghiti /*
773e8a62cc2SAlexandre Ghiti  * There is a simple way to determine if 4-level paging is supported by the
774e8a62cc2SAlexandre Ghiti  * underlying hardware: establish a 1:1 mapping in 4-level page table mode,
775e8a62cc2SAlexandre Ghiti  * then read SATP back to see if the configuration was taken into account,
776e8a62cc2SAlexandre Ghiti  * meaning sv48 is supported.
777e8a62cc2SAlexandre Ghiti  */
77826e7aacbSAlexandre Ghiti static __init void set_satp_mode(uintptr_t dtb_pa)
779e8a62cc2SAlexandre Ghiti {
780e8a62cc2SAlexandre Ghiti 	u64 identity_satp, hw_satp;
781011f09d1SQinglin Pan 	uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
78226e7aacbSAlexandre Ghiti 	u64 satp_mode_cmdline = __pi_set_satp_mode_from_cmdline(dtb_pa);
78326e7aacbSAlexandre Ghiti 
78426e7aacbSAlexandre Ghiti 	if (satp_mode_cmdline == SATP_MODE_57) {
78526e7aacbSAlexandre Ghiti 		disable_pgtable_l5();
78626e7aacbSAlexandre Ghiti 	} else if (satp_mode_cmdline == SATP_MODE_48) {
78726e7aacbSAlexandre Ghiti 		disable_pgtable_l5();
78826e7aacbSAlexandre Ghiti 		disable_pgtable_l4();
78926e7aacbSAlexandre Ghiti 		return;
79026e7aacbSAlexandre Ghiti 	}
791e8a62cc2SAlexandre Ghiti 
792011f09d1SQinglin Pan 	create_p4d_mapping(early_p4d,
793e8a62cc2SAlexandre Ghiti 			set_satp_mode_pmd, (uintptr_t)early_pud,
794011f09d1SQinglin Pan 			P4D_SIZE, PAGE_TABLE);
795e8a62cc2SAlexandre Ghiti 	create_pud_mapping(early_pud,
796e8a62cc2SAlexandre Ghiti 			   set_satp_mode_pmd, (uintptr_t)early_pmd,
797e8a62cc2SAlexandre Ghiti 			   PUD_SIZE, PAGE_TABLE);
798e8a62cc2SAlexandre Ghiti 	/* Handle the case where set_satp_mode straddles 2 PMDs */
799e8a62cc2SAlexandre Ghiti 	create_pmd_mapping(early_pmd,
800e8a62cc2SAlexandre Ghiti 			   set_satp_mode_pmd, set_satp_mode_pmd,
801e8a62cc2SAlexandre Ghiti 			   PMD_SIZE, PAGE_KERNEL_EXEC);
802e8a62cc2SAlexandre Ghiti 	create_pmd_mapping(early_pmd,
803e8a62cc2SAlexandre Ghiti 			   set_satp_mode_pmd + PMD_SIZE,
804e8a62cc2SAlexandre Ghiti 			   set_satp_mode_pmd + PMD_SIZE,
805e8a62cc2SAlexandre Ghiti 			   PMD_SIZE, PAGE_KERNEL_EXEC);
806011f09d1SQinglin Pan retry:
807011f09d1SQinglin Pan 	create_pgd_mapping(early_pg_dir,
808011f09d1SQinglin Pan 			   set_satp_mode_pmd,
80926e7aacbSAlexandre Ghiti 			   pgtable_l5_enabled ?
81026e7aacbSAlexandre Ghiti 				(uintptr_t)early_p4d : (uintptr_t)early_pud,
811011f09d1SQinglin Pan 			   PGDIR_SIZE, PAGE_TABLE);
812e8a62cc2SAlexandre Ghiti 
813e8a62cc2SAlexandre Ghiti 	identity_satp = PFN_DOWN((uintptr_t)&early_pg_dir) | satp_mode;
814e8a62cc2SAlexandre Ghiti 
815e8a62cc2SAlexandre Ghiti 	local_flush_tlb_all();
816e8a62cc2SAlexandre Ghiti 	csr_write(CSR_SATP, identity_satp);
817e8a62cc2SAlexandre Ghiti 	hw_satp = csr_swap(CSR_SATP, 0ULL);
818e8a62cc2SAlexandre Ghiti 	local_flush_tlb_all();
819e8a62cc2SAlexandre Ghiti 
820011f09d1SQinglin Pan 	if (hw_satp != identity_satp) {
82126e7aacbSAlexandre Ghiti 		if (pgtable_l5_enabled) {
822011f09d1SQinglin Pan 			disable_pgtable_l5();
823d5fdade9SAnup Patel 			memset(early_pg_dir, 0, PAGE_SIZE);
824011f09d1SQinglin Pan 			goto retry;
825011f09d1SQinglin Pan 		}
826e8a62cc2SAlexandre Ghiti 		disable_pgtable_l4();
827011f09d1SQinglin Pan 	}
828e8a62cc2SAlexandre Ghiti 
829e8a62cc2SAlexandre Ghiti 	memset(early_pg_dir, 0, PAGE_SIZE);
830011f09d1SQinglin Pan 	memset(early_p4d, 0, PAGE_SIZE);
831e8a62cc2SAlexandre Ghiti 	memset(early_pud, 0, PAGE_SIZE);
832e8a62cc2SAlexandre Ghiti 	memset(early_pmd, 0, PAGE_SIZE);
833e8a62cc2SAlexandre Ghiti }
834e8a62cc2SAlexandre Ghiti #endif
835e8a62cc2SAlexandre Ghiti 
836387181dcSAnup Patel /*
837387181dcSAnup Patel  * setup_vm() is called from head.S with MMU-off.
838387181dcSAnup Patel  *
839387181dcSAnup Patel  * The following requirements should be honoured for setup_vm() to work
840387181dcSAnup Patel  * correctly:
841387181dcSAnup Patel  * 1) It should use PC-relative addressing for accessing kernel symbols.
842387181dcSAnup Patel  *    To achieve this we always use GCC cmodel=medany.
843387181dcSAnup Patel  * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
844387181dcSAnup Patel  *    so disable compiler instrumentation when FTRACE is enabled.
845387181dcSAnup Patel  *
846387181dcSAnup Patel  * Currently, the above requirements are honoured by using custom CFLAGS
847387181dcSAnup Patel  * for init.o in mm/Makefile.
848387181dcSAnup Patel  */
849387181dcSAnup Patel 
850387181dcSAnup Patel #ifndef __riscv_cmodel_medany
8516a527b67SPaul Walmsley #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
852387181dcSAnup Patel #endif
853387181dcSAnup Patel 
85439b33072SAlexandre Ghiti #ifdef CONFIG_RELOCATABLE
85539b33072SAlexandre Ghiti extern unsigned long __rela_dyn_start, __rela_dyn_end;
85639b33072SAlexandre Ghiti 
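/*
 * Walk the .rela.dyn entries and apply every R_RISCV_RELATIVE relocation,
 * shifting addends that point into the kernel image by the offset between the
 * linked and the runtime virtual addresses.
 */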
85739b33072SAlexandre Ghiti static void __init relocate_kernel(void)
85839b33072SAlexandre Ghiti {
85939b33072SAlexandre Ghiti 	Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
86039b33072SAlexandre Ghiti 	/*
86139b33072SAlexandre Ghiti 	 * This holds the offset between the linked virtual address and the
86239b33072SAlexandre Ghiti 	 * relocated virtual address.
86339b33072SAlexandre Ghiti 	 */
86439b33072SAlexandre Ghiti 	uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
86539b33072SAlexandre Ghiti 	/*
86639b33072SAlexandre Ghiti 	 * This holds the offset between kernel linked virtual address and
86739b33072SAlexandre Ghiti 	 * physical address.
86839b33072SAlexandre Ghiti 	 */
86939b33072SAlexandre Ghiti 	uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
87039b33072SAlexandre Ghiti 
87139b33072SAlexandre Ghiti 	for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
87239b33072SAlexandre Ghiti 		Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
87339b33072SAlexandre Ghiti 		Elf64_Addr relocated_addr = rela->r_addend;
87439b33072SAlexandre Ghiti 
87539b33072SAlexandre Ghiti 		if (rela->r_info != R_RISCV_RELATIVE)
87639b33072SAlexandre Ghiti 			continue;
87739b33072SAlexandre Ghiti 
87839b33072SAlexandre Ghiti 		/*
87939b33072SAlexandre Ghiti 		 * Make sure to not relocate vdso symbols like rt_sigreturn
88039b33072SAlexandre Ghiti 		 * which are linked from the address 0 in vmlinux since
88139b33072SAlexandre Ghiti 		 * which are linked from address 0 in vmlinux, since
88239b33072SAlexandre Ghiti 		 * vdso symbol addresses are actually used as an offset from
88339b33072SAlexandre Ghiti 		 * mm->context.vdso in the VDSO_OFFSET macro.
88439b33072SAlexandre Ghiti 		if (relocated_addr >= KERNEL_LINK_ADDR)
88539b33072SAlexandre Ghiti 			relocated_addr += reloc_offset;
88639b33072SAlexandre Ghiti 
88739b33072SAlexandre Ghiti 		*(Elf64_Addr *)addr = relocated_addr;
88839b33072SAlexandre Ghiti 	}
88939b33072SAlexandre Ghiti }
89039b33072SAlexandre Ghiti #endif /* CONFIG_RELOCATABLE */
89139b33072SAlexandre Ghiti 
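/*
 * Map the kernel image with PMD-sized pages: the XIP variant maps the
 * flash-resident text and the RAM-resident data separately, while the regular
 * variant maps the whole image, executable during early boot and with
 * pgprot_from_va() permissions afterwards.
 */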
89244c92257SVitaly Wool #ifdef CONFIG_XIP_KERNEL
893526f83dfSAlexandre Ghiti static void __init create_kernel_page_table(pgd_t *pgdir,
894e5c35fa0SAlexandre Ghiti 					    __always_unused bool early)
89544c92257SVitaly Wool {
89644c92257SVitaly Wool 	uintptr_t va, end_va;
89744c92257SVitaly Wool 
89844c92257SVitaly Wool 	/* Map the flash resident part */
899658e2c51SAlexandre Ghiti 	end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
900526f83dfSAlexandre Ghiti 	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
90144c92257SVitaly Wool 		create_pgd_mapping(pgdir, va,
902658e2c51SAlexandre Ghiti 				   kernel_map.xiprom + (va - kernel_map.virt_addr),
903526f83dfSAlexandre Ghiti 				   PMD_SIZE, PAGE_KERNEL_EXEC);
90444c92257SVitaly Wool 
90544c92257SVitaly Wool 	/* Map the data in RAM */
906658e2c51SAlexandre Ghiti 	end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
907526f83dfSAlexandre Ghiti 	for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
90844c92257SVitaly Wool 		create_pgd_mapping(pgdir, va,
909658e2c51SAlexandre Ghiti 				   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
910526f83dfSAlexandre Ghiti 				   PMD_SIZE, PAGE_KERNEL);
91144c92257SVitaly Wool }
91244c92257SVitaly Wool #else
913526f83dfSAlexandre Ghiti static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
9142bfc6cd8SAlexandre Ghiti {
9152bfc6cd8SAlexandre Ghiti 	uintptr_t va, end_va;
9162bfc6cd8SAlexandre Ghiti 
917658e2c51SAlexandre Ghiti 	end_va = kernel_map.virt_addr + kernel_map.size;
918526f83dfSAlexandre Ghiti 	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
9192bfc6cd8SAlexandre Ghiti 		create_pgd_mapping(pgdir, va,
920658e2c51SAlexandre Ghiti 				   kernel_map.phys_addr + (va - kernel_map.virt_addr),
921526f83dfSAlexandre Ghiti 				   PMD_SIZE,
922e5c35fa0SAlexandre Ghiti 				   early ?
923e5c35fa0SAlexandre Ghiti 					PAGE_KERNEL_EXEC : pgprot_from_va(va));
9242bfc6cd8SAlexandre Ghiti }
92544c92257SVitaly Wool #endif
9262bfc6cd8SAlexandre Ghiti 
927fe45ffa4SAlexandre Ghiti /*
928fe45ffa4SAlexandre Ghiti  * Set up a 4MB mapping that encompasses the device tree: for a 64-bit kernel,
929fe45ffa4SAlexandre Ghiti  * this means 2 PMD entries, whereas for a 32-bit kernel it is only 1 PGDIR
930fe45ffa4SAlexandre Ghiti  * entry.
931fe45ffa4SAlexandre Ghiti  */
932e4ef93edSSong Shuai static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
933ef69d255SAlexandre Ghiti 					       uintptr_t dtb_pa)
934fe45ffa4SAlexandre Ghiti {
93533d418daSAlexandre Ghiti #ifndef CONFIG_BUILTIN_DTB
936fe45ffa4SAlexandre Ghiti 	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
937fe45ffa4SAlexandre Ghiti 
938ef69d255SAlexandre Ghiti 	/* Make sure the fdt fixmap address is always aligned on PMD size */
939ef69d255SAlexandre Ghiti 	BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));
940fe45ffa4SAlexandre Ghiti 
941ef69d255SAlexandre Ghiti 	/* In 32-bit only, the fdt lies in its own PGD */
942ef69d255SAlexandre Ghiti 	if (!IS_ENABLED(CONFIG_64BIT)) {
943ef69d255SAlexandre Ghiti 		create_pgd_mapping(early_pg_dir, fix_fdt_va,
944ef69d255SAlexandre Ghiti 				   pa, MAX_FDT_SIZE, PAGE_KERNEL);
945ef69d255SAlexandre Ghiti 	} else {
946ef69d255SAlexandre Ghiti 		create_pmd_mapping(fixmap_pmd, fix_fdt_va,
947fe45ffa4SAlexandre Ghiti 				   pa, PMD_SIZE, PAGE_KERNEL);
948ef69d255SAlexandre Ghiti 		create_pmd_mapping(fixmap_pmd, fix_fdt_va + PMD_SIZE,
949fe45ffa4SAlexandre Ghiti 				   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
950fe45ffa4SAlexandre Ghiti 	}
951fe45ffa4SAlexandre Ghiti 
952ef69d255SAlexandre Ghiti 	dtb_early_va = (void *)fix_fdt_va + (dtb_pa & (PMD_SIZE - 1));
953fe45ffa4SAlexandre Ghiti #else
954fe45ffa4SAlexandre Ghiti 	/*
955fe45ffa4SAlexandre Ghiti 	 * For 64-bit kernel, __va can't be used since it would return a linear
956fe45ffa4SAlexandre Ghiti 	 * mapping address whereas dtb_early_va will be used before
957fe45ffa4SAlexandre Ghiti 	 * setup_vm_final installs the linear mapping. For 32-bit kernel, as the
958fe45ffa4SAlexandre Ghiti 	 * kernel is mapped in the linear mapping, that makes no difference.
959fe45ffa4SAlexandre Ghiti 	 */
960e792a03dSFrederik Haxel 	dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
961fe45ffa4SAlexandre Ghiti #endif
962fe45ffa4SAlexandre Ghiti 
963fe45ffa4SAlexandre Ghiti 	dtb_early_pa = dtb_pa;
964fe45ffa4SAlexandre Ghiti }
965fe45ffa4SAlexandre Ghiti 
966840125a9SAlexandre Ghiti /*
967840125a9SAlexandre Ghiti  * MMU is not enabled, the page tables are allocated directly using
968840125a9SAlexandre Ghiti  * early_pmd/pud/p4d and the address returned is the physical one.
969840125a9SAlexandre Ghiti  */
9709c375cfcSJisheng Zhang static void __init pt_ops_set_early(void)
971840125a9SAlexandre Ghiti {
972840125a9SAlexandre Ghiti 	pt_ops.alloc_pte = alloc_pte_early;
973840125a9SAlexandre Ghiti 	pt_ops.get_pte_virt = get_pte_virt_early;
974840125a9SAlexandre Ghiti #ifndef __PAGETABLE_PMD_FOLDED
975840125a9SAlexandre Ghiti 	pt_ops.alloc_pmd = alloc_pmd_early;
976840125a9SAlexandre Ghiti 	pt_ops.get_pmd_virt = get_pmd_virt_early;
977e8a62cc2SAlexandre Ghiti 	pt_ops.alloc_pud = alloc_pud_early;
978e8a62cc2SAlexandre Ghiti 	pt_ops.get_pud_virt = get_pud_virt_early;
979677b9eb8SQinglin Pan 	pt_ops.alloc_p4d = alloc_p4d_early;
980677b9eb8SQinglin Pan 	pt_ops.get_p4d_virt = get_p4d_virt_early;
981840125a9SAlexandre Ghiti #endif
982840125a9SAlexandre Ghiti }
983840125a9SAlexandre Ghiti 
984840125a9SAlexandre Ghiti /*
985840125a9SAlexandre Ghiti  * MMU is enabled but page table setup is not complete yet.
986840125a9SAlexandre Ghiti  * fixmap page table alloc functions must be used as a means to temporarily
987840125a9SAlexandre Ghiti  * map the allocated physical pages since the linear mapping does not exist yet.
988840125a9SAlexandre Ghiti  *
989840125a9SAlexandre Ghiti  * Note that this is called with MMU disabled, hence kernel_mapping_pa_to_va,
990840125a9SAlexandre Ghiti  * but it will be used as described above.
991840125a9SAlexandre Ghiti  */
9929c375cfcSJisheng Zhang static void __init pt_ops_set_fixmap(void)
993840125a9SAlexandre Ghiti {
994583286e2SSamuel Holland 	pt_ops.alloc_pte = kernel_mapping_pa_to_va(alloc_pte_fixmap);
995583286e2SSamuel Holland 	pt_ops.get_pte_virt = kernel_mapping_pa_to_va(get_pte_virt_fixmap);
996840125a9SAlexandre Ghiti #ifndef __PAGETABLE_PMD_FOLDED
997583286e2SSamuel Holland 	pt_ops.alloc_pmd = kernel_mapping_pa_to_va(alloc_pmd_fixmap);
998583286e2SSamuel Holland 	pt_ops.get_pmd_virt = kernel_mapping_pa_to_va(get_pmd_virt_fixmap);
999583286e2SSamuel Holland 	pt_ops.alloc_pud = kernel_mapping_pa_to_va(alloc_pud_fixmap);
1000583286e2SSamuel Holland 	pt_ops.get_pud_virt = kernel_mapping_pa_to_va(get_pud_virt_fixmap);
1001583286e2SSamuel Holland 	pt_ops.alloc_p4d = kernel_mapping_pa_to_va(alloc_p4d_fixmap);
1002583286e2SSamuel Holland 	pt_ops.get_p4d_virt = kernel_mapping_pa_to_va(get_p4d_virt_fixmap);
1003840125a9SAlexandre Ghiti #endif
1004840125a9SAlexandre Ghiti }
1005840125a9SAlexandre Ghiti 
1006840125a9SAlexandre Ghiti /*
1007840125a9SAlexandre Ghiti  * MMU is enabled and page table setup is complete, so from now on, we can use
1008840125a9SAlexandre Ghiti  * generic page allocation functions to set up page tables.
1009840125a9SAlexandre Ghiti  */
10109c375cfcSJisheng Zhang static void __init pt_ops_set_late(void)
1011840125a9SAlexandre Ghiti {
1012840125a9SAlexandre Ghiti 	pt_ops.alloc_pte = alloc_pte_late;
1013840125a9SAlexandre Ghiti 	pt_ops.get_pte_virt = get_pte_virt_late;
1014840125a9SAlexandre Ghiti #ifndef __PAGETABLE_PMD_FOLDED
1015840125a9SAlexandre Ghiti 	pt_ops.alloc_pmd = alloc_pmd_late;
1016840125a9SAlexandre Ghiti 	pt_ops.get_pmd_virt = get_pmd_virt_late;
1017e8a62cc2SAlexandre Ghiti 	pt_ops.alloc_pud = alloc_pud_late;
1018e8a62cc2SAlexandre Ghiti 	pt_ops.get_pud_virt = get_pud_virt_late;
1019677b9eb8SQinglin Pan 	pt_ops.alloc_p4d = alloc_p4d_late;
1020677b9eb8SQinglin Pan 	pt_ops.get_p4d_virt = get_p4d_virt_late;
1021840125a9SAlexandre Ghiti #endif
1022840125a9SAlexandre Ghiti }
1023840125a9SAlexandre Ghiti 
102484fe419dSAlexandre Ghiti #ifdef CONFIG_RANDOMIZE_BASE
102584fe419dSAlexandre Ghiti extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa);
102684fe419dSAlexandre Ghiti extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa);
102784fe419dSAlexandre Ghiti 
102884fe419dSAlexandre Ghiti static int __init print_nokaslr(char *p)
102984fe419dSAlexandre Ghiti {
103084fe419dSAlexandre Ghiti 	pr_info("Disabled KASLR");
103184fe419dSAlexandre Ghiti 	return 0;
103284fe419dSAlexandre Ghiti }
103384fe419dSAlexandre Ghiti early_param("nokaslr", print_nokaslr);
103484fe419dSAlexandre Ghiti 
103584fe419dSAlexandre Ghiti unsigned long kaslr_offset(void)
103684fe419dSAlexandre Ghiti {
103784fe419dSAlexandre Ghiti 	return kernel_map.virt_offset;
103884fe419dSAlexandre Ghiti }
103984fe419dSAlexandre Ghiti #endif
104084fe419dSAlexandre Ghiti 
1041671f9a3eSAnup Patel asmlinkage void __init setup_vm(uintptr_t dtb_pa)
10426f1e9e94SAnup Patel {
10436f3e5fd2SAlexandre Ghiti 	pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;
10446f1e9e94SAnup Patel 
104584fe419dSAlexandre Ghiti #ifdef CONFIG_RANDOMIZE_BASE
104684fe419dSAlexandre Ghiti 	if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) {
104784fe419dSAlexandre Ghiti 		u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
104884fe419dSAlexandre Ghiti 		u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
104984fe419dSAlexandre Ghiti 		u32 nr_pos;
105084fe419dSAlexandre Ghiti 
105184fe419dSAlexandre Ghiti 		/*
105284fe419dSAlexandre Ghiti 		 * Compute the number of positions available: we are limited
105384fe419dSAlexandre Ghiti 		 * by the early page table that only has one PUD and we must
105484fe419dSAlexandre Ghiti 		 * be aligned on PMD_SIZE.
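		 * (Illustrative arithmetic, not from the original source: with a
		 * 1GB PUD and an 8MB kernel image, nr_pos = (1024MB - 8MB) / 2MB
		 * = 508 candidate 2MB-aligned slots.)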
105584fe419dSAlexandre Ghiti 		 */
105684fe419dSAlexandre Ghiti 		nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;
105784fe419dSAlexandre Ghiti 
105884fe419dSAlexandre Ghiti 		kernel_map.virt_offset = (kaslr_seed % nr_pos) * PMD_SIZE;
105984fe419dSAlexandre Ghiti 	}
106084fe419dSAlexandre Ghiti #endif
106184fe419dSAlexandre Ghiti 
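	/* virt_offset is the KASLR displacement computed above (zero when KASLR is off) */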
106284fe419dSAlexandre Ghiti 	kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
1063658e2c51SAlexandre Ghiti 
106444c92257SVitaly Wool #ifdef CONFIG_XIP_KERNEL
1065b73ffafaSAlexandre Ghiti #ifdef CONFIG_64BIT
1066e792a03dSFrederik Haxel 	kernel_map.page_offset = PAGE_OFFSET_L3;
1067b73ffafaSAlexandre Ghiti #else
1068b73ffafaSAlexandre Ghiti 	kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
1069b73ffafaSAlexandre Ghiti #endif
1070658e2c51SAlexandre Ghiti 	kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
1071658e2c51SAlexandre Ghiti 	kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
107244c92257SVitaly Wool 
10736d7f91d9SAlexandre Ghiti 	phys_ram_base = CONFIG_PHYS_RAM_BASE;
1074658e2c51SAlexandre Ghiti 	kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
1075658e2c51SAlexandre Ghiti 	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
107644c92257SVitaly Wool 
1077658e2c51SAlexandre Ghiti 	kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
107844c92257SVitaly Wool #else
1079e792a03dSFrederik Haxel 	kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
1080658e2c51SAlexandre Ghiti 	kernel_map.phys_addr = (uintptr_t)(&_start);
1081658e2c51SAlexandre Ghiti 	kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
108244c92257SVitaly Wool #endif
1083e8a62cc2SAlexandre Ghiti 
1084e8a62cc2SAlexandre Ghiti #if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
108526e7aacbSAlexandre Ghiti 	set_satp_mode(dtb_pa);
1086e8a62cc2SAlexandre Ghiti #endif
1087e8a62cc2SAlexandre Ghiti 
10883335068fSAlexandre Ghiti 	/*
10893335068fSAlexandre Ghiti 	 * In 64-bit, we defer the setup of va_pa_offset to setup_bootmem,
10903335068fSAlexandre Ghiti 	 * where we have the system memory layout: this allows us to align
10913335068fSAlexandre Ghiti 	 * the physical and virtual mappings and then make use of PUD/P4D/PGD
10923335068fSAlexandre Ghiti 	 * for the linear mapping. This is only possible because the kernel
10933335068fSAlexandre Ghiti 	 * mapping lies outside the linear mapping.
10943335068fSAlexandre Ghiti 	 * In 32-bit however, as the kernel resides in the linear mapping,
10953335068fSAlexandre Ghiti 	 * setup_vm_final cannot change the mapping established here,
10963335068fSAlexandre Ghiti 	 * otherwise the same kernel addresses would get mapped to different
10973335068fSAlexandre Ghiti 	 * physical addresses (if the start of DRAM is different from the
10983335068fSAlexandre Ghiti 	 * kernel physical address start).
10993335068fSAlexandre Ghiti 	 */
11003335068fSAlexandre Ghiti 	kernel_map.va_pa_offset = IS_ENABLED(CONFIG_64BIT) ?
11013335068fSAlexandre Ghiti 				0UL : PAGE_OFFSET - kernel_map.phys_addr;
1102658e2c51SAlexandre Ghiti 	kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
11032bfc6cd8SAlexandre Ghiti 
1104f7ae0233SAlexandre Ghiti 	/*
1105f7ae0233SAlexandre Ghiti 	 * The default maximal physical memory size is KERN_VIRT_SIZE for 32-bit
1106f7ae0233SAlexandre Ghiti 	 * kernel, whereas for 64-bit kernel, the end of the virtual address
1107f7ae0233SAlexandre Ghiti 	 * space is occupied by the modules/BPF/kernel mappings which reduces
1108f7ae0233SAlexandre Ghiti 	 * the available size of the linear mapping.
1109f7ae0233SAlexandre Ghiti 	 */
1110f7ae0233SAlexandre Ghiti 	memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? SZ_4G : 0);
1111f7ae0233SAlexandre Ghiti 
11126f1e9e94SAnup Patel 	/* Sanity check alignment and size */
11136f1e9e94SAnup Patel 	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
1114526f83dfSAlexandre Ghiti 	BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
1115671f9a3eSAnup Patel 
1116db6b84a3SAlexandre Ghiti #ifdef CONFIG_64BIT
1117db6b84a3SAlexandre Ghiti 	/*
1118db6b84a3SAlexandre Ghiti 	 * The last 4K bytes of the addressable memory cannot be mapped because
1119db6b84a3SAlexandre Ghiti 	 * of the IS_ERR_VALUE macro.
1120db6b84a3SAlexandre Ghiti 	 */
1121db6b84a3SAlexandre Ghiti 	BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
1122e8dcb61fSAtish Patra #endif
1123671f9a3eSAnup Patel 
112439b33072SAlexandre Ghiti #ifdef CONFIG_RELOCATABLE
112539b33072SAlexandre Ghiti 	/*
112639b33072SAlexandre Ghiti 	 * The early page table uses only one PUD, so it can only map a PUD_SIZE
112739b33072SAlexandre Ghiti 	 * region aligned on PUD_SIZE: if the relocation offset makes the kernel
112839b33072SAlexandre Ghiti 	 * cross over a PUD_SIZE boundary, raise a bug since a part of the kernel
112939b33072SAlexandre Ghiti 	 * would not get mapped.
113039b33072SAlexandre Ghiti 	 */
113139b33072SAlexandre Ghiti 	BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
113239b33072SAlexandre Ghiti 	relocate_kernel();
113339b33072SAlexandre Ghiti #endif
113439b33072SAlexandre Ghiti 
1135a35707c3SHeiko Stuebner 	apply_early_boot_alternatives();
1136840125a9SAlexandre Ghiti 	pt_ops_set_early();
1137840125a9SAlexandre Ghiti 
11386f1e9e94SAnup Patel 	/* Setup early PGD for fixmap */
11396f1e9e94SAnup Patel 	create_pgd_mapping(early_pg_dir, FIXADDR_START,
1140e8a62cc2SAlexandre Ghiti 			   fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
11416f1e9e94SAnup Patel 
11426f1e9e94SAnup Patel #ifndef __PAGETABLE_PMD_FOLDED
1143677b9eb8SQinglin Pan 	/* Setup fixmap P4D and PUD */
1144677b9eb8SQinglin Pan 	if (pgtable_l5_enabled)
1145677b9eb8SQinglin Pan 		create_p4d_mapping(fixmap_p4d, FIXADDR_START,
1146677b9eb8SQinglin Pan 				   (uintptr_t)fixmap_pud, P4D_SIZE, PAGE_TABLE);
1147e8a62cc2SAlexandre Ghiti 	/* Setup fixmap PUD and PMD */
1148e8a62cc2SAlexandre Ghiti 	if (pgtable_l4_enabled)
1149e8a62cc2SAlexandre Ghiti 		create_pud_mapping(fixmap_pud, FIXADDR_START,
1150e8a62cc2SAlexandre Ghiti 				   (uintptr_t)fixmap_pmd, PUD_SIZE, PAGE_TABLE);
1151671f9a3eSAnup Patel 	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
1152671f9a3eSAnup Patel 			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
1153671f9a3eSAnup Patel 	/* Setup trampoline PGD and PMD */
1154658e2c51SAlexandre Ghiti 	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
1155e8a62cc2SAlexandre Ghiti 			   trampoline_pgd_next, PGDIR_SIZE, PAGE_TABLE);
1156677b9eb8SQinglin Pan 	if (pgtable_l5_enabled)
1157677b9eb8SQinglin Pan 		create_p4d_mapping(trampoline_p4d, kernel_map.virt_addr,
1158677b9eb8SQinglin Pan 				   (uintptr_t)trampoline_pud, P4D_SIZE, PAGE_TABLE);
1159e8a62cc2SAlexandre Ghiti 	if (pgtable_l4_enabled)
1160e8a62cc2SAlexandre Ghiti 		create_pud_mapping(trampoline_pud, kernel_map.virt_addr,
1161e8a62cc2SAlexandre Ghiti 				   (uintptr_t)trampoline_pmd, PUD_SIZE, PAGE_TABLE);
116244c92257SVitaly Wool #ifdef CONFIG_XIP_KERNEL
1163658e2c51SAlexandre Ghiti 	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
1164658e2c51SAlexandre Ghiti 			   kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
116544c92257SVitaly Wool #else
1166658e2c51SAlexandre Ghiti 	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
1167658e2c51SAlexandre Ghiti 			   kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
116844c92257SVitaly Wool #endif
11696f1e9e94SAnup Patel #else
1170671f9a3eSAnup Patel 	/* Setup trampoline PGD */
1171658e2c51SAlexandre Ghiti 	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
1172658e2c51SAlexandre Ghiti 			   kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC);
1173671f9a3eSAnup Patel #endif
11746f1e9e94SAnup Patel 
1175671f9a3eSAnup Patel 	/*
11762bfc6cd8SAlexandre Ghiti 	 * Set up the early PGD covering the entire kernel, which will allow
1177671f9a3eSAnup Patel 	 * us to reach paging_init(). We map all memory banks later
1178671f9a3eSAnup Patel 	 * in setup_vm_final() below.
1179671f9a3eSAnup Patel 	 */
1180526f83dfSAlexandre Ghiti 	create_kernel_page_table(early_pg_dir, true);
1181f2c17aabSAnup Patel 
1182fe45ffa4SAlexandre Ghiti 	/* Setup early mapping for FDT early scan */
1183e4ef93edSSong Shuai 	create_fdt_early_page_table(__fix_to_virt(FIX_FDT), dtb_pa);
11846262f661SAtish Patra 
11856262f661SAtish Patra 	/*
11866262f661SAtish Patra 	 * The boot-time fixmap can only handle PMD_SIZE mappings, so the
11876262f661SAtish Patra 	 * boot-ioremap range cannot span multiple PMDs.
11886262f661SAtish Patra 	 */
1189e8a62cc2SAlexandre Ghiti 	BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
11906262f661SAtish Patra 		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
11916262f661SAtish Patra 
11926262f661SAtish Patra #ifndef __PAGETABLE_PMD_FOLDED
11936262f661SAtish Patra 	/*
11946262f661SAtish Patra 	 * The early ioremap fixmap is already created as it lies within the first
11956262f661SAtish Patra 	 * 2MB of the fixmap region. We always map PMD_SIZE. Thus, both FIX_BTMAP_END
11966262f661SAtish Patra 	 * and FIX_BTMAP_BEGIN should lie in the same PMD. Verify that and warn
11976262f661SAtish Patra 	 * the user if not.
11986262f661SAtish Patra 	 */
11996262f661SAtish Patra 	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
12006262f661SAtish Patra 	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
12016262f661SAtish Patra 	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
12026262f661SAtish Patra 		WARN_ON(1);
12036262f661SAtish Patra 		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
12046262f661SAtish Patra 			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
12056262f661SAtish Patra 		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
12066262f661SAtish Patra 			fix_to_virt(FIX_BTMAP_BEGIN));
12076262f661SAtish Patra 		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
12086262f661SAtish Patra 			fix_to_virt(FIX_BTMAP_END));
12096262f661SAtish Patra 
12106262f661SAtish Patra 		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
12116262f661SAtish Patra 		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
12126262f661SAtish Patra 	}
12136262f661SAtish Patra #endif
1214840125a9SAlexandre Ghiti 
1215840125a9SAlexandre Ghiti 	pt_ops_set_fixmap();
12166f1e9e94SAnup Patel }
1217f2c17aabSAnup Patel 
12188589e346SAlexandre Ghiti static void __init create_linear_mapping_range(phys_addr_t start,
121925abe0dbSAlexandre Ghiti 					       phys_addr_t end,
122025abe0dbSAlexandre Ghiti 					       uintptr_t fixed_map_size)
1221671f9a3eSAnup Patel {
12228589e346SAlexandre Ghiti 	phys_addr_t pa;
1223671f9a3eSAnup Patel 	uintptr_t va, map_size;
1224671f9a3eSAnup Patel 
12258589e346SAlexandre Ghiti 	for (pa = start; pa < end; pa += map_size) {
12268589e346SAlexandre Ghiti 		va = (uintptr_t)__va(pa);
122725abe0dbSAlexandre Ghiti 		map_size = fixed_map_size ? fixed_map_size :
122849a0a373SAlexandre Ghiti 					    best_map_size(pa, va, end - pa);
12298589e346SAlexandre Ghiti 
12308589e346SAlexandre Ghiti 		create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
12318589e346SAlexandre Ghiti 				   pgprot_from_va(va));
12328589e346SAlexandre Ghiti 	}
12338589e346SAlexandre Ghiti }
12348589e346SAlexandre Ghiti 
12358589e346SAlexandre Ghiti static void __init create_linear_mapping_page_table(void)
12368589e346SAlexandre Ghiti {
12378589e346SAlexandre Ghiti 	phys_addr_t start, end;
123825abe0dbSAlexandre Ghiti 	phys_addr_t kfence_pool __maybe_unused;
1239671f9a3eSAnup Patel 	u64 i;
1240671f9a3eSAnup Patel 
12413335068fSAlexandre Ghiti #ifdef CONFIG_STRICT_KERNEL_RWX
12423335068fSAlexandre Ghiti 	phys_addr_t ktext_start = __pa_symbol(_start);
12433335068fSAlexandre Ghiti 	phys_addr_t ktext_size = __init_data_begin - _start;
12443335068fSAlexandre Ghiti 	phys_addr_t krodata_start = __pa_symbol(__start_rodata);
12453335068fSAlexandre Ghiti 	phys_addr_t krodata_size = _data - __start_rodata;
12463335068fSAlexandre Ghiti 
12473335068fSAlexandre Ghiti 	/* Isolate kernel text and rodata so they don't get mapped with a PUD */
12483335068fSAlexandre Ghiti 	memblock_mark_nomap(ktext_start,  ktext_size);
12493335068fSAlexandre Ghiti 	memblock_mark_nomap(krodata_start, krodata_size);
12503335068fSAlexandre Ghiti #endif
12513335068fSAlexandre Ghiti 
125225abe0dbSAlexandre Ghiti #ifdef CONFIG_KFENCE
125325abe0dbSAlexandre Ghiti 	/*
125425abe0dbSAlexandre Ghiti 	 *  The kfence pool must be backed by PAGE_SIZE mappings, so allocate it
125525abe0dbSAlexandre Ghiti 	 *  before we set up the linear mapping so that we avoid using hugepages
125625abe0dbSAlexandre Ghiti 	 *  for this region.
125725abe0dbSAlexandre Ghiti 	 */
125825abe0dbSAlexandre Ghiti 	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
125925abe0dbSAlexandre Ghiti 	BUG_ON(!kfence_pool);
126025abe0dbSAlexandre Ghiti 
126125abe0dbSAlexandre Ghiti 	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
126225abe0dbSAlexandre Ghiti 	__kfence_pool = __va(kfence_pool);
126325abe0dbSAlexandre Ghiti #endif
126425abe0dbSAlexandre Ghiti 
12652bfc6cd8SAlexandre Ghiti 	/* Map all memory banks in the linear mapping */
1266b10d6bcaSMike Rapoport 	for_each_mem_range(i, &start, &end) {
1267671f9a3eSAnup Patel 		if (start >= end)
1268671f9a3eSAnup Patel 			break;
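		/*
		 * Clamp each bank to the part covered by the linear mapping:
		 * skip memory below __pa(PAGE_OFFSET) and cap at memory_limit.
		 */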
1269671f9a3eSAnup Patel 		if (start <= __pa(PAGE_OFFSET) &&
1270671f9a3eSAnup Patel 		    __pa(PAGE_OFFSET) < end)
1271671f9a3eSAnup Patel 			start = __pa(PAGE_OFFSET);
1272c99127c4SAlexandre Ghiti 		if (end >= __pa(PAGE_OFFSET) + memory_limit)
1273c99127c4SAlexandre Ghiti 			end = __pa(PAGE_OFFSET) + memory_limit;
1274671f9a3eSAnup Patel 
127525abe0dbSAlexandre Ghiti 		create_linear_mapping_range(start, end, 0);
12768589e346SAlexandre Ghiti 	}
12773335068fSAlexandre Ghiti 
12783335068fSAlexandre Ghiti #ifdef CONFIG_STRICT_KERNEL_RWX
127925abe0dbSAlexandre Ghiti 	create_linear_mapping_range(ktext_start, ktext_start + ktext_size, 0);
12803335068fSAlexandre Ghiti 	create_linear_mapping_range(krodata_start,
128125abe0dbSAlexandre Ghiti 				    krodata_start + krodata_size, 0);
12823335068fSAlexandre Ghiti 
12833335068fSAlexandre Ghiti 	memblock_clear_nomap(ktext_start,  ktext_size);
12843335068fSAlexandre Ghiti 	memblock_clear_nomap(krodata_start, krodata_size);
12853335068fSAlexandre Ghiti #endif
128625abe0dbSAlexandre Ghiti 
128725abe0dbSAlexandre Ghiti #ifdef CONFIG_KFENCE
128825abe0dbSAlexandre Ghiti 	create_linear_mapping_range(kfence_pool,
128925abe0dbSAlexandre Ghiti 				    kfence_pool + KFENCE_POOL_SIZE,
129025abe0dbSAlexandre Ghiti 				    PAGE_SIZE);
129125abe0dbSAlexandre Ghiti 
129225abe0dbSAlexandre Ghiti 	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
129325abe0dbSAlexandre Ghiti #endif
12948589e346SAlexandre Ghiti }
12952bfc6cd8SAlexandre Ghiti 
12968589e346SAlexandre Ghiti static void __init setup_vm_final(void)
12978589e346SAlexandre Ghiti {
1298671f9a3eSAnup Patel 	/* Setup swapper PGD for fixmap */
1299ef69d255SAlexandre Ghiti #if !defined(CONFIG_64BIT)
1300ef69d255SAlexandre Ghiti 	/*
1301ef69d255SAlexandre Ghiti 	 * In 32-bit, the device tree lies in a pgd entry, so it must be copied
1302ef69d255SAlexandre Ghiti 	 * directly in swapper_pg_dir in addition to the pgd entry that points
1303ef69d255SAlexandre Ghiti 	 * to fixmap_pte.
1304ef69d255SAlexandre Ghiti 	 */
1305ef69d255SAlexandre Ghiti 	unsigned long idx = pgd_index(__fix_to_virt(FIX_FDT));
1306ef69d255SAlexandre Ghiti 
1307ef69d255SAlexandre Ghiti 	set_pgd(&swapper_pg_dir[idx], early_pg_dir[idx]);
1308ef69d255SAlexandre Ghiti #endif
1309671f9a3eSAnup Patel 	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
1310671f9a3eSAnup Patel 			   __pa_symbol(fixmap_pgd_next),
1311671f9a3eSAnup Patel 			   PGDIR_SIZE, PAGE_TABLE);
1312671f9a3eSAnup Patel 
13138589e346SAlexandre Ghiti 	/* Map the linear mapping */
13148589e346SAlexandre Ghiti 	create_linear_mapping_page_table();
1315671f9a3eSAnup Patel 
13162bfc6cd8SAlexandre Ghiti 	/* Map the kernel */
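	/*
	 * On 32-bit the kernel already lies inside the linear mapping created
	 * above, so a dedicated kernel mapping is only needed on 64-bit.
	 */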
131707aabe8fSJisheng Zhang 	if (IS_ENABLED(CONFIG_64BIT))
1318526f83dfSAlexandre Ghiti 		create_kernel_page_table(swapper_pg_dir, false);
13192bfc6cd8SAlexandre Ghiti 
13202efad17eSAlexandre Ghiti #ifdef CONFIG_KASAN
13212efad17eSAlexandre Ghiti 	kasan_swapper_init();
13222efad17eSAlexandre Ghiti #endif
13232efad17eSAlexandre Ghiti 
1324671f9a3eSAnup Patel 	/* Clear fixmap PTE and PMD mappings */
1325671f9a3eSAnup Patel 	clear_fixmap(FIX_PTE);
1326671f9a3eSAnup Patel 	clear_fixmap(FIX_PMD);
1327e8a62cc2SAlexandre Ghiti 	clear_fixmap(FIX_PUD);
1328677b9eb8SQinglin Pan 	clear_fixmap(FIX_P4D);
1329671f9a3eSAnup Patel 
1330671f9a3eSAnup Patel 	/* Move to swapper page table */
1331e8a62cc2SAlexandre Ghiti 	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | satp_mode);
1332671f9a3eSAnup Patel 	local_flush_tlb_all();
1333e8dcb61fSAtish Patra 
1334840125a9SAlexandre Ghiti 	pt_ops_set_late();
1335671f9a3eSAnup Patel }
13366bd33e1eSChristoph Hellwig #else
13376bd33e1eSChristoph Hellwig asmlinkage void __init setup_vm(uintptr_t dtb_pa)
13386bd33e1eSChristoph Hellwig {
13396bd33e1eSChristoph Hellwig 	dtb_early_va = (void *)dtb_pa;
1340a78c6f59SAtish Patra 	dtb_early_pa = dtb_pa;
13416bd33e1eSChristoph Hellwig }
13426bd33e1eSChristoph Hellwig 
13436bd33e1eSChristoph Hellwig static inline void setup_vm_final(void)
13446bd33e1eSChristoph Hellwig {
13456bd33e1eSChristoph Hellwig }
13466bd33e1eSChristoph Hellwig #endif /* CONFIG_MMU */
1347671f9a3eSAnup Patel 
13485882e5acSChen Jiahao /* Reserve 128M low memory by default for swiotlb buffer */
13495882e5acSChen Jiahao #define DEFAULT_CRASH_KERNEL_LOW_SIZE	(128UL << 20)
13505882e5acSChen Jiahao 
13515882e5acSChen Jiahao static int __init reserve_crashkernel_low(unsigned long long low_size)
13525882e5acSChen Jiahao {
13535882e5acSChen Jiahao 	unsigned long long low_base;
13545882e5acSChen Jiahao 
13555882e5acSChen Jiahao 	low_base = memblock_phys_alloc_range(low_size, PMD_SIZE, 0, dma32_phys_limit);
13565882e5acSChen Jiahao 	if (!low_base) {
13575882e5acSChen Jiahao 		pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
13585882e5acSChen Jiahao 		return -ENOMEM;
13595882e5acSChen Jiahao 	}
13605882e5acSChen Jiahao 
13615882e5acSChen Jiahao 	pr_info("crashkernel low memory reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
13625882e5acSChen Jiahao 		low_base, low_base + low_size, low_size >> 20);
13635882e5acSChen Jiahao 
13645882e5acSChen Jiahao 	crashk_low_res.start = low_base;
13655882e5acSChen Jiahao 	crashk_low_res.end = low_base + low_size - 1;
13665882e5acSChen Jiahao 
13675882e5acSChen Jiahao 	return 0;
13685882e5acSChen Jiahao }
13695882e5acSChen Jiahao 
1370e53d2818SNick Kossifidis /*
1371e53d2818SNick Kossifidis  * reserve_crashkernel() - reserves memory for crash kernel
1372e53d2818SNick Kossifidis  *
1373e53d2818SNick Kossifidis  * This function reserves memory area given in "crashkernel=" kernel command
1374e53d2818SNick Kossifidis  * line parameter. The memory reserved is used by dump capture kernel when
1375e53d2818SNick Kossifidis  * primary kernel is crashing.
1376e53d2818SNick Kossifidis  */
1377e53d2818SNick Kossifidis static void __init reserve_crashkernel(void)
1378e53d2818SNick Kossifidis {
1379e53d2818SNick Kossifidis 	unsigned long long crash_base = 0;
1380e53d2818SNick Kossifidis 	unsigned long long crash_size = 0;
13815882e5acSChen Jiahao 	unsigned long long crash_low_size = 0;
1382e53d2818SNick Kossifidis 	unsigned long search_start = memblock_start_of_DRAM();
13835882e5acSChen Jiahao 	unsigned long search_end = (unsigned long)dma32_phys_limit;
13845882e5acSChen Jiahao 	char *cmdline = boot_command_line;
13855882e5acSChen Jiahao 	bool fixed_base = false;
13865882e5acSChen Jiahao 	bool high = false;
1387e53d2818SNick Kossifidis 
1388e53d2818SNick Kossifidis 	int ret = 0;
1389e53d2818SNick Kossifidis 
1390d414cb37SJisheng Zhang 	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
1391d414cb37SJisheng Zhang 		return;
139256409750SNick Kossifidis 	/*
139356409750SNick Kossifidis 	 * Don't reserve a region for a crash kernel on a crash kernel
139456409750SNick Kossifidis 	 * since it doesn't make much sense and we have limited memory
139556409750SNick Kossifidis 	 * resources.
139656409750SNick Kossifidis 	 */
139756409750SNick Kossifidis 	if (is_kdump_kernel()) {
139856409750SNick Kossifidis 		pr_info("crashkernel: ignoring reservation request\n");
139956409750SNick Kossifidis 		return;
140056409750SNick Kossifidis 	}
140156409750SNick Kossifidis 
14025882e5acSChen Jiahao 	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
1403e53d2818SNick Kossifidis 				&crash_size, &crash_base);
14045882e5acSChen Jiahao 	if (ret == -ENOENT) {
14055882e5acSChen Jiahao 		/* Fallback to crashkernel=X,[high,low] */
14065882e5acSChen Jiahao 		ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base);
1407e53d2818SNick Kossifidis 		if (ret || !crash_size)
1408e53d2818SNick Kossifidis 			return;
1409e53d2818SNick Kossifidis 
14105882e5acSChen Jiahao 		/*
14115882e5acSChen Jiahao 		 * crashkernel=Y,low is valid only when crashkernel=X,high
14125882e5acSChen Jiahao 		 * is passed.
14135882e5acSChen Jiahao 		 */
14145882e5acSChen Jiahao 		ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
14155882e5acSChen Jiahao 		if (ret == -ENOENT)
14165882e5acSChen Jiahao 			crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
14175882e5acSChen Jiahao 		else if (ret)
14185882e5acSChen Jiahao 			return;
14195882e5acSChen Jiahao 
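		/* crashkernel=X,high: search above the 32-bit DMA limit first */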
14205882e5acSChen Jiahao 		search_start = (unsigned long)dma32_phys_limit;
14215882e5acSChen Jiahao 		search_end = memblock_end_of_DRAM();
14225882e5acSChen Jiahao 		high = true;
14235882e5acSChen Jiahao 	} else if (ret || !crash_size) {
14245882e5acSChen Jiahao 		/* Invalid argument value specified */
14255882e5acSChen Jiahao 		return;
14265882e5acSChen Jiahao 	}
14275882e5acSChen Jiahao 
1428e53d2818SNick Kossifidis 	crash_size = PAGE_ALIGN(crash_size);
1429e53d2818SNick Kossifidis 
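	/*
	 * crashkernel=size@offset: honour the requested base by restricting
	 * the search window to exactly that region.
	 */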
1430a7259df7SMike Rapoport 	if (crash_base) {
14315882e5acSChen Jiahao 		fixed_base = true;
1432a7259df7SMike Rapoport 		search_start = crash_base;
1433a7259df7SMike Rapoport 		search_end = crash_base + crash_size;
1434a7259df7SMike Rapoport 	}
1435a7259df7SMike Rapoport 
1436e53d2818SNick Kossifidis 	/*
1437e53d2818SNick Kossifidis 	 * Current riscv boot protocol requires 2MB alignment for
1438e53d2818SNick Kossifidis 	 * RV64 and 4MB alignment for RV32 (hugepage size)
1439decf89f8SNick Kossifidis 	 *
1440decf89f8SNick Kossifidis 	 * Try to allocate from 32-bit addressable physical memory so that
1441decf89f8SNick Kossifidis 	 * swiotlb can work for the crash kernel.
1442e53d2818SNick Kossifidis 	 */
1443a7259df7SMike Rapoport 	crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
14445882e5acSChen Jiahao 					       search_start, search_end);
1445decf89f8SNick Kossifidis 	if (crash_base == 0) {
14465882e5acSChen Jiahao 		/*
14475882e5acSChen Jiahao 		 * For crashkernel=size[KMG]@offset[KMG], print out failure
14485882e5acSChen Jiahao 		 * message if we can't reserve the specified region.
14495882e5acSChen Jiahao 		 */
14505882e5acSChen Jiahao 		if (fixed_base) {
14515882e5acSChen Jiahao 			pr_warn("crashkernel: allocating failed with given size@offset\n");
14525882e5acSChen Jiahao 			return;
14535882e5acSChen Jiahao 		}
14545882e5acSChen Jiahao 
14555882e5acSChen Jiahao 		if (high) {
14565882e5acSChen Jiahao 			/*
14575882e5acSChen Jiahao 			 * For crashkernel=size[KMG],high, if the first attempt was
14585882e5acSChen Jiahao 			 * for high memory, fall back to low memory.
14595882e5acSChen Jiahao 			 */
14605882e5acSChen Jiahao 			search_start = memblock_start_of_DRAM();
14615882e5acSChen Jiahao 			search_end = (unsigned long)dma32_phys_limit;
14625882e5acSChen Jiahao 		} else {
14635882e5acSChen Jiahao 			/*
14645882e5acSChen Jiahao 			 * For crashkernel=size[KMG], if the first attempt was for
14655882e5acSChen Jiahao 			 * low memory, fall back to high memory, the minimum required
14665882e5acSChen Jiahao 			 * low memory will be reserved later.
14675882e5acSChen Jiahao 			 */
14685882e5acSChen Jiahao 			search_start = (unsigned long)dma32_phys_limit;
14695882e5acSChen Jiahao 			search_end = memblock_end_of_DRAM();
14705882e5acSChen Jiahao 			crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
14715882e5acSChen Jiahao 		}
14725882e5acSChen Jiahao 
1473decf89f8SNick Kossifidis 		crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
1474a7259df7SMike Rapoport 						       search_start, search_end);
1475e53d2818SNick Kossifidis 		if (crash_base == 0) {
1476e53d2818SNick Kossifidis 			pr_warn("crashkernel: couldn't allocate %lldKB\n",
1477e53d2818SNick Kossifidis 				crash_size >> 10);
1478e53d2818SNick Kossifidis 			return;
1479e53d2818SNick Kossifidis 		}
1480decf89f8SNick Kossifidis 	}
1481e53d2818SNick Kossifidis 
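	/*
	 * If the region ended up above the 32-bit DMA limit, also reserve the
	 * companion low-memory region; undo the reservation if that fails.
	 */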
14825882e5acSChen Jiahao 	if ((crash_base >= dma32_phys_limit) && crash_low_size &&
14835882e5acSChen Jiahao 	     reserve_crashkernel_low(crash_low_size)) {
14845882e5acSChen Jiahao 		memblock_phys_free(crash_base, crash_size);
14855882e5acSChen Jiahao 		return;
14865882e5acSChen Jiahao 	}
14875882e5acSChen Jiahao 
1488e53d2818SNick Kossifidis 	pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
1489e53d2818SNick Kossifidis 		crash_base, crash_base + crash_size, crash_size >> 20);
1490e53d2818SNick Kossifidis 
1491e53d2818SNick Kossifidis 	crashk_res.start = crash_base;
1492e53d2818SNick Kossifidis 	crashk_res.end = crash_base + crash_size - 1;
1493e53d2818SNick Kossifidis }
1494e53d2818SNick Kossifidis 
1495671f9a3eSAnup Patel void __init paging_init(void)
1496671f9a3eSAnup Patel {
1497f842f5ffSKefeng Wang 	setup_bootmem();
1498671f9a3eSAnup Patel 	setup_vm_final();
149985fadc0dSWoody Zhang 
150085fadc0dSWoody Zhang 	/* Depends on the linear mapping being ready */
150185fadc0dSWoody Zhang 	memblock_allow_resize();
1502cbd34f4bSAtish Patra }
1503cbd34f4bSAtish Patra 
1504cbd34f4bSAtish Patra void __init misc_mem_init(void)
1505cbd34f4bSAtish Patra {
1506f6e5aedfSKefeng Wang 	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
15074f0e8eefSAtish Patra 	arch_numa_init();
1508cbd34f4bSAtish Patra 	sparse_init();
1509d3770844SVincent Chen #ifdef CONFIG_SPARSEMEM_VMEMMAP
1510d3770844SVincent Chen 	/* The entire VMEMMAP region has been populated. Flush TLB for this region */
1511d3770844SVincent Chen 	local_flush_tlb_kernel_range(VMEMMAP_START, VMEMMAP_END);
1512d3770844SVincent Chen #endif
1513671f9a3eSAnup Patel 	zone_sizes_init();
1514e53d2818SNick Kossifidis 	reserve_crashkernel();
15154f0e8eefSAtish Patra 	memblock_dump_all();
15166f1e9e94SAnup Patel }
1517d95f1a54SLogan Gunthorpe 
15189fe57d8cSKefeng Wang #ifdef CONFIG_SPARSEMEM_VMEMMAP
1519d95f1a54SLogan Gunthorpe int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1520d95f1a54SLogan Gunthorpe 			       struct vmem_altmap *altmap)
1521d95f1a54SLogan Gunthorpe {
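	/* The vmemmap is backed by base pages only; no huge-page vmemmap here */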
15221d9cfee7SAnshuman Khandual 	return vmemmap_populate_basepages(start, end, node, NULL);
1523d95f1a54SLogan Gunthorpe }
1524d95f1a54SLogan Gunthorpe #endif
15257d3332beSBjörn Töpel 
15267d3332beSBjörn Töpel #if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
15277d3332beSBjörn Töpel /*
15287d3332beSBjörn Töpel  * Pre-allocates page-table pages for a specific area in the kernel
15297d3332beSBjörn Töpel  * page-table. Only the level which needs to be synchronized between
15307d3332beSBjörn Töpel  * all page-tables is allocated because the synchronization can be
15317d3332beSBjörn Töpel  * expensive.
15327d3332beSBjörn Töpel  */
15337d3332beSBjörn Töpel static void __init preallocate_pgd_pages_range(unsigned long start, unsigned long end,
15347d3332beSBjörn Töpel 					       const char *area)
15357d3332beSBjörn Töpel {
15367d3332beSBjörn Töpel 	unsigned long addr;
15377d3332beSBjörn Töpel 	const char *lvl;
15387d3332beSBjörn Töpel 
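	/*
	 * The extra "addr >= start" check stops the walk if ALIGN() wraps
	 * around at the top of the address space.
	 */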
15397d3332beSBjörn Töpel 	for (addr = start; addr < end && addr >= start; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
15407d3332beSBjörn Töpel 		pgd_t *pgd = pgd_offset_k(addr);
15417d3332beSBjörn Töpel 		p4d_t *p4d;
15427d3332beSBjörn Töpel 		pud_t *pud;
15437d3332beSBjörn Töpel 		pmd_t *pmd;
15447d3332beSBjörn Töpel 
15457d3332beSBjörn Töpel 		lvl = "p4d";
15467d3332beSBjörn Töpel 		p4d = p4d_alloc(&init_mm, pgd, addr);
15477d3332beSBjörn Töpel 		if (!p4d)
15487d3332beSBjörn Töpel 			goto failed;
15497d3332beSBjörn Töpel 
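		/*
		 * Stop at the first level that is not folded into the PGD:
		 * populating it is what makes the top-level entries shareable.
		 */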
15507d3332beSBjörn Töpel 		if (pgtable_l5_enabled)
15517d3332beSBjörn Töpel 			continue;
15527d3332beSBjörn Töpel 
15537d3332beSBjörn Töpel 		lvl = "pud";
15547d3332beSBjörn Töpel 		pud = pud_alloc(&init_mm, p4d, addr);
15557d3332beSBjörn Töpel 		if (!pud)
15567d3332beSBjörn Töpel 			goto failed;
15577d3332beSBjörn Töpel 
15587d3332beSBjörn Töpel 		if (pgtable_l4_enabled)
15597d3332beSBjörn Töpel 			continue;
15607d3332beSBjörn Töpel 
15617d3332beSBjörn Töpel 		lvl = "pmd";
15627d3332beSBjörn Töpel 		pmd = pmd_alloc(&init_mm, pud, addr);
15637d3332beSBjörn Töpel 		if (!pmd)
15647d3332beSBjörn Töpel 			goto failed;
15657d3332beSBjörn Töpel 	}
15667d3332beSBjörn Töpel 	return;
15677d3332beSBjörn Töpel 
15687d3332beSBjörn Töpel failed:
15697d3332beSBjörn Töpel 	/*
15707d3332beSBjörn Töpel 	 * The pages have to be there now or they will be missing in
15717d3332beSBjörn Töpel 	 * process page-tables later.
15727d3332beSBjörn Töpel 	 */
15737d3332beSBjörn Töpel 	panic("Failed to pre-allocate %s pages for %s area\n", lvl, area);
15747d3332beSBjörn Töpel }
15757d3332beSBjörn Töpel 
15767d3332beSBjörn Töpel void __init pgtable_cache_init(void)
15777d3332beSBjörn Töpel {
15787d3332beSBjörn Töpel 	preallocate_pgd_pages_range(VMALLOC_START, VMALLOC_END, "vmalloc");
15797d3332beSBjörn Töpel 	if (IS_ENABLED(CONFIG_MODULES))
15807d3332beSBjörn Töpel 		preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
15817d3332beSBjörn Töpel }
15827d3332beSBjörn Töpel #endif
1583