xref: /openbmc/linux/arch/riscv/mm/init.c (revision d5fdade9)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2020 FORTH-ICS/CARV
 *  Nick Kossifidis <mick@ics.forth.gr>
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>
#include <asm/numa.h>

#include "../kernel/head.h"

struct kernel_mapping kernel_map __ro_after_init;
EXPORT_SYMBOL(kernel_map);
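/*
 * Under CONFIG_XIP_KERNEL the kernel text executes in place from flash
 * while writable data lives in RAM, so early accesses to variables such
 * as kernel_map must be redirected to their RAM copies: that is what the
 * XIP_FIXUP() wrappers below do.
 */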
#ifdef CONFIG_XIP_KERNEL
#define kernel_map	(*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
#endif

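/*
 * satp_mode selects the address-translation mode programmed into the
 * SATP CSR: 64-bit kernels start from sv57 (sv39 for XIP kernels, which
 * skip the runtime probe) and set_satp_mode() demotes to sv48 or sv39
 * when the hardware does not accept the higher mode; 32-bit kernels
 * always use sv32.
 */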
#ifdef CONFIG_64BIT
u64 satp_mode __ro_after_init = !IS_ENABLED(CONFIG_XIP_KERNEL) ? SATP_MODE_57 : SATP_MODE_39;
#else
u64 satp_mode __ro_after_init = SATP_MODE_32;
#endif
EXPORT_SYMBOL(satp_mode);

bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
EXPORT_SYMBOL(pgtable_l4_enabled);
EXPORT_SYMBOL(pgtable_l5_enabled);

phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
#define DTB_EARLY_BASE_VA      PGDIR_SIZE
void *_dtb_early_va __initdata;
uintptr_t _dtb_early_pa __initdata;

static phys_addr_t dma32_phys_limit __initdata;

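/*
 * Hand the zone PFN limits to the core mm: everything below
 * dma32_phys_limit goes into ZONE_DMA32 (when configured), the rest of
 * lowmem into ZONE_NORMAL.
 */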
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

static void __init print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
	if (IS_ENABLED(CONFIG_64BIT)) {
#ifdef CONFIG_KASAN
		print_mlm("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif

		print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
			  (unsigned long)ADDRESS_SPACE_END);
	}
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_MMU && CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

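	/*
	 * A bounce buffer is only needed when some RAM lies above what
	 * 32-bit DMA can reach, i.e. when max_pfn exceeds dma32_phys_limit.
	 */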
#ifdef CONFIG_SWIOTLB
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > PFN_DOWN(dma32_phys_limit))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;
#endif
	memblock_free_all();

	print_vm_layout();
}

/* Limit the memory size via the "mem=" kernel command-line parameter. */
static phys_addr_t memory_limit;

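/*
 * Parse "mem=<size>", e.g. "mem=512M" caps the usable RAM at 512 MiB:
 * the size is rounded down to a page boundary and can only shrink the
 * current limit.
 */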
static int __init early_mem(char *p)
{
	u64 size;

	if (!p)
		return 1;

	size = memparse(p, &p) & PAGE_MASK;
	memory_limit = min_t(u64, size, memory_limit);

	pr_notice("Memory limited to %lldMB\n", (u64)memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

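/*
 * Register usable RAM with memblock and carve out everything that must
 * never reach the page allocator: the kernel image, the initrd, the DTB
 * and the CMA areas for DMA32 and (on 64-bit) gigantic hugetlb pages.
 */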
static void __init setup_bootmem(void)
{
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t max_mapped_addr;
	phys_addr_t phys_ram_end, vmlinux_start;

	if (IS_ENABLED(CONFIG_XIP_KERNEL))
		vmlinux_start = __pa_symbol(&_sdata);
	else
		vmlinux_start = __pa_symbol(&_start);

	memblock_enforce_memory_limit(memory_limit);

	/*
	 * Make sure we align the reservation on PMD_SIZE since we will
	 * map the kernel in the linear mapping as read-only: we do not want
	 * any allocation to happen between _end and the next PMD-aligned page.
	 */
	if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
		vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
	/*
	 * Reserve from the start of the kernel to the end of the kernel
	 */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	phys_ram_end = memblock_end_of_DRAM();
	if (!IS_ENABLED(CONFIG_XIP_KERNEL))
		phys_ram_base = memblock_start_of_DRAM();
	/*
	 * The memblock allocator is not aware that the last 4K bytes of
	 * addressable memory cannot be mapped because of the IS_ERR_VALUE
	 * macro, so make sure those bytes are not usable by memblock when
	 * the end of DRAM equals the maximum addressable memory.  For the
	 * 64-bit kernel, this problem cannot happen here as the end of the
	 * virtual address space is occupied by the kernel mapping; there,
	 * the check must be done as soon as the kernel mapping base address
	 * is determined.
	 */
	if (!IS_ENABLED(CONFIG_64BIT)) {
		max_mapped_addr = __pa(~(ulong)0);
		if (max_mapped_addr == (phys_ram_end - 1))
			memblock_set_current_limit(max_mapped_addr - 4096);
	}

	min_low_pfn = PFN_UP(phys_ram_base);
	max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));

	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);

	reserve_initrd_mem();
	/*
	 * If the DTB is built in, there is no need to reserve its memblock.
	 * Otherwise, do reserve it, but avoid using
	 * early_init_fdt_reserve_self() since __pa() does not work for DTB
	 * pointers that are fixmap addresses.
	 */
	if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	dma_contiguous_reserve(dma32_phys_limit);
	if (IS_ENABLED(CONFIG_64BIT))
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
	memblock_allow_resize();
}

#ifdef CONFIG_MMU
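/*
 * pt_ops abstracts page-table allocation so that the same
 * create_*_mapping() helpers work across all three boot phases: MMU off
 * (early), MMU on but no linear mapping yet (fixmap), and fully booted
 * (late).  See the pt_ops_set_*() helpers below.
 */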
struct pt_alloc_ops pt_ops __initdata;

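/* PFN of the base of the kernel mapping, used by the asm/page.h helpers. */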
unsigned long riscv_pfn_base __ro_after_init;
EXPORT_SYMBOL(riscv_pfn_base);

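/*
 * Three generations of page tables: trampoline_pg_dir maps only the
 * kernel image for the initial satp switch in head.S, early_pg_dir
 * carries the boot until setup_vm_final(), and swapper_pg_dir becomes
 * the final kernel PGD.
 */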
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
static p4d_t __maybe_unused early_dtb_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
static pud_t __maybe_unused early_dtb_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define pt_ops			(*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
#define riscv_pfn_base         (*(unsigned long  *)XIP_FIXUP(&riscv_pfn_base))
#define trampoline_pg_dir      ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
#define fixmap_pte             ((pte_t *)XIP_FIXUP(fixmap_pte))
#define early_pg_dir           ((pgd_t *)XIP_FIXUP(early_pg_dir))
#endif /* CONFIG_XIP_KERNEL */

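/*
 * Install (or, when prot is empty, tear down) the PTE backing a fixmap
 * slot and flush the local TLB entry for that address.
 */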
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *__init get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings, so we
	 * should never reach here with the MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t __init alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)));

	return __pa(vaddr);
}

static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
#define fixmap_pmd     ((pmd_t *)XIP_FIXUP(fixmap_pmd))
#define early_pmd      ((pmd_t *)XIP_FIXUP(early_pmd))
#endif /* CONFIG_XIP_KERNEL */

static p4d_t trampoline_p4d[PTRS_PER_P4D] __page_aligned_bss;
static p4d_t fixmap_p4d[PTRS_PER_P4D] __page_aligned_bss;
static p4d_t early_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_p4d ((p4d_t *)XIP_FIXUP(trampoline_p4d))
#define fixmap_p4d     ((p4d_t *)XIP_FIXUP(fixmap_p4d))
#define early_p4d      ((p4d_t *)XIP_FIXUP(early_p4d))
#endif /* CONFIG_XIP_KERNEL */

static pud_t trampoline_pud[PTRS_PER_PUD] __page_aligned_bss;
static pud_t fixmap_pud[PTRS_PER_PUD] __page_aligned_bss;
static pud_t early_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_pud ((pud_t *)XIP_FIXUP(trampoline_pud))
#define fixmap_pud     ((pud_t *)XIP_FIXUP(fixmap_pud))
#define early_pud      ((pud_t *)XIP_FIXUP(early_pud))
#endif /* CONFIG_XIP_KERNEL */

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before the MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT);

	return (uintptr_t)early_pmd;
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t __init alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page(vaddr)));

	return __pa(vaddr);
}

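/*
 * All create_*_mapping() helpers below share the same shape: if the
 * requested size matches this level's block size, install a leaf entry;
 * otherwise allocate (or look up) the next-level table and recurse into
 * it.
 */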
static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

static pud_t *__init get_pud_virt_early(phys_addr_t pa)
{
	return (pud_t *)((uintptr_t)pa);
}

static pud_t *__init get_pud_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PUD);
	return (pud_t *)set_fixmap_offset(FIX_PUD, pa);
}

static pud_t *__init get_pud_virt_late(phys_addr_t pa)
{
	return (pud_t *)__va(pa);
}

static phys_addr_t __init alloc_pud_early(uintptr_t va)
{
	/* Only one PUD is available for early mapping */
	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);

	return (uintptr_t)early_pud;
}

static phys_addr_t __init alloc_pud_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pud_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

static p4d_t *__init get_p4d_virt_early(phys_addr_t pa)
{
	return (p4d_t *)((uintptr_t)pa);
}

static p4d_t *__init get_p4d_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_P4D);
	return (p4d_t *)set_fixmap_offset(FIX_P4D, pa);
}

static p4d_t *__init get_p4d_virt_late(phys_addr_t pa)
{
	return (p4d_t *)__va(pa);
}

static phys_addr_t __init alloc_p4d_early(uintptr_t va)
{
	/* Only one P4D is available for early mapping */
	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);

	return (uintptr_t)early_p4d;
}

static phys_addr_t __init alloc_p4d_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_p4d_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

static void __init create_pud_mapping(pud_t *pudp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pmd_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pud_index = pud_index(va);

	if (sz == PUD_SIZE) {
		if (pud_val(pudp[pud_index]) == 0)
			pudp[pud_index] = pfn_pud(PFN_DOWN(pa), prot);
		return;
	}

	if (pud_val(pudp[pud_index]) == 0) {
		next_phys = pt_ops.alloc_pmd(va);
		pudp[pud_index] = pfn_pud(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = pt_ops.get_pmd_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pud_pfn(pudp[pud_index]));
		nextp = pt_ops.get_pmd_virt(next_phys);
	}

	create_pmd_mapping(nextp, va, pa, sz, prot);
}

static void __init create_p4d_mapping(p4d_t *p4dp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pud_t *nextp;
	phys_addr_t next_phys;
	uintptr_t p4d_index = p4d_index(va);

	if (sz == P4D_SIZE) {
		if (p4d_val(p4dp[p4d_index]) == 0)
			p4dp[p4d_index] = pfn_p4d(PFN_DOWN(pa), prot);
		return;
	}

	if (p4d_val(p4dp[p4d_index]) == 0) {
		next_phys = pt_ops.alloc_pud(va);
		p4dp[p4d_index] = pfn_p4d(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = pt_ops.get_pud_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_p4d_pfn(p4dp[p4d_index]));
		nextp = pt_ops.get_pud_virt(next_phys);
	}

	create_pud_mapping(nextp, va, pa, sz, prot);
}

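/*
 * What "next level below the PGD" means depends on how many levels are
 * enabled at runtime: a P4D under sv57, a PUD under sv48, a PMD under
 * sv39.  These macros route to the matching alloc/get/create helper.
 */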
#define pgd_next_t		p4d_t
#define alloc_pgd_next(__va)	(pgtable_l5_enabled ?			\
		pt_ops.alloc_p4d(__va) : (pgtable_l4_enabled ?		\
		pt_ops.alloc_pud(__va) : pt_ops.alloc_pmd(__va)))
#define get_pgd_next_virt(__pa)	(pgtable_l5_enabled ?			\
		pt_ops.get_p4d_virt(__pa) : (pgd_next_t *)(pgtable_l4_enabled ?	\
		pt_ops.get_pud_virt(__pa) : (pud_t *)pt_ops.get_pmd_virt(__pa)))
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
				(pgtable_l5_enabled ?			\
		create_p4d_mapping(__nextp, __va, __pa, __sz, __prot) : \
				(pgtable_l4_enabled ?			\
		create_pud_mapping((pud_t *)__nextp, __va, __pa, __sz, __prot) :	\
		create_pmd_mapping((pmd_t *)__nextp, __va, __pa, __sz, __prot)))
#define fixmap_pgd_next		(pgtable_l5_enabled ?			\
		(uintptr_t)fixmap_p4d : (pgtable_l4_enabled ?		\
		(uintptr_t)fixmap_pud : (uintptr_t)fixmap_pmd))
#define trampoline_pgd_next	(pgtable_l5_enabled ?			\
		(uintptr_t)trampoline_p4d : (pgtable_l4_enabled ?	\
		(uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd))
#define early_dtb_pgd_next	(pgtable_l5_enabled ?			\
		(uintptr_t)early_dtb_p4d : (pgtable_l4_enabled ?	\
		(uintptr_t)early_dtb_pud : (uintptr_t)early_dtb_pmd))
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		((uintptr_t)fixmap_pte)
#define early_dtb_pgd_next	((uintptr_t)early_dtb_pmd)
#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot)
#define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot)
#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot)
#endif /* __PAGETABLE_PMD_FOLDED */

void __init create_pgd_mapping(pgd_t *pgdp,
			       uintptr_t va, phys_addr_t pa,
			       phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

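/*
 * Pick the largest page size usable for a region: one whose base and
 * size are both PMD-aligned can be mapped with PMD leaves (2 MiB on
 * 64-bit, 4 MiB on sv32), anything else falls back to 4 KiB pages.
 */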
static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

#ifdef CONFIG_XIP_KERNEL
#define phys_ram_base  (*(phys_addr_t *)XIP_FIXUP(&phys_ram_base))
extern char _xiprom[], _exiprom[], __data_loc;

/* Called from head.S with the MMU off: copy the writable data to RAM */
asmlinkage void __init __copy_data(void)
{
	void *from = (void *)(&__data_loc);
	void *to = (void *)CONFIG_PHYS_RAM_BASE;
	size_t sz = (size_t)((uintptr_t)(&_end) - (uintptr_t)(&_sdata));

	memcpy(to, from, sz);
}
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
static __init pgprot_t pgprot_from_va(uintptr_t va)
{
	if (is_va_kernel_text(va))
		return PAGE_KERNEL_READ_EXEC;

	/*
	 * In the 64-bit kernel, the kernel mapping is outside the linear
	 * mapping, so we must protect its linear-mapping alias from being
	 * executed and written.  The rodata section is marked read-only
	 * in mark_rodata_ro().
	 */
	if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
		return PAGE_KERNEL_READ;

	return PAGE_KERNEL;
}

void mark_rodata_ro(void)
{
	set_kernel_memory(__start_rodata, _data, set_memory_ro);
	if (IS_ENABLED(CONFIG_64BIT))
		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
				  set_memory_ro);

	debug_checkwx();
}
#else
static __init pgprot_t pgprot_from_va(uintptr_t va)
{
	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
		return PAGE_KERNEL;

	return PAGE_KERNEL_EXEC;
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

#ifdef CONFIG_64BIT
static void __init disable_pgtable_l5(void)
{
	pgtable_l5_enabled = false;
	kernel_map.page_offset = PAGE_OFFSET_L4;
	satp_mode = SATP_MODE_48;
}

static void __init disable_pgtable_l4(void)
{
	pgtable_l4_enabled = false;
	kernel_map.page_offset = PAGE_OFFSET_L3;
	satp_mode = SATP_MODE_39;
}

/*
 * There is a simple way to determine whether 4/5-level paging is
 * supported by the underlying hardware: establish a 1:1 mapping in the
 * highest paging mode, then read back SATP to see whether the
 * configuration was accepted, demoting one level at a time until it is.
 */
static __init void set_satp_mode(void)
{
	u64 identity_satp, hw_satp;
	uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
	bool check_l4 = false;

	create_p4d_mapping(early_p4d,
			set_satp_mode_pmd, (uintptr_t)early_pud,
			P4D_SIZE, PAGE_TABLE);
	create_pud_mapping(early_pud,
			   set_satp_mode_pmd, (uintptr_t)early_pmd,
			   PUD_SIZE, PAGE_TABLE);
	/* Handle the case where set_satp_mode straddles 2 PMDs */
	create_pmd_mapping(early_pmd,
			   set_satp_mode_pmd, set_satp_mode_pmd,
			   PMD_SIZE, PAGE_KERNEL_EXEC);
	create_pmd_mapping(early_pmd,
			   set_satp_mode_pmd + PMD_SIZE,
			   set_satp_mode_pmd + PMD_SIZE,
			   PMD_SIZE, PAGE_KERNEL_EXEC);
retry:
	create_pgd_mapping(early_pg_dir,
			   set_satp_mode_pmd,
			   check_l4 ? (uintptr_t)early_pud : (uintptr_t)early_p4d,
			   PGDIR_SIZE, PAGE_TABLE);

	identity_satp = PFN_DOWN((uintptr_t)&early_pg_dir) | satp_mode;

	local_flush_tlb_all();
	csr_write(CSR_SATP, identity_satp);
	hw_satp = csr_swap(CSR_SATP, 0ULL);
	local_flush_tlb_all();

	if (hw_satp != identity_satp) {
		if (!check_l4) {
			disable_pgtable_l5();
			check_l4 = true;
			memset(early_pg_dir, 0, PAGE_SIZE);
			goto retry;
		}
		disable_pgtable_l4();
	}

	memset(early_pg_dir, 0, PAGE_SIZE);
	memset(early_p4d, 0, PAGE_SIZE);
	memset(early_pud, 0, PAGE_SIZE);
	memset(early_pmd, 0, PAGE_SIZE);
}
#endif

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements must be honoured for setup_vm() to work
 * correctly:
 * 1) It must use PC-relative addressing to access kernel symbols.
 *    To achieve this we always build with GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm(),
 *    so compiler instrumentation is disabled when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

#ifdef CONFIG_XIP_KERNEL
static void __init create_kernel_page_table(pgd_t *pgdir,
					    __always_unused bool early)
{
	uintptr_t va, end_va;

	/* Map the flash-resident part */
	end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.xiprom + (va - kernel_map.virt_addr),
				   PMD_SIZE, PAGE_KERNEL_EXEC);

	/* Map the data in RAM */
	end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
	for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
				   PMD_SIZE, PAGE_KERNEL);
}
#else
static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
{
	uintptr_t va, end_va;

	end_va = kernel_map.virt_addr + kernel_map.size;
	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.phys_addr + (va - kernel_map.virt_addr),
				   PMD_SIZE,
				   early ?
					PAGE_KERNEL_EXEC : pgprot_from_va(va));
}
#endif

/*
 * Set up a 4MB mapping that encompasses the device tree: for a 64-bit
 * kernel, this means 2 PMD entries, whereas for a 32-bit kernel, it is
 * only 1 PGDIR entry.
 */
static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
{
#ifndef CONFIG_BUILTIN_DTB
	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);

	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   IS_ENABLED(CONFIG_64BIT) ? early_dtb_pgd_next : pa,
			   PGDIR_SIZE,
			   IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL);

	if (pgtable_l5_enabled)
		create_p4d_mapping(early_dtb_p4d, DTB_EARLY_BASE_VA,
				   (uintptr_t)early_dtb_pud, P4D_SIZE, PAGE_TABLE);

	if (pgtable_l4_enabled)
		create_pud_mapping(early_dtb_pud, DTB_EARLY_BASE_VA,
				   (uintptr_t)early_dtb_pmd, PUD_SIZE, PAGE_TABLE);

	if (IS_ENABLED(CONFIG_64BIT)) {
		create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
				   pa, PMD_SIZE, PAGE_KERNEL);
		create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
				   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
	}

	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else
	/*
	 * For a 64-bit kernel, __va can't be used since it would return a
	 * linear mapping address whereas dtb_early_va will be used before
	 * setup_vm_final installs the linear mapping. For a 32-bit kernel,
	 * as the kernel is mapped in the linear mapping, that makes no
	 * difference.
	 */
	dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
#endif

	dtb_early_pa = dtb_pa;
}

/*
 * MMU is not enabled yet: the page tables are allocated directly from
 * early_pmd/pud/p4d and the address returned is the physical one.
 */
void __init pt_ops_set_early(void)
{
	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
	pt_ops.alloc_pud = alloc_pud_early;
	pt_ops.get_pud_virt = get_pud_virt_early;
	pt_ops.alloc_p4d = alloc_p4d_early;
	pt_ops.get_p4d_virt = get_p4d_virt_early;
#endif
}

/*
 * MMU is enabled but page table setup is not complete yet.
 * The fixmap-based page table alloc functions must be used to temporarily
 * map the allocated physical pages, since the linear mapping does not
 * exist yet.
 *
 * Note that this is called with the MMU still disabled, hence the
 * kernel_mapping_pa_to_va() translation of the function pointers: the
 * resulting addresses only become valid (and used) once the MMU is on.
 */
void __init pt_ops_set_fixmap(void)
{
	pt_ops.alloc_pte = kernel_mapping_pa_to_va((uintptr_t)alloc_pte_fixmap);
	pt_ops.get_pte_virt = kernel_mapping_pa_to_va((uintptr_t)get_pte_virt_fixmap);
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = kernel_mapping_pa_to_va((uintptr_t)alloc_pmd_fixmap);
	pt_ops.get_pmd_virt = kernel_mapping_pa_to_va((uintptr_t)get_pmd_virt_fixmap);
	pt_ops.alloc_pud = kernel_mapping_pa_to_va((uintptr_t)alloc_pud_fixmap);
	pt_ops.get_pud_virt = kernel_mapping_pa_to_va((uintptr_t)get_pud_virt_fixmap);
	pt_ops.alloc_p4d = kernel_mapping_pa_to_va((uintptr_t)alloc_p4d_fixmap);
	pt_ops.get_p4d_virt = kernel_mapping_pa_to_va((uintptr_t)get_p4d_virt_fixmap);
#endif
}

/*
 * MMU is enabled and page table setup is complete, so from now on we can
 * use the generic page allocation functions to set up page tables.
 */
void __init pt_ops_set_late(void)
{
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
	pt_ops.alloc_pud = alloc_pud_late;
	pt_ops.get_pud_virt = get_pud_virt_late;
	pt_ops.alloc_p4d = alloc_p4d_late;
	pt_ops.get_p4d_virt = get_p4d_virt_late;
#endif
}

asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;

	kernel_map.virt_addr = KERNEL_LINK_ADDR;
	kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);

#ifdef CONFIG_XIP_KERNEL
	kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
	kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);

	phys_ram_base = CONFIG_PHYS_RAM_BASE;
	kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);

	kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
#else
	kernel_map.phys_addr = (uintptr_t)(&_start);
	kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif

#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
	set_satp_mode();
#endif

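	/*
	 * va_pa_offset is what __va() adds to a physical address to get
	 * its linear-mapping address; va_kernel_pa_offset plays the same
	 * role for the kernel image mapping, which on 64-bit is separate
	 * from the linear mapping.
	 */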
	kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr;
	kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;

	riscv_pfn_base = PFN_DOWN(kernel_map.phys_addr);

	/*
	 * The default maximal physical memory size is KERN_VIRT_SIZE for the
	 * 32-bit kernel, whereas for the 64-bit kernel, the end of the virtual
	 * address space is occupied by the modules/BPF/kernel mappings, which
	 * reduces the available size of the linear mapping.
	 */
	memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? SZ_4G : 0);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);

#ifdef CONFIG_64BIT
	/*
	 * The last 4K bytes of the addressable memory cannot be mapped
	 * because of the IS_ERR_VALUE macro.
	 */
	BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
#endif

	pt_ops_set_early();

	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap P4D and PUD */
	if (pgtable_l5_enabled)
		create_p4d_mapping(fixmap_p4d, FIXADDR_START,
				   (uintptr_t)fixmap_pud, P4D_SIZE, PAGE_TABLE);
	/* Setup fixmap PUD and PMD */
	if (pgtable_l4_enabled)
		create_pud_mapping(fixmap_pud, FIXADDR_START,
				   (uintptr_t)fixmap_pmd, PUD_SIZE, PAGE_TABLE);
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
			   trampoline_pgd_next, PGDIR_SIZE, PAGE_TABLE);
	if (pgtable_l5_enabled)
		create_p4d_mapping(trampoline_p4d, kernel_map.virt_addr,
				   (uintptr_t)trampoline_pud, P4D_SIZE, PAGE_TABLE);
	if (pgtable_l4_enabled)
		create_pud_mapping(trampoline_pud, kernel_map.virt_addr,
				   (uintptr_t)trampoline_pmd, PUD_SIZE, PAGE_TABLE);
#ifdef CONFIG_XIP_KERNEL
	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
			   kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
			   kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
#endif
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
			   kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Set up an early PGD covering the entire kernel, which will allow
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	create_kernel_page_table(early_pg_dir, true);

	/* Setup early mapping for the FDT early scan */
	create_fdt_early_page_table(early_pg_dir, dtb_pa);

	/*
	 * The boot-time fixmap can only handle PMD_SIZE mappings. Thus, the
	 * boot-ioremap range cannot span multiple PMDs.
	 */
	BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early ioremap fixmap is already created as it lies within the
	 * first 2MB of the fixmap region. We always map PMD_SIZE, so both
	 * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
	 * Verify that and warn the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
#endif

	pt_ops_set_fixmap();
}

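/*
 * Build the final page tables in swapper_pg_dir now that all RAM is
 * known: map every memblock bank into the linear region with the largest
 * page size best_map_size() allows, then switch SATP over and move to
 * the generic (late) page-table allocators.
 */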
1021671f9a3eSAnup Patel static void __init setup_vm_final(void)
1022671f9a3eSAnup Patel {
1023671f9a3eSAnup Patel 	uintptr_t va, map_size;
1024671f9a3eSAnup Patel 	phys_addr_t pa, start, end;
1025b10d6bcaSMike Rapoport 	u64 i;
1026671f9a3eSAnup Patel 
1027671f9a3eSAnup Patel 	/* Setup swapper PGD for fixmap */
1028671f9a3eSAnup Patel 	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
1029ac51e005SZong Li 			   __pa_symbol(fixmap_pgd_next),
1030671f9a3eSAnup Patel 			   PGDIR_SIZE, PAGE_TABLE);
1031671f9a3eSAnup Patel 
10322bfc6cd8SAlexandre Ghiti 	/* Map all memory banks in the linear mapping */
1033b10d6bcaSMike Rapoport 	for_each_mem_range(i, &start, &end) {
1034671f9a3eSAnup Patel 		if (start >= end)
1035671f9a3eSAnup Patel 			break;
1036671f9a3eSAnup Patel 		if (start <= __pa(PAGE_OFFSET) &&
1037671f9a3eSAnup Patel 		    __pa(PAGE_OFFSET) < end)
1038671f9a3eSAnup Patel 			start = __pa(PAGE_OFFSET);
1039c99127c4SAlexandre Ghiti 		if (end >= __pa(PAGE_OFFSET) + memory_limit)
1040c99127c4SAlexandre Ghiti 			end = __pa(PAGE_OFFSET) + memory_limit;
1041671f9a3eSAnup Patel 
1042671f9a3eSAnup Patel 		map_size = best_map_size(start, end - start);
1043671f9a3eSAnup Patel 		for (pa = start; pa < end; pa += map_size) {
1044671f9a3eSAnup Patel 			va = (uintptr_t)__va(pa);
10452bfc6cd8SAlexandre Ghiti 
1046e5c35fa0SAlexandre Ghiti 			create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
1047e5c35fa0SAlexandre Ghiti 					   pgprot_from_va(va));
1048671f9a3eSAnup Patel 		}
1049671f9a3eSAnup Patel 	}
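	/*
	 * A minimal sketch of the granule selection done by best_map_size()
	 * above (the real helper, defined earlier in this file, may differ):
	 * upgrade to PMD_SIZE mappings whenever the base alignment and the
	 * remaining size allow it, otherwise fall back to base pages:
	 *
	 *	if (IS_ALIGNED(start, PMD_SIZE) && size >= PMD_SIZE)
	 *		return PMD_SIZE;
	 *	return PAGE_SIZE;
	 */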
1050671f9a3eSAnup Patel 
10512bfc6cd8SAlexandre Ghiti 	/* Map the kernel */
105207aabe8fSJisheng Zhang 	if (IS_ENABLED(CONFIG_64BIT))
1053526f83dfSAlexandre Ghiti 		create_kernel_page_table(swapper_pg_dir, false);
10542bfc6cd8SAlexandre Ghiti 
10552efad17eSAlexandre Ghiti #ifdef CONFIG_KASAN
10562efad17eSAlexandre Ghiti 	kasan_swapper_init();
10572efad17eSAlexandre Ghiti #endif
10582efad17eSAlexandre Ghiti 
1059671f9a3eSAnup Patel 	/* Clear the fixmap PTE, PMD, PUD and P4D mappings */
1060671f9a3eSAnup Patel 	clear_fixmap(FIX_PTE);
1061671f9a3eSAnup Patel 	clear_fixmap(FIX_PMD);
1062e8a62cc2SAlexandre Ghiti 	clear_fixmap(FIX_PUD);
1063677b9eb8SQinglin Pan 	clear_fixmap(FIX_P4D);
1064671f9a3eSAnup Patel 
1065671f9a3eSAnup Patel 	/* Move to swapper page table */
1066e8a62cc2SAlexandre Ghiti 	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | satp_mode);
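	/*
	 * The satp CSR combines the translation mode in its upper bits with
	 * the physical page number of the root page table in its lower bits,
	 * so the write above amounts to:
	 * satp = satp_mode | (__pa(swapper_pg_dir) >> PAGE_SHIFT)
	 */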
1067671f9a3eSAnup Patel 	local_flush_tlb_all();
1068e8dcb61fSAtish Patra 
1069840125a9SAlexandre Ghiti 	pt_ops_set_late();
1070671f9a3eSAnup Patel }
10716bd33e1eSChristoph Hellwig #else
10726bd33e1eSChristoph Hellwig asmlinkage void __init setup_vm(uintptr_t dtb_pa)
10736bd33e1eSChristoph Hellwig {
10746bd33e1eSChristoph Hellwig 	dtb_early_va = (void *)dtb_pa;
1075a78c6f59SAtish Patra 	dtb_early_pa = dtb_pa;
10766bd33e1eSChristoph Hellwig }
10776bd33e1eSChristoph Hellwig 
10786bd33e1eSChristoph Hellwig static inline void setup_vm_final(void)
10796bd33e1eSChristoph Hellwig {
10806bd33e1eSChristoph Hellwig }
10816bd33e1eSChristoph Hellwig #endif /* CONFIG_MMU */
1082671f9a3eSAnup Patel 
1083e53d2818SNick Kossifidis /*
1084e53d2818SNick Kossifidis  * reserve_crashkernel() - reserves memory for the crash kernel
1085e53d2818SNick Kossifidis  *
1086e53d2818SNick Kossifidis  * This function reserves the memory area given by the "crashkernel=" kernel
1087e53d2818SNick Kossifidis  * command line parameter. The reserved memory is used by the dump-capture
1088e53d2818SNick Kossifidis  * kernel when the primary kernel crashes.
1089e53d2818SNick Kossifidis  */
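/*
 * For example (illustrative values): booting with "crashkernel=256M"
 * reserves 256MB at any suitable base, while "crashkernel=256M@0x90000000"
 * requests a fixed base; parse_crashkernel() fills crash_size and
 * crash_base from either form.
 */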
1090e53d2818SNick Kossifidis static void __init reserve_crashkernel(void)
1091e53d2818SNick Kossifidis {
1092e53d2818SNick Kossifidis 	unsigned long long crash_base = 0;
1093e53d2818SNick Kossifidis 	unsigned long long crash_size = 0;
1094e53d2818SNick Kossifidis 	unsigned long search_start = memblock_start_of_DRAM();
1095e53d2818SNick Kossifidis 	unsigned long search_end = memblock_end_of_DRAM();
1096e53d2818SNick Kossifidis 
1097e53d2818SNick Kossifidis 	int ret = 0;
1098e53d2818SNick Kossifidis 
1099d414cb37SJisheng Zhang 	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
1100d414cb37SJisheng Zhang 		return;
110156409750SNick Kossifidis 	/*
110256409750SNick Kossifidis 	 * Don't reserve a region for a crash kernel when we are already
110356409750SNick Kossifidis 	 * running as a crash kernel, since it doesn't make much sense and
110456409750SNick Kossifidis 	 * memory resources are limited.
110556409750SNick Kossifidis 	 */
110656409750SNick Kossifidis 	if (is_kdump_kernel()) {
110756409750SNick Kossifidis 		pr_info("crashkernel: ignoring reservation request\n");
110856409750SNick Kossifidis 		return;
110956409750SNick Kossifidis 	}
111056409750SNick Kossifidis 
1111e53d2818SNick Kossifidis 	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
1112e53d2818SNick Kossifidis 				&crash_size, &crash_base);
1113e53d2818SNick Kossifidis 	if (ret || !crash_size)
1114e53d2818SNick Kossifidis 		return;
1115e53d2818SNick Kossifidis 
1116e53d2818SNick Kossifidis 	crash_size = PAGE_ALIGN(crash_size);
1117e53d2818SNick Kossifidis 
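	/* A user-supplied "crashkernel=size@base" pins the search window */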
1118a7259df7SMike Rapoport 	if (crash_base) {
1119a7259df7SMike Rapoport 		search_start = crash_base;
1120a7259df7SMike Rapoport 		search_end = crash_base + crash_size;
1121a7259df7SMike Rapoport 	}
1122a7259df7SMike Rapoport 
1123e53d2818SNick Kossifidis 	/*
1124e53d2818SNick Kossifidis 	 * The current riscv boot protocol requires 2MB alignment for
1125e53d2818SNick Kossifidis 	 * RV64 and 4MB alignment for RV32 (hugepage size).
1126decf89f8SNick Kossifidis 	 *
1127decf89f8SNick Kossifidis 	 * Try to allocate from 32-bit addressable physical memory so that
1128decf89f8SNick Kossifidis 	 * swiotlb can work in the crash kernel.
1129e53d2818SNick Kossifidis 	 */
1130a7259df7SMike Rapoport 	crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
1131decf89f8SNick Kossifidis 					       search_start,
1132decf89f8SNick Kossifidis 					       min(search_end, (unsigned long) SZ_4G));
1133decf89f8SNick Kossifidis 	if (crash_base == 0) {
1134decf89f8SNick Kossifidis 		/* Try again without restricting the region to 32-bit addressable memory */
1135decf89f8SNick Kossifidis 		crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
1136a7259df7SMike Rapoport 						search_start, search_end);
1137e53d2818SNick Kossifidis 		if (crash_base == 0) {
1138e53d2818SNick Kossifidis 			pr_warn("crashkernel: couldn't allocate %lldKB\n",
1139e53d2818SNick Kossifidis 				crash_size >> 10);
1140e53d2818SNick Kossifidis 			return;
1141e53d2818SNick Kossifidis 		}
1142decf89f8SNick Kossifidis 	}
1143e53d2818SNick Kossifidis 
1144e53d2818SNick Kossifidis 	pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
1145e53d2818SNick Kossifidis 		crash_base, crash_base + crash_size, crash_size >> 20);
1146e53d2818SNick Kossifidis 
1147e53d2818SNick Kossifidis 	crashk_res.start = crash_base;
1148e53d2818SNick Kossifidis 	crashk_res.end = crash_base + crash_size - 1;
1149e53d2818SNick Kossifidis }
1150e53d2818SNick Kossifidis 
1151671f9a3eSAnup Patel void __init paging_init(void)
1152671f9a3eSAnup Patel {
1153f842f5ffSKefeng Wang 	setup_bootmem();
1154671f9a3eSAnup Patel 	setup_vm_final();
1155cbd34f4bSAtish Patra }
1156cbd34f4bSAtish Patra 
1157cbd34f4bSAtish Patra void __init misc_mem_init(void)
1158cbd34f4bSAtish Patra {
1159f6e5aedfSKefeng Wang 	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
11604f0e8eefSAtish Patra 	arch_numa_init();
1161cbd34f4bSAtish Patra 	sparse_init();
1162671f9a3eSAnup Patel 	zone_sizes_init();
1163e53d2818SNick Kossifidis 	reserve_crashkernel();
11644f0e8eefSAtish Patra 	memblock_dump_all();
11656f1e9e94SAnup Patel }
1166d95f1a54SLogan Gunthorpe 
11679fe57d8cSKefeng Wang #ifdef CONFIG_SPARSEMEM_VMEMMAP
1168d95f1a54SLogan Gunthorpe int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1169d95f1a54SLogan Gunthorpe 			       struct vmem_altmap *altmap)
1170d95f1a54SLogan Gunthorpe {
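	/*
	 * Back the struct page array with base PAGE_SIZE mappings; no
	 * vmem_altmap is used here (NULL is passed), so the backing pages
	 * come from regular early allocations rather than device memory.
	 */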
11711d9cfee7SAnshuman Khandual 	return vmemmap_populate_basepages(start, end, node, NULL);
1172d95f1a54SLogan Gunthorpe }
1173d95f1a54SLogan Gunthorpe #endif
1174