// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2020 FORTH-ICS/CARV
 *  Nick Kossifidis <mick@ics.forth.gr>
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>
#include <asm/numa.h>

#include "../kernel/head.h"

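/*
 * kernel_map describes the kernel image mapping: its virtual and physical
 * base addresses, its size, and the offsets used to translate between
 * kernel virtual addresses and physical addresses (see struct
 * kernel_mapping in asm/page.h).
 */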
struct kernel_mapping kernel_map __ro_after_init;
EXPORT_SYMBOL(kernel_map);
#ifdef CONFIG_XIP_KERNEL
#define kernel_map	(*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
#endif

phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);

#ifdef CONFIG_XIP_KERNEL
extern char _xiprom[], _exiprom[], __data_loc;
#endif

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
#define DTB_EARLY_BASE_VA      PGDIR_SIZE
void *_dtb_early_va __initdata;
uintptr_t _dtb_early_pa __initdata;

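/*
 * Page table allocation callbacks: the implementations installed in pt_ops
 * change as boot progresses -- "early" (MMU off, statically allocated
 * tables), "fixmap" (MMU on but the linear mapping is not final yet) and
 * "late" (the generic page allocator is available).
 */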
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};

static phys_addr_t dma32_phys_limit __initdata;

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

static void __init print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
	if (IS_ENABLED(CONFIG_64BIT))
		print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
			  (unsigned long)ADDRESS_SPACE_END);
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_MMU && CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

#ifdef CONFIG_SWIOTLB
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > PFN_DOWN(dma32_phys_limit))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;
#endif
	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	print_vm_layout();
}

/*
 * The default maximal physical memory size is -PAGE_OFFSET for 32-bit kernels,
 * whereas for 64-bit kernels, the end of the virtual address space is occupied
 * by the modules/BPF/kernel mappings, which reduces the available size of the
 * linear mapping.
 * The limit can be lowered further with the "mem=" command-line parameter.
 */
#ifdef CONFIG_64BIT
static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G;
#else
static phys_addr_t memory_limit = -PAGE_OFFSET;
#endif

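/* Parse the "mem=" command-line parameter to further restrict memory_limit. */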
static int __init early_mem(char *p)
{
	u64 size;

	if (!p)
		return 1;

	size = memparse(p, &p) & PAGE_MASK;
	memory_limit = min_t(u64, size, memory_limit);

	pr_notice("Memory limited to %lldMB\n", (u64)memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

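/*
 * setup_bootmem() finalizes the memblock configuration: it reserves the
 * kernel image, initrd and DTB, applies the memory limit, and computes the
 * PFN bounds and DMA32 limit used later by zone_sizes_init().
 */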
static void __init setup_bootmem(void)
{
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t max_mapped_addr;
	phys_addr_t phys_ram_end, vmlinux_start;

	if (IS_ENABLED(CONFIG_XIP_KERNEL))
		vmlinux_start = __pa_symbol(&_sdata);
	else
		vmlinux_start = __pa_symbol(&_start);

	memblock_enforce_memory_limit(memory_limit);

	/*
	 * Make sure we align the reservation on PMD_SIZE since we will
	 * map the kernel in the linear mapping as read-only: we do not want
	 * any allocation to happen between _end and the next PMD-aligned page.
	 */
	if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
		vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
	/*
	 * Reserve from the start of the kernel to the end of the kernel
	 */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	phys_ram_end = memblock_end_of_DRAM();
	if (!IS_ENABLED(CONFIG_XIP_KERNEL))
		phys_ram_base = memblock_start_of_DRAM();
	/*
	 * The memblock allocator is not aware that the last 4K bytes of
	 * addressable memory can not be mapped because of the IS_ERR_VALUE
	 * macro. Make sure that the last 4K bytes are not usable by memblock
	 * if the end of DRAM is equal to the maximum addressable memory. For
	 * 64-bit kernels, this problem can't happen here as the end of the
	 * virtual address space is occupied by the kernel mapping; there, this
	 * check must be done as soon as the kernel mapping base address is
	 * determined.
	 */
	if (!IS_ENABLED(CONFIG_64BIT)) {
		max_mapped_addr = __pa(~(ulong)0);
		if (max_mapped_addr == (phys_ram_end - 1))
			memblock_set_current_limit(max_mapped_addr - 4096);
	}

	min_low_pfn = PFN_UP(phys_ram_base);
	max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);

	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);

	reserve_initrd_mem();
	/*
	 * If DTB is built in, no need to reserve its memblock.
	 * Otherwise, do reserve it but avoid using
	 * early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses
	 */
	if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	dma_contiguous_reserve(dma32_phys_limit);
	if (IS_ENABLED(CONFIG_64BIT))
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
	memblock_allow_resize();
}

#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops __initdata;

#ifdef CONFIG_XIP_KERNEL
#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
#endif

unsigned long riscv_pfn_base __ro_after_init;
EXPORT_SYMBOL(riscv_pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_pg_dir      ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
#define fixmap_pte             ((pte_t *)XIP_FIXUP(fixmap_pte))
#define early_pg_dir           ((pgd_t *)XIP_FIXUP(early_pg_dir))
#endif /* CONFIG_XIP_KERNEL */

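/*
 * Install or clear a single fixmap entry: the PTE is written directly into
 * fixmap_pte and the TLB entry for that address is flushed.
 */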
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *__init get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t __init alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)));

	return __pa(vaddr);
}

static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
#define fixmap_pmd     ((pmd_t *)XIP_FIXUP(fixmap_pmd))
#define early_pmd      ((pmd_t *)XIP_FIXUP(early_pmd))
#endif /* CONFIG_XIP_KERNEL */

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);

	return (uintptr_t)early_pmd;
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t __init alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page(vaddr)));

	return __pa(vaddr);
}

static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot)
#endif

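/*
 * Install a mapping for va -> pa of size sz in pgdp.  A PGDIR_SIZE mapping
 * is written as a leaf entry; otherwise the next-level table is allocated
 * through pt_ops (if needed) and the mapping recurses one level down.
 */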
void __init create_pgd_mapping(pgd_t *pgdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

#ifdef CONFIG_XIP_KERNEL
/* Called from head.S with the MMU off: copy the kernel data section from flash to RAM. */
asmlinkage void __init __copy_data(void)
{
	void *from = (void *)(&__data_loc);
	void *to = (void *)CONFIG_PHYS_RAM_BASE;
	size_t sz = (size_t)((uintptr_t)(&_end) - (uintptr_t)(&_sdata));

	memcpy(to, from, sz);
}
#endif

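/*
 * pgprot_from_va() selects the protections used when mapping a kernel
 * virtual address: roughly, kernel text stays executable, the 64-bit
 * linear-map alias of the kernel image does not, and ordinary memory is
 * mapped read-write (the exact policy depends on CONFIG_STRICT_KERNEL_RWX,
 * see the two variants below).
 */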
#ifdef CONFIG_STRICT_KERNEL_RWX
static __init pgprot_t pgprot_from_va(uintptr_t va)
{
	if (is_va_kernel_text(va))
		return PAGE_KERNEL_READ_EXEC;

	/*
	 * In 64-bit kernels, the kernel mapping lies outside the linear mapping,
	 * so we must protect its linear mapping alias from being executed and
	 * written.
	 * The rodata section is marked read-only in mark_rodata_ro().
	 */
	if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
		return PAGE_KERNEL_READ;

	return PAGE_KERNEL;
}

void mark_rodata_ro(void)
{
	set_kernel_memory(__start_rodata, _data, set_memory_ro);
	if (IS_ENABLED(CONFIG_64BIT))
		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
				  set_memory_ro);

	debug_checkwx();
}
#else
static __init pgprot_t pgprot_from_va(uintptr_t va)
{
	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
		return PAGE_KERNEL;

	return PAGE_KERNEL_EXEC;
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements should be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm(),
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

#ifdef CONFIG_XIP_KERNEL
static void __init create_kernel_page_table(pgd_t *pgdir,
					    __always_unused bool early)
{
	uintptr_t va, end_va;

	/* Map the flash resident part */
	end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.xiprom + (va - kernel_map.virt_addr),
				   PMD_SIZE, PAGE_KERNEL_EXEC);

	/* Map the data in RAM */
	end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
	for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
				   PMD_SIZE, PAGE_KERNEL);
}
#else
static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
{
	uintptr_t va, end_va;

	end_va = kernel_map.virt_addr + kernel_map.size;
	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.phys_addr + (va - kernel_map.virt_addr),
				   PMD_SIZE,
				   early ?
					PAGE_KERNEL_EXEC : pgprot_from_va(va));
}
#endif

/*
 * Setup a 4MB mapping that encompasses the device tree: for 64-bit kernel,
 * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
 * entry.
 */
static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
{
#ifndef CONFIG_BUILTIN_DTB
	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);

	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   IS_ENABLED(CONFIG_64BIT) ? (uintptr_t)early_dtb_pmd : pa,
			   PGDIR_SIZE,
			   IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL);

	if (IS_ENABLED(CONFIG_64BIT)) {
		create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
				   pa, PMD_SIZE, PAGE_KERNEL);
		create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
				   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
	}

	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else
	/*
	 * For 64-bit kernel, __va can't be used since it would return a linear
	 * mapping address whereas dtb_early_va will be used before
	 * setup_vm_final installs the linear mapping. For 32-bit kernel, as the
	 * kernel is mapped in the linear mapping, that makes no difference.
	 */
	dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
#endif

	dtb_early_pa = dtb_pa;
}

asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;

	kernel_map.virt_addr = KERNEL_LINK_ADDR;

#ifdef CONFIG_XIP_KERNEL
	kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
	kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);

	phys_ram_base = CONFIG_PHYS_RAM_BASE;
	kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);

	kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
#else
	kernel_map.phys_addr = (uintptr_t)(&_start);
	kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif
	kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr;
	kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;

	riscv_pfn_base = PFN_DOWN(kernel_map.phys_addr);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);

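	/*
	 * The MMU is still off here, so the page tables must be statically
	 * allocated and accessed through their physical addresses: install
	 * the "early" pt_ops implementations.
	 */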
	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
#ifdef CONFIG_XIP_KERNEL
	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
			   kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
			   kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
#endif
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
			   kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup early PGD covering entire kernel which will allow
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	create_kernel_page_table(early_pg_dir, true);

	/* Setup early mapping for FDT early scan */
	create_fdt_early_page_table(early_pg_dir, dtb_pa);

	/*
	 * The boot-time fixmap can only handle PMD_SIZE mappings. Thus, the
	 * boot-ioremap range can not span multiple PMDs.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early ioremap fixmap is already created as it lies within the
	 * first 2MB of the fixmap region. We always map PMD_SIZE. Thus, both
	 * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
	 * Verify that and warn the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}

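/*
 * setup_vm_final() builds the final page tables in swapper_pg_dir: it maps
 * all memblock memory banks in the linear mapping, maps the kernel with its
 * final protections (separately on 64-bit, where the kernel lives outside
 * the linear mapping) and then switches satp over from early_pg_dir.
 */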
static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	u64 i;

	/*
	 * The MMU is enabled at this point, but the page table setup is not
	 * complete yet: the fixmap-based page table alloc functions must be
	 * used until the final page tables are in place.
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks in the linear mapping */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);
		if (end >= __pa(PAGE_OFFSET) + memory_limit)
			end = __pa(PAGE_OFFSET) + memory_limit;

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);

			create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
					   pgprot_from_va(va));
		}
	}

	/* Map the kernel */
	if (IS_ENABLED(CONFIG_64BIT))
		create_kernel_page_table(swapper_pg_dir, false);

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();

	/* generic page allocation functions must be used to setup page table */
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	dtb_early_va = (void *)dtb_pa;
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by the dump capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base = 0;
	unsigned long long crash_size = 0;
	unsigned long search_start = memblock_start_of_DRAM();
	unsigned long search_end = memblock_end_of_DRAM();

	int ret = 0;

	/*
	 * Don't reserve a region for a crash kernel on a crash kernel
	 * since it doesn't make much sense and we have limited memory
	 * resources.
	 */
	if (is_kdump_kernel()) {
		pr_info("crashkernel: ignoring reservation request\n");
		return;
	}

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base) {
		search_start = crash_base;
		search_end = crash_base + crash_size;
	}

	/*
	 * Current riscv boot protocol requires 2MB alignment for
	 * RV64 and 4MB alignment for RV32 (hugepage size)
	 *
	 * Try to alloc from 32bit addressable physical memory so that
	 * swiotlb can work on the crash kernel.
	 */
	crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
					       search_start,
					       min(search_end, (unsigned long) SZ_4G));
	if (crash_base == 0) {
		/* Try again without restricting region to 32bit addressable memory */
		crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
						search_start, search_end);
		if (crash_base == 0) {
			pr_warn("crashkernel: couldn't allocate %lldKB\n",
				crash_size >> 10);
			return;
		}
	}

	pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#endif /* CONFIG_KEXEC_CORE */

void __init paging_init(void)
{
	setup_bootmem();
	setup_vm_final();
}

void __init misc_mem_init(void)
{
	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
	arch_numa_init();
	sparse_init();
	zone_sizes_init();
#ifdef CONFIG_KEXEC_CORE
	reserve_crashkernel();
#endif
	memblock_dump_all();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
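/* The virtual memmap is populated with base pages only, no huge mappings. */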
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif