// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>
#include <asm/numa.h>

#include "../kernel/head.h"

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
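/*
 * Unless the DTB is built into the kernel, setup_vm() temporarily maps the
 * FDT handed over by the bootloader at the fixed virtual address
 * DTB_EARLY_BASE_VA so that it can be parsed before the linear mapping is
 * available.  dtb_early_va and dtb_early_pa record the FDT's virtual and
 * physical addresses during that early phase.
 */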
#define DTB_EARLY_BASE_VA      PGDIR_SIZE
void *dtb_early_va __initdata;
uintptr_t dtb_early_pa __initdata;

struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};
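/*
 * Page table allocation goes through three phases, selected by swapping the
 * pt_ops callbacks:
 *   - "early":  MMU still off in setup_vm(), only statically allocated
 *               tables can be used;
 *   - "fixmap": MMU on but only memblock is available, used by
 *               setup_vm_final();
 *   - "late":   the normal page allocator is up, used for all later
 *               page table allocations.
 */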

static phys_addr_t dma32_phys_limit __ro_after_init;

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

static void setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

static void print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
	print_vm_layout();
}

void __init setup_bootmem(void)
{
	phys_addr_t mem_start = 0;
	phys_addr_t start, end = 0;
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);
	u64 i;

	/* Find the memory region containing the kernel */
	for_each_mem_range(i, &start, &end) {
		phys_addr_t size = end - start;
		if (!mem_start)
			mem_start = start;
		if (start <= vmlinux_start && vmlinux_end <= end)
			BUG_ON(size == 0);
	}

	/*
	 * The maximal physical memory size is -PAGE_OFFSET.
	 * Make sure that any memory beyond mem_start + (-PAGE_OFFSET) is
	 * removed as it is unusable by the kernel.
	 */
	memblock_enforce_memory_limit(-PAGE_OFFSET);

	/* Reserve from the start of the kernel to the end of the kernel */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = max_pfn;
	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
	set_max_mapnr(max_low_pfn);

	reserve_initrd_mem();
	/*
	 * If the DTB is built in, there is no need to reserve its memblock.
	 * Otherwise, reserve it, but avoid using
	 * early_init_fdt_reserve_self() since __pa() does not work for
	 * DTB pointers that are fixmap addresses.
	 */
	if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	dma_contiguous_reserve(dma32_phys_limit);
	memblock_allow_resize();
}

#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops;

unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

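/*
 * Three page table roots are used while bringing up the MMU:
 * trampoline_pg_dir maps just enough of the kernel for the MMU to be
 * enabled, early_pg_dir additionally covers the whole kernel image, the
 * fixmap and the early FDT mapping until setup_vm_final() runs, and
 * swapper_pg_dir is the final kernel page table mapping all memory banks.
 */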
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)))
		BUG();
	return __pa(vaddr);
}

static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT);

	return (uintptr_t)early_pmd;
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif
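/*
 * The pgd_next_* helpers let create_pgd_mapping() work on both layouts:
 * with a real PMD level, PGD entries point at PMD tables; when the PMD is
 * folded, PGD entries point directly at PTE tables.
 */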

void __init create_pgd_mapping(pgd_t *pgdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

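/*
 * Pick the largest mapping granularity usable for a region: PMD_SIZE block
 * mappings when both the base and the size are PMD-aligned, base pages
 * otherwise.
 */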
static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

/*
 * setup_vm() is called from head.S with MMU-off.
 *
 * The following requirements should be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t va, pa, end_va;
	uintptr_t load_pa = (uintptr_t)(&_start);
	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
	uintptr_t map_size;
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif

	va_pa_offset = PAGE_OFFSET - load_pa;
	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	map_size = PMD_SIZE;

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);

	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup early PGD covering the entire kernel, which allows
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	end_va = PAGE_OFFSET + load_sz;
	for (va = PAGE_OFFSET; va < end_va; va += map_size)
		create_pgd_mapping(early_pg_dir, va,
				   load_pa + (va - PAGE_OFFSET),
				   map_size, PAGE_KERNEL_EXEC);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup early PMD for DTB */
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
#ifndef CONFIG_BUILTIN_DTB
	/* Create two consecutive PMD mappings for FDT early scan */
	pa = dtb_pa & ~(PMD_SIZE - 1);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
			   pa, PMD_SIZE, PAGE_KERNEL);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
			   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
	dtb_early_va = __va(dtb_pa);
#endif /* CONFIG_BUILTIN_DTB */
#else
#ifndef CONFIG_BUILTIN_DTB
	/* Create two consecutive PGD mappings for FDT early scan */
	pa = dtb_pa & ~(PGDIR_SIZE - 1);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   pa, PGDIR_SIZE, PAGE_KERNEL);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
			   pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
	dtb_early_va = __va(dtb_pa);
#endif /* CONFIG_BUILTIN_DTB */
#endif
	dtb_early_pa = dtb_pa;

	/*
	 * The boot-time fixmap can only handle a PMD_SIZE mapping. Thus,
	 * the boot-time ioremap range cannot span multiple PMDs.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early ioremap fixmap is already created as it lies within the
	 * first 2MB of the fixmap region. We always map PMD_SIZE, so both
	 * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
	 * Verify that and warn the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}

static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	u64 i;

	/*
	 * The MMU is enabled at this point, but the page tables are not
	 * complete yet, so the fixmap-based page table allocation functions
	 * must be used.
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size, PAGE_KERNEL_EXEC);
		}
	}

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();

	/* Generic page allocation functions must be used to set up page tables */
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	dtb_early_va = (void *)dtb_pa;
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_STRICT_KERNEL_RWX
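/*
 * Write-protect the kernel and init text and strip execute permission from
 * init data, rodata and the data/bss region.  The rodata section itself is
 * made read-only later, from mark_rodata_ro().
 */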
void protect_kernel_text_data(void)
{
	unsigned long text_start = (unsigned long)_start;
	unsigned long init_text_start = (unsigned long)__init_text_begin;
	unsigned long init_data_start = (unsigned long)__init_data_begin;
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;
	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));

	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
	set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
	set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
	/* The rodata section is marked read-only in mark_rodata_ro() */
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;

	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);

	debug_checkwx();
}
#endif

void __init paging_init(void)
{
	setup_vm_final();
	setup_zero_page();
}

void __init misc_mem_init(void)
{
	arch_numa_init();
	sparse_init();
	zone_sizes_init();
	memblock_dump_all();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
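/* The virtual memmap is always backed by base pages, see vmemmap_populate_basepages(). */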
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif
614