xref: /openbmc/linux/arch/riscv/mm/init.c (revision 2b076054)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>

#include "../kernel/head.h"

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
#define DTB_EARLY_BASE_VA      PGDIR_SIZE
void *dtb_early_va __initdata;
uintptr_t dtb_early_pa __initdata;

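/*
 * Page-table pages are allocated and converted to virtual addresses
 * differently depending on how far boot has progressed, so the mapping
 * helpers below go through these function pointers.  The three phases
 * (early/MMU-off, fixmap-based, and late/page-allocator-based) are wired
 * up in setup_vm() and setup_vm_final() respectively.
 */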
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};

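/*
 * max_zone_pfns[] gives the highest PFN of each zone: with
 * CONFIG_ZONE_DMA32, memory below 4 GiB (or all of lowmem, if smaller)
 * goes to ZONE_DMA32 and the rest of lowmem to ZONE_NORMAL.
 */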
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
			(unsigned long) PFN_PHYS(max_low_pfn)));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

static void setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(const char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
		  ((t - b) >> 10));
}

static inline void print_mlm(const char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
		  ((t - b) >> 20));
}

static void print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_MMU && CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
	print_vm_layout();
}

#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
	phys_addr_t start;
	unsigned long size;

	/* Ignore the virtual address computed during device tree parsing */
	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;
	/*
	 * Round the memory region to page boundaries, as per free_initrd_mem().
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, it reserves the entire set of
	 * pages, as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
		       (u64)start, size);
		goto disable;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region",
		       (u64)start, size);
		goto disable;
	}

	memblock_reserve(start, size);
	/* Now convert initrd to virtual addresses */
	initrd_start = (unsigned long)__va(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */

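/*
 * setup_bootmem() finalizes the memblock layout: it trims off memory that
 * the linear mapping cannot cover, reserves the kernel image, the initrd
 * and the DTB, and then allows memblock to resize its arrays.
 */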
void __init setup_bootmem(void)
{
	phys_addr_t mem_size = 0;
	phys_addr_t total_mem = 0;
	phys_addr_t mem_start, start, end = 0;
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);
	u64 i;

	/*
	 * Walk all memory regions: remember where memory starts, sanity-check
	 * the region containing the kernel, and accumulate the total size.
	 */
	for_each_mem_range(i, &start, &end) {
		phys_addr_t size = end - start;
		if (!total_mem)
			mem_start = start;
		if (start <= vmlinux_start && vmlinux_end <= end)
			BUG_ON(size == 0);
		total_mem = total_mem + size;
	}

	/*
	 * Remove memory past the end of the usable (linearly mappable)
	 * area up to the end of the region.
	 */
	mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
	if (mem_start + mem_size < end)
		memblock_remove(mem_start + mem_size,
				end - mem_start - mem_size);

	/* Reserve from the start of the kernel to the end of the kernel */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = max_pfn;
	set_max_mapnr(max_low_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

	/*
	 * Avoid using early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses
	 */
	memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	memblock_allow_resize();
	memblock_dump_all();
}

#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops;

unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

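/*
 * Three page tables are used while booting: trampoline_pg_dir maps just
 * enough of the kernel for the MMU to be turned on, early_pg_dir (built
 * in setup_vm() below) carries boot up to setup_vm_final(), and
 * swapper_pg_dir is the final kernel page table.
 */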
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

#define MAX_EARLY_MAPPING_SIZE	SZ_128M

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

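/*
 * Install (or, when prot is zero, clear) the PTE backing a fixmap slot;
 * fixmap virtual addresses are fixed at compile time, so only the PTE
 * needs to change.
 */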
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

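/*
 * The *_early/*_fixmap/*_late helpers below implement the pt_alloc_ops
 * phases.  Early (MMU off): page tables are static __init arrays and
 * physical addresses are usable directly.  Fixmap (MMU on, final page
 * tables not ready): pages come from memblock and are accessed through
 * the FIX_PTE/FIX_PMD slots.  Late (page allocator up): pages come from
 * __get_free_page() and __va() is valid.
 */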
static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)))
		BUG();
	return __pa(vaddr);
}

static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

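/*
 * Enough statically allocated PMD pages to cover MAX_EARLY_MAPPING_SIZE
 * of early mapping: one PMD page per PGDIR entry touched, rounded up.
 */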
#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
#define NUM_EARLY_PMDS		1UL
#else
#define NUM_EARLY_PMDS		(1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	uintptr_t pmd_num;

	pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
	BUG_ON(pmd_num >= NUM_EARLY_PMDS);
	return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

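/*
 * Install a mapping at the PMD level: a leaf huge-page PMD when
 * sz == PMD_SIZE, otherwise descend into (allocating and zeroing, if
 * needed) the next-level PTE table.
 */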
static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif

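/*
 * create_pgd_mapping() is the top-level entry point and works one level
 * up from create_pmd_mapping(): a leaf PGD when sz == PGDIR_SIZE,
 * otherwise it descends into the next level (a PMD table, or directly a
 * PTE table when PMDs are folded).  For example, setup_vm() below uses
 * create_pgd_mapping(early_pg_dir, FIXADDR_START,
 * (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE) to hook the fixmap
 * tables into the early PGD.
 */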
void __init create_pgd_mapping(pgd_t *pgdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements must be honoured for setup_vm() to work
 * correctly:
 * 1) It must use PC-relative addressing to access kernel symbols.
 *    To achieve this we always build with GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE does not work for setup_vm(),
 *    so compiler instrumentation is disabled when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t va, pa, end_va;
	uintptr_t load_pa = (uintptr_t)(&_start);
	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
	uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif

	va_pa_offset = PAGE_OFFSET - load_pa;
	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce the boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	BUG_ON(map_size == PAGE_SIZE);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);
	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);

	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
	/* Set up the early PGD for the fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Set up the fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Set up the trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Set up the trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Set up an early PGD covering the entire kernel, which allows
	 * us to reach paging_init(). All memory banks are mapped later,
	 * in setup_vm_final() below.
	 */
	end_va = PAGE_OFFSET + load_sz;
	for (va = PAGE_OFFSET; va < end_va; va += map_size)
		create_pgd_mapping(early_pg_dir, va,
				   load_pa + (va - PAGE_OFFSET),
				   map_size, PAGE_KERNEL_EXEC);

	/*
	 * Create two consecutive PGD mappings for the FDT early scan: the
	 * DTB can lie anywhere in the PGDIR_SIZE window that contains it,
	 * so two mappings are needed in case it straddles a PGDIR boundary.
	 */
	pa = dtb_pa & ~(PGDIR_SIZE - 1);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   pa, PGDIR_SIZE, PAGE_KERNEL);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
			   pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
	dtb_early_pa = dtb_pa;

	/*
	 * The boot-time fixmap can only handle PMD_SIZE mappings, so the
	 * boot-time ioremap range cannot span multiple PMDs.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early-ioremap fixmap has already been created, as it lies
	 * within the first 2 MB of the fixmap region and we always map
	 * PMD_SIZE. Thus both FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie
	 * in the same PMD. Verify that, and warn the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}

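/*
 * setup_vm_final() builds swapper_pg_dir: the fixmap is recreated there,
 * every memblock memory bank is mapped into the linear region, and satp
 * is then switched from early_pg_dir to the swapper page table.
 */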
static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	u64 i;

	/*
	 * The MMU is enabled at this point, but the page tables are not yet
	 * complete: the fixmap-based page-table alloc functions must be
	 * used here.
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Set up the swapper PGD for the fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size, PAGE_KERNEL_EXEC);
		}
	}

	/* Clear the fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to the swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();

	/*
	 * From now on, the generic page-allocation functions must be used
	 * to set up page tables.
	 */
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
#ifdef CONFIG_BUILTIN_DTB
	dtb_early_va = soc_lookup_builtin_dtb();
	if (!dtb_early_va) {
		/* Fall back to the first built-in DTB */
		dtb_early_va = (void *) __dtb_start;
	}
#else
	dtb_early_va = (void *)dtb_pa;
#endif
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_STRICT_KERNEL_RWX
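/*
 * Called late in boot: drops write permission from the kernel text and
 * rodata, and execute permission from everything after the text.
 */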
void mark_rodata_ro(void)
{
	unsigned long text_start = (unsigned long)_text;
	unsigned long text_end = (unsigned long)_etext;
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;
	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));

	set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT);
	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);

	debug_checkwx();
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

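/*
 * Register every memblock memory region in the resource tree (and thus
 * /proc/iomem): nomap regions as "reserved", the rest as "System RAM".
 */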
static void __init resource_init(void)
{
	struct memblock_region *region;

	for_each_mem_region(region) {
		struct resource *res;

		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct resource));

		if (memblock_is_nomap(region)) {
			res->name = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);
	}
}

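/*
 * Bring up the final page tables, the sparse memory model, the zone
 * sizes and the resource tree.
 */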
void __init paging_init(void)
{
	setup_vm_final();
	sparse_init();
	setup_zero_page();
	zone_sizes_init();
	resource_init();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
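/*
 * The virtual memmap is populated with base pages only; the altmap
 * argument is ignored (NULL is passed down).
 */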
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */