xref: /openbmc/linux/arch/riscv/mm/kasan_init.c (revision 9f2ac64d6ca60db99132e08628ac2899f956a0ec)
18ad8b727SNick Hu // SPDX-License-Identifier: GPL-2.0
28ad8b727SNick Hu // Copyright (C) 2019 Andes Technology Corporation
38ad8b727SNick Hu 
48ad8b727SNick Hu #include <linux/pfn.h>
58ad8b727SNick Hu #include <linux/init_task.h>
68ad8b727SNick Hu #include <linux/kasan.h>
78ad8b727SNick Hu #include <linux/kernel.h>
88ad8b727SNick Hu #include <linux/memblock.h>
9ca5999fdSMike Rapoport #include <linux/pgtable.h>
1065fddcfcSMike Rapoport #include <asm/tlbflush.h>
118ad8b727SNick Hu #include <asm/fixmap.h>
12e178d670SNylon Chen #include <asm/pgalloc.h>
13e178d670SNylon Chen 
14e8a62cc2SAlexandre Ghiti /*
15e8a62cc2SAlexandre Ghiti  * Kasan shadow region must lie at a fixed address across sv39, sv48 and sv57
16e8a62cc2SAlexandre Ghiti  * which is right before the kernel.
17e8a62cc2SAlexandre Ghiti  *
18e8a62cc2SAlexandre Ghiti  * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
19e8a62cc2SAlexandre Ghiti  * the page global directory with kasan_early_shadow_pmd.
20e8a62cc2SAlexandre Ghiti  *
21e8a62cc2SAlexandre Ghiti  * For sv48 and sv57, the region is not aligned on PGDIR_SIZE so the mapping
22e8a62cc2SAlexandre Ghiti  * must be divided as follows:
23e8a62cc2SAlexandre Ghiti  * - the first PGD entry, although incomplete, is populated with
24e8a62cc2SAlexandre Ghiti  *   kasan_early_shadow_pud/p4d
25e8a62cc2SAlexandre Ghiti  * - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
26e8a62cc2SAlexandre Ghiti  * - the last PGD entry is shared with the kernel mapping so populated at the
27e8a62cc2SAlexandre Ghiti  *   lower levels pud/p4d
28e8a62cc2SAlexandre Ghiti  *
29e8a62cc2SAlexandre Ghiti  * In addition, when shallow populating a kasan region (for example vmalloc),
30e8a62cc2SAlexandre Ghiti  * this region may also not be aligned on PGDIR size, so we must go down to the
31e8a62cc2SAlexandre Ghiti  * pud level too.
32e8a62cc2SAlexandre Ghiti  */
33e8a62cc2SAlexandre Ghiti 
348ad8b727SNick Hu extern pgd_t early_pg_dir[PTRS_PER_PGD];
358ad8b727SNick Hu 
/*
 * Populate the pte level of the kasan shadow mapping for [vaddr, end):
 * back every still-empty pte with a freshly allocated shadow page.
 * The pmd entry is (re)written only after the whole pte table is filled.
 */
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	/* Reuse an existing pte table if the pmd already points at one. */
	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		/* Only populate entries that are not already mapped. */
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	/* Install the fully populated table (see comment in kasan_populate_pmd). */
	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}
578ad8b727SNick Hu 
/*
 * Populate the pmd level of the kasan shadow mapping for [vaddr, end),
 * preferring PMD-sized hugepage mappings and falling back to pte
 * granularity when alignment, size, or allocation does not allow one.
 */
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	} else {
		base_pmd = (pmd_t *)pud_pgtable(*pud);
		/* Never write into the shared early shadow pmd: take a private table. */
		if (base_pmd == lm_alias(kasan_early_shadow_pmd))
			base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	}

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		/* Try a PMD-sized hugepage when the range covers a whole aligned pmd. */
		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PGD to be populated before setting the PGD in
	 * the page table, otherwise, if we did set the PGD before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}
96d127c19cSAlexandre Ghiti 
/*
 * Populate the pud level of the kasan shadow mapping for [vaddr, end).
 * @early selects the boot-time path where the linear mapping is not usable
 * yet, so the base table must be resolved through pt_ops instead of
 * pgd_page_vaddr().
 */
static void __init kasan_populate_pud(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr here as it would return a linear
		 * mapping address but it is not mapped yet, but when populating
		 * early_pg_dir, we need the physical address and when populating
		 * swapper_pg_dir, we need the kernel virtual address so use
		 * pt_ops facility.
		 */
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else if (pgd_none(*pgd)) {
		/* Fresh entry: start from a private copy of the early shadow pud. */
		base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
		memcpy(base_pud, (void *)kasan_early_shadow_pud,
			sizeof(pud_t) * PTRS_PER_PUD);
	} else {
		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
		/* Never write into the shared early shadow pud: take a private copy. */
		if (base_pud == lm_alias(kasan_early_shadow_pud)) {
			base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
			memcpy(base_pud, (void *)kasan_early_shadow_pud,
			       sizeof(pud_t) * PTRS_PER_PUD);
		}
	}

	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		/* Try to map a whole aligned pud at once. */
		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			if (early) {
				/* Early: point at the shared early shadow pmd table. */
				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pmd));
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				/* Late: try a PUD-sized hugepage, fall through on failure. */
				phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
				if (phys_addr) {
					set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PGD to be populated before setting the PGD in
	 * the page table, otherwise, if we did set the PGD before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}
158e8a62cc2SAlexandre Ghiti 
1598fbdccd2SQinglin Pan static void __init kasan_populate_p4d(pgd_t *pgd,
1608fbdccd2SQinglin Pan 				      unsigned long vaddr, unsigned long end,
1618fbdccd2SQinglin Pan 				      bool early)
1628fbdccd2SQinglin Pan {
1638fbdccd2SQinglin Pan 	phys_addr_t phys_addr;
1648fbdccd2SQinglin Pan 	p4d_t *p4dp, *base_p4d;
1658fbdccd2SQinglin Pan 	unsigned long next;
1668fbdccd2SQinglin Pan 
1678fbdccd2SQinglin Pan 	if (early) {
1688fbdccd2SQinglin Pan 		/*
1698fbdccd2SQinglin Pan 		 * We can't use pgd_page_vaddr here as it would return a linear
1708fbdccd2SQinglin Pan 		 * mapping address but it is not mapped yet, but when populating
1718fbdccd2SQinglin Pan 		 * early_pg_dir, we need the physical address and when populating
1728fbdccd2SQinglin Pan 		 * swapper_pg_dir, we need the kernel virtual address so use
1738fbdccd2SQinglin Pan 		 * pt_ops facility.
1748fbdccd2SQinglin Pan 		 */
1758fbdccd2SQinglin Pan 		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgd)));
1768fbdccd2SQinglin Pan 	} else {
1778fbdccd2SQinglin Pan 		base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
178*9f2ac64dSQinglin Pan 		if (base_p4d == lm_alias(kasan_early_shadow_p4d)) {
1798fbdccd2SQinglin Pan 			base_p4d = memblock_alloc(PTRS_PER_PUD * sizeof(p4d_t), PAGE_SIZE);
180*9f2ac64dSQinglin Pan 			memcpy(base_p4d, (void *)kasan_early_shadow_p4d,
181*9f2ac64dSQinglin Pan 				sizeof(p4d_t) * PTRS_PER_P4D);
182*9f2ac64dSQinglin Pan 		}
1838fbdccd2SQinglin Pan 	}
1848fbdccd2SQinglin Pan 
1858fbdccd2SQinglin Pan 	p4dp = base_p4d + p4d_index(vaddr);
1868fbdccd2SQinglin Pan 
1878fbdccd2SQinglin Pan 	do {
1888fbdccd2SQinglin Pan 		next = p4d_addr_end(vaddr, end);
1898fbdccd2SQinglin Pan 
1908fbdccd2SQinglin Pan 		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
1918fbdccd2SQinglin Pan 			if (early) {
1928fbdccd2SQinglin Pan 				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pud));
1938fbdccd2SQinglin Pan 				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
1948fbdccd2SQinglin Pan 				continue;
1958fbdccd2SQinglin Pan 			} else {
1968fbdccd2SQinglin Pan 				phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
1978fbdccd2SQinglin Pan 				if (phys_addr) {
1988fbdccd2SQinglin Pan 					set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
1998fbdccd2SQinglin Pan 					continue;
2008fbdccd2SQinglin Pan 				}
2018fbdccd2SQinglin Pan 			}
2028fbdccd2SQinglin Pan 		}
2038fbdccd2SQinglin Pan 
2048fbdccd2SQinglin Pan 		kasan_populate_pud((pgd_t *)p4dp, vaddr, next, early);
2058fbdccd2SQinglin Pan 	} while (p4dp++, vaddr = next, vaddr != end);
2068fbdccd2SQinglin Pan 
2078fbdccd2SQinglin Pan 	/*
2088fbdccd2SQinglin Pan 	 * Wait for the whole P4D to be populated before setting the P4D in
2098fbdccd2SQinglin Pan 	 * the page table, otherwise, if we did set the P4D before populating
2108fbdccd2SQinglin Pan 	 * it entirely, memblock could allocate a page at a physical address
2118fbdccd2SQinglin Pan 	 * where KASAN is not populated yet and then we'd get a page fault.
2128fbdccd2SQinglin Pan 	 */
2138fbdccd2SQinglin Pan 	if (!early)
2148fbdccd2SQinglin Pan 		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_p4d)), PAGE_TABLE));
2158fbdccd2SQinglin Pan }
2168fbdccd2SQinglin Pan 
/*
 * Select the early shadow table / populate routine one level below the PGD
 * for the active paging mode: sv57 -> p4d, sv48 -> pud, sv39 -> pmd.
 */
#define kasan_early_shadow_pgd_next			(pgtable_l5_enabled ?	\
				(uintptr_t)kasan_early_shadow_p4d :		\
							(pgtable_l4_enabled ?	\
				(uintptr_t)kasan_early_shadow_pud :		\
				(uintptr_t)kasan_early_shadow_pmd))
#define kasan_populate_pgd_next(pgdp, vaddr, next, early)			\
		(pgtable_l5_enabled ?						\
		kasan_populate_p4d(pgdp, vaddr, next, early) :			\
		(pgtable_l4_enabled ?						\
			kasan_populate_pud(pgdp, vaddr, next, early) :		\
			kasan_populate_pmd((pud_t *)pgdp, vaddr, next)))
228e8a62cc2SAlexandre Ghiti 
/*
 * Populate the top (pgd) level of the kasan shadow mapping for [vaddr, end).
 * @early: boot-time path, where entries point at the shared early shadow
 * tables instead of privately allocated ones.
 */
static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		/* A whole aligned PGD entry can be mapped without recursing. */
		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			if (early) {
				phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else if (pgd_page_vaddr(*pgdp) ==
				   (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
				/*
				 * pgdp can't be none since kasan_early_init
				 * initialized all KASAN shadow region with
				 * kasan_early_shadow_pud: if this is still the
				 * case, that means we can try to allocate a
				 * hugepage as a replacement.
				 */
				phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
				if (phys_addr) {
					set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		/* Otherwise populate the next level (p4d/pud/pmd per paging mode). */
		kasan_populate_pgd_next(pgdp, vaddr, next, early);
	} while (pgdp++, vaddr = next, vaddr != end);
}
264d127c19cSAlexandre Ghiti 
/*
 * Build the early KASAN shadow: every address of the shadow region is
 * mapped, through shared per-level shadow tables, onto the single
 * kasan_early_shadow_page, so shadow accesses are valid before the real
 * shadow memory is populated.
 */
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	/* Every pte points at the one shared shadow page. */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

	/* Every pmd points at the shared pte table. */
	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	/* Extra shared levels are only needed for sv48/sv57. */
	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
					PAGE_TABLE));
	}

	if (pgtable_l5_enabled) {
		for (i = 0; i < PTRS_PER_P4D; ++i)
			set_p4d(kasan_early_shadow_p4d + i,
				pfn_p4d(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pud))),
					PAGE_TABLE));
	}

	/* Wire the whole shadow region into the early page directory. */
	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}
3032efad17eSAlexandre Ghiti 
/*
 * Mirror the early shadow mapping into swapper_pg_dir. early=true is
 * intentional: swapper_pg_dir's lower-level tables must also be resolved
 * through pt_ops here (see the comment in kasan_populate_pud).
 */
void __init kasan_swapper_init(void)
{
	kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}
3112efad17eSAlexandre Ghiti 
312d127c19cSAlexandre Ghiti static void __init kasan_populate(void *start, void *end)
313d127c19cSAlexandre Ghiti {
314d127c19cSAlexandre Ghiti 	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
315d127c19cSAlexandre Ghiti 	unsigned long vend = PAGE_ALIGN((unsigned long)end);
316d127c19cSAlexandre Ghiti 
3172efad17eSAlexandre Ghiti 	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend, false);
3188ad8b727SNick Hu 
3194cb699d0SVincent Chen 	local_flush_tlb_all();
3209484e2aeSAlexandre Ghiti 	memset(start, KASAN_SHADOW_INIT, end - start);
3218ad8b727SNick Hu }
3228ad8b727SNick Hu 
3238fbdccd2SQinglin Pan static void __init kasan_shallow_populate_pmd(pgd_t *pgdp,
3248fbdccd2SQinglin Pan 					      unsigned long vaddr, unsigned long end)
3258fbdccd2SQinglin Pan {
3268fbdccd2SQinglin Pan 	unsigned long next;
3278fbdccd2SQinglin Pan 	pmd_t *pmdp, *base_pmd;
3288fbdccd2SQinglin Pan 	bool is_kasan_pte;
3298fbdccd2SQinglin Pan 
3308fbdccd2SQinglin Pan 	base_pmd = (pmd_t *)pgd_page_vaddr(*pgdp);
3318fbdccd2SQinglin Pan 	pmdp = base_pmd + pmd_index(vaddr);
3328fbdccd2SQinglin Pan 
3338fbdccd2SQinglin Pan 	do {
3348fbdccd2SQinglin Pan 		next = pmd_addr_end(vaddr, end);
3358fbdccd2SQinglin Pan 		is_kasan_pte = (pmd_pgtable(*pmdp) == lm_alias(kasan_early_shadow_pte));
3368fbdccd2SQinglin Pan 
3378fbdccd2SQinglin Pan 		if (is_kasan_pte)
3388fbdccd2SQinglin Pan 			pmd_clear(pmdp);
3398fbdccd2SQinglin Pan 	} while (pmdp++, vaddr = next, vaddr != end);
3408fbdccd2SQinglin Pan }
3418fbdccd2SQinglin Pan 
342e8a62cc2SAlexandre Ghiti static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
3438fbdccd2SQinglin Pan 					      unsigned long vaddr, unsigned long end)
344e8a62cc2SAlexandre Ghiti {
345e8a62cc2SAlexandre Ghiti 	unsigned long next;
346e8a62cc2SAlexandre Ghiti 	pud_t *pudp, *base_pud;
347e8a62cc2SAlexandre Ghiti 	pmd_t *base_pmd;
348e8a62cc2SAlexandre Ghiti 	bool is_kasan_pmd;
349e8a62cc2SAlexandre Ghiti 
350e8a62cc2SAlexandre Ghiti 	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
351e8a62cc2SAlexandre Ghiti 	pudp = base_pud + pud_index(vaddr);
352e8a62cc2SAlexandre Ghiti 
353e8a62cc2SAlexandre Ghiti 	do {
354e8a62cc2SAlexandre Ghiti 		next = pud_addr_end(vaddr, end);
355e8a62cc2SAlexandre Ghiti 		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));
356e8a62cc2SAlexandre Ghiti 
3578fbdccd2SQinglin Pan 		if (!is_kasan_pmd)
3588fbdccd2SQinglin Pan 			continue;
3598fbdccd2SQinglin Pan 
360e8a62cc2SAlexandre Ghiti 		base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
361e8a62cc2SAlexandre Ghiti 		set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
3628fbdccd2SQinglin Pan 
3638fbdccd2SQinglin Pan 		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE)
3648fbdccd2SQinglin Pan 			continue;
3658fbdccd2SQinglin Pan 
3668fbdccd2SQinglin Pan 		memcpy(base_pmd, (void *)kasan_early_shadow_pmd, PAGE_SIZE);
3678fbdccd2SQinglin Pan 		kasan_shallow_populate_pmd((pgd_t *)pudp, vaddr, next);
368e8a62cc2SAlexandre Ghiti 	} while (pudp++, vaddr = next, vaddr != end);
369e8a62cc2SAlexandre Ghiti }
370e8a62cc2SAlexandre Ghiti 
3718fbdccd2SQinglin Pan static void __init kasan_shallow_populate_p4d(pgd_t *pgdp,
3728fbdccd2SQinglin Pan 					      unsigned long vaddr, unsigned long end)
3738fbdccd2SQinglin Pan {
3748fbdccd2SQinglin Pan 	unsigned long next;
3758fbdccd2SQinglin Pan 	p4d_t *p4dp, *base_p4d;
3768fbdccd2SQinglin Pan 	pud_t *base_pud;
3778fbdccd2SQinglin Pan 	bool is_kasan_pud;
3788fbdccd2SQinglin Pan 
3798fbdccd2SQinglin Pan 	base_p4d = (p4d_t *)pgd_page_vaddr(*pgdp);
3808fbdccd2SQinglin Pan 	p4dp = base_p4d + p4d_index(vaddr);
3818fbdccd2SQinglin Pan 
3828fbdccd2SQinglin Pan 	do {
3838fbdccd2SQinglin Pan 		next = p4d_addr_end(vaddr, end);
3848fbdccd2SQinglin Pan 		is_kasan_pud = (p4d_pgtable(*p4dp) == lm_alias(kasan_early_shadow_pud));
3858fbdccd2SQinglin Pan 
3868fbdccd2SQinglin Pan 		if (!is_kasan_pud)
3878fbdccd2SQinglin Pan 			continue;
3888fbdccd2SQinglin Pan 
3898fbdccd2SQinglin Pan 		base_pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
3908fbdccd2SQinglin Pan 		set_p4d(p4dp, pfn_p4d(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
3918fbdccd2SQinglin Pan 
3928fbdccd2SQinglin Pan 		if (IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE)
3938fbdccd2SQinglin Pan 			continue;
3948fbdccd2SQinglin Pan 
3958fbdccd2SQinglin Pan 		memcpy(base_pud, (void *)kasan_early_shadow_pud, PAGE_SIZE);
3968fbdccd2SQinglin Pan 		kasan_shallow_populate_pud((pgd_t *)p4dp, vaddr, next);
3978fbdccd2SQinglin Pan 	} while (p4dp++, vaddr = next, vaddr != end);
3988fbdccd2SQinglin Pan }
3998fbdccd2SQinglin Pan 
/*
 * Dispatch shallow population to the level right below the PGD for the
 * active paging mode: sv57 -> p4d, sv48 -> pud, sv39 -> pmd.
 */
#define kasan_shallow_populate_pgd_next(pgdp, vaddr, next)			\
		(pgtable_l5_enabled ?						\
		kasan_shallow_populate_p4d(pgdp, vaddr, next) :			\
		(pgtable_l4_enabled ?						\
		kasan_shallow_populate_pud(pgdp, vaddr, next) :			\
		kasan_shallow_populate_pmd(pgdp, vaddr, next)))
4068fbdccd2SQinglin Pan 
4072da073c1SAlexandre Ghiti static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
4082da073c1SAlexandre Ghiti {
4092da073c1SAlexandre Ghiti 	unsigned long next;
4102da073c1SAlexandre Ghiti 	void *p;
4112da073c1SAlexandre Ghiti 	pgd_t *pgd_k = pgd_offset_k(vaddr);
412e8a62cc2SAlexandre Ghiti 	bool is_kasan_pgd_next;
4132da073c1SAlexandre Ghiti 
4142da073c1SAlexandre Ghiti 	do {
4152da073c1SAlexandre Ghiti 		next = pgd_addr_end(vaddr, end);
416e8a62cc2SAlexandre Ghiti 		is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
417e8a62cc2SAlexandre Ghiti 				     (unsigned long)lm_alias(kasan_early_shadow_pgd_next));
418e8a62cc2SAlexandre Ghiti 
419e8a62cc2SAlexandre Ghiti 		if (is_kasan_pgd_next) {
4202da073c1SAlexandre Ghiti 			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
4212da073c1SAlexandre Ghiti 			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
4222da073c1SAlexandre Ghiti 		}
423e8a62cc2SAlexandre Ghiti 
424e8a62cc2SAlexandre Ghiti 		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
425e8a62cc2SAlexandre Ghiti 			continue;
426e8a62cc2SAlexandre Ghiti 
4278fbdccd2SQinglin Pan 		memcpy(p, (void *)kasan_early_shadow_pgd_next, PAGE_SIZE);
4288fbdccd2SQinglin Pan 		kasan_shallow_populate_pgd_next(pgd_k, vaddr, next);
4292da073c1SAlexandre Ghiti 	} while (pgd_k++, vaddr = next, vaddr != end);
4302da073c1SAlexandre Ghiti }
4312da073c1SAlexandre Ghiti 
43278947bdfSPalmer Dabbelt static void __init kasan_shallow_populate(void *start, void *end)
433e178d670SNylon Chen {
434e178d670SNylon Chen 	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
435e178d670SNylon Chen 	unsigned long vend = PAGE_ALIGN((unsigned long)end);
436e178d670SNylon Chen 
4372da073c1SAlexandre Ghiti 	kasan_shallow_populate_pgd(vaddr, vend);
438f3773dd0SAlexandre Ghiti 	local_flush_tlb_all();
4398ad8b727SNick Hu }
4408ad8b727SNick Hu 
/*
 * Main KASAN setup: shallow-populate the vmalloc shadow (if enabled),
 * fully populate the shadow for the linear mapping and the kernel/BPF/
 * modules region, then remap the early shadow page read-only and enable
 * reporting.
 */
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	/*
	 * Remap the early shadow page without _PAGE_WRITE so any stray write
	 * to still-shared shadow faults instead of corrupting it.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	/* Shadow is fully set up: let KASAN start reporting for init_task. */
	init_task.kasan_depth = 0;
}
475