xref: /openbmc/linux/arch/riscv/mm/kasan_init.c (revision 8fbdccd2b17335e1881a23865e98c63fcc345938)
18ad8b727SNick Hu // SPDX-License-Identifier: GPL-2.0
28ad8b727SNick Hu // Copyright (C) 2019 Andes Technology Corporation
38ad8b727SNick Hu 
48ad8b727SNick Hu #include <linux/pfn.h>
58ad8b727SNick Hu #include <linux/init_task.h>
68ad8b727SNick Hu #include <linux/kasan.h>
78ad8b727SNick Hu #include <linux/kernel.h>
88ad8b727SNick Hu #include <linux/memblock.h>
9ca5999fdSMike Rapoport #include <linux/pgtable.h>
1065fddcfcSMike Rapoport #include <asm/tlbflush.h>
118ad8b727SNick Hu #include <asm/fixmap.h>
12e178d670SNylon Chen #include <asm/pgalloc.h>
13e178d670SNylon Chen 
14e8a62cc2SAlexandre Ghiti /*
15e8a62cc2SAlexandre Ghiti  * Kasan shadow region must lie at a fixed address across sv39, sv48 and sv57
16e8a62cc2SAlexandre Ghiti  * which is right before the kernel.
17e8a62cc2SAlexandre Ghiti  *
18e8a62cc2SAlexandre Ghiti  * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
19e8a62cc2SAlexandre Ghiti  * the page global directory with kasan_early_shadow_pmd.
20e8a62cc2SAlexandre Ghiti  *
21e8a62cc2SAlexandre Ghiti  * For sv48 and sv57, the region is not aligned on PGDIR_SIZE so the mapping
22e8a62cc2SAlexandre Ghiti  * must be divided as follows:
23e8a62cc2SAlexandre Ghiti  * - the first PGD entry, although incomplete, is populated with
24e8a62cc2SAlexandre Ghiti  *   kasan_early_shadow_pud/p4d
25e8a62cc2SAlexandre Ghiti  * - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
26e8a62cc2SAlexandre Ghiti  * - the last PGD entry is shared with the kernel mapping so populated at the
27e8a62cc2SAlexandre Ghiti  *   lower levels pud/p4d
28e8a62cc2SAlexandre Ghiti  *
29e8a62cc2SAlexandre Ghiti  * In addition, when shallow populating a kasan region (for example vmalloc),
30e8a62cc2SAlexandre Ghiti  * this region may also not be aligned on PGDIR size, so we must go down to the
31e8a62cc2SAlexandre Ghiti  * pud level too.
32e8a62cc2SAlexandre Ghiti  */
33e8a62cc2SAlexandre Ghiti 
348ad8b727SNick Hu extern pgd_t early_pg_dir[PTRS_PER_PGD];
358ad8b727SNick Hu 
/*
 * Populate the PTE level for the shadow range [vaddr, end) under @pmd.
 * Each not-yet-present PTE gets a freshly allocated physical page mapped
 * with PAGE_KERNEL (read-write) so the shadow can be written.
 */
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	/* Reuse an existing PTE table if one is already installed. */
	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	/*
	 * Install the PTE table only after it is fully populated, so that a
	 * memblock allocation above cannot land in a shadow hole and fault.
	 */
	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}
578ad8b727SNick Hu 
/*
 * Populate the PMD level for the shadow range [vaddr, end) under @pud.
 * Uses PMD-sized hugepage mappings where the range is aligned and a
 * PMD-sized physical chunk is available, falling back to 4K PTEs otherwise.
 */
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	} else {
		base_pmd = (pmd_t *)pud_pgtable(*pud);
		/*
		 * The early shadow PMD is shared by all early mappings: it
		 * must not be written through, so replace it with a private
		 * table before populating.
		 */
		if (base_pmd == lm_alias(kasan_early_shadow_pmd))
			base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	}

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		/* Try a hugepage first; on allocation failure use 4K PTEs. */
		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PGD to be populated before setting the PGD in
	 * the page table, otherwise, if we did set the PGD before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}
96d127c19cSAlexandre Ghiti 
/*
 * Populate the PUD level for the shadow range [vaddr, end) under @pgd.
 *
 * @early: true while still running on early_pg_dir (before the linear
 *         mapping is usable); in that case the shared early shadow tables
 *         are installed instead of allocating real shadow memory, and the
 *         PUD table virtual address must be obtained through pt_ops.
 */
static void __init kasan_populate_pud(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr here as it would return a linear
		 * mapping address but it is not mapped yet, but when populating
		 * early_pg_dir, we need the physical address and when populating
		 * swapper_pg_dir, we need the kernel virtual address so use
		 * pt_ops facility.
		 */
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else if (pgd_none(*pgd)) {
		base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
	} else {
		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
		/* Shared early shadow PUD: replace with a private table. */
		if (base_pud == lm_alias(kasan_early_shadow_pud))
			base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
	}

	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			if (early) {
				/* Point at the shared early shadow PMD table. */
				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pmd));
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				/* Try a PUD-sized hugepage; fall through on failure. */
				phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
				if (phys_addr) {
					set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PGD to be populated before setting the PGD in
	 * the page table, otherwise, if we did set the PGD before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}
153e8a62cc2SAlexandre Ghiti 
154*8fbdccd2SQinglin Pan static void __init kasan_populate_p4d(pgd_t *pgd,
155*8fbdccd2SQinglin Pan 				      unsigned long vaddr, unsigned long end,
156*8fbdccd2SQinglin Pan 				      bool early)
157*8fbdccd2SQinglin Pan {
158*8fbdccd2SQinglin Pan 	phys_addr_t phys_addr;
159*8fbdccd2SQinglin Pan 	p4d_t *p4dp, *base_p4d;
160*8fbdccd2SQinglin Pan 	unsigned long next;
161*8fbdccd2SQinglin Pan 
162*8fbdccd2SQinglin Pan 	if (early) {
163*8fbdccd2SQinglin Pan 		/*
164*8fbdccd2SQinglin Pan 		 * We can't use pgd_page_vaddr here as it would return a linear
165*8fbdccd2SQinglin Pan 		 * mapping address but it is not mapped yet, but when populating
166*8fbdccd2SQinglin Pan 		 * early_pg_dir, we need the physical address and when populating
167*8fbdccd2SQinglin Pan 		 * swapper_pg_dir, we need the kernel virtual address so use
168*8fbdccd2SQinglin Pan 		 * pt_ops facility.
169*8fbdccd2SQinglin Pan 		 */
170*8fbdccd2SQinglin Pan 		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgd)));
171*8fbdccd2SQinglin Pan 	} else {
172*8fbdccd2SQinglin Pan 		base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
173*8fbdccd2SQinglin Pan 		if (base_p4d == lm_alias(kasan_early_shadow_p4d))
174*8fbdccd2SQinglin Pan 			base_p4d = memblock_alloc(PTRS_PER_PUD * sizeof(p4d_t), PAGE_SIZE);
175*8fbdccd2SQinglin Pan 	}
176*8fbdccd2SQinglin Pan 
177*8fbdccd2SQinglin Pan 	p4dp = base_p4d + p4d_index(vaddr);
178*8fbdccd2SQinglin Pan 
179*8fbdccd2SQinglin Pan 	do {
180*8fbdccd2SQinglin Pan 		next = p4d_addr_end(vaddr, end);
181*8fbdccd2SQinglin Pan 
182*8fbdccd2SQinglin Pan 		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
183*8fbdccd2SQinglin Pan 			if (early) {
184*8fbdccd2SQinglin Pan 				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pud));
185*8fbdccd2SQinglin Pan 				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
186*8fbdccd2SQinglin Pan 				continue;
187*8fbdccd2SQinglin Pan 			} else {
188*8fbdccd2SQinglin Pan 				phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
189*8fbdccd2SQinglin Pan 				if (phys_addr) {
190*8fbdccd2SQinglin Pan 					set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
191*8fbdccd2SQinglin Pan 					continue;
192*8fbdccd2SQinglin Pan 				}
193*8fbdccd2SQinglin Pan 			}
194*8fbdccd2SQinglin Pan 		}
195*8fbdccd2SQinglin Pan 
196*8fbdccd2SQinglin Pan 		kasan_populate_pud((pgd_t *)p4dp, vaddr, next, early);
197*8fbdccd2SQinglin Pan 	} while (p4dp++, vaddr = next, vaddr != end);
198*8fbdccd2SQinglin Pan 
199*8fbdccd2SQinglin Pan 	/*
200*8fbdccd2SQinglin Pan 	 * Wait for the whole P4D to be populated before setting the P4D in
201*8fbdccd2SQinglin Pan 	 * the page table, otherwise, if we did set the P4D before populating
202*8fbdccd2SQinglin Pan 	 * it entirely, memblock could allocate a page at a physical address
203*8fbdccd2SQinglin Pan 	 * where KASAN is not populated yet and then we'd get a page fault.
204*8fbdccd2SQinglin Pan 	 */
205*8fbdccd2SQinglin Pan 	if (!early)
206*8fbdccd2SQinglin Pan 		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_p4d)), PAGE_TABLE));
207*8fbdccd2SQinglin Pan }
208*8fbdccd2SQinglin Pan 
/*
 * Select the early shadow table / population routine matching the paging
 * mode in use: sv57 -> p4d level, sv48 -> pud level, sv39 -> pmd level.
 */
#define kasan_early_shadow_pgd_next			(pgtable_l5_enabled ?	\
				(uintptr_t)kasan_early_shadow_p4d :		\
							(pgtable_l4_enabled ?	\
				(uintptr_t)kasan_early_shadow_pud :		\
				(uintptr_t)kasan_early_shadow_pmd))
#define kasan_populate_pgd_next(pgdp, vaddr, next, early)			\
		(pgtable_l5_enabled ?						\
		kasan_populate_p4d(pgdp, vaddr, next, early) :			\
		(pgtable_l4_enabled ?						\
			kasan_populate_pud(pgdp, vaddr, next, early) :		\
			kasan_populate_pmd((pud_t *)pgdp, vaddr, next)))
220e8a62cc2SAlexandre Ghiti 
/*
 * Populate the top (PGD) level for the shadow range [vaddr, end).
 *
 * @early: true while still running on early_pg_dir; then fully-covered PGD
 *         entries are simply pointed at the shared early shadow tables.
 */
static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			if (early) {
				/* Share the read-only early shadow for now. */
				phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else if (pgd_page_vaddr(*pgdp) ==
				   (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
				/*
				 * pgdp can't be none since kasan_early_init
				 * initialized all KASAN shadow region with
				 * kasan_early_shadow_pud: if this is still the
				 * case, that means we can try to allocate a
				 * hugepage as a replacement.
				 */
				phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
				if (phys_addr) {
					set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		/* Partial coverage (or hugepage alloc failed): go down a level. */
		kasan_populate_pgd_next(pgdp, vaddr, next, early);
	} while (pgdp++, vaddr = next, vaddr != end);
}
256d127c19cSAlexandre Ghiti 
/*
 * Set up the early KASAN shadow: build the shared early shadow tables so
 * the whole shadow region maps (read-only) onto kasan_early_shadow_page,
 * then hook them into early_pg_dir.  Runs before the real shadow memory
 * can be allocated.
 */
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	/* The shadow region must end exactly at KASAN_SHADOW_END. */
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	/* Every early PTE maps the single shared zero shadow page. */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	/* Every early PMD entry points at the shared PTE table. */
	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	/* sv48/sv57: chain the PUD level onto the shared PMD table. */
	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
					PAGE_TABLE));
	}

	/* sv57: chain the P4D level onto the shared PUD table. */
	if (pgtable_l5_enabled) {
		for (i = 0; i < PTRS_PER_P4D; ++i)
			set_p4d(kasan_early_shadow_p4d + i,
				pfn_p4d(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pud))),
					PAGE_TABLE));
	}

	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}
2962efad17eSAlexandre Ghiti 
/*
 * Hook the early shadow into swapper_pg_dir.
 *
 * NOTE(review): early=true here means the shared early shadow tables are
 * installed (and pt_ops is used to resolve table addresses) rather than
 * allocating real shadow — presumably because the linear mapping is not
 * usable yet when this runs; confirm against the caller in paging_init.
 */
void __init kasan_swapper_init(void)
{
	kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}
3042efad17eSAlexandre Ghiti 
305d127c19cSAlexandre Ghiti static void __init kasan_populate(void *start, void *end)
306d127c19cSAlexandre Ghiti {
307d127c19cSAlexandre Ghiti 	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
308d127c19cSAlexandre Ghiti 	unsigned long vend = PAGE_ALIGN((unsigned long)end);
309d127c19cSAlexandre Ghiti 
3102efad17eSAlexandre Ghiti 	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend, false);
3118ad8b727SNick Hu 
3124cb699d0SVincent Chen 	local_flush_tlb_all();
3139484e2aeSAlexandre Ghiti 	memset(start, KASAN_SHADOW_INIT, end - start);
3148ad8b727SNick Hu }
3158ad8b727SNick Hu 
/*
 * Shallow-populate the PMD level for [vaddr, end): clear any PMD entry
 * that still points at the shared early shadow PTE table, so the shadow
 * for this (e.g. vmalloc) range can later be populated on demand.
 */
static void __init kasan_shallow_populate_pmd(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	pmd_t *pmdp, *base_pmd;
	bool is_kasan_pte;

	base_pmd = (pmd_t *)pgd_page_vaddr(*pgdp);
	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);
		is_kasan_pte = (pmd_pgtable(*pmdp) == lm_alias(kasan_early_shadow_pte));

		if (is_kasan_pte)
			pmd_clear(pmdp);
	} while (pmdp++, vaddr = next, vaddr != end);
}
334*8fbdccd2SQinglin Pan 
/*
 * Shallow-populate the PUD level for [vaddr, end): replace any PUD entry
 * still sharing the early shadow PMD table with a private (zeroed) table,
 * recursing only where the range covers the entry partially.
 */
static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	pud_t *pudp, *base_pud;
	pmd_t *base_pmd;
	bool is_kasan_pmd;

	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);
		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));

		/* Entry already has a private table: nothing to do. */
		if (!is_kasan_pmd)
			continue;

		/* memblock_alloc returns zeroed memory. */
		base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));

		/* Fully covered: leave the fresh table empty. */
		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE)
			continue;

		/*
		 * Partially covered: seed the new table with the early shadow
		 * entries, then clear only the covered part one level down.
		 */
		memcpy(base_pmd, (void *)kasan_early_shadow_pmd, PAGE_SIZE);
		kasan_shallow_populate_pmd((pgd_t *)pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);
}
363e8a62cc2SAlexandre Ghiti 
/*
 * Shallow-populate the P4D level (sv57) for [vaddr, end): same scheme as
 * kasan_shallow_populate_pud, one level up.
 */
static void __init kasan_shallow_populate_p4d(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	p4d_t *p4dp, *base_p4d;
	pud_t *base_pud;
	bool is_kasan_pud;

	base_p4d = (p4d_t *)pgd_page_vaddr(*pgdp);
	p4dp = base_p4d + p4d_index(vaddr);

	do {
		next = p4d_addr_end(vaddr, end);
		is_kasan_pud = (p4d_pgtable(*p4dp) == lm_alias(kasan_early_shadow_pud));

		/* Entry already has a private table: nothing to do. */
		if (!is_kasan_pud)
			continue;

		/* memblock_alloc returns zeroed memory. */
		base_pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		set_p4d(p4dp, pfn_p4d(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));

		/* Fully covered: leave the fresh table empty. */
		if (IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE)
			continue;

		/* Partially covered: seed with early shadow, recurse. */
		memcpy(base_pud, (void *)kasan_early_shadow_pud, PAGE_SIZE);
		kasan_shallow_populate_pud((pgd_t *)p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}
392*8fbdccd2SQinglin Pan 
/* Dispatch one level down according to the paging mode (sv57/sv48/sv39). */
#define kasan_shallow_populate_pgd_next(pgdp, vaddr, next)			\
		(pgtable_l5_enabled ?						\
		kasan_shallow_populate_p4d(pgdp, vaddr, next) :			\
		(pgtable_l4_enabled ?						\
		kasan_shallow_populate_pud(pgdp, vaddr, next) :			\
		kasan_shallow_populate_pmd(pgdp, vaddr, next)))
399*8fbdccd2SQinglin Pan 
4002da073c1SAlexandre Ghiti static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
4012da073c1SAlexandre Ghiti {
4022da073c1SAlexandre Ghiti 	unsigned long next;
4032da073c1SAlexandre Ghiti 	void *p;
4042da073c1SAlexandre Ghiti 	pgd_t *pgd_k = pgd_offset_k(vaddr);
405e8a62cc2SAlexandre Ghiti 	bool is_kasan_pgd_next;
4062da073c1SAlexandre Ghiti 
4072da073c1SAlexandre Ghiti 	do {
4082da073c1SAlexandre Ghiti 		next = pgd_addr_end(vaddr, end);
409e8a62cc2SAlexandre Ghiti 		is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
410e8a62cc2SAlexandre Ghiti 				     (unsigned long)lm_alias(kasan_early_shadow_pgd_next));
411e8a62cc2SAlexandre Ghiti 
412e8a62cc2SAlexandre Ghiti 		if (is_kasan_pgd_next) {
4132da073c1SAlexandre Ghiti 			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
4142da073c1SAlexandre Ghiti 			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
4152da073c1SAlexandre Ghiti 		}
416e8a62cc2SAlexandre Ghiti 
417e8a62cc2SAlexandre Ghiti 		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
418e8a62cc2SAlexandre Ghiti 			continue;
419e8a62cc2SAlexandre Ghiti 
420*8fbdccd2SQinglin Pan 		memcpy(p, (void *)kasan_early_shadow_pgd_next, PAGE_SIZE);
421*8fbdccd2SQinglin Pan 		kasan_shallow_populate_pgd_next(pgd_k, vaddr, next);
4222da073c1SAlexandre Ghiti 	} while (pgd_k++, vaddr = next, vaddr != end);
4232da073c1SAlexandre Ghiti }
4242da073c1SAlexandre Ghiti 
42578947bdfSPalmer Dabbelt static void __init kasan_shallow_populate(void *start, void *end)
426e178d670SNylon Chen {
427e178d670SNylon Chen 	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
428e178d670SNylon Chen 	unsigned long vend = PAGE_ALIGN((unsigned long)end);
429e178d670SNylon Chen 
4302da073c1SAlexandre Ghiti 	kasan_shallow_populate_pgd(vaddr, vend);
431f3773dd0SAlexandre Ghiti 	local_flush_tlb_all();
4328ad8b727SNick Hu }
4338ad8b727SNick Hu 
/*
 * Main KASAN initialization: replace the shared early shadow with real
 * (or shallow) shadow mappings for all regions KASAN must cover, then
 * re-protect the early shadow page and enable reporting.
 */
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	/* vmalloc shadow is populated lazily: only shallow-populate it. */
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	/*
	 * Remap the early shadow PTEs without _PAGE_WRITE so the shared
	 * zero shadow page can no longer be written through them.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	/* depth 0: start reporting KASAN errors from here on. */
	init_task.kasan_depth = 0;
}
468