xref: /openbmc/linux/arch/riscv/mm/kasan_init.c (revision e8a62cc26ddf53a3c6ba2a8d33036cf7b84f3923)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * The KASAN shadow region must lie at a fixed address across sv39, sv48 and
 * sv57, which is right before the kernel.
 *
 * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the region is not aligned on PGDIR_SIZE so the mapping
 * must be divided as follows:
 * - the first PGD entry, although incomplete, is populated with
 *   kasan_early_shadow_pud/p4d
 * - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
 * - the last PGD entry is shared with the kernel mapping, so it is populated
 *   at the lower pud/p4d levels
 *
 * In addition, when shallow populating a kasan region (for example vmalloc),
 * this region may also not be aligned on PGDIR_SIZE, so we must go down to
 * the pud level too.
 */

extern pgd_t early_pg_dir[PTRS_PER_PGD];
extern struct pt_alloc_ops _pt_ops __initdata;
#define pt_ops	_pt_ops

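/*
 * Populate the PTE level for [vaddr, end): reuse the PTE table already
 * installed in *pmd if there is one, otherwise allocate a fresh table from
 * memblock; back every still-empty entry with a newly allocated shadow
 * page, and only then hook the table into the PMD entry.
 */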
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}

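/*
 * Populate the PMD level for [vaddr, end): PMD_SIZE-aligned chunks are
 * mapped with a PMD hugepage when memblock can provide a suitably aligned
 * block, everything else falls through to kasan_populate_pte().
 */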
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	} else {
		base_pmd = (pmd_t *)pud_pgtable(*pud);
		if (base_pmd == lm_alias(kasan_early_shadow_pmd))
			base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	}

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PMD table to be populated before setting the
	 * PUD entry in the page table: otherwise, if we set the PUD entry
	 * before populating the table entirely, memblock could allocate a
	 * page at a physical address where KASAN is not populated yet and
	 * then we'd get a page fault.
	 */
	set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}

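/*
 * Populate the PUD level for [vaddr, end). In the early case, PUD_SIZE
 * chunks are simply pointed at the early shadow PMD table; otherwise they
 * are mapped with a PUD hugepage when memblock can provide one, and the
 * rest is handed down to kasan_populate_pmd().
 */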
static void __init kasan_populate_pud(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr here as it would return a
		 * linear mapping address, but the linear mapping is not
		 * mapped yet: when populating early_pg_dir we need the
		 * physical address, and when populating swapper_pg_dir we
		 * need the kernel virtual address, so use the pt_ops
		 * facility for that.
		 */
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else {
		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
		if (base_pud == lm_alias(kasan_early_shadow_pud))
			base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
	}

	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			if (early) {
				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pmd));
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
				if (phys_addr) {
					set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PGD entry to be populated before setting it in
	 * the page table: otherwise, if we set the PGD before populating it
	 * entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}

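/*
 * sv39 has no PUD level, so the level below the PGD is only known at
 * runtime via pgtable_l4_enabled: these helpers dispatch to the pud or
 * pmd variant accordingly.
 */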
#define kasan_early_shadow_pgd_next			(pgtable_l4_enabled ?	\
				(uintptr_t)kasan_early_shadow_pud :		\
				(uintptr_t)kasan_early_shadow_pmd)
#define kasan_populate_pgd_next(pgdp, vaddr, next, early)			\
		(pgtable_l4_enabled ?						\
			kasan_populate_pud(pgdp, vaddr, next, early) :		\
			kasan_populate_pmd((pud_t *)pgdp, vaddr, next))

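/*
 * Populate the PGD level for [vaddr, end): aligned PGDIR_SIZE chunks are
 * pointed at the early shadow tables in the early case, or replaced with
 * a PGDIR_SIZE hugepage when one can be allocated; everything else is
 * handed down to the next level.
 */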
static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			if (early) {
				phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else if (pgd_page_vaddr(*pgdp) ==
				   (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
				/*
				 * pgdp can't be none since kasan_early_init
				 * initialized the whole KASAN shadow region
				 * with kasan_early_shadow_pud: if this is
				 * still the case, that means we can try to
				 * allocate a hugepage as a replacement.
				 */
				phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
				if (phys_addr) {
					set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pgd_next(pgdp, vaddr, next, early);
	} while (pgdp++, vaddr = next, vaddr != end);
}

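/*
 * Early boot entry point (asmlinkage, presumably so it can be called from
 * assembly): chain the early shadow pte/pmd/pud tables down to the single
 * kasan_early_shadow_page, then plug the whole shadow region into
 * early_pg_dir so every shadow access is backed until real shadow memory
 * is populated.
 */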
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
					PAGE_TABLE));
	}

	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

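/*
 * Install the same early shadow mapping into swapper_pg_dir when it takes
 * over from early_pg_dir; kasan_init() replaces it with real shadow memory
 * later.
 */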
void __init kasan_swapper_init(void)
{
	kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

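/*
 * Back the shadow range [start, end) (already translated by the callers
 * via kasan_mem_to_shadow()) with real memory and initialize it to
 * KASAN_SHADOW_INIT, i.e. fully accessible.
 */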
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend, false);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}

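/*
 * Shallow population stops at page table allocation, without backing
 * memory: give each PUD entry that still points at the early shadow PMD
 * table its own zeroed PMD table. When the PGD entry itself was the
 * shared early table (kasan_populate), seed the fresh PUD table with a
 * copy of it first.
 */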
static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end,
					      bool kasan_populate)
{
	unsigned long next;
	pud_t *pudp, *base_pud;
	pmd_t *base_pmd;
	bool is_kasan_pmd;

	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
	pudp = base_pud + pud_index(vaddr);

	if (kasan_populate)
		memcpy(base_pud, (void *)kasan_early_shadow_pgd_next,
		       sizeof(pud_t) * PTRS_PER_PUD);

	do {
		next = pud_addr_end(vaddr, end);
		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));

		if (is_kasan_pmd) {
			base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
		}
	} while (pudp++, vaddr = next, vaddr != end);
}

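/*
 * Walk the PGD for [vaddr, end): any entry still pointing at the shared
 * early shadow table gets a private page table; ranges that don't cover a
 * whole PGD entry additionally recurse into kasan_shallow_populate_pud().
 */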
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);
	bool is_kasan_pgd_next;

	do {
		next = pgd_addr_end(vaddr, end);
		is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
				     (unsigned long)lm_alias(kasan_early_shadow_pgd_next));

		if (is_kasan_pgd_next) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
			continue;

		kasan_shallow_populate_pud(pgd_k, vaddr, next, is_kasan_pgd_next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

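/*
 * Shallow-populate the shadow of [start, end): pre-build the upper page
 * table levels without backing memory, as used for the vmalloc region
 * where CONFIG_KASAN_VMALLOC populates shadow pages on demand.
 */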
static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
	local_flush_tlb_all();
}

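/*
 * Main KASAN setup: shallow-populate the vmalloc shadow when
 * CONFIG_KASAN_VMALLOC is enabled, populate real shadow for the linear
 * mapping and for the 2GB kernel/BPF/modules region, then remap the zero
 * shadow page read-only and enable report generation by resetting
 * init_task.kasan_depth.
 */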
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;
}
343