xref: /openbmc/linux/arch/riscv/mm/kasan_init.c (revision a3d328037846d013bb4c7f3777241e190e4c75e1)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * The KASAN shadow region must lie at a fixed address across sv39, sv48 and
 * sv57, right before the kernel.
 *
 * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the region is not aligned on PGDIR_SIZE so the mapping
 * must be divided as follows:
 * - the first PGD entry, although incomplete, is populated with
 *   kasan_early_shadow_pud/p4d
 * - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
 * - the last PGD entry is shared with the kernel mapping and is therefore
 *   populated at the lower pud/p4d levels
 *
 * In addition, when shallow populating a KASAN region (for example vmalloc),
 * this region may also not be aligned on PGDIR_SIZE, so we must go down to
 * the pud level too.
 */
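/*
 * As a concrete sketch of what "shadow" means below: generic KASAN maps
 * every 2^KASAN_SHADOW_SCALE_SHIFT bytes of memory to one shadow byte, so
 * kasan_mem_to_shadow() computes roughly
 *
 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * which is why fixing KASAN_SHADOW_START/END is enough to describe the whole
 * region populated by this file.
 */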

extern pgd_t early_pg_dir[PTRS_PER_PGD];

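/*
 * Populate the PTE level for [vaddr, end): allocate a PTE table from
 * memblock if the PMD entry is still empty, back every missing PTE with a
 * freshly allocated shadow page, then install the table in the PMD entry.
 */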
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}

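/*
 * Populate the PMD level for [vaddr, end): try a PMD-sized huge mapping
 * whenever the range is suitably aligned and the allocation succeeds,
 * otherwise fall back to kasan_populate_pte().  The PUD entry is only
 * written once the whole PMD table is ready (see the comment below).
 */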
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	} else {
		base_pmd = (pmd_t *)pud_pgtable(*pud);
		if (base_pmd == lm_alias(kasan_early_shadow_pmd))
			base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	}

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PMD table to be populated before setting the
	 * PUD entry in the page table, otherwise, if we set the PUD entry
	 * before populating it entirely, memblock could allocate a page at a
	 * physical address where KASAN is not populated yet and then we'd
	 * get a page fault.
	 */
	set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}

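/*
 * Populate the PUD level for [vaddr, end).  In the early case, aligned
 * chunks are simply pointed at kasan_early_shadow_pmd; later on, a PUD-sized
 * huge mapping is tried first before recursing into kasan_populate_pmd().
 */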
static void __init kasan_populate_pud(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr here as it would return a linear
		 * mapping address, but the linear mapping is not mapped yet.
		 * When populating early_pg_dir we deal with physical
		 * addresses, and when populating swapper_pg_dir we need the
		 * kernel virtual address, so use the pt_ops facility which
		 * handles both.
		 */
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else {
		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
		if (base_pud == lm_alias(kasan_early_shadow_pud))
			base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
	}

	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			if (early) {
				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pmd));
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
				if (phys_addr) {
					set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PUD table to be populated before setting the
	 * PGD entry in the page table, otherwise, if we set the PGD entry
	 * before populating it entirely, memblock could allocate a page at a
	 * physical address where KASAN is not populated yet and then we'd
	 * get a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}

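/*
 * Pick the level right below the PGD depending on whether the kernel runs
 * with a 4-level (sv48) or 3-level (sv39) page table.
 */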
#define kasan_early_shadow_pgd_next			(pgtable_l4_enabled ?	\
				(uintptr_t)kasan_early_shadow_pud :		\
				(uintptr_t)kasan_early_shadow_pmd)
#define kasan_populate_pgd_next(pgdp, vaddr, next, early)			\
		(pgtable_l4_enabled ?						\
			kasan_populate_pud(pgdp, vaddr, next, early) :		\
			kasan_populate_pmd((pud_t *)pgdp, vaddr, next))

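/*
 * Walk the PGD entries covering [vaddr, end).  PGDIR-aligned chunks are
 * pointed at the early shadow tables in the early case, or upgraded to a
 * PGDIR-sized leaf mapping when possible later on; everything else is handed
 * down to the next level via kasan_populate_pgd_next().
 */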
static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			if (early) {
				phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else if (pgd_page_vaddr(*pgdp) ==
				   (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
				/*
				 * pgdp can't be none since kasan_early_init
				 * initialized the whole KASAN shadow region
				 * with kasan_early_shadow_pud: if this is
				 * still the case, we can try to allocate a
				 * hugepage as a replacement.
				 */
				phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
				if (phys_addr) {
					set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pgd_next(pgdp, vaddr, next, early);
	} while (pgdp++, vaddr = next, vaddr != end);
}

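/*
 * Early boot entry point: make every early shadow PTE point at the early
 * shadow page (and chain the early PMD/PUD tables accordingly), then hook
 * them into early_pg_dir so the whole KASAN shadow range is mapped before
 * any real shadow memory exists.
 */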
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
					PAGE_TABLE));
	}

	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

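/*
 * Install the same early shadow mapping into swapper_pg_dir; the real shadow
 * memory is populated later by kasan_init().
 */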
void __init kasan_swapper_init(void)
{
	kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

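/*
 * Allocate real shadow memory for the page-aligned range covering
 * [start, end) and initialize it to KASAN_SHADOW_INIT.
 */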
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend, false);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}

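/*
 * Shallow variant used for the vmalloc shadow: only replace PUD entries that
 * still point at the early shadow PMD with a fresh, empty PMD table, leaving
 * the lower levels to be populated on demand.
 */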
static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end,
					      bool kasan_populate)
{
	unsigned long next;
	pud_t *pudp, *base_pud;
	pmd_t *base_pmd;
	bool is_kasan_pmd;

	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
	pudp = base_pud + pud_index(vaddr);

	if (kasan_populate)
		memcpy(base_pud, (void *)kasan_early_shadow_pgd_next,
		       sizeof(pud_t) * PTRS_PER_PUD);

	do {
		next = pud_addr_end(vaddr, end);
		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));

		if (is_kasan_pmd) {
			base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
		}
	} while (pudp++, vaddr = next, vaddr != end);
}

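/*
 * Shallow-populate the PGD entries covering [vaddr, end): entries still
 * pointing at the early shadow tables get their own next-level table, and
 * partially covered entries are pushed down to kasan_shallow_populate_pud().
 */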
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);
	bool is_kasan_pgd_next;

	do {
		next = pgd_addr_end(vaddr, end);
		is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
				     (unsigned long)lm_alias(kasan_early_shadow_pgd_next));

		if (is_kasan_pgd_next) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
			continue;

		kasan_shallow_populate_pud(pgd_k, vaddr, next, is_kasan_pgd_next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

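/*
 * Entry point for the shallow mapping of the vmalloc shadow region used with
 * CONFIG_KASAN_VMALLOC.
 */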
static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
	local_flush_tlb_all();
}

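/*
 * Main KASAN setup: shallow-map the vmalloc shadow if needed, populate real
 * shadow memory for the linear mapping and for the kernel/BPF/modules
 * region, remap the early shadow page read-only, and finally enable report
 * generation by clearing init_task.kasan_depth.
 */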
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;
}