// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * The KASAN shadow region must lie at a fixed address across sv39, sv48 and
 * sv57, right below the kernel mapping.
 *
 * For sv39, the region is aligned on PGDIR_SIZE, so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the region is not aligned on PGDIR_SIZE, so the mapping
 * must be divided as follows:
 * - the first PGD entry, although incomplete, is populated with
 *   kasan_early_shadow_pud/p4d
 * - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
 * - the last PGD entry is shared with the kernel mapping, so it is populated
 *   at the lower levels (pud/p4d)
 *
 * In addition, when shallow populating a KASAN region (for example vmalloc),
 * this region may also not be aligned on PGDIR_SIZE, so we must go down to
 * the pud level too.
 */
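
/*
 * For reference, generic KASAN maps each 8-byte granule of memory to one
 * shadow byte:
 *
 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * which is what kasan_mem_to_shadow() computes for the ranges populated
 * below; e.g. with KASAN_SHADOW_SCALE_SHIFT == 3, shadowing a 1GB region
 * costs 128MB of shadow memory.
 */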

extern pgd_t early_pg_dir[PTRS_PER_PGD];

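/*
 * Populate the PTE level of the shadow mapping for [vaddr, end): allocate a
 * backing shadow page for every empty PTE, then hook the PTE table into the
 * given PMD entry. The shadow contents themselves are initialized later by
 * the caller.
 */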
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}

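/*
 * Populate the PMD level for [vaddr, end): use a 2MB huge mapping for the
 * shadow wherever a whole, aligned PMD region can be allocated, and fall
 * back to kasan_populate_pte() otherwise.
 */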
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	} else {
		base_pmd = (pmd_t *)pud_pgtable(*pud);
		if (base_pmd == lm_alias(kasan_early_shadow_pmd))
			base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	}

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PMD table to be populated before setting the PUD
	 * entry in the page table: otherwise, if we set the entry before
	 * populating the table entirely, memblock could allocate a page at a
	 * physical address where KASAN is not populated yet and then we'd get
	 * a page fault.
	 */
	set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}

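/*
 * Populate the PUD level for [vaddr, end). In the early case the PUD table
 * is reached through pt_ops (see the comment below); 1GB huge shadow
 * mappings are used where possible, with kasan_populate_pmd() as the
 * fallback.
 */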
static void __init kasan_populate_pud(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr here: it would return a linear
		 * mapping address, and the linear mapping is not up yet.
		 * Populating early_pg_dir requires the physical address while
		 * populating swapper_pg_dir requires the kernel virtual
		 * address, so use the pt_ops facility, which handles both.
		 */
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else if (pgd_none(*pgd)) {
		base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
		memcpy(base_pud, (void *)kasan_early_shadow_pud,
		       sizeof(pud_t) * PTRS_PER_PUD);
	} else {
		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
		if (base_pud == lm_alias(kasan_early_shadow_pud)) {
			base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
			memcpy(base_pud, (void *)kasan_early_shadow_pud,
			       sizeof(pud_t) * PTRS_PER_PUD);
		}
	}

	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			if (early) {
				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pmd));
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
				if (phys_addr) {
					set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PUD table to be populated before setting the PGD
	 * entry in the page table: otherwise, if we set the entry before
	 * populating the table entirely, memblock could allocate a page at a
	 * physical address where KASAN is not populated yet and then we'd get
	 * a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}

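/*
 * Populate the P4D level for [vaddr, end) on sv57; the structure mirrors
 * kasan_populate_pud() one level up.
 */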
static void __init kasan_populate_p4d(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	p4d_t *p4dp, *base_p4d;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr here: it would return a linear
		 * mapping address, and the linear mapping is not up yet.
		 * Populating early_pg_dir requires the physical address while
		 * populating swapper_pg_dir requires the kernel virtual
		 * address, so use the pt_ops facility, which handles both.
		 */
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else {
		base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
		if (base_p4d == lm_alias(kasan_early_shadow_p4d)) {
			base_p4d = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
			memcpy(base_p4d, (void *)kasan_early_shadow_p4d,
			       sizeof(p4d_t) * PTRS_PER_P4D);
		}
	}

	p4dp = base_p4d + p4d_index(vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
			if (early) {
				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pud));
				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
				if (phys_addr) {
					set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pud((pgd_t *)p4dp, vaddr, next, early);
	} while (p4dp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole P4D table to be populated before setting the PGD
	 * entry in the page table: otherwise, if we set the entry before
	 * populating the table entirely, memblock could allocate a page at a
	 * physical address where KASAN is not populated yet and then we'd get
	 * a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_p4d)), PAGE_TABLE));
}

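/*
 * Depending on the number of page table levels in use, the level right below
 * the PGD is the p4d (sv57), the pud (sv48) or the pmd (sv39). These helpers
 * pick the matching early shadow table and populate function at runtime.
 */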
#define kasan_early_shadow_pgd_next			(pgtable_l5_enabled ?	\
				(uintptr_t)kasan_early_shadow_p4d :		\
							(pgtable_l4_enabled ?	\
				(uintptr_t)kasan_early_shadow_pud :		\
				(uintptr_t)kasan_early_shadow_pmd))
#define kasan_populate_pgd_next(pgdp, vaddr, next, early)			\
		(pgtable_l5_enabled ?						\
		kasan_populate_p4d(pgdp, vaddr, next, early) :			\
		(pgtable_l4_enabled ?						\
			kasan_populate_pud(pgdp, vaddr, next, early) :		\
			kasan_populate_pmd((pud_t *)pgdp, vaddr, next)))

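/*
 * Walk the PGD entries covering [vaddr, end). Fully covered, aligned entries
 * are either pointed at the early shadow tables (early case) or backed by a
 * PGDIR_SIZE hugepage when one can be allocated; everything else is handed
 * down to the next level.
 */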
static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			if (early) {
				phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else if (pgd_page_vaddr(*pgdp) ==
				   (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
				/*
				 * pgdp can't be none since kasan_early_init
				 * initialized the whole KASAN shadow region
				 * with kasan_early_shadow_pgd_next: if this is
				 * still the case, we can try to allocate a
				 * hugepage as a replacement.
				 */
				phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
				if (phys_addr) {
					set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pgd_next(pgdp, vaddr, next, early);
	} while (pgdp++, vaddr = next, vaddr != end);
}

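/*
 * Called early in boot, before the linear mapping exists: point every early
 * shadow page table entry at the single early shadow page, then wire the
 * whole shadow region into early_pg_dir so that shadow accesses made before
 * kasan_init() hit valid (zero) shadow memory.
 */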
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
					PAGE_TABLE));
	}

	if (pgtable_l5_enabled) {
		for (i = 0; i < PTRS_PER_P4D; ++i)
			set_p4d(kasan_early_shadow_p4d + i,
				pfn_p4d(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pud))),
					PAGE_TABLE));
	}

	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

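/*
 * Replay the early shadow mapping into swapper_pg_dir so the shadow region
 * stays mapped once the kernel switches away from early_pg_dir. 'early' is
 * still true here: the linear mapping is not usable yet, so the lower-level
 * tables must be reached through pt_ops rather than pgd_page_vaddr().
 */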
void __init kasan_swapper_init(void)
{
	kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

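/*
 * Replace the early shadow for [start, end) with real shadow memory and
 * initialize it to KASAN_SHADOW_INIT, which marks the corresponding region
 * as accessible.
 */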
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend, false);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}

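/*
 * "Shallow" population only builds the page tables down to the level where
 * the shared early shadow tables can be unhooked; the shadow pages
 * themselves are mapped on demand by kasan_populate_vmalloc() when vmalloc
 * hands out the corresponding address range.
 */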
static void __init kasan_shallow_populate_pmd(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	pmd_t *pmdp, *base_pmd;
	bool is_kasan_pte;

	base_pmd = (pmd_t *)pgd_page_vaddr(*pgdp);
	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);
		is_kasan_pte = (pmd_pgtable(*pmdp) == lm_alias(kasan_early_shadow_pte));

		if (is_kasan_pte)
			pmd_clear(pmdp);
	} while (pmdp++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	pud_t *pudp, *base_pud;
	pmd_t *base_pmd;
	bool is_kasan_pmd;

	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);
		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));

		if (!is_kasan_pmd)
			continue;

		base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));

		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE)
			continue;

		memcpy(base_pmd, (void *)kasan_early_shadow_pmd, PAGE_SIZE);
		kasan_shallow_populate_pmd((pgd_t *)pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_p4d(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	p4d_t *p4dp, *base_p4d;
	pud_t *base_pud;
	bool is_kasan_pud;

	base_p4d = (p4d_t *)pgd_page_vaddr(*pgdp);
	p4dp = base_p4d + p4d_index(vaddr);

	do {
		next = p4d_addr_end(vaddr, end);
		is_kasan_pud = (p4d_pgtable(*p4dp) == lm_alias(kasan_early_shadow_pud));

		if (!is_kasan_pud)
			continue;

		base_pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		set_p4d(p4dp, pfn_p4d(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));

		if (IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE)
			continue;

		memcpy(base_pud, (void *)kasan_early_shadow_pud, PAGE_SIZE);
		kasan_shallow_populate_pud((pgd_t *)p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

#define kasan_shallow_populate_pgd_next(pgdp, vaddr, next)			\
		(pgtable_l5_enabled ?						\
		kasan_shallow_populate_p4d(pgdp, vaddr, next) :			\
		(pgtable_l4_enabled ?						\
		kasan_shallow_populate_pud(pgdp, vaddr, next) :			\
		kasan_shallow_populate_pmd(pgdp, vaddr, next)))

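/*
 * Walk the PGD entries covering [vaddr, end): whenever an entry still points
 * at the shared early shadow table, detach it by allocating a private table,
 * copying the early entries when the range is only partially covered, and
 * recursing to do the same one level down.
 */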
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p = NULL;
	pgd_t *pgd_k = pgd_offset_k(vaddr);
	bool is_kasan_pgd_next;

	do {
		next = pgd_addr_end(vaddr, end);
		is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
				     (unsigned long)lm_alias(kasan_early_shadow_pgd_next));

		if (is_kasan_pgd_next) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
			continue;

		/*
		 * Only copy the early shadow entries into the new table when
		 * one was actually allocated above: 'p' is stale otherwise.
		 */
		if (is_kasan_pgd_next)
			memcpy(p, (void *)kasan_early_shadow_pgd_next, PAGE_SIZE);
		kasan_shallow_populate_pgd_next(pgd_k, vaddr, next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
	local_flush_tlb_all();
}

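/*
 * Main KASAN setup: shallow-populate the vmalloc shadow, map real shadow for
 * the linear mapping and for the kernel/BPF/modules region, write-protect
 * the early shadow page, and finally enable report generation by clearing
 * init_task.kasan_depth.
 */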
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	/*
	 * Remap the early shadow page read-only (present + read, no write):
	 * writes to it would corrupt the shadow shared by every region that
	 * is still backed by it.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;
}