// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * The KASAN shadow region must lie at a fixed address across sv39, sv48 and
 * sv57, right before the kernel.
 *
 * For sv39, the region is aligned on PGDIR_SIZE, so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the start of the region is aligned on PGDIR_SIZE whereas
 * its end is not, so we have to go down to the PUD level.
 */

static pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
static pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;

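/*
 * Populate the PTEs under @pmd for [@vaddr, @end): allocate the PTE table
 * from memblock if the PMD entry is empty, then back every still-unmapped
 * shadow page with a freshly allocated page initialized to
 * KASAN_SHADOW_INIT.
 */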
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *p;

	if (pmd_none(pmdp_get(pmd))) {
		p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	ptep = pte_offset_kernel(pmd, vaddr);

	do {
		if (pte_none(ptep_get(ptep))) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
			memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
}

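/*
 * Populate the PMD level for [@vaddr, @end): ranges that are PMD-aligned
 * and at least PMD_SIZE long get a PMD leaf mapping if a contiguous
 * PMD_SIZE allocation succeeds; everything else falls through to
 * kasan_populate_pte().
 */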
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *p;
	unsigned long next;

	if (pud_none(pudp_get(pud))) {
		p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
		set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	pmdp = pmd_offset(pud, vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
		    (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PMD_SIZE);
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);
}

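/*
 * Same logic one level up: use PUD leaf mappings for PUD-aligned chunks
 * when a PUD_SIZE contiguous allocation is available, and recurse into
 * kasan_populate_pmd() otherwise.
 */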
static void __init kasan_populate_pud(p4d_t *p4d,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *p;
	unsigned long next;

	if (p4d_none(p4dp_get(p4d))) {
		p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
		set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	pudp = pud_offset(p4d, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
		    (next - vaddr) >= PUD_SIZE) {
			phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
			if (phys_addr) {
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PUD_SIZE);
				continue;
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);
}

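/*
 * P4D-level counterpart: try P4D-sized leaf mappings first, then recurse
 * into kasan_populate_pud() for whatever remains.
 */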
static void __init kasan_populate_p4d(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	p4d_t *p4dp, *p;
	unsigned long next;

	if (pgd_none(pgdp_get(pgd))) {
		p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	p4dp = p4d_offset(pgd, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
			if (phys_addr) {
				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, P4D_SIZE);
				continue;
			}
		}

		kasan_populate_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

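/*
 * Top of the final shadow population walk: try PGDIR-sized leaf mappings,
 * then hand anything smaller down to kasan_populate_p4d().
 */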
static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
			if (phys_addr) {
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PGDIR_SIZE);
				continue;
			}
		}

		kasan_populate_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

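/*
 * Tear down the early shadow at the PUD level. The early mapping only ever
 * installs PUD-aligned, PUD_SIZE-long entries here, so anything else
 * indicates a broken setup, hence the BUG().
 */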
static void __init kasan_early_clear_pud(p4d_t *p4dp,
					 unsigned long vaddr, unsigned long end)
{
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (!pgtable_l4_enabled) {
		pudp = (pud_t *)p4dp;
	} else {
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
		pudp = base_pud + pud_index(vaddr);
	}

	do {
		next = pud_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			pud_clear(pudp);
			continue;
		}

		BUG();
	} while (pudp++, vaddr = next, vaddr != end);
}

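/*
 * P4D-level step of the early shadow teardown: clear whole P4D entries
 * where the paging mode provides a real PUD level underneath, otherwise
 * descend into kasan_early_clear_pud().
 */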
static void __init kasan_early_clear_p4d(pgd_t *pgdp,
					 unsigned long vaddr, unsigned long end)
{
	p4d_t *p4dp, *base_p4d;
	unsigned long next;

	if (!pgtable_l5_enabled) {
		p4dp = (p4d_t *)pgdp;
	} else {
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
		p4dp = base_p4d + p4d_index(vaddr);
	}

	do {
		next = p4d_addr_end(vaddr, end);

		if (pgtable_l4_enabled && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			p4d_clear(p4dp);
			continue;
		}

		kasan_early_clear_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

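/*
 * Entry point of the early shadow teardown, called from kasan_init() while
 * the temporary mapping is live: clear whole PGD entries under sv57 and
 * descend through kasan_early_clear_p4d() for the rest.
 */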
static void __init kasan_early_clear_pgd(pgd_t *pgdp,
					 unsigned long vaddr, unsigned long end)
{
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgtable_l5_enabled && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			pgd_clear(pgdp);
			continue;
		}

		kasan_early_clear_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

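/*
 * Point every PUD entry covering the early shadow region at the shared
 * kasan_early_shadow_pmd table. The region is PUD-aligned by construction,
 * so a misaligned range is a bug.
 */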
static void __init kasan_early_populate_pud(p4d_t *p4dp,
					    unsigned long vaddr,
					    unsigned long end)
{
	pud_t *pudp, *base_pud;
	phys_addr_t phys_addr;
	unsigned long next;

	if (!pgtable_l4_enabled) {
		pudp = (pud_t *)p4dp;
	} else {
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
		pudp = base_pud + pud_index(vaddr);
	}

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
		    (next - vaddr) >= PUD_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
			set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		BUG();
	} while (pudp++, vaddr = next, vaddr != end);
}

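/*
 * P4D-level step of the early shadow setup: install kasan_early_shadow_pud
 * for whole P4D entries when possible, otherwise descend to the PUD level.
 */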
static void __init kasan_early_populate_p4d(pgd_t *pgdp,
					    unsigned long vaddr,
					    unsigned long end)
{
	p4d_t *p4dp, *base_p4d;
	phys_addr_t phys_addr;
	unsigned long next;

	/*
	 * We can't use pgd_page_vaddr here: it would return a linear mapping
	 * address, but the linear mapping is not set up yet. When populating
	 * early_pg_dir we need the physical address, and when populating
	 * swapper_pg_dir we need the kernel virtual address, so use the
	 * pt_ops facility, which provides both.
	 * Note that this is then completely equivalent to
	 * p4dp = p4d_offset(pgdp, vaddr).
	 */
	if (!pgtable_l5_enabled) {
		p4dp = (p4d_t *)pgdp;
	} else {
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
		p4dp = base_p4d + p4d_index(vaddr);
	}

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
			set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		kasan_early_populate_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

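/*
 * Build the early shadow mapping: every PGD entry covering the shadow
 * region ends up pointing, directly or through the shared early shadow
 * page tables, at kasan_early_shadow_page.
 */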
static void __init kasan_early_populate_pgd(pgd_t *pgdp,
					    unsigned long vaddr,
					    unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
			set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		kasan_early_populate_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

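/*
 * Wire together all levels of the shared early shadow page tables, then
 * map the whole shadow region through them in early_pg_dir so that
 * KASAN-instrumented code can run before the real shadow memory exists.
 */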
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
					PAGE_TABLE));
	}

	if (pgtable_l5_enabled) {
		for (i = 0; i < PTRS_PER_P4D; ++i)
			set_p4d(kasan_early_shadow_p4d + i,
				pfn_p4d(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pud))),
					PAGE_TABLE));
	}

	kasan_early_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}

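/*
 * Install the same early shadow mapping into swapper_pg_dir before the
 * kernel switches away from early_pg_dir.
 */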
void __init kasan_swapper_init(void)
{
	kasan_early_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}

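/*
 * Populate real shadow memory for [start, end), page-aligning both bounds
 * outwards first.
 */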
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend);
}

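/*
 * Shallow population only allocates page tables down to the PUD level and
 * maps no shadow pages: for vmalloc, shadow memory is populated on demand
 * when an area is actually mapped.
 */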
static void __init kasan_shallow_populate_pud(p4d_t *p4d,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pud_t *pud_k = pud_offset(p4d, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(pudp_get(pud_k))) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		BUG();
	} while (pud_k++, vaddr = next, vaddr != end);
}

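/*
 * P4D-level step of the shallow walk: allocate an empty PUD table for
 * unpopulated entries and descend into entries that already exist.
 */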
static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	p4d_t *p4d_k = p4d_offset(pgd, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(p4dp_get(p4d_k))) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		kasan_shallow_populate_pud(p4d_k, vaddr, next);
	} while (p4d_k++, vaddr = next, vaddr != end);
}

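/*
 * Top level of the shallow walk: allocate an empty lower-level table for
 * unpopulated PGD entries and descend into entries that already exist.
 */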
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(pgdp_get(pgd_k))) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		kasan_shallow_populate_p4d(pgd_k, vaddr, next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

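/*
 * Shallow-populate the shadow of [start, end) after page-aligning both
 * bounds outwards.
 */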
static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
}

static void __init create_tmp_mapping(void)
{
	void *ptr;
	p4d_t *base_p4d;

	/*
	 * We need to clean the early mapping: this is hard to achieve "in-place",
	 * so install a temporary mapping like arm64 and x86 do.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(pgd_t) * PTRS_PER_PGD);

	/* Copy the last p4d since it is shared with the kernel mapping. */
	if (pgtable_l5_enabled) {
		ptr = (p4d_t *)pgd_page_vaddr(pgdp_get(pgd_offset_k(KASAN_SHADOW_END)));
		memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
		set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
			pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
		base_p4d = tmp_p4d;
	} else {
		base_p4d = (p4d_t *)tmp_pg_dir;
	}

	/* Copy the last pud since it is shared with the kernel mapping. */
	if (pgtable_l4_enabled) {
		ptr = (pud_t *)p4d_page_vaddr(p4dp_get(base_p4d + p4d_index(KASAN_SHADOW_END)));
		memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
		set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
			pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
	}
}

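/*
 * Final KASAN setup: switch to a temporary page table, tear down the early
 * shadow, repopulate the shadow for the fixmap, vmalloc, modules and linear
 * mapping regions, then switch back to swapper_pg_dir and enable KASAN
 * checks by resetting init_task.kasan_depth.
 */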
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	create_tmp_mapping();
	csr_write(CSR_SATP, PFN_DOWN(__pa(tmp_pg_dir)) | satp_mode);

	kasan_early_clear_pgd(pgd_offset_k(KASAN_SHADOW_START),
			      KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)FIXADDR_START),
				    (void *)kasan_mem_to_shadow((void *)VMALLOC_START));

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
		/* Shallow populate modules and BPF which are vmalloc-allocated */
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)MODULES_VADDR),
			(void *)kasan_mem_to_shadow((void *)MODULES_END));
	} else {
		kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)VMALLOC_START),
					    (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
	}

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_END),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;

	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | satp_mode);
	local_flush_tlb_all();
}