xref: /openbmc/linux/mm/kasan/init.c (revision b938fcf42739de8270e6ea41593722929c8a7dd0)
/*
 * This file contains some kasan initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgalloc.h>

#include "kasan.h"

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory. The entire shadow region is
 *     populated with this page before we are able to set up the normal
 *     shadow memory.
 *   - Later it is reused as the zero shadow, covering large ranges of
 *     memory that are allowed to be accessed but are not handled by
 *     kasan (vmalloc/vmemmap ...).
 */
unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;

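/*
 * Shared zero page tables, one for each page table level the
 * architecture actually has. On configurations with fewer levels the
 * corresponding kasan_*_table() check trivially returns false.
 */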
#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return pgd_page(pgd) == virt_to_page(lm_alias(kasan_zero_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
	return p4d_page(p4d) == virt_to_page(lm_alias(kasan_zero_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
	return pud_page(pud) == virt_to_page(lm_alias(kasan_zero_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
	return false;
}
#endif
pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
	return pmd_page(pmd) == virt_to_page(lm_alias(kasan_zero_pte));
}

static inline bool kasan_zero_page_entry(pte_t pte)
{
	return pte_page(pte) == virt_to_page(lm_alias(kasan_zero_page));
}

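/*
 * Allocate naturally aligned memory from memblock for early page-table
 * pages, before the slab allocator is available. Alignment equal to
 * the size keeps these allocations page-aligned.
 */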
static __init void *early_alloc(size_t size, int node)
{
	return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
					MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

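/*
 * Map every shadow page in [addr, end) to the shared, write-protected
 * kasan_zero_page, so reads of the covered shadow return zero
 * ("no poison") while writes are impossible.
 */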
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

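	/* All zero-shadow PTEs share a single write-protected zero page. */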
	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}

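/*
 * Populate pmd entries for [addr, end). Whole, aligned PMD-sized
 * blocks are pointed at the shared kasan_zero_pte table; partial
 * blocks get a freshly allocated pte page, which zero_pte_populate()
 * then fills with zero-page entries.
 */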
static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
			continue;
		}

		if (pmd_none(*pmd)) {
			pte_t *p;

			if (slab_is_available())
				p = pte_alloc_one_kernel(&init_mm, addr);
			else
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;

			pmd_populate_kernel(&init_mm, pmd, p);
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

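/*
 * Same as zero_pmd_populate(), one level up: aligned PUD-sized blocks
 * reuse the shared zero pmd/pte tables; partial blocks recurse into
 * zero_pmd_populate().
 */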
static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
			continue;
		}

		if (pud_none(*pud)) {
			pmd_t *p;

			if (slab_is_available()) {
				p = pmd_alloc(&init_mm, pud, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pud_populate(&init_mm, pud,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return 0;
}

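/*
 * Same pattern at the p4d level: aligned P4D-sized blocks are wired to
 * the shared zero pud/pmd/pte tables; partial blocks recurse into
 * zero_pud_populate().
 */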
static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
						lm_alias(kasan_zero_pte));
			continue;
		}

		if (p4d_none(*p4d)) {
			pud_t *p;

			if (slab_is_available()) {
				p = pud_alloc(&init_mm, p4d, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p4d_populate(&init_mm, p4d,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pud_populate(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	return 0;
}

/**
 * kasan_populate_zero_shadow - populate shadow memory region with
 *                              kasan_zero_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 *
 * Return: 0 on success, -ENOMEM if a page table allocation fails.
 */
int __ref kasan_populate_zero_shadow(const void *shadow_start,
				const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			/*
			 * kasan_zero_pud should already be populated with
			 * pmds at this point.
			 * The [pud,pmd]_populate*() calls below are only
			 * needed for 3- and 2-level page tables, where we
			 * don't have real puds/pmds, so pgd_populate() and
			 * pud_populate() are no-ops.
			 *
			 * The ifndef is required to avoid build breakage.
			 *
			 * With 5level-fixup.h, pgd_populate() is not a no-op
			 * and references kasan_zero_p4d, which is not defined
			 * unless 5-level paging is enabled.
			 *
			 * The ifndef can be dropped once all KASAN-enabled
			 * architectures switch to pgtable-nop4d.h.
			 */
#ifndef __ARCH_HAS_5LEVEL_HACK
			pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_p4d));
#endif
			p4d = p4d_offset(pgd, addr);
			p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
			continue;
		}

		if (pgd_none(*pgd)) {
			p4d_t *p;

			if (slab_is_available()) {
				p = p4d_alloc(&init_mm, pgd, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pgd_populate(&init_mm, pgd,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);

	return 0;
}

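/*
 * The kasan_free_*() helpers below free a page-table page once the
 * remove path has emptied it: if any entry is still in use the table
 * is kept; otherwise the page is freed and the upper-level entry that
 * referenced it is cleared.
 */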
static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
	pmd_clear(pmd);
}

static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
	pud_clear(pud);
}

static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
	p4d_clear(p4d);
}

static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
{
	p4d_t *p4d;
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		p4d = p4d_start + i;
		if (!p4d_none(*p4d))
			return;
	}

	p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
	pgd_clear(pgd);
}

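/*
 * Clear zero-shadow PTEs in [addr, end). Only entries that point at
 * kasan_zero_page may be removed here; anything else is a shadow page
 * with real data, which triggers the WARN and is left in place.
 */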
static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (WARN_ON(!kasan_zero_page_entry(*pte)))
			continue;
		pte_clear(&init_mm, addr, pte);
	}
}

static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pmd++) {
		pte_t *pte;

		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (kasan_pte_table(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE))
				pmd_clear(pmd);
			continue;
		}
		pte = pte_offset_kernel(pmd, addr);
		kasan_remove_pte_table(pte, addr, next);
		kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
	}
}

static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pud++) {
		pmd_t *pmd, *pmd_base;

		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (kasan_pmd_table(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE))
				pud_clear(pud);
			continue;
		}
		pmd = pmd_offset(pud, addr);
		pmd_base = pmd_offset(pud, 0);
		kasan_remove_pmd_table(pmd, addr, next);
		kasan_free_pmd(pmd_base, pud);
	}
}

static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, p4d++) {
		pud_t *pud;

		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		if (kasan_pud_table(*p4d)) {
			if (IS_ALIGNED(addr, P4D_SIZE) &&
			    IS_ALIGNED(next, P4D_SIZE))
				p4d_clear(p4d);
			continue;
		}
		pud = pud_offset(p4d, addr);
		kasan_remove_pud_table(pud, addr, next);
		kasan_free_pud(pud_offset(p4d, 0), p4d);
	}
}

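/*
 * Tear down the zero shadow for [start, start + size), clearing
 * entries that point at the shared zero tables and freeing page-table
 * pages that become empty. Both start and size must cover whole
 * shadow pages, i.e. be aligned to KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE.
 */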
void kasan_remove_zero_shadow(void *start, unsigned long size)
{
	unsigned long addr, end, next;
	pgd_t *pgd;

	addr = (unsigned long)kasan_mem_to_shadow(start);
	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start %
			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
		return;

	for (; addr < end; addr = next) {
		p4d_t *p4d;

		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (kasan_p4d_table(*pgd)) {
			if (IS_ALIGNED(addr, PGDIR_SIZE) &&
			    IS_ALIGNED(next, PGDIR_SIZE))
				pgd_clear(pgd);
			continue;
		}

		p4d = p4d_offset(pgd, addr);
		kasan_remove_p4d_table(p4d, addr, next);
		kasan_free_p4d(p4d_offset(pgd, 0), pgd);
	}
}

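/*
 * Populate the zero shadow for [start, start + size), e.g. when memory
 * is hotplugged. On failure the partially populated shadow is torn
 * down again; kasan_remove_zero_shadow() takes the original
 * (unshadowed) range, not the shadow addresses.
 */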
int kasan_add_zero_shadow(void *start, unsigned long size)
{
	int ret;
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start %
			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
		return -EINVAL;

	ret = kasan_populate_zero_shadow(shadow_start, shadow_end);
	if (ret)
		kasan_remove_zero_shadow(start, size);
	return ret;
}