xref: /openbmc/linux/arch/arm64/mm/kasan_init.c (revision 174cd4b1)
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly, so they can't be
 * used directly on kernel symbols (kasan_zero_p*). All the early functions
 * here run too early to use lm_alias, so the __p*d_populate functions must
 * be used instead, with the physical address taken from __pa_symbol.
 */
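
/*
 * For illustration, a sketch of the difference (not compiled): the generic
 * helper
 *
 *	pmd_populate_kernel(&init_mm, pmd, ptep);
 *
 * resolves ptep via __pa(), which is only valid for linear-map addresses,
 * whereas an image symbol such as kasan_zero_pte must go through
 *
 *	__pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);
 */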

static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
					unsigned long end)
{
	pte_t *pte;
	unsigned long next;

	if (pmd_none(*pmd))
		__pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);

	pte = pte_offset_kimg(pmd, addr);
	do {
		next = addr + PAGE_SIZE;
		set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
					PAGE_KERNEL));
	} while (pte++, addr = next, addr != end && pte_none(*pte));
}

static void __init kasan_early_pmd_populate(pud_t *pud,
					unsigned long addr,
					unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud))
		__pud_populate(pud, __pa_symbol(kasan_zero_pmd), PMD_TYPE_TABLE);

	pmd = pmd_offset_kimg(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		kasan_early_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
}

static void __init kasan_early_pud_populate(pgd_t *pgd,
					unsigned long addr,
					unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd))
		__pgd_populate(pgd, __pa_symbol(kasan_zero_pud), PUD_TYPE_TABLE);

	pud = pud_offset_kimg(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		kasan_early_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end && pud_none(*pud));
}

static void __init kasan_map_early_shadow(void)
{
	unsigned long addr = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_pud_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

asmlinkage void __init kasan_early_init(void)
{
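	/*
	 * A sanity sketch of the first check below, assuming
	 * KASAN_SHADOW_SCALE_SHIFT == 3 (one shadow byte per 8 bytes of
	 * memory): the shadow of the whole 64-bit VA space spans
	 * 1UL << (64 - 3) bytes, and since
	 *	kasan_mem_to_shadow(addr) == (addr >> 3) + KASAN_SHADOW_OFFSET
	 * the offset is pinned to KASAN_SHADOW_END - (1UL << 61).
	 */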
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_map_early_shadow();
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgd, *pgd_new, *pgd_end;

	pgd = pgd_offset_k(KASAN_SHADOW_START);
	pgd_end = pgd_offset_k(KASAN_SHADOW_END);
	pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgd_new, *pgd);
	} while (pgd++, pgd_new++, pgd != pgd_end);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used
	 * here because it's a no-op on 2- and 3-level page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
	kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
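
	/*
	 * For reference, kasan_mem_to_shadow() computes
	 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
	 * so the four values above delimit the shadow of the kernel image
	 * and of the module area.
	 */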

	/*
	 * We are now going to set up the shadow memory properly. First, the
	 * early shadow must be unmapped (the clear_pgds() call below).
	 * However, instrumented code can't execute without shadow memory,
	 * so tmp_pg_dir is used to keep the early shadow mapped until the
	 * full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
			 pfn_to_nid(virt_to_pfn(_text)));

	/*
	 * vmemmap_populate() has populated the shadow region that covers the
	 * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
	 * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
	 * kasan_populate_zero_shadow() from replacing the page table entries
	 * (PMD or PTE) at the edges of the shadow region for the kernel
	 * image.
	 */
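	/*
	 * For example (assuming 4K pages, where the swapper uses section
	 * maps): SWAPPER_BLOCK_SIZE is the 2 MiB SECTION_SIZE, so the two
	 * rounds below widen the kernel-image shadow to the enclosing
	 * 2 MiB boundaries; with 16K or 64K pages it is just PAGE_SIZE.
	 */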
	kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
	kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * The "end + 1" here is intentional: the fast paths check
		 * several shadow bytes in advance to speed things up, so in
		 * some rare cases they could cross the boundary of the
		 * mapped shadow. We simply map a little extra here.
		 */
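		/*
		 * One extra byte is enough: vmemmap_populate() works at page
		 * granularity, so "+ 1" pulls in a whole extra shadow page,
		 * far more than the few look-ahead bytes needed.
		 */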
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				(unsigned long)kasan_mem_to_shadow(end) + 1,
				pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KAsan may reuse the contents of kasan_zero_pte directly, so we
	 * should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_zero_pte[i],
			pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));

	memset(kasan_zero_page, 0, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}
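
/*
 * For orientation, a simplified sketch (not the exact code; the real fast
 * paths live in mm/kasan/) of the check instrumented code performs on a
 * 1-byte access, which shows why the all-zero kasan_zero_page serves as a
 * "fully accessible" shadow:
 *
 *	s8 shadow = *(s8 *)kasan_mem_to_shadow(addr);
 *	if (unlikely(shadow) && unlikely((addr & KASAN_SHADOW_MASK) >= shadow))
 *		// report the bad access
 */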