xref: /openbmc/linux/arch/arm64/mm/kasan_init.c (revision 31e67366)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */

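/*
 * Allocate a zeroed shadow page from memblock, preferring @node and
 * placing it above MAX_DMA_ADDRESS. Boot cannot continue without
 * shadow memory, so allocation failure panics.
 */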
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					 __pa(MAX_DMA_ADDRESS),
					 MEMBLOCK_ALLOC_KASAN, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

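/*
 * Like kasan_alloc_zeroed_page(), but the page contents are left
 * uninitialized; the caller fills it (e.g. with KASAN_SHADOW_INIT)
 * before mapping it.
 */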
static phys_addr_t __init kasan_alloc_raw_page(int node)
{
	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
					     __pa(MAX_DMA_ADDRESS),
					     MEMBLOCK_ALLOC_KASAN, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

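/*
 * Walk to the shadow PTE for @addr, first populating an empty PMD
 * entry: with the statically allocated early shadow table when @early,
 * or with a freshly allocated zeroed page otherwise.
 */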
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

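/* As kasan_pte_offset(), one level up: populate the PUD entry if empty. */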
static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

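/* As kasan_pmd_offset(), one level up: populate the P4D entry if empty. */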
static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
				      bool early)
{
	if (p4d_none(READ_ONCE(*p4dp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__p4d_populate(p4dp, pud_phys, PMD_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

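/*
 * Map one shadow page per PTE over [addr, end). Early mappings share
 * the single kasan_early_shadow_page; later ones get private pages
 * initialized to KASAN_SHADOW_INIT.
 */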
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_raw_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}

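/* Populate the PTE level for each PMD entry covering [addr, end). */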
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

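/* Populate the PMD level for each PUD entry covering [addr, end). */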
static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

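/* Populate the PUD level for each P4D entry covering [addr, end). */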
static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = p4d_offset(pgdp, addr);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end);
}

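/*
 * Walk the top-level kernel page table and populate shadow mappings
 * for [addr, end). With @early, every level is backed by the shared
 * early shadow tables; otherwise fresh pages come from memblock.
 */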
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used here
	 * because it's a no-op on 2- and 3-level page
	 * table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

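/*
 * Replace the early zero shadow with real shadow memory: back the
 * shadow of the kernel image and of all memblock memory with fresh
 * pages, and map the remaining shadow ranges to the zero page.
 */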
static void __init kasan_init_shadow(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	phys_addr_t pa_start, pa_end;
	u64 i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are about to perform a proper setup of shadow memory.
	 * First we unmap the early shadow (the clear_pgds() call below).
	 * However, instrumented code can't execute without shadow memory,
	 * so tmp_pg_dir is used to keep the early shadow mapped until the
	 * full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
				   (void *)mod_shadow_start);
	kasan_populate_early_shadow((void *)kimg_shadow_end,
				   (void *)KASAN_SHADOW_END);

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_early_shadow((void *)mod_shadow_end,
					    (void *)kimg_shadow_start);

	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)__phys_to_virt(pa_start);
		void *end = (void *)__phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KASAN may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
}

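/*
 * init_task starts with a non-zero kasan_depth so that reports are
 * suppressed during early boot; clearing it switches reporting on.
 */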
static void __init kasan_init_depth(void)
{
	init_task.kasan_depth = 0;
}

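/* Top-level KASAN setup, called once during early boot. */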
void __init kasan_init(void)
{
	kasan_init_shadow();
	kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
	/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
	pr_info("KernelAddressSanitizer initialized\n");
#endif
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */