// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core hardware tag-based KASAN code.
 *
 * Copyright (c) 2020 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@google.com>
 */

#define pr_fmt(fmt) "kasan: " fmt

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/static_key.h>
#include <linux/string.h>
#include <linux/types.h>

#include "kasan.h"

enum kasan_arg {
	KASAN_ARG_DEFAULT,
	KASAN_ARG_OFF,
	KASAN_ARG_ON,
};

enum kasan_arg_mode {
	KASAN_ARG_MODE_DEFAULT,
	KASAN_ARG_MODE_SYNC,
	KASAN_ARG_MODE_ASYNC,
	KASAN_ARG_MODE_ASYMM,
};

enum kasan_arg_vmalloc {
	KASAN_ARG_VMALLOC_DEFAULT,
	KASAN_ARG_VMALLOC_OFF,
	KASAN_ARG_VMALLOC_ON,
};

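/*
 * kasan_arg and kasan_arg_mode are consulted again after boot (from the CPU
 * hotplug path via kasan_init_hw_tags_cpu() and kasan_enable_tagging()), so
 * they are __ro_after_init; kasan_arg_vmalloc is only read during
 * kasan_init_hw_tags() and can be discarded after init.
 */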
static enum kasan_arg kasan_arg __ro_after_init;
static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
static enum kasan_arg_vmalloc kasan_arg_vmalloc __initdata;

/*
 * Whether KASAN is enabled at all.
 * The value remains false until KASAN is initialized by kasan_init_hw_tags().
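 * Generic code tests this key through kasan_enabled() in <linux/kasan.h>.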
 */
DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
EXPORT_SYMBOL(kasan_flag_enabled);

/*
 * Whether the selected mode is synchronous, asynchronous, or asymmetric.
 * Defaults to KASAN_MODE_SYNC.
 */
enum kasan_mode kasan_mode __ro_after_init;
EXPORT_SYMBOL_GPL(kasan_mode);

/* Whether to enable vmalloc tagging. */
DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc);

#define PAGE_ALLOC_SAMPLE_DEFAULT	1
#define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT	3

/*
 * Sampling interval of page_alloc allocation (un)poisoning.
 * Defaults to no sampling: an interval of 1 means every allocation is
 * poisoned and unpoisoned.
 */
unsigned long kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;

/*
 * Minimum order of page_alloc allocations to be affected by sampling.
 * The default value is chosen to match both
 * PAGE_ALLOC_COSTLY_ORDER and SKB_FRAG_PAGE_ORDER.
 */
unsigned int kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;

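/*
 * Per-CPU count of page_alloc allocations still to be skipped before the
 * next sampled one; consumed by kasan_sample_page_alloc() in kasan.h.
 */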
DEFINE_PER_CPU(long, kasan_page_alloc_skip);

/* kasan=off/on */
static int __init early_kasan_flag(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "off"))
		kasan_arg = KASAN_ARG_OFF;
	else if (!strcmp(arg, "on"))
		kasan_arg = KASAN_ARG_ON;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan", early_kasan_flag);

/* kasan.mode=sync/async/asymm */
static int __init early_kasan_mode(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "sync"))
		kasan_arg_mode = KASAN_ARG_MODE_SYNC;
	else if (!strcmp(arg, "async"))
		kasan_arg_mode = KASAN_ARG_MODE_ASYNC;
	else if (!strcmp(arg, "asymm"))
		kasan_arg_mode = KASAN_ARG_MODE_ASYMM;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan.mode", early_kasan_mode);

/* kasan.vmalloc=off/on */
static int __init early_kasan_flag_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "off"))
		kasan_arg_vmalloc = KASAN_ARG_VMALLOC_OFF;
	else if (!strcmp(arg, "on"))
		kasan_arg_vmalloc = KASAN_ARG_VMALLOC_ON;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan.vmalloc", early_kasan_flag_vmalloc);

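/* Mode name used in the pr_info() printed by kasan_init_hw_tags() below. */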
static inline const char *kasan_mode_info(void)
{
	if (kasan_mode == KASAN_MODE_ASYNC)
		return "async";
	else if (kasan_mode == KASAN_MODE_ASYMM)
		return "asymm";
	else
		return "sync";
}

/* kasan.page_alloc.sample=<sampling interval> */
static int __init early_kasan_flag_page_alloc_sample(char *arg)
{
	int rv;

	if (!arg)
		return -EINVAL;

	rv = kstrtoul(arg, 0, &kasan_page_alloc_sample);
	if (rv)
		return rv;

	if (!kasan_page_alloc_sample || kasan_page_alloc_sample > LONG_MAX) {
		kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;
		return -EINVAL;
	}

	return 0;
}
early_param("kasan.page_alloc.sample", early_kasan_flag_page_alloc_sample);

/* kasan.page_alloc.sample.order=<minimum page order> */
static int __init early_kasan_flag_page_alloc_sample_order(char *arg)
{
	int rv;

	if (!arg)
		return -EINVAL;

	rv = kstrtouint(arg, 0, &kasan_page_alloc_sample_order);
	if (rv)
		return rv;

	if (kasan_page_alloc_sample_order > INT_MAX) {
		kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;
		return -EINVAL;
	}

	return 0;
}
early_param("kasan.page_alloc.sample.order", early_kasan_flag_page_alloc_sample_order);
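
/*
 * Illustrative example (not taken from this file): booting with
 *
 *   kasan=on kasan.mode=async kasan.page_alloc.sample=16
 *
 * enables hardware tag-based KASAN in asynchronous mode and, on each CPU,
 * poisons/unpoisons only roughly one in 16 page_alloc allocations of order
 * kasan.page_alloc.sample.order (default 3) or higher.
 */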

/*
 * kasan_init_hw_tags_cpu() is called for each CPU.
 * Not marked as __init as a CPU can be hot-plugged after boot.
 */
void kasan_init_hw_tags_cpu(void)
{
	/*
	 * There's no need to check that the hardware is MTE-capable here,
	 * as this function is only called for MTE-capable hardware.
	 */

	/*
	 * If KASAN is disabled via command line, don't initialize it.
	 * When this function is called, kasan_flag_enabled is not yet
	 * set by kasan_init_hw_tags(). Thus, check kasan_arg instead.
	 */
	if (kasan_arg == KASAN_ARG_OFF)
		return;

	/*
	 * Enable async or asymm modes only when explicitly requested
	 * through the command line.
	 */
	kasan_enable_tagging();
}

/* kasan_init_hw_tags() is called once on boot CPU. */
void __init kasan_init_hw_tags(void)
{
	/* If hardware doesn't support MTE, don't initialize KASAN. */
	if (!system_supports_mte())
		return;

	/* If KASAN is disabled via command line, don't initialize it. */
	if (kasan_arg == KASAN_ARG_OFF)
		return;

	switch (kasan_arg_mode) {
	case KASAN_ARG_MODE_DEFAULT:
		/* Default is specified by kasan_mode definition. */
		break;
	case KASAN_ARG_MODE_SYNC:
		kasan_mode = KASAN_MODE_SYNC;
		break;
	case KASAN_ARG_MODE_ASYNC:
		kasan_mode = KASAN_MODE_ASYNC;
		break;
	case KASAN_ARG_MODE_ASYMM:
		kasan_mode = KASAN_MODE_ASYMM;
		break;
	}

	switch (kasan_arg_vmalloc) {
	case KASAN_ARG_VMALLOC_DEFAULT:
		/* Default is specified by kasan_flag_vmalloc definition. */
		break;
	case KASAN_ARG_VMALLOC_OFF:
		static_branch_disable(&kasan_flag_vmalloc);
		break;
	case KASAN_ARG_VMALLOC_ON:
		static_branch_enable(&kasan_flag_vmalloc);
		break;
	}

	kasan_init_tags();

	/* KASAN is now initialized, enable it. */
	static_branch_enable(&kasan_flag_enabled);

	pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, vmalloc=%s, stacktrace=%s)\n",
		kasan_mode_info(),
		kasan_vmalloc_enabled() ? "on" : "off",
		kasan_stack_collection_enabled() ? "on" : "off");
}
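
/*
 * With the defaults (sync mode, vmalloc tagging and stack trace collection
 * left enabled), the pr_info() above prints, for example:
 *
 *   kasan: KernelAddressSanitizer initialized (hw-tags, mode=sync, vmalloc=on, stacktrace=on)
 */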

#ifdef CONFIG_KASAN_VMALLOC

static void unpoison_vmalloc_pages(const void *addr, u8 tag)
{
	struct vm_struct *area;
	int i;

	/*
	 * As hardware tag-based KASAN only tags VM_ALLOC vmalloc allocations
	 * (see the comment in __kasan_unpoison_vmalloc), all of the pages
	 * should belong to a single area.
	 */
	area = find_vm_area((void *)addr);
	if (WARN_ON(!area))
		return;

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page = area->pages[i];

		page_kasan_tag_set(page, tag);
	}
}

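/*
 * Zero the pages backing [start, start + size); clear_highpage_kasan_tagged()
 * clears each page through a tag-reset pointer, so the writes themselves are
 * not tag-checked.
 */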
static void init_vmalloc_pages(const void *start, unsigned long size)
{
	const void *addr;

	for (addr = start; addr < start + size; addr += PAGE_SIZE) {
		struct page *page = virt_to_page(addr);

		clear_highpage_kasan_tagged(page);
	}
}

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
				kasan_vmalloc_flags_t flags)
{
	u8 tag;
	unsigned long redzone_start, redzone_size;

	if (!kasan_vmalloc_enabled() || !is_vmalloc_or_module_addr(start)) {
		if (flags & KASAN_VMALLOC_INIT)
			init_vmalloc_pages(start, size);
		return (void *)start;
	}

	/*
	 * Don't tag non-VM_ALLOC mappings, as:
	 *
	 * 1. Unlike the software KASAN modes, hardware tag-based KASAN only
	 *    supports tagging physical memory. Therefore, it can only tag a
	 *    single mapping of normal physical pages.
	 * 2. Hardware tag-based KASAN can only tag memory mapped with special
	 *    mapping protection bits, see arch_vmap_pgprot_tagged().
	 *    As non-VM_ALLOC mappings can be mapped outside of vmalloc code,
	 *    providing these bits would require tracking all non-VM_ALLOC
	 *    mappers.
	 *
	 * Thus, for VM_ALLOC mappings, hardware tag-based KASAN only tags
	 * the first virtual mapping, which is created by vmalloc().
	 * Tagging the page_alloc memory backing that vmalloc() allocation is
	 * skipped, see ___GFP_SKIP_KASAN_UNPOISON.
	 *
	 * For non-VM_ALLOC allocations, page_alloc memory is tagged as usual.
	 */
	if (!(flags & KASAN_VMALLOC_VM_ALLOC)) {
		WARN_ON(flags & KASAN_VMALLOC_INIT);
		return (void *)start;
	}

	/*
	 * Don't tag executable memory.
	 * The kernel doesn't tolerate having the PC register tagged.
	 */
	if (!(flags & KASAN_VMALLOC_PROT_NORMAL)) {
		WARN_ON(flags & KASAN_VMALLOC_INIT);
		return (void *)start;
	}

	tag = kasan_random_tag();
	start = set_tag(start, tag);

	/* Unpoison and initialize memory up to size. */
	kasan_unpoison(start, size, flags & KASAN_VMALLOC_INIT);

	/*
	 * Explicitly poison and initialize the in-page vmalloc() redzone.
	 * Unlike software KASAN modes, hardware tag-based KASAN doesn't
	 * unpoison memory when populating shadow for vmalloc() space.
	 */
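	/*
	 * For example, with 4K pages and the 16-byte MTE granule, a 100-byte
	 * request starting at a page boundary gives redzone_start = start + 112
	 * and redzone_size = 4096 - 112 = 3984 bytes of KASAN_TAG_INVALID
	 * poison up to the end of the page.
	 */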
	redzone_start = round_up((unsigned long)start + size,
				 KASAN_GRANULE_SIZE);
	redzone_size = round_up(redzone_start, PAGE_SIZE) - redzone_start;
	kasan_poison((void *)redzone_start, redzone_size, KASAN_TAG_INVALID,
		     flags & KASAN_VMALLOC_INIT);

	/*
	 * Set per-page tag flags to allow accessing physical memory for the
	 * vmalloc() mapping through page_address(vmalloc_to_page()).
	 */
	unpoison_vmalloc_pages(start, tag);

	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
	/*
	 * No tagging here.
	 * The physical pages backing the vmalloc() allocation are poisoned
	 * through the usual page_alloc paths.
	 */
}

#endif

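/*
 * Select the MTE tag check fault mode requested on the command line. The
 * hw_enable_tagging_*() hooks are provided by the architecture (no-ops
 * where not implemented).
 */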
void kasan_enable_tagging(void)
{
	if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
		hw_enable_tagging_async();
	else if (kasan_arg_mode == KASAN_ARG_MODE_ASYMM)
		hw_enable_tagging_asymm();
	else
		hw_enable_tagging_sync();
}

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

EXPORT_SYMBOL_GPL(kasan_enable_tagging);

void kasan_force_async_fault(void)
{
	hw_force_async_tag_fault();
}
EXPORT_SYMBOL_GPL(kasan_force_async_fault);

#endif