xref: /openbmc/linux/mm/kasan/generic.c (revision d35ac6ac)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * This file contains core generic KASAN code.
4  *
5  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
6  * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
7  *
8  * Some code borrowed from https://github.com/xairy/kasan-prototype by
9  *        Andrey Konovalov <andreyknvl@gmail.com>
10  */
11 
12 #include <linux/export.h>
13 #include <linux/interrupt.h>
14 #include <linux/init.h>
15 #include <linux/kasan.h>
16 #include <linux/kernel.h>
17 #include <linux/kfence.h>
18 #include <linux/kmemleak.h>
19 #include <linux/linkage.h>
20 #include <linux/memblock.h>
21 #include <linux/memory.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/printk.h>
25 #include <linux/sched.h>
26 #include <linux/sched/task_stack.h>
27 #include <linux/slab.h>
28 #include <linux/stacktrace.h>
29 #include <linux/string.h>
30 #include <linux/types.h>
31 #include <linux/vmalloc.h>
32 #include <linux/bug.h>
33 
34 #include "kasan.h"
35 #include "../slab.h"
36 
37 /*
38  * All functions below are always inlined so that the compiler can
39  * perform better optimizations in each of __asan_loadX/__asan_storeX
40  * depending on the memory access size X.
41  */
42 
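/*
 * Shadow-encoding background (illustrative summary): in generic KASAN every
 * KASAN_GRANULE_SIZE-byte (8-byte) granule of memory is described by one
 * shadow byte, located via kasan_mem_to_shadow(), which essentially computes
 * (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET.  A shadow value
 * of 0 means the whole granule is accessible, 1..7 means only the first N
 * bytes are accessible, and negative values mark poisoned memory (redzones,
 * freed objects, and so on).  The helpers below compare the offset of the
 * accessed byte within its granule against that shadow value.
 */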
43 static __always_inline bool memory_is_poisoned_1(const void *addr)
44 {
45 	s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr);
46 
47 	if (unlikely(shadow_value)) {
48 		s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK;
49 		return unlikely(last_accessible_byte >= shadow_value);
50 	}
51 
52 	return false;
53 }
54 
55 static __always_inline bool memory_is_poisoned_2_4_8(const void *addr,
56 						unsigned long size)
57 {
58 	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr);
59 
60 	/*
61 	 * The access crosses an 8-byte (shadow granule) boundary. Such an
62 	 * access maps into two shadow bytes, so we need to check both of them.
63 	 */
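	/*
	 * Worked example (illustrative): for an 8-byte access starting at
	 * offset 3 within a granule, ((addr + 7) & KASAN_GRANULE_MASK) == 2,
	 * which is less than size - 1 == 7, so the access spills into the
	 * next granule and its shadow byte must be checked as well.
	 */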
64 	if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
65 		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
66 
67 	return memory_is_poisoned_1(addr + size - 1);
68 }
69 
70 static __always_inline bool memory_is_poisoned_16(const void *addr)
71 {
72 	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr);
73 
74 	/* An unaligned 16-byte access maps into 3 shadow bytes. */
75 	if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE)))
76 		return *shadow_addr || memory_is_poisoned_1(addr + 15);
77 
78 	return *shadow_addr;
79 }
80 
81 static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
82 					size_t size)
83 {
84 	while (size) {
85 		if (unlikely(*start))
86 			return (unsigned long)start;
87 		start++;
88 		size--;
89 	}
90 
91 	return 0;
92 }
93 
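/*
 * Scan the shadow range [start, end) for a non-zero byte: an unaligned
 * prefix is checked byte by byte, the aligned middle one u64 word at a
 * time, and the remaining tail byte by byte again.  Returns the address of
 * the first non-zero byte found, or 0 if the whole range is zero.
 */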
94 static __always_inline unsigned long memory_is_nonzero(const void *start,
95 						const void *end)
96 {
97 	unsigned int words;
98 	unsigned long ret;
99 	unsigned int prefix = (unsigned long)start % 8;
100 
101 	if (end - start <= 16)
102 		return bytes_is_nonzero(start, end - start);
103 
104 	if (prefix) {
105 		prefix = 8 - prefix;
106 		ret = bytes_is_nonzero(start, prefix);
107 		if (unlikely(ret))
108 			return ret;
109 		start += prefix;
110 	}
111 
112 	words = (end - start) / 8;
113 	while (words) {
114 		if (unlikely(*(u64 *)start))
115 			return bytes_is_nonzero(start, 8);
116 		start += 8;
117 		words--;
118 	}
119 
120 	return bytes_is_nonzero(start, (end - start) % 8);
121 }
122 
123 static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
124 {
125 	unsigned long ret;
126 
127 	ret = memory_is_nonzero(kasan_mem_to_shadow(addr),
128 			kasan_mem_to_shadow(addr + size - 1) + 1);
129 
130 	if (unlikely(ret)) {
131 		const void *last_byte = addr + size - 1;
132 		s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
133 
134 		if (unlikely(ret != (unsigned long)last_shadow ||
135 			(((long)last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
136 			return true;
137 	}
138 	return false;
139 }
140 
141 static __always_inline bool memory_is_poisoned(const void *addr, size_t size)
142 {
143 	if (__builtin_constant_p(size)) {
144 		switch (size) {
145 		case 1:
146 			return memory_is_poisoned_1(addr);
147 		case 2:
148 		case 4:
149 		case 8:
150 			return memory_is_poisoned_2_4_8(addr, size);
151 		case 16:
152 			return memory_is_poisoned_16(addr);
153 		default:
154 			BUILD_BUG();
155 		}
156 	}
157 
158 	return memory_is_poisoned_n(addr, size);
159 }
160 
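/*
 * Core range check: returns true when the access looks valid (or when KASAN
 * is not ready to check yet), and normally false once a bad access has been
 * reported.  Zero-sized accesses are always fine; a wrapping range or an
 * address without shadow metadata is reported right away; otherwise the
 * shadow bytes covering the range are inspected.
 */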
161 static __always_inline bool check_region_inline(const void *addr,
162 						size_t size, bool write,
163 						unsigned long ret_ip)
164 {
165 	if (!kasan_arch_is_ready())
166 		return true;
167 
168 	if (unlikely(size == 0))
169 		return true;
170 
171 	if (unlikely(addr + size < addr))
172 		return !kasan_report(addr, size, write, ret_ip);
173 
174 	if (unlikely(!addr_has_metadata(addr)))
175 		return !kasan_report(addr, size, write, ret_ip);
176 
177 	if (likely(!memory_is_poisoned(addr, size)))
178 		return true;
179 
180 	return !kasan_report(addr, size, write, ret_ip);
181 }
182 
183 bool kasan_check_range(const void *addr, size_t size, bool write,
184 					unsigned long ret_ip)
185 {
186 	return check_region_inline(addr, size, write, ret_ip);
187 }
188 
189 bool kasan_byte_accessible(const void *addr)
190 {
191 	s8 shadow_byte;
192 
193 	if (!kasan_arch_is_ready())
194 		return true;
195 
196 	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
197 
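	/*
	 * The granule holding this byte must not be poisoned: a shadow value
	 * of 0 (fully accessible granule) or 1..KASAN_GRANULE_SIZE-1
	 * (partially accessible granule) is acceptable here.
	 */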
198 	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
199 }
200 
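/*
 * Shrinking or destroying a cache drains that cache's objects from the
 * KASAN quarantine so the underlying slabs can actually be released.
 */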
201 void kasan_cache_shrink(struct kmem_cache *cache)
202 {
203 	kasan_quarantine_remove_cache(cache);
204 }
205 
206 void kasan_cache_shutdown(struct kmem_cache *cache)
207 {
208 	if (!__kmem_cache_empty(cache))
209 		kasan_quarantine_remove_cache(cache);
210 }
211 
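/*
 * Unpoison a compiler-described global variable and poison the redzone the
 * compiler placed after it (global->size_with_redzone covers the object
 * plus that redzone).
 */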
212 static void register_global(struct kasan_global *global)
213 {
214 	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);
215 
216 	kasan_unpoison(global->beg, global->size, false);
217 
218 	kasan_poison(global->beg + aligned_size,
219 		     global->size_with_redzone - aligned_size,
220 		     KASAN_GLOBAL_REDZONE, false);
221 }
222 
223 void __asan_register_globals(void *ptr, ssize_t size)
224 {
225 	int i;
226 	struct kasan_global *globals = ptr;
227 
228 	for (i = 0; i < size; i++)
229 		register_global(&globals[i]);
230 }
231 EXPORT_SYMBOL(__asan_register_globals);
232 
233 void __asan_unregister_globals(void *ptr, ssize_t size)
234 {
235 }
236 EXPORT_SYMBOL(__asan_unregister_globals);
237 
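/*
 * The compiler instruments each 1/2/4/8/16-byte memory access with a call
 * to one of the __asan_load*()/__asan_store*() functions defined below; the
 * *_noabort variants are plain aliases, since in-kernel KASAN only reports
 * bad accesses rather than aborting.
 */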
238 #define DEFINE_ASAN_LOAD_STORE(size)					\
239 	void __asan_load##size(void *addr)				\
240 	{								\
241 		check_region_inline(addr, size, false, _RET_IP_);	\
242 	}								\
243 	EXPORT_SYMBOL(__asan_load##size);				\
244 	__alias(__asan_load##size)					\
245 	void __asan_load##size##_noabort(void *);			\
246 	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
247 	void __asan_store##size(void *addr)				\
248 	{								\
249 		check_region_inline(addr, size, true, _RET_IP_);	\
250 	}								\
251 	EXPORT_SYMBOL(__asan_store##size);				\
252 	__alias(__asan_store##size)					\
253 	void __asan_store##size##_noabort(void *);			\
254 	EXPORT_SYMBOL(__asan_store##size##_noabort)
255 
256 DEFINE_ASAN_LOAD_STORE(1);
257 DEFINE_ASAN_LOAD_STORE(2);
258 DEFINE_ASAN_LOAD_STORE(4);
259 DEFINE_ASAN_LOAD_STORE(8);
260 DEFINE_ASAN_LOAD_STORE(16);
261 
262 void __asan_loadN(void *addr, ssize_t size)
263 {
264 	kasan_check_range(addr, size, false, _RET_IP_);
265 }
266 EXPORT_SYMBOL(__asan_loadN);
267 
268 __alias(__asan_loadN)
269 void __asan_loadN_noabort(void *, ssize_t);
270 EXPORT_SYMBOL(__asan_loadN_noabort);
271 
272 void __asan_storeN(void *addr, ssize_t size)
273 {
274 	kasan_check_range(addr, size, true, _RET_IP_);
275 }
276 EXPORT_SYMBOL(__asan_storeN);
277 
278 __alias(__asan_storeN)
279 void __asan_storeN_noabort(void *, ssize_t);
280 EXPORT_SYMBOL(__asan_storeN_noabort);
281 
282 /* Stub to satisfy calls emitted by the compiler; intentionally a no-op. */
283 void __asan_handle_no_return(void) {}
284 EXPORT_SYMBOL(__asan_handle_no_return);
285 
286 /* Emitted by the compiler to poison alloca()ed objects. */
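/*
 * Layout sketch (illustrative): the left redzone occupies the
 * KASAN_ALLOCA_REDZONE_SIZE bytes just below addr, the object itself stays
 * accessible, and the right redzone starts at addr + round_up(size,
 * KASAN_GRANULE_SIZE) and covers the alignment padding plus another
 * KASAN_ALLOCA_REDZONE_SIZE bytes.
 */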
287 void __asan_alloca_poison(void *addr, ssize_t size)
288 {
289 	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
290 	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
291 			rounded_up_size;
292 	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);
293 
294 	const void *left_redzone = (const void *)(addr -
295 			KASAN_ALLOCA_REDZONE_SIZE);
296 	const void *right_redzone = (const void *)(addr + rounded_up_size);
297 
298 	WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE));
299 
300 	kasan_unpoison((const void *)(addr + rounded_down_size),
301 			size - rounded_down_size, false);
302 	kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
303 		     KASAN_ALLOCA_LEFT, false);
304 	kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
305 		     KASAN_ALLOCA_RIGHT, false);
306 }
307 EXPORT_SYMBOL(__asan_alloca_poison);
308 
309 /* Emitted by the compiler to unpoison alloca()ed areas when the stack unwinds. */
310 void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom)
311 {
312 	if (unlikely(!stack_top || stack_top > (void *)stack_bottom))
313 		return;
314 
315 	kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false);
316 }
317 EXPORT_SYMBOL(__asan_allocas_unpoison);
318 
319 /* Emitted by the compiler to [un]poison local variables. */
320 #define DEFINE_ASAN_SET_SHADOW(byte) \
321 	void __asan_set_shadow_##byte(const void *addr, ssize_t size)	\
322 	{								\
323 		__memset((void *)addr, 0x##byte, size);			\
324 	}								\
325 	EXPORT_SYMBOL(__asan_set_shadow_##byte)
326 
327 DEFINE_ASAN_SET_SHADOW(00);
328 DEFINE_ASAN_SET_SHADOW(f1);
329 DEFINE_ASAN_SET_SHADOW(f2);
330 DEFINE_ASAN_SET_SHADOW(f3);
331 DEFINE_ASAN_SET_SHADOW(f5);
332 DEFINE_ASAN_SET_SHADOW(f8);
333 
334 /* Only allow cache merging when no per-object metadata is present. */
335 slab_flags_t kasan_never_merge(void)
336 {
337 	if (!kasan_requires_meta())
338 		return 0;
339 	return SLAB_KASAN;
340 }
341 
342 /*
343  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
344  * For larger allocations, larger redzones are used.
345  */
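/*
 * For example, per the table below, a 40-byte object gets a 16-byte redzone
 * while a 3000-byte object gets a 128-byte one.
 */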
346 static inline unsigned int optimal_redzone(unsigned int object_size)
347 {
348 	return
349 		object_size <= 64        - 16   ? 16 :
350 		object_size <= 128       - 32   ? 32 :
351 		object_size <= 512       - 64   ? 64 :
352 		object_size <= 4096      - 128  ? 128 :
353 		object_size <= (1 << 14) - 256  ? 256 :
354 		object_size <= (1 << 15) - 512  ? 512 :
355 		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
356 }
357 
358 void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
359 			  slab_flags_t *flags)
360 {
361 	unsigned int ok_size;
362 	unsigned int optimal_size;
363 
364 	if (!kasan_requires_meta())
365 		return;
366 
367 	/*
368 	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN
369 	 * and that thus have per-object metadata.
370 	 * Currently this flag is used in two places:
371 	 * 1. In slab_ksize() to account for per-object metadata when
372 	 *    calculating the size of the accessible memory within the object.
373 	 * 2. In slab_common.c via kasan_never_merge() to prevent merging of
374 	 *    caches with per-object metadata.
375 	 */
376 	*flags |= SLAB_KASAN;
377 
378 	ok_size = *size;
379 
380 	/* Add alloc meta into redzone. */
381 	cache->kasan_info.alloc_meta_offset = *size;
382 	*size += sizeof(struct kasan_alloc_meta);
383 
384 	/*
385 	 * If alloc meta doesn't fit, don't add it.
386 	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
387 	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
388 	 * larger sizes.
389 	 */
390 	if (*size > KMALLOC_MAX_SIZE) {
391 		cache->kasan_info.alloc_meta_offset = 0;
392 		*size = ok_size;
393 		/* Continue, since free meta might still fit. */
394 	}
395 
396 	/*
397 	 * Add free meta into redzone when it's not possible to store
398 	 * it in the object. This is the case when:
399 	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
400 	 *    be touched after it was freed, or
401 	 * 2. Object has a constructor, which means it's expected to
402 	 *    retain its content until the next allocation, or
403 	 * 3. Object is too small.
404 	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
405 	 */
406 	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
407 	    cache->object_size < sizeof(struct kasan_free_meta)) {
408 		ok_size = *size;
409 
410 		cache->kasan_info.free_meta_offset = *size;
411 		*size += sizeof(struct kasan_free_meta);
412 
413 		/* If free meta doesn't fit, don't add it. */
414 		if (*size > KMALLOC_MAX_SIZE) {
415 			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
416 			*size = ok_size;
417 		}
418 	}
419 
420 	/* Calculate size with optimal redzone. */
421 	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
422 	/* Limit it to KMALLOC_MAX_SIZE (relevant only for SLAB). */
423 	if (optimal_size > KMALLOC_MAX_SIZE)
424 		optimal_size = KMALLOC_MAX_SIZE;
425 	/* Use optimal size if the size with added metas is not large enough. */
426 	if (*size < optimal_size)
427 		*size = optimal_size;
428 }
429 
430 struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
431 					      const void *object)
432 {
433 	if (!cache->kasan_info.alloc_meta_offset)
434 		return NULL;
435 	return (void *)object + cache->kasan_info.alloc_meta_offset;
436 }
437 
438 struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
439 					    const void *object)
440 {
441 	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
442 	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
443 		return NULL;
444 	return (void *)object + cache->kasan_info.free_meta_offset;
445 }
446 
447 void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
448 {
449 	struct kasan_alloc_meta *alloc_meta;
450 
451 	alloc_meta = kasan_get_alloc_meta(cache, object);
452 	if (alloc_meta)
453 		__memset(alloc_meta, 0, sizeof(*alloc_meta));
454 }
455 
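/*
 * Size of the KASAN metadata tied to an object: with in_object == true, the
 * free meta stored inside the object itself (only present there when
 * free_meta_offset is 0); otherwise, the alloc and/or free meta placed in
 * the redzone behind the object.
 */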
456 size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
457 {
458 	struct kasan_cache *info = &cache->kasan_info;
459 
460 	if (!kasan_requires_meta())
461 		return 0;
462 
463 	if (in_object)
464 		return (info->free_meta_offset ?
465 			0 : sizeof(struct kasan_free_meta));
466 	else
467 		return (info->alloc_meta_offset ?
468 			sizeof(struct kasan_alloc_meta) : 0) +
469 			((info->free_meta_offset &&
470 			info->free_meta_offset != KASAN_NO_FREE_META) ?
471 			sizeof(struct kasan_free_meta) : 0);
472 }
473 
474 static void __kasan_record_aux_stack(void *addr, bool can_alloc)
475 {
476 	struct slab *slab = kasan_addr_to_slab(addr);
477 	struct kmem_cache *cache;
478 	struct kasan_alloc_meta *alloc_meta;
479 	void *object;
480 
481 	if (is_kfence_address(addr) || !slab)
482 		return;
483 
484 	cache = slab->slab_cache;
485 	object = nearest_obj(cache, slab, addr);
486 	alloc_meta = kasan_get_alloc_meta(cache, object);
487 	if (!alloc_meta)
488 		return;
489 
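	/* Keep the two most recent auxiliary call stacks, newest in slot 0. */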
490 	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
491 	alloc_meta->aux_stack[0] = kasan_save_stack(0, can_alloc);
492 }
493 
494 void kasan_record_aux_stack(void *addr)
495 {
496 	return __kasan_record_aux_stack(addr, true);
497 }
498 
499 void kasan_record_aux_stack_noalloc(void *addr)
500 {
501 	return __kasan_record_aux_stack(addr, false);
502 }
503 
504 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
505 {
506 	struct kasan_alloc_meta *alloc_meta;
507 
508 	alloc_meta = kasan_get_alloc_meta(cache, object);
509 	if (alloc_meta)
510 		kasan_set_track(&alloc_meta->alloc_track, flags);
511 }
512 
513 void kasan_save_free_info(struct kmem_cache *cache, void *object)
514 {
515 	struct kasan_free_meta *free_meta;
516 
517 	free_meta = kasan_get_free_meta(cache, object);
518 	if (!free_meta)
519 		return;
520 
521 	kasan_set_track(&free_meta->free_track, 0);
522 	/* The object was freed and has free track set. */
523 	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK;
524 }
525