// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}
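
/*
 * Worked example of the shadow encoding used above, for the generic
 * KASAN granule of KASAN_SHADOW_SCALE_SIZE == 8 bytes: a shadow value
 * of 0 means all 8 bytes of the granule are accessible, a value k in
 * 1..7 means only the first k bytes are, and a negative (s8) value
 * marks the whole granule poisoned. So a 1-byte read at offset
 * (addr & KASAN_SHADOW_MASK) == 5 with shadow value 5 fires the check
 * (5 >= 5, only offsets 0..4 are valid), while shadow value 6 lets it
 * pass. A redzone byte such as 0xFA is negative as s8, so every offset
 * compares >= it and the access is always reported.
 */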

static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * The access crosses an 8-byte (shadow granule) boundary. Such an
	 * access maps onto 2 shadow bytes, so we need to check them both.
	 */
	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}
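
/*
 * For illustration: an 8-byte load starting at offset 4 of a granule
 * ends at offset (addr + 7) & KASAN_SHADOW_MASK == 3, and 3 < 7
 * reveals that the access started in the previous granule. A crossing
 * access always touches the last byte of the first granule, so that
 * granule's shadow must be exactly 0; the second granule may
 * legitimately be partial, hence the byte-precise
 * memory_is_poisoned_1() check on the final byte.
 */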

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* An unaligned 16-byte access maps onto 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}
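
/*
 * Sketch of the unaligned case: starting at offset o (o != 0), the
 * access covers bytes o..7 of granule 0, all of granule 1, and bytes
 * 0..o-1 of granule 2. The u16 read checks the first two shadow bytes,
 * which must both be 0, and memory_is_poisoned_1(addr + 15) handles
 * the possibly partial third granule.
 */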

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}
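
/*
 * Example of the scan above: for a 30-byte shadow range starting at an
 * address with start % 8 == 5, the function checks 3 single bytes to
 * reach 8-byte alignment, then 3 u64 words (24 bytes), then the 3
 * remaining tail bytes. On the first nonzero u64 it falls back to
 * bytes_is_nonzero() to pin down the exact shadow byte address.
 */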

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}
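
/*
 * The only nonzero shadow byte a valid region may contain is the last
 * one, describing a partially used final granule. For example, a
 * 13-byte access of a granule-aligned 13-byte object sees shadow
 * bytes 0x00 0x05: memory_is_nonzero() flags the 0x05, but since it
 * is the last shadow byte and the final byte's offset 4 is below 5,
 * the access is accepted.
 */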

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

static __always_inline bool check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		return !kasan_report(addr, size, write, ret_ip);
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return true;

	return !kasan_report(addr, size, write, ret_ip);
}
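
/*
 * The checks above first handle cases the shadow lookup cannot: a
 * zero-size access is always fine; a range whose end wraps around the
 * address space, or that lies below the lowest address covered by
 * shadow memory, is reported without touching the shadow. Only then is
 * the shadow itself consulted. kasan_report() returns true only when a
 * report was actually printed, so a suppressed report lets the access
 * count as valid.
 */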

bool check_memory_region(unsigned long addr, size_t size, bool write,
				unsigned long ret_ip)
{
	return check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		quarantine_remove_cache(cache);
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}
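
/*
 * Shadow layout sketch for a hypothetical 10-byte global with a
 * redzone: aligned_size is 16, so kasan_unpoison_shadow() writes
 * shadow 0x00 for the first granule and 0x02 for the partial second
 * one, and the remaining size_with_redzone - 16 bytes are poisoned
 * with KASAN_GLOBAL_REDZONE.
 */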

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
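
/*
 * DEFINE_ASAN_LOAD_STORE(4), for instance, expands to roughly:
 *
 *	void __asan_load4(unsigned long addr)
 *	{
 *		check_memory_region_inline(addr, 4, false, _RET_IP_);
 *	}
 *
 * plus the store variant and the _noabort aliases. The compiler
 * instruments every 4-byte memory read with a call to __asan_load4()
 * before the access; the _noabort names match the symbols the compiler
 * emits in recovery mode, since the kernel reports but does not abort.
 */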

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
			      size - rounded_down_size);
	kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_LEFT);
	kasan_poison_shadow(right_redzone,
			padding_size + KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);
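
/*
 * Resulting layout for a hypothetical 40-byte alloca() at an address
 * aligned to KASAN_ALLOCA_REDZONE_SIZE (32 bytes):
 *
 *	[addr - 32, addr)	left redzone (KASAN_ALLOCA_LEFT)
 *	[addr, addr + 40)	the object itself, addressable
 *	[addr + 40, addr + 96)	padding + right redzone (KASAN_ALLOCA_RIGHT)
 *
 * Here rounded_up_size == 40 and padding_size == 64 - 40 == 24, so the
 * right redzone poisons 24 + 32 bytes starting at addr + 40.
 */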

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte) \
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);
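
/*
 * These helpers write the given pattern directly into shadow memory,
 * e.g. __asan_set_shadow_f1(shadow_addr, n) memsets n shadow bytes to
 * 0xf1. In the compiler's ASan stack ABI the patterns mark stack frame
 * regions: 0xf1/0xf2/0xf3 for the left/mid/right redzones, 0xf8 for
 * out-of-scope locals, and 0x00 to make memory addressable again.
 */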

void kasan_record_aux_stack(void *addr)
{
	struct page *page = kasan_addr_to_page(addr);
	struct kmem_cache *cache;
	struct kasan_alloc_meta *alloc_info;
	void *object;

	if (!(page && PageSlab(page)))
		return;

	cache = page->slab_cache;
	object = nearest_obj(cache, page, addr);
	alloc_info = get_alloc_info(cache, object);

	/* Record the last two call_rcu() call stacks. */
	alloc_info->aux_stack[1] = alloc_info->aux_stack[0];
	alloc_info->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
}

void kasan_set_free_info(struct kmem_cache *cache,
				void *object, u8 tag)
{
	struct kasan_free_meta *free_meta;

	free_meta = get_free_info(cache, object);
	kasan_set_track(&free_meta->free_track, GFP_NOWAIT);

	/* The object was freed and now has its free track set. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREETRACK;
}

struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
				void *object, u8 tag)
{
	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_KMALLOC_FREETRACK)
		return NULL;
	return &get_free_info(cache, object)->free_track;
}