// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so that the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

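/*
 * Each shadow byte tracks one 8-byte granule: 0 means the whole granule
 * is accessible, a value 1..7 means only the first N bytes are, and
 * negative values mark poisoned memory. So a 1-byte access is bad iff
 * the shadow is nonzero and the byte's offset within its granule is not
 * below the shadow value.
 */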
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

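/*
 * For 2-, 4- and 8-byte accesses that fit in one granule, checking the
 * last byte is enough: if its offset is below the shadow value, so is
 * every earlier byte of the access. An access that crosses a granule
 * boundary additionally needs the first granule fully accessible.
 */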
static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * The access crosses an 8-byte (shadow granule) boundary. Such an
	 * access maps into two shadow bytes, so we need to check them both.
	 */
	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

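/*
 * An aligned 16-byte access covers exactly two granules, so both shadow
 * bytes are tested with a single u16 load; an unaligned one spills into
 * a third granule, handled by the extra last-byte check.
 */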
static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* An unaligned 16-byte access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

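/*
 * Returns the address of the first nonzero byte in [start, start + size),
 * or 0 if all of them are zero.
 */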
static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

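/*
 * Scans the shadow region [start, end) for a nonzero byte: short regions
 * and the unaligned prefix are checked byte by byte, the aligned middle
 * one u64 word at a time.
 */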
static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

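/*
 * Generic check for arbitrary sizes. A nonzero shadow byte is only a bug
 * if it is not the shadow of the access's last byte, or if the last
 * byte's offset within its granule is not covered by the shadow value.
 */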
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

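/*
 * For compile-time constant sizes the compiler selects one of the
 * specialized checks above; everything else falls back to the generic
 * shadow scan.
 */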
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

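/*
 * The core check: zero-size accesses are ignored, addresses below the
 * shadow-mapped range have no valid shadow and are reported right away,
 * and everything else goes through the poison check.
 */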
static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

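/* Out-of-line entry point for KASAN code outside this file. */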
void check_memory_region(unsigned long addr, size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

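/*
 * Drop quarantined objects belonging to @cache so that shrinking can
 * actually release their slabs.
 */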
void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

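/*
 * On cache destruction, purge the quarantine of objects from @cache:
 * they must not outlive the cache they were allocated from.
 */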
void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		quarantine_remove_cache(cache);
}

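/*
 * Called for each global descriptor passed to __asan_register_globals():
 * the global itself is unpoisoned and the compiler-emitted redzone
 * behind it is poisoned.
 */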
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

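/*
 * Instrumented code calls __asan_loadX()/__asan_storeX() before every
 * memory access of size X. The _noabort variants are plain aliases:
 * KASAN reports and continues rather than aborting, so both entry
 * points behave identically.
 */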
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* To shut up compiler complaints. */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by the compiler to poison alloca()ed objects. */
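/*
 * Resulting shadow layout, with addr aligned to KASAN_ALLOCA_REDZONE_SIZE:
 *
 *	[left redzone][object, last granule partial][padding][right redzone]
 *
 * Only the object itself stays accessible; the padding rounding it up to
 * KASAN_ALLOCA_REDZONE_SIZE is poisoned together with the right redzone.
 */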
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
			      size - rounded_down_size);
	kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_LEFT);
	kasan_poison_shadow(right_redzone,
			padding_size + KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/*
 * Emitted by the compiler to unpoison alloca()ed areas when the stack
 * unwinds.
 */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
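/*
 * The 0xf1-0xf8 values below match the shadow encodings the compiler
 * uses for stack frame redzones; 00 marks memory as accessible again.
 */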
#define DEFINE_ASAN_SET_SHADOW(byte) \
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);