xref: /openbmc/linux/mm/kmsan/instrumentation.c (revision 6c8c1406)
// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN compiler API.
 *
 * This file implements __msan_XXX hooks that Clang inserts into the code
 * compiled with -fsanitize=kernel-memory.
 * See Documentation/dev-tools/kmsan.rst for more information on how KMSAN
 * instrumentation works.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include "kmsan.h"
#include <linux/gfp.h>
#include <linux/kmsan_string.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

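/*
 * Helper for __msan_instrument_asm_store(): userspace addresses and addresses
 * without allocated shadow metadata cannot be unpoisoned and are treated as
 * bad.
 */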
static inline bool is_bad_asm_addr(void *addr, uintptr_t size, bool is_store)
{
	if ((u64)addr < TASK_SIZE)
		return true;
	if (!kmsan_get_metadata(addr, KMSAN_META_SHADOW))
		return true;
	return false;
}

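/*
 * Note: shadow/origin lookups may be called from regions where user access is
 * enabled (e.g. between user_access_begin() and user_access_end()), so the
 * user access state is saved and cleared for the duration of the lookup.
 */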
static inline struct shadow_origin_ptr
get_shadow_origin_ptr(void *addr, u64 size, bool store)
{
	unsigned long ua_flags = user_access_save();
	struct shadow_origin_ptr ret;

	ret = kmsan_get_shadow_origin_ptr(addr, size, store);
	user_access_restore(ua_flags);
	return ret;
}

/* Get shadow and origin pointers for a memory load with non-standard size. */
struct shadow_origin_ptr __msan_metadata_ptr_for_load_n(void *addr,
							uintptr_t size)
{
	return get_shadow_origin_ptr(addr, size, /*store*/ false);
}
EXPORT_SYMBOL(__msan_metadata_ptr_for_load_n);

/* Get shadow and origin pointers for a memory store with non-standard size. */
struct shadow_origin_ptr __msan_metadata_ptr_for_store_n(void *addr,
							 uintptr_t size)
{
	return get_shadow_origin_ptr(addr, size, /*store*/ true);
}
EXPORT_SYMBOL(__msan_metadata_ptr_for_store_n);

/*
 * Declare functions that obtain shadow/origin pointers for loads and stores
 * with fixed size.
 */
#define DECLARE_METADATA_PTR_GETTER(size)                                  \
	struct shadow_origin_ptr __msan_metadata_ptr_for_load_##size(      \
		void *addr)                                                \
	{                                                                  \
		return get_shadow_origin_ptr(addr, size, /*store*/ false); \
	}                                                                  \
	EXPORT_SYMBOL(__msan_metadata_ptr_for_load_##size);                \
	struct shadow_origin_ptr __msan_metadata_ptr_for_store_##size(     \
		void *addr)                                                \
	{                                                                  \
		return get_shadow_origin_ptr(addr, size, /*store*/ true);  \
	}                                                                  \
	EXPORT_SYMBOL(__msan_metadata_ptr_for_store_##size)

DECLARE_METADATA_PTR_GETTER(1);
DECLARE_METADATA_PTR_GETTER(2);
DECLARE_METADATA_PTR_GETTER(4);
DECLARE_METADATA_PTR_GETTER(8);
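
/*
 * As an illustration, DECLARE_METADATA_PTR_GETTER(1) above expands to the
 * equivalent of:
 *
 *	struct shadow_origin_ptr __msan_metadata_ptr_for_load_1(void *addr)
 *	{
 *		return get_shadow_origin_ptr(addr, 1, false);
 *	}
 *	EXPORT_SYMBOL(__msan_metadata_ptr_for_load_1);
 *
 * plus the corresponding __msan_metadata_ptr_for_store_1() definition.
 */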

/*
 * Handle a memory store performed by inline assembly. KMSAN conservatively
 * attempts to unpoison the outputs of asm() directives to prevent false
 * positives caused by missed stores.
 */
void __msan_instrument_asm_store(void *addr, uintptr_t size)
{
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	ua_flags = user_access_save();
	/*
	 * Most of the accesses are below 32 bytes. The two exceptions so far
	 * are clwb() (64 bytes) and FPU state (512 bytes).
	 * It's unlikely that the assembly will touch more than 512 bytes.
	 */
	if (size > 512) {
		WARN_ONCE(1, "assembly store size too big: %ld\n", size);
		size = 8;
	}
	if (is_bad_asm_addr(addr, size, /*is_store*/ true)) {
		user_access_restore(ua_flags);
		return;
	}
	kmsan_enter_runtime();
	/* Unpoison the memory on a best-effort basis. */
	kmsan_internal_unpoison_memory(addr, size, /*checked*/ false);
	kmsan_leave_runtime();
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__msan_instrument_asm_store);
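
/*
 * For example (x86, simplified): for an asm statement with an 8-byte memory
 * output such as
 *
 *	asm volatile("movq %%rax, %0" : "=m"(var));
 *
 * the instrumentation pass is expected to emit a call to
 * __msan_instrument_asm_store(&var, 8) next to the asm block, so that @var
 * does not stay poisoned after the assembly store.
 */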

/*
 * The KMSAN instrumentation pass replaces LLVM memcpy, memmove and memset
 * intrinsics with calls to the respective __msan_ functions. We use
 * get_param0_metadata() and set_retval_metadata() to store the shadow/origin
 * values for the destination argument of these functions and use them for the
 * functions' return values.
 */
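
/*
 * Roughly speaking, a call
 *
 *	p = memcpy(dst, src, n);
 *
 * in instrumented code becomes
 *
 *	*(u64 *)cstate->param_tls = <shadow of dst>;
 *	cstate->param_origin_tls[0] = <origin of dst>;
 *	p = __msan_memcpy(dst, src, n);
 *	<shadow of p> = *(u64 *)cstate->retval_tls;
 *	<origin of p> = cstate->retval_origin_tls;
 *
 * where @cstate is the kmsan_context_state returned by
 * __msan_get_context_state().
 */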
static inline void get_param0_metadata(u64 *shadow,
				       depot_stack_handle_t *origin)
{
	struct kmsan_ctx *ctx = kmsan_get_context();

	*shadow = *(u64 *)(ctx->cstate.param_tls);
	*origin = ctx->cstate.param_origin_tls[0];
}

static inline void set_retval_metadata(u64 shadow, depot_stack_handle_t origin)
{
	struct kmsan_ctx *ctx = kmsan_get_context();

	*(u64 *)(ctx->cstate.retval_tls) = shadow;
	ctx->cstate.retval_origin_tls = origin;
}

/* Handle llvm.memmove intrinsic. */
void *__msan_memmove(void *dst, const void *src, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memmove(dst, src, n);
	if (!n)
		/* Some people call memmove() with zero length. */
		return result;
	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	kmsan_internal_memmove_metadata(dst, (void *)src, n);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memmove);

/* Handle llvm.memcpy intrinsic. */
void *__msan_memcpy(void *dst, const void *src, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memcpy(dst, src, n);
	if (!n)
		/* Some people call memcpy() with zero length. */
		return result;

	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	/* Using memmove instead of memcpy doesn't affect correctness. */
	kmsan_internal_memmove_metadata(dst, (void *)src, n);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memcpy);

/* Handle llvm.memset intrinsic. */
void *__msan_memset(void *dst, int c, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memset(dst, c, n);
	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	/*
	 * Clang doesn't pass parameter metadata here, so it is impossible to
	 * use the shadow of @c to set up the shadow for @dst.
	 */
	kmsan_internal_unpoison_memory(dst, n, /*checked*/ false);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memset);

/*
 * Create a new origin from an old one. This is done when storing an
 * uninitialized value to memory. When reporting an error, KMSAN unrolls and
 * prints the whole chain of stores that preceded the use of this value.
 */
depot_stack_handle_t __msan_chain_origin(depot_stack_handle_t origin)
{
	depot_stack_handle_t ret = 0;
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return ret;

	ua_flags = user_access_save();

	/* Creating new origins may allocate memory. */
	kmsan_enter_runtime();
	ret = kmsan_internal_chain_origin(origin);
	kmsan_leave_runtime();
	user_access_restore(ua_flags);
	return ret;
}
EXPORT_SYMBOL(__msan_chain_origin);

/* Poison a local variable when entering a function. */
void __msan_poison_alloca(void *address, uintptr_t size, char *descr)
{
	depot_stack_handle_t handle;
	unsigned long entries[4];
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	ua_flags = user_access_save();
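	/*
	 * Synthesize a fake stack trace that holds the variable description
	 * and the caller addresses. KMSAN_ALLOCA_MAGIC_ORIGIN marks the
	 * resulting record as a local variable origin for the reporting code.
	 */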
	entries[0] = KMSAN_ALLOCA_MAGIC_ORIGIN;
	entries[1] = (u64)descr;
	entries[2] = (u64)__builtin_return_address(0);
	/*
	 * With frame pointers enabled, it is possible to quickly fetch the
	 * second frame of the caller stack without calling the unwinder.
	 * Without them, simply do not bother.
	 */
	if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER))
		entries[3] = (u64)__builtin_return_address(1);
	else
		entries[3] = 0;

	/* stack_depot_save() may allocate memory. */
	kmsan_enter_runtime();
	handle = stack_depot_save(entries, ARRAY_SIZE(entries), GFP_ATOMIC);
	kmsan_leave_runtime();

	kmsan_internal_set_shadow_origin(address, size, -1, handle,
					 /*checked*/ true);
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__msan_poison_alloca);

/* Unpoison a local variable. */
void __msan_unpoison_alloca(void *address, uintptr_t size)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	kmsan_enter_runtime();
	kmsan_internal_unpoison_memory(address, size, /*checked*/ true);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(__msan_unpoison_alloca);

/*
 * Report that an uninitialized value with the given origin was used in a way
 * that constituted undefined behavior.
 */
void __msan_warning(u32 origin)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_report(origin, /*address*/ 0, /*size*/ 0,
		     /*off_first*/ 0, /*off_last*/ 0, /*user_addr*/ 0,
		     REASON_ANY);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(__msan_warning);

/*
 * At the beginning of an instrumented function, obtain the pointer to
 * `struct kmsan_context_state` holding the metadata for function parameters.
 */
struct kmsan_context_state *__msan_get_context_state(void)
{
	return &kmsan_get_context()->cstate;
}
EXPORT_SYMBOL(__msan_get_context_state);
309