// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN compiler API.
 *
 * This file implements __msan_XXX hooks that Clang inserts into the code
 * compiled with -fsanitize=kernel-memory.
 * See Documentation/dev-tools/kmsan.rst for more information on how KMSAN
 * instrumentation works.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include "kmsan.h"
#include <linux/gfp.h>
#include <linux/kmsan_string.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

/*
 * Return true if @addr is unsuitable for recording an asm store: either a
 * userspace address or a kernel address without KMSAN shadow metadata.
 */
static inline bool is_bad_asm_addr(void *addr, uintptr_t size, bool is_store)
{
	if ((u64)addr < TASK_SIZE)
		return true;
	if (!kmsan_get_metadata(addr, KMSAN_META_SHADOW))
		return true;
	return false;
}

/*
 * Wrap kmsan_get_shadow_origin_ptr() so that the metadata lookup is done
 * with the user access state saved and restored around it.
 */
static inline struct shadow_origin_ptr
get_shadow_origin_ptr(void *addr, u64 size, bool store)
{
	unsigned long ua_flags = user_access_save();
	struct shadow_origin_ptr ret;

	ret = kmsan_get_shadow_origin_ptr(addr, size, store);
	user_access_restore(ua_flags);
	return ret;
}

/* Get shadow and origin pointers for a memory load with non-standard size. */
struct shadow_origin_ptr __msan_metadata_ptr_for_load_n(void *addr,
							 uintptr_t size)
{
	return get_shadow_origin_ptr(addr, size, /*store*/ false);
}
EXPORT_SYMBOL(__msan_metadata_ptr_for_load_n);

/* Get shadow and origin pointers for a memory store with non-standard size. */
struct shadow_origin_ptr __msan_metadata_ptr_for_store_n(void *addr,
							  uintptr_t size)
{
	return get_shadow_origin_ptr(addr, size, /*store*/ true);
}
EXPORT_SYMBOL(__msan_metadata_ptr_for_store_n);

/*
 * Declare functions that obtain shadow/origin pointers for loads and stores
 * with fixed size.
 */
#define DECLARE_METADATA_PTR_GETTER(size)                                  \
	struct shadow_origin_ptr __msan_metadata_ptr_for_load_##size(     \
		void *addr)                                                \
	{                                                                  \
		return get_shadow_origin_ptr(addr, size, /*store*/ false); \
	}                                                                  \
	EXPORT_SYMBOL(__msan_metadata_ptr_for_load_##size);               \
	struct shadow_origin_ptr __msan_metadata_ptr_for_store_##size(    \
		void *addr)                                                \
	{                                                                  \
		return get_shadow_origin_ptr(addr, size, /*store*/ true);  \
	}                                                                  \
	EXPORT_SYMBOL(__msan_metadata_ptr_for_store_##size)

DECLARE_METADATA_PTR_GETTER(1);
DECLARE_METADATA_PTR_GETTER(2);
DECLARE_METADATA_PTR_GETTER(4);
DECLARE_METADATA_PTR_GETTER(8);
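
/*
 * As a rough illustration (a simplified sketch, not the exact code the
 * compiler emits): for a 4-byte store "*p = v;" the instrumentation looks up
 * the metadata with __msan_metadata_ptr_for_store_4(p), copies the shadow of
 * @v into the returned shadow slot and, if that shadow is nonzero, records a
 * chained origin in the origin slot, roughly as follows:
 *
 *	struct shadow_origin_ptr sop = __msan_metadata_ptr_for_store_4(p);
 *	*(u32 *)sop.shadow = v_shadow;
 *	if (v_shadow)
 *		*(u32 *)sop.origin = __msan_chain_origin(v_origin);
 *	*p = v;
 *
 * Here v_shadow and v_origin are purely illustrative names standing for the
 * compiler-tracked shadow and origin of @v.
 */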

/*
 * Handle a memory store performed by inline assembly. KMSAN conservatively
 * attempts to unpoison the outputs of asm() directives to prevent false
 * positives caused by missed stores.
 *
 * __msan_instrument_asm_store() may be called for inline assembly code when
 * entering or leaving IRQ. We omit the check for kmsan_in_runtime() to ensure
 * the memory written to in these cases is also marked as initialized.
 */
void __msan_instrument_asm_store(void *addr, uintptr_t size)
{
	unsigned long ua_flags;

	if (!kmsan_enabled)
		return;

	ua_flags = user_access_save();
	/*
	 * Most of the accesses are below 32 bytes. The two exceptions so far
	 * are clwb() (64 bytes) and FPU state (512 bytes).
	 * It's unlikely that the assembly will touch more than 512 bytes.
	 */
	if (size > 512) {
		WARN_ONCE(1, "assembly store size too big: %ld\n", size);
		size = 8;
	}
	if (is_bad_asm_addr(addr, size, /*is_store*/ true)) {
		user_access_restore(ua_flags);
		return;
	}
	/* Unpoison the memory on a best-effort basis. */
	kmsan_internal_unpoison_memory(addr, size, /*checked*/ false);
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__msan_instrument_asm_store);

/*
 * KMSAN instrumentation pass replaces LLVM memcpy, memmove and memset
 * intrinsics with calls to the respective __msan_ functions. We use
 * get_param0_metadata() and set_retval_metadata() to store the shadow/origin
 * values for the destination argument of these functions and use them for the
 * functions' return values.
 */
static inline void get_param0_metadata(u64 *shadow,
				       depot_stack_handle_t *origin)
{
	struct kmsan_ctx *ctx = kmsan_get_context();

	*shadow = *(u64 *)(ctx->cstate.param_tls);
	*origin = ctx->cstate.param_origin_tls[0];
}

static inline void set_retval_metadata(u64 shadow, depot_stack_handle_t origin)
{
	struct kmsan_ctx *ctx = kmsan_get_context();

	*(u64 *)(ctx->cstate.retval_tls) = shadow;
	ctx->cstate.retval_origin_tls = origin;
}
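
/*
 * An informal summary of the calling convention (not a literal description
 * of the generated code): instrumented callers pass the shadow/origin of the
 * destination pointer via param_tls/param_origin_tls[0], and the
 * __msan_mem*() functions below hand the same metadata back via
 * retval_tls/retval_origin_tls, because each of them returns its destination
 * argument.
 */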

/* Handle llvm.memmove intrinsic. */
void *__msan_memmove(void *dst, const void *src, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memmove(dst, src, n);
	if (!n)
		/* Some people call memmove() with zero length. */
		return result;
	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	kmsan_internal_memmove_metadata(dst, (void *)src, n);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memmove);

/* Handle llvm.memcpy intrinsic. */
void *__msan_memcpy(void *dst, const void *src, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memcpy(dst, src, n);
	if (!n)
		/* Some people call memcpy() with zero length. */
		return result;

	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	/* Using memmove instead of memcpy doesn't affect correctness. */
	kmsan_internal_memmove_metadata(dst, (void *)src, n);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memcpy);

/* Handle llvm.memset intrinsic. */
void *__msan_memset(void *dst, int c, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memset(dst, c, n);
	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	/*
	 * Clang doesn't pass parameter metadata here, so it is impossible to
	 * use shadow of @c to set up the shadow for @dst.
	 */
	kmsan_internal_unpoison_memory(dst, n, /*checked*/ false);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memset);

/*
 * Create a new origin from an old one. This is done when storing an
 * uninitialized value to memory. When reporting an error, KMSAN unrolls and
 * prints the whole chain of stores that preceded the use of this value.
 */
depot_stack_handle_t __msan_chain_origin(depot_stack_handle_t origin)
{
	depot_stack_handle_t ret = 0;
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return ret;

	ua_flags = user_access_save();

	/* Creating new origins may allocate memory. */
	kmsan_enter_runtime();
	ret = kmsan_internal_chain_origin(origin);
	kmsan_leave_runtime();
	user_access_restore(ua_flags);
	return ret;
}
EXPORT_SYMBOL(__msan_chain_origin);

/* Poison a local variable when entering a function. */
void __msan_poison_alloca(void *address, uintptr_t size, char *descr)
{
	depot_stack_handle_t handle;
	unsigned long entries[4];
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	ua_flags = user_access_save();
	entries[0] = KMSAN_ALLOCA_MAGIC_ORIGIN;
	entries[1] = (u64)descr;
	entries[2] = (u64)__builtin_return_address(0);
	/*
	 * With frame pointers enabled, it is possible to quickly fetch the
	 * second frame of the caller stack without calling the unwinder.
	 * Without them, simply do not bother.
	 */
	if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER))
		entries[3] = (u64)__builtin_return_address(1);
	else
		entries[3] = 0;

	/* stack_depot_save() may allocate memory. */
	kmsan_enter_runtime();
	handle = stack_depot_save(entries, ARRAY_SIZE(entries), GFP_ATOMIC);
	kmsan_leave_runtime();

	kmsan_internal_set_shadow_origin(address, size, -1, handle,
					 /*checked*/ true);
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__msan_poison_alloca);

/* Unpoison a local variable. */
void __msan_unpoison_alloca(void *address, uintptr_t size)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	kmsan_enter_runtime();
	kmsan_internal_unpoison_memory(address, size, /*checked*/ true);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(__msan_unpoison_alloca);

/*
 * Report that an uninitialized value with the given origin was used in a way
 * that constituted undefined behavior.
 */
void __msan_warning(u32 origin)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_report(origin, /*address*/ 0, /*size*/ 0,
		     /*off_first*/ 0, /*off_last*/ 0, /*user_addr*/ 0,
		     REASON_ANY);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(__msan_warning);

/*
 * At the beginning of an instrumented function, obtain the pointer to
 * `struct kmsan_context_state` holding the metadata for function parameters.
 */
struct kmsan_context_state *__msan_get_context_state(void)
{
	return &kmsan_get_context()->cstate;
}
EXPORT_SYMBOL(__msan_get_context_state);
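
/*
 * A rough illustration of how the hooks above fit together (a sketch, not
 * the code Clang actually emits): an instrumented function starts by
 * fetching the current context state and reading the metadata of its
 * arguments from it, e.g.:
 *
 *	struct kmsan_context_state *kcs = __msan_get_context_state();
 *	u64 arg0_shadow = *(u64 *)kcs->param_tls;
 *	depot_stack_handle_t arg0_origin = kcs->param_origin_tls[0];
 *
 * (the local names here are purely illustrative). Stack locals are then
 * poisoned with __msan_poison_alloca(), uses of uninitialized values are
 * reported via __msan_warning(), and the metadata of the return value is
 * written to kcs->retval_tls/retval_origin_tls before returning.
 */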