xref: /openbmc/linux/mm/kmsan/kmsan.h (revision c0605cd6)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Functions used by the KMSAN runtime.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#ifndef __MM_KMSAN_KMSAN_H
#define __MM_KMSAN_KMSAN_H

#include <asm/pgtable_64_types.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/printk.h>

#define KMSAN_ALLOCA_MAGIC_ORIGIN 0xabcd0100
#define KMSAN_CHAIN_MAGIC_ORIGIN 0xabcd0200

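/* Flags for the @poison_flags argument of kmsan_internal_poison_memory(). */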
#define KMSAN_POISON_NOCHECK 0x0
#define KMSAN_POISON_CHECK 0x1
#define KMSAN_POISON_FREE 0x2

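/*
 * Descriptive note: each 4 bytes of kernel memory are covered by one 4-byte
 * origin ID, and chained origins are capped at KMSAN_MAX_ORIGIN_DEPTH links
 * (see kmsan_internal_chain_origin() below).
 */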
#define KMSAN_ORIGIN_SIZE 4
#define KMSAN_MAX_ORIGIN_DEPTH 7

#define KMSAN_STACK_DEPTH 64

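/* Values for the @is_origin argument of kmsan_get_metadata(). */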
#define KMSAN_META_SHADOW (false)
#define KMSAN_META_ORIGIN (true)

extern bool kmsan_enabled;
extern int panic_on_kmsan;

/*
 * KMSAN performs a lot of consistency checks that are currently enabled by
 * default. BUG_ON is normally discouraged in the kernel, unless used for
 * debugging, but KMSAN itself is a debugging tool, so it makes little sense to
 * recover if something goes wrong.
 */
#define KMSAN_WARN_ON(cond)                                           \
	({                                                            \
		const bool __cond = WARN_ON(cond);                    \
		if (unlikely(__cond)) {                               \
			WRITE_ONCE(kmsan_enabled, false);             \
			if (panic_on_kmsan) {                         \
				/* Can't call panic() here because */ \
				/* of uaccess checks. */              \
				BUG();                                \
			}                                             \
		}                                                     \
		__cond;                                               \
	})
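
/*
 * Illustrative sketch (not part of the original header): a runtime invariant
 * asserted with KMSAN_WARN_ON(). On failure, a warning is printed once and
 * KMSAN disables itself instead of continuing with inconsistent metadata.
 * The alignment check is a hypothetical example condition.
 */
static inline void kmsan_example_check_alignment(void *addr)
{
	KMSAN_WARN_ON((unsigned long)addr % KMSAN_ORIGIN_SIZE);
}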

/*
 * A pair of metadata pointers to be returned by the instrumentation functions.
 */
struct shadow_origin_ptr {
	void *shadow, *origin;
};

struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *addr, u64 size,
						     bool store);
void *kmsan_get_metadata(void *addr, bool is_origin);
void __init kmsan_init_alloc_meta_for_range(void *start, void *end);
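
/*
 * Sketch (an assumption, not code from this file): checking whether a 4-byte
 * value is fully initialized by inspecting its shadow. A zero shadow byte
 * means the corresponding data byte is initialized.
 */
static inline bool kmsan_example_is_init_u32(void *addr)
{
	struct shadow_origin_ptr sp =
		kmsan_get_shadow_origin_ptr(addr, 4, /*store=*/false);

	return *(u32 *)sp.shadow == 0;
}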

enum kmsan_bug_reason {
	REASON_ANY,
	REASON_COPY_TO_USER,
	REASON_SUBMIT_URB,
};

void kmsan_print_origin(depot_stack_handle_t origin);

/**
 * kmsan_report() - Report a use of uninitialized value.
 * @origin:    Stack ID of the uninitialized value.
 * @address:   Address at which the memory access happens.
 * @size:      Memory access size.
 * @off_first: Offset (from @address) of the first byte to be reported.
 * @off_last:  Offset (from @address) of the last byte to be reported.
 * @user_addr: When non-NULL, denotes the userspace address to which the kernel
 *             is leaking data.
 * @reason:    Error type from enum kmsan_bug_reason.
 *
 * kmsan_report() prints an error message for a consecutive group of bytes
 * sharing the same origin. If an uninitialized value is used in a comparison,
 * this function is called once without specifying the addresses. When checking
 * a memory range, KMSAN may call kmsan_report() multiple times with the same
 * @address, @size, @user_addr and @reason, but different @off_first and
 * @off_last corresponding to different @origin values.
 */
void kmsan_report(depot_stack_handle_t origin, void *address, int size,
		  int off_first, int off_last, const void *user_addr,
		  enum kmsan_bug_reason reason);
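
/*
 * Example invocation (illustrative only): reporting that bytes 4..7 of an
 * 8-byte copy to userspace come from uninitialized memory.
 */
static inline void kmsan_example_report_leak(depot_stack_handle_t origin,
					     void *kaddr, const void *uaddr)
{
	kmsan_report(origin, kaddr, /*size=*/8, /*off_first=*/4,
		     /*off_last=*/7, uaddr, REASON_COPY_TO_USER);
}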

DECLARE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);

static __always_inline struct kmsan_ctx *kmsan_get_context(void)
{
	return in_task() ? &current->kmsan_ctx : raw_cpu_ptr(&kmsan_percpu_ctx);
}

/*
 * When a compiler hook or KMSAN runtime function is invoked, it may make a
 * call to instrumented code and eventually call itself recursively. To avoid
 * that, we guard the runtime entry regions with
 * kmsan_enter_runtime()/kmsan_leave_runtime() and exit the hook if
 * kmsan_in_runtime() is true.
 *
 * Non-runtime code may occasionally get executed in nested IRQs from the
 * runtime code (e.g. when called via smp_call_function_single()). Because some
 * KMSAN routines may take locks (e.g. for memory allocation), we conservatively
 * bail out instead of calling them. To minimize the effect of this (potentially
 * missing initialization events), kmsan_in_runtime() is not checked in
 * non-blocking runtime functions.
 */
static __always_inline bool kmsan_in_runtime(void)
{
	if ((hardirq_count() >> HARDIRQ_SHIFT) > 1)
		return true;
	return kmsan_get_context()->kmsan_in_runtime;
}

static __always_inline void kmsan_enter_runtime(void)
{
	struct kmsan_ctx *ctx;

	ctx = kmsan_get_context();
	KMSAN_WARN_ON(ctx->kmsan_in_runtime++);
}

static __always_inline void kmsan_leave_runtime(void)
{
	struct kmsan_ctx *ctx = kmsan_get_context();

	KMSAN_WARN_ON(--ctx->kmsan_in_runtime);
}
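
/*
 * The pattern described above, sketched as a hypothetical hook (this function
 * is not part of the original header): bail out when KMSAN is disabled or the
 * runtime is already active, otherwise bracket the work with
 * kmsan_enter_runtime()/kmsan_leave_runtime().
 */
static inline void kmsan_example_hook(void *addr, size_t size)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	/* ... touch shadow/origin metadata for [addr, addr + size) here ... */
	kmsan_leave_runtime();
}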

depot_stack_handle_t kmsan_save_stack(void);
depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
						 unsigned int extra_bits);

/*
 * Pack and unpack the origin chain depth and UAF flag to/from the extra bits
 * provided by the stack depot.
 * The UAF flag is stored in the lowest bit, followed by the depth in the upper
 * bits.
 * set_dsh_extra_bits() is responsible for clamping the value.
 */
static __always_inline unsigned int kmsan_extra_bits(unsigned int depth,
						     bool uaf)
{
	return (depth << 1) | uaf;
}

static __always_inline bool kmsan_uaf_from_eb(unsigned int extra_bits)
{
	return extra_bits & 1;
}

static __always_inline unsigned int kmsan_depth_from_eb(unsigned int extra_bits)
{
	return extra_bits >> 1;
}
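
/*
 * Round-trip sketch (illustrative only): packing a chain depth of 3 together
 * with the UAF flag, then unpacking both fields.
 */
static inline unsigned int kmsan_example_extra_bits(void)
{
	/* depth = 3, uaf = true  =>  eb == (3 << 1) | 1 == 7 */
	unsigned int eb = kmsan_extra_bits(3, true);

	/* kmsan_uaf_from_eb(eb) == true, kmsan_depth_from_eb(eb) == 3. */
	return eb;
}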

/*
 * kmsan_internal_ functions are supposed to be very simple and not require the
 * kmsan_in_runtime() checks.
 */
void kmsan_internal_memmove_metadata(void *dst, void *src, size_t n);
void kmsan_internal_poison_memory(void *address, size_t size, gfp_t flags,
				  unsigned int poison_flags);
void kmsan_internal_unpoison_memory(void *address, size_t size, bool checked);
void kmsan_internal_set_shadow_origin(void *address, size_t size, int b,
				      u32 origin, bool checked);
depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id);
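
/*
 * Sketch (an assumption about typical usage, not code from this file): a
 * free() hook would mark the freed object uninitialized and tag the range as
 * a potential use-after-free source.
 */
static inline void kmsan_example_poison_free(void *obj, size_t size)
{
	kmsan_internal_poison_memory(obj, size, GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
}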

void kmsan_internal_task_create(struct task_struct *task);

bool kmsan_metadata_is_contiguous(void *addr, size_t size);
void kmsan_internal_check_memory(void *addr, size_t size, const void *user_addr,
				 int reason);

struct page *kmsan_vmalloc_to_page_or_null(void *vaddr);
void kmsan_setup_meta(struct page *page, struct page *shadow,
		      struct page *origin, int order);

/*
 * kmsan_internal_is_module_addr() and kmsan_internal_is_vmalloc_addr() are
 * non-instrumented versions of is_module_address() and is_vmalloc_addr() that
 * are safe to call from KMSAN runtime without recursion.
 */
static inline bool kmsan_internal_is_module_addr(void *vaddr)
{
	return ((u64)vaddr >= MODULES_VADDR) && ((u64)vaddr < MODULES_END);
}

static inline bool kmsan_internal_is_vmalloc_addr(void *addr)
{
	return ((u64)addr >= VMALLOC_START) && ((u64)addr < VMALLOC_END);
}
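
/*
 * Sketch (illustrative only, based on how kmsan_get_metadata() is assumed to
 * classify addresses): vmalloc and module areas keep their shadow/origin in
 * dedicated mappings, so a metadata lookup would branch on the address type.
 */
static inline bool kmsan_example_has_dedicated_meta(void *addr)
{
	return kmsan_internal_is_vmalloc_addr(addr) ||
	       kmsan_internal_is_module_addr(addr);
}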

#endif /* __MM_KMSAN_KMSAN_H */