1f80be457SAlexander Potapenko /* SPDX-License-Identifier: GPL-2.0 */
2f80be457SAlexander Potapenko /*
3f80be457SAlexander Potapenko * Functions used by the KMSAN runtime.
4f80be457SAlexander Potapenko *
5f80be457SAlexander Potapenko * Copyright (C) 2017-2022 Google LLC
6f80be457SAlexander Potapenko * Author: Alexander Potapenko <glider@google.com>
7f80be457SAlexander Potapenko *
8f80be457SAlexander Potapenko */
9f80be457SAlexander Potapenko
10f80be457SAlexander Potapenko #ifndef __MM_KMSAN_KMSAN_H
11f80be457SAlexander Potapenko #define __MM_KMSAN_KMSAN_H
12f80be457SAlexander Potapenko
13f80be457SAlexander Potapenko #include <asm/pgtable_64_types.h>
14f80be457SAlexander Potapenko #include <linux/irqflags.h>
15f80be457SAlexander Potapenko #include <linux/sched.h>
16f80be457SAlexander Potapenko #include <linux/stackdepot.h>
17f80be457SAlexander Potapenko #include <linux/stacktrace.h>
18f80be457SAlexander Potapenko #include <linux/nmi.h>
19f80be457SAlexander Potapenko #include <linux/mm.h>
20f80be457SAlexander Potapenko #include <linux/printk.h>
21f80be457SAlexander Potapenko
/* Magic origin markers for alloca()-created locals and chained origins. */
#define KMSAN_ALLOCA_MAGIC_ORIGIN 0xabcd0100
#define KMSAN_CHAIN_MAGIC_ORIGIN 0xabcd0200

/* Flags for kmsan_internal_poison_memory() (see its declaration below). */
#define KMSAN_POISON_NOCHECK 0x0
#define KMSAN_POISON_CHECK 0x1
#define KMSAN_POISON_FREE 0x2

/* Size in bytes of a single origin slot. */
#define KMSAN_ORIGIN_SIZE 4
/* Upper bound on the origin chain length (see KMSAN_CHAIN_MAGIC_ORIGIN). */
#define KMSAN_MAX_ORIGIN_DEPTH 7

/* Maximum number of frames recorded per stack trace. */
#define KMSAN_STACK_DEPTH 64

/* Values for the @is_origin argument of kmsan_get_metadata(). */
#define KMSAN_META_SHADOW (false)
#define KMSAN_META_ORIGIN (true)

/* Global on/off switch for the runtime; cleared on internal errors. */
extern bool kmsan_enabled;
/* When nonzero, KMSAN_WARN_ON() escalates a failed check to BUG(). */
extern int panic_on_kmsan;
39f80be457SAlexander Potapenko
40f80be457SAlexander Potapenko /*
41f80be457SAlexander Potapenko * KMSAN performs a lot of consistency checks that are currently enabled by
42f80be457SAlexander Potapenko * default. BUG_ON is normally discouraged in the kernel, unless used for
43f80be457SAlexander Potapenko * debugging, but KMSAN itself is a debugging tool, so it makes little sense to
44f80be457SAlexander Potapenko * recover if something goes wrong.
45f80be457SAlexander Potapenko */
/*
 * Like WARN_ON(): evaluates to the truth value of @cond. Additionally, on
 * failure the KMSAN runtime is permanently disabled, and if panic_on_kmsan
 * is set the kernel BUG()s.
 */
#define KMSAN_WARN_ON(cond)                                            \
	({                                                             \
		const bool __cond = WARN_ON(cond);                     \
		if (unlikely(__cond)) {                                \
			WRITE_ONCE(kmsan_enabled, false);              \
			if (panic_on_kmsan) {                          \
				/* Can't call panic() here because */  \
				/* of uaccess checks. */               \
				BUG();                                 \
			}                                              \
		}                                                      \
		__cond;                                                \
	})
59f80be457SAlexander Potapenko
60f80be457SAlexander Potapenko /*
61f80be457SAlexander Potapenko * A pair of metadata pointers to be returned by the instrumentation functions.
62f80be457SAlexander Potapenko */
struct shadow_origin_ptr {
	/* Shadow and origin metadata pointers for the same kernel address. */
	void *shadow, *origin;
};
66f80be457SAlexander Potapenko
/*
 * Return the shadow/origin pointer pair for an access of @size bytes at
 * @addr; @store distinguishes writes from reads.
 */
struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *addr, u64 size,
						     bool store);
/*
 * Return a pointer to the shadow (KMSAN_META_SHADOW) or origin
 * (KMSAN_META_ORIGIN) metadata for @addr.
 */
void *kmsan_get_metadata(void *addr, bool is_origin);
/* Early-boot setup of KMSAN metadata for the [start, end) address range. */
void __init kmsan_init_alloc_meta_for_range(void *start, void *end);
71f80be457SAlexander Potapenko
/* The kind of a reported bug, used to choose the report wording. */
enum kmsan_bug_reason {
	REASON_ANY,		/* generic use of an uninitialized value */
	REASON_COPY_TO_USER,	/* uninit data copied to userspace */
	REASON_SUBMIT_URB,	/* uninit data submitted in a USB request */
};

/* Print the stack trace(s) associated with @origin. */
void kmsan_print_origin(depot_stack_handle_t origin);
79f80be457SAlexander Potapenko
80f80be457SAlexander Potapenko /**
81f80be457SAlexander Potapenko * kmsan_report() - Report a use of uninitialized value.
82f80be457SAlexander Potapenko * @origin: Stack ID of the uninitialized value.
83f80be457SAlexander Potapenko * @address: Address at which the memory access happens.
84f80be457SAlexander Potapenko * @size: Memory access size.
85f80be457SAlexander Potapenko * @off_first: Offset (from @address) of the first byte to be reported.
86f80be457SAlexander Potapenko * @off_last: Offset (from @address) of the last byte to be reported.
87f80be457SAlexander Potapenko * @user_addr: When non-NULL, denotes the userspace address to which the kernel
88f80be457SAlexander Potapenko * is leaking data.
89f80be457SAlexander Potapenko * @reason: Error type from enum kmsan_bug_reason.
90f80be457SAlexander Potapenko *
91f80be457SAlexander Potapenko * kmsan_report() prints an error message for a consequent group of bytes
92f80be457SAlexander Potapenko * sharing the same origin. If an uninitialized value is used in a comparison,
93f80be457SAlexander Potapenko * this function is called once without specifying the addresses. When checking
94f80be457SAlexander Potapenko * a memory range, KMSAN may call kmsan_report() multiple times with the same
95f80be457SAlexander Potapenko * @address, @size, @user_addr and @reason, but different @off_first and
96f80be457SAlexander Potapenko * @off_last corresponding to different @origin values.
97f80be457SAlexander Potapenko */
void kmsan_report(depot_stack_handle_t origin, void *address, int size,
		  int off_first, int off_last, const void *user_addr,
		  enum kmsan_bug_reason reason);

/* Per-CPU KMSAN context, used when not running in task context. */
DECLARE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);
103f80be457SAlexander Potapenko
kmsan_get_context(void)104f80be457SAlexander Potapenko static __always_inline struct kmsan_ctx *kmsan_get_context(void)
105f80be457SAlexander Potapenko {
106f80be457SAlexander Potapenko return in_task() ? ¤t->kmsan_ctx : raw_cpu_ptr(&kmsan_percpu_ctx);
107f80be457SAlexander Potapenko }
108f80be457SAlexander Potapenko
109f80be457SAlexander Potapenko /*
110f80be457SAlexander Potapenko * When a compiler hook or KMSAN runtime function is invoked, it may make a
111f80be457SAlexander Potapenko * call to instrumented code and eventually call itself recursively. To avoid
112f80be457SAlexander Potapenko * that, we guard the runtime entry regions with
113f80be457SAlexander Potapenko * kmsan_enter_runtime()/kmsan_leave_runtime() and exit the hook if
114f80be457SAlexander Potapenko * kmsan_in_runtime() is true.
115f80be457SAlexander Potapenko *
116f80be457SAlexander Potapenko * Non-runtime code may occasionally get executed in nested IRQs from the
117f80be457SAlexander Potapenko * runtime code (e.g. when called via smp_call_function_single()). Because some
118f80be457SAlexander Potapenko * KMSAN routines may take locks (e.g. for memory allocation), we conservatively
119f80be457SAlexander Potapenko * bail out instead of calling them. To minimize the effect of this (potentially
120f80be457SAlexander Potapenko * missing initialization events) kmsan_in_runtime() is not checked in
121f80be457SAlexander Potapenko * non-blocking runtime functions.
122f80be457SAlexander Potapenko */
kmsan_in_runtime(void)123f80be457SAlexander Potapenko static __always_inline bool kmsan_in_runtime(void)
124f80be457SAlexander Potapenko {
125f80be457SAlexander Potapenko if ((hardirq_count() >> HARDIRQ_SHIFT) > 1)
126f80be457SAlexander Potapenko return true;
127*cbadaf71SAlexander Potapenko if (in_nmi())
128*cbadaf71SAlexander Potapenko return true;
129f80be457SAlexander Potapenko return kmsan_get_context()->kmsan_in_runtime;
130f80be457SAlexander Potapenko }
131f80be457SAlexander Potapenko
kmsan_enter_runtime(void)132f80be457SAlexander Potapenko static __always_inline void kmsan_enter_runtime(void)
133f80be457SAlexander Potapenko {
134f80be457SAlexander Potapenko struct kmsan_ctx *ctx;
135f80be457SAlexander Potapenko
136f80be457SAlexander Potapenko ctx = kmsan_get_context();
137f80be457SAlexander Potapenko KMSAN_WARN_ON(ctx->kmsan_in_runtime++);
138f80be457SAlexander Potapenko }
139f80be457SAlexander Potapenko
kmsan_leave_runtime(void)140f80be457SAlexander Potapenko static __always_inline void kmsan_leave_runtime(void)
141f80be457SAlexander Potapenko {
142f80be457SAlexander Potapenko struct kmsan_ctx *ctx = kmsan_get_context();
143f80be457SAlexander Potapenko
144f80be457SAlexander Potapenko KMSAN_WARN_ON(--ctx->kmsan_in_runtime);
145f80be457SAlexander Potapenko }
146f80be457SAlexander Potapenko
/* Record the current stack trace in the stack depot with default settings. */
depot_stack_handle_t kmsan_save_stack(void);
/*
 * Record the current stack trace, allocating with @flags and attaching
 * @extra_bits (see kmsan_extra_bits() below) to the depot handle.
 */
depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
						 unsigned int extra_bits);
150f80be457SAlexander Potapenko
151f80be457SAlexander Potapenko /*
152f80be457SAlexander Potapenko * Pack and unpack the origin chain depth and UAF flag to/from the extra bits
153f80be457SAlexander Potapenko * provided by the stack depot.
154f80be457SAlexander Potapenko * The UAF flag is stored in the lowest bit, followed by the depth in the upper
155f80be457SAlexander Potapenko * bits.
156f80be457SAlexander Potapenko * set_dsh_extra_bits() is responsible for clamping the value.
157f80be457SAlexander Potapenko */
kmsan_extra_bits(unsigned int depth,bool uaf)158f80be457SAlexander Potapenko static __always_inline unsigned int kmsan_extra_bits(unsigned int depth,
159f80be457SAlexander Potapenko bool uaf)
160f80be457SAlexander Potapenko {
161f80be457SAlexander Potapenko return (depth << 1) | uaf;
162f80be457SAlexander Potapenko }
163f80be457SAlexander Potapenko
kmsan_uaf_from_eb(unsigned int extra_bits)164f80be457SAlexander Potapenko static __always_inline bool kmsan_uaf_from_eb(unsigned int extra_bits)
165f80be457SAlexander Potapenko {
166f80be457SAlexander Potapenko return extra_bits & 1;
167f80be457SAlexander Potapenko }
168f80be457SAlexander Potapenko
kmsan_depth_from_eb(unsigned int extra_bits)169f80be457SAlexander Potapenko static __always_inline unsigned int kmsan_depth_from_eb(unsigned int extra_bits)
170f80be457SAlexander Potapenko {
171f80be457SAlexander Potapenko return extra_bits >> 1;
172f80be457SAlexander Potapenko }
173f80be457SAlexander Potapenko
174f80be457SAlexander Potapenko /*
175f80be457SAlexander Potapenko * kmsan_internal_ functions are supposed to be very simple and not require the
176f80be457SAlexander Potapenko * kmsan_in_runtime() checks.
177f80be457SAlexander Potapenko */
/* Copy @n bytes of shadow/origin metadata from @src to @dst. */
void kmsan_internal_memmove_metadata(void *dst, void *src, size_t n);
/*
 * Mark @size bytes at @address as uninitialized; @poison_flags are the
 * KMSAN_POISON_* values declared above, @flags is used for origin allocation.
 */
void kmsan_internal_poison_memory(void *address, size_t size, gfp_t flags,
				  unsigned int poison_flags);
/* Mark @size bytes at @address as initialized. */
void kmsan_internal_unpoison_memory(void *address, size_t size, bool checked);
/* Fill @size bytes of shadow with @b and set the corresponding origins. */
void kmsan_internal_set_shadow_origin(void *address, size_t size, int b,
				      u32 origin, bool checked);
/* Chain @id with the current stack (bounded by KMSAN_MAX_ORIGIN_DEPTH). */
depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id);

/* Initialize the KMSAN state of a newly created @task. */
void kmsan_internal_task_create(struct task_struct *task);

/* Return true if the metadata for [addr, addr+size) is physically contiguous. */
bool kmsan_metadata_is_contiguous(void *addr, size_t size);
/* Check @size bytes at @addr, reporting uninitialized values (see kmsan_report()). */
void kmsan_internal_check_memory(void *addr, size_t size, const void *user_addr,
				 int reason);

/* Like vmalloc_to_page(), but returns NULL instead of failing internally. */
struct page *kmsan_vmalloc_to_page_or_null(void *vaddr);
/* Attach @shadow/@origin metadata pages of the given @order to @page. */
void kmsan_setup_meta(struct page *page, struct page *shadow,
		      struct page *origin, int order);
195f80be457SAlexander Potapenko
196f80be457SAlexander Potapenko /*
197f80be457SAlexander Potapenko * kmsan_internal_is_module_addr() and kmsan_internal_is_vmalloc_addr() are
198f80be457SAlexander Potapenko * non-instrumented versions of is_module_address() and is_vmalloc_addr() that
199f80be457SAlexander Potapenko * are safe to call from KMSAN runtime without recursion.
200f80be457SAlexander Potapenko */
kmsan_internal_is_module_addr(void * vaddr)201f80be457SAlexander Potapenko static inline bool kmsan_internal_is_module_addr(void *vaddr)
202f80be457SAlexander Potapenko {
203f80be457SAlexander Potapenko return ((u64)vaddr >= MODULES_VADDR) && ((u64)vaddr < MODULES_END);
204f80be457SAlexander Potapenko }
205f80be457SAlexander Potapenko
kmsan_internal_is_vmalloc_addr(void * addr)206f80be457SAlexander Potapenko static inline bool kmsan_internal_is_vmalloc_addr(void *addr)
207f80be457SAlexander Potapenko {
208f80be457SAlexander Potapenko return ((u64)addr >= VMALLOC_START) && ((u64)addr < VMALLOC_END);
209f80be457SAlexander Potapenko }
210f80be457SAlexander Potapenko
211f80be457SAlexander Potapenko #endif /* __MM_KMSAN_KMSAN_H */
212