/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Kernel Electric-Fence (KFENCE). Public interface for allocator and fault
 * handler integration. For more info see Documentation/dev-tools/kfence.rst.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef _LINUX_KFENCE_H
#define _LINUX_KFENCE_H

#include <linux/mm.h>
#include <linux/types.h>

#ifdef CONFIG_KFENCE

#include <linux/atomic.h>
#include <linux/static_key.h>

/*
 * We allocate an even number of pages, as it simplifies calculations to map
 * address to metadata indices; effectively, the very first page serves as an
 * extended guard page, but otherwise has no special purpose.
 */
#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
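
/*
 * Worked example (illustrative): each object needs one data page plus one
 * guard page, and the extra leading guard page keeps the page count even.
 * With CONFIG_KFENCE_NUM_OBJECTS=255 (the Kconfig default) and 4 KiB pages:
 *
 *	KFENCE_POOL_SIZE = (255 + 1) * 2 * 4096 = 2 MiB
 */
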
extern char *__kfence_pool;

DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
extern atomic_t kfence_allocation_gate;
/**
 * is_kfence_address() - check if an address belongs to KFENCE pool
 * @addr: address to check
 *
 * Return: true or false depending on whether the address is within the KFENCE
 * object range.
 *
 * KFENCE objects live in a separate page range and are not to be intermixed
 * with regular heap objects (e.g. KFENCE objects must never be added to the
 * allocator freelists). Failing to do so will result in heap corruption;
 * therefore is_kfence_address() must be used to check whether an object
 * requires specific handling.
 *
 * Note: This function may be used in fast-paths, and is performance critical.
 * Future changes should take this into account; for instance, we want to avoid
 * introducing another load and therefore need to keep KFENCE_POOL_SIZE a
 * constant (until immediate patching support is added to the kernel).
 */
static __always_inline bool is_kfence_address(const void *addr)
{
	/*
	 * The __kfence_pool != NULL check is required to deal with the case
	 * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
	 * the slow-path after the range-check!
	 */
	return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
}
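
/*
 * Illustrative guard (a sketch, not allocator code): any path that would
 * otherwise treat an object as a regular slab object should divert KFENCE
 * addresses first, since KFENCE objects carry their own metadata.
 *
 *	if (is_kfence_address(obj)) {
 *		// handle via the KFENCE helpers declared below
 *	} else {
 *		// regular slab handling
 *	}
 */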

/**
 * kfence_alloc_pool() - allocate the KFENCE pool via memblock
 */
void __init kfence_alloc_pool(void);

/**
 * kfence_init() - perform KFENCE initialization at boot time
 *
 * Requires that kfence_alloc_pool() was called before. This sets up the
 * allocation gate timer, and requires that workqueues are available.
 */
void __init kfence_init(void);
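
/*
 * Illustrative boot ordering (a sketch; not the actual init code):
 *
 *	kfence_alloc_pool();	// early, while memblock can still allocate
 *	...			// slab and workqueues are brought up
 *	kfence_init();		// arms the allocation gate timer
 */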

/**
 * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects
 * @s: cache being shut down
 *
 * Before shutting down a cache, one must ensure there are no remaining objects
 * allocated from it. Because KFENCE objects are not referenced from the cache
 * directly, we need to check them here.
 *
 * Note that shutdown_cache() is internal to SL*B, and kmem_cache_destroy() does
 * not fail if allocated objects still exist: it prints an error message and
 * simply aborts destruction of the cache, leaking memory.
 *
 * If the only such objects are KFENCE objects, we will not leak the entire
 * cache, but instead try to provide more useful debug info by making allocated
 * objects "zombie allocations". Objects may then still be used or freed (which
 * is handled gracefully), but any use results in a KFENCE error report that
 * includes stack traces of the user of the object, the original allocation
 * site, and the caller of shutdown_cache().
 */
void kfence_shutdown_cache(struct kmem_cache *s);
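
/*
 * Illustrative call site (a sketch; example_cache_destroy() is hypothetical):
 *
 *	void example_cache_destroy(struct kmem_cache *s)
 *	{
 *		kfence_shutdown_cache(s);	// zombify leftover KFENCE objects
 *		// ... proceed with the allocator's own shutdown_cache(s) ...
 *	}
 */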

/*
 * Allocate a KFENCE object. Allocators must not call this function directly,
 * use kfence_alloc() instead.
 */
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);

/**
 * kfence_alloc() - allocate a KFENCE object with a low probability
 * @s:     struct kmem_cache with object requirements
 * @size:  exact size of the object to allocate (can be less than @s->size
 *         e.g. for kmalloc caches)
 * @flags: GFP flags
 *
 * Return:
 * * NULL     - must proceed with allocating as usual,
 * * non-NULL - pointer to a KFENCE object.
 *
 * kfence_alloc() should be inserted into the heap allocation fast path,
 * allowing it to transparently return KFENCE-allocated objects with a low
 * probability using a static branch (the probability is controlled by the
 * kfence.sample_interval boot parameter).
 */
static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
	if (!static_branch_unlikely(&kfence_allocation_key))
		return NULL;
#else
	if (!static_branch_likely(&kfence_allocation_key))
		return NULL;
#endif
	if (likely(atomic_read(&kfence_allocation_gate)))
		return NULL;
	return __kfence_alloc(s, size, flags);
}
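
/*
 * Illustrative fast-path integration (a sketch; example_slab_alloc() and its
 * internals are hypothetical):
 *
 *	static void *example_slab_alloc(struct kmem_cache *s, gfp_t flags)
 *	{
 *		void *obj = kfence_alloc(s, s->object_size, flags);
 *
 *		if (unlikely(obj))	// rare: this allocation was sampled
 *			return obj;
 *		// ... regular slab fast path ...
 *	}
 */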

/**
 * kfence_ksize() - get actual amount of memory allocated for a KFENCE object
 * @addr: pointer to a heap object
 *
 * Return:
 * * 0     - not a KFENCE object, must call __ksize() instead,
 * * non-0 - this many bytes can be accessed without causing a memory error.
 *
 * kfence_ksize() returns the number of bytes requested for a KFENCE object at
 * allocation time. This number may be less than the object size of the
 * corresponding struct kmem_cache.
 */
size_t kfence_ksize(const void *addr);
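
/*
 * Illustrative use (a sketch): a ksize() implementation can try KFENCE first
 * and fall back to the allocator's own accounting, per the Return rules above.
 *
 *	size_t example_ksize(const void *objp)
 *	{
 *		return kfence_ksize(objp) ?: __ksize(objp);
 *	}
 */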

/**
 * kfence_object_start() - find the beginning of a KFENCE object
 * @addr: address within a KFENCE-allocated object
 *
 * Return: address of the beginning of the object.
 *
 * SL[AU]B-allocated objects are laid out within a page one by one, so it is
 * easy to calculate the beginning of an object given a pointer inside it and
 * the object size. The same is not true for KFENCE, which places a single
 * object at either end of the page. This helper function is used to find the
 * beginning of a KFENCE-allocated object.
 */
void *kfence_object_start(const void *addr);
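
/*
 * Illustrative use (a sketch): resolving an interior pointer to its object
 * base must special-case KFENCE addresses; example_slab_object_start() stands
 * in for the allocator's own lookup and is hypothetical.
 *
 *	void *base = is_kfence_address(ptr) ? kfence_object_start(ptr)
 *					    : example_slab_object_start(ptr);
 */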

/**
 * __kfence_free() - release a KFENCE heap object to KFENCE pool
 * @addr: object to be freed
 *
 * Requires: is_kfence_address(addr)
 *
 * Release a KFENCE object and mark it as freed.
 */
void __kfence_free(void *addr);

/**
 * kfence_free() - try to release an arbitrary heap object to KFENCE pool
 * @addr: object to be freed
 *
 * Return:
 * * false - object doesn't belong to KFENCE pool and was ignored,
 * * true  - object was released to KFENCE pool.
 *
 * Release a KFENCE object and mark it as freed. May be called on any object,
 * even non-KFENCE objects, to simplify integration of the hooks into the
 * allocator's free codepath. The allocator must check the return value to
 * determine if it was a KFENCE object or not.
 */
static __always_inline __must_check bool kfence_free(void *addr)
{
	if (!is_kfence_address(addr))
		return false;
	__kfence_free(addr);
	return true;
}
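
/*
 * Illustrative free-path integration (a sketch; example_slab_free() is
 * hypothetical):
 *
 *	void example_slab_free(struct kmem_cache *s, void *obj)
 *	{
 *		if (kfence_free(obj))	// KFENCE object: fully handled
 *			return;
 *		// ... regular slab free path ...
 *	}
 */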

/**
 * kfence_handle_page_fault() - perform page fault handling for KFENCE pages
 * @addr: faulting address
 * @is_write: is access a write
 * @regs: current struct pt_regs (can be NULL; if non-NULL, the report
 *        includes the full stack trace)
 *
 * Return:
 * * false - address outside KFENCE pool,
 * * true  - page fault handled by KFENCE, no additional handling required.
 *
 * A page fault inside KFENCE pool indicates a memory error, such as an
 * out-of-bounds access, a use-after-free or an invalid memory access. In these
 * cases KFENCE prints an error message and marks the offending page as
 * present, so that the kernel can proceed.
 */
bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
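
/*
 * Illustrative arch fault-handler hook (a sketch; the surrounding handler is
 * hypothetical):
 *
 *	if (kfence_handle_page_fault(fault_addr, is_write, regs))
 *		return;		// report printed, page marked present; resume
 *	// ... otherwise continue with normal fault handling (e.g. oops) ...
 */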

#else /* CONFIG_KFENCE */

static inline bool is_kfence_address(const void *addr) { return false; }
static inline void kfence_alloc_pool(void) { }
static inline void kfence_init(void) { }
static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
static inline size_t kfence_ksize(const void *addr) { return 0; }
static inline void *kfence_object_start(const void *addr) { return NULL; }
static inline void __kfence_free(void *addr) { }
static inline bool __must_check kfence_free(void *addr) { return false; }
static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write,
							 struct pt_regs *regs)
{
	return false;
}

#endif

#endif /* _LINUX_KFENCE_H */