/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Kernel Electric-Fence (KFENCE). Public interface for allocator and fault
 * handler integration. For more info see Documentation/dev-tools/kfence.rst.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef _LINUX_KFENCE_H
#define _LINUX_KFENCE_H

#include <linux/mm.h>
#include <linux/types.h>

#ifdef CONFIG_KFENCE

/*
 * We allocate an even number of pages, as it simplifies calculations to map
 * address to metadata indices; effectively, the very first page serves as an
 * extended guard page, but otherwise has no special purpose.
 */
#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
extern char *__kfence_pool;

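/*
 * Editorial sketch (not part of this header; assumes the caller already
 * verified is_kfence_address()): the even-page layout above lets an address
 * map to its metadata index with one subtraction and one division. The real
 * lookup lives in mm/kfence/core.c; this helper only mirrors the arithmetic
 * the comment above alludes to.
 */
static inline long __example_addr_to_index(unsigned long addr)
{
	/* Pages come in pairs; the first pair holds no object. */
	long index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;

	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return -1;	/* guard-only prefix, or out of range */
	return index;
}
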
#ifdef CONFIG_KFENCE_STATIC_KEYS
#include <linux/static_key.h>
DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
#else
#include <linux/atomic.h>
extern atomic_t kfence_allocation_gate;
#endif

/**
 * is_kfence_address() - check if an address belongs to KFENCE pool
 * @addr: address to check
 *
 * Return: true or false depending on whether the address is within the KFENCE
 * object range.
 *
 * KFENCE objects live in a separate page range and are not to be intermixed
 * with regular heap objects (e.g. KFENCE objects must never be added to the
 * allocator freelists). Intermixing them will result in heap corruption;
 * therefore is_kfence_address() must be used to check whether an object
 * requires specific handling.
 *
 * Note: This function may be used in fast-paths, and is performance critical.
 * Future changes should take this into account; for instance, we want to avoid
 * introducing another load and therefore need to keep KFENCE_POOL_SIZE a
 * constant (until immediate patching support is added to the kernel).
 */
static __always_inline bool is_kfence_address(const void *addr)
{
	/*
	 * The non-NULL check is required in case the __kfence_pool pointer was
	 * never initialized; keep it in the slow-path after the range-check.
	 */
	return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr);
}

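/*
 * Editorial note on the check above: because the subtraction result is
 * compared as an unsigned value, a single comparison covers both bounds.
 * For example, addr == __kfence_pool - 1 yields a difference of -1, which
 * wraps to ULONG_MAX and is therefore rejected as >= KFENCE_POOL_SIZE.
 */
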
/**
 * kfence_alloc_pool() - allocate the KFENCE pool via memblock
 */
void __init kfence_alloc_pool(void);

/**
 * kfence_init() - perform KFENCE initialization at boot time
 *
 * Requires that kfence_alloc_pool() was called before. This sets up the
 * allocation gate timer, and requires that workqueues are available.
 */
void __init kfence_init(void);

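/*
 * Illustrative boot-order sketch (hypothetical function; not how any
 * particular architecture wires this up): the pool must be carved out while
 * memblock is still the active allocator, and kfence_init() may only run
 * once workqueues are usable.
 */
static void __init __example_boot_sequence(void)
{
	kfence_alloc_pool();	/* early: memblock still available */

	/* ... core mm and workqueue initialization happens here ... */

	kfence_init();		/* late: arms the allocation gate timer */
}
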
/**
 * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects
 * @s: cache being shut down
 *
 * Before shutting down a cache, one must ensure there are no remaining objects
 * allocated from it. Because KFENCE objects are not referenced from the cache
 * directly, we need to check them here.
 *
 * Note that shutdown_cache() is internal to SL*B, and kmem_cache_destroy()
 * cannot report failure if allocated objects still exist: it prints an error
 * message and simply aborts destruction of the cache, leaking memory.
 *
 * If the only such objects are KFENCE objects, we will not leak the entire
 * cache, but instead try to provide more useful debug info by making allocated
 * objects "zombie allocations". Objects may then still be used or freed (which
 * is handled gracefully), but any use will produce KFENCE error reports that
 * include stack traces of the use, the original allocation site, and the
 * caller of shutdown_cache().
 */
void kfence_shutdown_cache(struct kmem_cache *s);

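/*
 * Usage sketch (hypothetical destroy path, illustrative only): the hook runs
 * before the allocator's own shutdown so that surviving KFENCE objects become
 * zombies instead of silently leaked live objects.
 */
static inline void __example_cache_destroy(struct kmem_cache *s)
{
	kfence_shutdown_cache(s);	/* zombify remaining KFENCE objects */
	/* ... then the allocator's internal shutdown_cache(s) ... */
}
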
/*
 * Allocate a KFENCE object. Allocators must not call this function directly,
 * use kfence_alloc() instead.
 */
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);

/**
 * kfence_alloc() - allocate a KFENCE object with a low probability
 * @s:     struct kmem_cache with object requirements
 * @size:  exact size of the object to allocate (can be less than @s->size
 *         e.g. for kmalloc caches)
 * @flags: GFP flags
 *
 * Return:
 * * NULL     - must proceed with allocating as usual,
 * * non-NULL - pointer to a KFENCE object.
 *
 * kfence_alloc() should be inserted into the heap allocation fast path,
 * allowing it to transparently return KFENCE-allocated objects with a low
 * probability using a static branch (the probability is controlled by the
 * kfence.sample_interval boot parameter).
 */
static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
#ifdef CONFIG_KFENCE_STATIC_KEYS
	if (static_branch_unlikely(&kfence_allocation_key))
#else
	if (unlikely(!atomic_read(&kfence_allocation_gate)))
#endif
		return __kfence_alloc(s, size, flags);
	return NULL;
}

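/*
 * Illustrative fast-path integration (hypothetical allocator hook; the real
 * integrations live in the SL*B allocators themselves): try KFENCE first and
 * fall back to the regular freelist when it declines.
 */
static inline void *__example_slab_alloc(struct kmem_cache *s, gfp_t flags,
					 size_t size)
{
	void *obj = kfence_alloc(s, size, flags);

	if (unlikely(obj))	/* rare: this allocation was sampled */
		return obj;

	/* ... regular freelist allocation would follow here ... */
	return NULL;
}
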
/**
 * kfence_ksize() - get actual amount of memory allocated for a KFENCE object
 * @addr: pointer to a heap object
 *
 * Return:
 * * 0     - not a KFENCE object, must call __ksize() instead,
 * * non-0 - this many bytes can be accessed without causing a memory error.
 *
 * kfence_ksize() returns the number of bytes requested for a KFENCE object at
 * allocation time. This number may be less than the object size of the
 * corresponding struct kmem_cache.
 */
size_t kfence_ksize(const void *addr);

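/*
 * Usage sketch (illustrative, assuming __ksize() from <linux/slab.h> is in
 * scope): a zero return means "not a KFENCE object", so the ?: operator
 * falls back to the allocator's own size lookup.
 */
static inline size_t __example_ksize(const void *objp)
{
	return kfence_ksize(objp) ?: __ksize(objp);
}
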
/**
 * kfence_object_start() - find the beginning of a KFENCE object
 * @addr: address within a KFENCE-allocated object
 *
 * Return: address of the beginning of the object.
 *
 * SL[AU]B-allocated objects are laid out within a page one by one, so it is
 * easy to calculate the beginning of an object given a pointer inside it and
 * the object size. The same is not true for KFENCE, which places a single
 * object at either end of the page. This helper function is used to find the
 * beginning of a KFENCE-allocated object.
 */
void *kfence_object_start(const void *addr);

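/*
 * Usage sketch (hypothetical helper, illustrative only): recover an object's
 * base address from an interior pointer.
 */
static inline void *__example_object_base(const void *ptr)
{
	if (is_kfence_address(ptr))
		return kfence_object_start(ptr);

	/* placeholder: the allocator derives the base from the page layout */
	return NULL;
}
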
/**
 * __kfence_free() - release a KFENCE heap object to KFENCE pool
 * @addr: object to be freed
 *
 * Requires: is_kfence_address(addr)
 *
 * Release a KFENCE object and mark it as freed.
 */
void __kfence_free(void *addr);

/**
 * kfence_free() - try to release an arbitrary heap object to KFENCE pool
 * @addr: object to be freed
 *
 * Return:
 * * false - object doesn't belong to KFENCE pool and was ignored,
 * * true  - object was released to KFENCE pool.
 *
 * Release a KFENCE object and mark it as freed. May be called on any object,
 * even non-KFENCE objects, to simplify integration of the hooks into the
 * allocator's free codepath. The allocator must check the return value to
 * determine if it was a KFENCE object or not.
 */
static __always_inline __must_check bool kfence_free(void *addr)
{
	if (!is_kfence_address(addr))
		return false;
	__kfence_free(addr);
	return true;
}

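/*
 * Illustrative free-path integration (hypothetical hook): KFENCE objects
 * bypass the allocator's freelists entirely, so the regular release path
 * runs only when kfence_free() declines the object.
 */
static inline void __example_slab_free(struct kmem_cache *s, void *obj)
{
	if (kfence_free(obj))	/* object returned to the KFENCE pool */
		return;

	/* ... regular freelist release would follow here ... */
}
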
/**
 * kfence_handle_page_fault() - perform page fault handling for KFENCE pages
 * @addr: faulting address
 *
 * Return:
 * * false - address outside KFENCE pool,
 * * true  - page fault handled by KFENCE, no additional handling required.
 *
 * A page fault inside KFENCE pool indicates a memory error, such as an
 * out-of-bounds access, a use-after-free or an invalid memory access. In these
 * cases KFENCE prints an error message and marks the offending page as
 * present, so that the kernel can proceed.
 */
bool __must_check kfence_handle_page_fault(unsigned long addr);

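/*
 * Illustrative arch fault-handler hook (hypothetical; each architecture
 * wires this up in its own fault path): give KFENCE first refusal on
 * kernel-space faults before falling through to oops handling.
 */
static inline bool __example_kernel_fault(unsigned long address)
{
	if (kfence_handle_page_fault(address))
		return true;	/* report printed; faulting page made present */

	/* ... fall through to the architecture's normal fault handling ... */
	return false;
}
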
#else /* CONFIG_KFENCE */

static inline bool is_kfence_address(const void *addr) { return false; }
static inline void kfence_alloc_pool(void) { }
static inline void kfence_init(void) { }
static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
static inline size_t kfence_ksize(const void *addr) { return 0; }
static inline void *kfence_object_start(const void *addr) { return NULL; }
static inline void __kfence_free(void *addr) { }
static inline bool __must_check kfence_free(void *addr) { return false; }
static inline bool __must_check kfence_handle_page_fault(unsigned long addr) { return false; }

#endif

#endif /* _LINUX_KFENCE_H */