/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Kernel Electric-Fence (KFENCE). Public interface for allocator and fault
 * handler integration. For more info see Documentation/dev-tools/kfence.rst.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef _LINUX_KFENCE_H
#define _LINUX_KFENCE_H

#include <linux/mm.h>
#include <linux/types.h>

#ifdef CONFIG_KFENCE

#include <linux/atomic.h>
#include <linux/static_key.h>

extern unsigned long kfence_sample_interval;

/*
 * We allocate an even number of pages, as it simplifies calculations to map
 * address to metadata indices; effectively, the very first page serves as an
 * extended guard page, but otherwise has no special purpose.
 */
#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
extern char *__kfence_pool;

DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
extern atomic_t kfence_allocation_gate;

/**
 * is_kfence_address() - check if an address belongs to the KFENCE pool
 * @addr: address to check
 *
 * Return: true or false depending on whether the address is within the KFENCE
 * object range.
 *
 * KFENCE objects live in a separate page range and are not to be intermixed
 * with regular heap objects (e.g. KFENCE objects must never be added to the
 * allocator freelists). Failing to keep them separate will sooner or later
 * result in heap corruption; is_kfence_address() must therefore be used to
 * check whether an object requires specific handling.
 *
 * Note: This function may be used in fast-paths, and is performance critical.
 * Future changes should take this into account; for instance, we want to avoid
 * introducing another load and therefore need to keep KFENCE_POOL_SIZE a
 * constant (until immediate patching support is added to the kernel).
 */
static __always_inline bool is_kfence_address(const void *addr)
{
	/*
	 * The __kfence_pool != NULL check is required to deal with the case
	 * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
	 * the slow-path after the range-check!
	 */
	return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
}
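/*
 * Usage sketch (illustrative only, not part of the interface; obj_size() is
 * a hypothetical helper): code that derives object state from a heap pointer
 * should branch on is_kfence_address() first, because KFENCE objects are not
 * described by the regular allocator metadata. For example:
 *
 *	static size_t obj_size(const void *addr)
 *	{
 *		if (is_kfence_address(addr))
 *			return kfence_ksize(addr);
 *		return __ksize(addr);
 *	}
 *
 * where kfence_ksize() (documented below) returns the accessible size of a
 * KFENCE object, and __ksize() stands for the allocator's regular internal
 * size lookup.
 */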
/**
 * kfence_alloc_pool() - allocate the KFENCE pool via memblock
 */
void __init kfence_alloc_pool(void);

/**
 * kfence_init() - perform KFENCE initialization at boot time
 *
 * Requires that kfence_alloc_pool() was called before. This sets up the
 * allocation gate timer, and requires that workqueues are available.
 */
void __init kfence_init(void);

/**
 * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects
 * @s: cache being shut down
 *
 * Before shutting down a cache, one must ensure there are no remaining objects
 * allocated from it. Because KFENCE objects are not referenced from the cache
 * directly, we need to check them here.
 *
 * Note that shutdown_cache() is internal to SL*B, and kmem_cache_destroy() does
 * not return if allocated objects still exist: it prints an error message and
 * simply aborts destruction of a cache, leaking memory.
 *
 * If the only such objects are KFENCE objects, we will not leak the entire
 * cache, but instead try to provide more useful debug info by making allocated
 * objects "zombie allocations". Objects may then still be used or freed (which
 * is handled gracefully), but any use will result in KFENCE error reports that
 * include stack traces to the user of the object, the original allocation
 * site, and the caller of shutdown_cache().
 */
void kfence_shutdown_cache(struct kmem_cache *s);

/*
 * Allocate a KFENCE object. Allocators must not call this function directly,
 * use kfence_alloc() instead.
 */
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);

/**
 * kfence_alloc() - allocate a KFENCE object with a low probability
 * @s:     struct kmem_cache with object requirements
 * @size:  exact size of the object to allocate (can be less than @s->size,
 *         e.g. for kmalloc caches)
 * @flags: GFP flags
 *
 * Return:
 * * NULL     - must proceed with allocating as usual,
 * * non-NULL - pointer to a KFENCE object.
 *
 * kfence_alloc() should be inserted into the heap allocation fast path,
 * allowing it to transparently return KFENCE-allocated objects with a low
 * probability using a static branch (the probability is controlled by the
 * kfence.sample_interval boot parameter).
 */
static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
	if (!static_branch_unlikely(&kfence_allocation_key))
		return NULL;
#else
	if (!static_branch_likely(&kfence_allocation_key))
		return NULL;
#endif
	if (likely(atomic_read(&kfence_allocation_gate)))
		return NULL;
	return __kfence_alloc(s, size, flags);
}

/**
 * kfence_ksize() - get actual amount of memory allocated for a KFENCE object
 * @addr: pointer to a heap object
 *
 * Return:
 * * 0     - not a KFENCE object, must call __ksize() instead,
 * * non-0 - this many bytes can be accessed without causing a memory error.
 *
 * kfence_ksize() returns the number of bytes requested for a KFENCE object at
 * allocation time. This number may be less than the object size of the
 * corresponding struct kmem_cache.
 */
size_t kfence_ksize(const void *addr);

/**
 * kfence_object_start() - find the beginning of a KFENCE object
 * @addr: address within a KFENCE-allocated object
 *
 * Return: address of the beginning of the object.
 *
 * SL[AU]B-allocated objects are laid out within a page one by one, so it is
 * easy to calculate the beginning of an object given a pointer inside it and
 * the object size. The same is not true for KFENCE, which places a single
 * object at either end of the page. This helper function is used to find the
 * beginning of a KFENCE-allocated object.
 */
void *kfence_object_start(const void *addr);

/**
 * __kfence_free() - release a KFENCE heap object to KFENCE pool
 * @addr: object to be freed
 *
 * Requires: is_kfence_address(addr)
 *
 * Release a KFENCE object and mark it as freed.
 */
void __kfence_free(void *addr);

/**
 * kfence_free() - try to release an arbitrary heap object to KFENCE pool
 * @addr: object to be freed
 *
 * Return:
 * * false - object doesn't belong to KFENCE pool and was ignored,
 * * true  - object was released to KFENCE pool.
 *
 * Release a KFENCE object and mark it as freed. May be called on any object,
 * even non-KFENCE objects, to simplify integration of the hooks into the
 * allocator's free codepath. The allocator must check the return value to
 * determine whether it was a KFENCE object or not.
 */
static __always_inline __must_check bool kfence_free(void *addr)
{
	if (!is_kfence_address(addr))
		return false;
	__kfence_free(addr);
	return true;
}
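/*
 * Integration sketch (illustrative only; my_cache_alloc() and my_cache_free()
 * are hypothetical stand-ins for an allocator's real entry points): both
 * hooks are meant to sit directly in the fast paths, with KFENCE consulted
 * first and the regular implementation as the fallback:
 *
 *	void *my_cache_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 *	{
 *		void *obj = kfence_alloc(s, size, flags);
 *
 *		if (obj)
 *			return obj;
 *		... regular slab allocation ...
 *	}
 *
 *	void my_cache_free(struct kmem_cache *s, void *addr)
 *	{
 *		if (kfence_free(addr))
 *			return;
 *		... regular slab free path ...
 *	}
 */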
/**
 * kfence_handle_page_fault() - perform page fault handling for KFENCE pages
 * @addr: faulting address
 * @is_write: is the access a write
 * @regs: current struct pt_regs (can be NULL, but a full stack trace is only
 *        shown if provided)
 *
 * Return:
 * * false - address outside KFENCE pool,
 * * true  - page fault handled by KFENCE, no additional handling required.
 *
 * A page fault inside the KFENCE pool indicates a memory error, such as an
 * out-of-bounds access, a use-after-free, or an invalid memory access. In
 * these cases KFENCE prints an error message and marks the offending page as
 * present, so that the kernel can proceed.
 */
bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);

#else /* CONFIG_KFENCE */

static inline bool is_kfence_address(const void *addr) { return false; }
static inline void kfence_alloc_pool(void) { }
static inline void kfence_init(void) { }
static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
static inline size_t kfence_ksize(const void *addr) { return 0; }
static inline void *kfence_object_start(const void *addr) { return NULL; }
static inline void __kfence_free(void *addr) { }
static inline bool __must_check kfence_free(void *addr) { return false; }
static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write,
							  struct pt_regs *regs)
{
	return false;
}

#endif

#endif /* _LINUX_KFENCE_H */