/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_STATIC_CALL_H
#define _LINUX_STATIC_CALL_H

/*
 * Static call support
 *
 * Static calls use code patching to hard-code function pointers into direct
 * branch instructions. They give the flexibility of function pointers, but
 * with improved performance. This is especially important for cases where
 * retpolines would otherwise be used, as retpolines can significantly impact
 * performance.
 *
 *
 * API overview:
 *
 *   DECLARE_STATIC_CALL(name, func);
 *   DEFINE_STATIC_CALL(name, func);
 *   DEFINE_STATIC_CALL_NULL(name, typename);
 *   static_call(name)(args...);
 *   static_call_cond(name)(args...);
 *   static_call_update(name, func);
 *   static_call_query(name);
 *
 * Usage example:
 *
 *   # Start with the following functions (with identical prototypes):
 *   int func_a(int arg1, int arg2);
 *   int func_b(int arg1, int arg2);
 *
 *   # Define a 'my_name' reference, associated with func_a() by default
 *   DEFINE_STATIC_CALL(my_name, func_a);
 *
 *   # Call func_a()
 *   static_call(my_name)(arg1, arg2);
 *
 *   # Update 'my_name' to point to func_b()
 *   static_call_update(my_name, &func_b);
 *
 *   # Call func_b()
 *   static_call(my_name)(arg1, arg2);
 *
 *
 * Implementation details:
 *
 *   This requires some arch-specific code (CONFIG_HAVE_STATIC_CALL).
 *   Otherwise basic indirect calls are used (with function pointers).
 *
 *   Each static_call() site calls into a trampoline associated with the name.
 *   The trampoline has a direct branch to the default function. Updates to a
 *   name will modify the trampoline's branch destination.
 *
 *   If the arch has CONFIG_HAVE_STATIC_CALL_INLINE, then the call sites
 *   themselves will be patched at runtime to call the functions directly,
 *   rather than calling through the trampoline. This requires objtool or a
 *   compiler plugin to detect all the static_call() sites and annotate them
 *   in the .static_call_sites section.
 *
 *
 * Notes on NULL function pointers:
 *
 *   Static_call()s support NULL functions, with many of the caveats that
 *   regular function pointers have.
 *
 *   Clearly calling a NULL function pointer is 'BAD', so too for
 *   static_call()s (although when HAVE_STATIC_CALL it might not be immediately
 *   fatal). A NULL static_call can be the result of:
 *
 *     DEFINE_STATIC_CALL_NULL(my_static_call, void (*)(int));
 *
 *   which is equivalent to declaring a NULL function pointer with just a
 *   typename:
 *
 *     void (*my_func_ptr)(int arg1) = NULL;
 *
 *   or using static_call_update() with a NULL function. In both cases the
 *   HAVE_STATIC_CALL implementation will patch the trampoline with a RET
 *   instruction, instead of an immediate tail-call JMP. HAVE_STATIC_CALL_INLINE
 *   architectures can patch the trampoline call to a NOP.
 *
 *   In all cases, argument evaluation is unconditional, unlike a regular
 *   conditional function pointer call:
 *
 *     if (my_func_ptr)
 *         my_func_ptr(arg1)
 *
 *   where the argument evaluation also depends on the pointer value.
 *
 *   When calling a static_call that can be NULL, use:
 *
 *     static_call_cond(name)(arg1);
 *
 *   which will include the required value tests to avoid NULL-pointer
 *   dereferences.
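 *
 *   A minimal sketch of that pattern (the 'my_hook' name and handle_event()
 *   function below are hypothetical, used only for illustration):
 *
 *     # Define 'my_hook' with no function attached yet
 *     DEFINE_STATIC_CALL_NULL(my_hook, void (*)(int));
 *
 *     # Safe while NULL; note that arg1 is still evaluated
 *     static_call_cond(my_hook)(arg1);
 *
 *     # Attach a handler; subsequent calls invoke handle_event()
 *     static_call_update(my_hook, &handle_event);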
 *
 * To query which function is currently set to be called, use:
 *
 *   func = static_call_query(name);
 */

#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/static_call_types.h>

#ifdef CONFIG_HAVE_STATIC_CALL
#include <asm/static_call.h>

/*
 * Either @site or @tramp can be NULL.
 */
extern void arch_static_call_transform(void *site, void *tramp, void *func, bool tail);

#define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)

#else
#define STATIC_CALL_TRAMP_ADDR(name) NULL
#endif

#define static_call_update(name, func)					\
({									\
	BUILD_BUG_ON(!__same_type(*(func), STATIC_CALL_TRAMP(name)));	\
	__static_call_update(&STATIC_CALL_KEY(name),			\
			     STATIC_CALL_TRAMP_ADDR(name), func);	\
})

#define static_call_query(name) (READ_ONCE(STATIC_CALL_KEY(name).func))

#ifdef CONFIG_HAVE_STATIC_CALL_INLINE

extern int __init static_call_init(void);

struct static_call_mod {
	struct static_call_mod *next;
	struct module *mod; /* for vmlinux, mod == NULL */
	struct static_call_site *sites;
};

/* For finding the key associated with a trampoline */
struct static_call_tramp_key {
	s32 tramp;
	s32 key;
};

extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
extern int static_call_mod_init(struct module *mod);
extern int static_call_text_reserved(void *start, void *end);

extern long __static_call_return0(void);

#define __DEFINE_STATIC_CALL(name, _func, _func_init)			\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func_init,					\
		.type = 1,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)

#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = NULL,						\
		.type = 1,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)

#define static_call_cond(name)	(void)__static_call(name)

#define EXPORT_STATIC_CALL(name)					\
	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name)					\
	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

/* Leave the key unexported, so modules can't change static call targets: */
#define EXPORT_STATIC_CALL_TRAMP(name)					\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name));				\
	ARCH_ADD_TRAMP_KEY(name)
#define EXPORT_STATIC_CALL_TRAMP_GPL(name)				\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name));			\
	ARCH_ADD_TRAMP_KEY(name)

#elif defined(CONFIG_HAVE_STATIC_CALL)

static inline int static_call_init(void) { return 0; }

#define __DEFINE_STATIC_CALL(name, _func, _func_init)			\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func_init,					\
	};								\
	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)

#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = NULL,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)


#define static_call_cond(name)	(void)__static_call(name)

static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
	cpus_read_lock();
	WRITE_ONCE(key->func, func);
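	/*
	 * Only the out-of-line trampoline needs patching here (@site is
	 * NULL); the new target was published in @key above, and the
	 * cpus_read_lock() keeps CPU hotplug from racing with the text
	 * patch.
	 */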
	arch_static_call_transform(NULL, tramp, func, false);
	cpus_read_unlock();
}

static inline int static_call_text_reserved(void *start, void *end)
{
	return 0;
}

static inline long __static_call_return0(void)
{
	return 0;
}

#define EXPORT_STATIC_CALL(name)					\
	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name)					\
	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

/* Leave the key unexported, so modules can't change static call targets: */
#define EXPORT_STATIC_CALL_TRAMP(name)					\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_TRAMP_GPL(name)				\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

#else /* Generic implementation */

static inline int static_call_init(void) { return 0; }

static inline long __static_call_return0(void)
{
	return 0;
}

#define __DEFINE_STATIC_CALL(name, _func, _func_init)			\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func_init,					\
	}

#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = NULL,						\
	}

static inline void __static_call_nop(void) { }

/*
 * This horrific hack takes care of two things:
 *
 *  - it ensures the compiler will only load the function pointer ONCE,
 *    which avoids a reload race.
 *
 *  - it ensures the argument evaluation is unconditional, similar
 *    to the HAVE_STATIC_CALL variant.
 *
 * Sadly current GCC/Clang (10 for both) do not optimize this properly
 * and will emit an indirect call for the NULL case :-(
 */
#define __static_call_cond(name)					\
({									\
	void *func = READ_ONCE(STATIC_CALL_KEY(name).func);		\
	if (!func)							\
		func = &__static_call_nop;				\
	(typeof(STATIC_CALL_TRAMP(name))*)func;				\
})

#define static_call_cond(name)	(void)__static_call_cond(name)

static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
	WRITE_ONCE(key->func, func);
}

static inline int static_call_text_reserved(void *start, void *end)
{
	return 0;
}

#define EXPORT_STATIC_CALL(name)	EXPORT_SYMBOL(STATIC_CALL_KEY(name))
#define EXPORT_STATIC_CALL_GPL(name)	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))

#endif /* CONFIG_HAVE_STATIC_CALL */

#define DEFINE_STATIC_CALL(name, _func)					\
	__DEFINE_STATIC_CALL(name, _func, _func)

#define DEFINE_STATIC_CALL_RET0(name, _func)				\
	__DEFINE_STATIC_CALL(name, _func, __static_call_return0)

#endif /* _LINUX_STATIC_CALL_H */