/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_STATIC_CALL_H
#define _LINUX_STATIC_CALL_H

/*
 * Static call support
 *
 * Static calls use code patching to hard-code function pointers into direct
 * branch instructions. They give the flexibility of function pointers, but
 * with improved performance. This is especially important for cases where
 * retpolines would otherwise be used, as retpolines can significantly impact
 * performance.
 *
 *
 * API overview:
 *
 *   DECLARE_STATIC_CALL(name, func);
 *   DEFINE_STATIC_CALL(name, func);
 *   DEFINE_STATIC_CALL_NULL(name, typename);
 *   static_call(name)(args...);
 *   static_call_cond(name)(args...);
 *   static_call_update(name, func);
 *
 * Usage example:
 *
 *   # Start with the following functions (with identical prototypes):
 *   int func_a(int arg1, int arg2);
 *   int func_b(int arg1, int arg2);
 *
 *   # Define a 'my_name' reference, associated with func_a() by default
 *   DEFINE_STATIC_CALL(my_name, func_a);
 *
 *   # Call func_a()
 *   static_call(my_name)(arg1, arg2);
 *
 *   # Update 'my_name' to point to func_b()
 *   static_call_update(my_name, &func_b);
 *
 *   # Call func_b()
 *   static_call(my_name)(arg1, arg2);
 *
 *
 * Implementation details:
 *
 *   This requires some arch-specific code (CONFIG_HAVE_STATIC_CALL).
 *   Otherwise basic indirect calls are used (with function pointers).
 *
 *   Each static_call() site calls into a trampoline associated with the name.
 *   The trampoline has a direct branch to the default function. Updates to a
 *   name will modify the trampoline's branch destination.
 *
 *   If the arch has CONFIG_HAVE_STATIC_CALL_INLINE, then the call sites
 *   themselves will be patched at runtime to call the functions directly,
 *   rather than calling through the trampoline. This requires objtool or a
 *   compiler plugin to detect all the static_call() sites and annotate them
 *   in the .static_call_sites section.
 *
 *
 * Notes on NULL function pointers:
 *
 *   Static_call()s support NULL functions, with many of the caveats that
 *   regular function pointers have.
 *
 *   Clearly calling a NULL function pointer is 'BAD'; the same goes for
 *   static_call()s (although with HAVE_STATIC_CALL it might not be
 *   immediately fatal). A NULL static_call can be the result of:
 *
 *     DEFINE_STATIC_CALL_NULL(my_static_call, void (*)(int));
 *
 *   which is equivalent to declaring a NULL function pointer with just a
 *   typename:
 *
 *     void (*my_func_ptr)(int arg1) = NULL;
 *
 *   or using static_call_update() with a NULL function. In both cases the
 *   HAVE_STATIC_CALL implementation will patch the trampoline with a RET
 *   instruction, instead of an immediate tail-call JMP. HAVE_STATIC_CALL_INLINE
 *   architectures can patch the trampoline call to a NOP.
 *
 *   In all cases, any argument evaluation is unconditional. Unlike a regular
 *   conditional function pointer call:
 *
 *     if (my_func_ptr)
 *         my_func_ptr(arg1)
 *
 *   where the argument evaluation also depends on the pointer value.
 *
 *   When calling a static_call that can be NULL, use:
 *
 *     static_call_cond(name)(arg1);
 *
 *   which will include the required value tests to avoid NULL-pointer
 *   dereferences.
 */
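
/*
 * Illustrative sketch of a NULL-capable static call, tying the pieces above
 * together (the 'my_notify' name and my_notify_impl() are hypothetical, not
 * part of this API):
 *
 *   # Define a reference with no target yet; only the typename is given
 *   DEFINE_STATIC_CALL_NULL(my_notify, void (*)(int));
 *
 *   # Safe while NULL: the call is skipped, but 'cpu' is still evaluated
 *   static_call_cond(my_notify)(cpu);
 *
 *   # Install a target, then later detach it again
 *   static_call_update(my_notify, &my_notify_impl);
 *   static_call_update(my_notify, NULL);
 */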

#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/static_call_types.h>

#ifdef CONFIG_HAVE_STATIC_CALL
#include <asm/static_call.h>

/*
 * Either @site or @tramp can be NULL.
 */
extern void arch_static_call_transform(void *site, void *tramp, void *func, bool tail);

#define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)

#else
#define STATIC_CALL_TRAMP_ADDR(name) NULL
#endif

#define static_call_update(name, func)                                  \
({                                                                      \
        BUILD_BUG_ON(!__same_type(*(func), STATIC_CALL_TRAMP(name)));   \
        __static_call_update(&STATIC_CALL_KEY(name),                    \
                             STATIC_CALL_TRAMP_ADDR(name), func);       \
})
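
/*
 * A sketch of the compile-time type check above, with hypothetical names:
 *
 *   int  my_int_fn(int arg);
 *   void my_void_fn(void);
 *
 *   DEFINE_STATIC_CALL(my_call, my_int_fn);
 *
 *   static_call_update(my_call, &my_int_fn);   # OK: prototypes match
 *   static_call_update(my_call, &my_void_fn);  # BUILD_BUG_ON(): type mismatch
 */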

#ifdef CONFIG_HAVE_STATIC_CALL_INLINE

extern int __init static_call_init(void);

struct static_call_mod {
        struct static_call_mod *next;
        struct module *mod; /* for vmlinux, mod == NULL */
        struct static_call_site *sites;
};

struct static_call_key {
        void *func;
        union {
                /* bit 0: 0 = mods, 1 = sites */
                unsigned long type;
                struct static_call_mod *mods;
                struct static_call_site *sites;
        };
};

/* For finding the key associated with a trampoline */
struct static_call_tramp_key {
        s32 tramp;
        s32 key;
};

extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
extern int static_call_mod_init(struct module *mod);
extern int static_call_text_reserved(void *start, void *end);

extern long __static_call_return0(void);

#define __DEFINE_STATIC_CALL(name, _func, _func_init)                   \
        DECLARE_STATIC_CALL(name, _func);                               \
        struct static_call_key STATIC_CALL_KEY(name) = {                \
                .func = _func_init,                                     \
                .type = 1,                                              \
        };                                                              \
        ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)

#define DEFINE_STATIC_CALL_NULL(name, _func)                            \
        DECLARE_STATIC_CALL(name, _func);                               \
        struct static_call_key STATIC_CALL_KEY(name) = {                \
                .func = NULL,                                           \
                .type = 1,                                              \
        };                                                              \
        ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)

#define static_call_cond(name)  (void)__static_call(name)

#define EXPORT_STATIC_CALL(name)                                        \
        EXPORT_SYMBOL(STATIC_CALL_KEY(name));                           \
        EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name)                                    \
        EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));                       \
        EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

/* Leave the key unexported, so modules can't change static call targets: */
#define EXPORT_STATIC_CALL_TRAMP(name)                                  \
        EXPORT_SYMBOL(STATIC_CALL_TRAMP(name));                         \
        ARCH_ADD_TRAMP_KEY(name)
#define EXPORT_STATIC_CALL_TRAMP_GPL(name)                              \
        EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name));                     \
        ARCH_ADD_TRAMP_KEY(name)

#elif defined(CONFIG_HAVE_STATIC_CALL)

static inline int static_call_init(void) { return 0; }

struct static_call_key {
        void *func;
};

#define __DEFINE_STATIC_CALL(name, _func, _func_init)                   \
        DECLARE_STATIC_CALL(name, _func);                               \
        struct static_call_key STATIC_CALL_KEY(name) = {                \
                .func = _func_init,                                     \
        };                                                              \
        ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)

#define DEFINE_STATIC_CALL_NULL(name, _func)                            \
        DECLARE_STATIC_CALL(name, _func);                               \
        struct static_call_key STATIC_CALL_KEY(name) = {                \
                .func = NULL,                                           \
        };                                                              \
        ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)

#define static_call_cond(name)  (void)__static_call(name)

static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
        cpus_read_lock();
        WRITE_ONCE(key->func, func);
        arch_static_call_transform(NULL, tramp, func, false);
        cpus_read_unlock();
}

static inline int static_call_text_reserved(void *start, void *end)
{
        return 0;
}

static inline long __static_call_return0(void)
{
        return 0;
}

#define EXPORT_STATIC_CALL(name)                                        \
        EXPORT_SYMBOL(STATIC_CALL_KEY(name));                           \
        EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name)                                    \
        EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));                       \
        EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

/* Leave the key unexported, so modules can't change static call targets: */
#define EXPORT_STATIC_CALL_TRAMP(name)                                  \
        EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_TRAMP_GPL(name)                              \
        EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

#else /* Generic implementation */

static inline int static_call_init(void) { return 0; }

struct static_call_key {
        void *func;
};

static inline long __static_call_return0(void)
{
        return 0;
}

#define __DEFINE_STATIC_CALL(name, _func, _func_init)                   \
        DECLARE_STATIC_CALL(name, _func);                               \
        struct static_call_key STATIC_CALL_KEY(name) = {                \
                .func = _func_init,                                     \
        }

#define DEFINE_STATIC_CALL_NULL(name, _func)                            \
        DECLARE_STATIC_CALL(name, _func);                               \
        struct static_call_key STATIC_CALL_KEY(name) = {                \
                .func = NULL,                                           \
        }

static inline void __static_call_nop(void) { }

/*
 * This horrific hack takes care of two things:
 *
 *  - it ensures the compiler will only load the function pointer ONCE,
 *    which avoids a reload race.
 *
 *  - it ensures the argument evaluation is unconditional, similar
 *    to the HAVE_STATIC_CALL variant.
 *
 * Sadly current GCC/Clang (10 for both) do not optimize this properly
 * and will emit an indirect call for the NULL case :-(
 */
#define __static_call_cond(name)                                        \
({                                                                      \
        void *func = READ_ONCE(STATIC_CALL_KEY(name).func);             \
        if (!func)                                                      \
                func = &__static_call_nop;                              \
        (typeof(STATIC_CALL_TRAMP(name))*)func;                         \
})

#define static_call_cond(name)  (void)__static_call_cond(name)

static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
        WRITE_ONCE(key->func, func);
}

static inline int static_call_text_reserved(void *start, void *end)
{
        return 0;
}

#define EXPORT_STATIC_CALL(name)      EXPORT_SYMBOL(STATIC_CALL_KEY(name))
#define EXPORT_STATIC_CALL_GPL(name)  EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))

#endif /* CONFIG_HAVE_STATIC_CALL */

#define DEFINE_STATIC_CALL(name, _func)                                 \
        __DEFINE_STATIC_CALL(name, _func, _func)

#define DEFINE_STATIC_CALL_RET0(name, _func)                            \
        __DEFINE_STATIC_CALL(name, _func, __static_call_return0)

#endif /* _LINUX_STATIC_CALL_H */