/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_STATIC_CALL_H
#define _LINUX_STATIC_CALL_H

/*
 * Static call support
 *
 * Static calls use code patching to hard-code function pointers into direct
 * branch instructions. They give the flexibility of function pointers, but
 * with improved performance. This is especially important for cases where
 * retpolines would otherwise be used, as retpolines can significantly impact
 * performance.
 *
 *
 * API overview:
 *
 *   DECLARE_STATIC_CALL(name, func);
 *   DEFINE_STATIC_CALL(name, func);
 *   DEFINE_STATIC_CALL_NULL(name, typename);
 *   DEFINE_STATIC_CALL_RET0(name, typename);
 *
 *   __static_call_return0;
 *
 *   static_call(name)(args...);
 *   static_call_cond(name)(args...);
 *   static_call_update(name, func);
 *   static_call_query(name);
 *
 *   EXPORT_STATIC_CALL{,_TRAMP}{,_GPL}()
 *
 * Usage example:
 *
 *   # Start with the following functions (with identical prototypes):
 *   int func_a(int arg1, int arg2);
 *   int func_b(int arg1, int arg2);
 *
 *   # Define a 'my_name' reference, associated with func_a() by default
 *   DEFINE_STATIC_CALL(my_name, func_a);
 *
 *   # Call func_a()
 *   static_call(my_name)(arg1, arg2);
 *
 *   # Update 'my_name' to point to func_b()
 *   static_call_update(my_name, &func_b);
 *
 *   # Call func_b()
 *   static_call(my_name)(arg1, arg2);
 *
 *
 * Implementation details:
 *
 *   This requires some arch-specific code (CONFIG_HAVE_STATIC_CALL).
 *   Otherwise basic indirect calls are used (with function pointers).
 *
 *   Each static_call() site calls into a trampoline associated with the name.
 *   The trampoline has a direct branch to the default function. Updates to a
 *   name will modify the trampoline's branch destination.
 *
 *   If the arch has CONFIG_HAVE_STATIC_CALL_INLINE, then the call sites
 *   themselves will be patched at runtime to call the functions directly,
 *   rather than calling through the trampoline. This requires objtool or a
 *   compiler plugin to detect all the static_call() sites and annotate them
 *   in the .static_call_sites section.
 *
 *
 * Notes on NULL function pointers:
 *
 *   Static_call()s support NULL functions, with many of the caveats that
 *   regular function pointers have.
 *
 *   Clearly calling a NULL function pointer is 'BAD'; the same is true for
 *   static_call()s (although with HAVE_STATIC_CALL it might not be
 *   immediately fatal). A NULL static_call can be the result of:
 *
 *     DEFINE_STATIC_CALL_NULL(my_static_call, void (*)(int));
 *
 *   which is equivalent to declaring a NULL function pointer with just a
 *   typename:
 *
 *     void (*my_func_ptr)(int arg1) = NULL;
 *
 *   or using static_call_update() with a NULL function. In both cases the
 *   HAVE_STATIC_CALL implementation will patch the trampoline with a RET
 *   instruction, instead of an immediate tail-call JMP. HAVE_STATIC_CALL_INLINE
 *   architectures can patch the trampoline call to a NOP.
 *
 *   In all cases, any argument evaluation is unconditional. This is unlike a
 *   regular conditional function pointer call:
 *
 *     if (my_func_ptr)
 *         my_func_ptr(arg1)
 *
 *   where the argument evaluation also depends on the pointer value.
 *
 *   When calling a static_call that can be NULL, use:
 *
 *     static_call_cond(name)(arg1);
 *
 *   which will include the required value tests to avoid NULL-pointer
 *   dereferences.
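 *
 *   For example, a minimal sketch (the 'my_hook' name and hook_fn() function
 *   below are hypothetical, purely for illustration):
 *
 *     DEFINE_STATIC_CALL_NULL(my_hook, void (*)(int));
 *
 *     # Safe to call even while the target is still NULL
 *     static_call_cond(my_hook)(arg1);
 *
 *     # Attach a real function; subsequent calls invoke hook_fn()
 *     static_call_update(my_hook, &hook_fn);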
 *
 * To query which function is currently set to be called, use:
 *
 *   func = static_call_query(name);
 *
 *
 * DEFINE_STATIC_CALL_RET0 / __static_call_return0:
 *
 *   Just like DEFINE_STATIC_CALL_NULL() / static_call_cond() optimize the
 *   conditional void function call, DEFINE_STATIC_CALL_RET0 /
 *   __static_call_return0 optimize the 'do nothing and return 0' function.
 *
 *   This feature is strictly UB per the C standard (since it casts a function
 *   pointer to a different signature) and relies on the architecture ABI to
 *   make things work. In particular it relies on caller stack-cleanup and the
 *   whole return register being clobbered for short return values. All normal
 *   CDECL style ABIs conform.
 *
 *   For example, the x86_64 implementation replaces the 5 byte CALL
 *   instruction at the callsite with a 5 byte clear of the RAX register,
 *   completely eliding any function call overhead.
 *
 *   Notably, argument setup is unconditional.
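 *
 *   A minimal sketch (the 'my_count' name and real_count() function below
 *   are hypothetical):
 *
 *     DEFINE_STATIC_CALL_RET0(my_count, int (*)(void));
 *
 *     # Returns 0 with (almost) no call overhead until a real function
 *     # is installed
 *     ret = static_call(my_count)();
 *
 *     # Install a real function; subsequent calls invoke real_count()
 *     static_call_update(my_count, &real_count);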
 *
 *
 * EXPORT_STATIC_CALL() vs EXPORT_STATIC_CALL_TRAMP():
 *
 *   The difference is that the _TRAMP variant tries to export only the
 *   trampoline, with the result that a module can use static_call{,_cond}()
 *   but not static_call_update().
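 *
 *   For example, to let modules call a static_call while keeping its target
 *   under core-kernel control ('my_name' as in the usage example above):
 *
 *     DEFINE_STATIC_CALL(my_name, func_a);
 *     EXPORT_STATIC_CALL_TRAMP_GPL(my_name);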
 *
 */

#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/static_call_types.h>

#ifdef CONFIG_HAVE_STATIC_CALL
#include <asm/static_call.h>

/*
 * Either @site or @tramp can be NULL.
 */
extern void arch_static_call_transform(void *site, void *tramp, void *func, bool tail);

#define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)

#else
#define STATIC_CALL_TRAMP_ADDR(name) NULL
#endif

#define static_call_update(name, func)					\
({									\
	typeof(&STATIC_CALL_TRAMP(name)) __F = (func);			\
	__static_call_update(&STATIC_CALL_KEY(name),			\
			     STATIC_CALL_TRAMP_ADDR(name), __F);	\
})

#define static_call_query(name) (READ_ONCE(STATIC_CALL_KEY(name).func))

#ifdef CONFIG_HAVE_STATIC_CALL_INLINE

extern int static_call_initialized;

extern int __init static_call_init(void);

extern void static_call_force_reinit(void);

struct static_call_mod {
	struct static_call_mod *next;
	struct module *mod; /* for vmlinux, mod == NULL */
	struct static_call_site *sites;
};

/* For finding the key associated with a trampoline */
struct static_call_tramp_key {
	s32 tramp;
	s32 key;
};

extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
extern int static_call_mod_init(struct module *mod);
extern int static_call_text_reserved(void *start, void *end);

extern long __static_call_return0(void);

#define DEFINE_STATIC_CALL(name, _func)					\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func,						\
		.type = 1,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)

#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = NULL,						\
		.type = 1,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)

#define DEFINE_STATIC_CALL_RET0(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = __static_call_return0,				\
		.type = 1,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)

#define static_call_cond(name)	(void)__static_call(name)

#define EXPORT_STATIC_CALL(name)					\
	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name)					\
	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

/* Leave the key unexported, so modules can't change static call targets: */
#define EXPORT_STATIC_CALL_TRAMP(name)					\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name));				\
	ARCH_ADD_TRAMP_KEY(name)
#define EXPORT_STATIC_CALL_TRAMP_GPL(name)				\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name));			\
	ARCH_ADD_TRAMP_KEY(name)

#elif defined(CONFIG_HAVE_STATIC_CALL)

#define static_call_initialized 0

static inline int static_call_init(void) { return 0; }

#define DEFINE_STATIC_CALL(name, _func)					\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)

#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = NULL,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)

#define DEFINE_STATIC_CALL_RET0(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = __static_call_return0,				\
	};								\
	ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)

#define static_call_cond(name)	(void)__static_call(name)

static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
	cpus_read_lock();
	WRITE_ONCE(key->func, func);
	arch_static_call_transform(NULL, tramp, func, false);
	cpus_read_unlock();
}

static inline int static_call_text_reserved(void *start, void *end)
{
	return 0;
}

extern long __static_call_return0(void);

#define EXPORT_STATIC_CALL(name)					\
	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name)					\
	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

/* Leave the key unexported, so modules can't change static call targets: */
#define EXPORT_STATIC_CALL_TRAMP(name)					\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_TRAMP_GPL(name)				\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

#else /* Generic implementation */

#define static_call_initialized 0

static inline int static_call_init(void) { return 0; }

static inline long __static_call_return0(void)
{
	return 0;
}

#define __DEFINE_STATIC_CALL(name, _func, _func_init)			\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func_init,					\
	}

#define DEFINE_STATIC_CALL(name, _func)					\
	__DEFINE_STATIC_CALL(name, _func, _func)

#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	__DEFINE_STATIC_CALL(name, _func, NULL)

#define DEFINE_STATIC_CALL_RET0(name, _func)				\
	__DEFINE_STATIC_CALL(name, _func, __static_call_return0)

static inline void __static_call_nop(void) { }

/*
 * This horrific hack takes care of two things:
 *
 *  - it ensures the compiler will only load the function pointer ONCE,
 *    which avoids a reload race.
 *
 *  - it ensures the argument evaluation is unconditional, similar
 *    to the HAVE_STATIC_CALL variant.
 *
 * Sadly current GCC/Clang (10 for both) do not optimize this properly
 * and will emit an indirect call for the NULL case :-(
 */
#define __static_call_cond(name)					\
({									\
	void *func = READ_ONCE(STATIC_CALL_KEY(name).func);		\
	if (!func)							\
		func = &__static_call_nop;				\
	(typeof(STATIC_CALL_TRAMP(name))*)func;				\
})

#define static_call_cond(name)	(void)__static_call_cond(name)

static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
	WRITE_ONCE(key->func, func);
}

static inline int static_call_text_reserved(void *start, void *end)
{
	return 0;
}

#define EXPORT_STATIC_CALL(name)	EXPORT_SYMBOL(STATIC_CALL_KEY(name))
#define EXPORT_STATIC_CALL_GPL(name)	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))

#endif /* CONFIG_HAVE_STATIC_CALL */

#endif /* _LINUX_STATIC_CALL_H */