1 /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
2 #ifndef __BPF_TRACING_H__
3 #define __BPF_TRACING_H__
4 
5 #include <bpf/bpf_helpers.h>
6 
7 /* Scan the ARCH passed in from ARCH env variable (see Makefile) */
8 #if defined(__TARGET_ARCH_x86)
9 	#define bpf_target_x86
10 	#define bpf_target_defined
11 #elif defined(__TARGET_ARCH_s390)
12 	#define bpf_target_s390
13 	#define bpf_target_defined
14 #elif defined(__TARGET_ARCH_arm)
15 	#define bpf_target_arm
16 	#define bpf_target_defined
17 #elif defined(__TARGET_ARCH_arm64)
18 	#define bpf_target_arm64
19 	#define bpf_target_defined
20 #elif defined(__TARGET_ARCH_mips)
21 	#define bpf_target_mips
22 	#define bpf_target_defined
23 #elif defined(__TARGET_ARCH_powerpc)
24 	#define bpf_target_powerpc
25 	#define bpf_target_defined
26 #elif defined(__TARGET_ARCH_sparc)
27 	#define bpf_target_sparc
28 	#define bpf_target_defined
29 #elif defined(__TARGET_ARCH_riscv)
30 	#define bpf_target_riscv
31 	#define bpf_target_defined
32 #elif defined(__TARGET_ARCH_arc)
33 	#define bpf_target_arc
34 	#define bpf_target_defined
35 #else
36 
37 /* Fall back to what the compiler says */
38 #if defined(__x86_64__)
39 	#define bpf_target_x86
40 	#define bpf_target_defined
41 #elif defined(__s390__)
42 	#define bpf_target_s390
43 	#define bpf_target_defined
44 #elif defined(__arm__)
45 	#define bpf_target_arm
46 	#define bpf_target_defined
47 #elif defined(__aarch64__)
48 	#define bpf_target_arm64
49 	#define bpf_target_defined
50 #elif defined(__mips__)
51 	#define bpf_target_mips
52 	#define bpf_target_defined
53 #elif defined(__powerpc__)
54 	#define bpf_target_powerpc
55 	#define bpf_target_defined
56 #elif defined(__sparc__)
57 	#define bpf_target_sparc
58 	#define bpf_target_defined
59 #elif defined(__riscv) && __riscv_xlen == 64
60 	#define bpf_target_riscv
61 	#define bpf_target_defined
62 #elif defined(__arc__)
63 	#define bpf_target_arc
64 	#define bpf_target_defined
65 #endif /* no compiler target */
66 
67 #endif
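
/*
 * Note: out-of-tree BPF objects typically select the target architecture
 * explicitly on the compiler command line instead of relying on the compiler
 * fallback above, e.g. (an illustrative invocation; file names are
 * placeholders):
 *
 *        clang -g -O2 -target bpf -D__TARGET_ARCH_x86 -c prog.bpf.c -o prog.bpf.o
 */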
68 
69 #ifndef __BPF_TARGET_MISSING
70 #define __BPF_TARGET_MISSING "GCC error \"Must specify a BPF target arch via __TARGET_ARCH_xxx\""
71 #endif
72 
73 #if defined(bpf_target_x86)
74 
75 #if defined(__KERNEL__) || defined(__VMLINUX_H__)
76 
77 #define __PT_PARM1_REG di
78 #define __PT_PARM2_REG si
79 #define __PT_PARM3_REG dx
80 #define __PT_PARM4_REG cx
81 #define __PT_PARM5_REG r8
82 #define __PT_RET_REG sp
83 #define __PT_FP_REG bp
84 #define __PT_RC_REG ax
85 #define __PT_SP_REG sp
86 #define __PT_IP_REG ip
87 /* syscall uses r10 for PARM4 */
88 #define PT_REGS_PARM4_SYSCALL(x) ((x)->r10)
89 #define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10)
90 
91 #else
92 
93 #ifdef __i386__
94 
95 #define __PT_PARM1_REG eax
96 #define __PT_PARM2_REG edx
97 #define __PT_PARM3_REG ecx
98 /* i386 kernel is built with -mregparm=3 */
99 #define __PT_PARM4_REG __unsupported__
100 #define __PT_PARM5_REG __unsupported__
101 #define __PT_RET_REG esp
102 #define __PT_FP_REG ebp
103 #define __PT_RC_REG eax
104 #define __PT_SP_REG esp
105 #define __PT_IP_REG eip
106 
107 #else /* __i386__ */
108 
109 #define __PT_PARM1_REG rdi
110 #define __PT_PARM2_REG rsi
111 #define __PT_PARM3_REG rdx
112 #define __PT_PARM4_REG rcx
113 #define __PT_PARM5_REG r8
114 #define __PT_RET_REG rsp
115 #define __PT_FP_REG rbp
116 #define __PT_RC_REG rax
117 #define __PT_SP_REG rsp
118 #define __PT_IP_REG rip
119 /* syscall uses r10 for PARM4 */
120 #define PT_REGS_PARM4_SYSCALL(x) ((x)->r10)
121 #define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10)
122 
123 #endif /* __i386__ */
124 
125 #endif /* __KERNEL__ || __VMLINUX_H__ */
126 
127 #elif defined(bpf_target_s390)
128 
129 struct pt_regs___s390 {
130 	unsigned long orig_gpr2;
131 };
132 
133 /* s390 provides user_pt_regs instead of struct pt_regs to userspace */
134 #define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
135 #define __PT_PARM1_REG gprs[2]
136 #define __PT_PARM2_REG gprs[3]
137 #define __PT_PARM3_REG gprs[4]
138 #define __PT_PARM4_REG gprs[5]
139 #define __PT_PARM5_REG gprs[6]
140 #define __PT_RET_REG gprs[14]
141 #define __PT_FP_REG gprs[11]	/* Works only with CONFIG_FRAME_POINTER */
142 #define __PT_RC_REG gprs[2]
143 #define __PT_SP_REG gprs[15]
144 #define __PT_IP_REG psw.addr
145 #define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
146 #define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___s390 *)(x), orig_gpr2)
147 
148 #elif defined(bpf_target_arm)
149 
150 #define __PT_PARM1_REG uregs[0]
151 #define __PT_PARM2_REG uregs[1]
152 #define __PT_PARM3_REG uregs[2]
153 #define __PT_PARM4_REG uregs[3]
154 #define __PT_PARM5_REG uregs[4]
155 #define __PT_RET_REG uregs[14]
156 #define __PT_FP_REG uregs[11]	/* Works only with CONFIG_FRAME_POINTER */
157 #define __PT_RC_REG uregs[0]
158 #define __PT_SP_REG uregs[13]
159 #define __PT_IP_REG uregs[12]
160 
161 #elif defined(bpf_target_arm64)
162 
163 struct pt_regs___arm64 {
164 	unsigned long orig_x0;
165 };
166 
167 /* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
168 #define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
169 #define __PT_PARM1_REG regs[0]
170 #define __PT_PARM2_REG regs[1]
171 #define __PT_PARM3_REG regs[2]
172 #define __PT_PARM4_REG regs[3]
173 #define __PT_PARM5_REG regs[4]
174 #define __PT_RET_REG regs[30]
175 #define __PT_FP_REG regs[29]	/* Works only with CONFIG_FRAME_POINTER */
176 #define __PT_RC_REG regs[0]
177 #define __PT_SP_REG sp
178 #define __PT_IP_REG pc
179 #define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
180 #define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___arm64 *)(x), orig_x0)
181 
182 #elif defined(bpf_target_mips)
183 
184 #define __PT_PARM1_REG regs[4]
185 #define __PT_PARM2_REG regs[5]
186 #define __PT_PARM3_REG regs[6]
187 #define __PT_PARM4_REG regs[7]
188 #define __PT_PARM5_REG regs[8]
189 #define __PT_RET_REG regs[31]
190 #define __PT_FP_REG regs[30]	/* Works only with CONFIG_FRAME_POINTER */
191 #define __PT_RC_REG regs[2]
192 #define __PT_SP_REG regs[29]
193 #define __PT_IP_REG cp0_epc
194 
195 #elif defined(bpf_target_powerpc)
196 
197 #define __PT_PARM1_REG gpr[3]
198 #define __PT_PARM2_REG gpr[4]
199 #define __PT_PARM3_REG gpr[5]
200 #define __PT_PARM4_REG gpr[6]
201 #define __PT_PARM5_REG gpr[7]
202 #define __PT_RET_REG regs[31]
203 #define __PT_FP_REG __unsupported__
204 #define __PT_RC_REG gpr[3]
205 #define __PT_SP_REG sp
206 #define __PT_IP_REG nip
207 /* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. */
208 #define PT_REGS_SYSCALL_REGS(ctx) ctx
209 
210 #elif defined(bpf_target_sparc)
211 
212 #define __PT_PARM1_REG u_regs[UREG_I0]
213 #define __PT_PARM2_REG u_regs[UREG_I1]
214 #define __PT_PARM3_REG u_regs[UREG_I2]
215 #define __PT_PARM4_REG u_regs[UREG_I3]
216 #define __PT_PARM5_REG u_regs[UREG_I4]
217 #define __PT_RET_REG u_regs[UREG_I7]
218 #define __PT_FP_REG __unsupported__
219 #define __PT_RC_REG u_regs[UREG_I0]
220 #define __PT_SP_REG u_regs[UREG_FP]
221 /* Should this also be a bpf_target check for the sparc case? */
222 #if defined(__arch64__)
223 #define __PT_IP_REG tpc
224 #else
225 #define __PT_IP_REG pc
226 #endif
227 
228 #elif defined(bpf_target_riscv)
229 
230 #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
231 #define __PT_PARM1_REG a0
232 #define __PT_PARM2_REG a1
233 #define __PT_PARM3_REG a2
234 #define __PT_PARM4_REG a3
235 #define __PT_PARM5_REG a4
236 #define __PT_RET_REG ra
237 #define __PT_FP_REG s0
238 #define __PT_RC_REG a0
239 #define __PT_SP_REG sp
240 #define __PT_IP_REG pc
241 /* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
242 #define PT_REGS_SYSCALL_REGS(ctx) ctx
243 
244 #elif defined(bpf_target_arc)
245 
246 /* arc provides struct user_regs_struct instead of struct pt_regs to userspace */
247 #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
248 #define __PT_PARM1_REG scratch.r0
249 #define __PT_PARM2_REG scratch.r1
250 #define __PT_PARM3_REG scratch.r2
251 #define __PT_PARM4_REG scratch.r3
252 #define __PT_PARM5_REG scratch.r4
253 #define __PT_RET_REG scratch.blink
254 #define __PT_FP_REG __unsupported__
255 #define __PT_RC_REG scratch.r0
256 #define __PT_SP_REG scratch.sp
257 #define __PT_IP_REG scratch.ret
258 /* arc does not select ARCH_HAS_SYSCALL_WRAPPER. */
259 #define PT_REGS_SYSCALL_REGS(ctx) ctx
260 
261 #endif
262 
263 #if defined(bpf_target_defined)
264 
265 struct pt_regs;
266 
267 /* allow some architectures to override `struct pt_regs` */
268 #ifndef __PT_REGS_CAST
269 #define __PT_REGS_CAST(x) (x)
270 #endif
271 
272 #define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->__PT_PARM1_REG)
273 #define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->__PT_PARM2_REG)
274 #define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->__PT_PARM3_REG)
275 #define PT_REGS_PARM4(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG)
276 #define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->__PT_PARM5_REG)
277 #define PT_REGS_RET(x) (__PT_REGS_CAST(x)->__PT_RET_REG)
278 #define PT_REGS_FP(x) (__PT_REGS_CAST(x)->__PT_FP_REG)
279 #define PT_REGS_RC(x) (__PT_REGS_CAST(x)->__PT_RC_REG)
280 #define PT_REGS_SP(x) (__PT_REGS_CAST(x)->__PT_SP_REG)
281 #define PT_REGS_IP(x) (__PT_REGS_CAST(x)->__PT_IP_REG)
282 
283 #define PT_REGS_PARM1_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_REG)
284 #define PT_REGS_PARM2_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_REG)
285 #define PT_REGS_PARM3_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_REG)
286 #define PT_REGS_PARM4_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG)
287 #define PT_REGS_PARM5_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_REG)
288 #define PT_REGS_RET_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RET_REG)
289 #define PT_REGS_FP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_FP_REG)
290 #define PT_REGS_RC_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RC_REG)
291 #define PT_REGS_SP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_SP_REG)
292 #define PT_REGS_IP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_IP_REG)
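
/*
 * Usage sketch (illustrative only; do_unlinkat() and the variable names are
 * examples, not part of this API): in a raw kprobe handler the probed
 * function's arguments can be read straight from the context,
 *
 *        SEC("kprobe/do_unlinkat")
 *        int trace_unlinkat(struct pt_regs *ctx)
 *        {
 *                int dfd = (int)PT_REGS_PARM1(ctx);
 *                struct filename *name = (struct filename *)PT_REGS_PARM2(ctx);
 *                ...
 *        }
 *
 * The _CORE variants go through BPF_CORE_READ() (bpf_probe_read_kernel() with
 * CO-RE relocations) and are meant for a struct pt_regs pointer that refers to
 * kernel memory which cannot be dereferenced directly, e.g. the inner pt_regs
 * of a syscall wrapper.
 */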
293 
294 #if defined(bpf_target_powerpc)
295 
296 #define BPF_KPROBE_READ_RET_IP(ip, ctx)		({ (ip) = (ctx)->link; })
297 #define BPF_KRETPROBE_READ_RET_IP		BPF_KPROBE_READ_RET_IP
298 
299 #elif defined(bpf_target_sparc)
300 
301 #define BPF_KPROBE_READ_RET_IP(ip, ctx)		({ (ip) = PT_REGS_RET(ctx); })
302 #define BPF_KRETPROBE_READ_RET_IP		BPF_KPROBE_READ_RET_IP
303 
304 #else
305 
306 #define BPF_KPROBE_READ_RET_IP(ip, ctx)					    \
307 	({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
308 #define BPF_KRETPROBE_READ_RET_IP(ip, ctx)				    \
309 	({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
310 
311 #endif
312 
313 #ifndef PT_REGS_PARM1_SYSCALL
314 #define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1(x)
315 #endif
316 #define PT_REGS_PARM2_SYSCALL(x) PT_REGS_PARM2(x)
317 #define PT_REGS_PARM3_SYSCALL(x) PT_REGS_PARM3(x)
318 #ifndef PT_REGS_PARM4_SYSCALL
319 #define PT_REGS_PARM4_SYSCALL(x) PT_REGS_PARM4(x)
320 #endif
321 #define PT_REGS_PARM5_SYSCALL(x) PT_REGS_PARM5(x)
322 
323 #ifndef PT_REGS_PARM1_CORE_SYSCALL
324 #define PT_REGS_PARM1_CORE_SYSCALL(x) PT_REGS_PARM1_CORE(x)
325 #endif
326 #define PT_REGS_PARM2_CORE_SYSCALL(x) PT_REGS_PARM2_CORE(x)
327 #define PT_REGS_PARM3_CORE_SYSCALL(x) PT_REGS_PARM3_CORE(x)
328 #ifndef PT_REGS_PARM4_CORE_SYSCALL
329 #define PT_REGS_PARM4_CORE_SYSCALL(x) PT_REGS_PARM4_CORE(x)
330 #endif
331 #define PT_REGS_PARM5_CORE_SYSCALL(x) PT_REGS_PARM5_CORE(x)
332 
333 #else /* defined(bpf_target_defined) */
334 
335 #define PT_REGS_PARM1(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
336 #define PT_REGS_PARM2(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
337 #define PT_REGS_PARM3(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
338 #define PT_REGS_PARM4(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
339 #define PT_REGS_PARM5(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
340 #define PT_REGS_RET(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
341 #define PT_REGS_FP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
342 #define PT_REGS_RC(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
343 #define PT_REGS_SP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
344 #define PT_REGS_IP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
345 
346 #define PT_REGS_PARM1_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
347 #define PT_REGS_PARM2_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
348 #define PT_REGS_PARM3_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
349 #define PT_REGS_PARM4_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
350 #define PT_REGS_PARM5_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
351 #define PT_REGS_RET_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
352 #define PT_REGS_FP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
353 #define PT_REGS_RC_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
354 #define PT_REGS_SP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
355 #define PT_REGS_IP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
356 
357 #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
358 #define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
359 
360 #define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
361 #define PT_REGS_PARM2_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
362 #define PT_REGS_PARM3_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
363 #define PT_REGS_PARM4_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
364 #define PT_REGS_PARM5_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
365 
366 #define PT_REGS_PARM1_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
367 #define PT_REGS_PARM2_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
368 #define PT_REGS_PARM3_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
369 #define PT_REGS_PARM4_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
370 #define PT_REGS_PARM5_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
371 
372 #endif /* defined(bpf_target_defined) */
373 
374 /*
375  * When invoked from a syscall handler kprobe, returns a pointer to a
376  * struct pt_regs containing syscall arguments and suitable for passing to
377  * PT_REGS_PARMn_SYSCALL() and PT_REGS_PARMn_CORE_SYSCALL().
378  */
379 #ifndef PT_REGS_SYSCALL_REGS
380 /* By default, assume that the arch selects ARCH_HAS_SYSCALL_WRAPPER. */
381 #define PT_REGS_SYSCALL_REGS(ctx) ((struct pt_regs *)PT_REGS_PARM1(ctx))
382 #endif
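
/*
 * A sketch of manual use (BPF_KSYSCALL below automates this; the syscall and
 * names here are illustrative, and BPF_CORE_READ() used by the _CORE_SYSCALL
 * accessors comes from bpf_core_read.h):
 *
 *        SEC("kprobe/__x64_sys_openat")
 *        int trace_openat(struct pt_regs *ctx)
 *        {
 *                struct pt_regs *regs = PT_REGS_SYSCALL_REGS(ctx);
 *                const char *pathname = (const char *)PT_REGS_PARM2_CORE_SYSCALL(regs);
 *                ...
 *        }
 */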
383 
384 #ifndef ___bpf_concat
385 #define ___bpf_concat(a, b) a ## b
386 #endif
387 #ifndef ___bpf_apply
388 #define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
389 #endif
390 #ifndef ___bpf_nth
391 #define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
392 #endif
393 #ifndef ___bpf_narg
394 #define ___bpf_narg(...) ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
395 #endif
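
/*
 * ___bpf_narg() counts how many arguments it was given (0 through 12) by
 * letting them shift the descending list of literals passed to ___bpf_nth(),
 * which always returns its 14th argument; e.g. ___bpf_narg(a, b, c) expands
 * to 3 and ___bpf_narg() expands to 0.
 */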
396 
397 #define ___bpf_ctx_cast0()            ctx
398 #define ___bpf_ctx_cast1(x)           ___bpf_ctx_cast0(), (void *)ctx[0]
399 #define ___bpf_ctx_cast2(x, args...)  ___bpf_ctx_cast1(args), (void *)ctx[1]
400 #define ___bpf_ctx_cast3(x, args...)  ___bpf_ctx_cast2(args), (void *)ctx[2]
401 #define ___bpf_ctx_cast4(x, args...)  ___bpf_ctx_cast3(args), (void *)ctx[3]
402 #define ___bpf_ctx_cast5(x, args...)  ___bpf_ctx_cast4(args), (void *)ctx[4]
403 #define ___bpf_ctx_cast6(x, args...)  ___bpf_ctx_cast5(args), (void *)ctx[5]
404 #define ___bpf_ctx_cast7(x, args...)  ___bpf_ctx_cast6(args), (void *)ctx[6]
405 #define ___bpf_ctx_cast8(x, args...)  ___bpf_ctx_cast7(args), (void *)ctx[7]
406 #define ___bpf_ctx_cast9(x, args...)  ___bpf_ctx_cast8(args), (void *)ctx[8]
407 #define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
408 #define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
409 #define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
410 #define ___bpf_ctx_cast(args...)      ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
411 
412 /*
413  * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
414  * similar kinds of BPF programs that accept input arguments as a single
415  * pointer to an untyped u64 array, where each u64 can actually be a typed
416  * pointer or an integer of a different size. Instead of requiring the user to
417  * write manual casts and work with array elements by index, the BPF_PROG
418  * macro lets the user declare a list of named and typed input arguments in
419  * the same syntax as for a normal C function. All the casting is hidden and
420  * performed transparently, while user code can simply work with function
421  * arguments of the specified types and names.
422  *
423  * The original raw context argument is preserved and is accessible as the
424  * 'ctx' argument. This is useful when using BPF helpers that expect the
425  * original context as one of the parameters (e.g., bpf_perf_event_output()).
426  */
427 #define BPF_PROG(name, args...)						    \
428 name(unsigned long long *ctx);						    \
429 static __attribute__((always_inline)) typeof(name(0))			    \
430 ____##name(unsigned long long *ctx, ##args);				    \
431 typeof(name(0)) name(unsigned long long *ctx)				    \
432 {									    \
433 	_Pragma("GCC diagnostic push")					    \
434 	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
435 	return ____##name(___bpf_ctx_cast(args));			    \
436 	_Pragma("GCC diagnostic pop")					    \
437 }									    \
438 static __attribute__((always_inline)) typeof(name(0))			    \
439 ____##name(unsigned long long *ctx, ##args)
440 
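/*
 * A minimal usage sketch (the attach point and argument names are
 * illustrative, not prescribed by this header):
 *
 *        SEC("fentry/do_unlinkat")
 *        int BPF_PROG(handle_unlinkat, int dfd, struct filename *name)
 *        {
 *                bpf_printk("unlinkat dfd=%d", dfd);
 *                return 0;
 *        }
 *
 * This expands into handle_unlinkat(unsigned long long *ctx) plus an
 * always-inlined ____handle_unlinkat() that receives the casted, typed
 * arguments.
 */
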
441 struct pt_regs;
442 
443 #define ___bpf_kprobe_args0()           ctx
444 #define ___bpf_kprobe_args1(x)          ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
445 #define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
446 #define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
447 #define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
448 #define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
449 #define ___bpf_kprobe_args(args...)     ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
450 
451 /*
452  * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
453  * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
454  * low-level way of getting kprobe input arguments from struct pt_regs, and
455  * provides a familiar typed and named function arguments syntax and
456  * semantics of accessing kprobe input parameters.
457  *
458  * The original struct pt_regs* context is preserved as the 'ctx' argument. This might
459  * be necessary when using BPF helpers like bpf_perf_event_output().
460  */
461 #define BPF_KPROBE(name, args...)					    \
462 name(struct pt_regs *ctx);						    \
463 static __attribute__((always_inline)) typeof(name(0))			    \
464 ____##name(struct pt_regs *ctx, ##args);				    \
465 typeof(name(0)) name(struct pt_regs *ctx)				    \
466 {									    \
467 	_Pragma("GCC diagnostic push")					    \
468 	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
469 	return ____##name(___bpf_kprobe_args(args));			    \
470 	_Pragma("GCC diagnostic pop")					    \
471 }									    \
472 static __attribute__((always_inline)) typeof(name(0))			    \
473 ____##name(struct pt_regs *ctx, ##args)
474 
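/*
 * A minimal usage sketch (the kprobe target and argument name are
 * illustrative):
 *
 *        SEC("kprobe/tcp_v4_connect")
 *        int BPF_KPROBE(handle_connect, struct sock *sk)
 *        {
 *                bpf_printk("connect sk=%p", sk);
 *                return 0;
 *        }
 *
 * Each declared argument is taken from the corresponding PT_REGS_PARMn(ctx)
 * register of the target architecture.
 */
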
475 #define ___bpf_kretprobe_args0()       ctx
476 #define ___bpf_kretprobe_args1(x)      ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
477 #define ___bpf_kretprobe_args(args...) ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
478 
479 /*
480  * BPF_KRETPROBE is similar to BPF_KPROBE, except that it only provides an
481  * optional return value (in addition to `struct pt_regs *ctx`), but no input
482  * arguments, because they will have been clobbered by the time the probed
483  * function returns.
484  */
485 #define BPF_KRETPROBE(name, args...)					    \
486 name(struct pt_regs *ctx);						    \
487 static __attribute__((always_inline)) typeof(name(0))			    \
488 ____##name(struct pt_regs *ctx, ##args);				    \
489 typeof(name(0)) name(struct pt_regs *ctx)				    \
490 {									    \
491 	_Pragma("GCC diagnostic push")					    \
492 	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
493 	return ____##name(___bpf_kretprobe_args(args));			    \
494 	_Pragma("GCC diagnostic pop")					    \
495 }									    \
496 static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
497 
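/*
 * A minimal usage sketch (the probed function is illustrative); the optional
 * argument receives PT_REGS_RC(ctx), i.e. the architecture's return value
 * register:
 *
 *        SEC("kretprobe/do_unlinkat")
 *        int BPF_KRETPROBE(handle_unlinkat_exit, long ret)
 *        {
 *                bpf_printk("unlinkat returned %ld", ret);
 *                return 0;
 *        }
 */
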
498 /* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, syscall args are in the kprobe's pt_regs and can be read directly */
499 #define ___bpf_syscall_args0()           ctx
500 #define ___bpf_syscall_args1(x)          ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_SYSCALL(regs)
501 #define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_SYSCALL(regs)
502 #define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_SYSCALL(regs)
503 #define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_SYSCALL(regs)
504 #define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_SYSCALL(regs)
505 #define ___bpf_syscall_args(args...)     ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)
506 
507 /* If kernel has CONFIG_ARCH_HAS_SYSCALL_WRAPPER, syscall args have to be BPF_CORE_READ from the inner struct pt_regs */
508 #define ___bpf_syswrap_args0()           ctx
509 #define ___bpf_syswrap_args1(x)          ___bpf_syswrap_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs)
510 #define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs)
511 #define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
512 #define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
513 #define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
514 #define ___bpf_syswrap_args(args...)     ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args)
515 
516 /*
517  * BPF_KSYSCALL is a variant of BPF_KPROBE, which is intended for
518  * tracing syscall functions, like __x64_sys_close. It hides the underlying
519  * platform-specific low-level way of getting syscall input arguments from
520  * struct pt_regs, and provides a familiar typed and named function arguments
521  * syntax and semantics of accessing syscall input parameters.
522  *
523  * The original struct pt_regs * context is preserved as the 'ctx' argument. This might
524  * be necessary when using BPF helpers like bpf_perf_event_output().
525  *
526  * At the moment BPF_KSYSCALL does not transparently handle all the calling
527  * convention quirks for the following syscalls:
528  *
529  * - mmap(): __ARCH_WANT_SYS_OLD_MMAP.
530  * - clone(): CONFIG_CLONE_BACKWARDS, CONFIG_CLONE_BACKWARDS2 and
531  *            CONFIG_CLONE_BACKWARDS3.
532  * - socket-related syscalls: __ARCH_WANT_SYS_SOCKETCALL.
533  * - compat syscalls.
534  *
535  * This may or may not change in the future. The user needs to take extra measures
536  * to handle such quirks explicitly, if necessary.
537  *
538  * This macro relies on BPF CO-RE support and virtual __kconfig externs.
539  */
540 #define BPF_KSYSCALL(name, args...)					    \
541 name(struct pt_regs *ctx);						    \
542 extern _Bool LINUX_HAS_SYSCALL_WRAPPER __kconfig;			    \
543 static __attribute__((always_inline)) typeof(name(0))			    \
544 ____##name(struct pt_regs *ctx, ##args);				    \
545 typeof(name(0)) name(struct pt_regs *ctx)				    \
546 {									    \
547 	struct pt_regs *regs = LINUX_HAS_SYSCALL_WRAPPER		    \
548 			       ? (struct pt_regs *)PT_REGS_PARM1(ctx)	    \
549 			       : ctx;					    \
550 	_Pragma("GCC diagnostic push")					    \
551 	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
552 	if (LINUX_HAS_SYSCALL_WRAPPER)					    \
553 		return ____##name(___bpf_syswrap_args(args));		    \
554 	else								    \
555 		return ____##name(___bpf_syscall_args(args));		    \
556 	_Pragma("GCC diagnostic pop")					    \
557 }									    \
558 static __attribute__((always_inline)) typeof(name(0))			    \
559 ____##name(struct pt_regs *ctx, ##args)
560 
561 #define BPF_KPROBE_SYSCALL BPF_KSYSCALL
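
/*
 * A minimal usage sketch (the syscall is illustrative; the SEC("ksyscall/...")
 * section additionally assumes a libbpf version that knows how to resolve the
 * arch-specific syscall entry point - an explicit kprobe on e.g.
 * __x64_sys_unlinkat works as well):
 *
 *        SEC("ksyscall/unlinkat")
 *        int BPF_KSYSCALL(handle_unlinkat, int dfd, const char *pathname)
 *        {
 *                bpf_printk("unlinkat dfd=%d", dfd);
 *                return 0;
 *        }
 *
 * Depending on LINUX_HAS_SYSCALL_WRAPPER, the arguments are either read
 * directly from ctx or CO-RE-read from the wrapped struct pt_regs.
 */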
562 
563 #endif
564