xref: /openbmc/linux/tools/lib/bpf/bpf_tracing.h (revision 384a13ca)
1 /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
2 #ifndef __BPF_TRACING_H__
3 #define __BPF_TRACING_H__
4 
5 #include <bpf/bpf_helpers.h>
6 
7 /* Scan the ARCH passed in from ARCH env variable (see Makefile) */
8 #if defined(__TARGET_ARCH_x86)
9 	#define bpf_target_x86
10 	#define bpf_target_defined
11 #elif defined(__TARGET_ARCH_s390)
12 	#define bpf_target_s390
13 	#define bpf_target_defined
14 #elif defined(__TARGET_ARCH_arm)
15 	#define bpf_target_arm
16 	#define bpf_target_defined
17 #elif defined(__TARGET_ARCH_arm64)
18 	#define bpf_target_arm64
19 	#define bpf_target_defined
20 #elif defined(__TARGET_ARCH_mips)
21 	#define bpf_target_mips
22 	#define bpf_target_defined
23 #elif defined(__TARGET_ARCH_powerpc)
24 	#define bpf_target_powerpc
25 	#define bpf_target_defined
26 #elif defined(__TARGET_ARCH_sparc)
27 	#define bpf_target_sparc
28 	#define bpf_target_defined
29 #elif defined(__TARGET_ARCH_riscv)
30 	#define bpf_target_riscv
31 	#define bpf_target_defined
32 #elif defined(__TARGET_ARCH_arc)
33 	#define bpf_target_arc
34 	#define bpf_target_defined
35 #elif defined(__TARGET_ARCH_loongarch)
36 	#define bpf_target_loongarch
37 	#define bpf_target_defined
38 #else
39 
40 /* Fall back to what the compiler says */
41 #if defined(__x86_64__)
42 	#define bpf_target_x86
43 	#define bpf_target_defined
44 #elif defined(__s390__)
45 	#define bpf_target_s390
46 	#define bpf_target_defined
47 #elif defined(__arm__)
48 	#define bpf_target_arm
49 	#define bpf_target_defined
50 #elif defined(__aarch64__)
51 	#define bpf_target_arm64
52 	#define bpf_target_defined
53 #elif defined(__mips__)
54 	#define bpf_target_mips
55 	#define bpf_target_defined
56 #elif defined(__powerpc__)
57 	#define bpf_target_powerpc
58 	#define bpf_target_defined
59 #elif defined(__sparc__)
60 	#define bpf_target_sparc
61 	#define bpf_target_defined
62 #elif defined(__riscv) && __riscv_xlen == 64
63 	#define bpf_target_riscv
64 	#define bpf_target_defined
65 #elif defined(__arc__)
66 	#define bpf_target_arc
67 	#define bpf_target_defined
68 #elif defined(__loongarch__)
69 	#define bpf_target_loongarch
70 	#define bpf_target_defined
71 #endif /* no compiler target */
72 
73 #endif
74 
75 #ifndef __BPF_TARGET_MISSING
76 #define __BPF_TARGET_MISSING "GCC error \"Must specify a BPF target arch via __TARGET_ARCH_xxx\""
77 #endif
78 
79 #if defined(bpf_target_x86)
80 
81 #if defined(__KERNEL__) || defined(__VMLINUX_H__)
82 
83 #define __PT_PARM1_REG di
84 #define __PT_PARM2_REG si
85 #define __PT_PARM3_REG dx
86 #define __PT_PARM4_REG cx
87 #define __PT_PARM5_REG r8
88 #define __PT_RET_REG sp
89 #define __PT_FP_REG bp
90 #define __PT_RC_REG ax
91 #define __PT_SP_REG sp
92 #define __PT_IP_REG ip
93 /* syscall uses r10 for PARM4 */
94 #define PT_REGS_PARM4_SYSCALL(x) ((x)->r10)
95 #define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10)
96 
97 #else
98 
99 #ifdef __i386__
100 
101 #define __PT_PARM1_REG eax
102 #define __PT_PARM2_REG edx
103 #define __PT_PARM3_REG ecx
104 /* i386 kernel is built with -mregparm=3 */
105 #define __PT_PARM4_REG __unsupported__
106 #define __PT_PARM5_REG __unsupported__
107 #define __PT_RET_REG esp
108 #define __PT_FP_REG ebp
109 #define __PT_RC_REG eax
110 #define __PT_SP_REG esp
111 #define __PT_IP_REG eip
112 
113 #else /* __i386__ */
114 
115 #define __PT_PARM1_REG rdi
116 #define __PT_PARM2_REG rsi
117 #define __PT_PARM3_REG rdx
118 #define __PT_PARM4_REG rcx
119 #define __PT_PARM5_REG r8
120 #define __PT_RET_REG rsp
121 #define __PT_FP_REG rbp
122 #define __PT_RC_REG rax
123 #define __PT_SP_REG rsp
124 #define __PT_IP_REG rip
125 /* syscall uses r10 for PARM4 */
126 #define PT_REGS_PARM4_SYSCALL(x) ((x)->r10)
127 #define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10)
128 
129 #endif /* __i386__ */
130 
131 #endif /* __KERNEL__ || __VMLINUX_H__ */
132 
133 #elif defined(bpf_target_s390)
134 
/*
 * CO-RE "flavor" of struct pt_regs (the "___s390" suffix is stripped during
 * relocation): exposes orig_gpr2, which holds the original first syscall
 * argument on s390. Read via BPF_CORE_READ() by PT_REGS_PARM1_CORE_SYSCALL()
 * below, so the field offset is relocated against the running kernel's BTF.
 */
135 struct pt_regs___s390 {
136 	unsigned long orig_gpr2;
137 };
138 
139 /* s390 provides user_pt_regs instead of struct pt_regs to userspace */
140 #define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
141 #define __PT_PARM1_REG gprs[2]
142 #define __PT_PARM2_REG gprs[3]
143 #define __PT_PARM3_REG gprs[4]
144 #define __PT_PARM4_REG gprs[5]
145 #define __PT_PARM5_REG gprs[6]
146 #define __PT_RET_REG gprs[14]
147 #define __PT_FP_REG gprs[11]	/* Works only with CONFIG_FRAME_POINTER */
148 #define __PT_RC_REG gprs[2]
149 #define __PT_SP_REG gprs[15]
150 #define __PT_IP_REG psw.addr
151 #define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
152 #define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___s390 *)(x), orig_gpr2)
153 
154 #elif defined(bpf_target_arm)
155 
156 #define __PT_PARM1_REG uregs[0]
157 #define __PT_PARM2_REG uregs[1]
158 #define __PT_PARM3_REG uregs[2]
159 #define __PT_PARM4_REG uregs[3]
160 #define __PT_PARM5_REG uregs[4]
161 #define __PT_RET_REG uregs[14]
162 #define __PT_FP_REG uregs[11]	/* Works only with CONFIG_FRAME_POINTER */
163 #define __PT_RC_REG uregs[0]
164 #define __PT_SP_REG uregs[13]
165 #define __PT_IP_REG uregs[12]
166 
167 #elif defined(bpf_target_arm64)
168 
/*
 * CO-RE "flavor" of struct pt_regs (the "___arm64" suffix is stripped during
 * relocation): exposes orig_x0, which holds the original first syscall
 * argument on arm64. Read via BPF_CORE_READ() by PT_REGS_PARM1_CORE_SYSCALL()
 * below, so the field offset is relocated against the running kernel's BTF.
 */
169 struct pt_regs___arm64 {
170 	unsigned long orig_x0;
171 };
172 
173 /* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
174 #define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
175 #define __PT_PARM1_REG regs[0]
176 #define __PT_PARM2_REG regs[1]
177 #define __PT_PARM3_REG regs[2]
178 #define __PT_PARM4_REG regs[3]
179 #define __PT_PARM5_REG regs[4]
180 #define __PT_RET_REG regs[30]
181 #define __PT_FP_REG regs[29]	/* Works only with CONFIG_FRAME_POINTER */
182 #define __PT_RC_REG regs[0]
183 #define __PT_SP_REG sp
184 #define __PT_IP_REG pc
185 #define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
186 #define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___arm64 *)(x), orig_x0)
187 
188 #elif defined(bpf_target_mips)
189 
190 #define __PT_PARM1_REG regs[4]
191 #define __PT_PARM2_REG regs[5]
192 #define __PT_PARM3_REG regs[6]
193 #define __PT_PARM4_REG regs[7]
194 #define __PT_PARM5_REG regs[8]
195 #define __PT_RET_REG regs[31]
196 #define __PT_FP_REG regs[30]	/* Works only with CONFIG_FRAME_POINTER */
197 #define __PT_RC_REG regs[2]
198 #define __PT_SP_REG regs[29]
199 #define __PT_IP_REG cp0_epc
200 
201 #elif defined(bpf_target_powerpc)
202 
203 #define __PT_PARM1_REG gpr[3]
204 #define __PT_PARM2_REG gpr[4]
205 #define __PT_PARM3_REG gpr[5]
206 #define __PT_PARM4_REG gpr[6]
207 #define __PT_PARM5_REG gpr[7]
208 #define __PT_RET_REG regs[31]
209 #define __PT_FP_REG __unsupported__
210 #define __PT_RC_REG gpr[3]
211 #define __PT_SP_REG sp
212 #define __PT_IP_REG nip
213 /* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. */
214 #define PT_REGS_SYSCALL_REGS(ctx) ctx
215 
216 #elif defined(bpf_target_sparc)
217 
218 #define __PT_PARM1_REG u_regs[UREG_I0]
219 #define __PT_PARM2_REG u_regs[UREG_I1]
220 #define __PT_PARM3_REG u_regs[UREG_I2]
221 #define __PT_PARM4_REG u_regs[UREG_I3]
222 #define __PT_PARM5_REG u_regs[UREG_I4]
223 #define __PT_RET_REG u_regs[UREG_I7]
224 #define __PT_FP_REG __unsupported__
225 #define __PT_RC_REG u_regs[UREG_I0]
226 #define __PT_SP_REG u_regs[UREG_FP]
227 /* Should this also be a bpf_target check for the sparc case? */
228 #if defined(__arch64__)
229 #define __PT_IP_REG tpc
230 #else
231 #define __PT_IP_REG pc
232 #endif
233 
234 #elif defined(bpf_target_riscv)
235 
236 #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
237 #define __PT_PARM1_REG a0
238 #define __PT_PARM2_REG a1
239 #define __PT_PARM3_REG a2
240 #define __PT_PARM4_REG a3
241 #define __PT_PARM5_REG a4
242 #define __PT_RET_REG ra
243 #define __PT_FP_REG s0
244 #define __PT_RC_REG a0
245 #define __PT_SP_REG sp
246 #define __PT_IP_REG pc
247 /* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
248 #define PT_REGS_SYSCALL_REGS(ctx) ctx
249 
250 #elif defined(bpf_target_arc)
251 
252 /* arc provides struct user_pt_regs instead of struct pt_regs to userspace */
253 #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
254 #define __PT_PARM1_REG scratch.r0
255 #define __PT_PARM2_REG scratch.r1
256 #define __PT_PARM3_REG scratch.r2
257 #define __PT_PARM4_REG scratch.r3
258 #define __PT_PARM5_REG scratch.r4
259 #define __PT_RET_REG scratch.blink
260 #define __PT_FP_REG __unsupported__
261 #define __PT_RC_REG scratch.r0
262 #define __PT_SP_REG scratch.sp
263 #define __PT_IP_REG scratch.ret
264 /* arc does not select ARCH_HAS_SYSCALL_WRAPPER. */
265 #define PT_REGS_SYSCALL_REGS(ctx) ctx
266 
267 #elif defined(bpf_target_loongarch)
268 
269 /* https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html */
270 
271 #define __PT_PARM1_REG regs[4]
272 #define __PT_PARM2_REG regs[5]
273 #define __PT_PARM3_REG regs[6]
274 #define __PT_PARM4_REG regs[7]
275 #define __PT_PARM5_REG regs[8]
276 #define __PT_RET_REG regs[1]
277 #define __PT_FP_REG regs[22]
278 #define __PT_RC_REG regs[4]
279 #define __PT_SP_REG regs[3]
280 #define __PT_IP_REG csr_era
281 /* loongarch does not select ARCH_HAS_SYSCALL_WRAPPER. */
282 #define PT_REGS_SYSCALL_REGS(ctx) ctx
283 
284 #endif
285 
286 #if defined(bpf_target_defined)
287 
288 struct pt_regs;
289 
290 /* allow some architectures to override `struct pt_regs` */
291 #ifndef __PT_REGS_CAST
292 #define __PT_REGS_CAST(x) (x)
293 #endif
294 
295 #define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->__PT_PARM1_REG)
296 #define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->__PT_PARM2_REG)
297 #define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->__PT_PARM3_REG)
298 #define PT_REGS_PARM4(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG)
299 #define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->__PT_PARM5_REG)
300 #define PT_REGS_RET(x) (__PT_REGS_CAST(x)->__PT_RET_REG)
301 #define PT_REGS_FP(x) (__PT_REGS_CAST(x)->__PT_FP_REG)
302 #define PT_REGS_RC(x) (__PT_REGS_CAST(x)->__PT_RC_REG)
303 #define PT_REGS_SP(x) (__PT_REGS_CAST(x)->__PT_SP_REG)
304 #define PT_REGS_IP(x) (__PT_REGS_CAST(x)->__PT_IP_REG)
305 
306 #define PT_REGS_PARM1_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_REG)
307 #define PT_REGS_PARM2_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_REG)
308 #define PT_REGS_PARM3_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_REG)
309 #define PT_REGS_PARM4_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG)
310 #define PT_REGS_PARM5_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_REG)
311 #define PT_REGS_RET_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RET_REG)
312 #define PT_REGS_FP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_FP_REG)
313 #define PT_REGS_RC_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RC_REG)
314 #define PT_REGS_SP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_SP_REG)
315 #define PT_REGS_IP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_IP_REG)
316 
317 #if defined(bpf_target_powerpc)
318 
319 #define BPF_KPROBE_READ_RET_IP(ip, ctx)		({ (ip) = (ctx)->link; })
320 #define BPF_KRETPROBE_READ_RET_IP		BPF_KPROBE_READ_RET_IP
321 
322 #elif defined(bpf_target_sparc)
323 
324 #define BPF_KPROBE_READ_RET_IP(ip, ctx)		({ (ip) = PT_REGS_RET(ctx); })
325 #define BPF_KRETPROBE_READ_RET_IP		BPF_KPROBE_READ_RET_IP
326 
327 #else
328 
329 #define BPF_KPROBE_READ_RET_IP(ip, ctx)					    \
330 	({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
331 #define BPF_KRETPROBE_READ_RET_IP(ip, ctx)				    \
332 	({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
333 
334 #endif
335 
336 #ifndef PT_REGS_PARM1_SYSCALL
337 #define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1(x)
338 #endif
339 #define PT_REGS_PARM2_SYSCALL(x) PT_REGS_PARM2(x)
340 #define PT_REGS_PARM3_SYSCALL(x) PT_REGS_PARM3(x)
341 #ifndef PT_REGS_PARM4_SYSCALL
342 #define PT_REGS_PARM4_SYSCALL(x) PT_REGS_PARM4(x)
343 #endif
344 #define PT_REGS_PARM5_SYSCALL(x) PT_REGS_PARM5(x)
345 
346 #ifndef PT_REGS_PARM1_CORE_SYSCALL
347 #define PT_REGS_PARM1_CORE_SYSCALL(x) PT_REGS_PARM1_CORE(x)
348 #endif
349 #define PT_REGS_PARM2_CORE_SYSCALL(x) PT_REGS_PARM2_CORE(x)
350 #define PT_REGS_PARM3_CORE_SYSCALL(x) PT_REGS_PARM3_CORE(x)
351 #ifndef PT_REGS_PARM4_CORE_SYSCALL
352 #define PT_REGS_PARM4_CORE_SYSCALL(x) PT_REGS_PARM4_CORE(x)
353 #endif
354 #define PT_REGS_PARM5_CORE_SYSCALL(x) PT_REGS_PARM5_CORE(x)
355 
356 #else /* defined(bpf_target_defined) */
357 
358 #define PT_REGS_PARM1(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
359 #define PT_REGS_PARM2(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
360 #define PT_REGS_PARM3(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
361 #define PT_REGS_PARM4(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
362 #define PT_REGS_PARM5(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
363 #define PT_REGS_RET(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
364 #define PT_REGS_FP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
365 #define PT_REGS_RC(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
366 #define PT_REGS_SP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
367 #define PT_REGS_IP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
368 
369 #define PT_REGS_PARM1_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
370 #define PT_REGS_PARM2_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
371 #define PT_REGS_PARM3_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
372 #define PT_REGS_PARM4_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
373 #define PT_REGS_PARM5_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
374 #define PT_REGS_RET_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
375 #define PT_REGS_FP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
376 #define PT_REGS_RC_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
377 #define PT_REGS_SP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
378 #define PT_REGS_IP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
379 
380 #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
381 #define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
382 
383 #define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
384 #define PT_REGS_PARM2_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
385 #define PT_REGS_PARM3_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
386 #define PT_REGS_PARM4_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
387 #define PT_REGS_PARM5_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
388 
389 #define PT_REGS_PARM1_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
390 #define PT_REGS_PARM2_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
391 #define PT_REGS_PARM3_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
392 #define PT_REGS_PARM4_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
393 #define PT_REGS_PARM5_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
394 
395 #endif /* defined(bpf_target_defined) */
396 
397 /*
398  * When invoked from a syscall handler kprobe, returns a pointer to a
399  * struct pt_regs containing syscall arguments and suitable for passing to
400  * PT_REGS_PARMn_SYSCALL() and PT_REGS_PARMn_CORE_SYSCALL().
401  */
402 #ifndef PT_REGS_SYSCALL_REGS
403 /* By default, assume that the arch selects ARCH_HAS_SYSCALL_WRAPPER. */
404 #define PT_REGS_SYSCALL_REGS(ctx) ((struct pt_regs *)PT_REGS_PARM1(ctx))
405 #endif
406 
407 #ifndef ___bpf_concat
408 #define ___bpf_concat(a, b) a ## b
409 #endif
410 #ifndef ___bpf_apply
411 #define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
412 #endif
413 #ifndef ___bpf_nth
414 #define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
415 #endif
416 #ifndef ___bpf_narg
417 #define ___bpf_narg(...) ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
418 #endif
419 
420 #define ___bpf_ctx_cast0()            ctx
421 #define ___bpf_ctx_cast1(x)           ___bpf_ctx_cast0(), (void *)ctx[0]
422 #define ___bpf_ctx_cast2(x, args...)  ___bpf_ctx_cast1(args), (void *)ctx[1]
423 #define ___bpf_ctx_cast3(x, args...)  ___bpf_ctx_cast2(args), (void *)ctx[2]
424 #define ___bpf_ctx_cast4(x, args...)  ___bpf_ctx_cast3(args), (void *)ctx[3]
425 #define ___bpf_ctx_cast5(x, args...)  ___bpf_ctx_cast4(args), (void *)ctx[4]
426 #define ___bpf_ctx_cast6(x, args...)  ___bpf_ctx_cast5(args), (void *)ctx[5]
427 #define ___bpf_ctx_cast7(x, args...)  ___bpf_ctx_cast6(args), (void *)ctx[6]
428 #define ___bpf_ctx_cast8(x, args...)  ___bpf_ctx_cast7(args), (void *)ctx[7]
429 #define ___bpf_ctx_cast9(x, args...)  ___bpf_ctx_cast8(args), (void *)ctx[8]
430 #define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
431 #define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
432 #define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
433 #define ___bpf_ctx_cast(args...)      ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
434 
435 /*
436  * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
437  * similar kinds of BPF programs, that accept input arguments as a single
438  * pointer to untyped u64 array, where each u64 can actually be a typed
439  * pointer or integer of different size. Instead of requiring user to write
440  * manual casts and work with array elements by index, BPF_PROG macro
441  * allows user to declare a list of named and typed input arguments in the
442  * same syntax as for normal C function. All the casting is hidden and
443  * performed transparently, while user code can just assume working with
444  * function arguments of specified type and name.
445  *
446  * Original raw context argument is preserved as well as 'ctx' argument.
447  * This is useful when using BPF helpers that expect original context
448  * as one of the parameters (e.g., for bpf_perf_event_output()).
449  */
450 #define BPF_PROG(name, args...)						    \
451 name(unsigned long long *ctx);						    \
452 static __always_inline typeof(name(0))					    \
453 ____##name(unsigned long long *ctx, ##args);				    \
454 typeof(name(0)) name(unsigned long long *ctx)				    \
455 {									    \
456 	_Pragma("GCC diagnostic push")					    \
457 	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
458 	return ____##name(___bpf_ctx_cast(args));			    \
459 	_Pragma("GCC diagnostic pop")					    \
460 }									    \
461 static __always_inline typeof(name(0))					    \
462 ____##name(unsigned long long *ctx, ##args)
463 
464 #ifndef ___bpf_nth2
465 #define ___bpf_nth2(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13,	\
466 		    _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, N, ...) N
467 #endif
468 #ifndef ___bpf_narg2
469 #define ___bpf_narg2(...)	\
470 	___bpf_nth2(_, ##__VA_ARGS__, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7,	\
471 		    6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0)
472 #endif
473 
474 #define ___bpf_treg_cnt(t) \
475 	__builtin_choose_expr(sizeof(t) == 1, 1,	\
476 	__builtin_choose_expr(sizeof(t) == 2, 1,	\
477 	__builtin_choose_expr(sizeof(t) == 4, 1,	\
478 	__builtin_choose_expr(sizeof(t) == 8, 1,	\
479 	__builtin_choose_expr(sizeof(t) == 16, 2,	\
480 			      (void)0)))))
481 
482 #define ___bpf_reg_cnt0()		(0)
483 #define ___bpf_reg_cnt1(t, x)		(___bpf_reg_cnt0() + ___bpf_treg_cnt(t))
484 #define ___bpf_reg_cnt2(t, x, args...)	(___bpf_reg_cnt1(args) + ___bpf_treg_cnt(t))
485 #define ___bpf_reg_cnt3(t, x, args...)	(___bpf_reg_cnt2(args) + ___bpf_treg_cnt(t))
486 #define ___bpf_reg_cnt4(t, x, args...)	(___bpf_reg_cnt3(args) + ___bpf_treg_cnt(t))
487 #define ___bpf_reg_cnt5(t, x, args...)	(___bpf_reg_cnt4(args) + ___bpf_treg_cnt(t))
488 #define ___bpf_reg_cnt6(t, x, args...)	(___bpf_reg_cnt5(args) + ___bpf_treg_cnt(t))
489 #define ___bpf_reg_cnt7(t, x, args...)	(___bpf_reg_cnt6(args) + ___bpf_treg_cnt(t))
490 #define ___bpf_reg_cnt8(t, x, args...)	(___bpf_reg_cnt7(args) + ___bpf_treg_cnt(t))
491 #define ___bpf_reg_cnt9(t, x, args...)	(___bpf_reg_cnt8(args) + ___bpf_treg_cnt(t))
492 #define ___bpf_reg_cnt10(t, x, args...)	(___bpf_reg_cnt9(args) + ___bpf_treg_cnt(t))
493 #define ___bpf_reg_cnt11(t, x, args...)	(___bpf_reg_cnt10(args) + ___bpf_treg_cnt(t))
494 #define ___bpf_reg_cnt12(t, x, args...)	(___bpf_reg_cnt11(args) + ___bpf_treg_cnt(t))
495 #define ___bpf_reg_cnt(args...)	 ___bpf_apply(___bpf_reg_cnt, ___bpf_narg2(args))(args)
496 
497 #define ___bpf_union_arg(t, x, n) \
498 	__builtin_choose_expr(sizeof(t) == 1, ({ union { __u8 z[1]; t x; } ___t = { .z = {ctx[n]}}; ___t.x; }), \
499 	__builtin_choose_expr(sizeof(t) == 2, ({ union { __u16 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \
500 	__builtin_choose_expr(sizeof(t) == 4, ({ union { __u32 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \
501 	__builtin_choose_expr(sizeof(t) == 8, ({ union { __u64 z[1]; t x; } ___t = {.z = {ctx[n]} }; ___t.x; }), \
502 	__builtin_choose_expr(sizeof(t) == 16, ({ union { __u64 z[2]; t x; } ___t = {.z = {ctx[n], ctx[n + 1]} }; ___t.x; }), \
503 			      (void)0)))))
504 
505 #define ___bpf_ctx_arg0(n, args...)
506 #define ___bpf_ctx_arg1(n, t, x)		, ___bpf_union_arg(t, x, n - ___bpf_reg_cnt1(t, x))
507 #define ___bpf_ctx_arg2(n, t, x, args...)	, ___bpf_union_arg(t, x, n - ___bpf_reg_cnt2(t, x, args)) ___bpf_ctx_arg1(n, args)
508 #define ___bpf_ctx_arg3(n, t, x, args...)	, ___bpf_union_arg(t, x, n - ___bpf_reg_cnt3(t, x, args)) ___bpf_ctx_arg2(n, args)
509 #define ___bpf_ctx_arg4(n, t, x, args...)	, ___bpf_union_arg(t, x, n - ___bpf_reg_cnt4(t, x, args)) ___bpf_ctx_arg3(n, args)
510 #define ___bpf_ctx_arg5(n, t, x, args...)	, ___bpf_union_arg(t, x, n - ___bpf_reg_cnt5(t, x, args)) ___bpf_ctx_arg4(n, args)
511 #define ___bpf_ctx_arg6(n, t, x, args...)	, ___bpf_union_arg(t, x, n - ___bpf_reg_cnt6(t, x, args)) ___bpf_ctx_arg5(n, args)
512 #define ___bpf_ctx_arg7(n, t, x, args...)	, ___bpf_union_arg(t, x, n - ___bpf_reg_cnt7(t, x, args)) ___bpf_ctx_arg6(n, args)
513 #define ___bpf_ctx_arg8(n, t, x, args...)	, ___bpf_union_arg(t, x, n - ___bpf_reg_cnt8(t, x, args)) ___bpf_ctx_arg7(n, args)
514 #define ___bpf_ctx_arg9(n, t, x, args...)	, ___bpf_union_arg(t, x, n - ___bpf_reg_cnt9(t, x, args)) ___bpf_ctx_arg8(n, args)
515 #define ___bpf_ctx_arg10(n, t, x, args...)	, ___bpf_union_arg(t, x, n - ___bpf_reg_cnt10(t, x, args)) ___bpf_ctx_arg9(n, args)
516 #define ___bpf_ctx_arg11(n, t, x, args...)	, ___bpf_union_arg(t, x, n - ___bpf_reg_cnt11(t, x, args)) ___bpf_ctx_arg10(n, args)
517 #define ___bpf_ctx_arg12(n, t, x, args...)	, ___bpf_union_arg(t, x, n - ___bpf_reg_cnt12(t, x, args)) ___bpf_ctx_arg11(n, args)
518 #define ___bpf_ctx_arg(args...)	___bpf_apply(___bpf_ctx_arg, ___bpf_narg2(args))(___bpf_reg_cnt(args), args)
519 
520 #define ___bpf_ctx_decl0()
521 #define ___bpf_ctx_decl1(t, x)			, t x
522 #define ___bpf_ctx_decl2(t, x, args...)		, t x ___bpf_ctx_decl1(args)
523 #define ___bpf_ctx_decl3(t, x, args...)		, t x ___bpf_ctx_decl2(args)
524 #define ___bpf_ctx_decl4(t, x, args...)		, t x ___bpf_ctx_decl3(args)
525 #define ___bpf_ctx_decl5(t, x, args...)		, t x ___bpf_ctx_decl4(args)
526 #define ___bpf_ctx_decl6(t, x, args...)		, t x ___bpf_ctx_decl5(args)
527 #define ___bpf_ctx_decl7(t, x, args...)		, t x ___bpf_ctx_decl6(args)
528 #define ___bpf_ctx_decl8(t, x, args...)		, t x ___bpf_ctx_decl7(args)
529 #define ___bpf_ctx_decl9(t, x, args...)		, t x ___bpf_ctx_decl8(args)
530 #define ___bpf_ctx_decl10(t, x, args...)	, t x ___bpf_ctx_decl9(args)
531 #define ___bpf_ctx_decl11(t, x, args...)	, t x ___bpf_ctx_decl10(args)
532 #define ___bpf_ctx_decl12(t, x, args...)	, t x ___bpf_ctx_decl11(args)
533 #define ___bpf_ctx_decl(args...)	___bpf_apply(___bpf_ctx_decl, ___bpf_narg2(args))(args)
534 
535 /*
536  * BPF_PROG2 is an enhanced version of BPF_PROG in order to handle struct
537  * arguments. Since each struct argument might take one or two u64 values
538  * in the trampoline stack, argument type size is needed to place proper number
539  * of u64 values for each argument. Therefore, BPF_PROG2 has different
540  * syntax from BPF_PROG. For example, for the following BPF_PROG syntax:
541  *
542  *   int BPF_PROG(test2, int a, int b) { ... }
543  *
544  * the corresponding BPF_PROG2 syntax is:
545  *
546  *   int BPF_PROG2(test2, int, a, int, b) { ... }
547  *
548  * where type and the corresponding argument name are separated by comma.
549  *
550  * Use BPF_PROG2 macro if one of the arguments might be a struct/union larger
551  * than 8 bytes:
552  *
553  *   int BPF_PROG2(test_struct_arg, struct bpf_testmod_struct_arg_1, a, int, b,
554  *		   int, c, int, d, struct bpf_testmod_struct_arg_2, e, int, ret)
555  *   {
556  *        // access a, b, c, d, e, and ret directly
557  *        ...
558  *   }
559  */
560 #define BPF_PROG2(name, args...)						\
561 name(unsigned long long *ctx);							\
562 static __always_inline typeof(name(0))						\
563 ____##name(unsigned long long *ctx ___bpf_ctx_decl(args));			\
564 typeof(name(0)) name(unsigned long long *ctx)					\
565 {										\
566 	return ____##name(ctx ___bpf_ctx_arg(args));				\
567 }										\
568 static __always_inline typeof(name(0))						\
569 ____##name(unsigned long long *ctx ___bpf_ctx_decl(args))
570 
571 struct pt_regs;
572 
573 #define ___bpf_kprobe_args0()           ctx
574 #define ___bpf_kprobe_args1(x)          ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
575 #define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
576 #define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
577 #define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
578 #define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
579 #define ___bpf_kprobe_args(args...)     ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
580 
581 /*
582  * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
583  * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
584  * low-level way of getting kprobe input arguments from struct pt_regs, and
585  * provides a familiar typed and named function arguments syntax and
586  * semantics of accessing kprobe input parameters.
587  *
588  * Original struct pt_regs* context is preserved as 'ctx' argument. This might
589  * be necessary when using BPF helpers like bpf_perf_event_output().
590  */
591 #define BPF_KPROBE(name, args...)					    \
592 name(struct pt_regs *ctx);						    \
593 static __always_inline typeof(name(0))					    \
594 ____##name(struct pt_regs *ctx, ##args);				    \
595 typeof(name(0)) name(struct pt_regs *ctx)				    \
596 {									    \
597 	_Pragma("GCC diagnostic push")					    \
598 	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
599 	return ____##name(___bpf_kprobe_args(args));			    \
600 	_Pragma("GCC diagnostic pop")					    \
601 }									    \
602 static __always_inline typeof(name(0))					    \
603 ____##name(struct pt_regs *ctx, ##args)
604 
605 #define ___bpf_kretprobe_args0()       ctx
606 #define ___bpf_kretprobe_args1(x)      ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
607 #define ___bpf_kretprobe_args(args...) ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
608 
609 /*
610  * BPF_KRETPROBE is similar to BPF_KPROBE, except, it only provides optional
611  * return value (in addition to `struct pt_regs *ctx`), but no input
612  * arguments, because they will be clobbered by the time probed function
613  * returns.
614  */
615 #define BPF_KRETPROBE(name, args...)					    \
616 name(struct pt_regs *ctx);						    \
617 static __always_inline typeof(name(0))					    \
618 ____##name(struct pt_regs *ctx, ##args);				    \
619 typeof(name(0)) name(struct pt_regs *ctx)				    \
620 {									    \
621 	_Pragma("GCC diagnostic push")					    \
622 	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
623 	return ____##name(___bpf_kretprobe_args(args));			    \
624 	_Pragma("GCC diagnostic pop")					    \
625 }									    \
626 static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
627 
628 /* If kernel has CONFIG_ARCH_HAS_SYSCALL_WRAPPER, read pt_regs directly */
629 #define ___bpf_syscall_args0()           ctx
630 #define ___bpf_syscall_args1(x)          ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_SYSCALL(regs)
631 #define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_SYSCALL(regs)
632 #define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_SYSCALL(regs)
633 #define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_SYSCALL(regs)
634 #define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_SYSCALL(regs)
635 #define ___bpf_syscall_args(args...)     ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)
636 
637 /* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, we have to BPF_CORE_READ from pt_regs */
638 #define ___bpf_syswrap_args0()           ctx
639 #define ___bpf_syswrap_args1(x)          ___bpf_syswrap_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs)
640 #define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs)
641 #define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
642 #define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
643 #define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
644 #define ___bpf_syswrap_args(args...)     ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args)
645 
646 /*
647  * BPF_KSYSCALL is a variant of BPF_KPROBE, which is intended for
648  * tracing syscall functions, like __x64_sys_close. It hides the underlying
649  * platform-specific low-level way of getting syscall input arguments from
650  * struct pt_regs, and provides a familiar typed and named function arguments
651  * syntax and semantics of accessing syscall input parameters.
652  *
653  * Original struct pt_regs * context is preserved as 'ctx' argument. This might
654  * be necessary when using BPF helpers like bpf_perf_event_output().
655  *
656  * At the moment BPF_KSYSCALL does not transparently handle all the calling
657  * convention quirks for the following syscalls:
658  *
659  * - mmap(): __ARCH_WANT_SYS_OLD_MMAP.
660  * - clone(): CONFIG_CLONE_BACKWARDS, CONFIG_CLONE_BACKWARDS2 and
661  *            CONFIG_CLONE_BACKWARDS3.
662  * - socket-related syscalls: __ARCH_WANT_SYS_SOCKETCALL.
663  * - compat syscalls.
664  *
665  * This may or may not change in the future. User needs to take extra measures
666  * to handle such quirks explicitly, if necessary.
667  *
668  * This macro relies on BPF CO-RE support and virtual __kconfig externs.
669  */
670 #define BPF_KSYSCALL(name, args...)					    \
671 name(struct pt_regs *ctx);						    \
672 extern _Bool LINUX_HAS_SYSCALL_WRAPPER __kconfig;			    \
673 static __always_inline typeof(name(0))					    \
674 ____##name(struct pt_regs *ctx, ##args);				    \
675 typeof(name(0)) name(struct pt_regs *ctx)				    \
676 {									    \
677 	struct pt_regs *regs = LINUX_HAS_SYSCALL_WRAPPER		    \
678 			       ? (struct pt_regs *)PT_REGS_PARM1(ctx)	    \
679 			       : ctx;					    \
680 	_Pragma("GCC diagnostic push")					    \
681 	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
682 	if (LINUX_HAS_SYSCALL_WRAPPER)					    \
683 		return ____##name(___bpf_syswrap_args(args));		    \
684 	else								    \
685 		return ____##name(___bpf_syscall_args(args));		    \
686 	_Pragma("GCC diagnostic pop")					    \
687 }									    \
688 static __always_inline typeof(name(0))					    \
689 ____##name(struct pt_regs *ctx, ##args)
690 
691 #define BPF_KPROBE_SYSCALL BPF_KSYSCALL
692 
693 #endif
694