xref: /openbmc/linux/tools/lib/bpf/bpf.c (revision bb6d3fb3)
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not,  see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
{
	int fd;

	do {
		fd = sys_bpf(BPF_PROG_LOAD, attr, size);
	} while (fd < 0 && errno == EAGAIN);

	return fd;
}

int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
{
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = create_attr->map_type;
	attr.key_size = create_attr->key_size;
	attr.value_size = create_attr->value_size;
	attr.max_entries = create_attr->max_entries;
	attr.map_flags = create_attr->map_flags;
	if (create_attr->name)
		memcpy(attr.map_name, create_attr->name,
		       min(strlen(create_attr->name), BPF_OBJ_NAME_LEN - 1));
	attr.numa_node = create_attr->numa_node;
	attr.btf_fd = create_attr->btf_fd;
	attr.btf_key_type_id = create_attr->btf_key_type_id;
	attr.btf_value_type_id = create_attr->btf_value_type_id;
	attr.map_ifindex = create_attr->map_ifindex;
	if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS)
		attr.btf_vmlinux_value_type_id =
			create_attr->btf_vmlinux_value_type_id;
	else
		attr.inner_map_fd = create_attr->inner_map_fd;

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

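/*
 * Usage sketch (not part of the original file): create a small, named
 * hash map through bpf_create_map_xattr(). The name and sizes below are
 * made up for illustration. On success the return value is a map fd;
 * on failure it is -1 with errno set by the bpf(2) syscall.
 */
static int example_create_hash_map(void)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.name = "example_hash";
	map_attr.map_type = BPF_MAP_TYPE_HASH;
	map_attr.key_size = sizeof(__u32);
	map_attr.value_size = sizeof(__u64);
	map_attr.max_entries = 128;

	return bpf_create_map_xattr(&map_attr);
}
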
int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags, int node)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.name = name;
	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;
	if (node >= 0) {
		map_attr.numa_node = node;
		map_attr.map_flags |= BPF_F_NUMA_NODE;
	}

	return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map(enum bpf_map_type map_type, int key_size,
		   int value_size, int max_entries, __u32 map_flags)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;

	return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.name = name;
	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;

	return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
			       int key_size, int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = 4;
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	if (name)
		memcpy(attr.map_name, name,
		       min(strlen(name), BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
			  int key_size, int inner_map_fd, int max_entries,
			  __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, name, key_size,
					  inner_map_fd, max_entries, map_flags,
					  -1);
}

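/*
 * Usage sketch (not part of the original file): build an array-of-maps
 * with bpf_create_map_in_map(). The inner map acts only as a template
 * describing the outer map's value type, so its fd may be closed once
 * the outer map has been created. Names and sizes are illustrative.
 */
static int example_create_array_of_maps(void)
{
	int inner_fd, outer_fd;

	inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32),
				  sizeof(__u32), 1, 0);
	if (inner_fd < 0)
		return inner_fd;

	outer_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
					 "example_outer", sizeof(__u32),
					 inner_fd, 4, 0);
	close(inner_fd);
	return outer_fd;
}
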
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	__u64 info_len = (__u64)actual_rec_size * cnt;
	void *info, *nrecord;
	int i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* zero out bytes kernel does not understand */
	nrecord = info;
	for (i = 0; i < cnt; i++) {
		memcpy(nrecord, orecord, expected_rec_size);
		memset(nrecord + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		orecord += actual_rec_size;
		nrecord += actual_rec_size;
	}

	return info;
}

int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
			   char *log_buf, size_t log_buf_sz)
{
	void *finfo = NULL, *linfo = NULL;
	union bpf_attr attr;
	__u32 log_level;
	int fd;

	if (!load_attr || !log_buf != !log_buf_sz)
		return -EINVAL;

	log_level = load_attr->log_level;
	if (log_level > (4 | 2 | 1) || (log_level && !log_buf))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = load_attr->prog_type;
	attr.expected_attach_type = load_attr->expected_attach_type;
	if (attr.prog_type == BPF_PROG_TYPE_STRUCT_OPS) {
		attr.attach_btf_id = load_attr->attach_btf_id;
	} else if (attr.prog_type == BPF_PROG_TYPE_TRACING ||
		   attr.prog_type == BPF_PROG_TYPE_EXT) {
		attr.attach_btf_id = load_attr->attach_btf_id;
		attr.attach_prog_fd = load_attr->attach_prog_fd;
	} else {
		attr.prog_ifindex = load_attr->prog_ifindex;
		attr.kern_version = load_attr->kern_version;
	}
	attr.insn_cnt = (__u32)load_attr->insns_cnt;
	attr.insns = ptr_to_u64(load_attr->insns);
	attr.license = ptr_to_u64(load_attr->license);

	attr.log_level = log_level;
	if (log_level) {
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_buf_sz;
	} else {
		attr.log_buf = ptr_to_u64(NULL);
		attr.log_size = 0;
	}

	attr.prog_btf_fd = load_attr->prog_btf_fd;
	attr.func_info_rec_size = load_attr->func_info_rec_size;
	attr.func_info_cnt = load_attr->func_info_cnt;
	attr.func_info = ptr_to_u64(load_attr->func_info);
	attr.line_info_rec_size = load_attr->line_info_rec_size;
	attr.line_info_cnt = load_attr->line_info_cnt;
	attr.line_info = ptr_to_u64(load_attr->line_info);
	if (load_attr->name)
		memcpy(attr.prog_name, load_attr->name,
		       min(strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
	attr.prog_flags = load_attr->prog_flags;

	fd = sys_bpf_prog_load(&attr, sizeof(attr));
	if (fd >= 0)
		return fd;

	/* After bpf_prog_load, the kernel may modify certain attributes
	 * to give user space a hint how to deal with loading failure.
	 * Check to see whether we can make some changes and load again.
	 */
	while (errno == E2BIG && (!finfo || !linfo)) {
		if (!finfo && attr.func_info_cnt &&
		    attr.func_info_rec_size < load_attr->func_info_rec_size) {
			/* try with corrected func info records */
			finfo = alloc_zero_tailing_info(load_attr->func_info,
							load_attr->func_info_cnt,
							load_attr->func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo)
				goto done;

			attr.func_info = ptr_to_u64(finfo);
			attr.func_info_rec_size = load_attr->func_info_rec_size;
		} else if (!linfo && attr.line_info_cnt &&
			   attr.line_info_rec_size <
			   load_attr->line_info_rec_size) {
			linfo = alloc_zero_tailing_info(load_attr->line_info,
							load_attr->line_info_cnt,
							load_attr->line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo)
				goto done;

			attr.line_info = ptr_to_u64(linfo);
			attr.line_info_rec_size = load_attr->line_info_rec_size;
		} else {
			break;
		}

		fd = sys_bpf_prog_load(&attr, sizeof(attr));

		if (fd >= 0)
			goto done;
	}

	if (log_level || !log_buf)
		goto done;

	/* Try again with log */
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = 1;
	log_buf[0] = 0;
	fd = sys_bpf_prog_load(&attr, sizeof(attr));
done:
	free(finfo);
	free(linfo);
	return fd;
}

int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		     size_t insns_cnt, const char *license,
		     __u32 kern_version, char *log_buf,
		     size_t log_buf_sz)
{
	struct bpf_load_program_attr load_attr;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = 0;
	load_attr.name = NULL;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;

	return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
}

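/*
 * Usage sketch (not part of the original file): load a minimal
 * "r0 = 0; exit" socket filter through bpf_load_program(). The two
 * instructions are hand-encoded here (0xb7 = BPF_ALU64|BPF_MOV|BPF_K,
 * 0x95 = BPF_JMP|BPF_EXIT); real callers normally generate them with
 * helper macros. log_buf may be NULL (with log_buf_sz 0) if no
 * verifier log is wanted.
 */
static int example_load_trivial_prog(char *log_buf, size_t log_buf_sz)
{
	const struct bpf_insn insns[] = {
		{ .code = 0xb7, .dst_reg = 0, .src_reg = 0, .off = 0, .imm = 0 },
		{ .code = 0x95, .dst_reg = 0, .src_reg = 0, .off = 0, .imm = 0 },
	};

	return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns,
				sizeof(insns) / sizeof(insns[0]), "GPL",
				0, log_buf, log_buf_sz);
}
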
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, __u32 prog_flags, const char *license,
		       __u32 kern_version, char *log_buf, size_t log_buf_sz,
		       int log_level)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	log_buf[0] = 0;
	attr.kern_version = kern_version;
	attr.prog_flags = prog_flags;

	return sys_bpf_prog_load(&attr, sizeof(attr));
}

int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}

int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}

int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
}

int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}

int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}

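/*
 * Usage sketch (not part of the original file): walk every element of a
 * map, assumed here to use __u32 keys and __u64 values. A NULL key asks
 * the kernel for the first key; iteration stops when the syscall fails,
 * typically with errno == ENOENT after the last key has been returned.
 */
static void example_for_each_elem(int fd)
{
	__u32 key, next_key;
	__u64 value;
	int err;

	err = bpf_map_get_next_key(fd, NULL, &next_key);
	while (!err) {
		key = next_key;
		if (!bpf_map_lookup_elem(fd, &key, &value))
			; /* key/value hold the current element here */
		err = bpf_map_get_next_key(fd, &key, &next_key);
	}
}
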
int bpf_map_freeze(int fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;

	return sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
}

static int bpf_map_batch_common(int cmd, int fd, void  *in_batch,
				void *out_batch, void *keys, void *values,
				__u32 *count,
				const struct bpf_map_batch_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_map_batch_opts))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.batch.map_fd = fd;
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
	attr.batch.count = *count;
	attr.batch.elem_flags  = OPTS_GET(opts, elem_flags, 0);
	attr.batch.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(cmd, &attr, sizeof(attr));
	*count = attr.batch.count;

	return ret;
}

int bpf_map_delete_batch(int fd, void *keys, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
				    NULL, keys, NULL, count, opts);
}

int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
			 void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
				    out_batch, keys, values, count, opts);
}

int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
				    fd, in_batch, out_batch, keys, values,
				    count, opts);
}

int bpf_map_update_batch(int fd, void *keys, void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
				    keys, values, count, opts);
}

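/*
 * Usage sketch (not part of the original file): drain a map in chunks
 * with bpf_map_lookup_batch(). The first call passes NULL as in_batch;
 * every later call feeds back the out_batch token from the previous
 * one. The walk is complete when the call fails with errno == ENOENT
 * (count still tells how many trailing elements were returned). The
 * example assumes a hash map with __u32 keys and __u64 values, for
 * which a __u32 resume token is sufficient.
 */
static int example_lookup_batch(int fd)
{
	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
		.elem_flags = 0,
		.flags = 0,
	);
	__u32 batch, count;
	__u32 keys[64];
	__u64 values[64];
	void *in_batch = NULL;
	int err;

	do {
		count = 64;
		err = bpf_map_lookup_batch(fd, in_batch, &batch, keys, values,
					   &count, &opts);
		if (err && errno != ENOENT)
			return err;
		/* process the first `count` entries of keys[]/values[] */
		in_batch = &batch;
	} while (!err);

	return 0;
}
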
int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}

int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}

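/*
 * Usage sketch (not part of the original file): pin an object fd under
 * the bpf filesystem (assumed to be mounted at /sys/fs/bpf) and reopen
 * it later by path. The path name is made up for illustration.
 */
static int example_pin_and_reopen(int map_fd)
{
	const char *path = "/sys/fs/bpf/example_map";

	if (bpf_obj_pin(map_fd, path))
		return -1;

	return bpf_obj_get(path);
}
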
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
		.flags = flags,
	);

	return bpf_prog_attach_xattr(prog_fd, target_fd, type, &opts);
}

int bpf_prog_attach_xattr(int prog_fd, int target_fd,
			  enum bpf_attach_type type,
			  const struct bpf_prog_attach_opts *opts)
{
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_prog_attach_opts))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd	   = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type   = type;
	attr.attach_flags  = OPTS_GET(opts, flags, 0);
	attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);

	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}

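/*
 * Usage sketch (not part of the original file): attach an already
 * loaded cgroup ingress program to a cgroup directory fd, allowing
 * other programs to coexist on the same hook via BPF_F_ALLOW_MULTI.
 */
static int example_attach_cgroup_prog(int cgroup_fd, int prog_fd)
{
	return bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS,
			       BPF_F_ALLOW_MULTI);
}
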
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd	 = target_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}

int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd	 = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}

int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd	= target_fd;
	attr.query.attach_type	= type;
	attr.query.query_flags	= query_flags;
	attr.query.prog_cnt	= *prog_cnt;
	attr.query.prog_ids	= ptr_to_u64(prog_ids);

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
	if (attach_flags)
		*attach_flags = attr.query.attach_flags;
	*prog_cnt = attr.query.prog_cnt;
	return ret;
}

int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_out = ptr_to_u64(data_out);
	attr.test.data_size_in = size;
	attr.test.repeat = repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;
	return ret;
}

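/*
 * Usage sketch (not part of the original file): run a loaded program
 * once over a zeroed 64-byte input buffer. On success, retval carries
 * the program's return code and duration its average runtime in
 * nanoseconds. The buffer sizes are illustrative.
 */
static int example_test_run(int prog_fd)
{
	char data[64] = {}, data_out[64];
	__u32 size_out = sizeof(data_out), retval, duration;
	int err;

	err = bpf_prog_test_run(prog_fd, 1, data, sizeof(data), data_out,
				&size_out, &retval, &duration);
	if (err)
		return err;

	return (int)retval;
}
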
int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
{
	union bpf_attr attr;
	int ret;

	if (!test_attr->data_out && test_attr->data_size_out > 0)
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = test_attr->prog_fd;
	attr.test.data_in = ptr_to_u64(test_attr->data_in);
	attr.test.data_out = ptr_to_u64(test_attr->data_out);
	attr.test.data_size_in = test_attr->data_size_in;
	attr.test.data_size_out = test_attr->data_size_out;
	attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in);
	attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out);
	attr.test.ctx_size_in = test_attr->ctx_size_in;
	attr.test.ctx_size_out = test_attr->ctx_size_out;
	attr.test.repeat = test_attr->repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	test_attr->data_size_out = attr.test.data_size_out;
	test_attr->ctx_size_out = attr.test.ctx_size_out;
	test_attr->retval = attr.test.retval;
	test_attr->duration = attr.test.duration;
	return ret;
}

static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(cmd, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}

int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}

int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}

int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_btf_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.btf_id = id;

	return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*info_len = attr.info.info_len;

	return err;
}

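/*
 * Usage sketch (not part of the original file): enumerate all programs
 * currently loaded in the kernel, grab an fd for each id and query its
 * bpf_prog_info. An id can vanish between the two calls, so a failed
 * bpf_prog_get_fd_by_id() is simply skipped.
 */
static void example_for_each_prog(void)
{
	struct bpf_prog_info info;
	__u32 id = 0, info_len;
	int fd;

	while (!bpf_prog_get_next_id(id, &id)) {
		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0)
			continue;

		memset(&info, 0, sizeof(info));
		info_len = sizeof(info);
		if (!bpf_obj_get_info_by_fd(fd, &info, &info_len))
			; /* info.name, info.id, info.type are now valid */

		close(fd);
	}
}
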
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = ptr_to_u64(name);
	attr.raw_tracepoint.prog_fd = prog_fd;

	return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}

int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
		 bool do_log)
{
	union bpf_attr attr = {};
	int fd;

	attr.btf = ptr_to_u64(btf);
	attr.btf_size = btf_size;

retry:
	if (do_log && log_buf && log_buf_size) {
		attr.btf_log_level = 1;
		attr.btf_log_size = log_buf_size;
		attr.btf_log_buf = ptr_to_u64(log_buf);
	}

	fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
	if (fd == -1 && !do_log && log_buf && log_buf_size) {
		do_log = true;
		goto retry;
	}

	return fd;
}

int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	union bpf_attr attr = {};
	int err;

	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf = ptr_to_u64(buf);
	attr.task_fd_query.buf_len = *buf_len;

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));
	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return err;
}