// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/*
 * When building perf, unistd.h is overridden, so __NR_bpf must be
 * defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
{
	int fd;

	/* EAGAIN from BPF_PROG_LOAD marks a transient failure, so retry
	 * until the load either succeeds or fails for a different reason.
	 */
	do {
		fd = sys_bpf(BPF_PROG_LOAD, attr, size);
	} while (fd < 0 && errno == EAGAIN);

	return fd;
}

int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
{
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = create_attr->map_type;
	attr.key_size = create_attr->key_size;
	attr.value_size = create_attr->value_size;
	attr.max_entries = create_attr->max_entries;
	attr.map_flags = create_attr->map_flags;
	if (create_attr->name)
		memcpy(attr.map_name, create_attr->name,
		       min(strlen(create_attr->name), BPF_OBJ_NAME_LEN - 1));
	attr.numa_node = create_attr->numa_node;
	attr.btf_fd = create_attr->btf_fd;
	attr.btf_key_type_id = create_attr->btf_key_type_id;
	attr.btf_value_type_id = create_attr->btf_value_type_id;
	attr.map_ifindex = create_attr->map_ifindex;
	if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS)
		attr.btf_vmlinux_value_type_id =
			create_attr->btf_vmlinux_value_type_id;
	else
		attr.inner_map_fd = create_attr->inner_map_fd;

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags, int node)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.name = name;
	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;
	if (node >= 0) {
		map_attr.numa_node = node;
		map_attr.map_flags |= BPF_F_NUMA_NODE;
	}

	return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map(enum bpf_map_type map_type, int key_size,
		   int value_size, int max_entries, __u32 map_flags)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;

	return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.name = name;
	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;

	return bpf_create_map_xattr(&map_attr);
}
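
/* Illustrative usage sketch (not part of the original file): create a
 * small named hash map with 4-byte keys and 8-byte values.  On success
 * the return value is a map fd; on failure it is -1 with errno set.
 * The function and map names are hypothetical.
 */
static __attribute__((unused)) int example_create_hash_map(void)
{
	return bpf_create_map_name(BPF_MAP_TYPE_HASH, "example_map",
				   sizeof(__u32), sizeof(__u64),
				   1024 /* max_entries */, 0 /* flags */);
}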

int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
			       int key_size, int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = 4;
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	if (name)
		memcpy(attr.map_name, name,
		       min(strlen(name), BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
			  int key_size, int inner_map_fd, int max_entries,
			  __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, name, key_size,
					  inner_map_fd, max_entries, map_flags,
					  -1);
}
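
/* Illustrative usage sketch (not part of the original file): build an
 * array-of-maps.  The kernel derives the inner maps' shape from the
 * template fd passed as inner_map_fd; outer-map values hold references
 * to inner maps, which is why value_size is fixed to 4 above.  Names
 * and sizes here are hypothetical.
 */
static __attribute__((unused)) int example_create_map_in_map(void)
{
	int inner_fd, outer_fd;

	inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32),
				  sizeof(__u64), 1, 0);
	if (inner_fd < 0)
		return inner_fd;

	outer_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
					 "example_outer", sizeof(__u32),
					 inner_fd, 8, 0);
	close(inner_fd);	/* template fd is no longer needed */
	return outer_fd;
}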

static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	__u64 info_len = (__u64)actual_rec_size * cnt;
	void *info, *nrecord;
	int i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* zero out bytes kernel does not understand */
	nrecord = info;
	for (i = 0; i < cnt; i++) {
		memcpy(nrecord, orecord, expected_rec_size);
		memset(nrecord + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		orecord += actual_rec_size;
		nrecord += actual_rec_size;
	}

	return info;
}

int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
			   char *log_buf, size_t log_buf_sz)
{
	void *finfo = NULL, *linfo = NULL;
	union bpf_attr attr;
	__u32 log_level;
	int fd;

	if (!load_attr || !log_buf != !log_buf_sz)
		return -EINVAL;

	log_level = load_attr->log_level;
	if (log_level > (4 | 2 | 1) || (log_level && !log_buf))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = load_attr->prog_type;
	attr.expected_attach_type = load_attr->expected_attach_type;
	if (attr.prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
	    attr.prog_type == BPF_PROG_TYPE_LSM) {
		attr.attach_btf_id = load_attr->attach_btf_id;
	} else if (attr.prog_type == BPF_PROG_TYPE_TRACING ||
		   attr.prog_type == BPF_PROG_TYPE_EXT) {
		attr.attach_btf_id = load_attr->attach_btf_id;
		attr.attach_prog_fd = load_attr->attach_prog_fd;
	} else {
		attr.prog_ifindex = load_attr->prog_ifindex;
		attr.kern_version = load_attr->kern_version;
	}
	attr.insn_cnt = (__u32)load_attr->insns_cnt;
	attr.insns = ptr_to_u64(load_attr->insns);
	attr.license = ptr_to_u64(load_attr->license);

	attr.log_level = log_level;
	if (log_level) {
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_buf_sz;
	} else {
		attr.log_buf = ptr_to_u64(NULL);
		attr.log_size = 0;
	}

	attr.prog_btf_fd = load_attr->prog_btf_fd;
	attr.func_info_rec_size = load_attr->func_info_rec_size;
	attr.func_info_cnt = load_attr->func_info_cnt;
	attr.func_info = ptr_to_u64(load_attr->func_info);
	attr.line_info_rec_size = load_attr->line_info_rec_size;
	attr.line_info_cnt = load_attr->line_info_cnt;
	attr.line_info = ptr_to_u64(load_attr->line_info);
	if (load_attr->name)
		memcpy(attr.prog_name, load_attr->name,
		       min(strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
	attr.prog_flags = load_attr->prog_flags;

	fd = sys_bpf_prog_load(&attr, sizeof(attr));
	if (fd >= 0)
		return fd;

	/* After bpf_prog_load, the kernel may modify certain attributes
	 * to give user space a hint about how to deal with loading failure.
	 * Check to see whether we can make some changes and load again.
	 */
	while (errno == E2BIG && (!finfo || !linfo)) {
		if (!finfo && attr.func_info_cnt &&
		    attr.func_info_rec_size < load_attr->func_info_rec_size) {
			/* try with corrected func info records */
			finfo = alloc_zero_tailing_info(load_attr->func_info,
							load_attr->func_info_cnt,
							load_attr->func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo)
				goto done;

			attr.func_info = ptr_to_u64(finfo);
			attr.func_info_rec_size = load_attr->func_info_rec_size;
		} else if (!linfo && attr.line_info_cnt &&
			   attr.line_info_rec_size <
			   load_attr->line_info_rec_size) {
			linfo = alloc_zero_tailing_info(load_attr->line_info,
							load_attr->line_info_cnt,
							load_attr->line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo)
				goto done;

			attr.line_info = ptr_to_u64(linfo);
			attr.line_info_rec_size = load_attr->line_info_rec_size;
		} else {
			break;
		}

		fd = sys_bpf_prog_load(&attr, sizeof(attr));

		if (fd >= 0)
			goto done;
	}

	if (log_level || !log_buf)
		goto done;

	/* Try again with log */
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = 1;
	log_buf[0] = 0;
	fd = sys_bpf_prog_load(&attr, sizeof(attr));
done:
	free(finfo);
	free(linfo);
	return fd;
}

int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		     size_t insns_cnt, const char *license,
		     __u32 kern_version, char *log_buf,
		     size_t log_buf_sz)
{
	struct bpf_load_program_attr load_attr;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = 0;
	load_attr.name = NULL;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;

	return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
}
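
/* Illustrative usage sketch (not part of the original file): load a
 * minimal "return 0" socket filter.  The two instructions are spelled
 * out with raw opcode fields so that only uapi linux/bpf.h is needed;
 * the function name is hypothetical.  On failure, log_buf holds the
 * verifier log.
 */
static __attribute__((unused)) int example_load_trivial_prog(char *log_buf,
							     size_t log_buf_sz)
{
	struct bpf_insn insns[] = {
		/* r0 = 0 */
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
		  .dst_reg = BPF_REG_0, .imm = 0 },
		/* exit */
		{ .code = BPF_JMP | BPF_EXIT },
	};

	return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns,
				sizeof(insns) / sizeof(insns[0]), "GPL", 0,
				log_buf, log_buf_sz);
}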

int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, __u32 prog_flags, const char *license,
		       __u32 kern_version, char *log_buf, size_t log_buf_sz,
		       int log_level)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	log_buf[0] = 0;
	attr.kern_version = kern_version;
	attr.prog_flags = prog_flags;

	return sys_bpf_prog_load(&attr, sizeof(attr));
}

int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}

int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}

int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
}

int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}

int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
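
/* Illustrative usage sketch (not part of the original file): write one
 * element, read it back, then walk every key.  Passing NULL as the
 * "previous" key to bpf_map_get_next_key starts iteration from the
 * first key; -1 with errno == ENOENT marks the end.  Key/value types
 * here are hypothetical and must match the map's layout.
 */
static __attribute__((unused)) int example_map_round_trip(int fd)
{
	__u32 key = 1, cur, next;
	__u64 val = 42;
	int err;

	err = bpf_map_update_elem(fd, &key, &val, BPF_ANY);
	if (err)
		return err;

	err = bpf_map_lookup_elem(fd, &key, &val);
	if (err)
		return err;

	err = bpf_map_get_next_key(fd, NULL, &cur);
	while (!err) {
		/* cur holds a valid key here */
		err = bpf_map_get_next_key(fd, &cur, &next);
		cur = next;
	}
	return errno == ENOENT ? 0 : err;
}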

int bpf_map_freeze(int fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;

	return sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
}

static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
				void *out_batch, void *keys, void *values,
				__u32 *count,
				const struct bpf_map_batch_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_map_batch_opts))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.batch.map_fd = fd;
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
	attr.batch.count = *count;
	attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
	attr.batch.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(cmd, &attr, sizeof(attr));
	*count = attr.batch.count;

	return ret;
}

int bpf_map_delete_batch(int fd, void *keys, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
				    NULL, keys, NULL, count, opts);
}

int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
			 void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
				    out_batch, keys, values, count, opts);
}

int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
				    fd, in_batch, out_batch, keys, values,
				    count, opts);
}

int bpf_map_update_batch(int fd, void *keys, void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
				    keys, values, count, opts);
}
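
/* Illustrative usage sketch (not part of the original file): read up to
 * 64 elements per call from a map with __u32 keys and __u64 values.
 * in_batch/out_batch carry the kernel's opaque resume token between
 * calls (a __u32 here, which is what the selftests use for hash maps);
 * -1 with errno == ENOENT means the walk is complete.
 */
static __attribute__((unused)) int example_lookup_batch(int fd)
{
	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts);
	__u32 keys[64], batch, count, total = 0;
	__u64 vals[64];
	int err;

	do {
		count = 64;	/* in: capacity; out: elements returned */
		err = bpf_map_lookup_batch(fd, total ? &batch : NULL,
					   &batch, keys, vals, &count, &opts);
		total += count;
	} while (!err);

	/* total now holds the number of elements copied out */
	return errno == ENOENT ? 0 : err;
}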

int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}

int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}
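
/* Illustrative usage sketch (not part of the original file): pin an fd
 * under the bpf filesystem (conventionally mounted at /sys/fs/bpf) and
 * reopen the same object by path, e.g. from another process.  The path
 * is hypothetical.
 */
static __attribute__((unused)) int example_pin_and_reopen(int map_fd)
{
	const char *path = "/sys/fs/bpf/example_map";
	int err;

	err = bpf_obj_pin(map_fd, path);
	if (err)
		return err;

	return bpf_obj_get(path);	/* new fd for the same map */
}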

int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
		.flags = flags,
	);

	return bpf_prog_attach_xattr(prog_fd, target_fd, type, &opts);
}

int bpf_prog_attach_xattr(int prog_fd, int target_fd,
			  enum bpf_attach_type type,
			  const struct bpf_prog_attach_opts *opts)
{
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_prog_attach_opts))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd	   = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type   = type;
	attr.attach_flags  = OPTS_GET(opts, flags, 0);
	attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);

	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}

int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd	 = target_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}

int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd	 = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
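
/* Illustrative usage sketch (not part of the original file): attach a
 * loaded program to a cgroup (cgroup_fd is an open fd of a cgroup v2
 * directory) and detach it again.  BPF_F_ALLOW_MULTI permits several
 * programs on the same cgroup.  The attach type is just an example.
 */
static __attribute__((unused)) int example_cgroup_attach(int cgroup_fd,
							 int prog_fd)
{
	int err;

	err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS,
			      BPF_F_ALLOW_MULTI);
	if (err)
		return err;

	return bpf_prog_detach2(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS);
}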

int bpf_link_create(int prog_fd, int target_fd,
		    enum bpf_attach_type attach_type,
		    const struct bpf_link_create_opts *opts)
{
	__u32 target_btf_id, iter_info_len;
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_link_create_opts))
		return -EINVAL;

	iter_info_len = OPTS_GET(opts, iter_info_len, 0);
	target_btf_id = OPTS_GET(opts, target_btf_id, 0);

	if (iter_info_len && target_btf_id)
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;
	attr.link_create.flags = OPTS_GET(opts, flags, 0);

	if (iter_info_len) {
		attr.link_create.iter_info =
			ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
		attr.link_create.iter_info_len = iter_info_len;
	} else if (target_btf_id) {
		attr.link_create.target_btf_id = target_btf_id;
	}

	return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
}
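
/* Illustrative usage sketch (not part of the original file): create a
 * cgroup link for an already-loaded program.  Unlike the legacy attach
 * API above, the attachment lives only as long as the returned link fd
 * (or a pin of it) stays around.
 */
static __attribute__((unused)) int example_cgroup_link(int cgroup_fd,
							int prog_fd)
{
	return bpf_link_create(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS,
			       NULL /* default opts */);
}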

int bpf_link_detach(int link_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_detach.link_fd = link_fd;

	return sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr));
}

int bpf_link_update(int link_fd, int new_prog_fd,
		    const struct bpf_link_update_opts *opts)
{
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_link_update_opts))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.flags = OPTS_GET(opts, flags, 0);
	attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);

	return sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
}

int bpf_iter_create(int link_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.iter_create.link_fd = link_fd;

	return sys_bpf(BPF_ITER_CREATE, &attr, sizeof(attr));
}

int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd	= target_fd;
	attr.query.attach_type	= type;
	attr.query.query_flags	= query_flags;
	attr.query.prog_cnt	= *prog_cnt;
	attr.query.prog_ids	= ptr_to_u64(prog_ids);

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
	if (attach_flags)
		*attach_flags = attr.query.attach_flags;
	*prog_cnt = attr.query.prog_cnt;
	return ret;
}

int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_out = ptr_to_u64(data_out);
	attr.test.data_size_in = size;
	attr.test.repeat = repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;
	return ret;
}

int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
{
	union bpf_attr attr;
	int ret;

	if (!test_attr->data_out && test_attr->data_size_out > 0)
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = test_attr->prog_fd;
	attr.test.data_in = ptr_to_u64(test_attr->data_in);
	attr.test.data_out = ptr_to_u64(test_attr->data_out);
	attr.test.data_size_in = test_attr->data_size_in;
	attr.test.data_size_out = test_attr->data_size_out;
	attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in);
	attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out);
	attr.test.ctx_size_in = test_attr->ctx_size_in;
	attr.test.ctx_size_out = test_attr->ctx_size_out;
	attr.test.repeat = test_attr->repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	test_attr->data_size_out = attr.test.data_size_out;
	test_attr->ctx_size_out = attr.test.ctx_size_out;
	test_attr->retval = attr.test.retval;
	test_attr->duration = attr.test.duration;
	return ret;
}

int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_test_run_opts))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.cpu = OPTS_GET(opts, cpu, 0);
	attr.test.flags = OPTS_GET(opts, flags, 0);
	attr.test.repeat = OPTS_GET(opts, repeat, 0);
	attr.test.duration = OPTS_GET(opts, duration, 0);
	attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
	attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
	attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
	attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
	attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
	attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
	attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
	attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	OPTS_SET(opts, data_size_out, attr.test.data_size_out);
	OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
	OPTS_SET(opts, duration, attr.test.duration);
	OPTS_SET(opts, retval, attr.test.retval);
	return ret;
}
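
/* Illustrative usage sketch (not part of the original file): run a
 * program once over a caller-supplied packet buffer and read back the
 * program's return value.  The function name and parameters are
 * hypothetical; retval and duration are filled in by the kernel.
 */
static __attribute__((unused)) int example_test_run(int prog_fd,
						    void *pkt, __u32 pkt_len)
{
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.repeat = 1,
	);
	int err;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	if (err)
		return err;
	/* topts.retval and topts.duration are now valid */
	return (int)topts.retval;
}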

static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(cmd, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}

int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}

int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}

int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
}
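
/* Illustrative usage sketch (not part of the original file): walk all
 * loaded programs by id, pairing the next-id iterators with the
 * fd-by-id getters below.  Iteration starts from id 0 and ends when
 * the kernel returns -1 with errno == ENOENT.
 */
static __attribute__((unused)) int example_for_each_prog_id(void)
{
	__u32 id = 0;
	int fd;

	while (!bpf_prog_get_next_id(id, &id)) {
		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0)
			continue;	/* program may have just unloaded */
		/* ... inspect fd here ... */
		close(fd);
	}
	return errno == ENOENT ? 0 : -errno;
}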

int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_btf_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.btf_id = id;

	return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_link_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_id = id;

	return sys_bpf(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = bpf_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*info_len = attr.info.info_len;

	return err;
}
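
/* Illustrative usage sketch (not part of the original file): fetch
 * bpf_prog_info for a program fd.  info_len is in/out: the caller
 * states the buffer size and the kernel reports how much it actually
 * wrote.
 */
static __attribute__((unused)) int example_prog_info(int prog_fd)
{
	struct bpf_prog_info info;
	__u32 len = sizeof(info);
	int err;

	memset(&info, 0, sizeof(info));
	err = bpf_obj_get_info_by_fd(prog_fd, &info, &len);
	if (err)
		return err;
	/* info.name, info.id, info.type etc. are now valid */
	return 0;
}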

int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = ptr_to_u64(name);
	attr.raw_tracepoint.prog_fd = prog_fd;

	return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}

int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
		 bool do_log)
{
	union bpf_attr attr = {};
	int fd;

	attr.btf = ptr_to_u64(btf);
	attr.btf_size = btf_size;

retry:
	if (do_log && log_buf && log_buf_size) {
		attr.btf_log_level = 1;
		attr.btf_log_size = log_buf_size;
		attr.btf_log_buf = ptr_to_u64(log_buf);
	}

	fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
	if (fd == -1 && !do_log && log_buf && log_buf_size) {
		do_log = true;
		goto retry;
	}

	return fd;
}

int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	union bpf_attr attr = {};
	int err;

	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf = ptr_to_u64(buf);
	attr.task_fd_query.buf_len = *buf_len;

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));
	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return err;
}

int bpf_enable_stats(enum bpf_stats_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.enable_stats.type = type;

	return sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
}

int bpf_prog_bind_map(int prog_fd, int map_fd,
		      const struct bpf_prog_bind_opts *opts)
{
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_prog_bind_opts))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);

	return sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
}