// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not,  see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <limits.h>
#include <sys/resource.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# elif defined(__mips__) && defined(_ABIO32)
#  define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
#  define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
#  define __NR_bpf 5315
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
			     unsigned int size)
{
	int fd;

	fd = sys_bpf(cmd, attr, size);
	return ensure_good_fd(fd);
}

int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{
	int fd;

	do {
		fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
	} while (fd < 0 && errno == EAGAIN && --attempts > 0);

	return fd;
}

/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
 * memcg-based memory accounting for BPF maps and progs. This was done in [0].
 * We use the support for bpf_ktime_get_coarse_ns() helper, which was added in
 * the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF.
 *
 *   [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
 *   [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
 */
int probe_memcg_account(void)
{
	const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
	struct bpf_insn insns[] = {
		BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
		BPF_EXIT_INSN(),
	};
	size_t insn_cnt = ARRAY_SIZE(insns);
	union bpf_attr attr;
	int prog_fd;
	/* attempt loading a trivial program that calls bpf_ktime_get_coarse_ns() */
	memset(&attr, 0, attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = insn_cnt;
	attr.license = ptr_to_u64("GPL");

	prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
	if (prog_fd >= 0) {
		close(prog_fd);
		return 1;
	}
	return 0;
}

static bool memlock_bumped;
static rlim_t memlock_rlim = RLIM_INFINITY;

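/* Illustrative usage sketch (editor's note, not part of the original file):
 * an application that manages RLIMIT_MEMLOCK itself can disable libbpf's
 * auto-bumping before the first map or program is loaded:
 *
 *	libbpf_set_memlock_rlim(0);
 *
 * Passing a non-zero byte value instead caps the bump at that limit.
 */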
int libbpf_set_memlock_rlim(size_t memlock_bytes)
{
	if (memlock_bumped)
		return libbpf_err(-EBUSY);

	memlock_rlim = memlock_bytes;
	return 0;
}

int bump_rlimit_memlock(void)
{
	struct rlimit rlim;

	/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
	if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
		return 0;

	memlock_bumped = true;

	/* zero memlock_rlim disables auto-bumping RLIMIT_MEMLOCK */
	if (memlock_rlim == 0)
		return 0;

	rlim.rlim_cur = rlim.rlim_max = memlock_rlim;
	if (setrlimit(RLIMIT_MEMLOCK, &rlim))
		return -errno;

	return 0;
}

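/* Illustrative usage sketch (editor's note, not part of the original file):
 * creating a small hash map; the name, key/value types, and option below are
 * example assumptions. On failure, the result follows libbpf_err_errno()
 * semantics (negative error or -1 with errno set, per libbpf's error mode).
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
 *	int map_fd;
 *
 *	map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "example_map",
 *				sizeof(__u32), sizeof(__u64), 128, &opts);
 */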
int bpf_map_create(enum bpf_map_type map_type,
		   const char *map_name,
		   __u32 key_size,
		   __u32 value_size,
		   __u32 max_entries,
		   const struct bpf_map_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
	union bpf_attr attr;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_map_create_opts))
		return libbpf_err(-EINVAL);

	attr.map_type = map_type;
	if (map_name && kernel_supports(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	attr.btf_fd = OPTS_GET(opts, btf_fd, 0);
	attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
	attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
	attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);

	attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
	attr.map_flags = OPTS_GET(opts, map_flags, 0);
	attr.map_extra = OPTS_GET(opts, map_extra, 0);
	attr.numa_node = OPTS_GET(opts, numa_node, 0);
	attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);

	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	__u64 info_len = (__u64)actual_rec_size * cnt;
	void *info, *nrecord;
	int i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* zero out bytes kernel does not understand */
	nrecord = info;
	for (i = 0; i < cnt; i++) {
		memcpy(nrecord, orecord, expected_rec_size);
		memset(nrecord + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		orecord += actual_rec_size;
		nrecord += actual_rec_size;
	}

	return info;
}

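/* Illustrative usage sketch (editor's note, not part of the original file):
 * loading a minimal "return 0" socket filter. The program name is a made-up
 * example; passing NULL opts selects default load behavior.
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	int prog_fd;
 *
 *	prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "example_prog",
 *				"GPL", insns, ARRAY_SIZE(insns), NULL);
 */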
int bpf_prog_load(enum bpf_prog_type prog_type,
		  const char *prog_name, const char *license,
		  const struct bpf_insn *insns, size_t insn_cnt,
		  const struct bpf_prog_load_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, fd_array);
	void *finfo = NULL, *linfo = NULL;
	const char *func_info, *line_info;
	__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
	__u32 func_info_rec_size, line_info_rec_size;
	int fd, attempts;
	union bpf_attr attr;
	char *log_buf;

	bump_rlimit_memlock();

	if (!OPTS_VALID(opts, bpf_prog_load_opts))
		return libbpf_err(-EINVAL);

	attempts = OPTS_GET(opts, attempts, 0);
	if (attempts < 0)
		return libbpf_err(-EINVAL);
	if (attempts == 0)
		attempts = PROG_LOAD_ATTEMPTS;

	memset(&attr, 0, attr_sz);

	attr.prog_type = prog_type;
	attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);

	attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
	attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
	attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
	attr.kern_version = OPTS_GET(opts, kern_version, 0);

	if (prog_name && kernel_supports(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	attr.license = ptr_to_u64(license);

	if (insn_cnt > UINT_MAX)
		return libbpf_err(-E2BIG);

	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)insn_cnt;

	attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
	attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);

	if (attach_prog_fd && attach_btf_obj_fd)
		return libbpf_err(-EINVAL);

	attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
	if (attach_prog_fd)
		attr.attach_prog_fd = attach_prog_fd;
	else
		attr.attach_btf_obj_fd = attach_btf_obj_fd;

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (!!log_buf != !!log_size)
		return libbpf_err(-EINVAL);
	if (log_level > (4 | 2 | 1))
		return libbpf_err(-EINVAL);
	if (log_level && !log_buf)
		return libbpf_err(-EINVAL);

	func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
	func_info = OPTS_GET(opts, func_info, NULL);
	attr.func_info_rec_size = func_info_rec_size;
	attr.func_info = ptr_to_u64(func_info);
	attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);

	line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
	line_info = OPTS_GET(opts, line_info, NULL);
	attr.line_info_rec_size = line_info_rec_size;
	attr.line_info = ptr_to_u64(line_info);
	attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);

	attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));

	if (log_level) {
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = log_level;
	}

	fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
	if (fd >= 0)
		return fd;

	/* After bpf_prog_load, the kernel may modify certain attributes
	 * to give user space a hint about how to deal with loading failure.
	 * Check to see whether we can make some changes and load again.
	 */
	while (errno == E2BIG && (!finfo || !linfo)) {
		if (!finfo && attr.func_info_cnt &&
		    attr.func_info_rec_size < func_info_rec_size) {
			/* try with corrected func info records */
			finfo = alloc_zero_tailing_info(func_info,
							attr.func_info_cnt,
							func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo) {
				errno = E2BIG;
				goto done;
			}

			attr.func_info = ptr_to_u64(finfo);
			attr.func_info_rec_size = func_info_rec_size;
		} else if (!linfo && attr.line_info_cnt &&
			   attr.line_info_rec_size < line_info_rec_size) {
			linfo = alloc_zero_tailing_info(line_info,
							attr.line_info_cnt,
							line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo) {
				errno = E2BIG;
				goto done;
			}

			attr.line_info = ptr_to_u64(linfo);
			attr.line_info_rec_size = line_info_rec_size;
		} else {
			break;
		}

		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
		if (fd >= 0)
			goto done;
	}

	if (log_level == 0 && log_buf) {
		/* log_level == 0 with non-NULL log_buf requires retrying on error
		 * with log_level == 1 and log_buf/log_size set, to get details of
		 * the failure
		 */
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = 1;

		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
	}
done:
	/* free() doesn't affect errno, so we don't need to restore it */
	free(finfo);
	free(linfo);
	return libbpf_err_errno(fd);
}

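/* Illustrative usage sketch (editor's note, not part of the original file):
 * a single-element round trip through a map with __u32 keys and __u64 values
 * (types and values are example assumptions):
 *
 *	__u32 key = 1;
 *	__u64 val = 42, out;
 *
 *	if (!bpf_map_update_elem(map_fd, &key, &val, BPF_ANY) &&
 *	    !bpf_map_lookup_elem(map_fd, &key, &out))
 *		// out == 42 here
 */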
int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_delete_elem(int fd, const void *key)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

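/* Illustrative usage sketch (editor's note, not part of the original file):
 * iterating all keys of a map with __u32 keys. A NULL "key" argument
 * retrieves the first key; iteration ends with an ENOENT error.
 *
 *	__u32 key, next_key;
 *	int err;
 *
 *	err = bpf_map_get_next_key(map_fd, NULL, &key);
 *	while (!err) {
 *		// ... use key ...
 *		err = bpf_map_get_next_key(map_fd, &key, &next_key);
 *		key = next_key;
 *	}
 */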
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	const size_t attr_sz = offsetofend(union bpf_attr, next_key);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_freeze(int fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_fd);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;

	ret = sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

static int bpf_map_batch_common(int cmd, int fd, void  *in_batch,
				void *out_batch, void *keys, void *values,
				__u32 *count,
				const struct bpf_map_batch_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, batch);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_map_batch_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.batch.map_fd = fd;
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
	attr.batch.count = *count;
	attr.batch.elem_flags  = OPTS_GET(opts, elem_flags, 0);
	attr.batch.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(cmd, &attr, attr_sz);
	*count = attr.batch.count;

	return libbpf_err_errno(ret);
}

int bpf_map_delete_batch(int fd, const void *keys, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
				    NULL, (void *)keys, NULL, count, opts);
}

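/* Illustrative usage sketch (editor's note, not part of the original file):
 * reading a map in batches. "batch" is an opaque cursor whose layout depends
 * on the map type (a __u32 suffices for hash maps); pass NULL as in_batch on
 * the first call, pass &batch on subsequent calls, and expect an ENOENT
 * error once the last batch has been returned.
 *
 *	__u32 keys[64], batch, count = 64;
 *	__u64 vals[64];
 *	int err;
 *
 *	err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, vals,
 *				   &count, NULL);
 *	// on success, "count" holds the number of elements actually read
 */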
int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
			 void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
				    out_batch, keys, values, count, opts);
}

int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
				    fd, in_batch, out_batch, keys, values,
				    count, opts);
}

int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
				    (void *)keys, (void *)values, count, opts);
}

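/* Illustrative usage sketch (editor's note, not part of the original file):
 * pinning an object and re-opening it later; the bpffs path is a made-up
 * example and must live on a mounted bpf filesystem.
 *
 *	bpf_obj_pin(map_fd, "/sys/fs/bpf/example_map");
 *	// ... possibly in another process ...
 *	int fd = bpf_obj_get("/sys/fs/bpf/example_map");
 */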
int bpf_obj_pin(int fd, const char *pathname)
{
	const size_t attr_sz = offsetofend(union bpf_attr, file_flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	ret = sys_bpf(BPF_OBJ_PIN, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_obj_get(const char *pathname)
{
	return bpf_obj_get_opts(pathname, NULL);
}

int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, file_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_obj_get_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.file_flags = OPTS_GET(opts, file_flags, 0);

	fd = sys_bpf_fd(BPF_OBJ_GET, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
		.flags = flags,
	);

	return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts);
}

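/* Illustrative usage sketch (editor's note, not part of the original file):
 * attaching a cgroup program; the cgroup path is a made-up example and
 * prog_fd must refer to a program of a matching type.
 *
 *	int cg_fd = open("/sys/fs/cgroup/example", O_RDONLY);
 *	LIBBPF_OPTS(bpf_prog_attach_opts, opts, .flags = BPF_F_ALLOW_MULTI);
 *
 *	err = bpf_prog_attach_opts(prog_fd, cg_fd, BPF_CGROUP_INET_EGRESS,
 *				   &opts);
 */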
int bpf_prog_attach_opts(int prog_fd, int target_fd,
			  enum bpf_attach_type type,
			  const struct bpf_prog_attach_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_attach_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.target_fd	   = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type   = type;
	attr.attach_flags  = OPTS_GET(opts, flags, 0);
	attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);

	ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.target_fd	 = target_fd;
	attr.attach_type = type;

	ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.target_fd	 = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_link_create(int prog_fd, int target_fd,
		    enum bpf_attach_type attach_type,
		    const struct bpf_link_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_create);
	__u32 target_btf_id, iter_info_len;
	union bpf_attr attr;
	int fd, err;

	if (!OPTS_VALID(opts, bpf_link_create_opts))
		return libbpf_err(-EINVAL);

	iter_info_len = OPTS_GET(opts, iter_info_len, 0);
	target_btf_id = OPTS_GET(opts, target_btf_id, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (iter_info_len || target_btf_id) {
		if (iter_info_len && target_btf_id)
			return libbpf_err(-EINVAL);
		if (!OPTS_ZEROED(opts, target_btf_id))
			return libbpf_err(-EINVAL);
	}

	memset(&attr, 0, attr_sz);
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;
	attr.link_create.flags = OPTS_GET(opts, flags, 0);

	if (target_btf_id) {
		attr.link_create.target_btf_id = target_btf_id;
		goto proceed;
	}

	switch (attach_type) {
	case BPF_TRACE_ITER:
		attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
		attr.link_create.iter_info_len = iter_info_len;
		break;
	case BPF_PERF_EVENT:
		attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0);
		if (!OPTS_ZEROED(opts, perf_event))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_KPROBE_MULTI:
		attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
		attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
		attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
		attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0));
		attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0));
		if (!OPTS_ZEROED(opts, kprobe_multi))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
	case BPF_LSM_MAC:
		attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0);
		if (!OPTS_ZEROED(opts, tracing))
			return libbpf_err(-EINVAL);
		break;
	default:
		if (!OPTS_ZEROED(opts, flags))
			return libbpf_err(-EINVAL);
		break;
	}
proceed:
	fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, attr_sz);
	if (fd >= 0)
		return fd;
	/* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
	 * and other similar programs
	 */
	err = -errno;
	if (err != -EINVAL)
		return libbpf_err(err);

	/* if the user used features not supported by the
	 * BPF_RAW_TRACEPOINT_OPEN command, give up immediately
	 */
	if (attr.link_create.target_fd || attr.link_create.target_btf_id)
		return libbpf_err(err);
	if (!OPTS_ZEROED(opts, sz))
		return libbpf_err(err);

	/* otherwise, for a few select kinds of programs that can be
	 * attached using the BPF_RAW_TRACEPOINT_OPEN command, try that as
	 * a fallback for older kernels
	 */
	switch (attach_type) {
	case BPF_TRACE_RAW_TP:
	case BPF_LSM_MAC:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
		return bpf_raw_tracepoint_open(NULL, prog_fd);
	default:
		return libbpf_err(err);
	}
}

int bpf_link_detach(int link_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_detach);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.link_detach.link_fd = link_fd;

	ret = sys_bpf(BPF_LINK_DETACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_link_update(int link_fd, int new_prog_fd,
		    const struct bpf_link_update_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_update);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_link_update_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.flags = OPTS_GET(opts, flags, 0);
	attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);

	ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_iter_create(int link_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, iter_create);
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, attr_sz);
	attr.iter_create.link_fd = link_fd;

	fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

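/* Illustrative usage sketch (editor's note, not part of the original file):
 * querying which programs are attached to a cgroup; cg_fd is assumed to be
 * an open cgroup directory fd.
 *
 *	__u32 ids[16];
 *	LIBBPF_OPTS(bpf_prog_query_opts, opts,
 *		.prog_ids = ids,
 *		.prog_cnt = 16);
 *
 *	if (!bpf_prog_query_opts(cg_fd, BPF_CGROUP_INET_EGRESS, &opts))
 *		// opts.prog_cnt now holds the number of attached programs
 */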
int bpf_prog_query_opts(int target_fd,
			enum bpf_attach_type type,
			struct bpf_prog_query_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, query);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_query_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);

	attr.query.target_fd	= target_fd;
	attr.query.attach_type	= type;
	attr.query.query_flags	= OPTS_GET(opts, query_flags, 0);
	attr.query.prog_cnt	= OPTS_GET(opts, prog_cnt, 0);
	attr.query.prog_ids	= ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
	attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));

	ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz);

	OPTS_SET(opts, attach_flags, attr.query.attach_flags);
	OPTS_SET(opts, prog_cnt, attr.query.prog_cnt);

	return libbpf_err_errno(ret);
}

int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	LIBBPF_OPTS(bpf_prog_query_opts, opts);
	int ret;

	opts.query_flags = query_flags;
	opts.prog_ids = prog_ids;
	opts.prog_cnt = *prog_cnt;

	ret = bpf_prog_query_opts(target_fd, type, &opts);

	if (attach_flags)
		*attach_flags = opts.attach_flags;
	*prog_cnt = opts.prog_cnt;

	return libbpf_err_errno(ret);
}

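/* Illustrative usage sketch (editor's note, not part of the original file):
 * test-running a packet-processing program against a dummy input buffer;
 * buffer contents and sizes are example assumptions.
 *
 *	char data_in[64] = {}, data_out[64];
 *	LIBBPF_OPTS(bpf_test_run_opts, topts,
 *		.data_in = data_in,
 *		.data_size_in = sizeof(data_in),
 *		.data_out = data_out,
 *		.data_size_out = sizeof(data_out),
 *		.repeat = 1);
 *
 *	if (!bpf_prog_test_run_opts(prog_fd, &topts))
 *		// topts.retval and topts.duration hold the results
 */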
int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, test);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_test_run_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.test.prog_fd = prog_fd;
	attr.test.batch_size = OPTS_GET(opts, batch_size, 0);
	attr.test.cpu = OPTS_GET(opts, cpu, 0);
	attr.test.flags = OPTS_GET(opts, flags, 0);
	attr.test.repeat = OPTS_GET(opts, repeat, 0);
	attr.test.duration = OPTS_GET(opts, duration, 0);
	attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
	attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
	attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
	attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
	attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
	attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
	attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
	attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, attr_sz);

	OPTS_SET(opts, data_size_out, attr.test.data_size_out);
	OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
	OPTS_SET(opts, duration, attr.test.duration);
	OPTS_SET(opts, retval, attr.test.retval);

	return libbpf_err_errno(ret);
}

static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.start_id = start_id;

	err = sys_bpf(cmd, &attr, attr_sz);
	if (!err)
		*next_id = attr.next_id;

	return libbpf_err_errno(err);
}

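/* Illustrative usage sketch (editor's note, not part of the original file):
 * walking all loaded programs by ID; the loop terminates once the kernel
 * reports ENOENT for the last ID.
 *
 *	__u32 id = 0;
 *
 *	while (!bpf_prog_get_next_id(id, &id)) {
 *		// ... inspect program with this id ...
 *	}
 */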
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}

int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}

int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
}

int bpf_prog_get_fd_by_id_opts(__u32 id,
			       const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.prog_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_get_fd_by_id(__u32 id)
{
	return bpf_prog_get_fd_by_id_opts(id, NULL);
}

int bpf_map_get_fd_by_id_opts(__u32 id,
			      const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.map_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_map_get_fd_by_id(__u32 id)
{
	return bpf_map_get_fd_by_id_opts(id, NULL);
}

int bpf_btf_get_fd_by_id_opts(__u32 id,
			      const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.btf_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_btf_get_fd_by_id(__u32 id)
{
	return bpf_btf_get_fd_by_id_opts(id, NULL);
}

int bpf_link_get_fd_by_id_opts(__u32 id,
			       const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.link_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_link_get_fd_by_id(__u32 id)
{
	return bpf_link_get_fd_by_id_opts(id, NULL);
}

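/* Illustrative usage sketch (editor's note, not part of the original file):
 * fetching metadata for a program ID obtained via bpf_prog_get_next_id():
 *
 *	struct bpf_prog_info info = {};
 *	__u32 len = sizeof(info);
 *	int fd = bpf_prog_get_fd_by_id(id);
 *
 *	if (fd >= 0 && !bpf_obj_get_info_by_fd(fd, &info, &len))
 *		// info.name, info.id, etc. are now populated
 */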
int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
	const size_t attr_sz = offsetofend(union bpf_attr, info);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.info.bpf_fd = bpf_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz);
	if (!err)
		*info_len = attr.info.info_len;
	return libbpf_err_errno(err);
}

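/* Illustrative usage sketch (editor's note, not part of the original file):
 * attaching a BPF_PROG_TYPE_RAW_TRACEPOINT program to a tracepoint; closing
 * the returned fd detaches it.
 *
 *	int rtp_fd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
 */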
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, attr_sz);
	attr.raw_tracepoint.name = ptr_to_u64(name);
	attr.raw_tracepoint.prog_fd = prog_fd;

	fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level);
	union bpf_attr attr;
	char *log_buf;
	size_t log_size;
	__u32 log_level;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_btf_load_opts))
		return libbpf_err(-EINVAL);

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (log_size > UINT_MAX)
		return libbpf_err(-EINVAL);
	if (log_size && !log_buf)
		return libbpf_err(-EINVAL);

	attr.btf = ptr_to_u64(btf_data);
	attr.btf_size = btf_size;
	/* log_level == 0 and log_buf != NULL means "try loading without
	 * log_buf, but retry with log_buf and log_level=1 on error", which is
	 * consistent across low-level and high-level BTF and program loading
	 * APIs within libbpf and provides a sensible behavior in practice
	 */
	if (log_level) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = log_level;
	}

	fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	if (fd < 0 && log_buf && log_level == 0) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = 1;
		fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	}
	return libbpf_err_errno(fd);
}

int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	const size_t attr_sz = offsetofend(union bpf_attr, task_fd_query);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf = ptr_to_u64(buf);
	attr.task_fd_query.buf_len = *buf_len;

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, attr_sz);

	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return libbpf_err_errno(err);
}

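/* Illustrative usage sketch (editor's note, not part of the original file):
 * enabling run-time statistics collection; stats (e.g. run_time_ns/run_cnt
 * in struct bpf_prog_info) are gathered only while the fd stays open.
 *
 *	int stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
 *	// ... read program info ...
 *	close(stats_fd);
 */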
int bpf_enable_stats(enum bpf_stats_type type)
{
	const size_t attr_sz = offsetofend(union bpf_attr, enable_stats);
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, attr_sz);
	attr.enable_stats.type = type;

	fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_bind_map(int prog_fd, int map_fd,
		      const struct bpf_prog_bind_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_bind_map);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_bind_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
	return libbpf_err_errno(ret);
}