// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include <limits.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
			     unsigned int size)
{
	int fd;

	fd = sys_bpf(cmd, attr, size);
	return ensure_good_fd(fd);
}

#define PROG_LOAD_ATTEMPTS 5

static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{
	int fd;

	do {
		fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
	} while (fd < 0 && errno == EAGAIN && --attempts > 0);

	return fd;
}

int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = create_attr->map_type;
	attr.key_size = create_attr->key_size;
	attr.value_size = create_attr->value_size;
	attr.max_entries = create_attr->max_entries;
	attr.map_flags = create_attr->map_flags;
	if (create_attr->name)
		memcpy(attr.map_name, create_attr->name,
		       min(strlen(create_attr->name), BPF_OBJ_NAME_LEN - 1));
	attr.numa_node = create_attr->numa_node;
	attr.btf_fd = create_attr->btf_fd;
	attr.btf_key_type_id = create_attr->btf_key_type_id;
	attr.btf_value_type_id = create_attr->btf_value_type_id;
	attr.map_ifindex = create_attr->map_ifindex;
	if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS)
		attr.btf_vmlinux_value_type_id =
			create_attr->btf_vmlinux_value_type_id;
	else
		attr.inner_map_fd = create_attr->inner_map_fd;
	attr.map_extra = create_attr->map_extra;

	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
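/*
 * Usage sketch (illustrative only, not part of libbpf): creating a
 * small array map through the public wrappers defined below. The map
 * name, sizes and entry count are hypothetical; error handling is
 * reduced to a single check.
 *
 *	int map_fd;
 *
 *	map_fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "my_counters",
 *				     sizeof(__u32), sizeof(__u64),
 *				     256, 0);
 *	if (map_fd < 0)
 *		fprintf(stderr, "map creation failed: %d\n", map_fd);
 */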
int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
{
	struct bpf_create_map_params p = {};

	p.map_type = create_attr->map_type;
	p.key_size = create_attr->key_size;
	p.value_size = create_attr->value_size;
	p.max_entries = create_attr->max_entries;
	p.map_flags = create_attr->map_flags;
	p.name = create_attr->name;
	p.numa_node = create_attr->numa_node;
	p.btf_fd = create_attr->btf_fd;
	p.btf_key_type_id = create_attr->btf_key_type_id;
	p.btf_value_type_id = create_attr->btf_value_type_id;
	p.map_ifindex = create_attr->map_ifindex;
	if (p.map_type == BPF_MAP_TYPE_STRUCT_OPS)
		p.btf_vmlinux_value_type_id =
			create_attr->btf_vmlinux_value_type_id;
	else
		p.inner_map_fd = create_attr->inner_map_fd;

	return libbpf__bpf_create_map_xattr(&p);
}

int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags, int node)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.name = name;
	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;
	if (node >= 0) {
		map_attr.numa_node = node;
		map_attr.map_flags |= BPF_F_NUMA_NODE;
	}

	return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map(enum bpf_map_type map_type, int key_size,
		   int value_size, int max_entries, __u32 map_flags)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;

	return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.name = name;
	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;

	return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
			       int key_size, int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = 4;
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	if (name)
		memcpy(attr.map_name, name,
		       min(strlen(name), BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
			  int key_size, int inner_map_fd, int max_entries,
			  __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, name, key_size,
					  inner_map_fd, max_entries, map_flags,
					  -1);
}
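/*
 * Usage sketch (illustrative only): a hash-of-maps whose inner maps
 * are small arrays. Names and sizes are hypothetical.
 *
 *	int inner_fd, outer_fd;
 *
 *	inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32),
 *				  sizeof(__u64), 16, 0);
 *	outer_fd = bpf_create_map_in_map(BPF_MAP_TYPE_HASH_OF_MAPS,
 *					 "outer", sizeof(__u32),
 *					 inner_fd, 128, 0);
 *	// inner_fd only serves as a type template and may be closed
 *	// once the outer map has been created.
 */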
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	__u64 info_len = (__u64)actual_rec_size * cnt;
	void *info, *nrecord;
	int i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* zero out bytes kernel does not understand */
	nrecord = info;
	for (i = 0; i < cnt; i++) {
		memcpy(nrecord, orecord, expected_rec_size);
		memset(nrecord + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		orecord += actual_rec_size;
		nrecord += actual_rec_size;
	}

	return info;
}
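/*
 * Worked example (illustrative): suppose user space emits 16-byte
 * func_info records but the running kernel only knows the first
 * 8 bytes (actual_rec_size == 16, expected_rec_size == 8, as
 * reported back by the kernel in attr.func_info_rec_size). The
 * helper above copies the 8 understood bytes of each record and
 * zeroes the remaining 8, so the load can be retried with the full
 * 16-byte record size and unknown tail bytes that are provably zero.
 */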
DEFAULT_VERSION(bpf_prog_load_v0_6_0, bpf_prog_load, LIBBPF_0.6.0)
int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
			 const char *prog_name, const char *license,
			 const struct bpf_insn *insns, size_t insn_cnt,
			 const struct bpf_prog_load_opts *opts)
{
	void *finfo = NULL, *linfo = NULL;
	const char *func_info, *line_info;
	__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
	__u32 func_info_rec_size, line_info_rec_size;
	int fd, attempts;
	union bpf_attr attr;
	char *log_buf;

	if (!OPTS_VALID(opts, bpf_prog_load_opts))
		return libbpf_err(-EINVAL);

	attempts = OPTS_GET(opts, attempts, 0);
	if (attempts < 0)
		return libbpf_err(-EINVAL);
	if (attempts == 0)
		attempts = PROG_LOAD_ATTEMPTS;

	memset(&attr, 0, sizeof(attr));

	attr.prog_type = prog_type;
	attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);

	attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
	attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
	attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
	attr.kern_version = OPTS_GET(opts, kern_version, 0);

	if (prog_name)
		strncat(attr.prog_name, prog_name, sizeof(attr.prog_name) - 1);
	attr.license = ptr_to_u64(license);

	if (insn_cnt > UINT_MAX)
		return libbpf_err(-E2BIG);

	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)insn_cnt;

	attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
	attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);

	if (attach_prog_fd && attach_btf_obj_fd)
		return libbpf_err(-EINVAL);

	attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
	if (attach_prog_fd)
		attr.attach_prog_fd = attach_prog_fd;
	else
		attr.attach_btf_obj_fd = attach_btf_obj_fd;

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (!!log_buf != !!log_size)
		return libbpf_err(-EINVAL);
	if (log_level > (4 | 2 | 1))
		return libbpf_err(-EINVAL);
	if (log_level && !log_buf)
		return libbpf_err(-EINVAL);

	attr.log_level = log_level;
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_size;

	func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
	func_info = OPTS_GET(opts, func_info, NULL);
	attr.func_info_rec_size = func_info_rec_size;
	attr.func_info = ptr_to_u64(func_info);
	attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);

	line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
	line_info = OPTS_GET(opts, line_info, NULL);
	attr.line_info_rec_size = line_info_rec_size;
	attr.line_info = ptr_to_u64(line_info);
	attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);

	attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));

	fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
	if (fd >= 0)
		return fd;

	/* After bpf_prog_load, the kernel may modify certain attributes
	 * to give user space a hint how to deal with loading failure.
	 * Check to see whether we can make some changes and load again.
	 */
	while (errno == E2BIG && (!finfo || !linfo)) {
		if (!finfo && attr.func_info_cnt &&
		    attr.func_info_rec_size < func_info_rec_size) {
			/* try with corrected func info records */
			finfo = alloc_zero_tailing_info(func_info,
							attr.func_info_cnt,
							func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo) {
				errno = E2BIG;
				goto done;
			}

			attr.func_info = ptr_to_u64(finfo);
			attr.func_info_rec_size = func_info_rec_size;
		} else if (!linfo && attr.line_info_cnt &&
			   attr.line_info_rec_size < line_info_rec_size) {
			linfo = alloc_zero_tailing_info(line_info,
							attr.line_info_cnt,
							line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo) {
				errno = E2BIG;
				goto done;
			}

			attr.line_info = ptr_to_u64(linfo);
			attr.line_info_rec_size = line_info_rec_size;
		} else {
			break;
		}

		fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
		if (fd >= 0)
			goto done;
	}

	if (log_level || !log_buf)
		goto done;

	/* Try again with log */
	log_buf[0] = 0;
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_size;
	attr.log_level = 1;

	fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
done:
	/* free() doesn't affect errno, so we don't need to restore it */
	free(finfo);
	free(linfo);
	return libbpf_err_errno(fd);
}

__attribute__((alias("bpf_load_program_xattr2")))
int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
			   char *log_buf, size_t log_buf_sz);

static int bpf_load_program_xattr2(const struct bpf_load_program_attr *load_attr,
				   char *log_buf, size_t log_buf_sz)
{
	LIBBPF_OPTS(bpf_prog_load_opts, p);

	if (!load_attr || !log_buf != !log_buf_sz)
		return libbpf_err(-EINVAL);

	p.expected_attach_type = load_attr->expected_attach_type;
	switch (load_attr->prog_type) {
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_LSM:
		p.attach_btf_id = load_attr->attach_btf_id;
		break;
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_EXT:
		p.attach_btf_id = load_attr->attach_btf_id;
		p.attach_prog_fd = load_attr->attach_prog_fd;
		break;
	default:
		p.prog_ifindex = load_attr->prog_ifindex;
		p.kern_version = load_attr->kern_version;
	}
	p.log_level = load_attr->log_level;
	p.log_buf = log_buf;
	p.log_size = log_buf_sz;
	p.prog_btf_fd = load_attr->prog_btf_fd;
	p.func_info_rec_size = load_attr->func_info_rec_size;
	p.func_info_cnt = load_attr->func_info_cnt;
	p.func_info = load_attr->func_info;
	p.line_info_rec_size = load_attr->line_info_rec_size;
	p.line_info_cnt = load_attr->line_info_cnt;
	p.line_info = load_attr->line_info;
	p.prog_flags = load_attr->prog_flags;

	return bpf_prog_load(load_attr->prog_type, load_attr->name, load_attr->license,
			     load_attr->insns, load_attr->insns_cnt, &p);
}
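/*
 * Usage sketch (illustrative only): loading a trivial "return 0"
 * socket filter through bpf_prog_load() with a verifier log buffer.
 * Assumes the BPF_MOV64_IMM()/BPF_EXIT_INSN() instruction macros
 * from the selftests' bpf_insn.h; they are not defined in this file.
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),	// r0 = 0
 *		BPF_EXIT_INSN(),		// return r0
 *	};
 *	char log[4096];
 *	LIBBPF_OPTS(bpf_prog_load_opts, opts,
 *		.log_buf = log,
 *		.log_size = sizeof(log),
 *		.log_level = 1,
 *	);
 *	int fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "noop", "GPL",
 *			       insns, sizeof(insns) / sizeof(insns[0]),
 *			       &opts);
 *	if (fd < 0)
 *		fprintf(stderr, "load failed: %s\n", log);
 */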
int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		     size_t insns_cnt, const char *license,
		     __u32 kern_version, char *log_buf,
		     size_t log_buf_sz)
{
	struct bpf_load_program_attr load_attr;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = 0;
	load_attr.name = NULL;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;

	return bpf_load_program_xattr2(&load_attr, log_buf, log_buf_sz);
}

int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, __u32 prog_flags, const char *license,
		       __u32 kern_version, char *log_buf, size_t log_buf_sz,
		       int log_level)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	log_buf[0] = 0;
	attr.kern_version = kern_version;
	attr.prog_flags = prog_flags;

	fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS);
	return libbpf_err_errno(fd);
}

int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
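/*
 * Usage sketch (illustrative only): updating, reading back and
 * iterating a map with the element-level helpers above. Assumes a
 * hypothetical map_fd with 4-byte keys and 8-byte values.
 *
 *	__u32 key = 1, cur, next;
 *	__u64 value = 42;
 *	int err;
 *
 *	bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
 *	bpf_map_lookup_elem(map_fd, &key, &value);
 *
 *	// full iteration: a NULL key yields the first key
 *	err = bpf_map_get_next_key(map_fd, NULL, &cur);
 *	while (!err) {
 *		// ... process cur ...
 *		err = bpf_map_get_next_key(map_fd, &cur, &next);
 *		cur = next;
 *	}
 */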
int bpf_map_freeze(int fd)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;

	ret = sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
				void *out_batch, void *keys, void *values,
				__u32 *count,
				const struct bpf_map_batch_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_map_batch_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.batch.map_fd = fd;
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
	attr.batch.count = *count;
	attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
	attr.batch.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(cmd, &attr, sizeof(attr));
	*count = attr.batch.count;

	return libbpf_err_errno(ret);
}

int bpf_map_delete_batch(int fd, void *keys, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
				    NULL, keys, NULL, count, opts);
}

int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
			 void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
				    out_batch, keys, values, count, opts);
}

int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
				    fd, in_batch, out_batch, keys, values,
				    count, opts);
}

int bpf_map_update_batch(int fd, void *keys, void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
				    keys, values, count, opts);
}

int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	ret = sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
		.flags = flags,
	);

	return bpf_prog_attach_xattr(prog_fd, target_fd, type, &opts);
}

int bpf_prog_attach_xattr(int prog_fd, int target_fd,
			  enum bpf_attach_type type,
			  const struct bpf_prog_attach_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_attach_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.attach_flags = OPTS_GET(opts, flags, 0);
	attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);

	ret = sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
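/*
 * Usage sketch (illustrative only): attaching a program to a cgroup
 * and later replacing it atomically. cgroup_fd, prog_fd and
 * new_prog_fd are hypothetical descriptors.
 *
 *	bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS,
 *			BPF_F_ALLOW_MULTI);
 *
 *	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
 *		.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE,
 *		.replace_prog_fd = prog_fd,
 *	);
 *	bpf_prog_attach_xattr(new_prog_fd, cgroup_fd,
 *			      BPF_CGROUP_INET_EGRESS, &opts);
 */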
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_type = type;

	ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_link_create(int prog_fd, int target_fd,
		    enum bpf_attach_type attach_type,
		    const struct bpf_link_create_opts *opts)
{
	__u32 target_btf_id, iter_info_len;
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_link_create_opts))
		return libbpf_err(-EINVAL);

	iter_info_len = OPTS_GET(opts, iter_info_len, 0);
	target_btf_id = OPTS_GET(opts, target_btf_id, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (iter_info_len || target_btf_id) {
		if (iter_info_len && target_btf_id)
			return libbpf_err(-EINVAL);
		if (!OPTS_ZEROED(opts, target_btf_id))
			return libbpf_err(-EINVAL);
	}

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;
	attr.link_create.flags = OPTS_GET(opts, flags, 0);

	if (target_btf_id) {
		attr.link_create.target_btf_id = target_btf_id;
		goto proceed;
	}

	switch (attach_type) {
	case BPF_TRACE_ITER:
		attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
		attr.link_create.iter_info_len = iter_info_len;
		break;
	case BPF_PERF_EVENT:
		attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0);
		if (!OPTS_ZEROED(opts, perf_event))
			return libbpf_err(-EINVAL);
		break;
	default:
		if (!OPTS_ZEROED(opts, flags))
			return libbpf_err(-EINVAL);
		break;
	}
proceed:
	fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_link_detach(int link_fd)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.link_detach.link_fd = link_fd;

	ret = sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_link_update(int link_fd, int new_prog_fd,
		    const struct bpf_link_update_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_link_update_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.flags = OPTS_GET(opts, flags, 0);
	attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);

	ret = sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_iter_create(int link_fd)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.iter_create.link_fd = link_fd;

	fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
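/*
 * Usage sketch (illustrative only): turning a loaded BPF_TRACE_ITER
 * program into a readable iterator. prog_fd is a hypothetical fd for
 * an already-loaded iterator program; error handling is omitted.
 *
 *	int link_fd, iter_fd;
 *	char buf[4096];
 *	ssize_t n;
 *
 *	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_ITER, NULL);
 *	iter_fd = bpf_iter_create(link_fd);
 *	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
 *		; // consume iterator output
 */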
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd = target_fd;
	attr.query.attach_type = type;
	attr.query.query_flags = query_flags;
	attr.query.prog_cnt = *prog_cnt;
	attr.query.prog_ids = ptr_to_u64(prog_ids);

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));

	if (attach_flags)
		*attach_flags = attr.query.attach_flags;
	*prog_cnt = attr.query.prog_cnt;

	return libbpf_err_errno(ret);
}

int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_out = ptr_to_u64(data_out);
	attr.test.data_size_in = size;
	attr.test.repeat = repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;

	return libbpf_err_errno(ret);
}

int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
{
	union bpf_attr attr;
	int ret;

	if (!test_attr->data_out && test_attr->data_size_out > 0)
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = test_attr->prog_fd;
	attr.test.data_in = ptr_to_u64(test_attr->data_in);
	attr.test.data_out = ptr_to_u64(test_attr->data_out);
	attr.test.data_size_in = test_attr->data_size_in;
	attr.test.data_size_out = test_attr->data_size_out;
	attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in);
	attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out);
	attr.test.ctx_size_in = test_attr->ctx_size_in;
	attr.test.ctx_size_out = test_attr->ctx_size_out;
	attr.test.repeat = test_attr->repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

	test_attr->data_size_out = attr.test.data_size_out;
	test_attr->ctx_size_out = attr.test.ctx_size_out;
	test_attr->retval = attr.test.retval;
	test_attr->duration = attr.test.duration;

	return libbpf_err_errno(ret);
}

int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_test_run_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.cpu = OPTS_GET(opts, cpu, 0);
	attr.test.flags = OPTS_GET(opts, flags, 0);
	attr.test.repeat = OPTS_GET(opts, repeat, 0);
	attr.test.duration = OPTS_GET(opts, duration, 0);
	attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
	attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
	attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
	attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
	attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
	attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
	attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
	attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

	OPTS_SET(opts, data_size_out, attr.test.data_size_out);
	OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
	OPTS_SET(opts, duration, attr.test.duration);
	OPTS_SET(opts, retval, attr.test.retval);

	return libbpf_err_errno(ret);
}
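/*
 * Usage sketch (illustrative only): a single BPF_PROG_TEST_RUN
 * invocation on an XDP program with a dummy Ethernet frame. prog_fd
 * and packet contents are hypothetical; note the kernel may need a
 * larger data_out buffer if the program grows the packet.
 *
 *	char pkt_in[64] = { 0 };
 *	char pkt_out[128];
 *	LIBBPF_OPTS(bpf_test_run_opts, topts,
 *		.data_in = pkt_in,
 *		.data_size_in = sizeof(pkt_in),
 *		.data_out = pkt_out,
 *		.data_size_out = sizeof(pkt_out),
 *		.repeat = 1,
 *	);
 *	if (!bpf_prog_test_run_opts(prog_fd, &topts))
 *		printf("retval=%u duration=%uns\n",
 *		       topts.retval, topts.duration);
 */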
static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(cmd, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return libbpf_err_errno(err);
}

int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}

int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}

int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
}

int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_btf_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.btf_id = id;

	fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_link_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.link_id = id;

	fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = bpf_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));

	if (!err)
		*info_len = attr.info.info_len;

	return libbpf_err_errno(err);
}

int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = ptr_to_u64(name);
	attr.raw_tracepoint.prog_fd = prog_fd;

	fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
		 bool do_log)
{
	union bpf_attr attr = {};
	int fd;

	attr.btf = ptr_to_u64(btf);
	attr.btf_size = btf_size;

retry:
	if (do_log && log_buf && log_buf_size) {
		attr.btf_log_level = 1;
		attr.btf_log_size = log_buf_size;
		attr.btf_log_buf = ptr_to_u64(log_buf);
	}

	fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, sizeof(attr));

	if (fd < 0 && !do_log && log_buf && log_buf_size) {
		do_log = true;
		goto retry;
	}

	return libbpf_err_errno(fd);
}
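/*
 * Usage sketch (illustrative only): walking every loaded program in
 * the system with the ID-iteration helpers above and fetching its
 * bpf_prog_info. Typically requires privileges; prints with a
 * hypothetical printf.
 *
 *	struct bpf_prog_info info;
 *	__u32 id = 0, len;
 *	int fd;
 *
 *	while (bpf_prog_get_next_id(id, &id) == 0) {
 *		fd = bpf_prog_get_fd_by_id(id);
 *		if (fd < 0)
 *			continue;
 *		memset(&info, 0, sizeof(info));
 *		len = sizeof(info);
 *		if (!bpf_obj_get_info_by_fd(fd, &info, &len))
 *			printf("id=%u name=%s\n", info.id, info.name);
 *		close(fd);
 *	}
 */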
int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	union bpf_attr attr = {};
	int err;

	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf = ptr_to_u64(buf);
	attr.task_fd_query.buf_len = *buf_len;

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));

	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return libbpf_err_errno(err);
}

int bpf_enable_stats(enum bpf_stats_type type)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.enable_stats.type = type;

	fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_prog_bind_map(int prog_fd, int map_fd,
		      const struct bpf_prog_bind_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_bind_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
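/*
 * Usage sketch (illustrative only): enabling run-time statistics
 * collection for all BPF programs. Stats stay enabled for as long as
 * the returned fd is held open; closing it disables them again.
 *
 *	int stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
 *	if (stats_fd >= 0) {
 *		// ... read run_time_ns/run_cnt from bpf_prog_info via
 *		// bpf_obj_get_info_by_fd() ...
 *		close(stats_fd);
 *	}
 */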