// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
{
	int fd;

	do {
		fd = sys_bpf(BPF_PROG_LOAD, attr, size);
	} while (fd < 0 && errno == EAGAIN);

	return fd;
}

int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
{
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = create_attr->map_type;
	attr.key_size = create_attr->key_size;
	attr.value_size = create_attr->value_size;
	attr.max_entries = create_attr->max_entries;
	attr.map_flags = create_attr->map_flags;
	if (create_attr->name)
		memcpy(attr.map_name, create_attr->name,
		       min(strlen(create_attr->name), BPF_OBJ_NAME_LEN - 1));
	attr.numa_node = create_attr->numa_node;
	attr.btf_fd = create_attr->btf_fd;
	attr.btf_key_type_id = create_attr->btf_key_type_id;
	attr.btf_value_type_id = create_attr->btf_value_type_id;
	attr.map_ifindex = create_attr->map_ifindex;
	if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS)
		attr.btf_vmlinux_value_type_id =
			create_attr->btf_vmlinux_value_type_id;
	else
		attr.inner_map_fd = create_attr->inner_map_fd;

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags, int node)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.name = name;
	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;
	if (node >= 0) {
		map_attr.numa_node = node;
		map_attr.map_flags |= BPF_F_NUMA_NODE;
	}

	return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map(enum bpf_map_type map_type, int key_size,
		   int value_size, int max_entries, __u32 map_flags)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;

	return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.name = name;
	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;

	return bpf_create_map_xattr(&map_attr);
}
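/*
 * Usage sketch (illustrative, not part of libbpf): creating a named hash
 * map with the thin wrappers above.  The map name, key/value sizes and
 * entry count are arbitrary example values.
 *
 *	int map_fd;
 *
 *	map_fd = bpf_create_map_name(BPF_MAP_TYPE_HASH, "example_map",
 *				     sizeof(__u32), sizeof(__u64), 1024, 0);
 *	if (map_fd < 0)
 *		fprintf(stderr, "map creation failed: %s\n", strerror(errno));
 *
 * On success the return value is a new file descriptor referring to the
 * map; the map is freed once the last descriptor referring to it is closed
 * and it is not pinned anywhere.
 */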
int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
			       int key_size, int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = 4;
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	if (name)
		memcpy(attr.map_name, name,
		       min(strlen(name), BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
			  int key_size, int inner_map_fd, int max_entries,
			  __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, name, key_size,
					  inner_map_fd, max_entries, map_flags,
					  -1);
}
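/*
 * Usage sketch (illustrative, not part of libbpf): building a map-in-map.
 * The outer array-of-maps is created from an already existing inner map fd;
 * the wrapper above forces the outer value size to 4 bytes (the slot that
 * holds an inner map fd/id).  Names and sizes are example values.
 *
 *	int inner_fd, outer_fd;
 *
 *	inner_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(__u32),
 *				  sizeof(__u64), 16, 0);
 *	outer_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer",
 *					 sizeof(__u32), inner_fd, 8, 0);
 */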
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	__u64 info_len = (__u64)actual_rec_size * cnt;
	void *info, *nrecord;
	int i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* zero out bytes kernel does not understand */
	nrecord = info;
	for (i = 0; i < cnt; i++) {
		memcpy(nrecord, orecord, expected_rec_size);
		memset(nrecord + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		orecord += actual_rec_size;
		nrecord += actual_rec_size;
	}

	return info;
}

int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
			   char *log_buf, size_t log_buf_sz)
{
	void *finfo = NULL, *linfo = NULL;
	union bpf_attr attr;
	__u32 log_level;
	int fd;

	if (!load_attr || !log_buf != !log_buf_sz)
		return -EINVAL;

	log_level = load_attr->log_level;
	if (log_level > (4 | 2 | 1) || (log_level && !log_buf))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = load_attr->prog_type;
	attr.expected_attach_type = load_attr->expected_attach_type;
	if (attr.prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
	    attr.prog_type == BPF_PROG_TYPE_LSM) {
		attr.attach_btf_id = load_attr->attach_btf_id;
	} else if (attr.prog_type == BPF_PROG_TYPE_TRACING ||
		   attr.prog_type == BPF_PROG_TYPE_EXT) {
		attr.attach_btf_id = load_attr->attach_btf_id;
		attr.attach_prog_fd = load_attr->attach_prog_fd;
	} else {
		attr.prog_ifindex = load_attr->prog_ifindex;
		attr.kern_version = load_attr->kern_version;
	}
	attr.insn_cnt = (__u32)load_attr->insns_cnt;
	attr.insns = ptr_to_u64(load_attr->insns);
	attr.license = ptr_to_u64(load_attr->license);

	attr.log_level = log_level;
	if (log_level) {
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_buf_sz;
	} else {
		attr.log_buf = ptr_to_u64(NULL);
		attr.log_size = 0;
	}

	attr.prog_btf_fd = load_attr->prog_btf_fd;
	attr.func_info_rec_size = load_attr->func_info_rec_size;
	attr.func_info_cnt = load_attr->func_info_cnt;
	attr.func_info = ptr_to_u64(load_attr->func_info);
	attr.line_info_rec_size = load_attr->line_info_rec_size;
	attr.line_info_cnt = load_attr->line_info_cnt;
	attr.line_info = ptr_to_u64(load_attr->line_info);
	if (load_attr->name)
		memcpy(attr.prog_name, load_attr->name,
		       min(strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
	attr.prog_flags = load_attr->prog_flags;

	fd = sys_bpf_prog_load(&attr, sizeof(attr));
	if (fd >= 0)
		return fd;

	/* After bpf_prog_load, the kernel may modify certain attributes
	 * to give user space a hint how to deal with loading failure.
	 * Check to see whether we can make some changes and load again.
	 */
	while (errno == E2BIG && (!finfo || !linfo)) {
		if (!finfo && attr.func_info_cnt &&
		    attr.func_info_rec_size < load_attr->func_info_rec_size) {
			/* try with corrected func info records */
			finfo = alloc_zero_tailing_info(load_attr->func_info,
							load_attr->func_info_cnt,
							load_attr->func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo)
				goto done;

			attr.func_info = ptr_to_u64(finfo);
			attr.func_info_rec_size = load_attr->func_info_rec_size;
		} else if (!linfo && attr.line_info_cnt &&
			   attr.line_info_rec_size <
			   load_attr->line_info_rec_size) {
			linfo = alloc_zero_tailing_info(load_attr->line_info,
							load_attr->line_info_cnt,
							load_attr->line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo)
				goto done;

			attr.line_info = ptr_to_u64(linfo);
			attr.line_info_rec_size = load_attr->line_info_rec_size;
		} else {
			break;
		}

		fd = sys_bpf_prog_load(&attr, sizeof(attr));

		if (fd >= 0)
			goto done;
	}

	if (log_level || !log_buf)
		goto done;

	/* Try again with log */
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = 1;
	log_buf[0] = 0;
	fd = sys_bpf_prog_load(&attr, sizeof(attr));
done:
	free(finfo);
	free(linfo);
	return fd;
}

int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		     size_t insns_cnt, const char *license,
		     __u32 kern_version, char *log_buf,
		     size_t log_buf_sz)
{
	struct bpf_load_program_attr load_attr;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = 0;
	load_attr.name = NULL;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;

	return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
}

int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, __u32 prog_flags, const char *license,
		       __u32 kern_version, char *log_buf, size_t log_buf_sz,
		       int log_level)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	log_buf[0] = 0;
	attr.kern_version = kern_version;
	attr.prog_flags = prog_flags;

	return sys_bpf_prog_load(&attr, sizeof(attr));
}

int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}

int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
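/*
 * Usage sketch (illustrative, not part of libbpf): loading a minimal
 * "return 0" socket filter and exercising the element accessors above.
 * The instructions are written as raw struct bpf_insn initializers so no
 * extra helper macros are needed; map_fd is assumed to come from the
 * earlier map-creation sketch, and all other values are examples.
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },	// r0 = 0
 *		{ .code = BPF_JMP | BPF_EXIT },					// exit
 *	};
 *	char log_buf[4096];
 *	__u32 key = 1;
 *	__u64 val = 42, out;
 *	int prog_fd, err;
 *
 *	prog_fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns, 2,
 *				   "GPL", 0, log_buf, sizeof(log_buf));
 *	// on failure, log_buf holds the verifier log
 *
 *	err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 *	err = bpf_map_lookup_elem(map_fd, &key, &out);
 */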
int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
}

int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}

int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}

int bpf_map_freeze(int fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;

	return sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
}

static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
				void *out_batch, void *keys, void *values,
				__u32 *count,
				const struct bpf_map_batch_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_map_batch_opts))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.batch.map_fd = fd;
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
	attr.batch.count = *count;
	attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
	attr.batch.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(cmd, &attr, sizeof(attr));
	*count = attr.batch.count;

	return ret;
}

int bpf_map_delete_batch(int fd, void *keys, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
				    NULL, keys, NULL, count, opts);
}

int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
			 void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
				    out_batch, keys, values, count, opts);
}

int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
				    fd, in_batch, out_batch, keys, values,
				    count, opts);
}

int bpf_map_update_batch(int fd, void *keys, void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
				    keys, values, count, opts);
}

int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}

int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}
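/*
 * Usage sketch (illustrative, not part of libbpf): walking every key of a
 * map with bpf_map_get_next_key().  Passing NULL as the current key asks
 * the kernel for the first key (supported on reasonably recent kernels);
 * a negative return with errno == ENOENT marks the end of the map.
 * map_fd and the key/value types are assumptions from the earlier sketches.
 *
 *	__u32 key, next_key;
 *	__u64 value;
 *	int err;
 *
 *	err = bpf_map_get_next_key(map_fd, NULL, &next_key);
 *	while (!err) {
 *		key = next_key;
 *		if (!bpf_map_lookup_elem(map_fd, &key, &value))
 *			;	// key/value pair is valid here
 *		err = bpf_map_get_next_key(map_fd, &key, &next_key);
 *	}
 */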
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
		.flags = flags,
	);

	return bpf_prog_attach_xattr(prog_fd, target_fd, type, &opts);
}

int bpf_prog_attach_xattr(int prog_fd, int target_fd,
			  enum bpf_attach_type type,
			  const struct bpf_prog_attach_opts *opts)
{
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_prog_attach_opts))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.attach_flags = OPTS_GET(opts, flags, 0);
	attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);

	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}

int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}

int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}

int bpf_link_create(int prog_fd, int target_fd,
		    enum bpf_attach_type attach_type,
		    const struct bpf_link_create_opts *opts)
{
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_link_create_opts))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;

	return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
}

int bpf_link_update(int link_fd, int new_prog_fd,
		    const struct bpf_link_update_opts *opts)
{
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_link_update_opts))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.flags = OPTS_GET(opts, flags, 0);
	attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);

	return sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
}

int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd = target_fd;
	attr.query.attach_type = type;
	attr.query.query_flags = query_flags;
	attr.query.prog_cnt = *prog_cnt;
	attr.query.prog_ids = ptr_to_u64(prog_ids);

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
	if (attach_flags)
		*attach_flags = attr.query.attach_flags;
	*prog_cnt = attr.query.prog_cnt;
	return ret;
}
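/*
 * Usage sketch (illustrative, not part of libbpf): attaching a cgroup
 * program and querying what is attached.  cgroup_fd is assumed to be an
 * open fd for a cgroup v2 directory and prog_fd a loaded program of a
 * cgroup-compatible type; both are assumptions, not defined here.
 *
 *	__u32 prog_ids[16], prog_cnt = 16, attach_flags;
 *	int err;
 *
 *	err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS,
 *			      BPF_F_ALLOW_MULTI);
 *	err = bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_INGRESS, 0,
 *			     &attach_flags, prog_ids, &prog_cnt);
 *	// on success prog_cnt holds the number of ids written to prog_ids
 */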
int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_out = ptr_to_u64(data_out);
	attr.test.data_size_in = size;
	attr.test.repeat = repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;
	return ret;
}

int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
{
	union bpf_attr attr;
	int ret;

	if (!test_attr->data_out && test_attr->data_size_out > 0)
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = test_attr->prog_fd;
	attr.test.data_in = ptr_to_u64(test_attr->data_in);
	attr.test.data_out = ptr_to_u64(test_attr->data_out);
	attr.test.data_size_in = test_attr->data_size_in;
	attr.test.data_size_out = test_attr->data_size_out;
	attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in);
	attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out);
	attr.test.ctx_size_in = test_attr->ctx_size_in;
	attr.test.ctx_size_out = test_attr->ctx_size_out;
	attr.test.repeat = test_attr->repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	test_attr->data_size_out = attr.test.data_size_out;
	test_attr->ctx_size_out = attr.test.ctx_size_out;
	test_attr->retval = attr.test.retval;
	test_attr->duration = attr.test.duration;
	return ret;
}

static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(cmd, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}

int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}

int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}

int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_btf_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.btf_id = id;

	return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*info_len = attr.info.info_len;

	return err;
}

int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = ptr_to_u64(name);
	attr.raw_tracepoint.prog_fd = prog_fd;

	return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}
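/*
 * Usage sketch (illustrative, not part of libbpf): iterating over all
 * loaded programs by id and fetching their bpf_prog_info.  A program can
 * disappear between the two calls, so a failed fd lookup is simply skipped.
 *
 *	struct bpf_prog_info info;
 *	__u32 id = 0, info_len;
 *	int fd;
 *
 *	while (!bpf_prog_get_next_id(id, &id)) {
 *		fd = bpf_prog_get_fd_by_id(id);
 *		if (fd < 0)
 *			continue;
 *		memset(&info, 0, sizeof(info));
 *		info_len = sizeof(info);
 *		if (!bpf_obj_get_info_by_fd(fd, &info, &info_len))
 *			;	// info.id, info.name, ... are valid here
 *		close(fd);
 *	}
 */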
int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
		 bool do_log)
{
	union bpf_attr attr = {};
	int fd;

	attr.btf = ptr_to_u64(btf);
	attr.btf_size = btf_size;

retry:
	if (do_log && log_buf && log_buf_size) {
		attr.btf_log_level = 1;
		attr.btf_log_size = log_buf_size;
		attr.btf_log_buf = ptr_to_u64(log_buf);
	}

	fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
	if (fd == -1 && !do_log && log_buf && log_buf_size) {
		do_log = true;
		goto retry;
	}

	return fd;
}

int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	union bpf_attr attr = {};
	int err;

	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf = ptr_to_u64(buf);
	attr.task_fd_query.buf_len = *buf_len;

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));
	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return err;
}
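/*
 * Usage sketch (illustrative, not part of libbpf): running a loaded program
 * once over a dummy input buffer with bpf_prog_test_run() and reading back
 * the return value.  prog_fd is an assumption from the earlier load sketch;
 * buffer sizes are example values.
 *
 *	char data_in[64] = {}, data_out[256];
 *	__u32 size_out = 0, retval = 0, duration = 0;
 *	int err;
 *
 *	err = bpf_prog_test_run(prog_fd, 1, data_in, sizeof(data_in),
 *				data_out, &size_out, &retval, &duration);
 *	// retval is the program's return code, duration the per-run time in ns
 */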