// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

/* vfprintf() in __base_pr() uses a nonliteral format string. It may break
 * compilation if the user enables the corresponding warning. Disable it
 * explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b) __attribute__((format(printf, a, b)))

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

void libbpf_set_print(libbpf_print_fn_t fn)
{
	__libbpf_pr = fn;
}

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}
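/*
 * Illustrative sketch (not part of the library): a caller can silence or
 * redirect libbpf's logging by installing its own printer, mirroring
 * __base_pr() above, before making any other libbpf call. The name my_pr()
 * is hypothetical:
 *
 *	static int my_pr(enum libbpf_print_level level, const char *fmt,
 *			 va_list args)
 *	{
 *		return level == LIBBPF_DEBUG ? 0 : vfprintf(stderr, fmt, args);
 *	}
 *
 *	libbpf_set_print(my_pr);
 */
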
#define STRERR_BUFSIZE 128

#define CHECK_ERR(action, err, out) do {	\
	err = action;				\
	if (err)				\
		goto out;			\
} while (0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
	/* v5.2: kernel support for global data sections. */
	__u32 global_data:1;
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	__u32 btf_func:1;
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	__u32 btf_datasec:1;
};

/*
 * bpf_prog would be a better name, but it is already used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	struct reloc_desc {
		enum {
			RELO_LD64,
			RELO_CALL,
			RELO_DATA,
		} type;
		int insn_idx;
		union {
			int map_idx;
			int text_off;
		};
	} *reloc_desc;
	int nr_reloc;
	int log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	int btf_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	struct bpf_capabilities *caps;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= ".data",
	[LIBBPF_MAP_BSS]	= ".bss",
	[LIBBPF_MAP_RODATA]	= ".rodata",
};

struct bpf_map {
	int fd;
	char *name;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
};

struct bpf_secdata {
	void *rodata;
	void *data;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;
	struct bpf_secdata sections;

	bool loaded;
	bool has_pseudo_calls;

	/*
	 * Information used when doing ELF related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		int maps_shndx;
		int btf_maps_shndx;
		int text_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
	} efile;
	/*
	 * All loaded bpf_object instances are linked in a list, which is
	 * hidden from the caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;

	char path[];
};
#define obj_elf_valid(o)	((o)->efile.elf)

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warning("Internal error: instances.nr is %d\n",
			   prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zclose(prog->btf_fd);
	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	name = p = strdup(prog->section_name);
	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}

static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	const size_t bpf_insn_sz = sizeof(struct bpf_insn);

	if (size == 0 || size % bpf_insn_sz) {
		pr_warning("corrupted section '%s', size: %zu\n",
			   section_name, size);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warning("failed to alloc name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name) {
		pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warning("failed to alloc insns for prog under section %s\n",
			   section_name);
		goto errout;
	}
	prog->insns_cnt = size / bpf_insn_sz;
	memcpy(prog->insns, data, size);
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->btf_fd = -1;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs is still valid,
		 * so no special treatment is needed in bpf_object__close().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}

static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	/* Using basename() GNU version which doesn't modify arg. */
	strncpy(obj->name, basename((void *)path), sizeof(obj->name) - 1);
	end = strchr(obj->name, '.');
	if (end)
		*end = 0;

	obj->efile.fd = -1;
	/*
	 * Caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid the user freeing it before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory(obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warning("failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP, NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n", obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if (ep->e_type != ET_REL ||
	    (ep->e_machine && ep->e_machine != EM_BPF)) {
		pr_warning("%s is not an eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warning("endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warning("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}
static int compare_bpf_map(const void *_a, const void *_b)
{
	const struct bpf_map *a = _a;
	const struct bpf_map *b = _b;

	if (a->sec_idx != b->sec_idx)
		return a->sec_idx - b->sec_idx;
	return a->sec_offset - b->sec_offset;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

static int bpf_object_search_section_size(const struct bpf_object *obj,
					  const char *name, size_t *d_size)
{
	const GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf *elf = obj->efile.elf;
	Elf_Scn *scn = NULL;
	int idx = 0;

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		const char *sec_name;
		Elf_Data *data;
		GElf_Shdr sh;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section(%d) header from %s\n",
				   idx, obj->path);
			return -EIO;
		}

		sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!sec_name) {
			pr_warning("failed to get section(%d) name from %s\n",
				   idx, obj->path);
			return -EIO;
		}

		if (strcmp(name, sec_name))
			continue;

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section(%d) data from %s(%s)\n",
				   idx, name, obj->path);
			return -EIO;
		}

		*d_size = data->d_size;
		return 0;
	}

	return -ENOENT;
}
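
/*
 * Illustrative use of the two lookup helpers below (hypothetical caller,
 * shown only as a sketch; "my_var" is an example name):
 *
 *	__u32 sz, off;
 *
 *	if (!bpf_object__section_size(obj, ".data", &sz) &&
 *	    !bpf_object__variable_offset(obj, "my_var", &off))
 *		pr_debug("my_var lives at offset %u of %u-byte .data\n",
 *			 off, sz);
 */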
int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size)
{
	int ret = -ENOENT;
	size_t d_size;

	*size = 0;
	if (!name) {
		return -EINVAL;
	} else if (!strcmp(name, ".data")) {
		if (obj->efile.data)
			*size = obj->efile.data->d_size;
	} else if (!strcmp(name, ".bss")) {
		if (obj->efile.bss)
			*size = obj->efile.bss->d_size;
	} else if (!strcmp(name, ".rodata")) {
		if (obj->efile.rodata)
			*size = obj->efile.rodata->d_size;
	} else {
		ret = bpf_object_search_section_size(obj, name, &d_size);
		if (!ret)
			*size = d_size;
	}

	return *size ? 0 : ret;
}

int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, si, &sym))
			continue;
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				   sym.st_name);
		if (!sname) {
			pr_warning("failed to get sym name string for var %s\n",
				   name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
	if (!new_maps) {
		pr_warning("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out new maps */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
	/*
	 * Fill all fds with -1 so we won't close an incorrect fd (fd=0 is
	 * stdin) on failure (zclose won't close a negative fd).
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      int sec_idx, Elf_Data *data, void **data_buff)
{
	char map_name[BPF_OBJ_NAME_LEN];
	struct bpf_map_def *def;
	struct bpf_map *map;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
		 libbpf_type_to_btf_name[type]);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warning("failed to alloc map name\n");
		return -ENOMEM;
	}
	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu.\n",
		 map_name, map->sec_idx, map->sec_offset);

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data->d_size;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0;
	if (data_buff) {
		*data_buff = malloc(data->d_size);
		if (!*data_buff) {
			zfree(&map->name);
			pr_warning("failed to alloc map content buffer\n");
			return -ENOMEM;
		}
		memcpy(*data_buff, data->d_buf, data->d_size);
	}

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	int err;

	if (!obj->caps.global_data)
		return 0;
	/*
	 * Populate obj->maps with libbpf internal maps: each of the .data,
	 * .rodata and .bss sections becomes a single-entry array map whose
	 * one value spans the whole section.
	 */
	if (obj->efile.data_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
						    obj->efile.data_shndx,
						    obj->efile.data,
						    &obj->sections.data);
		if (err)
			return err;
	}
	if (obj->efile.rodata_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
						    obj->efile.rodata_shndx,
						    obj->efile.rodata,
						    &obj->sections.rodata);
		if (err)
			return err;
	}
	if (obj->efile.bss_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
						    obj->efile.bss_shndx,
						    obj->efile.bss, NULL);
		if (err)
			return err;
	}
	return 0;
}
static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
	Elf_Data *symbols = obj->efile.symbols;
	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.maps_shndx < 0)
		return 0;

	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d\n",
			   obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	nr_syms = symbols->d_size / sizeof(GElf_Sym);
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}
	/* Assume equally sized map definitions */
	pr_debug("maps in %s: %d maps in %zd bytes\n",
		 obj->path, nr_maps, data->d_size);

	if (!data->d_size || !nr_maps || (data->d_size % nr_maps) != 0) {
		pr_warning("unable to determine map definition size, "
			   "section in %s: %d maps in %zd bytes\n",
			   obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}
	map_def_sz = data->d_size / nr_maps;

	/* Fill obj->maps using data in "maps" section. */
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;
		struct bpf_map *map;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				      sym.st_name);
		if (!map_name) {
			pr_warning("failed to get map #%d name sym string for obj %s\n",
				   i, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		map->libbpf_type = LIBBPF_MAP_UNSPEC;
		map->sec_idx = sym.st_shndx;
		map->sec_offset = sym.st_value;
		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
			 map_name, map->sec_idx, map->sec_offset);
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
				   obj->path, map_name);
			return -EINVAL;
		}

		map->name = strdup(map_name);
		if (!map->name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", i, map->name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it. Any extra fields in our version of
		 * bpf_map_def will default to zero, because
		 * bpf_object__add_map() zero-initializes new map slots.
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&map->def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than
			 * what we expect, truncate if the excess bits are
			 * all zero. If they are not zero, reject this map
			 * as incompatible.
			 */
			char *b;

			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warning("maps section in %s: \"%s\" "
						   "has unrecognized, non-zero "
						   "options\n",
						   obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&map->def, def, sizeof(struct bpf_map_def));
		}
	}
	return 0;
}

static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	while (true) {
		switch (BTF_INFO_KIND(t->info)) {
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_TYPEDEF:
			t = btf__type_by_id(btf, t->type);
			break;
		default:
			return t;
		}
	}
}

/*
 * Fetch integer attribute of BTF map definition. Such attributes are
 * represented using a pointer to an array, in which dimensionality of array
 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
 * type definition, while using only sizeof(void *) space in ELF data section.
 */
static bool get_map_field_int(const char *map_name, const struct btf *btf,
			      const struct btf_type *def,
			      const struct btf_member *m, __u32 *res)
{
	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type);
	const char *name = btf__name_by_offset(btf, m->name_off);
	const struct btf_array *arr_info;
	const struct btf_type *arr_t;

	if (BTF_INFO_KIND(t->info) != BTF_KIND_PTR) {
		pr_warning("map '%s': attr '%s': expected PTR, got %u.\n",
			   map_name, name, BTF_INFO_KIND(t->info));
		return false;
	}

	arr_t = btf__type_by_id(btf, t->type);
	if (!arr_t) {
		pr_warning("map '%s': attr '%s': type [%u] not found.\n",
			   map_name, name, t->type);
		return false;
	}
	if (BTF_INFO_KIND(arr_t->info) != BTF_KIND_ARRAY) {
		pr_warning("map '%s': attr '%s': expected ARRAY, got %u.\n",
			   map_name, name, BTF_INFO_KIND(arr_t->info));
		return false;
	}
	arr_info = (const void *)(arr_t + 1);
	*res = arr_info->nelems;
	return true;
}
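
/*
 * Illustrative sketch of the BTF-defined map layout parsed below, as it
 * would appear in BPF program source (the field set and encoding match
 * this parser; map, key and value types are examples only):
 *
 *	struct {
 *		int (*type)[BPF_MAP_TYPE_ARRAY];
 *		int (*max_entries)[42];
 *		__u32 *key;
 *		__u64 *value;
 *	} my_map SEC(".maps");
 *
 * Each attribute is encoded purely in BTF; vi->offset and vi->size below
 * locate the variable inside the ".maps" DATASEC.
 */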
static int bpf_object__init_user_btf_map(struct bpf_object *obj,
					 const struct btf_type *sec,
					 int var_idx, int sec_idx,
					 const Elf_Data *data, bool strict)
{
	const struct btf_type *var, *def, *t;
	const struct btf_var_secinfo *vi;
	const struct btf_var *var_extra;
	const struct btf_member *m;
	const char *map_name;
	struct bpf_map *map;
	int vlen, i;

	vi = (const struct btf_var_secinfo *)(const void *)(sec + 1) + var_idx;
	var = btf__type_by_id(obj->btf, vi->type);
	var_extra = (const void *)(var + 1);
	map_name = btf__name_by_offset(obj->btf, var->name_off);
	vlen = BTF_INFO_VLEN(var->info);

	if (map_name == NULL || map_name[0] == '\0') {
		pr_warning("map #%d: empty name.\n", var_idx);
		return -EINVAL;
	}
	if ((__u64)vi->offset + vi->size > data->d_size) {
		pr_warning("map '%s' BTF data is corrupted.\n", map_name);
		return -EINVAL;
	}
	if (BTF_INFO_KIND(var->info) != BTF_KIND_VAR) {
		pr_warning("map '%s': unexpected var kind %u.\n",
			   map_name, BTF_INFO_KIND(var->info));
		return -EINVAL;
	}
	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
	    var_extra->linkage != BTF_VAR_STATIC) {
		pr_warning("map '%s': unsupported var linkage %u.\n",
			   map_name, var_extra->linkage);
		return -EOPNOTSUPP;
	}

	def = skip_mods_and_typedefs(obj->btf, var->type);
	if (BTF_INFO_KIND(def->info) != BTF_KIND_STRUCT) {
		pr_warning("map '%s': unexpected def kind %u.\n",
			   map_name, BTF_INFO_KIND(def->info));
		return -EINVAL;
	}
	if (def->size > vi->size) {
		pr_warning("map '%s': invalid def size.\n", map_name);
		return -EINVAL;
	}

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warning("map '%s': failed to alloc map name.\n", map_name);
		return -ENOMEM;
	}
	map->libbpf_type = LIBBPF_MAP_UNSPEC;
	map->def.type = BPF_MAP_TYPE_UNSPEC;
	map->sec_idx = sec_idx;
	map->sec_offset = vi->offset;
	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
		 map_name, map->sec_idx, map->sec_offset);

	vlen = BTF_INFO_VLEN(def->info);
	m = (const void *)(def + 1);
	for (i = 0; i < vlen; i++, m++) {
		const char *name = btf__name_by_offset(obj->btf, m->name_off);

		if (!name) {
			pr_warning("map '%s': invalid field #%d.\n",
				   map_name, i);
			return -EINVAL;
		}
		if (strcmp(name, "type") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.type))
				return -EINVAL;
			pr_debug("map '%s': found type = %u.\n",
				 map_name, map->def.type);
		} else if (strcmp(name, "max_entries") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.max_entries))
				return -EINVAL;
			pr_debug("map '%s': found max_entries = %u.\n",
				 map_name, map->def.max_entries);
		} else if (strcmp(name, "map_flags") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.map_flags))
				return -EINVAL;
			pr_debug("map '%s': found map_flags = %u.\n",
				 map_name, map->def.map_flags);
		} else if (strcmp(name, "key_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &sz))
				return -EINVAL;
			pr_debug("map '%s': found key_size = %u.\n",
				 map_name, sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warning("map '%s': conflicting key size %u != %u.\n",
					   map_name, map->def.key_size, sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
		} else if (strcmp(name, "key") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warning("map '%s': key type [%d] not found.\n",
					   map_name, m->type);
				return -EINVAL;
			}
			if (BTF_INFO_KIND(t->info) != BTF_KIND_PTR) {
				pr_warning("map '%s': key spec is not PTR: %u.\n",
					   map_name, BTF_INFO_KIND(t->info));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warning("map '%s': can't determine key size for type [%u]: %lld.\n",
					   map_name, t->type, sz);
				return sz;
			}
			pr_debug("map '%s': found key [%u], sz = %lld.\n",
				 map_name, t->type, sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warning("map '%s': conflicting key size %u != %lld.\n",
					   map_name, map->def.key_size, sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
			map->btf_key_type_id = t->type;
		} else if (strcmp(name, "value_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &sz))
				return -EINVAL;
			pr_debug("map '%s': found value_size = %u.\n",
				 map_name, sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warning("map '%s': conflicting value size %u != %u.\n",
					   map_name, map->def.value_size, sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
		} else if (strcmp(name, "value") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warning("map '%s': value type [%d] not found.\n",
					   map_name, m->type);
				return -EINVAL;
			}
			if (BTF_INFO_KIND(t->info) != BTF_KIND_PTR) {
				pr_warning("map '%s': value spec is not PTR: %u.\n",
					   map_name, BTF_INFO_KIND(t->info));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warning("map '%s': can't determine value size for type [%u]: %lld.\n",
					   map_name, t->type, sz);
				return sz;
			}
			pr_debug("map '%s': found value [%u], sz = %lld.\n",
				 map_name, t->type, sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warning("map '%s': conflicting value size %u != %lld.\n",
					   map_name, map->def.value_size, sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
			map->btf_value_type_id = t->type;
		} else {
			if (strict) {
				pr_warning("map '%s': unknown field '%s'.\n",
					   map_name, name);
				return -ENOTSUP;
			}
			pr_debug("map '%s': ignoring unknown field '%s'.\n",
				 map_name, name);
		}
	}

	if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
		pr_warning("map '%s': map type isn't specified.\n", map_name);
		return -EINVAL;
	}

	return 0;
}

static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
{
	const struct btf_type *sec = NULL;
	int nr_types, i, vlen, err;
	const struct btf_type *t;
	const char *name;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.btf_maps_shndx < 0)
		return 0;

	scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d (%s)\n",
			   obj->efile.btf_maps_shndx, MAPS_ELF_SEC);
		return -EINVAL;
	}

	nr_types = btf__get_nr_types(obj->btf);
	for (i = 1; i <= nr_types; i++) {
		t = btf__type_by_id(obj->btf, i);
		if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
			continue;
		name = btf__name_by_offset(obj->btf, t->name_off);
		if (strcmp(name, MAPS_ELF_SEC) == 0) {
			sec = t;
			break;
		}
	}

	if (!sec) {
		pr_warning("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
		return -ENOENT;
	}

	vlen = BTF_INFO_VLEN(sec->info);
	for (i = 0; i < vlen; i++) {
		err = bpf_object__init_user_btf_map(obj, sec, i,
						    obj->efile.btf_maps_shndx,
						    data, strict);
		if (err)
			return err;
	}

	return 0;
}

static int bpf_object__init_maps(struct bpf_object *obj, int flags)
{
	bool strict = !(flags & MAPS_RELAX_COMPAT);
	int err;

	err = bpf_object__init_user_maps(obj, strict);
	if (err)
		return err;

	err = bpf_object__init_user_btf_maps(obj, strict);
	if (err)
		return err;

	err = bpf_object__init_global_data_maps(obj);
	if (err)
		return err;

	if (obj->nr_maps) {
		qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
		      compare_bpf_map);
	}
	return 0;
}

static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
	Elf_Scn *scn;
	GElf_Shdr sh;

	scn = elf_getscn(obj->efile.elf, idx);
	if (!scn)
		return false;

	if (gelf_getshdr(scn, &sh) != &sh)
		return false;

	if (sh.sh_flags & SHF_EXECINSTR)
		return true;

	return false;
}

static void bpf_object__sanitize_btf(struct bpf_object *obj)
{
	bool has_datasec = obj->caps.btf_datasec;
	bool has_func = obj->caps.btf_func;
	struct btf *btf = obj->btf;
	struct btf_type *t;
	int i, j, vlen;
	__u16 kind;

	if (!obj->btf || (has_func && has_datasec))
		return;

	for (i = 1; i <= btf__get_nr_types(btf); i++) {
		t = (struct btf_type *)btf__type_by_id(btf, i);
		kind = BTF_INFO_KIND(t->info);

		if (!has_datasec && kind == BTF_KIND_VAR) {
			/* replace VAR with INT */
			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
			t->size = sizeof(int);
			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 32);
		} else if (!has_datasec && kind == BTF_KIND_DATASEC) {
			/* replace DATASEC with STRUCT */
			struct btf_var_secinfo *v = (void *)(t + 1);
			struct btf_member *m = (void *)(t + 1);
			struct btf_type *vt;
			char *name;

			name = (char *)btf__name_by_offset(btf, t->name_off);
			while (*name) {
				if (*name == '.')
					*name = '_';
				name++;
			}

			vlen = BTF_INFO_VLEN(t->info);
			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
			for (j = 0; j < vlen; j++, v++, m++) {
				/* order of field assignments is important */
				m->offset = v->offset * 8;
				m->type = v->type;
				/* preserve variable name as member name */
				vt = (void *)btf__type_by_id(btf, v->type);
				m->name_off = vt->name_off;
			}
		} else if (!has_func && kind == BTF_KIND_FUNC_PROTO) {
			/* replace FUNC_PROTO with ENUM */
			vlen = BTF_INFO_VLEN(t->info);
			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
			t->size = sizeof(__u32); /* kernel enforced */
		} else if (!has_func && kind == BTF_KIND_FUNC) {
			/* replace FUNC with TYPEDEF */
			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
		}
	}
}

static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
{
	if (!obj->btf_ext)
		return;

	if (!obj->caps.btf_func) {
		btf_ext__free(obj->btf_ext);
		obj->btf_ext = NULL;
	}
}

static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
{
	return obj->efile.btf_maps_shndx >= 0;
}

static int bpf_object__init_btf(struct bpf_object *obj,
				Elf_Data *btf_data,
				Elf_Data *btf_ext_data)
{
	bool btf_required = bpf_object__is_btf_mandatory(obj);
	int err = 0;

	if (btf_data) {
		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
		if (IS_ERR(obj->btf)) {
			pr_warning("Error loading ELF section %s: %ld.\n",
				   BTF_ELF_SEC, PTR_ERR(obj->btf));
			goto out;
		}
		err = btf__finalize_data(obj, obj->btf);
		if (err) {
			pr_warning("Error finalizing %s: %d.\n",
				   BTF_ELF_SEC, err);
			goto out;
		}
	}
	if (btf_ext_data) {
		if (!obj->btf) {
			pr_debug("Ignoring ELF section %s because the ELF section %s it depends on was not found.\n",
				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
			goto out;
		}
		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
					    btf_ext_data->d_size);
		if (IS_ERR(obj->btf_ext)) {
			pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
				   BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
			obj->btf_ext = NULL;
			goto out;
		}
	}
out:
	if (err || IS_ERR(obj->btf)) {
		if (btf_required)
			err = err ? : PTR_ERR(obj->btf);
		else
			err = 0;
		if (!IS_ERR_OR_NULL(obj->btf))
			btf__free(obj->btf);
		obj->btf = NULL;
	}
	if (btf_required && !obj->btf) {
		pr_warning("BTF is required, but is missing or corrupted.\n");
		return err == 0 ? -ENOENT : err;
	}
	return 0;
}

static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
	int err = 0;

	if (!obj->btf)
		return 0;

	bpf_object__sanitize_btf(obj);
	bpf_object__sanitize_btf_ext(obj);

	err = btf__load(obj->btf);
	if (err) {
		pr_warning("Error loading %s into kernel: %d.\n",
			   BTF_ELF_SEC, err);
		btf__free(obj->btf);
		obj->btf = NULL;
		if (bpf_object__is_btf_mandatory(obj))
			return err;
	}
	return 0;
}
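
/*
 * Single pass over all ELF sections: "license"/"version" are parsed
 * immediately, "maps"/".maps" indices and BTF section data are stashed
 * for the map and BTF initializers invoked at the end, executable
 * PROGBITS sections become bpf_programs, and SHT_REL sections are
 * collected for the later relocation pass.
 */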
static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
{
	Elf *elf = obj->efile.elf;
	GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf_Data *btf_ext_data = NULL;
	Elf_Data *btf_data = NULL;
	Elf_Scn *scn = NULL;
	int idx = 0, err = 0;

	/* ELF is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
		pr_warning("failed to get e_shstrndx from %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		char *name;
		GElf_Shdr sh;
		Elf_Data *data;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section(%d) header from %s\n",
				   idx, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warning("failed to get section(%d) name from %s\n",
				   idx, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section(%d) data from %s(%s)\n",
				   idx, name, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
			 idx, name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0) {
			err = bpf_object__init_license(obj,
						       data->d_buf,
						       data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "version") == 0) {
			err = bpf_object__init_kversion(obj,
							data->d_buf,
							data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "maps") == 0) {
			obj->efile.maps_shndx = idx;
		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
			obj->efile.btf_maps_shndx = idx;
		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = data;
		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = data;
		} else if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warning("bpf: multiple SYMTAB in %s\n",
					   obj->path);
				return -LIBBPF_ERRNO__FORMAT;
			}
			obj->efile.symbols = data;
			obj->efile.strtabidx = sh.sh_link;
		} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
			if (sh.sh_flags & SHF_EXECINSTR) {
				if (strcmp(name, ".text") == 0)
					obj->efile.text_shndx = idx;
				err = bpf_object__add_program(obj, data->d_buf,
							      data->d_size, name, idx);
				if (err) {
					char errmsg[STRERR_BUFSIZE];
					char *cp = libbpf_strerror_r(-err, errmsg,
								     sizeof(errmsg));

					pr_warning("failed to alloc program %s (%s): %s\n",
						   name, obj->path, cp);
					return err;
				}
			} else if (strcmp(name, ".data") == 0) {
				obj->efile.data = data;
				obj->efile.data_shndx = idx;
			} else if (strcmp(name, ".rodata") == 0) {
				obj->efile.rodata = data;
				obj->efile.rodata_shndx = idx;
			} else {
				pr_debug("skip section(%d) %s\n", idx, name);
			}
		} else if (sh.sh_type == SHT_REL) {
			int nr_reloc = obj->efile.nr_reloc;
			void *reloc = obj->efile.reloc;
			int sec = sh.sh_info; /* points to other section */

			/* Only do relo for section with exec instructions */
			if (!section_have_execinstr(obj, sec)) {
				pr_debug("skip relo %s(%d) for section(%d)\n",
					 name, idx, sec);
				continue;
			}

			reloc = reallocarray(reloc, nr_reloc + 1,
					     sizeof(*obj->efile.reloc));
			if (!reloc) {
				pr_warning("realloc failed\n");
				return -ENOMEM;
			}

			obj->efile.reloc = reloc;
			obj->efile.nr_reloc++;

			obj->efile.reloc[nr_reloc].shdr = sh;
			obj->efile.reloc[nr_reloc].data = data;
		} else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
			obj->efile.bss = data;
			obj->efile.bss_shndx = idx;
		} else {
			pr_debug("skip section(%d) %s\n", idx, name);
		}
	}

	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
		pr_warning("Corrupted ELF file: index of strtab invalid\n");
		return -LIBBPF_ERRNO__FORMAT;
	}
	err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
	if (!err)
		err = bpf_object__init_maps(obj, flags);
	if (!err)
		err = bpf_object__sanitize_and_load_btf(obj);
	if (!err)
		err = bpf_object__init_prog_names(obj);
	return err;
}

static struct bpf_program *
bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
{
	struct bpf_program *prog;
	size_t i;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		if (prog->idx == idx)
			return prog;
	}
	return NULL;
}

struct bpf_program *
bpf_object__find_program_by_title(const struct bpf_object *obj,
				  const char *title)
{
	struct bpf_program *pos;

	bpf_object__for_each_program(pos, obj) {
		if (pos->section_name && !strcmp(pos->section_name, title))
			return pos;
	}
	return NULL;
}

static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
				      int shndx)
{
	return shndx == obj->efile.data_shndx ||
	       shndx == obj->efile.bss_shndx ||
	       shndx == obj->efile.rodata_shndx;
}
static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
				      int shndx)
{
	return shndx == obj->efile.maps_shndx ||
	       shndx == obj->efile.btf_maps_shndx;
}

static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
					      int shndx)
{
	return shndx == obj->efile.text_shndx ||
	       bpf_object__shndx_is_maps(obj, shndx) ||
	       bpf_object__shndx_is_data(obj, shndx);
}

static enum libbpf_map_type
bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
{
	if (shndx == obj->efile.data_shndx)
		return LIBBPF_MAP_DATA;
	else if (shndx == obj->efile.bss_shndx)
		return LIBBPF_MAP_BSS;
	else if (shndx == obj->efile.rodata_shndx)
		return LIBBPF_MAP_RODATA;
	else
		return LIBBPF_MAP_UNSPEC;
}
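
/*
 * Three kinds of relocation descriptors are produced below: RELO_LD64
 * marks a BPF_LD_IMM64 insn whose imm must become a map fd, RELO_DATA
 * marks an access to a global data section that is later rewritten to
 * point into the section's internal map, and RELO_CALL marks a
 * bpf-to-bpf call whose target offset lives in .text. The descriptors
 * are consumed by the relocation step at load time.
 */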
: "<?>"; 1757 1758 pr_debug("relo for %lld value %lld name %d (\'%s\')\n", 1759 (long long) (rel.r_info >> 32), 1760 (long long) sym.st_value, sym.st_name, name); 1761 1762 shdr_idx = sym.st_shndx; 1763 if (!bpf_object__relo_in_known_section(obj, shdr_idx)) { 1764 pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n", 1765 prog->section_name, shdr_idx); 1766 return -LIBBPF_ERRNO__RELOC; 1767 } 1768 1769 insn_idx = rel.r_offset / sizeof(struct bpf_insn); 1770 pr_debug("relocation: insn_idx=%u\n", insn_idx); 1771 1772 if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) { 1773 if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) { 1774 pr_warning("incorrect bpf_call opcode\n"); 1775 return -LIBBPF_ERRNO__RELOC; 1776 } 1777 prog->reloc_desc[i].type = RELO_CALL; 1778 prog->reloc_desc[i].insn_idx = insn_idx; 1779 prog->reloc_desc[i].text_off = sym.st_value; 1780 obj->has_pseudo_calls = true; 1781 continue; 1782 } 1783 1784 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) { 1785 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n", 1786 insn_idx, insns[insn_idx].code); 1787 return -LIBBPF_ERRNO__RELOC; 1788 } 1789 1790 if (bpf_object__shndx_is_maps(obj, shdr_idx) || 1791 bpf_object__shndx_is_data(obj, shdr_idx)) { 1792 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx); 1793 if (type != LIBBPF_MAP_UNSPEC) { 1794 if (GELF_ST_BIND(sym.st_info) == STB_GLOBAL) { 1795 pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n", 1796 name, insn_idx, insns[insn_idx].code); 1797 return -LIBBPF_ERRNO__RELOC; 1798 } 1799 if (!obj->caps.global_data) { 1800 pr_warning("bpf: relocation: kernel does not support global \'%s\' variable access in insns[%d]\n", 1801 name, insn_idx); 1802 return -LIBBPF_ERRNO__RELOC; 1803 } 1804 } 1805 1806 for (map_idx = 0; map_idx < nr_maps; map_idx++) { 1807 if (maps[map_idx].libbpf_type != type) 1808 continue; 1809 if (type != LIBBPF_MAP_UNSPEC || 1810 (maps[map_idx].sec_idx == sym.st_shndx && 1811 maps[map_idx].sec_offset == sym.st_value)) { 1812 pr_debug("relocation: found map %zd (%s, sec_idx %d, offset %zu) for insn %u\n", 1813 map_idx, maps[map_idx].name, 1814 maps[map_idx].sec_idx, 1815 maps[map_idx].sec_offset, 1816 insn_idx); 1817 break; 1818 } 1819 } 1820 1821 if (map_idx >= nr_maps) { 1822 pr_warning("bpf relocation: map_idx %d larger than %d\n", 1823 (int)map_idx, (int)nr_maps - 1); 1824 return -LIBBPF_ERRNO__RELOC; 1825 } 1826 1827 prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ? 1828 RELO_DATA : RELO_LD64; 1829 prog->reloc_desc[i].insn_idx = insn_idx; 1830 prog->reloc_desc[i].map_idx = map_idx; 1831 } 1832 } 1833 return 0; 1834 } 1835 1836 static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map) 1837 { 1838 struct bpf_map_def *def = &map->def; 1839 __u32 key_type_id = 0, value_type_id = 0; 1840 int ret; 1841 1842 /* if it's BTF-defined map, we don't need to search for type IDs */ 1843 if (map->sec_idx == obj->efile.btf_maps_shndx) 1844 return 0; 1845 1846 if (!bpf_map__is_internal(map)) { 1847 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size, 1848 def->value_size, &key_type_id, 1849 &value_type_id); 1850 } else { 1851 /* 1852 * LLVM annotates global data differently in BTF, that is, 1853 * only as '.data', '.bss' or '.rodata'. 
1854 */ 1855 ret = btf__find_by_name(obj->btf, 1856 libbpf_type_to_btf_name[map->libbpf_type]); 1857 } 1858 if (ret < 0) 1859 return ret; 1860 1861 map->btf_key_type_id = key_type_id; 1862 map->btf_value_type_id = bpf_map__is_internal(map) ? 1863 ret : value_type_id; 1864 return 0; 1865 } 1866 1867 int bpf_map__reuse_fd(struct bpf_map *map, int fd) 1868 { 1869 struct bpf_map_info info = {}; 1870 __u32 len = sizeof(info); 1871 int new_fd, err; 1872 char *new_name; 1873 1874 err = bpf_obj_get_info_by_fd(fd, &info, &len); 1875 if (err) 1876 return err; 1877 1878 new_name = strdup(info.name); 1879 if (!new_name) 1880 return -errno; 1881 1882 new_fd = open("/", O_RDONLY | O_CLOEXEC); 1883 if (new_fd < 0) 1884 goto err_free_new_name; 1885 1886 new_fd = dup3(fd, new_fd, O_CLOEXEC); 1887 if (new_fd < 0) 1888 goto err_close_new_fd; 1889 1890 err = zclose(map->fd); 1891 if (err) 1892 goto err_close_new_fd; 1893 free(map->name); 1894 1895 map->fd = new_fd; 1896 map->name = new_name; 1897 map->def.type = info.type; 1898 map->def.key_size = info.key_size; 1899 map->def.value_size = info.value_size; 1900 map->def.max_entries = info.max_entries; 1901 map->def.map_flags = info.map_flags; 1902 map->btf_key_type_id = info.btf_key_type_id; 1903 map->btf_value_type_id = info.btf_value_type_id; 1904 1905 return 0; 1906 1907 err_close_new_fd: 1908 close(new_fd); 1909 err_free_new_name: 1910 free(new_name); 1911 return -errno; 1912 } 1913 1914 int bpf_map__resize(struct bpf_map *map, __u32 max_entries) 1915 { 1916 if (!map || !max_entries) 1917 return -EINVAL; 1918 1919 /* If map already created, its attributes can't be changed. */ 1920 if (map->fd >= 0) 1921 return -EBUSY; 1922 1923 map->def.max_entries = max_entries; 1924 1925 return 0; 1926 } 1927 1928 static int 1929 bpf_object__probe_name(struct bpf_object *obj) 1930 { 1931 struct bpf_load_program_attr attr; 1932 char *cp, errmsg[STRERR_BUFSIZE]; 1933 struct bpf_insn insns[] = { 1934 BPF_MOV64_IMM(BPF_REG_0, 0), 1935 BPF_EXIT_INSN(), 1936 }; 1937 int ret; 1938 1939 /* make sure basic loading works */ 1940 1941 memset(&attr, 0, sizeof(attr)); 1942 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 1943 attr.insns = insns; 1944 attr.insns_cnt = ARRAY_SIZE(insns); 1945 attr.license = "GPL"; 1946 1947 ret = bpf_load_program_xattr(&attr, NULL, 0); 1948 if (ret < 0) { 1949 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1950 pr_warning("Error in %s():%s(%d). 
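
/*
 * Illustrative sketch (hypothetical caller; map names are examples only):
 * both helpers above must run after bpf_object__open() but before
 * bpf_object__load(), e.g. to share one pinned map between objects and
 * grow another:
 *
 *	int pinned = bpf_obj_get("/sys/fs/bpf/my_map");
 *
 *	if (pinned >= 0)
 *		bpf_map__reuse_fd(bpf_object__find_map_by_name(obj, "my_map"),
 *				  pinned);
 *	bpf_map__resize(bpf_object__find_map_by_name(obj, "events"), 128);
 */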
static int
bpf_object__probe_name(struct bpf_object *obj)
{
	struct bpf_load_program_attr attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret;

	/* make sure basic loading works */

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = insns;
	attr.insns_cnt = ARRAY_SIZE(insns);
	attr.license = "GPL";

	ret = bpf_load_program_xattr(&attr, NULL, 0);
	if (ret < 0) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
			   __func__, cp, errno);
		return -errno;
	}
	close(ret);

	/* now try the same program, but with the name */

	attr.name = "test";
	ret = bpf_load_program_xattr(&attr, NULL, 0);
	if (ret >= 0) {
		obj->caps.name = 1;
		close(ret);
	}

	return 0;
}

static int
bpf_object__probe_global_data(struct bpf_object *obj)
{
	struct bpf_load_program_attr prg_attr;
	struct bpf_create_map_attr map_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret, map;

	memset(&map_attr, 0, sizeof(map_attr));
	map_attr.map_type = BPF_MAP_TYPE_ARRAY;
	map_attr.key_size = sizeof(int);
	map_attr.value_size = 32;
	map_attr.max_entries = 1;

	map = bpf_create_map_xattr(&map_attr);
	if (map < 0) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warning("Error in %s():%s(%d). Couldn't create simple array map.\n",
			   __func__, cp, errno);
		return -errno;
	}

	insns[0].imm = map;

	memset(&prg_attr, 0, sizeof(prg_attr));
	prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	prg_attr.insns = insns;
	prg_attr.insns_cnt = ARRAY_SIZE(insns);
	prg_attr.license = "GPL";

	ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
	if (ret >= 0) {
		obj->caps.global_data = 1;
		close(ret);
	}

	close(map);
	return 0;
}

static int bpf_object__probe_btf_func(struct bpf_object *obj)
{
	const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* FUNC_PROTO */                                /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */                                    /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};
	int btf_fd;

	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
				      strs, sizeof(strs));
	if (btf_fd >= 0) {
		obj->caps.btf_func = 1;
		close(btf_fd);
		return 1;
	}

	return 0;
}

static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
{
	const char strs[] = "\0x\0.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* VAR x */                                     /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC val */                               /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};
	int btf_fd;

	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
				      strs, sizeof(strs));
	if (btf_fd >= 0) {
		obj->caps.btf_datasec = 1;
		close(btf_fd);
		return 1;
	}

	return 0;
}

static int
bpf_object__probe_caps(struct bpf_object *obj)
{
	int (*probe_fn[])(struct bpf_object *obj) = {
		bpf_object__probe_name,
		bpf_object__probe_global_data,
		bpf_object__probe_btf_func,
		bpf_object__probe_btf_datasec,
	};
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
		ret = probe_fn[i](obj);
		if (ret < 0)
			pr_debug("Probe #%d failed with %d.\n", i, ret);
	}

	return 0;
}
2086 2087 static int 2088 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) 2089 { 2090 char *cp, errmsg[STRERR_BUFSIZE]; 2091 int err, zero = 0; 2092 __u8 *data; 2093 2094 /* Nothing to do here since kernel already zero-initializes .bss map. */ 2095 if (map->libbpf_type == LIBBPF_MAP_BSS) 2096 return 0; 2097 2098 data = map->libbpf_type == LIBBPF_MAP_DATA ? 2099 obj->sections.data : obj->sections.rodata; 2100 2101 err = bpf_map_update_elem(map->fd, &zero, data, 0); 2102 /* Freeze .rodata map as read-only from syscall side. */ 2103 if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) { 2104 err = bpf_map_freeze(map->fd); 2105 if (err) { 2106 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 2107 pr_warning("Error freezing map(%s) as read-only: %s\n", 2108 map->name, cp); 2109 err = 0; 2110 } 2111 } 2112 return err; 2113 } 2114 2115 static int 2116 bpf_object__create_maps(struct bpf_object *obj) 2117 { 2118 struct bpf_create_map_attr create_attr = {}; 2119 int nr_cpus = 0; 2120 unsigned int i; 2121 int err; 2122 2123 for (i = 0; i < obj->nr_maps; i++) { 2124 struct bpf_map *map = &obj->maps[i]; 2125 struct bpf_map_def *def = &map->def; 2126 char *cp, errmsg[STRERR_BUFSIZE]; 2127 int *pfd = &map->fd; 2128 2129 if (map->fd >= 0) { 2130 pr_debug("skip map create (preset) %s: fd=%d\n", 2131 map->name, map->fd); 2132 continue; 2133 } 2134 2135 if (obj->caps.name) 2136 create_attr.name = map->name; 2137 create_attr.map_ifindex = map->map_ifindex; 2138 create_attr.map_type = def->type; 2139 create_attr.map_flags = def->map_flags; 2140 create_attr.key_size = def->key_size; 2141 create_attr.value_size = def->value_size; 2142 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && 2143 !def->max_entries) { 2144 if (!nr_cpus) 2145 nr_cpus = libbpf_num_possible_cpus(); 2146 if (nr_cpus < 0) { 2147 pr_warning("failed to determine number of system CPUs: %d\n", 2148 nr_cpus); 2149 err = nr_cpus; 2150 goto err_out; 2151 } 2152 pr_debug("map '%s': setting size to %d\n", 2153 map->name, nr_cpus); 2154 create_attr.max_entries = nr_cpus; 2155 } else { 2156 create_attr.max_entries = def->max_entries; 2157 } 2158 create_attr.btf_fd = 0; 2159 create_attr.btf_key_type_id = 0; 2160 create_attr.btf_value_type_id = 0; 2161 if (bpf_map_type__is_map_in_map(def->type) && 2162 map->inner_map_fd >= 0) 2163 create_attr.inner_map_fd = map->inner_map_fd; 2164 2165 if (obj->btf && !bpf_map_find_btf_info(obj, map)) { 2166 create_attr.btf_fd = btf__fd(obj->btf); 2167 create_attr.btf_key_type_id = map->btf_key_type_id; 2168 create_attr.btf_value_type_id = map->btf_value_type_id; 2169 } 2170 2171 *pfd = bpf_create_map_xattr(&create_attr); 2172 if (*pfd < 0 && (create_attr.btf_key_type_id || 2173 create_attr.btf_value_type_id)) { 2174 err = -errno; 2175 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); 2176 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). 
Retrying without BTF.\n", 2177 map->name, cp, err); 2178 create_attr.btf_fd = 0; 2179 create_attr.btf_key_type_id = 0; 2180 create_attr.btf_value_type_id = 0; 2181 map->btf_key_type_id = 0; 2182 map->btf_value_type_id = 0; 2183 *pfd = bpf_create_map_xattr(&create_attr); 2184 } 2185 2186 if (*pfd < 0) { 2187 size_t j; 2188 2189 err = -errno; 2190 err_out: 2191 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); 2192 pr_warning("failed to create map (name: '%s'): %s(%d)\n", 2193 map->name, cp, err); 2194 for (j = 0; j < i; j++) 2195 zclose(obj->maps[j].fd); 2196 return err; 2197 } 2198 2199 if (bpf_map__is_internal(map)) { 2200 err = bpf_object__populate_internal_map(obj, map); 2201 if (err < 0) { 2202 zclose(*pfd); 2203 goto err_out; 2204 } 2205 } 2206 2207 pr_debug("created map %s: fd=%d\n", map->name, *pfd); 2208 } 2209 2210 return 0; 2211 } 2212 2213 static int 2214 check_btf_ext_reloc_err(struct bpf_program *prog, int err, 2215 void *btf_prog_info, const char *info_name) 2216 { 2217 if (err != -ENOENT) { 2218 pr_warning("Error in loading %s for sec %s.\n", 2219 info_name, prog->section_name); 2220 return err; 2221 } 2222 2223 /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */ 2224 2225 if (btf_prog_info) { 2226 /* 2227 * Some info has already been found but has problem 2228 * in the last btf_ext reloc. Must have to error out. 2229 */ 2230 pr_warning("Error in relocating %s for sec %s.\n", 2231 info_name, prog->section_name); 2232 return err; 2233 } 2234 2235 /* Have problem loading the very first info. Ignore the rest. */ 2236 pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n", 2237 info_name, prog->section_name, info_name); 2238 return 0; 2239 } 2240 2241 static int 2242 bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj, 2243 const char *section_name, __u32 insn_offset) 2244 { 2245 int err; 2246 2247 if (!insn_offset || prog->func_info) { 2248 /* 2249 * !insn_offset => main program 2250 * 2251 * For sub prog, the main program's func_info has to 2252 * be loaded first (i.e. 
prog->func_info != NULL)
2253 		 */
2254 		err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
2255 					       section_name, insn_offset,
2256 					       &prog->func_info,
2257 					       &prog->func_info_cnt);
2258 		if (err)
2259 			return check_btf_ext_reloc_err(prog, err,
2260 						       prog->func_info,
2261 						       "bpf_func_info");
2262 
2263 		prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
2264 	}
2265 
2266 	if (!insn_offset || prog->line_info) {
2267 		err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
2268 					       section_name, insn_offset,
2269 					       &prog->line_info,
2270 					       &prog->line_info_cnt);
2271 		if (err)
2272 			return check_btf_ext_reloc_err(prog, err,
2273 						       prog->line_info,
2274 						       "bpf_line_info");
2275 
2276 		prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
2277 	}
2278 
2279 	if (!insn_offset)
2280 		prog->btf_fd = btf__fd(obj->btf);
2281 
2282 	return 0;
2283 }
2284 
2285 static int
2286 bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
2287 			struct reloc_desc *relo)
2288 {
2289 	struct bpf_insn *insn, *new_insn;
2290 	struct bpf_program *text;
2291 	size_t new_cnt;
2292 	int err;
2293 
2294 	if (relo->type != RELO_CALL)
2295 		return -LIBBPF_ERRNO__RELOC;
2296 
2297 	if (prog->idx == obj->efile.text_shndx) {
2298 		pr_warning("relo in .text insn %d into off %d\n",
2299 			   relo->insn_idx, relo->text_off);
2300 		return -LIBBPF_ERRNO__RELOC;
2301 	}
2302 
2303 	if (prog->main_prog_cnt == 0) {
2304 		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
2305 		if (!text) {
2306 			pr_warning("no .text section found, yet a relocation into .text exists\n");
2307 			return -LIBBPF_ERRNO__RELOC;
2308 		}
2309 		new_cnt = prog->insns_cnt + text->insns_cnt;
2310 		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
2311 		if (!new_insn) {
2312 			pr_warning("out of memory in program realloc\n");
2313 			return -ENOMEM;
2314 		}
2315 
2316 		if (obj->btf_ext) {
2317 			err = bpf_program_reloc_btf_ext(prog, obj,
2318 							text->section_name,
2319 							prog->insns_cnt);
2320 			if (err)
2321 				return err;
2322 		}
2323 
2324 		memcpy(new_insn + prog->insns_cnt, text->insns,
2325 		       text->insns_cnt * sizeof(*insn));
2326 		prog->insns = new_insn;
2327 		prog->main_prog_cnt = prog->insns_cnt;
2328 		prog->insns_cnt = new_cnt;
2329 		pr_debug("added %zd insn from %s to prog %s\n",
2330 			 text->insns_cnt, text->section_name,
2331 			 prog->section_name);
2332 	}
2333 	insn = &prog->insns[relo->insn_idx];
2334 	insn->imm += prog->main_prog_cnt - relo->insn_idx;
2335 	return 0;
2336 }
2337 
2338 static int
2339 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
2340 {
2341 	int i, err;
2342 
2343 	if (!prog)
2344 		return 0;
2345 
2346 	if (obj->btf_ext) {
2347 		err = bpf_program_reloc_btf_ext(prog, obj,
2348 						prog->section_name, 0);
2349 		if (err)
2350 			return err;
2351 	}
2352 
2353 	if (!prog->reloc_desc)
2354 		return 0;
2355 
2356 	for (i = 0; i < prog->nr_reloc; i++) {
2357 		if (prog->reloc_desc[i].type == RELO_LD64 ||
2358 		    prog->reloc_desc[i].type == RELO_DATA) {
2359 			bool relo_data = prog->reloc_desc[i].type == RELO_DATA;
2360 			struct bpf_insn *insns = prog->insns;
2361 			int insn_idx, map_idx;
2362 
2363 			insn_idx = prog->reloc_desc[i].insn_idx;
2364 			map_idx = prog->reloc_desc[i].map_idx;
2365 
2366 			if (insn_idx + 1 >= (int)prog->insns_cnt) {
2367 				pr_warning("relocation out of range: '%s'\n",
2368 					   prog->section_name);
2369 				return -LIBBPF_ERRNO__RELOC;
2370 			}
2371 
2372 			if (!relo_data) {
2373 				insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
2374 			} else {
2375 				insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE;
2376 				insns[insn_idx + 1].imm = insns[insn_idx].imm;
2377 			}
2378 			insns[insn_idx].imm =
obj->maps[map_idx].fd; 2379 } else if (prog->reloc_desc[i].type == RELO_CALL) { 2380 err = bpf_program__reloc_text(prog, obj, 2381 &prog->reloc_desc[i]); 2382 if (err) 2383 return err; 2384 } 2385 } 2386 2387 zfree(&prog->reloc_desc); 2388 prog->nr_reloc = 0; 2389 return 0; 2390 } 2391 2392 2393 static int 2394 bpf_object__relocate(struct bpf_object *obj) 2395 { 2396 struct bpf_program *prog; 2397 size_t i; 2398 int err; 2399 2400 for (i = 0; i < obj->nr_programs; i++) { 2401 prog = &obj->programs[i]; 2402 2403 err = bpf_program__relocate(prog, obj); 2404 if (err) { 2405 pr_warning("failed to relocate '%s'\n", 2406 prog->section_name); 2407 return err; 2408 } 2409 } 2410 return 0; 2411 } 2412 2413 static int bpf_object__collect_reloc(struct bpf_object *obj) 2414 { 2415 int i, err; 2416 2417 if (!obj_elf_valid(obj)) { 2418 pr_warning("Internal error: elf object is closed\n"); 2419 return -LIBBPF_ERRNO__INTERNAL; 2420 } 2421 2422 for (i = 0; i < obj->efile.nr_reloc; i++) { 2423 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr; 2424 Elf_Data *data = obj->efile.reloc[i].data; 2425 int idx = shdr->sh_info; 2426 struct bpf_program *prog; 2427 2428 if (shdr->sh_type != SHT_REL) { 2429 pr_warning("internal error at %d\n", __LINE__); 2430 return -LIBBPF_ERRNO__INTERNAL; 2431 } 2432 2433 prog = bpf_object__find_prog_by_idx(obj, idx); 2434 if (!prog) { 2435 pr_warning("relocation failed: no section(%d)\n", idx); 2436 return -LIBBPF_ERRNO__RELOC; 2437 } 2438 2439 err = bpf_program__collect_reloc(prog, shdr, data, obj); 2440 if (err) 2441 return err; 2442 } 2443 return 0; 2444 } 2445 2446 static int 2447 load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, 2448 char *license, __u32 kern_version, int *pfd) 2449 { 2450 struct bpf_load_program_attr load_attr; 2451 char *cp, errmsg[STRERR_BUFSIZE]; 2452 int log_buf_size = BPF_LOG_BUF_SIZE; 2453 char *log_buf; 2454 int ret; 2455 2456 if (!insns || !insns_cnt) 2457 return -EINVAL; 2458 2459 memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); 2460 load_attr.prog_type = prog->type; 2461 load_attr.expected_attach_type = prog->expected_attach_type; 2462 if (prog->caps->name) 2463 load_attr.name = prog->name; 2464 load_attr.insns = insns; 2465 load_attr.insns_cnt = insns_cnt; 2466 load_attr.license = license; 2467 load_attr.kern_version = kern_version; 2468 load_attr.prog_ifindex = prog->prog_ifindex; 2469 load_attr.prog_btf_fd = prog->btf_fd >= 0 ? 
prog->btf_fd : 0;
2470 	load_attr.func_info = prog->func_info;
2471 	load_attr.func_info_rec_size = prog->func_info_rec_size;
2472 	load_attr.func_info_cnt = prog->func_info_cnt;
2473 	load_attr.line_info = prog->line_info;
2474 	load_attr.line_info_rec_size = prog->line_info_rec_size;
2475 	load_attr.line_info_cnt = prog->line_info_cnt;
2476 	load_attr.log_level = prog->log_level;
2477 	load_attr.prog_flags = prog->prog_flags;
2478 
2479 retry_load:
2480 	log_buf = malloc(log_buf_size);
2481 	if (!log_buf)
2482 		pr_warning("failed to allocate log buffer for BPF loader, continuing without log\n");
2483 
2484 	ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
2485 
2486 	if (ret >= 0) {
2487 		if (load_attr.log_level)
2488 			pr_debug("verifier log:\n%s", log_buf);
2489 		*pfd = ret;
2490 		ret = 0;
2491 		goto out;
2492 	}
2493 
2494 	if (errno == ENOSPC) {
2495 		log_buf_size <<= 1;
2496 		free(log_buf);
2497 		goto retry_load;
2498 	}
2499 	ret = -LIBBPF_ERRNO__LOAD;
2500 	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2501 	pr_warning("load bpf program failed: %s\n", cp);
2502 
2503 	if (log_buf && log_buf[0] != '\0') {
2504 		ret = -LIBBPF_ERRNO__VERIFY;
2505 		pr_warning("-- BEGIN DUMP LOG --\n");
2506 		pr_warning("\n%s\n", log_buf);
2507 		pr_warning("-- END DUMP LOG --\n");
2508 	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
2509 		pr_warning("Program too large (%zu insns), at most %d insns\n",
2510 			   load_attr.insns_cnt, BPF_MAXINSNS);
2511 		ret = -LIBBPF_ERRNO__PROG2BIG;
2512 	} else {
2513 		/* Wrong program type? */
2514 		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
2515 			int fd;
2516 
2517 			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
2518 			load_attr.expected_attach_type = 0;
2519 			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
2520 			if (fd >= 0) {
2521 				close(fd);
2522 				ret = -LIBBPF_ERRNO__PROGTYPE;
2523 				goto out;
2524 			}
2525 		}
2526 
2527 		if (log_buf)
2528 			ret = -LIBBPF_ERRNO__KVER;
2529 	}
2530 
2531 out:
2532 	free(log_buf);
2533 	return ret;
2534 }
2535 
2536 int
2537 bpf_program__load(struct bpf_program *prog,
2538 		  char *license, __u32 kern_version)
2539 {
2540 	int err = 0, fd, i;
2541 
2542 	if (prog->instances.nr < 0 || !prog->instances.fds) {
2543 		if (prog->preprocessor) {
2544 			pr_warning("Internal error: can't load program '%s'\n",
2545 				   prog->section_name);
2546 			return -LIBBPF_ERRNO__INTERNAL;
2547 		}
2548 
2549 		prog->instances.fds = malloc(sizeof(int));
2550 		if (!prog->instances.fds) {
2551 			pr_warning("Not enough memory for BPF fds\n");
2552 			return -ENOMEM;
2553 		}
2554 		prog->instances.nr = 1;
2555 		prog->instances.fds[0] = -1;
2556 	}
2557 
2558 	if (!prog->preprocessor) {
2559 		if (prog->instances.nr != 1) {
2560 			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
2561 				   prog->section_name, prog->instances.nr);
2562 		}
2563 		err = load_program(prog, prog->insns, prog->insns_cnt,
2564 				   license, kern_version, &fd);
2565 		if (!err)
2566 			prog->instances.fds[0] = fd;
2567 		goto out;
2568 	}
2569 
2570 	for (i = 0; i < prog->instances.nr; i++) {
2571 		struct bpf_prog_prep_result result;
2572 		bpf_program_prep_t preprocessor = prog->preprocessor;
2573 
2574 		memset(&result, 0, sizeof(result));
2575 		err = preprocessor(prog, i, prog->insns,
2576 				   prog->insns_cnt, &result);
2577 		if (err) {
2578 			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
2579 				   i, prog->section_name);
2580 			goto out;
2581 		}
2582 
2583 		if (!result.new_insn_ptr || !result.new_insn_cnt) {
2584 			pr_debug("Skip loading the %dth instance of program '%s'\n",
2585 				 i, prog->section_name);
2586 			prog->instances.fds[i] = -1;
2587 			if (result.pfd)
2588 				*result.pfd =
-1; 2589 continue; 2590 } 2591 2592 err = load_program(prog, result.new_insn_ptr, 2593 result.new_insn_cnt, 2594 license, kern_version, &fd); 2595 2596 if (err) { 2597 pr_warning("Loading the %dth instance of program '%s' failed\n", 2598 i, prog->section_name); 2599 goto out; 2600 } 2601 2602 if (result.pfd) 2603 *result.pfd = fd; 2604 prog->instances.fds[i] = fd; 2605 } 2606 out: 2607 if (err) 2608 pr_warning("failed to load program '%s'\n", 2609 prog->section_name); 2610 zfree(&prog->insns); 2611 prog->insns_cnt = 0; 2612 return err; 2613 } 2614 2615 static bool bpf_program__is_function_storage(const struct bpf_program *prog, 2616 const struct bpf_object *obj) 2617 { 2618 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls; 2619 } 2620 2621 static int 2622 bpf_object__load_progs(struct bpf_object *obj, int log_level) 2623 { 2624 size_t i; 2625 int err; 2626 2627 for (i = 0; i < obj->nr_programs; i++) { 2628 if (bpf_program__is_function_storage(&obj->programs[i], obj)) 2629 continue; 2630 obj->programs[i].log_level |= log_level; 2631 err = bpf_program__load(&obj->programs[i], 2632 obj->license, 2633 obj->kern_version); 2634 if (err) 2635 return err; 2636 } 2637 return 0; 2638 } 2639 2640 static bool bpf_prog_type__needs_kver(enum bpf_prog_type type) 2641 { 2642 switch (type) { 2643 case BPF_PROG_TYPE_SOCKET_FILTER: 2644 case BPF_PROG_TYPE_SCHED_CLS: 2645 case BPF_PROG_TYPE_SCHED_ACT: 2646 case BPF_PROG_TYPE_XDP: 2647 case BPF_PROG_TYPE_CGROUP_SKB: 2648 case BPF_PROG_TYPE_CGROUP_SOCK: 2649 case BPF_PROG_TYPE_LWT_IN: 2650 case BPF_PROG_TYPE_LWT_OUT: 2651 case BPF_PROG_TYPE_LWT_XMIT: 2652 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2653 case BPF_PROG_TYPE_SOCK_OPS: 2654 case BPF_PROG_TYPE_SK_SKB: 2655 case BPF_PROG_TYPE_CGROUP_DEVICE: 2656 case BPF_PROG_TYPE_SK_MSG: 2657 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2658 case BPF_PROG_TYPE_LIRC_MODE2: 2659 case BPF_PROG_TYPE_SK_REUSEPORT: 2660 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2661 case BPF_PROG_TYPE_UNSPEC: 2662 case BPF_PROG_TYPE_TRACEPOINT: 2663 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2664 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 2665 case BPF_PROG_TYPE_PERF_EVENT: 2666 case BPF_PROG_TYPE_CGROUP_SYSCTL: 2667 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2668 return false; 2669 case BPF_PROG_TYPE_KPROBE: 2670 default: 2671 return true; 2672 } 2673 } 2674 2675 static int bpf_object__validate(struct bpf_object *obj, bool needs_kver) 2676 { 2677 if (needs_kver && obj->kern_version == 0) { 2678 pr_warning("%s doesn't provide kernel version\n", 2679 obj->path); 2680 return -LIBBPF_ERRNO__KVERSION; 2681 } 2682 return 0; 2683 } 2684 2685 static struct bpf_object * 2686 __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz, 2687 bool needs_kver, int flags) 2688 { 2689 struct bpf_object *obj; 2690 int err; 2691 2692 if (elf_version(EV_CURRENT) == EV_NONE) { 2693 pr_warning("failed to init libelf for %s\n", path); 2694 return ERR_PTR(-LIBBPF_ERRNO__LIBELF); 2695 } 2696 2697 obj = bpf_object__new(path, obj_buf, obj_buf_sz); 2698 if (IS_ERR(obj)) 2699 return obj; 2700 2701 CHECK_ERR(bpf_object__elf_init(obj), err, out); 2702 CHECK_ERR(bpf_object__check_endianness(obj), err, out); 2703 CHECK_ERR(bpf_object__probe_caps(obj), err, out); 2704 CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out); 2705 CHECK_ERR(bpf_object__collect_reloc(obj), err, out); 2706 CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out); 2707 2708 bpf_object__elf_finish(obj); 2709 return obj; 2710 out: 2711 bpf_object__close(obj); 2712 return ERR_PTR(err); 2713 } 2714 2715 
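/*
 * Illustrative sketch (editor's addition, not part of libbpf): the
 * typical caller-side flow through the open/load API defined here.
 * "prog.o" is a placeholder path, error handling is minimal, and
 * use_fd() is a hypothetical consumer of the program fd:
 *
 *	struct bpf_object *obj;
 *	struct bpf_program *prog;
 *	int err;
 *
 *	obj = bpf_object__open("prog.o");
 *	err = libbpf_get_error(obj);
 *	if (err)
 *		return err;
 *
 *	err = bpf_object__load(obj);	// creates maps, relocates and
 *	if (err) {			// loads all programs in the object
 *		bpf_object__close(obj);
 *		return err;
 *	}
 *
 *	bpf_object__for_each_program(prog, obj)
 *		use_fd(bpf_program__fd(prog));
 *
 *	bpf_object__close(obj);
 */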
struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr, 2716 int flags) 2717 { 2718 /* param validation */ 2719 if (!attr->file) 2720 return NULL; 2721 2722 pr_debug("loading %s\n", attr->file); 2723 2724 return __bpf_object__open(attr->file, NULL, 0, 2725 bpf_prog_type__needs_kver(attr->prog_type), 2726 flags); 2727 } 2728 2729 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr) 2730 { 2731 return __bpf_object__open_xattr(attr, 0); 2732 } 2733 2734 struct bpf_object *bpf_object__open(const char *path) 2735 { 2736 struct bpf_object_open_attr attr = { 2737 .file = path, 2738 .prog_type = BPF_PROG_TYPE_UNSPEC, 2739 }; 2740 2741 return bpf_object__open_xattr(&attr); 2742 } 2743 2744 struct bpf_object *bpf_object__open_buffer(void *obj_buf, 2745 size_t obj_buf_sz, 2746 const char *name) 2747 { 2748 char tmp_name[64]; 2749 2750 /* param validation */ 2751 if (!obj_buf || obj_buf_sz <= 0) 2752 return NULL; 2753 2754 if (!name) { 2755 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", 2756 (unsigned long)obj_buf, 2757 (unsigned long)obj_buf_sz); 2758 name = tmp_name; 2759 } 2760 pr_debug("loading object '%s' from buffer\n", name); 2761 2762 return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true); 2763 } 2764 2765 int bpf_object__unload(struct bpf_object *obj) 2766 { 2767 size_t i; 2768 2769 if (!obj) 2770 return -EINVAL; 2771 2772 for (i = 0; i < obj->nr_maps; i++) 2773 zclose(obj->maps[i].fd); 2774 2775 for (i = 0; i < obj->nr_programs; i++) 2776 bpf_program__unload(&obj->programs[i]); 2777 2778 return 0; 2779 } 2780 2781 int bpf_object__load_xattr(struct bpf_object_load_attr *attr) 2782 { 2783 struct bpf_object *obj; 2784 int err; 2785 2786 if (!attr) 2787 return -EINVAL; 2788 obj = attr->obj; 2789 if (!obj) 2790 return -EINVAL; 2791 2792 if (obj->loaded) { 2793 pr_warning("object should not be loaded twice\n"); 2794 return -EINVAL; 2795 } 2796 2797 obj->loaded = true; 2798 2799 CHECK_ERR(bpf_object__create_maps(obj), err, out); 2800 CHECK_ERR(bpf_object__relocate(obj), err, out); 2801 CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out); 2802 2803 return 0; 2804 out: 2805 bpf_object__unload(obj); 2806 pr_warning("failed to load object '%s'\n", obj->path); 2807 return err; 2808 } 2809 2810 int bpf_object__load(struct bpf_object *obj) 2811 { 2812 struct bpf_object_load_attr attr = { 2813 .obj = obj, 2814 }; 2815 2816 return bpf_object__load_xattr(&attr); 2817 } 2818 2819 static int check_path(const char *path) 2820 { 2821 char *cp, errmsg[STRERR_BUFSIZE]; 2822 struct statfs st_fs; 2823 char *dname, *dir; 2824 int err = 0; 2825 2826 if (path == NULL) 2827 return -EINVAL; 2828 2829 dname = strdup(path); 2830 if (dname == NULL) 2831 return -ENOMEM; 2832 2833 dir = dirname(dname); 2834 if (statfs(dir, &st_fs)) { 2835 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 2836 pr_warning("failed to statfs %s: %s\n", dir, cp); 2837 err = -errno; 2838 } 2839 free(dname); 2840 2841 if (!err && st_fs.f_type != BPF_FS_MAGIC) { 2842 pr_warning("specified path %s is not on BPF FS\n", path); 2843 err = -EINVAL; 2844 } 2845 2846 return err; 2847 } 2848 2849 int bpf_program__pin_instance(struct bpf_program *prog, const char *path, 2850 int instance) 2851 { 2852 char *cp, errmsg[STRERR_BUFSIZE]; 2853 int err; 2854 2855 err = check_path(path); 2856 if (err) 2857 return err; 2858 2859 if (prog == NULL) { 2860 pr_warning("invalid program pointer\n"); 2861 return -EINVAL; 2862 } 2863 2864 if (instance < 0 || instance >= prog->instances.nr) { 2865 
pr_warning("invalid prog instance %d of prog %s (max %d)\n", 2866 instance, prog->section_name, prog->instances.nr); 2867 return -EINVAL; 2868 } 2869 2870 if (bpf_obj_pin(prog->instances.fds[instance], path)) { 2871 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 2872 pr_warning("failed to pin program: %s\n", cp); 2873 return -errno; 2874 } 2875 pr_debug("pinned program '%s'\n", path); 2876 2877 return 0; 2878 } 2879 2880 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, 2881 int instance) 2882 { 2883 int err; 2884 2885 err = check_path(path); 2886 if (err) 2887 return err; 2888 2889 if (prog == NULL) { 2890 pr_warning("invalid program pointer\n"); 2891 return -EINVAL; 2892 } 2893 2894 if (instance < 0 || instance >= prog->instances.nr) { 2895 pr_warning("invalid prog instance %d of prog %s (max %d)\n", 2896 instance, prog->section_name, prog->instances.nr); 2897 return -EINVAL; 2898 } 2899 2900 err = unlink(path); 2901 if (err != 0) 2902 return -errno; 2903 pr_debug("unpinned program '%s'\n", path); 2904 2905 return 0; 2906 } 2907 2908 static int make_dir(const char *path) 2909 { 2910 char *cp, errmsg[STRERR_BUFSIZE]; 2911 int err = 0; 2912 2913 if (mkdir(path, 0700) && errno != EEXIST) 2914 err = -errno; 2915 2916 if (err) { 2917 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); 2918 pr_warning("failed to mkdir %s: %s\n", path, cp); 2919 } 2920 return err; 2921 } 2922 2923 int bpf_program__pin(struct bpf_program *prog, const char *path) 2924 { 2925 int i, err; 2926 2927 err = check_path(path); 2928 if (err) 2929 return err; 2930 2931 if (prog == NULL) { 2932 pr_warning("invalid program pointer\n"); 2933 return -EINVAL; 2934 } 2935 2936 if (prog->instances.nr <= 0) { 2937 pr_warning("no instances of prog %s to pin\n", 2938 prog->section_name); 2939 return -EINVAL; 2940 } 2941 2942 if (prog->instances.nr == 1) { 2943 /* don't create subdirs when pinning single instance */ 2944 return bpf_program__pin_instance(prog, path, 0); 2945 } 2946 2947 err = make_dir(path); 2948 if (err) 2949 return err; 2950 2951 for (i = 0; i < prog->instances.nr; i++) { 2952 char buf[PATH_MAX]; 2953 int len; 2954 2955 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 2956 if (len < 0) { 2957 err = -EINVAL; 2958 goto err_unpin; 2959 } else if (len >= PATH_MAX) { 2960 err = -ENAMETOOLONG; 2961 goto err_unpin; 2962 } 2963 2964 err = bpf_program__pin_instance(prog, buf, i); 2965 if (err) 2966 goto err_unpin; 2967 } 2968 2969 return 0; 2970 2971 err_unpin: 2972 for (i = i - 1; i >= 0; i--) { 2973 char buf[PATH_MAX]; 2974 int len; 2975 2976 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 2977 if (len < 0) 2978 continue; 2979 else if (len >= PATH_MAX) 2980 continue; 2981 2982 bpf_program__unpin_instance(prog, buf, i); 2983 } 2984 2985 rmdir(path); 2986 2987 return err; 2988 } 2989 2990 int bpf_program__unpin(struct bpf_program *prog, const char *path) 2991 { 2992 int i, err; 2993 2994 err = check_path(path); 2995 if (err) 2996 return err; 2997 2998 if (prog == NULL) { 2999 pr_warning("invalid program pointer\n"); 3000 return -EINVAL; 3001 } 3002 3003 if (prog->instances.nr <= 0) { 3004 pr_warning("no instances of prog %s to pin\n", 3005 prog->section_name); 3006 return -EINVAL; 3007 } 3008 3009 if (prog->instances.nr == 1) { 3010 /* don't create subdirs when pinning single instance */ 3011 return bpf_program__unpin_instance(prog, path, 0); 3012 } 3013 3014 for (i = 0; i < prog->instances.nr; i++) { 3015 char buf[PATH_MAX]; 3016 int len; 3017 3018 len = snprintf(buf, PATH_MAX, "%s/%d", 
path, i); 3019 if (len < 0) 3020 return -EINVAL; 3021 else if (len >= PATH_MAX) 3022 return -ENAMETOOLONG; 3023 3024 err = bpf_program__unpin_instance(prog, buf, i); 3025 if (err) 3026 return err; 3027 } 3028 3029 err = rmdir(path); 3030 if (err) 3031 return -errno; 3032 3033 return 0; 3034 } 3035 3036 int bpf_map__pin(struct bpf_map *map, const char *path) 3037 { 3038 char *cp, errmsg[STRERR_BUFSIZE]; 3039 int err; 3040 3041 err = check_path(path); 3042 if (err) 3043 return err; 3044 3045 if (map == NULL) { 3046 pr_warning("invalid map pointer\n"); 3047 return -EINVAL; 3048 } 3049 3050 if (bpf_obj_pin(map->fd, path)) { 3051 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 3052 pr_warning("failed to pin map: %s\n", cp); 3053 return -errno; 3054 } 3055 3056 pr_debug("pinned map '%s'\n", path); 3057 3058 return 0; 3059 } 3060 3061 int bpf_map__unpin(struct bpf_map *map, const char *path) 3062 { 3063 int err; 3064 3065 err = check_path(path); 3066 if (err) 3067 return err; 3068 3069 if (map == NULL) { 3070 pr_warning("invalid map pointer\n"); 3071 return -EINVAL; 3072 } 3073 3074 err = unlink(path); 3075 if (err != 0) 3076 return -errno; 3077 pr_debug("unpinned map '%s'\n", path); 3078 3079 return 0; 3080 } 3081 3082 int bpf_object__pin_maps(struct bpf_object *obj, const char *path) 3083 { 3084 struct bpf_map *map; 3085 int err; 3086 3087 if (!obj) 3088 return -ENOENT; 3089 3090 if (!obj->loaded) { 3091 pr_warning("object not yet loaded; load it first\n"); 3092 return -ENOENT; 3093 } 3094 3095 err = make_dir(path); 3096 if (err) 3097 return err; 3098 3099 bpf_object__for_each_map(map, obj) { 3100 char buf[PATH_MAX]; 3101 int len; 3102 3103 len = snprintf(buf, PATH_MAX, "%s/%s", path, 3104 bpf_map__name(map)); 3105 if (len < 0) { 3106 err = -EINVAL; 3107 goto err_unpin_maps; 3108 } else if (len >= PATH_MAX) { 3109 err = -ENAMETOOLONG; 3110 goto err_unpin_maps; 3111 } 3112 3113 err = bpf_map__pin(map, buf); 3114 if (err) 3115 goto err_unpin_maps; 3116 } 3117 3118 return 0; 3119 3120 err_unpin_maps: 3121 while ((map = bpf_map__prev(map, obj))) { 3122 char buf[PATH_MAX]; 3123 int len; 3124 3125 len = snprintf(buf, PATH_MAX, "%s/%s", path, 3126 bpf_map__name(map)); 3127 if (len < 0) 3128 continue; 3129 else if (len >= PATH_MAX) 3130 continue; 3131 3132 bpf_map__unpin(map, buf); 3133 } 3134 3135 return err; 3136 } 3137 3138 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) 3139 { 3140 struct bpf_map *map; 3141 int err; 3142 3143 if (!obj) 3144 return -ENOENT; 3145 3146 bpf_object__for_each_map(map, obj) { 3147 char buf[PATH_MAX]; 3148 int len; 3149 3150 len = snprintf(buf, PATH_MAX, "%s/%s", path, 3151 bpf_map__name(map)); 3152 if (len < 0) 3153 return -EINVAL; 3154 else if (len >= PATH_MAX) 3155 return -ENAMETOOLONG; 3156 3157 err = bpf_map__unpin(map, buf); 3158 if (err) 3159 return err; 3160 } 3161 3162 return 0; 3163 } 3164 3165 int bpf_object__pin_programs(struct bpf_object *obj, const char *path) 3166 { 3167 struct bpf_program *prog; 3168 int err; 3169 3170 if (!obj) 3171 return -ENOENT; 3172 3173 if (!obj->loaded) { 3174 pr_warning("object not yet loaded; load it first\n"); 3175 return -ENOENT; 3176 } 3177 3178 err = make_dir(path); 3179 if (err) 3180 return err; 3181 3182 bpf_object__for_each_program(prog, obj) { 3183 char buf[PATH_MAX]; 3184 int len; 3185 3186 len = snprintf(buf, PATH_MAX, "%s/%s", path, 3187 prog->pin_name); 3188 if (len < 0) { 3189 err = -EINVAL; 3190 goto err_unpin_programs; 3191 } else if (len >= PATH_MAX) { 3192 err = -ENAMETOOLONG; 3193 goto 
err_unpin_programs; 3194 } 3195 3196 err = bpf_program__pin(prog, buf); 3197 if (err) 3198 goto err_unpin_programs; 3199 } 3200 3201 return 0; 3202 3203 err_unpin_programs: 3204 while ((prog = bpf_program__prev(prog, obj))) { 3205 char buf[PATH_MAX]; 3206 int len; 3207 3208 len = snprintf(buf, PATH_MAX, "%s/%s", path, 3209 prog->pin_name); 3210 if (len < 0) 3211 continue; 3212 else if (len >= PATH_MAX) 3213 continue; 3214 3215 bpf_program__unpin(prog, buf); 3216 } 3217 3218 return err; 3219 } 3220 3221 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path) 3222 { 3223 struct bpf_program *prog; 3224 int err; 3225 3226 if (!obj) 3227 return -ENOENT; 3228 3229 bpf_object__for_each_program(prog, obj) { 3230 char buf[PATH_MAX]; 3231 int len; 3232 3233 len = snprintf(buf, PATH_MAX, "%s/%s", path, 3234 prog->pin_name); 3235 if (len < 0) 3236 return -EINVAL; 3237 else if (len >= PATH_MAX) 3238 return -ENAMETOOLONG; 3239 3240 err = bpf_program__unpin(prog, buf); 3241 if (err) 3242 return err; 3243 } 3244 3245 return 0; 3246 } 3247 3248 int bpf_object__pin(struct bpf_object *obj, const char *path) 3249 { 3250 int err; 3251 3252 err = bpf_object__pin_maps(obj, path); 3253 if (err) 3254 return err; 3255 3256 err = bpf_object__pin_programs(obj, path); 3257 if (err) { 3258 bpf_object__unpin_maps(obj, path); 3259 return err; 3260 } 3261 3262 return 0; 3263 } 3264 3265 void bpf_object__close(struct bpf_object *obj) 3266 { 3267 size_t i; 3268 3269 if (!obj) 3270 return; 3271 3272 if (obj->clear_priv) 3273 obj->clear_priv(obj, obj->priv); 3274 3275 bpf_object__elf_finish(obj); 3276 bpf_object__unload(obj); 3277 btf__free(obj->btf); 3278 btf_ext__free(obj->btf_ext); 3279 3280 for (i = 0; i < obj->nr_maps; i++) { 3281 zfree(&obj->maps[i].name); 3282 if (obj->maps[i].clear_priv) 3283 obj->maps[i].clear_priv(&obj->maps[i], 3284 obj->maps[i].priv); 3285 obj->maps[i].priv = NULL; 3286 obj->maps[i].clear_priv = NULL; 3287 } 3288 3289 zfree(&obj->sections.rodata); 3290 zfree(&obj->sections.data); 3291 zfree(&obj->maps); 3292 obj->nr_maps = 0; 3293 3294 if (obj->programs && obj->nr_programs) { 3295 for (i = 0; i < obj->nr_programs; i++) 3296 bpf_program__exit(&obj->programs[i]); 3297 } 3298 zfree(&obj->programs); 3299 3300 list_del(&obj->list); 3301 free(obj); 3302 } 3303 3304 struct bpf_object * 3305 bpf_object__next(struct bpf_object *prev) 3306 { 3307 struct bpf_object *next; 3308 3309 if (!prev) 3310 next = list_first_entry(&bpf_objects_list, 3311 struct bpf_object, 3312 list); 3313 else 3314 next = list_next_entry(prev, list); 3315 3316 /* Empty list is noticed here so don't need checking on entry. */ 3317 if (&next->list == &bpf_objects_list) 3318 return NULL; 3319 3320 return next; 3321 } 3322 3323 const char *bpf_object__name(const struct bpf_object *obj) 3324 { 3325 return obj ? obj->path : ERR_PTR(-EINVAL); 3326 } 3327 3328 unsigned int bpf_object__kversion(const struct bpf_object *obj) 3329 { 3330 return obj ? obj->kern_version : 0; 3331 } 3332 3333 struct btf *bpf_object__btf(const struct bpf_object *obj) 3334 { 3335 return obj ? obj->btf : NULL; 3336 } 3337 3338 int bpf_object__btf_fd(const struct bpf_object *obj) 3339 { 3340 return obj->btf ? 
btf__fd(obj->btf) : -1; 3341 } 3342 3343 int bpf_object__set_priv(struct bpf_object *obj, void *priv, 3344 bpf_object_clear_priv_t clear_priv) 3345 { 3346 if (obj->priv && obj->clear_priv) 3347 obj->clear_priv(obj, obj->priv); 3348 3349 obj->priv = priv; 3350 obj->clear_priv = clear_priv; 3351 return 0; 3352 } 3353 3354 void *bpf_object__priv(const struct bpf_object *obj) 3355 { 3356 return obj ? obj->priv : ERR_PTR(-EINVAL); 3357 } 3358 3359 static struct bpf_program * 3360 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj, 3361 bool forward) 3362 { 3363 size_t nr_programs = obj->nr_programs; 3364 ssize_t idx; 3365 3366 if (!nr_programs) 3367 return NULL; 3368 3369 if (!p) 3370 /* Iter from the beginning */ 3371 return forward ? &obj->programs[0] : 3372 &obj->programs[nr_programs - 1]; 3373 3374 if (p->obj != obj) { 3375 pr_warning("error: program handler doesn't match object\n"); 3376 return NULL; 3377 } 3378 3379 idx = (p - obj->programs) + (forward ? 1 : -1); 3380 if (idx >= obj->nr_programs || idx < 0) 3381 return NULL; 3382 return &obj->programs[idx]; 3383 } 3384 3385 struct bpf_program * 3386 bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj) 3387 { 3388 struct bpf_program *prog = prev; 3389 3390 do { 3391 prog = __bpf_program__iter(prog, obj, true); 3392 } while (prog && bpf_program__is_function_storage(prog, obj)); 3393 3394 return prog; 3395 } 3396 3397 struct bpf_program * 3398 bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj) 3399 { 3400 struct bpf_program *prog = next; 3401 3402 do { 3403 prog = __bpf_program__iter(prog, obj, false); 3404 } while (prog && bpf_program__is_function_storage(prog, obj)); 3405 3406 return prog; 3407 } 3408 3409 int bpf_program__set_priv(struct bpf_program *prog, void *priv, 3410 bpf_program_clear_priv_t clear_priv) 3411 { 3412 if (prog->priv && prog->clear_priv) 3413 prog->clear_priv(prog, prog->priv); 3414 3415 prog->priv = priv; 3416 prog->clear_priv = clear_priv; 3417 return 0; 3418 } 3419 3420 void *bpf_program__priv(const struct bpf_program *prog) 3421 { 3422 return prog ? 
prog->priv : ERR_PTR(-EINVAL); 3423 } 3424 3425 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex) 3426 { 3427 prog->prog_ifindex = ifindex; 3428 } 3429 3430 const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy) 3431 { 3432 const char *title; 3433 3434 title = prog->section_name; 3435 if (needs_copy) { 3436 title = strdup(title); 3437 if (!title) { 3438 pr_warning("failed to strdup program title\n"); 3439 return ERR_PTR(-ENOMEM); 3440 } 3441 } 3442 3443 return title; 3444 } 3445 3446 int bpf_program__fd(const struct bpf_program *prog) 3447 { 3448 return bpf_program__nth_fd(prog, 0); 3449 } 3450 3451 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances, 3452 bpf_program_prep_t prep) 3453 { 3454 int *instances_fds; 3455 3456 if (nr_instances <= 0 || !prep) 3457 return -EINVAL; 3458 3459 if (prog->instances.nr > 0 || prog->instances.fds) { 3460 pr_warning("Can't set pre-processor after loading\n"); 3461 return -EINVAL; 3462 } 3463 3464 instances_fds = malloc(sizeof(int) * nr_instances); 3465 if (!instances_fds) { 3466 pr_warning("alloc memory failed for fds\n"); 3467 return -ENOMEM; 3468 } 3469 3470 /* fill all fd with -1 */ 3471 memset(instances_fds, -1, sizeof(int) * nr_instances); 3472 3473 prog->instances.nr = nr_instances; 3474 prog->instances.fds = instances_fds; 3475 prog->preprocessor = prep; 3476 return 0; 3477 } 3478 3479 int bpf_program__nth_fd(const struct bpf_program *prog, int n) 3480 { 3481 int fd; 3482 3483 if (!prog) 3484 return -EINVAL; 3485 3486 if (n >= prog->instances.nr || n < 0) { 3487 pr_warning("Can't get the %dth fd from program %s: only %d instances\n", 3488 n, prog->section_name, prog->instances.nr); 3489 return -EINVAL; 3490 } 3491 3492 fd = prog->instances.fds[n]; 3493 if (fd < 0) { 3494 pr_warning("%dth instance of program '%s' is invalid\n", 3495 n, prog->section_name); 3496 return -ENOENT; 3497 } 3498 3499 return fd; 3500 } 3501 3502 void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) 3503 { 3504 prog->type = type; 3505 } 3506 3507 static bool bpf_program__is_type(const struct bpf_program *prog, 3508 enum bpf_prog_type type) 3509 { 3510 return prog ? (prog->type == type) : false; 3511 } 3512 3513 #define BPF_PROG_TYPE_FNS(NAME, TYPE) \ 3514 int bpf_program__set_##NAME(struct bpf_program *prog) \ 3515 { \ 3516 if (!prog) \ 3517 return -EINVAL; \ 3518 bpf_program__set_type(prog, TYPE); \ 3519 return 0; \ 3520 } \ 3521 \ 3522 bool bpf_program__is_##NAME(const struct bpf_program *prog) \ 3523 { \ 3524 return bpf_program__is_type(prog, TYPE); \ 3525 } \ 3526 3527 BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER); 3528 BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE); 3529 BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS); 3530 BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT); 3531 BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT); 3532 BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT); 3533 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP); 3534 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT); 3535 3536 void bpf_program__set_expected_attach_type(struct bpf_program *prog, 3537 enum bpf_attach_type type) 3538 { 3539 prog->expected_attach_type = type; 3540 } 3541 3542 #define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \ 3543 { string, sizeof(string) - 1, ptype, eatype, is_attachable, atype } 3544 3545 /* Programs that can NOT be attached. 
*/ 3546 #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0) 3547 3548 /* Programs that can be attached. */ 3549 #define BPF_APROG_SEC(string, ptype, atype) \ 3550 BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype) 3551 3552 /* Programs that must specify expected attach type at load time. */ 3553 #define BPF_EAPROG_SEC(string, ptype, eatype) \ 3554 BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype) 3555 3556 /* Programs that can be attached but attach type can't be identified by section 3557 * name. Kept for backward compatibility. 3558 */ 3559 #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype) 3560 3561 static const struct { 3562 const char *sec; 3563 size_t len; 3564 enum bpf_prog_type prog_type; 3565 enum bpf_attach_type expected_attach_type; 3566 int is_attachable; 3567 enum bpf_attach_type attach_type; 3568 } section_names[] = { 3569 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER), 3570 BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE), 3571 BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE), 3572 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS), 3573 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT), 3574 BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT), 3575 BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT), 3576 BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), 3577 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), 3578 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), 3579 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT), 3580 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT), 3581 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL), 3582 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB, 3583 BPF_CGROUP_INET_INGRESS), 3584 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB, 3585 BPF_CGROUP_INET_EGRESS), 3586 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB), 3587 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK, 3588 BPF_CGROUP_INET_SOCK_CREATE), 3589 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK, 3590 BPF_CGROUP_INET4_POST_BIND), 3591 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK, 3592 BPF_CGROUP_INET6_POST_BIND), 3593 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE, 3594 BPF_CGROUP_DEVICE), 3595 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS, 3596 BPF_CGROUP_SOCK_OPS), 3597 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB, 3598 BPF_SK_SKB_STREAM_PARSER), 3599 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB, 3600 BPF_SK_SKB_STREAM_VERDICT), 3601 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB), 3602 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG, 3603 BPF_SK_MSG_VERDICT), 3604 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2, 3605 BPF_LIRC_MODE2), 3606 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR, 3607 BPF_FLOW_DISSECTOR), 3608 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 3609 BPF_CGROUP_INET4_BIND), 3610 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 3611 BPF_CGROUP_INET6_BIND), 3612 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 3613 BPF_CGROUP_INET4_CONNECT), 3614 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 3615 BPF_CGROUP_INET6_CONNECT), 3616 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 3617 BPF_CGROUP_UDP4_SENDMSG), 3618 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 3619 BPF_CGROUP_UDP6_SENDMSG), 3620 BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 3621 BPF_CGROUP_UDP4_RECVMSG), 3622 
BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 3623 BPF_CGROUP_UDP6_RECVMSG), 3624 BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL, 3625 BPF_CGROUP_SYSCTL), 3626 BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT, 3627 BPF_CGROUP_GETSOCKOPT), 3628 BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT, 3629 BPF_CGROUP_SETSOCKOPT), 3630 }; 3631 3632 #undef BPF_PROG_SEC_IMPL 3633 #undef BPF_PROG_SEC 3634 #undef BPF_APROG_SEC 3635 #undef BPF_EAPROG_SEC 3636 #undef BPF_APROG_COMPAT 3637 3638 #define MAX_TYPE_NAME_SIZE 32 3639 3640 static char *libbpf_get_type_names(bool attach_type) 3641 { 3642 int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE; 3643 char *buf; 3644 3645 buf = malloc(len); 3646 if (!buf) 3647 return NULL; 3648 3649 buf[0] = '\0'; 3650 /* Forge string buf with all available names */ 3651 for (i = 0; i < ARRAY_SIZE(section_names); i++) { 3652 if (attach_type && !section_names[i].is_attachable) 3653 continue; 3654 3655 if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) { 3656 free(buf); 3657 return NULL; 3658 } 3659 strcat(buf, " "); 3660 strcat(buf, section_names[i].sec); 3661 } 3662 3663 return buf; 3664 } 3665 3666 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, 3667 enum bpf_attach_type *expected_attach_type) 3668 { 3669 char *type_names; 3670 int i; 3671 3672 if (!name) 3673 return -EINVAL; 3674 3675 for (i = 0; i < ARRAY_SIZE(section_names); i++) { 3676 if (strncmp(name, section_names[i].sec, section_names[i].len)) 3677 continue; 3678 *prog_type = section_names[i].prog_type; 3679 *expected_attach_type = section_names[i].expected_attach_type; 3680 return 0; 3681 } 3682 pr_warning("failed to guess program type based on ELF section name '%s'\n", name); 3683 type_names = libbpf_get_type_names(false); 3684 if (type_names != NULL) { 3685 pr_info("supported section(type) names are:%s\n", type_names); 3686 free(type_names); 3687 } 3688 3689 return -EINVAL; 3690 } 3691 3692 int libbpf_attach_type_by_name(const char *name, 3693 enum bpf_attach_type *attach_type) 3694 { 3695 char *type_names; 3696 int i; 3697 3698 if (!name) 3699 return -EINVAL; 3700 3701 for (i = 0; i < ARRAY_SIZE(section_names); i++) { 3702 if (strncmp(name, section_names[i].sec, section_names[i].len)) 3703 continue; 3704 if (!section_names[i].is_attachable) 3705 return -EINVAL; 3706 *attach_type = section_names[i].attach_type; 3707 return 0; 3708 } 3709 pr_warning("failed to guess attach type based on ELF section name '%s'\n", name); 3710 type_names = libbpf_get_type_names(true); 3711 if (type_names != NULL) { 3712 pr_info("attachable section(type) names are:%s\n", type_names); 3713 free(type_names); 3714 } 3715 3716 return -EINVAL; 3717 } 3718 3719 static int 3720 bpf_program__identify_section(struct bpf_program *prog, 3721 enum bpf_prog_type *prog_type, 3722 enum bpf_attach_type *expected_attach_type) 3723 { 3724 return libbpf_prog_type_by_name(prog->section_name, prog_type, 3725 expected_attach_type); 3726 } 3727 3728 int bpf_map__fd(const struct bpf_map *map) 3729 { 3730 return map ? map->fd : -EINVAL; 3731 } 3732 3733 const struct bpf_map_def *bpf_map__def(const struct bpf_map *map) 3734 { 3735 return map ? &map->def : ERR_PTR(-EINVAL); 3736 } 3737 3738 const char *bpf_map__name(const struct bpf_map *map) 3739 { 3740 return map ? map->name : NULL; 3741 } 3742 3743 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) 3744 { 3745 return map ? 
map->btf_key_type_id : 0; 3746 } 3747 3748 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map) 3749 { 3750 return map ? map->btf_value_type_id : 0; 3751 } 3752 3753 int bpf_map__set_priv(struct bpf_map *map, void *priv, 3754 bpf_map_clear_priv_t clear_priv) 3755 { 3756 if (!map) 3757 return -EINVAL; 3758 3759 if (map->priv) { 3760 if (map->clear_priv) 3761 map->clear_priv(map, map->priv); 3762 } 3763 3764 map->priv = priv; 3765 map->clear_priv = clear_priv; 3766 return 0; 3767 } 3768 3769 void *bpf_map__priv(const struct bpf_map *map) 3770 { 3771 return map ? map->priv : ERR_PTR(-EINVAL); 3772 } 3773 3774 bool bpf_map__is_offload_neutral(const struct bpf_map *map) 3775 { 3776 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; 3777 } 3778 3779 bool bpf_map__is_internal(const struct bpf_map *map) 3780 { 3781 return map->libbpf_type != LIBBPF_MAP_UNSPEC; 3782 } 3783 3784 void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) 3785 { 3786 map->map_ifindex = ifindex; 3787 } 3788 3789 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) 3790 { 3791 if (!bpf_map_type__is_map_in_map(map->def.type)) { 3792 pr_warning("error: unsupported map type\n"); 3793 return -EINVAL; 3794 } 3795 if (map->inner_map_fd != -1) { 3796 pr_warning("error: inner_map_fd already specified\n"); 3797 return -EINVAL; 3798 } 3799 map->inner_map_fd = fd; 3800 return 0; 3801 } 3802 3803 static struct bpf_map * 3804 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) 3805 { 3806 ssize_t idx; 3807 struct bpf_map *s, *e; 3808 3809 if (!obj || !obj->maps) 3810 return NULL; 3811 3812 s = obj->maps; 3813 e = obj->maps + obj->nr_maps; 3814 3815 if ((m < s) || (m >= e)) { 3816 pr_warning("error in %s: map handler doesn't belong to object\n", 3817 __func__); 3818 return NULL; 3819 } 3820 3821 idx = (m - obj->maps) + i; 3822 if (idx >= obj->nr_maps || idx < 0) 3823 return NULL; 3824 return &obj->maps[idx]; 3825 } 3826 3827 struct bpf_map * 3828 bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj) 3829 { 3830 if (prev == NULL) 3831 return obj->maps; 3832 3833 return __bpf_map__iter(prev, obj, 1); 3834 } 3835 3836 struct bpf_map * 3837 bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj) 3838 { 3839 if (next == NULL) { 3840 if (!obj->nr_maps) 3841 return NULL; 3842 return obj->maps + obj->nr_maps - 1; 3843 } 3844 3845 return __bpf_map__iter(next, obj, -1); 3846 } 3847 3848 struct bpf_map * 3849 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name) 3850 { 3851 struct bpf_map *pos; 3852 3853 bpf_object__for_each_map(pos, obj) { 3854 if (pos->name && !strcmp(pos->name, name)) 3855 return pos; 3856 } 3857 return NULL; 3858 } 3859 3860 int 3861 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name) 3862 { 3863 return bpf_map__fd(bpf_object__find_map_by_name(obj, name)); 3864 } 3865 3866 struct bpf_map * 3867 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset) 3868 { 3869 return ERR_PTR(-ENOTSUP); 3870 } 3871 3872 long libbpf_get_error(const void *ptr) 3873 { 3874 return PTR_ERR_OR_ZERO(ptr); 3875 } 3876 3877 int bpf_prog_load(const char *file, enum bpf_prog_type type, 3878 struct bpf_object **pobj, int *prog_fd) 3879 { 3880 struct bpf_prog_load_attr attr; 3881 3882 memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); 3883 attr.file = file; 3884 attr.prog_type = type; 3885 attr.expected_attach_type = 0; 3886 3887 return bpf_prog_load_xattr(&attr, pobj, prog_fd); 3888 } 3889 3890 int 
bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, 3891 struct bpf_object **pobj, int *prog_fd) 3892 { 3893 struct bpf_object_open_attr open_attr = {}; 3894 struct bpf_program *prog, *first_prog = NULL; 3895 enum bpf_attach_type expected_attach_type; 3896 enum bpf_prog_type prog_type; 3897 struct bpf_object *obj; 3898 struct bpf_map *map; 3899 int err; 3900 3901 if (!attr) 3902 return -EINVAL; 3903 if (!attr->file) 3904 return -EINVAL; 3905 3906 open_attr.file = attr->file; 3907 open_attr.prog_type = attr->prog_type; 3908 3909 obj = bpf_object__open_xattr(&open_attr); 3910 if (IS_ERR_OR_NULL(obj)) 3911 return -ENOENT; 3912 3913 bpf_object__for_each_program(prog, obj) { 3914 /* 3915 * If type is not specified, try to guess it based on 3916 * section name. 3917 */ 3918 prog_type = attr->prog_type; 3919 prog->prog_ifindex = attr->ifindex; 3920 expected_attach_type = attr->expected_attach_type; 3921 if (prog_type == BPF_PROG_TYPE_UNSPEC) { 3922 err = bpf_program__identify_section(prog, &prog_type, 3923 &expected_attach_type); 3924 if (err < 0) { 3925 bpf_object__close(obj); 3926 return -EINVAL; 3927 } 3928 } 3929 3930 bpf_program__set_type(prog, prog_type); 3931 bpf_program__set_expected_attach_type(prog, 3932 expected_attach_type); 3933 3934 prog->log_level = attr->log_level; 3935 prog->prog_flags = attr->prog_flags; 3936 if (!first_prog) 3937 first_prog = prog; 3938 } 3939 3940 bpf_object__for_each_map(map, obj) { 3941 if (!bpf_map__is_offload_neutral(map)) 3942 map->map_ifindex = attr->ifindex; 3943 } 3944 3945 if (!first_prog) { 3946 pr_warning("object file doesn't contain bpf program\n"); 3947 bpf_object__close(obj); 3948 return -ENOENT; 3949 } 3950 3951 err = bpf_object__load(obj); 3952 if (err) { 3953 bpf_object__close(obj); 3954 return -EINVAL; 3955 } 3956 3957 *pobj = obj; 3958 *prog_fd = bpf_program__fd(first_prog); 3959 return 0; 3960 } 3961 3962 struct bpf_link { 3963 int (*destroy)(struct bpf_link *link); 3964 }; 3965 3966 int bpf_link__destroy(struct bpf_link *link) 3967 { 3968 int err; 3969 3970 if (!link) 3971 return 0; 3972 3973 err = link->destroy(link); 3974 free(link); 3975 3976 return err; 3977 } 3978 3979 struct bpf_link_fd { 3980 struct bpf_link link; /* has to be at the top of struct */ 3981 int fd; /* hook FD */ 3982 }; 3983 3984 static int bpf_link__destroy_perf_event(struct bpf_link *link) 3985 { 3986 struct bpf_link_fd *l = (void *)link; 3987 int err; 3988 3989 err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0); 3990 if (err) 3991 err = -errno; 3992 3993 close(l->fd); 3994 return err; 3995 } 3996 3997 struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, 3998 int pfd) 3999 { 4000 char errmsg[STRERR_BUFSIZE]; 4001 struct bpf_link_fd *link; 4002 int prog_fd, err; 4003 4004 if (pfd < 0) { 4005 pr_warning("program '%s': invalid perf event FD %d\n", 4006 bpf_program__title(prog, false), pfd); 4007 return ERR_PTR(-EINVAL); 4008 } 4009 prog_fd = bpf_program__fd(prog); 4010 if (prog_fd < 0) { 4011 pr_warning("program '%s': can't attach BPF program w/o FD (did you load it?)\n", 4012 bpf_program__title(prog, false)); 4013 return ERR_PTR(-EINVAL); 4014 } 4015 4016 link = malloc(sizeof(*link)); 4017 if (!link) 4018 return ERR_PTR(-ENOMEM); 4019 link->link.destroy = &bpf_link__destroy_perf_event; 4020 link->fd = pfd; 4021 4022 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) { 4023 err = -errno; 4024 free(link); 4025 pr_warning("program '%s': failed to attach to pfd %d: %s\n", 4026 bpf_program__title(prog, false), pfd, 4027 libbpf_strerror_r(err, 
errmsg, sizeof(errmsg))); 4028 return ERR_PTR(err); 4029 } 4030 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { 4031 err = -errno; 4032 free(link); 4033 pr_warning("program '%s': failed to enable pfd %d: %s\n", 4034 bpf_program__title(prog, false), pfd, 4035 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 4036 return ERR_PTR(err); 4037 } 4038 return (struct bpf_link *)link; 4039 } 4040 4041 /* 4042 * this function is expected to parse integer in the range of [0, 2^31-1] from 4043 * given file using scanf format string fmt. If actual parsed value is 4044 * negative, the result might be indistinguishable from error 4045 */ 4046 static int parse_uint_from_file(const char *file, const char *fmt) 4047 { 4048 char buf[STRERR_BUFSIZE]; 4049 int err, ret; 4050 FILE *f; 4051 4052 f = fopen(file, "r"); 4053 if (!f) { 4054 err = -errno; 4055 pr_debug("failed to open '%s': %s\n", file, 4056 libbpf_strerror_r(err, buf, sizeof(buf))); 4057 return err; 4058 } 4059 err = fscanf(f, fmt, &ret); 4060 if (err != 1) { 4061 err = err == EOF ? -EIO : -errno; 4062 pr_debug("failed to parse '%s': %s\n", file, 4063 libbpf_strerror_r(err, buf, sizeof(buf))); 4064 fclose(f); 4065 return err; 4066 } 4067 fclose(f); 4068 return ret; 4069 } 4070 4071 static int determine_kprobe_perf_type(void) 4072 { 4073 const char *file = "/sys/bus/event_source/devices/kprobe/type"; 4074 4075 return parse_uint_from_file(file, "%d\n"); 4076 } 4077 4078 static int determine_uprobe_perf_type(void) 4079 { 4080 const char *file = "/sys/bus/event_source/devices/uprobe/type"; 4081 4082 return parse_uint_from_file(file, "%d\n"); 4083 } 4084 4085 static int determine_kprobe_retprobe_bit(void) 4086 { 4087 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe"; 4088 4089 return parse_uint_from_file(file, "config:%d\n"); 4090 } 4091 4092 static int determine_uprobe_retprobe_bit(void) 4093 { 4094 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe"; 4095 4096 return parse_uint_from_file(file, "config:%d\n"); 4097 } 4098 4099 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, 4100 uint64_t offset, int pid) 4101 { 4102 struct perf_event_attr attr = {}; 4103 char errmsg[STRERR_BUFSIZE]; 4104 int type, pfd, err; 4105 4106 type = uprobe ? determine_uprobe_perf_type() 4107 : determine_kprobe_perf_type(); 4108 if (type < 0) { 4109 pr_warning("failed to determine %s perf type: %s\n", 4110 uprobe ? "uprobe" : "kprobe", 4111 libbpf_strerror_r(type, errmsg, sizeof(errmsg))); 4112 return type; 4113 } 4114 if (retprobe) { 4115 int bit = uprobe ? determine_uprobe_retprobe_bit() 4116 : determine_kprobe_retprobe_bit(); 4117 4118 if (bit < 0) { 4119 pr_warning("failed to determine %s retprobe bit: %s\n", 4120 uprobe ? "uprobe" : "kprobe", 4121 libbpf_strerror_r(bit, errmsg, 4122 sizeof(errmsg))); 4123 return bit; 4124 } 4125 attr.config |= 1 << bit; 4126 } 4127 attr.size = sizeof(attr); 4128 attr.type = type; 4129 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */ 4130 attr.config2 = offset; /* kprobe_addr or probe_offset */ 4131 4132 /* pid filter is meaningful only for uprobes */ 4133 pfd = syscall(__NR_perf_event_open, &attr, 4134 pid < 0 ? -1 : pid /* pid */, 4135 pid == -1 ? 0 : -1 /* cpu */, 4136 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); 4137 if (pfd < 0) { 4138 err = -errno; 4139 pr_warning("%s perf_event_open() failed: %s\n", 4140 uprobe ? 
"uprobe" : "kprobe", 4141 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 4142 return err; 4143 } 4144 return pfd; 4145 } 4146 4147 struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog, 4148 bool retprobe, 4149 const char *func_name) 4150 { 4151 char errmsg[STRERR_BUFSIZE]; 4152 struct bpf_link *link; 4153 int pfd, err; 4154 4155 pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name, 4156 0 /* offset */, -1 /* pid */); 4157 if (pfd < 0) { 4158 pr_warning("program '%s': failed to create %s '%s' perf event: %s\n", 4159 bpf_program__title(prog, false), 4160 retprobe ? "kretprobe" : "kprobe", func_name, 4161 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); 4162 return ERR_PTR(pfd); 4163 } 4164 link = bpf_program__attach_perf_event(prog, pfd); 4165 if (IS_ERR(link)) { 4166 close(pfd); 4167 err = PTR_ERR(link); 4168 pr_warning("program '%s': failed to attach to %s '%s': %s\n", 4169 bpf_program__title(prog, false), 4170 retprobe ? "kretprobe" : "kprobe", func_name, 4171 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 4172 return link; 4173 } 4174 return link; 4175 } 4176 4177 struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog, 4178 bool retprobe, pid_t pid, 4179 const char *binary_path, 4180 size_t func_offset) 4181 { 4182 char errmsg[STRERR_BUFSIZE]; 4183 struct bpf_link *link; 4184 int pfd, err; 4185 4186 pfd = perf_event_open_probe(true /* uprobe */, retprobe, 4187 binary_path, func_offset, pid); 4188 if (pfd < 0) { 4189 pr_warning("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n", 4190 bpf_program__title(prog, false), 4191 retprobe ? "uretprobe" : "uprobe", 4192 binary_path, func_offset, 4193 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); 4194 return ERR_PTR(pfd); 4195 } 4196 link = bpf_program__attach_perf_event(prog, pfd); 4197 if (IS_ERR(link)) { 4198 close(pfd); 4199 err = PTR_ERR(link); 4200 pr_warning("program '%s': failed to attach to %s '%s:0x%zx': %s\n", 4201 bpf_program__title(prog, false), 4202 retprobe ? 
"uretprobe" : "uprobe", 4203 binary_path, func_offset, 4204 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 4205 return link; 4206 } 4207 return link; 4208 } 4209 4210 static int determine_tracepoint_id(const char *tp_category, 4211 const char *tp_name) 4212 { 4213 char file[PATH_MAX]; 4214 int ret; 4215 4216 ret = snprintf(file, sizeof(file), 4217 "/sys/kernel/debug/tracing/events/%s/%s/id", 4218 tp_category, tp_name); 4219 if (ret < 0) 4220 return -errno; 4221 if (ret >= sizeof(file)) { 4222 pr_debug("tracepoint %s/%s path is too long\n", 4223 tp_category, tp_name); 4224 return -E2BIG; 4225 } 4226 return parse_uint_from_file(file, "%d\n"); 4227 } 4228 4229 static int perf_event_open_tracepoint(const char *tp_category, 4230 const char *tp_name) 4231 { 4232 struct perf_event_attr attr = {}; 4233 char errmsg[STRERR_BUFSIZE]; 4234 int tp_id, pfd, err; 4235 4236 tp_id = determine_tracepoint_id(tp_category, tp_name); 4237 if (tp_id < 0) { 4238 pr_warning("failed to determine tracepoint '%s/%s' perf event ID: %s\n", 4239 tp_category, tp_name, 4240 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg))); 4241 return tp_id; 4242 } 4243 4244 attr.type = PERF_TYPE_TRACEPOINT; 4245 attr.size = sizeof(attr); 4246 attr.config = tp_id; 4247 4248 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */, 4249 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); 4250 if (pfd < 0) { 4251 err = -errno; 4252 pr_warning("tracepoint '%s/%s' perf_event_open() failed: %s\n", 4253 tp_category, tp_name, 4254 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 4255 return err; 4256 } 4257 return pfd; 4258 } 4259 4260 struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog, 4261 const char *tp_category, 4262 const char *tp_name) 4263 { 4264 char errmsg[STRERR_BUFSIZE]; 4265 struct bpf_link *link; 4266 int pfd, err; 4267 4268 pfd = perf_event_open_tracepoint(tp_category, tp_name); 4269 if (pfd < 0) { 4270 pr_warning("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n", 4271 bpf_program__title(prog, false), 4272 tp_category, tp_name, 4273 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); 4274 return ERR_PTR(pfd); 4275 } 4276 link = bpf_program__attach_perf_event(prog, pfd); 4277 if (IS_ERR(link)) { 4278 close(pfd); 4279 err = PTR_ERR(link); 4280 pr_warning("program '%s': failed to attach to tracepoint '%s/%s': %s\n", 4281 bpf_program__title(prog, false), 4282 tp_category, tp_name, 4283 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 4284 return link; 4285 } 4286 return link; 4287 } 4288 4289 static int bpf_link__destroy_fd(struct bpf_link *link) 4290 { 4291 struct bpf_link_fd *l = (void *)link; 4292 4293 return close(l->fd); 4294 } 4295 4296 struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog, 4297 const char *tp_name) 4298 { 4299 char errmsg[STRERR_BUFSIZE]; 4300 struct bpf_link_fd *link; 4301 int prog_fd, pfd; 4302 4303 prog_fd = bpf_program__fd(prog); 4304 if (prog_fd < 0) { 4305 pr_warning("program '%s': can't attach before loaded\n", 4306 bpf_program__title(prog, false)); 4307 return ERR_PTR(-EINVAL); 4308 } 4309 4310 link = malloc(sizeof(*link)); 4311 if (!link) 4312 return ERR_PTR(-ENOMEM); 4313 link->link.destroy = &bpf_link__destroy_fd; 4314 4315 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd); 4316 if (pfd < 0) { 4317 pfd = -errno; 4318 free(link); 4319 pr_warning("program '%s': failed to attach to raw tracepoint '%s': %s\n", 4320 bpf_program__title(prog, false), tp_name, 4321 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); 4322 return ERR_PTR(pfd); 4323 } 
static int bpf_link__destroy_fd(struct bpf_link *link)
{
	struct bpf_link_fd *l = (void *)link;

	return close(l->fd);
}

struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
						    const char *tp_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link_fd *link;
	int prog_fd, pfd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warning("program '%s': can't attach before loaded\n",
			   bpf_program__title(prog, false));
		return ERR_PTR(-EINVAL);
	}

	link = malloc(sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->link.destroy = &bpf_link__destroy_fd;

	pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
	if (pfd < 0) {
		pfd = -errno;
		free(link);
		pr_warning("program '%s': failed to attach to raw tracepoint '%s': %s\n",
			   bpf_program__title(prog, false), tp_name,
			   libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link->fd = pfd;
	return (struct bpf_link *)link;
}

enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			/* record wraps around the end of the ring buffer;
			 * reassemble it in a contiguous scratch buffer
			 */
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	ring_buffer_write_tail(header, data_tail);
	return ret;
}
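/*
 * Minimal sketch of driving bpf_perf_event_read_simple() directly; the
 * perf_buffer API below does this internally, and all names here are
 * hypothetical. mmap_mem must point at a perf ring buffer mapping (one
 * metadata page followed by mmap_size bytes of data, where mmap_size is
 * a power-of-two multiple of the page size):
 *
 *	static enum bpf_perf_event_ret
 *	handle_event(struct perf_event_header *hdr, void *private_data)
 *	{
 *		return LIBBPF_PERF_EVENT_CONT;
 *	}
 *
 *	void *copy_mem = NULL;
 *	size_t copy_size = 0;
 *	enum bpf_perf_event_ret ret;
 *
 *	ret = bpf_perf_event_read_simple(mmap_mem, mmap_size, page_size,
 *					 &copy_mem, &copy_size,
 *					 handle_event, NULL);
 *	free(copy_mem);
 */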
struct perf_buffer;

struct perf_buffer_params {
	struct perf_event_attr *attr;
	/* if event_cb is specified, it takes precedence */
	perf_buffer_event_fn event_cb;
	/* sample_cb and lost_cb are higher-level common-case callbacks */
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx;
	int cpu_cnt;
	int *cpus;
	int *map_keys;
};

struct perf_cpu_buf {
	struct perf_buffer *pb;
	void *base; /* mmap()'ed memory */
	void *buf; /* for reconstructing segmented data */
	size_t buf_size;
	int fd;
	int cpu;
	int map_key;
};

struct perf_buffer {
	perf_buffer_event_fn event_cb;
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx; /* passed into callbacks */

	size_t page_size;
	size_t mmap_size;
	struct perf_cpu_buf **cpu_bufs;
	struct epoll_event *events;
	int cpu_cnt;
	int epoll_fd; /* epoll instance FD */
	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
};

static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
				      struct perf_cpu_buf *cpu_buf)
{
	if (!cpu_buf)
		return;
	if (cpu_buf->base &&
	    munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
		pr_warning("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
	if (cpu_buf->fd >= 0) {
		ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
		close(cpu_buf->fd);
	}
	free(cpu_buf->buf);
	free(cpu_buf);
}

void perf_buffer__free(struct perf_buffer *pb)
{
	int i;

	if (!pb)
		return;
	if (pb->cpu_bufs) {
		for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) {
			struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

			bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
			perf_buffer__free_cpu_buf(pb, cpu_buf);
		}
		free(pb->cpu_bufs);
	}
	if (pb->epoll_fd >= 0)
		close(pb->epoll_fd);
	free(pb->events);
	free(pb);
}

static struct perf_cpu_buf *
perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
			  int cpu, int map_key)
{
	struct perf_cpu_buf *cpu_buf;
	char msg[STRERR_BUFSIZE];
	int err;

	cpu_buf = calloc(1, sizeof(*cpu_buf));
	if (!cpu_buf)
		return ERR_PTR(-ENOMEM);

	cpu_buf->pb = pb;
	cpu_buf->cpu = cpu;
	cpu_buf->map_key = map_key;

	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
			      -1, PERF_FLAG_FD_CLOEXEC);
	if (cpu_buf->fd < 0) {
		err = -errno;
		pr_warning("failed to open perf buffer event on cpu #%d: %s\n",
			   cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
			     PROT_READ | PROT_WRITE, MAP_SHARED,
			     cpu_buf->fd, 0);
	if (cpu_buf->base == MAP_FAILED) {
		cpu_buf->base = NULL;
		err = -errno;
		pr_warning("failed to mmap perf buffer on cpu #%d: %s\n",
			   cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		pr_warning("failed to enable perf buffer event on cpu #%d: %s\n",
			   cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	return cpu_buf;

error:
	perf_buffer__free_cpu_buf(pb, cpu_buf);
	return (struct perf_cpu_buf *)ERR_PTR(err);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p);

struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
				     const struct perf_buffer_opts *opts)
{
	struct perf_buffer_params p = {};
	struct perf_event_attr attr = {
		.config = PERF_COUNT_SW_BPF_OUTPUT,
		.type = PERF_TYPE_SOFTWARE,
		.sample_type = PERF_SAMPLE_RAW,
		.sample_period = 1,
		.wakeup_events = 1,
	};

	p.attr = &attr;
	p.sample_cb = opts ? opts->sample_cb : NULL;
	p.lost_cb = opts ? opts->lost_cb : NULL;
	p.ctx = opts ? opts->ctx : NULL;

	return __perf_buffer__new(map_fd, page_cnt, &p);
}
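/*
 * Usage sketch (hypothetical callbacks; map_fd must refer to a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map):
 *
 *	static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		...
 *	}
 *
 *	struct perf_buffer_opts pb_opts = {};
 *	struct perf_buffer *pb;
 *
 *	pb_opts.sample_cb = handle_sample;
 *	pb = perf_buffer__new(map_fd, 8, &pb_opts);
 *	if (IS_ERR(pb))
 *		return PTR_ERR(pb);
 */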
struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt,
		     const struct perf_buffer_raw_opts *opts)
{
	struct perf_buffer_params p = {};

	p.attr = opts->attr;
	p.event_cb = opts->event_cb;
	p.ctx = opts->ctx;
	p.cpu_cnt = opts->cpu_cnt;
	p.cpus = opts->cpus;
	p.map_keys = opts->map_keys;

	return __perf_buffer__new(map_fd, page_cnt, &p);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p)
{
	struct bpf_map_info map = {};
	char msg[STRERR_BUFSIZE];
	struct perf_buffer *pb;
	__u32 map_info_len;
	int err, i;

	if (page_cnt & (page_cnt - 1)) {
		pr_warning("page count should be a power of two, but is %zu\n",
			   page_cnt);
		return ERR_PTR(-EINVAL);
	}

	map_info_len = sizeof(map);
	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
	if (err) {
		err = -errno;
		pr_warning("failed to get map info for map FD %d: %s\n",
			   map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
		return ERR_PTR(err);
	}

	if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_warning("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			   map.name);
		return ERR_PTR(-EINVAL);
	}

	pb = calloc(1, sizeof(*pb));
	if (!pb)
		return ERR_PTR(-ENOMEM);

	pb->event_cb = p->event_cb;
	pb->sample_cb = p->sample_cb;
	pb->lost_cb = p->lost_cb;
	pb->ctx = p->ctx;

	pb->page_size = getpagesize();
	pb->mmap_size = pb->page_size * page_cnt;
	pb->map_fd = map_fd;

	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (pb->epoll_fd < 0) {
		err = -errno;
		pr_warning("failed to create epoll instance: %s\n",
			   libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (p->cpu_cnt > 0) {
		pb->cpu_cnt = p->cpu_cnt;
	} else {
		pb->cpu_cnt = libbpf_num_possible_cpus();
		if (pb->cpu_cnt < 0) {
			err = pb->cpu_cnt;
			goto error;
		}
		if (map.max_entries < pb->cpu_cnt)
			pb->cpu_cnt = map.max_entries;
	}

	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
	if (!pb->events) {
		err = -ENOMEM;
		pr_warning("failed to allocate events: out of memory\n");
		goto error;
	}
	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
	if (!pb->cpu_bufs) {
		err = -ENOMEM;
		pr_warning("failed to allocate buffers: out of memory\n");
		goto error;
	}

	for (i = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf;
		int cpu, map_key;

		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;

		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
		if (IS_ERR(cpu_buf)) {
			err = PTR_ERR(cpu_buf);
			goto error;
		}

		pb->cpu_bufs[i] = cpu_buf;

		err = bpf_map_update_elem(pb->map_fd, &map_key,
					  &cpu_buf->fd, 0);
		if (err) {
			err = -errno;
			pr_warning("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
				   cpu, map_key, cpu_buf->fd,
				   libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}

		pb->events[i].events = EPOLLIN;
		pb->events[i].data.ptr = cpu_buf;
		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
			      &pb->events[i]) < 0) {
			err = -errno;
			pr_warning("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
				   cpu, cpu_buf->fd,
				   libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}
	}

	return pb;

error:
	if (pb)
		perf_buffer__free(pb);
	return ERR_PTR(err);
}
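/*
 * Usage sketch for perf_buffer__new_raw() above (hypothetical values;
 * attr is a caller-filled struct perf_event_attr, handle_raw_event is a
 * caller-defined perf_buffer_event_fn, and cpus[] / map_keys[] must
 * each have cpu_cnt entries):
 *
 *	struct perf_buffer_raw_opts raw_opts = {};
 *	int cpus[] = { 0, 2 }, map_keys[] = { 0, 1 };
 *
 *	raw_opts.attr = &attr;
 *	raw_opts.event_cb = handle_raw_event;
 *	raw_opts.cpu_cnt = 2;
 *	raw_opts.cpus = cpus;
 *	raw_opts.map_keys = map_keys;
 *	pb = perf_buffer__new_raw(map_fd, 8, &raw_opts);
 */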
struct perf_sample_raw {
	struct perf_event_header header;
	uint32_t size;
	char data[0];
};

struct perf_sample_lost {
	struct perf_event_header header;
	uint64_t id;
	uint64_t lost;
	uint64_t sample_id;
};

static enum bpf_perf_event_ret
perf_buffer__process_record(struct perf_event_header *e, void *ctx)
{
	struct perf_cpu_buf *cpu_buf = ctx;
	struct perf_buffer *pb = cpu_buf->pb;
	void *data = e;

	/* user wants full control over parsing perf event */
	if (pb->event_cb)
		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);

	switch (e->type) {
	case PERF_RECORD_SAMPLE: {
		struct perf_sample_raw *s = data;

		if (pb->sample_cb)
			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
		break;
	}
	case PERF_RECORD_LOST: {
		struct perf_sample_lost *s = data;

		if (pb->lost_cb)
			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
		break;
	}
	default:
		pr_warning("unknown perf record type %d\n", e->type);
		return LIBBPF_PERF_EVENT_ERROR;
	}
	return LIBBPF_PERF_EVENT_CONT;
}

static int perf_buffer__process_records(struct perf_buffer *pb,
					struct perf_cpu_buf *cpu_buf)
{
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
					 pb->page_size, &cpu_buf->buf,
					 &cpu_buf->buf_size,
					 perf_buffer__process_record, cpu_buf);
	if (ret != LIBBPF_PERF_EVENT_CONT)
		return ret;
	return 0;
}

int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
{
	int i, cnt, err;

	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
	for (i = 0; i < cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warning("error while processing records: %d\n", err);
			return err;
		}
	}
	return cnt < 0 ? -errno : cnt;
}
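/*
 * Typical event loop (sketch; 100 is the epoll timeout in milliseconds,
 * and exiting is a hypothetical caller-owned flag):
 *
 *	while (!exiting) {
 *		err = perf_buffer__poll(pb, 100);
 *		if (err < 0 && err != -EINTR)
 *			break;
 *	}
 *	perf_buffer__free(pb);
 */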
struct bpf_prog_info_array_desc {
	int	array_offset;	/* e.g. offset of jited_prog_insns */
	int	count_offset;	/* e.g. offset of jited_prog_len */
	int	size_offset;	/* > 0: offset of rec size,
				 * < 0: fixed size of -size_offset
				 */
};

static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},
};

static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
					   int offset)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u32)];
	return -(int)offset;
}

static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
					   int offset)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u64)];
	return -(int)offset;
}

static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
					 __u32 val)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u32)] = val;
}

static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
					 __u64 val)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u64)] = val;
}
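/*
 * Example of the negative-offset convention above: for
 * BPF_PROG_INFO_MAP_IDS, size_offset is -(int)sizeof(__u32), so
 * bpf_prog_info_read_offset_u32(&info, desc->size_offset) yields the
 * fixed 4-byte record size instead of reading a field from info, while
 * the element count still comes from info.nr_map_ids via
 * desc->count_offset.
 */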
struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 data_len = 0;
	int i, err;
	void *ptr;

	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
		return ERR_PTR(-EINVAL);

	/* step 1: get array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s\n", strerror(errno));
		return ERR_PTR(-EFAULT);
	}

	/* step 2: calculate total size of all arrays */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		bool include_array = (arrays & (1UL << i)) > 0;
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		desc = bpf_prog_info_array_desc + i;

		/* kernel is too old to support this field */
		if (info_len < desc->array_offset + sizeof(__u32) ||
		    info_len < desc->count_offset + sizeof(__u32) ||
		    (desc->size_offset > 0 && info_len < desc->size_offset))
			include_array = false;

		if (!include_array) {
			arrays &= ~(1UL << i);	/* clear the bit */
			continue;
		}

		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);

		data_len += count * size;
	}

	/* step 3: allocate contiguous memory */
	data_len = roundup(data_len, sizeof(__u64));
	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
	if (!info_linear)
		return ERR_PTR(-ENOMEM);

	/* step 4: fill data to info_linear->info */
	info_linear->arrays = arrays;
	memset(&info_linear->info, 0, sizeof(info));
	ptr = info_linear->data;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->count_offset, count);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->size_offset, size);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset,
					     ptr_to_u64(ptr));
		ptr += count * size;
	}

	/* step 5: call syscall again to get required arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s\n", strerror(errno));
		free(info_linear);
		return ERR_PTR(-EFAULT);
	}

	/* step 6: verify the data */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 v1, v2;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->count_offset);
		if (v1 != v2)
			pr_warning("%s: mismatch in element count\n", __func__);

		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->size_offset);
		if (v1 != v2)
			pr_warning("%s: mismatch in rec size\n", __func__);
	}

	/* step 7: update info_len and data_len */
	info_linear->info_len = sizeof(struct bpf_prog_info);
	info_linear->data_len = data_len;

	return info_linear;
}
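/*
 * Usage sketch (hypothetical caller; prog_fd is the FD of a loaded
 * program, and only the map IDs array is requested here):
 *
 *	struct bpf_prog_info_linear *info;
 *
 *	info = bpf_program__get_prog_info_linear(prog_fd,
 *						 1UL << BPF_PROG_INFO_MAP_IDS);
 *	if (IS_ERR(info))
 *		return PTR_ERR(info);
 *	...
 *	free(info);
 */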
void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		offs = addr - ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, offs);
	}
}

void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		addr = offs + ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, addr);
	}
}

int libbpf_num_possible_cpus(void)
{
	static const char *fcpu = "/sys/devices/system/cpu/possible";
	int len = 0, n = 0, il = 0, ir = 0;
	unsigned int start = 0, end = 0;
	static int cpus;
	char buf[128];
	int error = 0;
	int fd = -1;

	if (cpus > 0)
		return cpus;

	fd = open(fcpu, O_RDONLY);
	if (fd < 0) {
		error = errno;
		pr_warning("Failed to open file %s: %s\n",
			   fcpu, strerror(error));
		return -error;
	}
	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len <= 0) {
		error = len ? errno : EINVAL;
		pr_warning("Failed to read # of possible cpus from %s: %s\n",
			   fcpu, strerror(error));
		return -error;
	}
	if (len == sizeof(buf)) {
		pr_warning("File %s size overflow\n", fcpu);
		return -EOVERFLOW;
	}
	buf[len] = '\0';

	for (ir = 0, cpus = 0; ir <= len; ir++) {
		/* Each substring separated by ',' has the format \d+-\d+ or \d+ */
		if (buf[ir] == ',' || buf[ir] == '\0') {
			buf[ir] = '\0';
			n = sscanf(&buf[il], "%u-%u", &start, &end);
			if (n <= 0) {
				pr_warning("Failed to get # CPUs from %s\n",
					   &buf[il]);
				return -EINVAL;
			} else if (n == 1) {
				end = start;
			}
			cpus += end - start + 1;
			il = ir + 1;
		}
	}
	if (cpus <= 0) {
		pr_warning("Invalid #CPUs %d from %s\n", cpus, fcpu);
		return -EINVAL;
	}
	return cpus;
}
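/*
 * Illustrative contents of /sys/devices/system/cpu/possible and the
 * resulting counts:
 *
 *	0		->  1 possible CPU
 *	0-7		->  8 possible CPUs
 *	0-3,8-11	->  8 possible CPUs
 */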