// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_util.h"

/* Older elf.h may not define the eBPF machine type yet. */
#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC		0xcafe4a11
#endif

/* vsprintf() in __base_pr() uses nonliteral format string. It may break
 * compilation if user enables corresponding warning. Disable it explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)	__attribute__((format(printf, a, b)))

/*
 * Default print callback: suppress debug-level output, forward everything
 * else to stderr. Return value is whatever vfprintf() reports.
 */
static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

/* Install a user-supplied print callback. Passing NULL silences libbpf. */
void libbpf_set_print(libbpf_print_fn_t fn)
{
	__libbpf_pr = fn;
}

/*
 * Internal logging entry point used by the pr_warning()/pr_debug() family.
 * Routes formatted output through the currently installed callback.
 */
__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}

#define STRERR_BUFSIZE  128

#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

/* Close fd if valid, reset it to -1, and return close()'s result. */
#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif

/* Convert a pointer to the u64 representation used by bpf_attr fields. */
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
};

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* One descriptor per ELF relocation applying to this program. */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* map load: ld_imm64 against a map */
			RELO_CALL,	/* bpf-to-bpf pseudo call */
			RELO_DATA,	/* global data (.data/.rodata/.bss) */
		} type;
		int insn_idx;
		union {
			int map_idx;	/* RELO_LD64/RELO_DATA: index into obj->maps */
			int text_off;	/* RELO_CALL: offset into .text */
		};
	} *reloc_desc;
	int nr_reloc;
	int log_level;

	/* Per-instance prog fds produced by the preprocessor, if any. */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	int btf_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	struct bpf_capabilities *caps;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
};

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= ".data",
	[LIBBPF_MAP_BSS]	= ".bss",
	[LIBBPF_MAP_RODATA]	= ".rodata",
};

struct bpf_map {
	int fd;
	char *name;
	size_t offset;		/* offset of the map def in the "maps" section */
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
};

/* Copies of .rodata/.data used to seed the corresponding internal maps. */
struct bpf_secdata {
	void *rodata;
	void *data;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	struct bpf_secdata sections;

	bool loaded;
	bool has_pseudo_calls;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		int maps_shndx;
		int text_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;

	char path[];	/* flexible array; allocated alongside the object */
};
#define obj_elf_valid(o)	((o)->efile.elf)

/* Close all loaded instances and release load-time metadata. */
void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
290 */ 291 if (prog->instances.nr > 0) { 292 for (i = 0; i < prog->instances.nr; i++) 293 zclose(prog->instances.fds[i]); 294 } else if (prog->instances.nr != -1) { 295 pr_warning("Internal error: instances.nr is %d\n", 296 prog->instances.nr); 297 } 298 299 prog->instances.nr = -1; 300 zfree(&prog->instances.fds); 301 302 zclose(prog->btf_fd); 303 zfree(&prog->func_info); 304 zfree(&prog->line_info); 305 } 306 307 static void bpf_program__exit(struct bpf_program *prog) 308 { 309 if (!prog) 310 return; 311 312 if (prog->clear_priv) 313 prog->clear_priv(prog, prog->priv); 314 315 prog->priv = NULL; 316 prog->clear_priv = NULL; 317 318 bpf_program__unload(prog); 319 zfree(&prog->name); 320 zfree(&prog->section_name); 321 zfree(&prog->pin_name); 322 zfree(&prog->insns); 323 zfree(&prog->reloc_desc); 324 325 prog->nr_reloc = 0; 326 prog->insns_cnt = 0; 327 prog->idx = -1; 328 } 329 330 static char *__bpf_program__pin_name(struct bpf_program *prog) 331 { 332 char *name, *p; 333 334 name = p = strdup(prog->section_name); 335 while ((p = strchr(p, '/'))) 336 *p = '_'; 337 338 return name; 339 } 340 341 static int 342 bpf_program__init(void *data, size_t size, char *section_name, int idx, 343 struct bpf_program *prog) 344 { 345 if (size < sizeof(struct bpf_insn)) { 346 pr_warning("corrupted section '%s'\n", section_name); 347 return -EINVAL; 348 } 349 350 memset(prog, 0, sizeof(*prog)); 351 352 prog->section_name = strdup(section_name); 353 if (!prog->section_name) { 354 pr_warning("failed to alloc name for prog under section(%d) %s\n", 355 idx, section_name); 356 goto errout; 357 } 358 359 prog->pin_name = __bpf_program__pin_name(prog); 360 if (!prog->pin_name) { 361 pr_warning("failed to alloc pin name for prog under section(%d) %s\n", 362 idx, section_name); 363 goto errout; 364 } 365 366 prog->insns = malloc(size); 367 if (!prog->insns) { 368 pr_warning("failed to alloc insns for prog under section %s\n", 369 section_name); 370 goto errout; 371 } 372 prog->insns_cnt = 
size / sizeof(struct bpf_insn); 373 memcpy(prog->insns, data, 374 prog->insns_cnt * sizeof(struct bpf_insn)); 375 prog->idx = idx; 376 prog->instances.fds = NULL; 377 prog->instances.nr = -1; 378 prog->type = BPF_PROG_TYPE_UNSPEC; 379 prog->btf_fd = -1; 380 381 return 0; 382 errout: 383 bpf_program__exit(prog); 384 return -ENOMEM; 385 } 386 387 static int 388 bpf_object__add_program(struct bpf_object *obj, void *data, size_t size, 389 char *section_name, int idx) 390 { 391 struct bpf_program prog, *progs; 392 int nr_progs, err; 393 394 err = bpf_program__init(data, size, section_name, idx, &prog); 395 if (err) 396 return err; 397 398 prog.caps = &obj->caps; 399 progs = obj->programs; 400 nr_progs = obj->nr_programs; 401 402 progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0])); 403 if (!progs) { 404 /* 405 * In this case the original obj->programs 406 * is still valid, so don't need special treat for 407 * bpf_close_object(). 408 */ 409 pr_warning("failed to alloc a new program under section '%s'\n", 410 section_name); 411 bpf_program__exit(&prog); 412 return -ENOMEM; 413 } 414 415 pr_debug("found program %s\n", prog.section_name); 416 obj->programs = progs; 417 obj->nr_programs = nr_progs + 1; 418 prog.obj = obj; 419 progs[nr_progs] = prog; 420 return 0; 421 } 422 423 static int 424 bpf_object__init_prog_names(struct bpf_object *obj) 425 { 426 Elf_Data *symbols = obj->efile.symbols; 427 struct bpf_program *prog; 428 size_t pi, si; 429 430 for (pi = 0; pi < obj->nr_programs; pi++) { 431 const char *name = NULL; 432 433 prog = &obj->programs[pi]; 434 435 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name; 436 si++) { 437 GElf_Sym sym; 438 439 if (!gelf_getsym(symbols, si, &sym)) 440 continue; 441 if (sym.st_shndx != prog->idx) 442 continue; 443 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL) 444 continue; 445 446 name = elf_strptr(obj->efile.elf, 447 obj->efile.strtabidx, 448 sym.st_name); 449 if (!name) { 450 pr_warning("failed to get sym name string 
for prog %s\n", 451 prog->section_name); 452 return -LIBBPF_ERRNO__LIBELF; 453 } 454 } 455 456 if (!name && prog->idx == obj->efile.text_shndx) 457 name = ".text"; 458 459 if (!name) { 460 pr_warning("failed to find sym for prog %s\n", 461 prog->section_name); 462 return -EINVAL; 463 } 464 465 prog->name = strdup(name); 466 if (!prog->name) { 467 pr_warning("failed to allocate memory for prog sym %s\n", 468 name); 469 return -ENOMEM; 470 } 471 } 472 473 return 0; 474 } 475 476 static struct bpf_object *bpf_object__new(const char *path, 477 void *obj_buf, 478 size_t obj_buf_sz) 479 { 480 struct bpf_object *obj; 481 char *end; 482 483 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1); 484 if (!obj) { 485 pr_warning("alloc memory failed for %s\n", path); 486 return ERR_PTR(-ENOMEM); 487 } 488 489 strcpy(obj->path, path); 490 /* Using basename() GNU version which doesn't modify arg. */ 491 strncpy(obj->name, basename((void *)path), 492 sizeof(obj->name) - 1); 493 end = strchr(obj->name, '.'); 494 if (end) 495 *end = 0; 496 497 obj->efile.fd = -1; 498 /* 499 * Caller of this function should also calls 500 * bpf_object__elf_finish() after data collection to return 501 * obj_buf to user. If not, we should duplicate the buffer to 502 * avoid user freeing them before elf finish. 
503 */ 504 obj->efile.obj_buf = obj_buf; 505 obj->efile.obj_buf_sz = obj_buf_sz; 506 obj->efile.maps_shndx = -1; 507 obj->efile.data_shndx = -1; 508 obj->efile.rodata_shndx = -1; 509 obj->efile.bss_shndx = -1; 510 511 obj->loaded = false; 512 513 INIT_LIST_HEAD(&obj->list); 514 list_add(&obj->list, &bpf_objects_list); 515 return obj; 516 } 517 518 static void bpf_object__elf_finish(struct bpf_object *obj) 519 { 520 if (!obj_elf_valid(obj)) 521 return; 522 523 if (obj->efile.elf) { 524 elf_end(obj->efile.elf); 525 obj->efile.elf = NULL; 526 } 527 obj->efile.symbols = NULL; 528 obj->efile.data = NULL; 529 obj->efile.rodata = NULL; 530 obj->efile.bss = NULL; 531 532 zfree(&obj->efile.reloc); 533 obj->efile.nr_reloc = 0; 534 zclose(obj->efile.fd); 535 obj->efile.obj_buf = NULL; 536 obj->efile.obj_buf_sz = 0; 537 } 538 539 static int bpf_object__elf_init(struct bpf_object *obj) 540 { 541 int err = 0; 542 GElf_Ehdr *ep; 543 544 if (obj_elf_valid(obj)) { 545 pr_warning("elf init: internal error\n"); 546 return -LIBBPF_ERRNO__LIBELF; 547 } 548 549 if (obj->efile.obj_buf_sz > 0) { 550 /* 551 * obj_buf should have been validated by 552 * bpf_object__open_buffer(). 
553 */ 554 obj->efile.elf = elf_memory(obj->efile.obj_buf, 555 obj->efile.obj_buf_sz); 556 } else { 557 obj->efile.fd = open(obj->path, O_RDONLY); 558 if (obj->efile.fd < 0) { 559 char errmsg[STRERR_BUFSIZE]; 560 char *cp = libbpf_strerror_r(errno, errmsg, 561 sizeof(errmsg)); 562 563 pr_warning("failed to open %s: %s\n", obj->path, cp); 564 return -errno; 565 } 566 567 obj->efile.elf = elf_begin(obj->efile.fd, 568 LIBBPF_ELF_C_READ_MMAP, 569 NULL); 570 } 571 572 if (!obj->efile.elf) { 573 pr_warning("failed to open %s as ELF file\n", 574 obj->path); 575 err = -LIBBPF_ERRNO__LIBELF; 576 goto errout; 577 } 578 579 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { 580 pr_warning("failed to get EHDR from %s\n", 581 obj->path); 582 err = -LIBBPF_ERRNO__FORMAT; 583 goto errout; 584 } 585 ep = &obj->efile.ehdr; 586 587 /* Old LLVM set e_machine to EM_NONE */ 588 if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) { 589 pr_warning("%s is not an eBPF object file\n", 590 obj->path); 591 err = -LIBBPF_ERRNO__FORMAT; 592 goto errout; 593 } 594 595 return 0; 596 errout: 597 bpf_object__elf_finish(obj); 598 return err; 599 } 600 601 static int 602 bpf_object__check_endianness(struct bpf_object *obj) 603 { 604 static unsigned int const endian = 1; 605 606 switch (obj->efile.ehdr.e_ident[EI_DATA]) { 607 case ELFDATA2LSB: 608 /* We are big endian, BPF obj is little endian. */ 609 if (*(unsigned char const *)&endian != 1) 610 goto mismatch; 611 break; 612 613 case ELFDATA2MSB: 614 /* We are little endian, BPF obj is big endian. 
*/ 615 if (*(unsigned char const *)&endian != 0) 616 goto mismatch; 617 break; 618 default: 619 return -LIBBPF_ERRNO__ENDIAN; 620 } 621 622 return 0; 623 624 mismatch: 625 pr_warning("Error: endianness mismatch.\n"); 626 return -LIBBPF_ERRNO__ENDIAN; 627 } 628 629 static int 630 bpf_object__init_license(struct bpf_object *obj, 631 void *data, size_t size) 632 { 633 memcpy(obj->license, data, 634 min(size, sizeof(obj->license) - 1)); 635 pr_debug("license of %s is %s\n", obj->path, obj->license); 636 return 0; 637 } 638 639 static int 640 bpf_object__init_kversion(struct bpf_object *obj, 641 void *data, size_t size) 642 { 643 __u32 kver; 644 645 if (size != sizeof(kver)) { 646 pr_warning("invalid kver section in %s\n", obj->path); 647 return -LIBBPF_ERRNO__FORMAT; 648 } 649 memcpy(&kver, data, sizeof(kver)); 650 obj->kern_version = kver; 651 pr_debug("kernel version of %s is %x\n", obj->path, 652 obj->kern_version); 653 return 0; 654 } 655 656 static int compare_bpf_map(const void *_a, const void *_b) 657 { 658 const struct bpf_map *a = _a; 659 const struct bpf_map *b = _b; 660 661 return a->offset - b->offset; 662 } 663 664 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) 665 { 666 if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS || 667 type == BPF_MAP_TYPE_HASH_OF_MAPS) 668 return true; 669 return false; 670 } 671 672 static int bpf_object_search_section_size(const struct bpf_object *obj, 673 const char *name, size_t *d_size) 674 { 675 const GElf_Ehdr *ep = &obj->efile.ehdr; 676 Elf *elf = obj->efile.elf; 677 Elf_Scn *scn = NULL; 678 int idx = 0; 679 680 while ((scn = elf_nextscn(elf, scn)) != NULL) { 681 const char *sec_name; 682 Elf_Data *data; 683 GElf_Shdr sh; 684 685 idx++; 686 if (gelf_getshdr(scn, &sh) != &sh) { 687 pr_warning("failed to get section(%d) header from %s\n", 688 idx, obj->path); 689 return -EIO; 690 } 691 692 sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name); 693 if (!sec_name) { 694 pr_warning("failed to get section(%d) name from 
%s\n", 695 idx, obj->path); 696 return -EIO; 697 } 698 699 if (strcmp(name, sec_name)) 700 continue; 701 702 data = elf_getdata(scn, 0); 703 if (!data) { 704 pr_warning("failed to get section(%d) data from %s(%s)\n", 705 idx, name, obj->path); 706 return -EIO; 707 } 708 709 *d_size = data->d_size; 710 return 0; 711 } 712 713 return -ENOENT; 714 } 715 716 int bpf_object__section_size(const struct bpf_object *obj, const char *name, 717 __u32 *size) 718 { 719 int ret = -ENOENT; 720 size_t d_size; 721 722 *size = 0; 723 if (!name) { 724 return -EINVAL; 725 } else if (!strcmp(name, ".data")) { 726 if (obj->efile.data) 727 *size = obj->efile.data->d_size; 728 } else if (!strcmp(name, ".bss")) { 729 if (obj->efile.bss) 730 *size = obj->efile.bss->d_size; 731 } else if (!strcmp(name, ".rodata")) { 732 if (obj->efile.rodata) 733 *size = obj->efile.rodata->d_size; 734 } else { 735 ret = bpf_object_search_section_size(obj, name, &d_size); 736 if (!ret) 737 *size = d_size; 738 } 739 740 return *size ? 
0 : ret; 741 } 742 743 int bpf_object__variable_offset(const struct bpf_object *obj, const char *name, 744 __u32 *off) 745 { 746 Elf_Data *symbols = obj->efile.symbols; 747 const char *sname; 748 size_t si; 749 750 if (!name || !off) 751 return -EINVAL; 752 753 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) { 754 GElf_Sym sym; 755 756 if (!gelf_getsym(symbols, si, &sym)) 757 continue; 758 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL || 759 GELF_ST_TYPE(sym.st_info) != STT_OBJECT) 760 continue; 761 762 sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx, 763 sym.st_name); 764 if (!sname) { 765 pr_warning("failed to get sym name string for var %s\n", 766 name); 767 return -EIO; 768 } 769 if (strcmp(name, sname) == 0) { 770 *off = sym.st_value; 771 return 0; 772 } 773 } 774 775 return -ENOENT; 776 } 777 778 static bool bpf_object__has_maps(const struct bpf_object *obj) 779 { 780 return obj->efile.maps_shndx >= 0 || 781 obj->efile.data_shndx >= 0 || 782 obj->efile.rodata_shndx >= 0 || 783 obj->efile.bss_shndx >= 0; 784 } 785 786 static int 787 bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map, 788 enum libbpf_map_type type, Elf_Data *data, 789 void **data_buff) 790 { 791 struct bpf_map_def *def = &map->def; 792 char map_name[BPF_OBJ_NAME_LEN]; 793 794 map->libbpf_type = type; 795 map->offset = ~(typeof(map->offset))0; 796 snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name, 797 libbpf_type_to_btf_name[type]); 798 map->name = strdup(map_name); 799 if (!map->name) { 800 pr_warning("failed to alloc map name\n"); 801 return -ENOMEM; 802 } 803 804 def->type = BPF_MAP_TYPE_ARRAY; 805 def->key_size = sizeof(int); 806 def->value_size = data->d_size; 807 def->max_entries = 1; 808 def->map_flags = type == LIBBPF_MAP_RODATA ? 
809 BPF_F_RDONLY_PROG : 0; 810 if (data_buff) { 811 *data_buff = malloc(data->d_size); 812 if (!*data_buff) { 813 zfree(&map->name); 814 pr_warning("failed to alloc map content buffer\n"); 815 return -ENOMEM; 816 } 817 memcpy(*data_buff, data->d_buf, data->d_size); 818 } 819 820 pr_debug("map %ld is \"%s\"\n", map - obj->maps, map->name); 821 return 0; 822 } 823 824 static int 825 bpf_object__init_maps(struct bpf_object *obj, int flags) 826 { 827 int i, map_idx, map_def_sz = 0, nr_syms, nr_maps = 0, nr_maps_glob = 0; 828 bool strict = !(flags & MAPS_RELAX_COMPAT); 829 Elf_Data *symbols = obj->efile.symbols; 830 Elf_Data *data = NULL; 831 int ret = 0; 832 833 if (!symbols) 834 return -EINVAL; 835 nr_syms = symbols->d_size / sizeof(GElf_Sym); 836 837 if (obj->efile.maps_shndx >= 0) { 838 Elf_Scn *scn = elf_getscn(obj->efile.elf, 839 obj->efile.maps_shndx); 840 841 if (scn) 842 data = elf_getdata(scn, NULL); 843 if (!scn || !data) { 844 pr_warning("failed to get Elf_Data from map section %d\n", 845 obj->efile.maps_shndx); 846 return -EINVAL; 847 } 848 } 849 850 /* 851 * Count number of maps. Each map has a name. 852 * Array of maps is not supported: only the first element is 853 * considered. 854 * 855 * TODO: Detect array of map and report error. 856 */ 857 if (obj->efile.data_shndx >= 0) 858 nr_maps_glob++; 859 if (obj->efile.rodata_shndx >= 0) 860 nr_maps_glob++; 861 if (obj->efile.bss_shndx >= 0) 862 nr_maps_glob++; 863 for (i = 0; data && i < nr_syms; i++) { 864 GElf_Sym sym; 865 866 if (!gelf_getsym(symbols, i, &sym)) 867 continue; 868 if (sym.st_shndx != obj->efile.maps_shndx) 869 continue; 870 nr_maps++; 871 } 872 873 /* Alloc obj->maps and fill nr_maps. 
*/ 874 pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path, 875 nr_maps, data->d_size); 876 if (!nr_maps && !nr_maps_glob) 877 return 0; 878 879 /* Assume equally sized map definitions */ 880 if (data) { 881 map_def_sz = data->d_size / nr_maps; 882 if (!data->d_size || (data->d_size % nr_maps) != 0) { 883 pr_warning("unable to determine map definition size " 884 "section %s, %d maps in %zd bytes\n", 885 obj->path, nr_maps, data->d_size); 886 return -EINVAL; 887 } 888 } 889 890 nr_maps += nr_maps_glob; 891 obj->maps = calloc(nr_maps, sizeof(obj->maps[0])); 892 if (!obj->maps) { 893 pr_warning("alloc maps for object failed\n"); 894 return -ENOMEM; 895 } 896 obj->nr_maps = nr_maps; 897 898 for (i = 0; i < nr_maps; i++) { 899 /* 900 * fill all fd with -1 so won't close incorrect 901 * fd (fd=0 is stdin) when failure (zclose won't close 902 * negative fd)). 903 */ 904 obj->maps[i].fd = -1; 905 obj->maps[i].inner_map_fd = -1; 906 } 907 908 /* 909 * Fill obj->maps using data in "maps" section. 
910 */ 911 for (i = 0, map_idx = 0; data && i < nr_syms; i++) { 912 GElf_Sym sym; 913 const char *map_name; 914 struct bpf_map_def *def; 915 916 if (!gelf_getsym(symbols, i, &sym)) 917 continue; 918 if (sym.st_shndx != obj->efile.maps_shndx) 919 continue; 920 921 map_name = elf_strptr(obj->efile.elf, 922 obj->efile.strtabidx, 923 sym.st_name); 924 925 obj->maps[map_idx].libbpf_type = LIBBPF_MAP_UNSPEC; 926 obj->maps[map_idx].offset = sym.st_value; 927 if (sym.st_value + map_def_sz > data->d_size) { 928 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n", 929 obj->path, map_name); 930 return -EINVAL; 931 } 932 933 obj->maps[map_idx].name = strdup(map_name); 934 if (!obj->maps[map_idx].name) { 935 pr_warning("failed to alloc map name\n"); 936 return -ENOMEM; 937 } 938 pr_debug("map %d is \"%s\"\n", map_idx, 939 obj->maps[map_idx].name); 940 def = (struct bpf_map_def *)(data->d_buf + sym.st_value); 941 /* 942 * If the definition of the map in the object file fits in 943 * bpf_map_def, copy it. Any extra fields in our version 944 * of bpf_map_def will default to zero as a result of the 945 * calloc above. 946 */ 947 if (map_def_sz <= sizeof(struct bpf_map_def)) { 948 memcpy(&obj->maps[map_idx].def, def, map_def_sz); 949 } else { 950 /* 951 * Here the map structure being read is bigger than what 952 * we expect, truncate if the excess bits are all zero. 953 * If they are not zero, reject this map as 954 * incompatible. 955 */ 956 char *b; 957 for (b = ((char *)def) + sizeof(struct bpf_map_def); 958 b < ((char *)def) + map_def_sz; b++) { 959 if (*b != 0) { 960 pr_warning("maps section in %s: \"%s\" " 961 "has unrecognized, non-zero " 962 "options\n", 963 obj->path, map_name); 964 if (strict) 965 return -EINVAL; 966 } 967 } 968 memcpy(&obj->maps[map_idx].def, def, 969 sizeof(struct bpf_map_def)); 970 } 971 map_idx++; 972 } 973 974 /* 975 * Populate rest of obj->maps with libbpf internal maps. 
976 */ 977 if (obj->efile.data_shndx >= 0) 978 ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++], 979 LIBBPF_MAP_DATA, 980 obj->efile.data, 981 &obj->sections.data); 982 if (!ret && obj->efile.rodata_shndx >= 0) 983 ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++], 984 LIBBPF_MAP_RODATA, 985 obj->efile.rodata, 986 &obj->sections.rodata); 987 if (!ret && obj->efile.bss_shndx >= 0) 988 ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++], 989 LIBBPF_MAP_BSS, 990 obj->efile.bss, NULL); 991 if (!ret) 992 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), 993 compare_bpf_map); 994 return ret; 995 } 996 997 static bool section_have_execinstr(struct bpf_object *obj, int idx) 998 { 999 Elf_Scn *scn; 1000 GElf_Shdr sh; 1001 1002 scn = elf_getscn(obj->efile.elf, idx); 1003 if (!scn) 1004 return false; 1005 1006 if (gelf_getshdr(scn, &sh) != &sh) 1007 return false; 1008 1009 if (sh.sh_flags & SHF_EXECINSTR) 1010 return true; 1011 1012 return false; 1013 } 1014 1015 static int bpf_object__elf_collect(struct bpf_object *obj, int flags) 1016 { 1017 Elf *elf = obj->efile.elf; 1018 GElf_Ehdr *ep = &obj->efile.ehdr; 1019 Elf_Data *btf_ext_data = NULL; 1020 Elf_Data *btf_data = NULL; 1021 Elf_Scn *scn = NULL; 1022 int idx = 0, err = 0; 1023 1024 /* Elf is corrupted/truncated, avoid calling elf_strptr. 
*/ 1025 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) { 1026 pr_warning("failed to get e_shstrndx from %s\n", 1027 obj->path); 1028 return -LIBBPF_ERRNO__FORMAT; 1029 } 1030 1031 while ((scn = elf_nextscn(elf, scn)) != NULL) { 1032 char *name; 1033 GElf_Shdr sh; 1034 Elf_Data *data; 1035 1036 idx++; 1037 if (gelf_getshdr(scn, &sh) != &sh) { 1038 pr_warning("failed to get section(%d) header from %s\n", 1039 idx, obj->path); 1040 err = -LIBBPF_ERRNO__FORMAT; 1041 goto out; 1042 } 1043 1044 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name); 1045 if (!name) { 1046 pr_warning("failed to get section(%d) name from %s\n", 1047 idx, obj->path); 1048 err = -LIBBPF_ERRNO__FORMAT; 1049 goto out; 1050 } 1051 1052 data = elf_getdata(scn, 0); 1053 if (!data) { 1054 pr_warning("failed to get section(%d) data from %s(%s)\n", 1055 idx, name, obj->path); 1056 err = -LIBBPF_ERRNO__FORMAT; 1057 goto out; 1058 } 1059 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", 1060 idx, name, (unsigned long)data->d_size, 1061 (int)sh.sh_link, (unsigned long)sh.sh_flags, 1062 (int)sh.sh_type); 1063 1064 if (strcmp(name, "license") == 0) { 1065 err = bpf_object__init_license(obj, 1066 data->d_buf, 1067 data->d_size); 1068 } else if (strcmp(name, "version") == 0) { 1069 err = bpf_object__init_kversion(obj, 1070 data->d_buf, 1071 data->d_size); 1072 } else if (strcmp(name, "maps") == 0) { 1073 obj->efile.maps_shndx = idx; 1074 } else if (strcmp(name, BTF_ELF_SEC) == 0) { 1075 btf_data = data; 1076 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { 1077 btf_ext_data = data; 1078 } else if (sh.sh_type == SHT_SYMTAB) { 1079 if (obj->efile.symbols) { 1080 pr_warning("bpf: multiple SYMTAB in %s\n", 1081 obj->path); 1082 err = -LIBBPF_ERRNO__FORMAT; 1083 } else { 1084 obj->efile.symbols = data; 1085 obj->efile.strtabidx = sh.sh_link; 1086 } 1087 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) { 1088 if (sh.sh_flags & SHF_EXECINSTR) { 1089 if (strcmp(name, ".text") == 
0) 1090 obj->efile.text_shndx = idx; 1091 err = bpf_object__add_program(obj, data->d_buf, 1092 data->d_size, name, idx); 1093 if (err) { 1094 char errmsg[STRERR_BUFSIZE]; 1095 char *cp = libbpf_strerror_r(-err, errmsg, 1096 sizeof(errmsg)); 1097 1098 pr_warning("failed to alloc program %s (%s): %s", 1099 name, obj->path, cp); 1100 } 1101 } else if (strcmp(name, ".data") == 0) { 1102 obj->efile.data = data; 1103 obj->efile.data_shndx = idx; 1104 } else if (strcmp(name, ".rodata") == 0) { 1105 obj->efile.rodata = data; 1106 obj->efile.rodata_shndx = idx; 1107 } else { 1108 pr_debug("skip section(%d) %s\n", idx, name); 1109 } 1110 } else if (sh.sh_type == SHT_REL) { 1111 void *reloc = obj->efile.reloc; 1112 int nr_reloc = obj->efile.nr_reloc + 1; 1113 int sec = sh.sh_info; /* points to other section */ 1114 1115 /* Only do relo for section with exec instructions */ 1116 if (!section_have_execinstr(obj, sec)) { 1117 pr_debug("skip relo %s(%d) for section(%d)\n", 1118 name, idx, sec); 1119 continue; 1120 } 1121 1122 reloc = reallocarray(reloc, nr_reloc, 1123 sizeof(*obj->efile.reloc)); 1124 if (!reloc) { 1125 pr_warning("realloc failed\n"); 1126 err = -ENOMEM; 1127 } else { 1128 int n = nr_reloc - 1; 1129 1130 obj->efile.reloc = reloc; 1131 obj->efile.nr_reloc = nr_reloc; 1132 1133 obj->efile.reloc[n].shdr = sh; 1134 obj->efile.reloc[n].data = data; 1135 } 1136 } else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) { 1137 obj->efile.bss = data; 1138 obj->efile.bss_shndx = idx; 1139 } else { 1140 pr_debug("skip section(%d) %s\n", idx, name); 1141 } 1142 if (err) 1143 goto out; 1144 } 1145 1146 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) { 1147 pr_warning("Corrupted ELF file: index of strtab invalid\n"); 1148 return LIBBPF_ERRNO__FORMAT; 1149 } 1150 if (btf_data) { 1151 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); 1152 if (IS_ERR(obj->btf)) { 1153 pr_warning("Error loading ELF section %s: %ld. 
Ignored and continue.\n", 1154 BTF_ELF_SEC, PTR_ERR(obj->btf)); 1155 obj->btf = NULL; 1156 } else { 1157 err = btf__finalize_data(obj, obj->btf); 1158 if (!err) 1159 err = btf__load(obj->btf); 1160 if (err) { 1161 pr_warning("Error finalizing and loading %s into kernel: %d. Ignored and continue.\n", 1162 BTF_ELF_SEC, err); 1163 btf__free(obj->btf); 1164 obj->btf = NULL; 1165 err = 0; 1166 } 1167 } 1168 } 1169 if (btf_ext_data) { 1170 if (!obj->btf) { 1171 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n", 1172 BTF_EXT_ELF_SEC, BTF_ELF_SEC); 1173 } else { 1174 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, 1175 btf_ext_data->d_size); 1176 if (IS_ERR(obj->btf_ext)) { 1177 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", 1178 BTF_EXT_ELF_SEC, 1179 PTR_ERR(obj->btf_ext)); 1180 obj->btf_ext = NULL; 1181 } 1182 } 1183 } 1184 if (bpf_object__has_maps(obj)) { 1185 err = bpf_object__init_maps(obj, flags); 1186 if (err) 1187 goto out; 1188 } 1189 err = bpf_object__init_prog_names(obj); 1190 out: 1191 return err; 1192 } 1193 1194 static struct bpf_program * 1195 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx) 1196 { 1197 struct bpf_program *prog; 1198 size_t i; 1199 1200 for (i = 0; i < obj->nr_programs; i++) { 1201 prog = &obj->programs[i]; 1202 if (prog->idx == idx) 1203 return prog; 1204 } 1205 return NULL; 1206 } 1207 1208 struct bpf_program * 1209 bpf_object__find_program_by_title(struct bpf_object *obj, const char *title) 1210 { 1211 struct bpf_program *pos; 1212 1213 bpf_object__for_each_program(pos, obj) { 1214 if (pos->section_name && !strcmp(pos->section_name, title)) 1215 return pos; 1216 } 1217 return NULL; 1218 } 1219 1220 static bool bpf_object__shndx_is_data(const struct bpf_object *obj, 1221 int shndx) 1222 { 1223 return shndx == obj->efile.data_shndx || 1224 shndx == obj->efile.bss_shndx || 1225 shndx == obj->efile.rodata_shndx; 1226 } 1227 1228 static bool 
bpf_object__shndx_is_maps(const struct bpf_object *obj,
			  int shndx)
{
	return shndx == obj->efile.maps_shndx;
}

/* True if relocations against @shndx are something we know how to handle:
 * .text (bpf-to-bpf calls), the maps section, or global data sections.
 */
static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
					      int shndx)
{
	return shndx == obj->efile.text_shndx ||
	       bpf_object__shndx_is_maps(obj, shndx) ||
	       bpf_object__shndx_is_data(obj, shndx);
}

/* Map a global data section index to its internal-map type, or UNSPEC. */
static enum libbpf_map_type
bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
{
	if (shndx == obj->efile.data_shndx)
		return LIBBPF_MAP_DATA;
	else if (shndx == obj->efile.bss_shndx)
		return LIBBPF_MAP_BSS;
	else if (shndx == obj->efile.rodata_shndx)
		return LIBBPF_MAP_RODATA;
	else
		return LIBBPF_MAP_UNSPEC;
}

/* Parse one SHT_REL section into prog->reloc_desc entries: pseudo calls
 * become RELO_CALL, ld_imm64 against maps/data become map references.
 */
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		unsigned int shdr_idx;
		struct bpf_insn *insns = prog->insns;
		enum libbpf_map_type type;
		const char *name;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}

		name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				  sym.st_name) ? : "<?>";

		pr_debug("relo for %lld value %lld name %d (\'%s\')\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name, name);

		shdr_idx = sym.st_shndx;
		if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
			pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n",
				   prog->section_name, shdr_idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		/* bpf-to-bpf subprogram call: record the .text offset. */
		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			obj->has_pseudo_calls = true;
			continue;
		}

		/* Everything else must patch a 64-bit immediate load. */
		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		if (bpf_object__shndx_is_maps(obj, shdr_idx) ||
		    bpf_object__shndx_is_data(obj, shdr_idx)) {
			type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
			/* Non-static global data variables not supported yet. */
			if (type != LIBBPF_MAP_UNSPEC &&
			    GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
				pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
					   name, insn_idx, insns[insn_idx].code);
				return -LIBBPF_ERRNO__RELOC;
			}

			/* Match user maps by symbol offset, internal maps by type. */
			for (map_idx = 0; map_idx < nr_maps; map_idx++) {
				if (maps[map_idx].libbpf_type != type)
					continue;
				if (type != LIBBPF_MAP_UNSPEC ||
				    (type == LIBBPF_MAP_UNSPEC &&
				     maps[map_idx].offset == sym.st_value)) {
1349 pr_debug("relocation: find map %zd (%s) for insn %u\n", 1350 map_idx, maps[map_idx].name, insn_idx); 1351 break; 1352 } 1353 } 1354 1355 if (map_idx >= nr_maps) { 1356 pr_warning("bpf relocation: map_idx %d large than %d\n", 1357 (int)map_idx, (int)nr_maps - 1); 1358 return -LIBBPF_ERRNO__RELOC; 1359 } 1360 1361 prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ? 1362 RELO_DATA : RELO_LD64; 1363 prog->reloc_desc[i].insn_idx = insn_idx; 1364 prog->reloc_desc[i].map_idx = map_idx; 1365 } 1366 } 1367 return 0; 1368 } 1369 1370 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) 1371 { 1372 struct bpf_map_def *def = &map->def; 1373 __u32 key_type_id = 0, value_type_id = 0; 1374 int ret; 1375 1376 if (!bpf_map__is_internal(map)) { 1377 ret = btf__get_map_kv_tids(btf, map->name, def->key_size, 1378 def->value_size, &key_type_id, 1379 &value_type_id); 1380 } else { 1381 /* 1382 * LLVM annotates global data differently in BTF, that is, 1383 * only as '.data', '.bss' or '.rodata'. 1384 */ 1385 ret = btf__find_by_name(btf, 1386 libbpf_type_to_btf_name[map->libbpf_type]); 1387 } 1388 if (ret < 0) 1389 return ret; 1390 1391 map->btf_key_type_id = key_type_id; 1392 map->btf_value_type_id = bpf_map__is_internal(map) ? 
1393 ret : value_type_id; 1394 return 0; 1395 } 1396 1397 int bpf_map__reuse_fd(struct bpf_map *map, int fd) 1398 { 1399 struct bpf_map_info info = {}; 1400 __u32 len = sizeof(info); 1401 int new_fd, err; 1402 char *new_name; 1403 1404 err = bpf_obj_get_info_by_fd(fd, &info, &len); 1405 if (err) 1406 return err; 1407 1408 new_name = strdup(info.name); 1409 if (!new_name) 1410 return -errno; 1411 1412 new_fd = open("/", O_RDONLY | O_CLOEXEC); 1413 if (new_fd < 0) 1414 goto err_free_new_name; 1415 1416 new_fd = dup3(fd, new_fd, O_CLOEXEC); 1417 if (new_fd < 0) 1418 goto err_close_new_fd; 1419 1420 err = zclose(map->fd); 1421 if (err) 1422 goto err_close_new_fd; 1423 free(map->name); 1424 1425 map->fd = new_fd; 1426 map->name = new_name; 1427 map->def.type = info.type; 1428 map->def.key_size = info.key_size; 1429 map->def.value_size = info.value_size; 1430 map->def.max_entries = info.max_entries; 1431 map->def.map_flags = info.map_flags; 1432 map->btf_key_type_id = info.btf_key_type_id; 1433 map->btf_value_type_id = info.btf_value_type_id; 1434 1435 return 0; 1436 1437 err_close_new_fd: 1438 close(new_fd); 1439 err_free_new_name: 1440 free(new_name); 1441 return -errno; 1442 } 1443 1444 int bpf_map__resize(struct bpf_map *map, __u32 max_entries) 1445 { 1446 if (!map || !max_entries) 1447 return -EINVAL; 1448 1449 /* If map already created, its attributes can't be changed. 
*/ 1450 if (map->fd >= 0) 1451 return -EBUSY; 1452 1453 map->def.max_entries = max_entries; 1454 1455 return 0; 1456 } 1457 1458 static int 1459 bpf_object__probe_name(struct bpf_object *obj) 1460 { 1461 struct bpf_load_program_attr attr; 1462 char *cp, errmsg[STRERR_BUFSIZE]; 1463 struct bpf_insn insns[] = { 1464 BPF_MOV64_IMM(BPF_REG_0, 0), 1465 BPF_EXIT_INSN(), 1466 }; 1467 int ret; 1468 1469 /* make sure basic loading works */ 1470 1471 memset(&attr, 0, sizeof(attr)); 1472 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 1473 attr.insns = insns; 1474 attr.insns_cnt = ARRAY_SIZE(insns); 1475 attr.license = "GPL"; 1476 1477 ret = bpf_load_program_xattr(&attr, NULL, 0); 1478 if (ret < 0) { 1479 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1480 pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n", 1481 __func__, cp, errno); 1482 return -errno; 1483 } 1484 close(ret); 1485 1486 /* now try the same program, but with the name */ 1487 1488 attr.name = "test"; 1489 ret = bpf_load_program_xattr(&attr, NULL, 0); 1490 if (ret >= 0) { 1491 obj->caps.name = 1; 1492 close(ret); 1493 } 1494 1495 return 0; 1496 } 1497 1498 static int 1499 bpf_object__probe_caps(struct bpf_object *obj) 1500 { 1501 return bpf_object__probe_name(obj); 1502 } 1503 1504 static int 1505 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) 1506 { 1507 char *cp, errmsg[STRERR_BUFSIZE]; 1508 int err, zero = 0; 1509 __u8 *data; 1510 1511 /* Nothing to do here since kernel already zero-initializes .bss map. */ 1512 if (map->libbpf_type == LIBBPF_MAP_BSS) 1513 return 0; 1514 1515 data = map->libbpf_type == LIBBPF_MAP_DATA ? 1516 obj->sections.data : obj->sections.rodata; 1517 1518 err = bpf_map_update_elem(map->fd, &zero, data, 0); 1519 /* Freeze .rodata map as read-only from syscall side. 
*/ 1520 if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) { 1521 err = bpf_map_freeze(map->fd); 1522 if (err) { 1523 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1524 pr_warning("Error freezing map(%s) as read-only: %s\n", 1525 map->name, cp); 1526 err = 0; 1527 } 1528 } 1529 return err; 1530 } 1531 1532 static int 1533 bpf_object__create_maps(struct bpf_object *obj) 1534 { 1535 struct bpf_create_map_attr create_attr = {}; 1536 unsigned int i; 1537 int err; 1538 1539 for (i = 0; i < obj->nr_maps; i++) { 1540 struct bpf_map *map = &obj->maps[i]; 1541 struct bpf_map_def *def = &map->def; 1542 char *cp, errmsg[STRERR_BUFSIZE]; 1543 int *pfd = &map->fd; 1544 1545 if (map->fd >= 0) { 1546 pr_debug("skip map create (preset) %s: fd=%d\n", 1547 map->name, map->fd); 1548 continue; 1549 } 1550 1551 if (obj->caps.name) 1552 create_attr.name = map->name; 1553 create_attr.map_ifindex = map->map_ifindex; 1554 create_attr.map_type = def->type; 1555 create_attr.map_flags = def->map_flags; 1556 create_attr.key_size = def->key_size; 1557 create_attr.value_size = def->value_size; 1558 create_attr.max_entries = def->max_entries; 1559 create_attr.btf_fd = 0; 1560 create_attr.btf_key_type_id = 0; 1561 create_attr.btf_value_type_id = 0; 1562 if (bpf_map_type__is_map_in_map(def->type) && 1563 map->inner_map_fd >= 0) 1564 create_attr.inner_map_fd = map->inner_map_fd; 1565 1566 if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) { 1567 create_attr.btf_fd = btf__fd(obj->btf); 1568 create_attr.btf_key_type_id = map->btf_key_type_id; 1569 create_attr.btf_value_type_id = map->btf_value_type_id; 1570 } 1571 1572 *pfd = bpf_create_map_xattr(&create_attr); 1573 if (*pfd < 0 && create_attr.btf_key_type_id) { 1574 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1575 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). 
Retrying without BTF.\n", 1576 map->name, cp, errno); 1577 create_attr.btf_fd = 0; 1578 create_attr.btf_key_type_id = 0; 1579 create_attr.btf_value_type_id = 0; 1580 map->btf_key_type_id = 0; 1581 map->btf_value_type_id = 0; 1582 *pfd = bpf_create_map_xattr(&create_attr); 1583 } 1584 1585 if (*pfd < 0) { 1586 size_t j; 1587 1588 err = *pfd; 1589 err_out: 1590 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1591 pr_warning("failed to create map (name: '%s'): %s\n", 1592 map->name, cp); 1593 for (j = 0; j < i; j++) 1594 zclose(obj->maps[j].fd); 1595 return err; 1596 } 1597 1598 if (bpf_map__is_internal(map)) { 1599 err = bpf_object__populate_internal_map(obj, map); 1600 if (err < 0) { 1601 zclose(*pfd); 1602 goto err_out; 1603 } 1604 } 1605 1606 pr_debug("create map %s: fd=%d\n", map->name, *pfd); 1607 } 1608 1609 return 0; 1610 } 1611 1612 static int 1613 check_btf_ext_reloc_err(struct bpf_program *prog, int err, 1614 void *btf_prog_info, const char *info_name) 1615 { 1616 if (err != -ENOENT) { 1617 pr_warning("Error in loading %s for sec %s.\n", 1618 info_name, prog->section_name); 1619 return err; 1620 } 1621 1622 /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */ 1623 1624 if (btf_prog_info) { 1625 /* 1626 * Some info has already been found but has problem 1627 * in the last btf_ext reloc. Must have to error 1628 * out. 1629 */ 1630 pr_warning("Error in relocating %s for sec %s.\n", 1631 info_name, prog->section_name); 1632 return err; 1633 } 1634 1635 /* 1636 * Have problem loading the very first info. Ignore 1637 * the rest. 1638 */ 1639 pr_warning("Cannot find %s for main program sec %s. 
Ignore all %s.\n", 1640 info_name, prog->section_name, info_name); 1641 return 0; 1642 } 1643 1644 static int 1645 bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj, 1646 const char *section_name, __u32 insn_offset) 1647 { 1648 int err; 1649 1650 if (!insn_offset || prog->func_info) { 1651 /* 1652 * !insn_offset => main program 1653 * 1654 * For sub prog, the main program's func_info has to 1655 * be loaded first (i.e. prog->func_info != NULL) 1656 */ 1657 err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext, 1658 section_name, insn_offset, 1659 &prog->func_info, 1660 &prog->func_info_cnt); 1661 if (err) 1662 return check_btf_ext_reloc_err(prog, err, 1663 prog->func_info, 1664 "bpf_func_info"); 1665 1666 prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext); 1667 } 1668 1669 if (!insn_offset || prog->line_info) { 1670 err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext, 1671 section_name, insn_offset, 1672 &prog->line_info, 1673 &prog->line_info_cnt); 1674 if (err) 1675 return check_btf_ext_reloc_err(prog, err, 1676 prog->line_info, 1677 "bpf_line_info"); 1678 1679 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext); 1680 } 1681 1682 if (!insn_offset) 1683 prog->btf_fd = btf__fd(obj->btf); 1684 1685 return 0; 1686 } 1687 1688 static int 1689 bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, 1690 struct reloc_desc *relo) 1691 { 1692 struct bpf_insn *insn, *new_insn; 1693 struct bpf_program *text; 1694 size_t new_cnt; 1695 int err; 1696 1697 if (relo->type != RELO_CALL) 1698 return -LIBBPF_ERRNO__RELOC; 1699 1700 if (prog->idx == obj->efile.text_shndx) { 1701 pr_warning("relo in .text insn %d into off %d\n", 1702 relo->insn_idx, relo->text_off); 1703 return -LIBBPF_ERRNO__RELOC; 1704 } 1705 1706 if (prog->main_prog_cnt == 0) { 1707 text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx); 1708 if (!text) { 1709 pr_warning("no .text section found yet relo into text exist\n"); 
1710 return -LIBBPF_ERRNO__RELOC; 1711 } 1712 new_cnt = prog->insns_cnt + text->insns_cnt; 1713 new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn)); 1714 if (!new_insn) { 1715 pr_warning("oom in prog realloc\n"); 1716 return -ENOMEM; 1717 } 1718 1719 if (obj->btf_ext) { 1720 err = bpf_program_reloc_btf_ext(prog, obj, 1721 text->section_name, 1722 prog->insns_cnt); 1723 if (err) 1724 return err; 1725 } 1726 1727 memcpy(new_insn + prog->insns_cnt, text->insns, 1728 text->insns_cnt * sizeof(*insn)); 1729 prog->insns = new_insn; 1730 prog->main_prog_cnt = prog->insns_cnt; 1731 prog->insns_cnt = new_cnt; 1732 pr_debug("added %zd insn from %s to prog %s\n", 1733 text->insns_cnt, text->section_name, 1734 prog->section_name); 1735 } 1736 insn = &prog->insns[relo->insn_idx]; 1737 insn->imm += prog->main_prog_cnt - relo->insn_idx; 1738 return 0; 1739 } 1740 1741 static int 1742 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj) 1743 { 1744 int i, err; 1745 1746 if (!prog) 1747 return 0; 1748 1749 if (obj->btf_ext) { 1750 err = bpf_program_reloc_btf_ext(prog, obj, 1751 prog->section_name, 0); 1752 if (err) 1753 return err; 1754 } 1755 1756 if (!prog->reloc_desc) 1757 return 0; 1758 1759 for (i = 0; i < prog->nr_reloc; i++) { 1760 if (prog->reloc_desc[i].type == RELO_LD64 || 1761 prog->reloc_desc[i].type == RELO_DATA) { 1762 bool relo_data = prog->reloc_desc[i].type == RELO_DATA; 1763 struct bpf_insn *insns = prog->insns; 1764 int insn_idx, map_idx; 1765 1766 insn_idx = prog->reloc_desc[i].insn_idx; 1767 map_idx = prog->reloc_desc[i].map_idx; 1768 1769 if (insn_idx + 1 >= (int)prog->insns_cnt) { 1770 pr_warning("relocation out of range: '%s'\n", 1771 prog->section_name); 1772 return -LIBBPF_ERRNO__RELOC; 1773 } 1774 1775 if (!relo_data) { 1776 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD; 1777 } else { 1778 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE; 1779 insns[insn_idx + 1].imm = insns[insn_idx].imm; 1780 } 1781 insns[insn_idx].imm = 
obj->maps[map_idx].fd; 1782 } else if (prog->reloc_desc[i].type == RELO_CALL) { 1783 err = bpf_program__reloc_text(prog, obj, 1784 &prog->reloc_desc[i]); 1785 if (err) 1786 return err; 1787 } 1788 } 1789 1790 zfree(&prog->reloc_desc); 1791 prog->nr_reloc = 0; 1792 return 0; 1793 } 1794 1795 1796 static int 1797 bpf_object__relocate(struct bpf_object *obj) 1798 { 1799 struct bpf_program *prog; 1800 size_t i; 1801 int err; 1802 1803 for (i = 0; i < obj->nr_programs; i++) { 1804 prog = &obj->programs[i]; 1805 1806 err = bpf_program__relocate(prog, obj); 1807 if (err) { 1808 pr_warning("failed to relocate '%s'\n", 1809 prog->section_name); 1810 return err; 1811 } 1812 } 1813 return 0; 1814 } 1815 1816 static int bpf_object__collect_reloc(struct bpf_object *obj) 1817 { 1818 int i, err; 1819 1820 if (!obj_elf_valid(obj)) { 1821 pr_warning("Internal error: elf object is closed\n"); 1822 return -LIBBPF_ERRNO__INTERNAL; 1823 } 1824 1825 for (i = 0; i < obj->efile.nr_reloc; i++) { 1826 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr; 1827 Elf_Data *data = obj->efile.reloc[i].data; 1828 int idx = shdr->sh_info; 1829 struct bpf_program *prog; 1830 1831 if (shdr->sh_type != SHT_REL) { 1832 pr_warning("internal error at %d\n", __LINE__); 1833 return -LIBBPF_ERRNO__INTERNAL; 1834 } 1835 1836 prog = bpf_object__find_prog_by_idx(obj, idx); 1837 if (!prog) { 1838 pr_warning("relocation failed: no section(%d)\n", idx); 1839 return -LIBBPF_ERRNO__RELOC; 1840 } 1841 1842 err = bpf_program__collect_reloc(prog, 1843 shdr, data, 1844 obj); 1845 if (err) 1846 return err; 1847 } 1848 return 0; 1849 } 1850 1851 static int 1852 load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, 1853 char *license, __u32 kern_version, int *pfd) 1854 { 1855 struct bpf_load_program_attr load_attr; 1856 char *cp, errmsg[STRERR_BUFSIZE]; 1857 int log_buf_size = BPF_LOG_BUF_SIZE; 1858 char *log_buf; 1859 int ret; 1860 1861 memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); 1862 
load_attr.prog_type = prog->type; 1863 load_attr.expected_attach_type = prog->expected_attach_type; 1864 if (prog->caps->name) 1865 load_attr.name = prog->name; 1866 load_attr.insns = insns; 1867 load_attr.insns_cnt = insns_cnt; 1868 load_attr.license = license; 1869 load_attr.kern_version = kern_version; 1870 load_attr.prog_ifindex = prog->prog_ifindex; 1871 load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0; 1872 load_attr.func_info = prog->func_info; 1873 load_attr.func_info_rec_size = prog->func_info_rec_size; 1874 load_attr.func_info_cnt = prog->func_info_cnt; 1875 load_attr.line_info = prog->line_info; 1876 load_attr.line_info_rec_size = prog->line_info_rec_size; 1877 load_attr.line_info_cnt = prog->line_info_cnt; 1878 load_attr.log_level = prog->log_level; 1879 if (!load_attr.insns || !load_attr.insns_cnt) 1880 return -EINVAL; 1881 1882 retry_load: 1883 log_buf = malloc(log_buf_size); 1884 if (!log_buf) 1885 pr_warning("Alloc log buffer for bpf loader error, continue without log\n"); 1886 1887 ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size); 1888 1889 if (ret >= 0) { 1890 if (load_attr.log_level) 1891 pr_debug("verifier log:\n%s", log_buf); 1892 *pfd = ret; 1893 ret = 0; 1894 goto out; 1895 } 1896 1897 if (errno == ENOSPC) { 1898 log_buf_size <<= 1; 1899 free(log_buf); 1900 goto retry_load; 1901 } 1902 ret = -LIBBPF_ERRNO__LOAD; 1903 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1904 pr_warning("load bpf program failed: %s\n", cp); 1905 1906 if (log_buf && log_buf[0] != '\0') { 1907 ret = -LIBBPF_ERRNO__VERIFY; 1908 pr_warning("-- BEGIN DUMP LOG ---\n"); 1909 pr_warning("\n%s\n", log_buf); 1910 pr_warning("-- END LOG --\n"); 1911 } else if (load_attr.insns_cnt >= BPF_MAXINSNS) { 1912 pr_warning("Program too large (%zu insns), at most %d insns\n", 1913 load_attr.insns_cnt, BPF_MAXINSNS); 1914 ret = -LIBBPF_ERRNO__PROG2BIG; 1915 } else { 1916 /* Wrong program type? 
*/ 1917 if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) { 1918 int fd; 1919 1920 load_attr.prog_type = BPF_PROG_TYPE_KPROBE; 1921 load_attr.expected_attach_type = 0; 1922 fd = bpf_load_program_xattr(&load_attr, NULL, 0); 1923 if (fd >= 0) { 1924 close(fd); 1925 ret = -LIBBPF_ERRNO__PROGTYPE; 1926 goto out; 1927 } 1928 } 1929 1930 if (log_buf) 1931 ret = -LIBBPF_ERRNO__KVER; 1932 } 1933 1934 out: 1935 free(log_buf); 1936 return ret; 1937 } 1938 1939 int 1940 bpf_program__load(struct bpf_program *prog, 1941 char *license, __u32 kern_version) 1942 { 1943 int err = 0, fd, i; 1944 1945 if (prog->instances.nr < 0 || !prog->instances.fds) { 1946 if (prog->preprocessor) { 1947 pr_warning("Internal error: can't load program '%s'\n", 1948 prog->section_name); 1949 return -LIBBPF_ERRNO__INTERNAL; 1950 } 1951 1952 prog->instances.fds = malloc(sizeof(int)); 1953 if (!prog->instances.fds) { 1954 pr_warning("Not enough memory for BPF fds\n"); 1955 return -ENOMEM; 1956 } 1957 prog->instances.nr = 1; 1958 prog->instances.fds[0] = -1; 1959 } 1960 1961 if (!prog->preprocessor) { 1962 if (prog->instances.nr != 1) { 1963 pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n", 1964 prog->section_name, prog->instances.nr); 1965 } 1966 err = load_program(prog, prog->insns, prog->insns_cnt, 1967 license, kern_version, &fd); 1968 if (!err) 1969 prog->instances.fds[0] = fd; 1970 goto out; 1971 } 1972 1973 for (i = 0; i < prog->instances.nr; i++) { 1974 struct bpf_prog_prep_result result; 1975 bpf_program_prep_t preprocessor = prog->preprocessor; 1976 1977 memset(&result, 0, sizeof(result)); 1978 err = preprocessor(prog, i, prog->insns, 1979 prog->insns_cnt, &result); 1980 if (err) { 1981 pr_warning("Preprocessing the %dth instance of program '%s' failed\n", 1982 i, prog->section_name); 1983 goto out; 1984 } 1985 1986 if (!result.new_insn_ptr || !result.new_insn_cnt) { 1987 pr_debug("Skip loading the %dth instance of program '%s'\n", 1988 i, prog->section_name); 1989 prog->instances.fds[i] 
= -1; 1990 if (result.pfd) 1991 *result.pfd = -1; 1992 continue; 1993 } 1994 1995 err = load_program(prog, result.new_insn_ptr, 1996 result.new_insn_cnt, 1997 license, kern_version, &fd); 1998 1999 if (err) { 2000 pr_warning("Loading the %dth instance of program '%s' failed\n", 2001 i, prog->section_name); 2002 goto out; 2003 } 2004 2005 if (result.pfd) 2006 *result.pfd = fd; 2007 prog->instances.fds[i] = fd; 2008 } 2009 out: 2010 if (err) 2011 pr_warning("failed to load program '%s'\n", 2012 prog->section_name); 2013 zfree(&prog->insns); 2014 prog->insns_cnt = 0; 2015 return err; 2016 } 2017 2018 static bool bpf_program__is_function_storage(struct bpf_program *prog, 2019 struct bpf_object *obj) 2020 { 2021 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls; 2022 } 2023 2024 static int 2025 bpf_object__load_progs(struct bpf_object *obj) 2026 { 2027 size_t i; 2028 int err; 2029 2030 for (i = 0; i < obj->nr_programs; i++) { 2031 if (bpf_program__is_function_storage(&obj->programs[i], obj)) 2032 continue; 2033 err = bpf_program__load(&obj->programs[i], 2034 obj->license, 2035 obj->kern_version); 2036 if (err) 2037 return err; 2038 } 2039 return 0; 2040 } 2041 2042 static bool bpf_prog_type__needs_kver(enum bpf_prog_type type) 2043 { 2044 switch (type) { 2045 case BPF_PROG_TYPE_SOCKET_FILTER: 2046 case BPF_PROG_TYPE_SCHED_CLS: 2047 case BPF_PROG_TYPE_SCHED_ACT: 2048 case BPF_PROG_TYPE_XDP: 2049 case BPF_PROG_TYPE_CGROUP_SKB: 2050 case BPF_PROG_TYPE_CGROUP_SOCK: 2051 case BPF_PROG_TYPE_LWT_IN: 2052 case BPF_PROG_TYPE_LWT_OUT: 2053 case BPF_PROG_TYPE_LWT_XMIT: 2054 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2055 case BPF_PROG_TYPE_SOCK_OPS: 2056 case BPF_PROG_TYPE_SK_SKB: 2057 case BPF_PROG_TYPE_CGROUP_DEVICE: 2058 case BPF_PROG_TYPE_SK_MSG: 2059 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2060 case BPF_PROG_TYPE_LIRC_MODE2: 2061 case BPF_PROG_TYPE_SK_REUSEPORT: 2062 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2063 case BPF_PROG_TYPE_UNSPEC: 2064 case BPF_PROG_TYPE_TRACEPOINT: 
2065 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2066 case BPF_PROG_TYPE_PERF_EVENT: 2067 return false; 2068 case BPF_PROG_TYPE_KPROBE: 2069 default: 2070 return true; 2071 } 2072 } 2073 2074 static int bpf_object__validate(struct bpf_object *obj, bool needs_kver) 2075 { 2076 if (needs_kver && obj->kern_version == 0) { 2077 pr_warning("%s doesn't provide kernel version\n", 2078 obj->path); 2079 return -LIBBPF_ERRNO__KVERSION; 2080 } 2081 return 0; 2082 } 2083 2084 static struct bpf_object * 2085 __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz, 2086 bool needs_kver, int flags) 2087 { 2088 struct bpf_object *obj; 2089 int err; 2090 2091 if (elf_version(EV_CURRENT) == EV_NONE) { 2092 pr_warning("failed to init libelf for %s\n", path); 2093 return ERR_PTR(-LIBBPF_ERRNO__LIBELF); 2094 } 2095 2096 obj = bpf_object__new(path, obj_buf, obj_buf_sz); 2097 if (IS_ERR(obj)) 2098 return obj; 2099 2100 CHECK_ERR(bpf_object__elf_init(obj), err, out); 2101 CHECK_ERR(bpf_object__check_endianness(obj), err, out); 2102 CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out); 2103 CHECK_ERR(bpf_object__collect_reloc(obj), err, out); 2104 CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out); 2105 2106 bpf_object__elf_finish(obj); 2107 return obj; 2108 out: 2109 bpf_object__close(obj); 2110 return ERR_PTR(err); 2111 } 2112 2113 struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr, 2114 int flags) 2115 { 2116 /* param validation */ 2117 if (!attr->file) 2118 return NULL; 2119 2120 pr_debug("loading %s\n", attr->file); 2121 2122 return __bpf_object__open(attr->file, NULL, 0, 2123 bpf_prog_type__needs_kver(attr->prog_type), 2124 flags); 2125 } 2126 2127 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr) 2128 { 2129 return __bpf_object__open_xattr(attr, 0); 2130 } 2131 2132 struct bpf_object *bpf_object__open(const char *path) 2133 { 2134 struct bpf_object_open_attr attr = { 2135 .file = path, 2136 .prog_type = 
BPF_PROG_TYPE_UNSPEC, 2137 }; 2138 2139 return bpf_object__open_xattr(&attr); 2140 } 2141 2142 struct bpf_object *bpf_object__open_buffer(void *obj_buf, 2143 size_t obj_buf_sz, 2144 const char *name) 2145 { 2146 char tmp_name[64]; 2147 2148 /* param validation */ 2149 if (!obj_buf || obj_buf_sz <= 0) 2150 return NULL; 2151 2152 if (!name) { 2153 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", 2154 (unsigned long)obj_buf, 2155 (unsigned long)obj_buf_sz); 2156 tmp_name[sizeof(tmp_name) - 1] = '\0'; 2157 name = tmp_name; 2158 } 2159 pr_debug("loading object '%s' from buffer\n", 2160 name); 2161 2162 return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true); 2163 } 2164 2165 int bpf_object__unload(struct bpf_object *obj) 2166 { 2167 size_t i; 2168 2169 if (!obj) 2170 return -EINVAL; 2171 2172 for (i = 0; i < obj->nr_maps; i++) 2173 zclose(obj->maps[i].fd); 2174 2175 for (i = 0; i < obj->nr_programs; i++) 2176 bpf_program__unload(&obj->programs[i]); 2177 2178 return 0; 2179 } 2180 2181 int bpf_object__load(struct bpf_object *obj) 2182 { 2183 int err; 2184 2185 if (!obj) 2186 return -EINVAL; 2187 2188 if (obj->loaded) { 2189 pr_warning("object should not be loaded twice\n"); 2190 return -EINVAL; 2191 } 2192 2193 obj->loaded = true; 2194 2195 CHECK_ERR(bpf_object__probe_caps(obj), err, out); 2196 CHECK_ERR(bpf_object__create_maps(obj), err, out); 2197 CHECK_ERR(bpf_object__relocate(obj), err, out); 2198 CHECK_ERR(bpf_object__load_progs(obj), err, out); 2199 2200 return 0; 2201 out: 2202 bpf_object__unload(obj); 2203 pr_warning("failed to load object '%s'\n", obj->path); 2204 return err; 2205 } 2206 2207 static int check_path(const char *path) 2208 { 2209 char *cp, errmsg[STRERR_BUFSIZE]; 2210 struct statfs st_fs; 2211 char *dname, *dir; 2212 int err = 0; 2213 2214 if (path == NULL) 2215 return -EINVAL; 2216 2217 dname = strdup(path); 2218 if (dname == NULL) 2219 return -ENOMEM; 2220 2221 dir = dirname(dname); 2222 if (statfs(dir, &st_fs)) { 2223 cp = 
libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 2224 pr_warning("failed to statfs %s: %s\n", dir, cp); 2225 err = -errno; 2226 } 2227 free(dname); 2228 2229 if (!err && st_fs.f_type != BPF_FS_MAGIC) { 2230 pr_warning("specified path %s is not on BPF FS\n", path); 2231 err = -EINVAL; 2232 } 2233 2234 return err; 2235 } 2236 2237 int bpf_program__pin_instance(struct bpf_program *prog, const char *path, 2238 int instance) 2239 { 2240 char *cp, errmsg[STRERR_BUFSIZE]; 2241 int err; 2242 2243 err = check_path(path); 2244 if (err) 2245 return err; 2246 2247 if (prog == NULL) { 2248 pr_warning("invalid program pointer\n"); 2249 return -EINVAL; 2250 } 2251 2252 if (instance < 0 || instance >= prog->instances.nr) { 2253 pr_warning("invalid prog instance %d of prog %s (max %d)\n", 2254 instance, prog->section_name, prog->instances.nr); 2255 return -EINVAL; 2256 } 2257 2258 if (bpf_obj_pin(prog->instances.fds[instance], path)) { 2259 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 2260 pr_warning("failed to pin program: %s\n", cp); 2261 return -errno; 2262 } 2263 pr_debug("pinned program '%s'\n", path); 2264 2265 return 0; 2266 } 2267 2268 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, 2269 int instance) 2270 { 2271 int err; 2272 2273 err = check_path(path); 2274 if (err) 2275 return err; 2276 2277 if (prog == NULL) { 2278 pr_warning("invalid program pointer\n"); 2279 return -EINVAL; 2280 } 2281 2282 if (instance < 0 || instance >= prog->instances.nr) { 2283 pr_warning("invalid prog instance %d of prog %s (max %d)\n", 2284 instance, prog->section_name, prog->instances.nr); 2285 return -EINVAL; 2286 } 2287 2288 err = unlink(path); 2289 if (err != 0) 2290 return -errno; 2291 pr_debug("unpinned program '%s'\n", path); 2292 2293 return 0; 2294 } 2295 2296 static int make_dir(const char *path) 2297 { 2298 char *cp, errmsg[STRERR_BUFSIZE]; 2299 int err = 0; 2300 2301 if (mkdir(path, 0700) && errno != EEXIST) 2302 err = -errno; 2303 2304 if 
(err) { 2305 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); 2306 pr_warning("failed to mkdir %s: %s\n", path, cp); 2307 } 2308 return err; 2309 } 2310 2311 int bpf_program__pin(struct bpf_program *prog, const char *path) 2312 { 2313 int i, err; 2314 2315 err = check_path(path); 2316 if (err) 2317 return err; 2318 2319 if (prog == NULL) { 2320 pr_warning("invalid program pointer\n"); 2321 return -EINVAL; 2322 } 2323 2324 if (prog->instances.nr <= 0) { 2325 pr_warning("no instances of prog %s to pin\n", 2326 prog->section_name); 2327 return -EINVAL; 2328 } 2329 2330 if (prog->instances.nr == 1) { 2331 /* don't create subdirs when pinning single instance */ 2332 return bpf_program__pin_instance(prog, path, 0); 2333 } 2334 2335 err = make_dir(path); 2336 if (err) 2337 return err; 2338 2339 for (i = 0; i < prog->instances.nr; i++) { 2340 char buf[PATH_MAX]; 2341 int len; 2342 2343 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 2344 if (len < 0) { 2345 err = -EINVAL; 2346 goto err_unpin; 2347 } else if (len >= PATH_MAX) { 2348 err = -ENAMETOOLONG; 2349 goto err_unpin; 2350 } 2351 2352 err = bpf_program__pin_instance(prog, buf, i); 2353 if (err) 2354 goto err_unpin; 2355 } 2356 2357 return 0; 2358 2359 err_unpin: 2360 for (i = i - 1; i >= 0; i--) { 2361 char buf[PATH_MAX]; 2362 int len; 2363 2364 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 2365 if (len < 0) 2366 continue; 2367 else if (len >= PATH_MAX) 2368 continue; 2369 2370 bpf_program__unpin_instance(prog, buf, i); 2371 } 2372 2373 rmdir(path); 2374 2375 return err; 2376 } 2377 2378 int bpf_program__unpin(struct bpf_program *prog, const char *path) 2379 { 2380 int i, err; 2381 2382 err = check_path(path); 2383 if (err) 2384 return err; 2385 2386 if (prog == NULL) { 2387 pr_warning("invalid program pointer\n"); 2388 return -EINVAL; 2389 } 2390 2391 if (prog->instances.nr <= 0) { 2392 pr_warning("no instances of prog %s to pin\n", 2393 prog->section_name); 2394 return -EINVAL; 2395 } 2396 2397 if 
(prog->instances.nr == 1) { 2398 /* don't create subdirs when pinning single instance */ 2399 return bpf_program__unpin_instance(prog, path, 0); 2400 } 2401 2402 for (i = 0; i < prog->instances.nr; i++) { 2403 char buf[PATH_MAX]; 2404 int len; 2405 2406 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 2407 if (len < 0) 2408 return -EINVAL; 2409 else if (len >= PATH_MAX) 2410 return -ENAMETOOLONG; 2411 2412 err = bpf_program__unpin_instance(prog, buf, i); 2413 if (err) 2414 return err; 2415 } 2416 2417 err = rmdir(path); 2418 if (err) 2419 return -errno; 2420 2421 return 0; 2422 } 2423 2424 int bpf_map__pin(struct bpf_map *map, const char *path) 2425 { 2426 char *cp, errmsg[STRERR_BUFSIZE]; 2427 int err; 2428 2429 err = check_path(path); 2430 if (err) 2431 return err; 2432 2433 if (map == NULL) { 2434 pr_warning("invalid map pointer\n"); 2435 return -EINVAL; 2436 } 2437 2438 if (bpf_obj_pin(map->fd, path)) { 2439 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 2440 pr_warning("failed to pin map: %s\n", cp); 2441 return -errno; 2442 } 2443 2444 pr_debug("pinned map '%s'\n", path); 2445 2446 return 0; 2447 } 2448 2449 int bpf_map__unpin(struct bpf_map *map, const char *path) 2450 { 2451 int err; 2452 2453 err = check_path(path); 2454 if (err) 2455 return err; 2456 2457 if (map == NULL) { 2458 pr_warning("invalid map pointer\n"); 2459 return -EINVAL; 2460 } 2461 2462 err = unlink(path); 2463 if (err != 0) 2464 return -errno; 2465 pr_debug("unpinned map '%s'\n", path); 2466 2467 return 0; 2468 } 2469 2470 int bpf_object__pin_maps(struct bpf_object *obj, const char *path) 2471 { 2472 struct bpf_map *map; 2473 int err; 2474 2475 if (!obj) 2476 return -ENOENT; 2477 2478 if (!obj->loaded) { 2479 pr_warning("object not yet loaded; load it first\n"); 2480 return -ENOENT; 2481 } 2482 2483 err = make_dir(path); 2484 if (err) 2485 return err; 2486 2487 bpf_object__for_each_map(map, obj) { 2488 char buf[PATH_MAX]; 2489 int len; 2490 2491 len = snprintf(buf, PATH_MAX, 
"%s/%s", path, 2492 bpf_map__name(map)); 2493 if (len < 0) { 2494 err = -EINVAL; 2495 goto err_unpin_maps; 2496 } else if (len >= PATH_MAX) { 2497 err = -ENAMETOOLONG; 2498 goto err_unpin_maps; 2499 } 2500 2501 err = bpf_map__pin(map, buf); 2502 if (err) 2503 goto err_unpin_maps; 2504 } 2505 2506 return 0; 2507 2508 err_unpin_maps: 2509 while ((map = bpf_map__prev(map, obj))) { 2510 char buf[PATH_MAX]; 2511 int len; 2512 2513 len = snprintf(buf, PATH_MAX, "%s/%s", path, 2514 bpf_map__name(map)); 2515 if (len < 0) 2516 continue; 2517 else if (len >= PATH_MAX) 2518 continue; 2519 2520 bpf_map__unpin(map, buf); 2521 } 2522 2523 return err; 2524 } 2525 2526 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) 2527 { 2528 struct bpf_map *map; 2529 int err; 2530 2531 if (!obj) 2532 return -ENOENT; 2533 2534 bpf_object__for_each_map(map, obj) { 2535 char buf[PATH_MAX]; 2536 int len; 2537 2538 len = snprintf(buf, PATH_MAX, "%s/%s", path, 2539 bpf_map__name(map)); 2540 if (len < 0) 2541 return -EINVAL; 2542 else if (len >= PATH_MAX) 2543 return -ENAMETOOLONG; 2544 2545 err = bpf_map__unpin(map, buf); 2546 if (err) 2547 return err; 2548 } 2549 2550 return 0; 2551 } 2552 2553 int bpf_object__pin_programs(struct bpf_object *obj, const char *path) 2554 { 2555 struct bpf_program *prog; 2556 int err; 2557 2558 if (!obj) 2559 return -ENOENT; 2560 2561 if (!obj->loaded) { 2562 pr_warning("object not yet loaded; load it first\n"); 2563 return -ENOENT; 2564 } 2565 2566 err = make_dir(path); 2567 if (err) 2568 return err; 2569 2570 bpf_object__for_each_program(prog, obj) { 2571 char buf[PATH_MAX]; 2572 int len; 2573 2574 len = snprintf(buf, PATH_MAX, "%s/%s", path, 2575 prog->pin_name); 2576 if (len < 0) { 2577 err = -EINVAL; 2578 goto err_unpin_programs; 2579 } else if (len >= PATH_MAX) { 2580 err = -ENAMETOOLONG; 2581 goto err_unpin_programs; 2582 } 2583 2584 err = bpf_program__pin(prog, buf); 2585 if (err) 2586 goto err_unpin_programs; 2587 } 2588 2589 return 0; 

err_unpin_programs:
	/* Roll back: unpin every program pinned before the failure. */
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}

/* Remove pins for all of @obj's programs under directory @path. */
int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__unpin(prog, buf);
		if (err)
			return err;
	}

	return 0;
}

/* Pin all maps and all programs of @obj under @path.  If pinning
 * programs fails, the already-pinned maps are unpinned again.
 */
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err;

	err = bpf_object__pin_maps(obj, path);
	if (err)
		return err;

	err = bpf_object__pin_programs(obj, path);
	if (err) {
		bpf_object__unpin_maps(obj, path);
		return err;
	}

	return 0;
}

/* Tear down @obj: run private-data destructors, release ELF state,
 * unload programs, free BTF data, maps and program storage, unlink
 * from the global object list and free the object itself.
 * Safe to call with NULL.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);
	btf_ext__free(obj->btf_ext);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}

	zfree(&obj->sections.rodata);
	zfree(&obj->sections.data);
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i <
obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	list_del(&obj->list);
	free(obj);
}

/* Iterate over all open bpf_objects; pass NULL to get the first one.
 * Returns NULL when the end of the global list is reached.
 */
struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}

/* Object path used at open time; ERR_PTR(-EINVAL) for NULL @obj. */
const char *bpf_object__name(struct bpf_object *obj)
{
	return obj ? obj->path : ERR_PTR(-EINVAL);
}

unsigned int bpf_object__kversion(struct bpf_object *obj)
{
	return obj ? obj->kern_version : 0;
}

struct btf *bpf_object__btf(struct bpf_object *obj)
{
	return obj ? obj->btf : NULL;
}

/* fd of the loaded BTF, or -1 when the object carries no BTF. */
int bpf_object__btf_fd(const struct bpf_object *obj)
{
	return obj->btf ? btf__fd(obj->btf) : -1;
}

/* Attach caller-private data to @obj; any previous private data is
 * released through its clear callback first.
 */
int bpf_object__set_priv(struct bpf_object *obj, void *priv,
			 bpf_object_clear_priv_t clear_priv)
{
	if (obj->priv && obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	obj->priv = priv;
	obj->clear_priv = clear_priv;
	return 0;
}

void *bpf_object__priv(struct bpf_object *obj)
{
	return obj ? obj->priv : ERR_PTR(-EINVAL);
}

/* Step one program forward (@forward true) or backward from @p within
 * @obj's program array; NULL @p starts from the respective end.
 * Returns NULL when stepping past either end or on a mismatched object.
 */
static struct bpf_program *
__bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward)
{
	size_t nr_programs = obj->nr_programs;
	ssize_t idx;

	if (!nr_programs)
		return NULL;

	if (!p)
		/* Iter from the beginning */
		return forward ?
&obj->programs[0] :
			 &obj->programs[nr_programs - 1];

	if (p->obj != obj) {
		pr_warning("error: program handler doesn't match object\n");
		return NULL;
	}

	idx = (p - obj->programs) + (forward ? 1 : -1);
	if (idx >= obj->nr_programs || idx < 0)
		return NULL;
	return &obj->programs[idx];
}

/* Next "real" program after @prev (NULL starts at the beginning);
 * sub-programs that only serve as function storage are skipped.
 */
struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	struct bpf_program *prog = prev;

	do {
		prog = __bpf_program__iter(prog, obj, true);
	} while (prog && bpf_program__is_function_storage(prog, obj));

	return prog;
}

/* Previous "real" program before @next (NULL starts at the end);
 * function-storage sub-programs are skipped.
 */
struct bpf_program *
bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
{
	struct bpf_program *prog = next;

	do {
		prog = __bpf_program__iter(prog, obj, false);
	} while (prog && bpf_program__is_function_storage(prog, obj));

	return prog;
}

/* Attach caller-private data to @prog, releasing any previous private
 * data through its clear callback first.
 */
int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}

void *bpf_program__priv(struct bpf_program *prog)
{
	return prog ?
prog->priv : ERR_PTR(-EINVAL);
}

void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}

/* Return the program's ELF section name.  With @needs_copy the caller
 * receives a strdup()ed string it must free; otherwise the returned
 * pointer stays owned by @prog.
 */
const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->section_name;
	if (needs_copy) {
		title = strdup(title);
		if (!title) {
			pr_warning("failed to strdup program title\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	return title;
}

/* fd of the program's first (and usually only) instance. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}

/* Install a pre-processor that rewrites the program into
 * @nr_instances variants at load time.  Must be called before the
 * program is loaded (i.e. before any instance fds exist).
 */
int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
			  bpf_program_prep_t prep)
{
	int *instances_fds;

	if (nr_instances <= 0 || !prep)
		return -EINVAL;

	if (prog->instances.nr > 0 || prog->instances.fds) {
		pr_warning("Can't set pre-processor after loading\n");
		return -EINVAL;
	}

	instances_fds = malloc(sizeof(int) * nr_instances);
	if (!instances_fds) {
		pr_warning("alloc memory failed for fds\n");
		return -ENOMEM;
	}

	/* fill all fd with -1 */
	/* memset with byte 0xff yields the int value -1 in every slot */
	memset(instances_fds, -1, sizeof(int) * nr_instances);

	prog->instances.nr = nr_instances;
	prog->instances.fds = instances_fds;
	prog->preprocessor = prep;
	return 0;
}

/* fd of the @n-th loaded instance of @prog; -EINVAL for a bad index,
 * -ENOENT if that instance was skipped by the pre-processor.
 */
int bpf_program__nth_fd(struct bpf_program *prog, int n)
{
	int fd;

	if (!prog)
		return -EINVAL;

	if (n >= prog->instances.nr || n < 0) {
		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
			   n, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	fd = prog->instances.fds[n];
	if (fd < 0) {
		pr_warning("%dth instance of program '%s' is invalid\n",
			   n, prog->section_name);
		return -ENOENT;
	}

	return fd;
}

void bpf_program__set_type(struct bpf_program *prog, enum
bpf_prog_type type)
{
	prog->type = type;
}

static bool bpf_program__is_type(struct bpf_program *prog,
				 enum bpf_prog_type type)
{
	return prog ? (prog->type == type) : false;
}

/* Generate the public bpf_program__set_<NAME>() and
 * bpf_program__is_<NAME>() accessor pair for one program type.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);

void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}

/* Build one section_names[] entry; len is precomputed so the lookup
 * can use strncmp() against the section-name prefix.
 */
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)

/* Table mapping ELF section-name prefixes to program/attach types;
 * consulted by libbpf_prog_type_by_name() and
 * libbpf_attach_type_by_name().  Order matters: the first matching
 * prefix wins.
 */
static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	int is_attachable;
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
		      BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
		      BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
		      BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
		       BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
		       BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
		      BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
		      BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
		      BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
		      BPF_SK_SKB_STREAM_VERDICT),
BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB), 2989 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG, 2990 BPF_SK_MSG_VERDICT), 2991 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2, 2992 BPF_LIRC_MODE2), 2993 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR, 2994 BPF_FLOW_DISSECTOR), 2995 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 2996 BPF_CGROUP_INET4_BIND), 2997 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 2998 BPF_CGROUP_INET6_BIND), 2999 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 3000 BPF_CGROUP_INET4_CONNECT), 3001 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 3002 BPF_CGROUP_INET6_CONNECT), 3003 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 3004 BPF_CGROUP_UDP4_SENDMSG), 3005 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 3006 BPF_CGROUP_UDP6_SENDMSG), 3007 }; 3008 3009 #undef BPF_PROG_SEC_IMPL 3010 #undef BPF_PROG_SEC 3011 #undef BPF_APROG_SEC 3012 #undef BPF_EAPROG_SEC 3013 #undef BPF_APROG_COMPAT 3014 3015 #define MAX_TYPE_NAME_SIZE 32 3016 3017 static char *libbpf_get_type_names(bool attach_type) 3018 { 3019 int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE; 3020 char *buf; 3021 3022 buf = malloc(len); 3023 if (!buf) 3024 return NULL; 3025 3026 buf[0] = '\0'; 3027 /* Forge string buf with all available names */ 3028 for (i = 0; i < ARRAY_SIZE(section_names); i++) { 3029 if (attach_type && !section_names[i].is_attachable) 3030 continue; 3031 3032 if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) { 3033 free(buf); 3034 return NULL; 3035 } 3036 strcat(buf, " "); 3037 strcat(buf, section_names[i].sec); 3038 } 3039 3040 return buf; 3041 } 3042 3043 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, 3044 enum bpf_attach_type *expected_attach_type) 3045 { 3046 char *type_names; 3047 int i; 3048 3049 if (!name) 3050 return -EINVAL; 3051 3052 for (i = 0; i < ARRAY_SIZE(section_names); 
i++) {
		if (strncmp(name, section_names[i].sec, section_names[i].len))
			continue;
		*prog_type = section_names[i].prog_type;
		*expected_attach_type = section_names[i].expected_attach_type;
		return 0;
	}
	pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
	type_names = libbpf_get_type_names(false);
	if (type_names != NULL) {
		pr_info("supported section(type) names are:%s\n", type_names);
		free(type_names);
	}

	return -EINVAL;
}

/* Guess the attach type from an ELF section name.  Returns 0 on
 * success, -EINVAL when nothing matches or the matching entry is not
 * attachable.
 */
int libbpf_attach_type_by_name(const char *name,
			       enum bpf_attach_type *attach_type)
{
	char *type_names;
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (strncmp(name, section_names[i].sec, section_names[i].len))
			continue;
		if (!section_names[i].is_attachable)
			return -EINVAL;
		*attach_type = section_names[i].attach_type;
		return 0;
	}
	pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
	type_names = libbpf_get_type_names(true);
	if (type_names != NULL) {
		pr_info("attachable section(type) names are:%s\n", type_names);
		free(type_names);
	}

	return -EINVAL;
}

/* Thin wrapper: classify @prog by its own section name. */
static int
bpf_program__identify_section(struct bpf_program *prog,
			      enum bpf_prog_type *prog_type,
			      enum bpf_attach_type *expected_attach_type)
{
	return libbpf_prog_type_by_name(prog->section_name, prog_type,
					expected_attach_type);
}

int bpf_map__fd(struct bpf_map *map)
{
	return map ? map->fd : -EINVAL;
}

const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
{
	return map ? &map->def : ERR_PTR(-EINVAL);
}

const char *bpf_map__name(struct bpf_map *map)
{
	return map ? map->name : NULL;
}

__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
{
	return map ?
map->btf_key_type_id : 0;
}

__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
{
	return map ? map->btf_value_type_id : 0;
}

/* Attach caller-private data to @map, releasing any previous private
 * data through its clear callback first.
 */
int bpf_map__set_priv(struct bpf_map *map, void *priv,
		      bpf_map_clear_priv_t clear_priv)
{
	if (!map)
		return -EINVAL;

	if (map->priv) {
		if (map->clear_priv)
			map->clear_priv(map, map->priv);
	}

	map->priv = priv;
	map->clear_priv = clear_priv;
	return 0;
}

void *bpf_map__priv(struct bpf_map *map)
{
	return map ? map->priv : ERR_PTR(-EINVAL);
}

/* perf-event arrays stay on the host even for offloaded objects. */
bool bpf_map__is_offload_neutral(struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

/* True for maps libbpf created internally (e.g. .data/.rodata/.bss). */
bool bpf_map__is_internal(struct bpf_map *map)
{
	return map->libbpf_type != LIBBPF_MAP_UNSPEC;
}

void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}

/* Set the template fd for a map-in-map's inner map; only valid for
 * map-in-map types and only before an inner fd has been set.
 */
int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
	if (!bpf_map_type__is_map_in_map(map->def.type)) {
		pr_warning("error: unsupported map type\n");
		return -EINVAL;
	}
	if (map->inner_map_fd != -1) {
		pr_warning("error: inner_map_fd already specified\n");
		return -EINVAL;
	}
	map->inner_map_fd = fd;
	return 0;
}

/* Step @i positions (+1/-1) from @m within @obj's map array; returns
 * NULL past either end or when @m does not belong to @obj.
 */
static struct bpf_map *
__bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
{
	ssize_t idx;
	struct bpf_map *s, *e;

	if (!obj || !obj->maps)
		return NULL;

	s = obj->maps;
	e = obj->maps + obj->nr_maps;

	if ((m < s) || (m >= e)) {
		pr_warning("error in %s: map handler doesn't belong to object\n",
			   __func__);
		return NULL;
	}

	idx = (m - obj->maps) + i;
	if (idx >= obj->nr_maps || idx < 0)
		return NULL;
	return &obj->maps[idx];
}

struct bpf_map *
bpf_map__next(struct bpf_map *prev, struct
bpf_object *obj)
{
	if (prev == NULL)
		return obj->maps;

	return __bpf_map__iter(prev, obj, 1);
}

struct bpf_map *
bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
{
	if (next == NULL) {
		if (!obj->nr_maps)
			return NULL;
		return obj->maps + obj->nr_maps - 1;
	}

	return __bpf_map__iter(next, obj, -1);
}

/* Linear search for a map by name; NULL when not found. */
struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
{
	struct bpf_map *pos;

	bpf_object__for_each_map(pos, obj) {
		if (pos->name && !strcmp(pos->name, name))
			return pos;
	}
	return NULL;
}

int
bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name)
{
	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
}

/* Look up a map by its ELF "maps"-section offset; ERR_PTR(-ENOENT)
 * when no map sits at @offset.
 */
struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
	int i;

	for (i = 0; i < obj->nr_maps; i++) {
		if (obj->maps[i].offset == offset)
			return &obj->maps[i];
	}
	return ERR_PTR(-ENOENT);
}

/* Decode an ERR_PTR-style pointer into 0 or a negative error code. */
long libbpf_get_error(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}

/* Convenience wrapper around bpf_prog_load_xattr() with only a file
 * and a program type.
 */
int bpf_prog_load(const char *file, enum bpf_prog_type type,
		  struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_prog_load_attr attr;

	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
	attr.file = file;
	attr.prog_type = type;
	attr.expected_attach_type = 0;

	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
}

int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd)
{
	/* NOTE(review): attr is dereferenced by this initializer before
	 * the !attr check further down ever runs — callers must not
	 * pass NULL; confirm whether the check should move up.
	 */
	struct bpf_object_open_attr open_attr = {
		.file = attr->file,
		.prog_type = attr->prog_type,
	};
	struct bpf_program *prog, *first_prog = NULL;
	enum bpf_attach_type expected_attach_type;
	enum bpf_prog_type
prog_type; 3285 struct bpf_object *obj; 3286 struct bpf_map *map; 3287 int err; 3288 3289 if (!attr) 3290 return -EINVAL; 3291 if (!attr->file) 3292 return -EINVAL; 3293 3294 obj = bpf_object__open_xattr(&open_attr); 3295 if (IS_ERR_OR_NULL(obj)) 3296 return -ENOENT; 3297 3298 bpf_object__for_each_program(prog, obj) { 3299 /* 3300 * If type is not specified, try to guess it based on 3301 * section name. 3302 */ 3303 prog_type = attr->prog_type; 3304 prog->prog_ifindex = attr->ifindex; 3305 expected_attach_type = attr->expected_attach_type; 3306 if (prog_type == BPF_PROG_TYPE_UNSPEC) { 3307 err = bpf_program__identify_section(prog, &prog_type, 3308 &expected_attach_type); 3309 if (err < 0) { 3310 bpf_object__close(obj); 3311 return -EINVAL; 3312 } 3313 } 3314 3315 bpf_program__set_type(prog, prog_type); 3316 bpf_program__set_expected_attach_type(prog, 3317 expected_attach_type); 3318 3319 prog->log_level = attr->log_level; 3320 if (!first_prog) 3321 first_prog = prog; 3322 } 3323 3324 bpf_object__for_each_map(map, obj) { 3325 if (!bpf_map__is_offload_neutral(map)) 3326 map->map_ifindex = attr->ifindex; 3327 } 3328 3329 if (!first_prog) { 3330 pr_warning("object file doesn't contain bpf program\n"); 3331 bpf_object__close(obj); 3332 return -ENOENT; 3333 } 3334 3335 err = bpf_object__load(obj); 3336 if (err) { 3337 bpf_object__close(obj); 3338 return -EINVAL; 3339 } 3340 3341 *pobj = obj; 3342 *prog_fd = bpf_program__fd(first_prog); 3343 return 0; 3344 } 3345 3346 enum bpf_perf_event_ret 3347 bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, 3348 void **copy_mem, size_t *copy_size, 3349 bpf_perf_event_print_t fn, void *private_data) 3350 { 3351 struct perf_event_mmap_page *header = mmap_mem; 3352 __u64 data_head = ring_buffer_read_head(header); 3353 __u64 data_tail = header->data_tail; 3354 void *base = ((__u8 *)header) + page_size; 3355 int ret = LIBBPF_PERF_EVENT_CONT; 3356 struct perf_event_header *ehdr; 3357 size_t ehdr_size; 3358 3359 
while (data_head != data_tail) {
		/* mmap_size is a power of two, so masking wraps the tail */
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		/* Record wraps past the end of the ring: reassemble the
		 * two pieces into the (possibly grown) bounce buffer.
		 */
		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* Publish the consumed tail back to the kernel. */
	ring_buffer_write_tail(header, data_tail);
	return ret;
}

/* Describes one variable-length array inside struct bpf_prog_info:
 * where its pointer lives, where its element count lives, and how to
 * find the per-record size.
 */
struct bpf_prog_info_array_desc {
	int	array_offset;	/* e.g. offset of jited_prog_insns */
	int	count_offset;	/* e.g.
offset of jited_prog_len */
	int	size_offset;	/* > 0: offset of rec size,
				 * < 0: fix size of -size_offset
				 */
};

/* One descriptor per BPF_PROG_INFO_* array, indexed by that enum. */
static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},

};

/* Read the __u32 at byte @offset within @info; a negative @offset
 * encodes a fixed value and returns -offset instead.
 */
static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
{
	__u32 *array =
(__u32 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u32)];
	return -(int)offset;
}

/* __u64 variant of the reader above; negative @offset is a fixed value. */
static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u64)];
	return -(int)offset;
}

/* Store @val at byte @offset within @info; no-op for negative
 * (fixed-size) offsets.
 */
static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
					 __u32 val)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u32)] = val;
}

static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
					 __u64 val)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u64)] = val;
}

/* Fetch bpf_prog_info for @fd together with the arrays selected by the
 * @arrays bitmask, packed into one contiguous allocation.  Returns a
 * malloc()ed bpf_prog_info_linear (caller frees) or ERR_PTR on error.
 */
struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 data_len = 0;
	int i, err;
	void *ptr;

	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
		return ERR_PTR(-EINVAL);

	/* step 1: get array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		return ERR_PTR(-EFAULT);
	}

	/* step 2: calculate total size of all arrays */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		bool include_array = (arrays & (1UL << i)) > 0;
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		desc = bpf_prog_info_array_desc + i;

		/* kernel is too old to support this field */
		if (info_len < desc->array_offset + sizeof(__u32) ||
		    info_len < desc->count_offset + sizeof(__u32) ||
		    (desc->size_offset > 0 && info_len < desc->size_offset))
			include_array = false;

		if (!include_array) {
			arrays &=
~(1UL << i);	/* clear the bit */
			continue;
		}

		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);

		data_len += count * size;
	}

	/* step 3: allocate continuous memory */
	data_len = roundup(data_len, sizeof(__u64));
	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
	if (!info_linear)
		return ERR_PTR(-ENOMEM);

	/* step 4: fill data to info_linear->info */
	info_linear->arrays = arrays;
	memset(&info_linear->info, 0, sizeof(info));
	ptr = info_linear->data;

	/* Point each requested array at its slice of the trailing data
	 * buffer so the second syscall fills everything in one shot.
	 */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->count_offset, count);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->size_offset, size);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset,
					     ptr_to_u64(ptr));
		ptr += count * size;
	}

	/* step 5: call syscall again to get required arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		free(info_linear);
		return ERR_PTR(-EFAULT);
	}

	/* step 6: verify the data */
	/* Counts/sizes may race with the program changing between the
	 * two syscalls; warn if they no longer match.
	 */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 v1, v2;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		v2 =
bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->count_offset);
		if (v1 != v2)
			pr_warning("%s: mismatch in element count\n", __func__);

		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->size_offset);
		if (v1 != v2)
			pr_warning("%s: mismatch in rec size\n", __func__);
	}

	/* step 7: update info_len and data_len */
	info_linear->info_len = sizeof(struct bpf_prog_info);
	info_linear->data_len = data_len;

	return info_linear;
}

/* Convert the array pointers inside @info_linear->info to offsets
 * relative to info_linear->data, making the blob self-contained (e.g.
 * for serialization).  Inverse of bpf_program__bpil_offs_to_addr().
 */
void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		offs = addr - ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, offs);
	}
}

/* Convert data offsets back into absolute pointers after the blob has
 * been relocated (e.g. deserialized at a new address).
 */
void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		addr = offs + ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, addr);
	}
}